content
stringlengths
7
1.05M
fixed_cases
stringlengths
1
1.28M
""" # Definition for a Node. class Node(object): def __init__(self, val, children): self.val = val self.children = children """ class Codec: def serialize(self, root): """Encodes a tree to a single string. :type root: Node :rtype: str """ vals = [] def preorder(node): if node: vals.append(str(node.val)) for child in node.children: preorder(child) vals.append('#') preorder(root) return ' '.join(vals) def deserialize(self, data): """Decodes your encoded data to tree. :type data: str :rtype: Node """ if not data: return None stream = iter(data.split()) val = int(next(stream)) root = Node(val, []) def build(node): while True: val = next(stream) if val == "#": break child = Node(int(val), []) node.children.append(child) build(child) build(root) return root # Your Codec object will be instantiated and called as such: # codec = Codec() # codec.deserialize(codec.serialize(root))
""" # Definition for a Node. class Node(object): def __init__(self, val, children): self.val = val self.children = children """ class Codec: def serialize(self, root): """Encodes a tree to a single string. :type root: Node :rtype: str """ vals = [] def preorder(node): if node: vals.append(str(node.val)) for child in node.children: preorder(child) vals.append('#') preorder(root) return ' '.join(vals) def deserialize(self, data): """Decodes your encoded data to tree. :type data: str :rtype: Node """ if not data: return None stream = iter(data.split()) val = int(next(stream)) root = node(val, []) def build(node): while True: val = next(stream) if val == '#': break child = node(int(val), []) node.children.append(child) build(child) build(root) return root
# Copyright (c) Nikita Sychev, 29.04.2017
# Licensed by MIT

a = input()
b = input()
c = input()

# Each column of the three rows holds the letters A, B, C with exactly one
# replaced by '?'; the missing letter's code is recovered by subtracting the
# three visible codes from the fixed column checksum.
checksum = ord('A') + ord('B') + ord('C') + ord('?')
rows = [a, b, c]
solved = ['', '', '']

for col in range(len(a)):
    missing = chr(checksum - ord(a[col]) - ord(b[col]) - ord(c[col]))
    for idx in range(3):
        ch = rows[idx][col]
        solved[idx] += missing if ch == '?' else ch

print(solved[0])
print(solved[1])
print(solved[2])
first = input()
second = input()
third = input()

# Column checksum: A + B + C + '?' — subtracting the three characters that
# actually appear in a column yields the code of the letter hidden by '?'.
checksum = ord('A') + ord('B') + ord('C') + ord('?')
source = (first, second, third)
solved = ['', '', '']

for col in range(len(first)):
    fill = chr(checksum - ord(first[col]) - ord(second[col]) - ord(third[col]))
    for idx in range(3):
        ch = source[idx][col]
        solved[idx] += fill if ch == '?' else ch

print(solved[0])
print(solved[1])
print(solved[2])
def sum(x, y):
    """Print and return the sum of x and y.

    NOTE: shadows the built-in ``sum``; name kept for backward compatibility.
    """
    result = x + y
    print("sum", " =", result)
    return result


def subtract(x, y):
    """Print and return the difference x - y."""
    result = x - y
    print("difference", " =", result)
    return result


def divide(x, y):
    """Print and return the true division x / y."""
    result = x / y
    print("division", " =", result)
    return result


def multiply(x, y):
    """Print and return the product x * y.

    BUG FIX: the original computed x / y here — a copy-paste from divide.
    """
    result = x * y
    print("multiplication", " =", result)
    return result
def sum(x, y):
    """Print and return the sum of x and y.

    NOTE: shadows the built-in ``sum``; name kept for backward compatibility.
    """
    result = x + y
    print('sum', ' =', result)
    return result


def subtract(x, y):
    """Print and return the difference x - y."""
    result = x - y
    print('difference', ' =', result)
    return result


def divide(x, y):
    """Print and return the true division x / y."""
    result = x / y
    print('division', ' =', result)
    return result


def multiply(x, y):
    """Print and return the product x * y.

    BUG FIX: the original computed x / y here — a copy-paste from divide.
    """
    result = x * y
    print('multiplication', ' =', result)
    return result
def plug_in(symbol_values):
    """Derive density-related quantities from a structure object.

    Expects ``symbol_values['s']`` to expose ``sites``, ``volume`` and
    ``density`` attributes (assumption from usage — confirm against caller).
    """
    structure = symbol_values['s']
    atomic_density = len(structure.sites) / structure.volume
    mass_density = float(structure.density)
    return {
        'p': atomic_density,
        'rho': mass_density,
        'v_a': 1 / atomic_density,
        'mbar': mass_density / atomic_density,
    }


DESCRIPTION = """
Model calculating the atomic density from the corresponding 
structure object of the material
"""

config = {
    "name": "density",
    "connections": [
        {
            "inputs": ["s"],
            "outputs": ["p", "rho", "mbar", "v_a"],
        },
    ],
    "categories": ["mechanical"],
    "symbol_property_map": {
        "s": "structure",
        "p": "atomic_density",
        "rho": "density",
        "v_a": "volume_per_atom",
        "mbar": "mass_per_atom",
    },
    "description": DESCRIPTION,
    "references": [],
    "plug_in": plug_in,
}
def plug_in(symbol_values):
    """Compute atomic density, mass density and per-atom quantities.

    Expects ``symbol_values['s']`` to expose ``sites``, ``volume`` and
    ``density`` attributes (assumption from usage — confirm against caller).
    """
    s = symbol_values['s']
    p = len(s.sites) / s.volume          # atoms per unit volume
    rho = float(s.density)               # mass density
    mbar = rho / p                       # mass per atom
    v_a = 1 / p                          # volume per atom
    return {'p': len(s.sites) / s.volume, 'rho': float(s.density), 'v_a': v_a, 'mbar': mbar}


description = '\nModel calculating the atomic density from the corresponding \nstructure object of the material\n'

config = {
    'name': 'density',
    'connections': [{'inputs': ['s'], 'outputs': ['p', 'rho', 'mbar', 'v_a']}],
    'categories': ['mechanical'],
    'symbol_property_map': {'s': 'structure', 'p': 'atomic_density', 'rho': 'density', 'v_a': 'volume_per_atom', 'mbar': 'mass_per_atom'},
    # BUG FIX: referenced undefined name DESCRIPTION; the module-level
    # variable defined above is lowercase `description`.
    'description': description,
    'references': [],
    'plug_in': plug_in,
}
# 110. Balanced Binary Tree
# Runtime: 3232 ms, faster than 5.10% of Python3 online submissions for Balanced Binary Tree.
# Memory Usage: 17.6 MB, less than 100.00% of Python3 online submissions for Balanced Binary Tree.

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right


class Solution:
    # Recursive top-down check: a tree is balanced iff the two subtree
    # heights differ by at most one AND both subtrees are balanced.
    def isBalanced(self, root: TreeNode) -> bool:
        if not root:
            return True

        def height(node: TreeNode) -> int:
            if not node:
                return 0
            return 1 + max(height(node.left), height(node.right))

        heights_ok = abs(height(root.left) - height(root.right)) <= 1
        subtrees_ok = self.isBalanced(root.left) and self.isBalanced(root.right)
        return heights_ok and subtrees_ok
class Solution:

    def is_balanced(self, root: 'TreeNode') -> bool:
        """Return True iff the tree rooted at *root* is height-balanced.

        A tree is balanced when the heights of every node's two subtrees
        differ by at most one. Annotations are quoted because TreeNode is
        declared externally (by the judge environment).
        """
        if not root:
            return True
        else:

            def get_depth(node: 'TreeNode') -> int:
                if not node:
                    return 0
                else:
                    return max(get_depth(node.left), get_depth(node.right)) + 1

            depth_diff = abs(get_depth(root.left) - get_depth(root.right)) <= 1
            # BUG FIX: the recursive call used the old camelCase name
            # `self.isBalanced`, which no longer exists after the rename,
            # raising AttributeError on any non-trivial tree.
            balance_child = self.is_balanced(root.left) and self.is_balanced(root.right)
            return depth_diff and balance_child
# Link: https://leetcode.com/problems/reverse-substrings-between-each-pair-of-parentheses/
# Time: O(N)
# Space: O(N)


def reverse_parentheses(s):
    """Return *s* with every parenthesized substring reversed, innermost first."""
    stack = []
    for ch in s:
        if ch != ")":
            stack.append(ch)
            continue
        # Unwind back to the matching '(' — popping naturally yields the
        # enclosed run in reversed order.
        reversed_run = []
        while stack[-1] != "(":
            reversed_run.append(stack.pop())
        stack.pop()  # discard the '('
        stack.extend(reversed_run)
    return "".join(stack)


def main():
    sample = "(ed(et(oc))el)"
    print(reverse_parentheses(sample))


if __name__ == "__main__":
    main()
def reverse_parentheses(s):
    """Return s with each parenthesised group reversed in place."""
    chars = []
    for symbol in s:
        if symbol == ')':
            # Pop until the matching '(' — the popped run is the enclosed
            # substring already reversed; push it back as-is.
            segment = []
            while chars[-1] != '(':
                segment.append(chars.pop())
            chars.pop()  # drop the opening parenthesis
            chars += segment
        else:
            chars.append(symbol)
    return ''.join(chars)


def main():
    print(reverse_parentheses('(ed(et(oc))el)'))


if __name__ == '__main__':
    main()
x = input("Enter a string") y = input("Enter a string") z = input("Enter a string") for i in range(10): print("This is some output from the lab ",i) print("Your input was " + x) print("Your input was " + y) print("Your input was " + z)
# Collect three strings, emit ten numbered progress lines, echo each input.
x = input('Enter a string')
y = input('Enter a string')
z = input('Enter a string')

for counter in range(10):
    print('This is some output from the lab ', counter)

print('Your input was ' + x)
print('Your input was ' + y)
print('Your input was ' + z)
"""Load dependencies needed to compile CLIF as a 3rd-party consumer.""" load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") LLVM_COMMIT = "1f21de535d37997c41b9b1ecb2f7ca0e472e9f77" # 2021-01-15 LLVM_BAZEL_TAG = "llvm-project-%s" % (LLVM_COMMIT,) LLVM_BAZEL_SHA256 = "f2fd051574fdddae8f8fff81f986d1165b51dc0b62b70d9d47685df9f2d804e1" LLVM_SHA256 = "3620b7e6efa72e73e2e83420d71dfc7fdf4c81cff1ee692f03e3151600250fe0" LLVM_URLS = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ] def clif_deps(): """Load common dependencies needed to compile and use CLIF.""" if not native.existing_rule("llvm-project"): http_archive( name = "llvm-bazel", sha256 = LLVM_BAZEL_SHA256, strip_prefix = "llvm-bazel-{tag}/llvm-bazel".format(tag = LLVM_BAZEL_TAG), url = "https://github.com/google/llvm-bazel/archive/{tag}.tar.gz".format(tag = LLVM_BAZEL_TAG), ) http_archive( name = "llvm-project-raw", build_file_content = "#empty", sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = LLVM_URLS, ) if not native.existing_rule("com_google_protobuf"): http_archive( name = "com_google_protobuf", sha256 = "bf0e5070b4b99240183b29df78155eee335885e53a8af8683964579c214ad301", strip_prefix = "protobuf-3.14.0", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.14.0.zip", "https://github.com/protocolbuffers/protobuf/archive/v3.14.0.zip", ], ) if not native.existing_rule("com_google_absl"): http_archive( name = "com_google_absl", sha256 = "6622893ab117501fc23268a2936e0d46ee6cb0319dcf2275e33a708cd9634ea6", strip_prefix = "abseil-cpp-20200923.3", urls = ["https://github.com/abseil/abseil-cpp/archive/20200923.3.zip"], ) if not native.existing_rule("com_google_googletest"): http_archive( name = "com_google_googletest", sha256 = 
"9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb", strip_prefix = "googletest-release-1.10.0", urls = [ "https://mirror.bazel.build/github.com/google/googletest/archive/release-1.10.0.tar.gz", "https://github.com/google/googletest/archive/release-1.10.0.tar.gz", ], ) if not native.existing_rule("com_github_gflags_gflags"): http_archive( name = "com_github_gflags_gflags", sha256 = "34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf", strip_prefix = "gflags-2.2.2", urls = ["https://github.com/gflags/gflags/archive/v2.2.2.tar.gz"], ) if not native.existing_rule("com_github_google_glog"): http_archive( name = "com_github_google_glog", sha256 = "62efeb57ff70db9ea2129a16d0f908941e355d09d6d83c9f7b18557c0a7ab59e", strip_prefix = "glog-d516278b1cd33cd148e8989aec488b6049a4ca0b", urls = ["https://github.com/google/glog/archive/d516278b1cd33cd148e8989aec488b6049a4ca0b.zip"], ) if not native.existing_rule("io_abseil_py"): http_archive( name = "io_abseil_py", sha256 = "ac357a83c27464f5a612fda94704d0cc4fd4be1f2c0667c1819c4037e875f7aa", strip_prefix = "abseil-py-pypi-v0.11.0", urls = [ "https://mirror.bazel.build/github.com/abseil/abseil-py/archive/pypi-v0.11.0.zip", "https://github.com/abseil/abseil-py/archive/pypi-v0.11.0.zip", ], ) if not native.existing_rule("six_archive"): http_archive( name = "six_archive", build_file = "@com_google_protobuf//:third_party/six.BUILD", sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73", urls = ["https://pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz"], ) # rules_python 0.1.0 is needed for pip_install. Older versions of # rules_python might not have pip_install functionalities. if not native.existing_rule("rules_python"): http_archive( name = "rules_python", sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0", url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz", )
"""Load dependencies needed to compile CLIF as a 3rd-party consumer.""" load('@bazel_tools//tools/build_defs/repo:http.bzl', 'http_archive') llvm_commit = '1f21de535d37997c41b9b1ecb2f7ca0e472e9f77' llvm_bazel_tag = 'llvm-project-%s' % (LLVM_COMMIT,) llvm_bazel_sha256 = 'f2fd051574fdddae8f8fff81f986d1165b51dc0b62b70d9d47685df9f2d804e1' llvm_sha256 = '3620b7e6efa72e73e2e83420d71dfc7fdf4c81cff1ee692f03e3151600250fe0' llvm_urls = ['https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz'.format(commit=LLVM_COMMIT), 'https://github.com/llvm/llvm-project/archive/{commit}.tar.gz'.format(commit=LLVM_COMMIT)] def clif_deps(): """Load common dependencies needed to compile and use CLIF.""" if not native.existing_rule('llvm-project'): http_archive(name='llvm-bazel', sha256=LLVM_BAZEL_SHA256, strip_prefix='llvm-bazel-{tag}/llvm-bazel'.format(tag=LLVM_BAZEL_TAG), url='https://github.com/google/llvm-bazel/archive/{tag}.tar.gz'.format(tag=LLVM_BAZEL_TAG)) http_archive(name='llvm-project-raw', build_file_content='#empty', sha256=LLVM_SHA256, strip_prefix='llvm-project-' + LLVM_COMMIT, urls=LLVM_URLS) if not native.existing_rule('com_google_protobuf'): http_archive(name='com_google_protobuf', sha256='bf0e5070b4b99240183b29df78155eee335885e53a8af8683964579c214ad301', strip_prefix='protobuf-3.14.0', urls=['https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.14.0.zip', 'https://github.com/protocolbuffers/protobuf/archive/v3.14.0.zip']) if not native.existing_rule('com_google_absl'): http_archive(name='com_google_absl', sha256='6622893ab117501fc23268a2936e0d46ee6cb0319dcf2275e33a708cd9634ea6', strip_prefix='abseil-cpp-20200923.3', urls=['https://github.com/abseil/abseil-cpp/archive/20200923.3.zip']) if not native.existing_rule('com_google_googletest'): http_archive(name='com_google_googletest', sha256='9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb', 
strip_prefix='googletest-release-1.10.0', urls=['https://mirror.bazel.build/github.com/google/googletest/archive/release-1.10.0.tar.gz', 'https://github.com/google/googletest/archive/release-1.10.0.tar.gz']) if not native.existing_rule('com_github_gflags_gflags'): http_archive(name='com_github_gflags_gflags', sha256='34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf', strip_prefix='gflags-2.2.2', urls=['https://github.com/gflags/gflags/archive/v2.2.2.tar.gz']) if not native.existing_rule('com_github_google_glog'): http_archive(name='com_github_google_glog', sha256='62efeb57ff70db9ea2129a16d0f908941e355d09d6d83c9f7b18557c0a7ab59e', strip_prefix='glog-d516278b1cd33cd148e8989aec488b6049a4ca0b', urls=['https://github.com/google/glog/archive/d516278b1cd33cd148e8989aec488b6049a4ca0b.zip']) if not native.existing_rule('io_abseil_py'): http_archive(name='io_abseil_py', sha256='ac357a83c27464f5a612fda94704d0cc4fd4be1f2c0667c1819c4037e875f7aa', strip_prefix='abseil-py-pypi-v0.11.0', urls=['https://mirror.bazel.build/github.com/abseil/abseil-py/archive/pypi-v0.11.0.zip', 'https://github.com/abseil/abseil-py/archive/pypi-v0.11.0.zip']) if not native.existing_rule('six_archive'): http_archive(name='six_archive', build_file='@com_google_protobuf//:third_party/six.BUILD', sha256='d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73', urls=['https://pypi.python.org/packages/source/s/six/six-1.12.0.tar.gz']) if not native.existing_rule('rules_python'): http_archive(name='rules_python', sha256='b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0', url='https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz')
class Solution:

    def reverseBits(self, n):
        """Return the integer whose 32-bit pattern is n's low 32 bits reversed."""
        reversed_bits = 0
        for _ in range(32):
            # Shift the accumulator left and bring in n's current low bit.
            reversed_bits = (reversed_bits << 1) | (n & 1)
            n >>= 1
        return reversed_bits
class Solution:

    def reverse_bits(self, n):
        """Reverse the order of the 32 low bits of n and return the result."""
        out = 0
        for shift in range(32):
            # Bit `shift` of the input lands at position 31 - shift.
            if (n >> shift) & 1:
                out |= 1 << (31 - shift)
        return out
# Sum of the series 1/1 + 3/2 + 5/4 + ... + 39/2**19: numerators are the odd
# numbers 1..39, denominators double every term.
numerator = 1
denominator = 1
total = 0
while numerator <= 39:
    total += numerator / denominator
    denominator *= 2
    numerator += 2
print('%.2f' % total)
# Sum the series 1/1 + 3/2 + 5/4 + ... + 39/2**19 and print it to 2 decimals.
i = 1
n = 1
s = int(0)
while i <= 39:
    somar = i / n
    # BUG FIX: the accumulator was written as uppercase `S`, which is
    # undefined; the variable defined above is lowercase `s`.
    s = s + somar
    n = n * 2
    i = i + 2
# BUG FIX: print the lowercase accumulator, not the undefined `S`.
print('%.2f' % s)
'''
Class to define a LIFO Stack
'''


class UnderflowException(Exception):
    '''
    Raised when any element access operation is attempted on an empty stack.
    '''
    pass


class Stack(object):
    '''
    Implements a Stack or a LIFO-style collection of elements.
    '''

    def __init__(self):
        self.stack = []

    def push(self, elem):
        '''
        Push an element onto the top of the stack.
        '''
        self.stack.append(elem)

    def pop(self):
        '''
        Remove and return the top element; raise UnderflowException if empty.
        '''
        if not self.stack:
            raise UnderflowException("Stack is empty!!")
        return self.stack.pop()

    def top(self):
        '''
        Return the top element without removing it, or None when empty.
        '''
        return self.stack[-1] if self.stack else None

    def isEmpty(self):
        '''
        Returns True iff stack is empty.
        '''
        return not self.stack

    def makeCopy(self):
        '''
        Returns a new Stack holding a shallow copy of the elements.
        '''
        duplicate = Stack()
        duplicate.stack = list(self.stack)
        return duplicate
""" Class to define a LIFO Stack """ class Underflowexception(Exception): """ Raised when any element access operation is attempted on an empty stack. """ pass class Stack(object): """ Implements a Stack or a LIFO-style collection of elements. """ def __init__(self): self.stack = [] def push(self, elem): """ Push an element to the top of the stack. """ self.stack.append(elem) def pop(self): """ Remove and returns the top element of the stack. """ if self.stack: return self.stack.pop() else: raise underflow_exception('Stack is empty!!') def top(self): """ Returns the next element of the stack. """ if self.stack: return self.stack[-1] else: return None def is_empty(self): """ Returns True iff stack is empty. """ return len(self.stack) == 0 def make_copy(self): """ Returns a new object which is an exact copy of the current stack """ new_obj = stack() newObj.stack = list(self.stack) return newObj
#!/usr/bin/env python3
"""Jump-instruction interpreters (Advent-of-Code style offset mazes)."""


def eval_jumps1(jumps):
    """Count steps until the program counter leaves the offset list.

    Every visited offset is incremented by one after being followed.
    """
    offsets = list(jumps)
    position = 0
    steps = 0
    while 0 <= position < len(offsets):
        jump = offsets[position]
        offsets[position] = jump + 1
        position += jump
        steps += 1
    return steps


def eval_jumps2(jumps):
    """Variant of eval_jumps1: offsets of three or more decrease instead."""
    offsets = list(jumps)
    position = 0
    steps = 0
    while 0 <= position < len(offsets):
        jump = offsets[position]
        offsets[position] += -1 if jump >= 3 else 1
        position += jump
        steps += 1
    return steps


def main():
    """Read one offset per line from input.txt and print both answers."""
    with open('input.txt') as handle:
        jumps = [int(line) for line in handle]
    print(eval_jumps1(jumps))
    print(eval_jumps2(jumps))


if __name__ == '__main__':
    main()
def eval_jumps1(jumps):
    """Steps taken before the instruction pointer escapes the program.

    Each offset is bumped by one after it is used.
    """
    program = list(jumps)
    ip = 0
    count = 0
    while 0 <= ip < len(program):
        offset = program[ip]
        program[ip] = offset + 1
        ip += offset
        count += 1
    return count


def eval_jumps2(jumps):
    """Like eval_jumps1, but offsets >= 3 decrease by one after use."""
    program = list(jumps)
    ip = 0
    count = 0
    while 0 <= ip < len(program):
        offset = program[ip]
        program[ip] += 1 if offset < 3 else -1
        ip += offset
        count += 1
    return count


def main():
    """Print both step counts for the offsets listed in input.txt."""
    with open('input.txt') as source:
        jumps = [int(text) for text in source]
    print(eval_jumps1(jumps))
    print(eval_jumps2(jumps))


if __name__ == '__main__':
    main()
class Solution:

    def solve(self, n, k):
        """Build the lexicographically smallest k-letter string whose letter
        values (a=1 .. z=26) sum to n.

        Greedy: each position takes the minimum value that still lets the
        remaining positions reach the target sum.
        """
        letters = []
        remaining = n
        for position in range(k):
            slots_left = k - position - 1
            value = max(1, remaining - 26 * slots_left)
            letters.append(value)
            remaining -= value
        return ''.join(chr(ord('a') + v - 1) for v in letters)
class Solution:

    def solve(self, n, k):
        """Smallest k-letter string (a=1..z=26) whose letter values sum to n.

        Walk the positions front to back; a slot takes value 1 unless the
        suffix could not absorb the remainder even at 26 per letter.
        """
        values = []
        for slots_after in range(k - 1, -1, -1):
            take = max(1, n - 26 * slots_after)
            values.append(take)
            n -= take
        return ''.join(chr(ord('a') - 1 + v) for v in values)
load("@build_bazel_rules_nodejs//:index.bzl", "pkg_web") load(":tools/ngsw_config.bzl", _ngsw_config = "ngsw_config") def pkg_pwa( name, srcs, index_html, ngsw_config, additional_root_paths = []): pkg_web( name = "%s_web" % name, srcs = srcs + ["@npm//:node_modules/@angular/service-worker/ngsw-worker.js", "@npm//:node_modules/zone.js/dist/zone.min.js"], additional_root_paths = additional_root_paths + ["npm/node_modules/@angular/service-worker"], visibility = ["//visibility:private"], ) _ngsw_config( name = name, src = ":%s_web" % name, config = ngsw_config, index_html = index_html, tags = ["app"], )
load('@build_bazel_rules_nodejs//:index.bzl', 'pkg_web')
load(':tools/ngsw_config.bzl', _ngsw_config='ngsw_config')

# Package a progressive web app: bundles the app sources with the Angular
# service worker and zone.js, then generates the ngsw configuration for the
# resulting web package.
def pkg_pwa(name, srcs, index_html, ngsw_config, additional_root_paths=[]):
    # Intermediate web package; private because only the ngsw_config step
    # below consumes it.
    pkg_web(
        name='%s_web' % name,
        srcs=srcs + [
            '@npm//:node_modules/@angular/service-worker/ngsw-worker.js',
            '@npm//:node_modules/zone.js/dist/zone.min.js',
        ],
        additional_root_paths=additional_root_paths + ['npm/node_modules/@angular/service-worker'],
        visibility=['//visibility:private'],
    )
    _ngsw_config(
        name=name,
        src=':%s_web' % name,
        config=ngsw_config,
        index_html=index_html,
        tags=['app'],
    )
def calculate_amstrong_numbers_3_digits():
    """Return all three-digit Armstrong numbers.

    A three-digit Armstrong number equals the sum of the cubes of its digits.
    """
    return [
        candidate
        for candidate in range(100, 1000)
        if sum(int(digit) ** 3 for digit in str(candidate)) == candidate
    ]


print("3-digit Amstrong Numbers:")
print(calculate_amstrong_numbers_3_digits())
def calculate_amstrong_numbers_3_digits():
    """List the three-digit numbers equal to the sum of their cubed digits."""

    def is_armstrong(number):
        # Compare the number against the cube-sum of its decimal digits.
        return number == sum(int(d) ** 3 for d in str(number))

    return [n for n in range(100, 1000) if is_armstrong(n)]


print('3-digit Amstrong Numbers:')
print(calculate_amstrong_numbers_3_digits())
def int_to_Roman(num):
    """Greedy conversion of a positive integer (1..3999) to a Roman numeral."""
    pairs = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
        (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
        (5, "V"), (4, "IV"), (1, "I"),
    )
    roman_num = ''
    for value, symbol in pairs:
        while num >= value:
            roman_num += symbol
            num -= value
    return roman_num


num = int(input())
if 1 <= num <= 3999:
    # Repeatedly: render num as a Roman numeral, then reinterpret the
    # letters as digits (A=10, B=11, ...) in a base one larger than the
    # largest letter present; print each value until it leaves [1, 3999].
    while 1 <= num <= 3999:
        roman = int_to_Roman(num)
        biggest = ord(roman[0])
        for ch in roman:
            if ord(ch) > biggest:
                biggest = ord(ch)
        base = biggest - ord('A') + 11
        # Horner evaluation of the letter-digits, most significant first.
        value = 0
        for ch in roman:
            value = value * base + (ord(ch) - ord('A') + 10)
        num = value
        print(num)
else:
    print(num)
def int_to__roman(num):
    """Convert an integer in 1..3999 to its Roman-numeral string, greedily."""
    pairs = ((1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'), (100, 'C'),
             (90, 'XC'), (50, 'L'), (40, 'XL'), (10, 'X'), (9, 'IX'),
             (5, 'V'), (4, 'IV'), (1, 'I'))
    out = ''
    for value, symbol in pairs:
        while num >= value:
            out += symbol
            num -= value
    return out


num = int(input())
if 1 <= num <= 3999:
    # Render num as a Roman numeral, reinterpret its letters as digits
    # (A=10, B=11, ...) in a base one larger than the largest letter, and
    # repeat while the value stays inside [1, 3999], printing each step.
    while 1 <= num <= 3999:
        roman = int_to__roman(num)
        largest = ord(roman[0])
        for ch in roman:
            if ord(ch) > largest:
                largest = ord(ch)
        base = largest - ord('A') + 11
        value = 0
        for ch in roman:
            value = value * base + (ord(ch) - ord('A') + 10)
        num = value
        print(num)
else:
    print(num)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct  5 19:33:09 2019

@author: sodatab
MITx: 6.00.1x
"""

# Midterm-03 Closest Power
# ------------------------
# Implement a function called closest_power that meets the specification
# in the docstring below.


def closest_power(base, num):
    '''
    base: base of the exponential, integer > 1
    num: number you want to be closest to, integer > 0
    Find the integer exponent such that base**exponent is closest to num.
    Note that the base**exponent may be either greater or smaller than num.
    In case of a tie, return the smaller value.
    Returns the exponent.
    '''
    assert base > 1
    assert num > 0
    exponent = 0
    # Grow the exponent until base**exponent first reaches or passes num.
    while base ** exponent < num:
        exponent += 1
    if base ** exponent == num:
        return exponent
    # BUG FIX: the original returned `exponent` whenever the two candidate
    # distances were merely unequal — even when base**(exponent - 1) was
    # strictly closer (e.g. closest_power(3, 12) returned 3, although
    # 3**2 = 9 is closer to 12 than 3**3 = 27).  Compare the distances and
    # prefer the smaller exponent on ties, as the spec requires.
    below = abs(num - base ** (exponent - 1))
    above = abs(base ** exponent - num)
    return exponent - 1 if below <= above else exponent
""" Created on Sat Oct 5 19:33:09 2019 @author: sodatab MITx: 6.00.1x """ "\nMidterm-03 Closest Power\n-------------------------\nImplement a function called closest_power that meets the specifications below.\n\ndef closest_power(base, num):\n '''\n base: base of the exponential, integer > 1\n num: number you want to be closest to, integer > 0\n Find the integer exponent such that base**exponent is closest to num.\n Note that the base**exponent may be either greater or smaller than num.\n In case of a tie, return the smaller value.\n Returns the exponent.\n '''\n" 'Answer Script:' def closest_power(base, num): """ base: base of the exponential, integer > 1 num: number you want to be closest to, integer > 0 Find the integer exponent such that base**exponent is closest to num. Note that the base**exponent may be either greater or smaller than num. In case of a tie, return the smaller value. Returns the exponent. """ assert base > 1 assert num > 0 exponent = 0 while True: if base ** exponent > num: if abs(num - base ** (exponent - 1)) == abs(base ** exponent - num): return exponent - 1 break else: return exponent break elif base ** exponent == num: return exponent break else: exponent += 1 return exponent
# Demonstration module: names inside a function body are resolved when the
# function is *called*, but module-level statements run top to bottom, so
# every annotated line below fails because B is defined only at the end.


class A:
    def f(self):
        # Referencing B here is fine at definition time; the lookup only
        # happens when f() is invoked.
        return B()


a = A()
a.f()  # NameError: name 'B' is not defined


b = B()  # NameError: name 'B' is not defined


def f():
    b = B()


f()  # NameError: name 'B' is not defined


class B:
    pass
# Demonstration module: each annotated statement raises NameError because
# class B is defined only at the bottom of the file.
# BUG FIX: a mechanical lower-casing had turned the references into `b()`,
# `a()` and so on, destroying the intended behavior; the capitalized class
# references are restored below.


class A:
    def f(self):
        # FIX: was `return b()`; the class is named B.  Looking B up here is
        # legal — the name is resolved only when f() is called.
        return B()


a = A()  # FIX: was `a = a()`, a self-reference before any definition.
a.f()  # NameError: name 'B' is not defined (B is declared later)

b = B()  # NameError: name 'B' is not defined


def f():
    b = B()  # FIX: was `b = b()`


f()  # NameError: name 'B' is not defined


class B:
    pass
def reverse_list(items):
    """Reverse *items* in place with two converging indices; return the list."""
    left, right = 0, len(items) - 1
    while left < right:
        items[left], items[right] = items[right], items[left]
        left += 1
        right -= 1
    return items


if __name__ == '__main__':
    items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    print(reverse_list(items))
def reverse_list(items):
    """In-place reversal via symmetric swaps from both ends; returns *items*."""
    size = len(items)
    for offset in range(size // 2):
        mirror = size - 1 - offset
        items[offset], items[mirror] = items[mirror], items[offset]
    return items


if __name__ == '__main__':
    items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    print(reverse_list(items))
#!/usr/bin/python3 # # MAGIC_SEED = 1956 #PATH TRAIN_PATH = 'train' PREDICT_PATH = 'predict' # Dataset properties CSV_PATH="C:\\Users\\dmitr_000\\.keras\\datasets\\Imbalance_data.csv" # Header names DT_DSET ="Date Time" RCPOWER_DSET= "Imbalance" DISCRET =10 # The time cutoffs for the formation of the validation and test sequence in the format of the parameter passed # to the timedelta() like as 'days=<value>' or 'hours=<value>' or 'minutes=<value>' # TEST_CUT_OFF = 60 # 'hours=1' VAL_CUT_OFF = 360 # 'hours=6' 'days=1' # Log files LOG_FILE_NAME="Imbalance" #training model EPOCHS=10 N_STEPS = 32 N_FEATURES = 1 #LSTM models LSTM_POSSIBLE_TYPES={'LSTM':(0,"Vanilla_LSTM"), 'stacked LSTM':(1,"Stacked_LSTM") ,\ 'Bidirectional LSTM':(2,"B_dir_LSTM"),'CNN LSTM':(3,"CNN_LSTM")} LSTM_TYPE='LSTM' UNITS =32 #CNN models FILTERS = 64 KERNEL_SIZE = 2 POOL_SIZE = 2 FOLDER_PATH_SAVED_CNN_MODEL="ConvNN" #MLP model HIDDEN_NEYRONS = 16 DROPOUT = 0.2 FOLDER_PATH_SAVED_MLP_MODEL="MLP" # Chartin. Matplotlib.pyplot is used for charting STOP_ON_CHART_SHOW=False # simple class for logging class _loging(): pass
# Reproducibility seed (NOTE(review): not used in this file; presumably
# consumed by the training code — confirm).
magic_seed = 1956

# Working-directory names.
train_path = 'train'
predict_path = 'predict'

# Dataset location.
csv_path = 'C:\\Users\\dmitr_000\\.keras\\datasets\\Imbalance_data.csv'

# CSV column (header) names.
dt_dset = 'Date Time'
rcpower_dset = 'Imbalance'
discret = 10  # sampling period; units not shown here — confirm (minutes?)

# Cut-offs used to carve out the test and validation sequences; presumably
# passed as timedelta()-style arguments by the consumer — confirm.
test_cut_off = 60
val_cut_off = 360

# Log files.
log_file_name = 'Imbalance'

# Training parameters.
epochs = 10
n_steps = 32
n_features = 1

# LSTM model variants: display name -> (index, model/folder identifier).
lstm_possible_types = {
    'LSTM': (0, 'Vanilla_LSTM'),
    'stacked LSTM': (1, 'Stacked_LSTM'),
    'Bidirectional LSTM': (2, 'B_dir_LSTM'),
    'CNN LSTM': (3, 'CNN_LSTM'),
}
lstm_type = 'LSTM'
units = 32

# CNN parameters.
filters = 64
kernel_size = 2
pool_size = 2
folder_path_saved_cnn_model = 'ConvNN'

# MLP parameters.
hidden_neyrons = 16
dropout = 0.2
folder_path_saved_mlp_model = 'MLP'

# Charting: matplotlib.pyplot is used for charting.
stop_on_chart_show = False

# Simple placeholder class for logging.
class _Loging:
    pass
def unsupervised_distr(distr):
    """Return an unsupervised copy of *distr* plus the rename mapping.

    Every variable in ``var`` and ``cond_var`` except the latent 'z' is
    renamed with an '_u' suffix.
    """
    renames = {name: name + '_u'
               for name in distr.var + distr.cond_var
               if name != 'z'}
    return distr.replace_var(**renames), renames


def unsupervised_distr_no_var(distr):
    """Like unsupervised_distr, but return only the renamed distribution."""
    renames = {name: name + '_u'
               for name in distr.var + distr.cond_var
               if name != 'z'}
    return distr.replace_var(**renames)
def unsupervised_distr(distr):
    """Rename every non-latent variable of *distr* with an '_u' suffix.

    Returns the tuple (renamed_distribution, rename_mapping).
    """
    mapping = {}
    for name in distr.var + distr.cond_var:
        if name != 'z':
            mapping[name] = name + '_u'
    return (distr.replace_var(**mapping), mapping)


def unsupervised_distr_no_var(distr):
    """Same rename as unsupervised_distr, returning only the distribution."""
    mapping = {}
    for name in distr.var + distr.cond_var:
        if name != 'z':
            mapping[name] = name + '_u'
    return distr.replace_var(**mapping)
from typing import List  # BUG FIX: List was used in the annotation but never imported


class Solution:

    def uniqueMorseRepresentations(self, words: List[str]) -> int:
        """Return the number of distinct Morse transliterations of *words*.

        Each word is mapped letter-by-letter ('a'..'z') to Morse code and
        the concatenations are deduplicated with a set.
        """
        # Morse code for 'a' through 'z', in alphabetical order.
        table = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
        return len({''.join(table[ord(c) - ord('a')] for c in word) for word in words})
class Solution: def unique_morse_representations(self, words: List[str]) -> int: table = ['.-', '-...', '-.-.', '-..', '.', '..-.', '--.', '....', '..', '.---', '-.-', '.-..', '--', '-.', '---', '.--.', '--.-', '.-.', '...', '-', '..-', '...-', '.--', '-..-', '-.--', '--..'] return len({''.join((table[ord(c) - ord('a')] for c in word)) for word in words})
#!/bin/python3 """ Recommender algorithm Weight sum of similarity vector """ WEIGHTS = [1, 1, 1, 1] # rank, bgg_url, game_id, names, min_players, max_players, avg_time, min_time, # max_time, year, avg_rating, geek_rating, num_votes, image_url, age, mechanic, # owned, category, designer, weight def recommend_similar(data, row, vectors, names, weights=None, count=20): """Recommend similar items to one that is provided as 1st argument weights is ordered list of weights given to each component in vector. v(geek_rating, category, mechanic, designer, weight) """ if weights is None: weights = WEIGHTS coeffs = {} for key in vectors.keys(): if key == row.game_id: continue similar_coeff = weighted_sum(vectors[key], weights) if similar_coeff in coeffs.keys(): coeffs[similar_coeff].append(key) else: coeffs[similar_coeff] = [key] return sort_dict_values(data, coeffs) def weighted_sum(vector, weights): """Computes weighted sum of componentes of given network and its weights""" # v(category, mechanic, designer, weight) return sum(i*j for i, j in zip(vector, weights)) def sort_dict_values(data, coeffs): srted_keys = sorted(coeffs.keys(), reverse=True) srted = [] for c, k in zip(coeffs.items(), srted_keys): rank = [] for item in coeffs[k]: rank.append(data.ix[data['game_id'] == item]['rank'].values[0]) srted.append(rank) if len(srted) >= 20: break return srted
""" Recommender algorithm Weight sum of similarity vector """ weights = [1, 1, 1, 1] def recommend_similar(data, row, vectors, names, weights=None, count=20): """Recommend similar items to one that is provided as 1st argument weights is ordered list of weights given to each component in vector. v(geek_rating, category, mechanic, designer, weight) """ if weights is None: weights = WEIGHTS coeffs = {} for key in vectors.keys(): if key == row.game_id: continue similar_coeff = weighted_sum(vectors[key], weights) if similar_coeff in coeffs.keys(): coeffs[similar_coeff].append(key) else: coeffs[similar_coeff] = [key] return sort_dict_values(data, coeffs) def weighted_sum(vector, weights): """Computes weighted sum of componentes of given network and its weights""" return sum((i * j for (i, j) in zip(vector, weights))) def sort_dict_values(data, coeffs): srted_keys = sorted(coeffs.keys(), reverse=True) srted = [] for (c, k) in zip(coeffs.items(), srted_keys): rank = [] for item in coeffs[k]: rank.append(data.ix[data['game_id'] == item]['rank'].values[0]) srted.append(rank) if len(srted) >= 20: break return srted
expected_output = { "list_of_neighbors": ["192.168.197.254"], "vrf": { "default": { "neighbor": { "192.168.197.254": { "address_family": { "ipv4 unicast": { "advertise_bit": 0, "bgp_table_version": 1, "dynamic_slow_peer_recovered": "never", "index": 0, "last_detected_dynamic_slow_peer": "never", "last_received_refresh_end_of_rib": "never", "last_received_refresh_start_of_rib": "never", "last_sent_refresh_end_of_rib": "1w5d", "last_sent_refresh_start_of_rib": "1w5d", "local_policy_denied_prefixes_counters": { "inbound": {"total": 0}, "outbound": {"total": 0}, }, "max_nlri": 0, "min_nlri": 0, "neighbor_version": "1/0", "output_queue_size": 0, "prefix_activity_counters": { "received": { "explicit_withdraw": 0, "implicit_withdraw": 0, "prefixes_current": 0, "prefixes_total": 0, "used_as_bestpath": 0, "used_as_multipath": 0, }, "sent": { "explicit_withdraw": 0, "implicit_withdraw": 0, "prefixes_current": 0, "prefixes_total": 0, "used_as_bestpath": "n/a", "used_as_multipath": "n/a", }, }, "refresh_activity_counters": { "received": { "refresh_end_of_rib": 0, "refresh_start_of_rib": 0, }, "sent": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, }, "refresh_epoch": 1, "refresh_out": 0, "slow_peer_detection": False, "slow_peer_split_update_group_dynamic": False, }, "l2vpn vpls": { "advertise_bit": 1, "bgp_table_version": 9431, "community_attribute_sent": True, "dynamic_slow_peer_recovered": "never", "extended_community_attribute_sent": True, "index": 38, "last_detected_dynamic_slow_peer": "never", "last_received_refresh_end_of_rib": "02:01:32", "last_received_refresh_start_of_rib": "02:01:36", "last_sent_refresh_end_of_rib": "02:41:38", "last_sent_refresh_start_of_rib": "02:41:38", "local_policy_denied_prefixes_counters": { "inbound": { "bestpath_from_this_peer": "n/a", "total": 0, }, "outbound": { "bestpath_from_this_peer": 402, "total": 402, }, }, "max_nlri": 199, "min_nlri": 0, "neighbor_version": "9431/0", "output_queue_size": 0, "prefix_activity_counters": { 
"received": { "explicit_withdraw": 0, "implicit_withdraw": 402, "prefixes_total": 603, "used_as_bestpath": 201, "used_as_multipath": 0, }, "sent": { "explicit_withdraw": 307, "implicit_withdraw": 5646, "prefixes_total": 6356, "used_as_bestpath": "n/a", "used_as_multipath": "n/a", }, }, "refresh_activity_counters": { "received": { "refresh_end_of_rib": 2, "refresh_start_of_rib": 2, }, "sent": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, }, "refresh_epoch": 3, "refresh_in": 4, "refresh_out": 0, "route_reflector_client": True, "slow_peer_detection": False, "slow_peer_split_update_group_dynamic": False, "suppress_ldp_signaling": True, "update_group_member": 38, }, "vpnv4 unicast": { "advertise_bit": 2, "bgp_table_version": 29454374, "dynamic_slow_peer_recovered": "never", "extended_community_attribute_sent": True, "index": 44, "last_detected_dynamic_slow_peer": "never", "last_received_refresh_end_of_rib": "02:01:32", "last_received_refresh_start_of_rib": "02:01:36", "last_sent_refresh_end_of_rib": "02:41:11", "last_sent_refresh_start_of_rib": "02:41:38", "local_policy_denied_prefixes_counters": { "inbound": { "bestpath_from_this_peer": "n/a", "total": 0, }, "outbound": { "bestpath_from_this_peer": 3100, "total": 3100, }, }, "max_nlri": 270, "min_nlri": 0, "neighbor_version": "29454374/0", "output_queue_size": 0, "prefix_activity_counters": { "received": { "explicit_withdraw": 206, "implicit_withdraw": 40708, "prefixes_total": 61115, "used_as_bestpath": 20201, "used_as_multipath": 0, }, "sent": { "explicit_withdraw": 1131817, "implicit_withdraw": 64677991, "prefixes_total": 68207251, "used_as_bestpath": "n/a", "used_as_multipath": "n/a", }, }, "refresh_activity_counters": { "received": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, "sent": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, }, "refresh_epoch": 2, "refresh_in": 4, "refresh_out": 27, "route_reflector_client": True, "slow_peer_detection": False, 
"slow_peer_split_update_group_dynamic": False, "update_group_member": 44, }, }, "bgp_neighbor_session": {"sessions": 1}, "bgp_negotiated_capabilities": { "enhanced_refresh": "advertised and received", "four_octets_asn": "advertised and received", "graceful_restart": "advertised and received", "graceful_restart_af_advertised_by_peer": [ "vpnv4 unicast", "l2vpn vpls", ], "ipv4_unicast": "advertised", "l2vpn_vpls": "advertised and received", "multisession": "advertised", "remote_restart_timer": 120, "route_refresh": "advertised and received(new)", "stateful_switchover": "NO for session 1", "vpnv4_unicast": "advertised and received", }, "bgp_negotiated_keepalive_timers": { "hold_time": 90, "keepalive_interval": 30, "min_holdtime": 0, }, "bgp_neighbor_counters": { "messages": { "in_queue_depth": 0, "out_queue_depth": 0, "received": { "keepalives": 346, "notifications": 0, "opens": 1, "route_refresh": 0, "total": 13183, "updates": 12830, }, "sent": { "keepalives": 347, "notifications": 0, "opens": 1, "route_refresh": 4, "total": 12180, "updates": 11824, }, } }, "bgp_session_transport": { "address_tracking_status": "enabled", "connection": { "dropped": 38, "established": 39, "last_reset": "02:42:06", "reset_reason": "Peer closed the session", }, "gr_restart_time": 120, "gr_stalepath_time": 360, "graceful_restart": "enabled", "min_time_between_advertisement_runs": 0, "rib_route_ip": "192.168.197.254", "sso": False, "tcp_connection": False, "tcp_path_mtu_discovery": "enabled", }, "bgp_version": 4, "link": "internal", "remote_as": 5918, "router_id": "192.168.197.254", "session_state": "Idle", "shutdown": False, } } } }, }
expected_output = {'list_of_neighbors': ['192.168.197.254'], 'vrf': {'default': {'neighbor': {'192.168.197.254': {'address_family': {'ipv4 unicast': {'advertise_bit': 0, 'bgp_table_version': 1, 'dynamic_slow_peer_recovered': 'never', 'index': 0, 'last_detected_dynamic_slow_peer': 'never', 'last_received_refresh_end_of_rib': 'never', 'last_received_refresh_start_of_rib': 'never', 'last_sent_refresh_end_of_rib': '1w5d', 'last_sent_refresh_start_of_rib': '1w5d', 'local_policy_denied_prefixes_counters': {'inbound': {'total': 0}, 'outbound': {'total': 0}}, 'max_nlri': 0, 'min_nlri': 0, 'neighbor_version': '1/0', 'output_queue_size': 0, 'prefix_activity_counters': {'received': {'explicit_withdraw': 0, 'implicit_withdraw': 0, 'prefixes_current': 0, 'prefixes_total': 0, 'used_as_bestpath': 0, 'used_as_multipath': 0}, 'sent': {'explicit_withdraw': 0, 'implicit_withdraw': 0, 'prefixes_current': 0, 'prefixes_total': 0, 'used_as_bestpath': 'n/a', 'used_as_multipath': 'n/a'}}, 'refresh_activity_counters': {'received': {'refresh_end_of_rib': 0, 'refresh_start_of_rib': 0}, 'sent': {'refresh_end_of_rib': 1, 'refresh_start_of_rib': 1}}, 'refresh_epoch': 1, 'refresh_out': 0, 'slow_peer_detection': False, 'slow_peer_split_update_group_dynamic': False}, 'l2vpn vpls': {'advertise_bit': 1, 'bgp_table_version': 9431, 'community_attribute_sent': True, 'dynamic_slow_peer_recovered': 'never', 'extended_community_attribute_sent': True, 'index': 38, 'last_detected_dynamic_slow_peer': 'never', 'last_received_refresh_end_of_rib': '02:01:32', 'last_received_refresh_start_of_rib': '02:01:36', 'last_sent_refresh_end_of_rib': '02:41:38', 'last_sent_refresh_start_of_rib': '02:41:38', 'local_policy_denied_prefixes_counters': {'inbound': {'bestpath_from_this_peer': 'n/a', 'total': 0}, 'outbound': {'bestpath_from_this_peer': 402, 'total': 402}}, 'max_nlri': 199, 'min_nlri': 0, 'neighbor_version': '9431/0', 'output_queue_size': 0, 'prefix_activity_counters': {'received': {'explicit_withdraw': 0, 
'implicit_withdraw': 402, 'prefixes_total': 603, 'used_as_bestpath': 201, 'used_as_multipath': 0}, 'sent': {'explicit_withdraw': 307, 'implicit_withdraw': 5646, 'prefixes_total': 6356, 'used_as_bestpath': 'n/a', 'used_as_multipath': 'n/a'}}, 'refresh_activity_counters': {'received': {'refresh_end_of_rib': 2, 'refresh_start_of_rib': 2}, 'sent': {'refresh_end_of_rib': 1, 'refresh_start_of_rib': 1}}, 'refresh_epoch': 3, 'refresh_in': 4, 'refresh_out': 0, 'route_reflector_client': True, 'slow_peer_detection': False, 'slow_peer_split_update_group_dynamic': False, 'suppress_ldp_signaling': True, 'update_group_member': 38}, 'vpnv4 unicast': {'advertise_bit': 2, 'bgp_table_version': 29454374, 'dynamic_slow_peer_recovered': 'never', 'extended_community_attribute_sent': True, 'index': 44, 'last_detected_dynamic_slow_peer': 'never', 'last_received_refresh_end_of_rib': '02:01:32', 'last_received_refresh_start_of_rib': '02:01:36', 'last_sent_refresh_end_of_rib': '02:41:11', 'last_sent_refresh_start_of_rib': '02:41:38', 'local_policy_denied_prefixes_counters': {'inbound': {'bestpath_from_this_peer': 'n/a', 'total': 0}, 'outbound': {'bestpath_from_this_peer': 3100, 'total': 3100}}, 'max_nlri': 270, 'min_nlri': 0, 'neighbor_version': '29454374/0', 'output_queue_size': 0, 'prefix_activity_counters': {'received': {'explicit_withdraw': 206, 'implicit_withdraw': 40708, 'prefixes_total': 61115, 'used_as_bestpath': 20201, 'used_as_multipath': 0}, 'sent': {'explicit_withdraw': 1131817, 'implicit_withdraw': 64677991, 'prefixes_total': 68207251, 'used_as_bestpath': 'n/a', 'used_as_multipath': 'n/a'}}, 'refresh_activity_counters': {'received': {'refresh_end_of_rib': 1, 'refresh_start_of_rib': 1}, 'sent': {'refresh_end_of_rib': 1, 'refresh_start_of_rib': 1}}, 'refresh_epoch': 2, 'refresh_in': 4, 'refresh_out': 27, 'route_reflector_client': True, 'slow_peer_detection': False, 'slow_peer_split_update_group_dynamic': False, 'update_group_member': 44}}, 'bgp_neighbor_session': {'sessions': 1}, 
'bgp_negotiated_capabilities': {'enhanced_refresh': 'advertised and received', 'four_octets_asn': 'advertised and received', 'graceful_restart': 'advertised and received', 'graceful_restart_af_advertised_by_peer': ['vpnv4 unicast', 'l2vpn vpls'], 'ipv4_unicast': 'advertised', 'l2vpn_vpls': 'advertised and received', 'multisession': 'advertised', 'remote_restart_timer': 120, 'route_refresh': 'advertised and received(new)', 'stateful_switchover': 'NO for session 1', 'vpnv4_unicast': 'advertised and received'}, 'bgp_negotiated_keepalive_timers': {'hold_time': 90, 'keepalive_interval': 30, 'min_holdtime': 0}, 'bgp_neighbor_counters': {'messages': {'in_queue_depth': 0, 'out_queue_depth': 0, 'received': {'keepalives': 346, 'notifications': 0, 'opens': 1, 'route_refresh': 0, 'total': 13183, 'updates': 12830}, 'sent': {'keepalives': 347, 'notifications': 0, 'opens': 1, 'route_refresh': 4, 'total': 12180, 'updates': 11824}}}, 'bgp_session_transport': {'address_tracking_status': 'enabled', 'connection': {'dropped': 38, 'established': 39, 'last_reset': '02:42:06', 'reset_reason': 'Peer closed the session'}, 'gr_restart_time': 120, 'gr_stalepath_time': 360, 'graceful_restart': 'enabled', 'min_time_between_advertisement_runs': 0, 'rib_route_ip': '192.168.197.254', 'sso': False, 'tcp_connection': False, 'tcp_path_mtu_discovery': 'enabled'}, 'bgp_version': 4, 'link': 'internal', 'remote_as': 5918, 'router_id': '192.168.197.254', 'session_state': 'Idle', 'shutdown': False}}}}}
class RepositoryTests(TestCase): """Unit tests for Repository operations.""" fixtures = ["test_scmtools"] def setUp(self): super(RepositoryTests, self).setUp() self.local_repo_path = os.path.join(os.path.dirname(__file__), "..", "testdata", "git_repo") self.repository = Repository.objects.create(name="Git test repo", path=self.local_repo_path, tool=Tool.objects.get(name="Git")) self.scmtool_cls = self.repository.get_scmtool().__class__ self.old_get_file = self.scmtool_cls.get_file self.old_file_exists = self.scmtool_cls.file_exists def tearDown(self): super(RepositoryTests, self).tearDown() cache.clear() self.scmtool_cls.get_file = self.old_get_file self.scmtool_cls.file_exists = self.old_file_exists def test_archive(self): """Testing Repository.archive""" self.repository.archive() self.assertTrue(self.repository.name.startswith("ar:Git test repo:")) self.assertTrue(self.repository.archived) self.assertFalse(self.repository.public) self.assertIsNotNone(self.repository.archived_timestamp) repository = Repository.objects.get(pk=self.repository.pk) self.assertEqual(repository.name, self.repository.name) self.assertEqual(repository.archived, self.repository.archived) self.assertEqual(repository.public, self.repository.public) self.assertEqual(repository.archived_timestamp, self.repository.archived_timestamp) def test_archive_no_save(self): """Testing Repository.archive with save=False""" self.repository.archive(save=False) self.assertTrue(self.repository.name.startswith("ar:Git test repo:")) self.assertTrue(self.repository.archived) self.assertFalse(self.repository.public) self.assertIsNotNone(self.repository.archived_timestamp) repository = Repository.objects.get(pk=self.repository.pk) self.assertNotEqual(repository.name, self.repository.name) self.assertNotEqual(repository.archived, self.repository.archived) self.assertNotEqual(repository.public, self.repository.public) self.assertNotEqual(repository.archived_timestamp, self.repository.archived_timestamp) def 
test_clean_without_conflict(self): """Testing Repository.clean without name/path conflicts""" with self.assertNumQueries(1): self.repository.clean() def test_clean_with_name_conflict(self): """Testing Repository.clean with name conflict""" repository = Repository(name=self.repository.name, path="path/to/repo.git", tool=self.repository.tool) with self.assertRaises(ValidationError) as ctx: with self.assertNumQueries(1): repository.clean() self.assertEqual(ctx.exception.message_dict, {"name": ["A repository with this name already exists"]}) def test_clean_with_path_conflict(self): """Testing Repository.clean with path conflict""" repository = Repository(name="New test repo", path=self.repository.path, tool=self.repository.tool) with self.assertRaises(ValidationError) as ctx: with self.assertNumQueries(1): repository.clean() self.assertEqual(ctx.exception.message_dict, {"path": ["A repository with this path already exists"]}) def test_clean_with_name_and_path_conflict(self): """Testing Repository.clean with name and path conflict""" repository = Repository(name=self.repository.name, path=self.repository.path, tool=self.repository.tool) with self.assertRaises(ValidationError) as ctx: with self.assertNumQueries(1): repository.clean() self.assertEqual(ctx.exception.message_dict, {"name": ["A repository with this name already exists"], "path": ["A repository with this path already exists"]}) def test_clean_with_path_conflict_with_archived(self): """Testing Repository.clean with archived repositories ignored for path conflict """ self.repository.archive() repository = Repository(name="New test repo", path=self.repository.path, tool=self.repository.tool) with self.assertNumQueries(1): repository.clean() def test_get_file_caching(self): """Testing Repository.get_file caches result""" def get_file(self, path, revision, **kwargs): num_calls["get_file"] += 1 return b"file data" num_calls = {"get_file": 0} path = "readme" revision = "e965047" request = {} 
self.scmtool_cls.get_file = get_file data1 = self.repository.get_file(path, revision, request=request) data2 = self.repository.get_file(path, revision, request=request) self.assertIsInstance(data1, bytes) self.assertIsInstance(data2, bytes) self.assertEqual(data1, b"file data") self.assertEqual(data1, data2) self.assertEqual(num_calls["get_file"], 1) def test_get_file_signals(self): """Testing Repository.get_file emits signals""" def on_fetching_file(sender, path, revision, request, **kwargs): found_signals.append(("fetching_file", path, revision, request)) def on_fetched_file(sender, path, revision, request, **kwargs): found_signals.append(("fetched_file", path, revision, request)) found_signals = [] fetching_file.connect(on_fetching_file, sender=self.repository) fetched_file.connect(on_fetched_file, sender=self.repository) path = "readme" revision = "e965047" request = {} self.repository.get_file(path, revision, request=request) self.assertEqual(len(found_signals), 2) self.assertEqual(found_signals[0], ("fetching_file", path, revision, request)) self.assertEqual(found_signals[1], ("fetched_file", path, revision, request)) def test_get_file_exists_caching_when_exists(self): """Testing Repository.get_file_exists caches result when exists""" def file_exists(self, path, revision, **kwargs): num_calls["get_file_exists"] += 1 return True num_calls = {"get_file_exists": 0} path = "readme" revision = "e965047" request = {} self.scmtool_cls.file_exists = file_exists exists1 = self.repository.get_file_exists(path, revision, request=request) exists2 = self.repository.get_file_exists(path, revision, request=request) self.assertTrue(exists1) self.assertTrue(exists2) self.assertEqual(num_calls["get_file_exists"], 1) def test_get_file_exists_caching_when_not_exists(self): """Testing Repository.get_file_exists doesn't cache result when the file does not exist """ def file_exists(self, path, revision, **kwargs): num_calls["get_file_exists"] += 1 return False num_calls = 
{"get_file_exists": 0} path = "readme" revision = "12345" request = {} self.scmtool_cls.file_exists = file_exists exists1 = self.repository.get_file_exists(path, revision, request=request) exists2 = self.repository.get_file_exists(path, revision, request=request) self.assertFalse(exists1) self.assertFalse(exists2) self.assertEqual(num_calls["get_file_exists"], 2) def test_get_file_exists_caching_with_fetched_file(self): """Testing Repository.get_file_exists uses get_file's cached result""" def get_file(self, path, revision, **kwargs): num_calls["get_file"] += 1 return b"file data" def file_exists(self, path, revision, **kwargs): num_calls["get_file_exists"] += 1 return True num_calls = {"get_file_exists": 0, "get_file": 0} path = "readme" revision = "e965047" request = {} self.scmtool_cls.get_file = get_file self.scmtool_cls.file_exists = file_exists self.repository.get_file(path, revision, request=request) exists1 = self.repository.get_file_exists(path, revision, request=request) exists2 = self.repository.get_file_exists(path, revision, request=request) self.assertTrue(exists1) self.assertTrue(exists2) self.assertEqual(num_calls["get_file"], 1) self.assertEqual(num_calls["get_file_exists"], 0) def test_get_file_exists_signals(self): """Testing Repository.get_file_exists emits signals""" def on_checking(sender, path, revision, request, **kwargs): found_signals.append(("checking_file_exists", path, revision, request)) def on_checked(sender, path, revision, request, **kwargs): found_signals.append(("checked_file_exists", path, revision, request)) found_signals = [] checking_file_exists.connect(on_checking, sender=self.repository) checked_file_exists.connect(on_checked, sender=self.repository) path = "readme" revision = "e965047" request = {} self.repository.get_file_exists(path, revision, request=request) self.assertEqual(len(found_signals), 2) self.assertEqual(found_signals[0], ("checking_file_exists", path, revision, request)) self.assertEqual(found_signals[1], 
("checked_file_exists", path, revision, request)) def test_repository_name_with_255_characters(self): """Testing Repository.name with 255 characters""" self.repository = Repository.objects.create(name="t" * 255, path=self.local_repo_path, tool=Tool.objects.get(name="Git")) self.assertEqual(len(self.repository.name), 255) def test_is_accessible_by_with_public(self): """Testing Repository.is_accessible_by with public repository""" user = self.create_user() repository = self.create_repository() self.assertTrue(repository.is_accessible_by(user)) self.assertTrue(repository.is_accessible_by(AnonymousUser())) def test_is_accessible_by_with_public_and_hidden(self): """Testing Repository.is_accessible_by with public hidden repository""" user = self.create_user() repository = self.create_repository(visible=False) self.assertTrue(repository.is_accessible_by(user)) self.assertTrue(repository.is_accessible_by(AnonymousUser())) def test_is_accessible_by_with_private_and_not_member(self): """Testing Repository.is_accessible_by with private repository and user not a member """ user = self.create_user() repository = self.create_repository(public=False) self.assertFalse(repository.is_accessible_by(user)) self.assertFalse(repository.is_accessible_by(AnonymousUser())) def test_is_accessible_by_with_private_and_member(self): """Testing Repository.is_accessible_by with private repository and user is a member """ user = self.create_user() repository = self.create_repository(public=False) repository.users.add(user) self.assertTrue(repository.is_accessible_by(user)) def test_is_accessible_by_with_private_and_member_by_group(self): """Testing Repository.is_accessible_by with private repository and user is a member by group """ user = self.create_user() group = self.create_review_group(invite_only=True) group.users.add(user) repository = self.create_repository(public=False) repository.review_groups.add(group) self.assertTrue(repository.is_accessible_by(user)) def 
test_is_accessible_by_with_private_and_superuser(self): """Testing Repository.is_accessible_by with private repository and user is a superuser """ user = self.create_user(is_superuser=True) repository = self.create_repository(public=False) self.assertTrue(repository.is_accessible_by(user)) def test_is_accessible_by_with_private_hidden_not_member(self): """Testing Repository.is_accessible_by with private hidden repository and user not a member """ user = self.create_user() repository = self.create_repository(public=False, visible=False) self.assertFalse(repository.is_accessible_by(user)) def test_is_accessible_by_with_private_hidden_and_member(self): """Testing Repository.is_accessible_by with private hidden repository and user is a member """ user = self.create_user() repository = self.create_repository(public=False, visible=False) repository.users.add(user) self.assertTrue(repository.is_accessible_by(user)) def test_is_accessible_by_with_private_hidden_and_member_by_group(self): """Testing Repository.is_accessible_by with private hidden repository and user is a member """ user = self.create_user() group = self.create_review_group(invite_only=True) group.users.add(user) repository = self.create_repository(public=False, visible=False) repository.review_groups.add(group) self.assertTrue(repository.is_accessible_by(user)) def test_is_accessible_by_with_private_hidden_and_superuser(self): """Testing Repository.is_accessible_by with private hidden repository and superuser """ user = self.create_user(is_superuser=True) repository = self.create_repository(public=False, visible=False) self.assertTrue(repository.is_accessible_by(user)) @add_fixtures(["test_users", "test_site"]) def test_is_accessible_by_with_local_site_accessible(self): """Testing Repository.is_accessible_by with Local Site accessible by user """ user = self.create_user() repository = self.create_repository(with_local_site=True) repository.local_site.users.add(user) 
self.assertTrue(repository.is_accessible_by(user)) @add_fixtures(["test_users", "test_site"]) def test_is_accessible_by_with_local_site_not_accessible(self): """Testing Repository.is_accessible_by with Local Site not accessible by user """ user = self.create_user() repository = self.create_repository(with_local_site=True) self.assertFalse(repository.is_accessible_by(user)) self.assertFalse(repository.is_accessible_by(AnonymousUser()))
class RepositoryTests(TestCase):
    """Unit tests for Repository operations.

    NOTE(review): this block had been mechanically lower-cased, which broke
    it: the class was named ``Repositorytests`` while ``super()`` referenced
    ``RepositoryTests``; the hooks were named ``set_up``/``tear_down``, which
    unittest never calls (so ``self.repository`` was never created); and
    ``repository(...)``/``anonymous_user()`` referenced undefined lowercase
    names. The conventional names are restored here; no test logic changed.
    """

    fixtures = ['test_scmtools']

    def setUp(self):
        super(RepositoryTests, self).setUp()

        self.local_repo_path = os.path.join(os.path.dirname(__file__),
                                            '..', 'testdata', 'git_repo')
        self.repository = Repository.objects.create(
            name='Git test repo',
            path=self.local_repo_path,
            tool=Tool.objects.get(name='Git'))

        # Remember the SCMTool methods so tests may monkey-patch them.
        self.scmtool_cls = self.repository.get_scmtool().__class__
        self.old_get_file = self.scmtool_cls.get_file
        self.old_file_exists = self.scmtool_cls.file_exists

    def tearDown(self):
        super(RepositoryTests, self).tearDown()

        cache.clear()

        # Undo any monkey-patching done by individual tests.
        self.scmtool_cls.get_file = self.old_get_file
        self.scmtool_cls.file_exists = self.old_file_exists

    def test_archive(self):
        """Testing Repository.archive"""
        self.repository.archive()
        self.assertTrue(self.repository.name.startswith('ar:Git test repo:'))
        self.assertTrue(self.repository.archived)
        self.assertFalse(self.repository.public)
        self.assertIsNotNone(self.repository.archived_timestamp)

        repository = Repository.objects.get(pk=self.repository.pk)
        self.assertEqual(repository.name, self.repository.name)
        self.assertEqual(repository.archived, self.repository.archived)
        self.assertEqual(repository.public, self.repository.public)
        self.assertEqual(repository.archived_timestamp,
                         self.repository.archived_timestamp)

    def test_archive_no_save(self):
        """Testing Repository.archive with save=False"""
        self.repository.archive(save=False)
        self.assertTrue(self.repository.name.startswith('ar:Git test repo:'))
        self.assertTrue(self.repository.archived)
        self.assertFalse(self.repository.public)
        self.assertIsNotNone(self.repository.archived_timestamp)

        # save=False means the database row must be unchanged.
        repository = Repository.objects.get(pk=self.repository.pk)
        self.assertNotEqual(repository.name, self.repository.name)
        self.assertNotEqual(repository.archived, self.repository.archived)
        self.assertNotEqual(repository.public, self.repository.public)
        self.assertNotEqual(repository.archived_timestamp,
                            self.repository.archived_timestamp)

    def test_clean_without_conflict(self):
        """Testing Repository.clean without name/path conflicts"""
        with self.assertNumQueries(1):
            self.repository.clean()

    def test_clean_with_name_conflict(self):
        """Testing Repository.clean with name conflict"""
        repository = Repository(name=self.repository.name,
                                path='path/to/repo.git',
                                tool=self.repository.tool)

        with self.assertRaises(ValidationError) as ctx:
            with self.assertNumQueries(1):
                repository.clean()

        self.assertEqual(
            ctx.exception.message_dict,
            {'name': ['A repository with this name already exists']})

    def test_clean_with_path_conflict(self):
        """Testing Repository.clean with path conflict"""
        repository = Repository(name='New test repo',
                                path=self.repository.path,
                                tool=self.repository.tool)

        with self.assertRaises(ValidationError) as ctx:
            with self.assertNumQueries(1):
                repository.clean()

        self.assertEqual(
            ctx.exception.message_dict,
            {'path': ['A repository with this path already exists']})

    def test_clean_with_name_and_path_conflict(self):
        """Testing Repository.clean with name and path conflict"""
        repository = Repository(name=self.repository.name,
                                path=self.repository.path,
                                tool=self.repository.tool)

        with self.assertRaises(ValidationError) as ctx:
            with self.assertNumQueries(1):
                repository.clean()

        self.assertEqual(
            ctx.exception.message_dict,
            {'name': ['A repository with this name already exists'],
             'path': ['A repository with this path already exists']})

    def test_clean_with_path_conflict_with_archived(self):
        """Testing Repository.clean with archived repositories ignored for
        path conflict
        """
        self.repository.archive()

        repository = Repository(name='New test repo',
                                path=self.repository.path,
                                tool=self.repository.tool)

        with self.assertNumQueries(1):
            repository.clean()

    def test_get_file_caching(self):
        """Testing Repository.get_file caches result"""
        def get_file(self, path, revision, **kwargs):
            num_calls['get_file'] += 1
            return b'file data'

        num_calls = {'get_file': 0}
        path = 'readme'
        revision = 'e965047'
        request = {}

        self.scmtool_cls.get_file = get_file

        data1 = self.repository.get_file(path, revision, request=request)
        data2 = self.repository.get_file(path, revision, request=request)

        self.assertIsInstance(data1, bytes)
        self.assertIsInstance(data2, bytes)
        self.assertEqual(data1, b'file data')
        self.assertEqual(data1, data2)
        # Second call must be served from cache.
        self.assertEqual(num_calls['get_file'], 1)

    def test_get_file_signals(self):
        """Testing Repository.get_file emits signals"""
        def on_fetching_file(sender, path, revision, request, **kwargs):
            found_signals.append(('fetching_file', path, revision, request))

        def on_fetched_file(sender, path, revision, request, **kwargs):
            found_signals.append(('fetched_file', path, revision, request))

        found_signals = []

        fetching_file.connect(on_fetching_file, sender=self.repository)
        fetched_file.connect(on_fetched_file, sender=self.repository)

        path = 'readme'
        revision = 'e965047'
        request = {}

        self.repository.get_file(path, revision, request=request)

        self.assertEqual(len(found_signals), 2)
        self.assertEqual(found_signals[0],
                         ('fetching_file', path, revision, request))
        self.assertEqual(found_signals[1],
                         ('fetched_file', path, revision, request))

    def test_get_file_exists_caching_when_exists(self):
        """Testing Repository.get_file_exists caches result when exists"""
        def file_exists(self, path, revision, **kwargs):
            num_calls['get_file_exists'] += 1
            return True

        num_calls = {'get_file_exists': 0}
        path = 'readme'
        revision = 'e965047'
        request = {}

        self.scmtool_cls.file_exists = file_exists

        exists1 = self.repository.get_file_exists(path, revision,
                                                  request=request)
        exists2 = self.repository.get_file_exists(path, revision,
                                                  request=request)

        self.assertTrue(exists1)
        self.assertTrue(exists2)
        self.assertEqual(num_calls['get_file_exists'], 1)

    def test_get_file_exists_caching_when_not_exists(self):
        """Testing Repository.get_file_exists doesn't cache result when the
        file does not exist
        """
        def file_exists(self, path, revision, **kwargs):
            num_calls['get_file_exists'] += 1
            return False

        num_calls = {'get_file_exists': 0}
        path = 'readme'
        revision = '12345'
        request = {}

        self.scmtool_cls.file_exists = file_exists

        exists1 = self.repository.get_file_exists(path, revision,
                                                  request=request)
        exists2 = self.repository.get_file_exists(path, revision,
                                                  request=request)

        self.assertFalse(exists1)
        self.assertFalse(exists2)
        # Negative results are not cached, so the backend is hit twice.
        self.assertEqual(num_calls['get_file_exists'], 2)

    def test_get_file_exists_caching_with_fetched_file(self):
        """Testing Repository.get_file_exists uses get_file's cached result"""
        def get_file(self, path, revision, **kwargs):
            num_calls['get_file'] += 1
            return b'file data'

        def file_exists(self, path, revision, **kwargs):
            num_calls['get_file_exists'] += 1
            return True

        num_calls = {'get_file_exists': 0, 'get_file': 0}
        path = 'readme'
        revision = 'e965047'
        request = {}

        self.scmtool_cls.get_file = get_file
        self.scmtool_cls.file_exists = file_exists

        self.repository.get_file(path, revision, request=request)
        exists1 = self.repository.get_file_exists(path, revision,
                                                  request=request)
        exists2 = self.repository.get_file_exists(path, revision,
                                                  request=request)

        self.assertTrue(exists1)
        self.assertTrue(exists2)
        self.assertEqual(num_calls['get_file'], 1)
        # get_file's cache entry answers the existence checks.
        self.assertEqual(num_calls['get_file_exists'], 0)

    def test_get_file_exists_signals(self):
        """Testing Repository.get_file_exists emits signals"""
        def on_checking(sender, path, revision, request, **kwargs):
            found_signals.append(('checking_file_exists', path,
                                  revision, request))

        def on_checked(sender, path, revision, request, **kwargs):
            found_signals.append(('checked_file_exists', path,
                                  revision, request))

        found_signals = []

        checking_file_exists.connect(on_checking, sender=self.repository)
        checked_file_exists.connect(on_checked, sender=self.repository)

        path = 'readme'
        revision = 'e965047'
        request = {}

        self.repository.get_file_exists(path, revision, request=request)

        self.assertEqual(len(found_signals), 2)
        self.assertEqual(found_signals[0],
                         ('checking_file_exists', path, revision, request))
        self.assertEqual(found_signals[1],
                         ('checked_file_exists', path, revision, request))

    def test_repository_name_with_255_characters(self):
        """Testing Repository.name with 255 characters"""
        self.repository = Repository.objects.create(
            name='t' * 255,
            path=self.local_repo_path,
            tool=Tool.objects.get(name='Git'))

        self.assertEqual(len(self.repository.name), 255)

    def test_is_accessible_by_with_public(self):
        """Testing Repository.is_accessible_by with public repository"""
        user = self.create_user()
        repository = self.create_repository()

        self.assertTrue(repository.is_accessible_by(user))
        self.assertTrue(repository.is_accessible_by(AnonymousUser()))

    def test_is_accessible_by_with_public_and_hidden(self):
        """Testing Repository.is_accessible_by with public hidden
        repository"""
        user = self.create_user()
        repository = self.create_repository(visible=False)

        self.assertTrue(repository.is_accessible_by(user))
        self.assertTrue(repository.is_accessible_by(AnonymousUser()))

    def test_is_accessible_by_with_private_and_not_member(self):
        """Testing Repository.is_accessible_by with private repository and
        user not a member
        """
        user = self.create_user()
        repository = self.create_repository(public=False)

        self.assertFalse(repository.is_accessible_by(user))
        self.assertFalse(repository.is_accessible_by(AnonymousUser()))

    def test_is_accessible_by_with_private_and_member(self):
        """Testing Repository.is_accessible_by with private repository and
        user is a member
        """
        user = self.create_user()

        repository = self.create_repository(public=False)
        repository.users.add(user)

        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_and_member_by_group(self):
        """Testing Repository.is_accessible_by with private repository and
        user is a member by group
        """
        user = self.create_user()

        group = self.create_review_group(invite_only=True)
        group.users.add(user)

        repository = self.create_repository(public=False)
        repository.review_groups.add(group)

        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_and_superuser(self):
        """Testing Repository.is_accessible_by with private repository and
        user is a superuser
        """
        user = self.create_user(is_superuser=True)
        repository = self.create_repository(public=False)

        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_not_member(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and user not a member
        """
        user = self.create_user()
        repository = self.create_repository(public=False, visible=False)

        self.assertFalse(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_and_member(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and user is a member
        """
        user = self.create_user()

        repository = self.create_repository(public=False, visible=False)
        repository.users.add(user)

        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_and_member_by_group(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and user is a member
        """
        user = self.create_user()

        group = self.create_review_group(invite_only=True)
        group.users.add(user)

        repository = self.create_repository(public=False, visible=False)
        repository.review_groups.add(group)

        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_and_superuser(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and superuser
        """
        user = self.create_user(is_superuser=True)
        repository = self.create_repository(public=False, visible=False)

        self.assertTrue(repository.is_accessible_by(user))

    @add_fixtures(['test_users', 'test_site'])
    def test_is_accessible_by_with_local_site_accessible(self):
        """Testing Repository.is_accessible_by with Local Site accessible
        by user
        """
        user = self.create_user()

        repository = self.create_repository(with_local_site=True)
        repository.local_site.users.add(user)

        self.assertTrue(repository.is_accessible_by(user))

    @add_fixtures(['test_users', 'test_site'])
    def test_is_accessible_by_with_local_site_not_accessible(self):
        """Testing Repository.is_accessible_by with Local Site not
        accessible by user
        """
        user = self.create_user()
        repository = self.create_repository(with_local_site=True)

        self.assertFalse(repository.is_accessible_by(user))
        self.assertFalse(repository.is_accessible_by(AnonymousUser()))
def check_paranthesis(inp):
    """Check whether a bracket string is balanced.

    Returns 0 when *inp* is balanced, otherwise the 1-based position of
    the first error: either a closer that does not match the top of the
    stack, or (bug fix) the first opener that is never closed. The
    original returned 0 for leftover openers such as "((", wrongly
    reporting them as balanced.
    """
    pairs = {')': '(', ']': '[', '}': '{'}
    # Stack of (opening char, 1-based position) still awaiting a closer.
    stack = []
    for pos, ch in enumerate(inp, start=1):
        if ch in '([{':
            stack.append((ch, pos))
        elif not stack or stack[-1][0] != pairs.get(ch):
            # Closer with nothing open, or the wrong closer.
            return pos
        else:
            stack.pop()
    if stack:
        # Unclosed openers remain: report the first one left open.
        return stack[0][1]
    return 0


if __name__ == "__main__":
    inp = input()
    print(check_paranthesis(inp))


'''
got a string containing {,},[,],(,). you have to check that the paranthesis are balanced or not.
if yes than print 0 else print the index+1 value where error occurs

input
1. {([])}[]
2. {{[]}}}

output

1. 0
2. 7
'''
def check_paranthesis(inp):
    """Return 0 for a balanced bracket string, else the 1-based position
    of the first mismatched or unexpected closer.

    Note: openers that are never closed (e.g. "((") fall through the loop
    and are reported as balanced (0), matching the original behaviour.
    """
    pairs = {')': '(', ']': '[', '}': '{'}
    pending = []
    for position, ch in enumerate(inp, 1):
        if ch in '([{':
            pending.append(ch)
        elif pending and pending[-1] == pairs.get(ch):
            pending.pop()
        else:
            # Closer with an empty stack, or one that does not match.
            return position
    return 0


if __name__ == '__main__':
    inp = input()
    print(check_paranthesis(inp))
' got a string containing {,},[,],(,). you have to check that the paranthesis are balanced or not.\n if yes than print 0 else print the index+1 value where error occurs\n \ninput\n1. {([])}[]\n2. {{[]}}} \n\noutput\n\n1. 0\n2. 7 '
# Bazel macro that instantiates a native cc_test rule for an S2 test.
def s2test(name, deps = [], size = "small"):
    """Defines a cc_test for the S2 test source `<name>.cc`.

    Args:
      name: target name; also selects the single `<name>.cc` source.
      deps: extra deps, appended after the implicit ":s2testing" dep.
      size: standard Bazel test size attribute.
    """
    native.cc_test(
        name = name,
        srcs = ["%s.cc" % (name)],
        copts = [
            "-Iexternal/gtest/include",
            "-DS2_TEST_DEGENERACIES",
            "-DS2_USE_GFLAGS",
            "-DS2_USE_GLOG",
            "-DHASH_NAMESPACE=std",
            "-Wno-deprecated-declarations",
            "-Wno-format",
            "-Wno-non-virtual-dtor",
            "-Wno-parentheses",
            "-Wno-sign-compare",
            "-Wno-strict-aliasing",
            # "-Wno-unused-function" was listed twice; duplicate removed.
            "-Wno-unused-function",
            "-Wno-unused-private-field",
            "-Wno-unused-variable",
        ],
        deps = [":s2testing"] + deps,
        size = size,
    )
def s2test(name, deps = [], size = "small"):
    """Instantiates a native cc_test for the S2 test source `<name>.cc`.

    Args:
      name: target name; also selects the single `<name>.cc` source.
      deps: extra deps, appended after the implicit ":s2testing" dep.
      size: standard Bazel test size attribute.
    """
    native.cc_test(
        name = name,
        srcs = ["%s.cc" % name],
        copts = [
            "-Iexternal/gtest/include",
            "-DS2_TEST_DEGENERACIES",
            "-DS2_USE_GFLAGS",
            "-DS2_USE_GLOG",
            "-DHASH_NAMESPACE=std",
            "-Wno-deprecated-declarations",
            "-Wno-format",
            "-Wno-non-virtual-dtor",
            "-Wno-parentheses",
            "-Wno-sign-compare",
            "-Wno-strict-aliasing",
            # "-Wno-unused-function" was listed twice; duplicate removed.
            "-Wno-unused-function",
            "-Wno-unused-private-field",
            "-Wno-unused-variable",
        ],
        deps = [":s2testing"] + deps,
        size = size,
    )
#
# PySNMP MIB module CT-DAWANDEVCONN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CT-DAWANDEVCONN-MIB
# Produced by pysmi-0.3.4 at Wed May  1 12:28:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: machine-generated pysmi output. `mibBuilder` is supplied by the
# pysnmp loader at import time — do not hand-edit this module.

# Pull base ASN.1/SMI types and helper classes out of the MIB builder.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
cabletron, = mibBuilder.importSymbols("CTRON-OIDS", "cabletron")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, ModuleIdentity, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Counter32, NotificationType, MibIdentifier, Integer32, iso, Counter64, IpAddress, Unsigned32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "ModuleIdentity", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Counter32", "NotificationType", "MibIdentifier", "Integer32", "iso", "Counter64", "IpAddress", "Unsigned32", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")

# OID registrations: 1.3.6.1.4.1.52 is the Cabletron enterprise arc.
ctSSA = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4497))
daWanDevConn = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4497, 23))

# Table of Demand Access remote WAN connections.
daWanDevConnTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1), )
if mibBuilder.loadTexts: daWanDevConnTable.setStatus('mandatory')
if mibBuilder.loadTexts: daWanDevConnTable.setDescription('A list of Demand Access remote WAN connections')

# One row per (device, connection) pair; indexed by both columns below.
daWanDevConnEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1), ).setIndexNames((0, "CT-DAWANDEVCONN-MIB", "daWanDeviceIndex"), (0, "CT-DAWANDEVCONN-MIB", "daWanConnectionIndex"))
if mibBuilder.loadTexts: daWanDevConnEntry.setStatus('mandatory')
if mibBuilder.loadTexts: daWanDevConnEntry.setDescription('An entry containing wan connection information and statistics.')

# Index column: the device id (read-only, non-negative Integer32).
daWanDeviceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: daWanDeviceIndex.setStatus('mandatory')
if mibBuilder.loadTexts: daWanDeviceIndex.setDescription('This is the index into this table. This index uniquely identifies the connection associated with the device.')

# Index column: the connection id within the device (read-only).
daWanConnectionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: daWanConnectionIndex.setStatus('mandatory')
if mibBuilder.loadTexts: daWanConnectionIndex.setDescription('This is the index into this table. This index uniquely identifies the connection associated with the device.')

# Register the module's symbols so other MIB modules can import them.
mibBuilder.exportSymbols("CT-DAWANDEVCONN-MIB", daWanDeviceIndex=daWanDeviceIndex, daWanConnectionIndex=daWanConnectionIndex, daWanDevConnEntry=daWanDevConnEntry, daWanDevConn=daWanDevConn, daWanDevConnTable=daWanDevConnTable, ctSSA=ctSSA)
# PySNMP MIB module CT-DAWANDEVCONN-MIB (snake_case variant).
# `mibBuilder` is supplied by the pysnmp loader at import time.
#
# Bug fix: the local bindings were renamed to snake_case, but several
# usages (setStatus/setDescription targets and the exportSymbols values)
# still referenced the old camelCase names, which are undefined here.
# All references now use the snake_case bindings; the *exported* symbol
# names (the keyword names passed to exportSymbols) are unchanged, so
# other MIB modules still resolve them.
(object_identifier, octet_string, integer) = mibBuilder.importSymbols('ASN1', 'ObjectIdentifier', 'OctetString', 'Integer')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_size_constraint, value_range_constraint, constraints_union, constraints_intersection, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint', 'ValueRangeConstraint', 'ConstraintsUnion', 'ConstraintsIntersection', 'SingleValueConstraint')
(cabletron,) = mibBuilder.importSymbols('CTRON-OIDS', 'cabletron')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(gauge32, module_identity, object_identity, mib_scalar, mib_table, mib_table_row, mib_table_column, bits, counter32, notification_type, mib_identifier, integer32, iso, counter64, ip_address, unsigned32, time_ticks) = mibBuilder.importSymbols('SNMPv2-SMI', 'Gauge32', 'ModuleIdentity', 'ObjectIdentity', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Bits', 'Counter32', 'NotificationType', 'MibIdentifier', 'Integer32', 'iso', 'Counter64', 'IpAddress', 'Unsigned32', 'TimeTicks')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')

ct_ssa = mib_identifier((1, 3, 6, 1, 4, 1, 52, 4497))
da_wan_dev_conn = mib_identifier((1, 3, 6, 1, 4, 1, 52, 4497, 23))

# Table of Demand Access remote WAN connections.
da_wan_dev_conn_table = mib_table((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1))
if mibBuilder.loadTexts:
    da_wan_dev_conn_table.setStatus('mandatory')
if mibBuilder.loadTexts:
    da_wan_dev_conn_table.setDescription('A list of Demand Access remote WAN connections')

# One row per (device, connection) pair; indexed by the two columns below.
da_wan_dev_conn_entry = mib_table_row((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1)).setIndexNames((0, 'CT-DAWANDEVCONN-MIB', 'daWanDeviceIndex'), (0, 'CT-DAWANDEVCONN-MIB', 'daWanConnectionIndex'))
if mibBuilder.loadTexts:
    da_wan_dev_conn_entry.setStatus('mandatory')
if mibBuilder.loadTexts:
    da_wan_dev_conn_entry.setDescription('An entry containing wan connection information and statistics.')

da_wan_device_index = mib_table_column((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1, 1), integer32().subtype(subtypeSpec=value_range_constraint(0, 2147483647))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
    da_wan_device_index.setStatus('mandatory')
if mibBuilder.loadTexts:
    da_wan_device_index.setDescription('This is the index into this table. This index uniquely identifies the connection associated with the device.')

da_wan_connection_index = mib_table_column((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1, 2), integer32().subtype(subtypeSpec=value_range_constraint(0, 2147483647))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
    da_wan_connection_index.setStatus('mandatory')
if mibBuilder.loadTexts:
    da_wan_connection_index.setDescription('This is the index into this table. This index uniquely identifies the connection associated with the device.')

# Exported names stay camelCase: they are the public MIB symbol names.
mibBuilder.exportSymbols('CT-DAWANDEVCONN-MIB', daWanDeviceIndex=da_wan_device_index, daWanConnectionIndex=da_wan_connection_index, daWanDevConnEntry=da_wan_dev_conn_entry, daWanDevConn=da_wan_dev_conn, daWanDevConnTable=da_wan_dev_conn_table, ctSSA=ct_ssa)
class Node(object):
    """A single node of a singly linked list."""

    def __init__(self, item):
        self.item = item
        self.next = None

    def get_item(self):
        return self.item

    def get_next(self):
        return self.next

    def set_item(self, new_item):
        self.item = new_item

    def set_next(self, new_next):
        self.next = new_next


class LinkedList(object):
    """Singly linked list with head insertion.

    Bug fixes: ``remove`` previously assigned ``found = true`` (undefined
    name -> NameError on any successful match) and looped forever /
    crashed with AttributeError when the item was absent. It now exits
    cleanly, removing the first match if present and doing nothing
    otherwise.
    """

    def __init__(self):
        self.head = None

    def is_empty(self):
        return self.head is None

    def add(self, item):
        """Insert item at the head of the list."""
        temp = Node(item)
        temp.set_next(self.head)
        self.head = temp

    def count_size(self):
        """Return the number of nodes by walking the list."""
        count = 0
        current = self.head
        while current is not None:
            count += 1
            current = current.get_next()
        return count

    def search(self, item):
        """Return True if item occurs anywhere in the list."""
        found = False
        current = self.head
        while not found and current is not None:
            if item == current.get_item():
                found = True
            current = current.get_next()
        return found

    def remove(self, item):
        """Remove the first occurrence of item; no-op if absent."""
        current = self.head
        previous = None
        while current is not None and current.get_item() != item:
            previous = current
            current = current.get_next()
        if current is None:
            return  # Item not in list.
        if previous is None:
            self.head = current.get_next()  # Removing the head node.
        else:
            previous.set_next(current.get_next())
class Node(object):
    """A single node of a singly linked list."""

    def __init__(self, item):
        self.item = item
        self.next = None

    def get_item(self):
        return self.item

    def get_next(self):
        return self.next

    def set_item(self, new_item):
        self.item = new_item

    def set_next(self, new_next):
        self.next = new_next


class Linkedlist(object):
    """Singly linked list with head insertion.

    Bug fixes: ``add`` called undefined ``node(item)`` (NameError) — now
    uses ``Node``; ``remove`` assigned ``found = true`` (NameError) and
    did not terminate when the item was absent — it now removes the first
    match if present and does nothing otherwise.
    """

    def __init__(self):
        self.head = None

    def is_empty(self):
        return self.head is None

    def add(self, item):
        """Insert item at the head of the list."""
        temp = Node(item)
        temp.set_next(self.head)
        self.head = temp

    def count_size(self):
        """Return the number of nodes by walking the list."""
        count = 0
        current = self.head
        while current is not None:
            count += 1
            current = current.get_next()
        return count

    def search(self, item):
        """Return True if item occurs anywhere in the list."""
        found = False
        current = self.head
        while not found and current is not None:
            if item == current.get_item():
                found = True
            current = current.get_next()
        return found

    def remove(self, item):
        """Remove the first occurrence of item; no-op if absent."""
        current = self.head
        previous = None
        while current is not None and current.get_item() != item:
            previous = current
            current = current.get_next()
        if current is None:
            return  # Item not in list.
        if previous is None:
            self.head = current.get_next()  # Removing the head node.
        else:
            previous.set_next(current.get_next())
class Solution(object):
    # Greedy: walk backwards, tracking the leftmost index from which the
    # end is still reachable. (Earlier backtracking/DP drafts were kept
    # here as large commented-out blocks; dead code removed.)
    def canJump(self, nums):
        """Return True if the last index is reachable from index 0.

        :type nums: List[int]  (nums[i] = max jump length from i)
        :rtype: bool
        """
        lastPosition = len(nums) - 1
        for i in range(len(nums) - 1, -1, -1):
            # If we can jump from i past the current good position,
            # i itself becomes the new leftmost good position.
            if i + nums[i] >= lastPosition:
                lastPosition = i
        return lastPosition == 0


sol = Solution()
# Renamed from `input`, which shadowed the builtin.
nums = [3, 2, 1, 0, 4]
output = sol.canJump(nums)
print('res: ', output)
class Solution(object):
    def can_jump(self, nums):
        """Return True if the last index is reachable from index 0.

        Greedy backward scan: track the leftmost index from which the end
        is reachable. Bug fix: the body mixed ``next_max_jump``/
        ``nextMaxJump`` and ``last_position``/``lastPosition`` (NameError
        on first call); the driver also called undefined ``solution()``
        and the non-existent ``canJump`` method. Names are now consistent.

        :type nums: List[int]
        :rtype: bool
        """
        last_position = len(nums) - 1
        for i in range(len(nums) - 1, -1, -1):
            next_max_jump = i + nums[i]
            if next_max_jump >= last_position:
                last_position = i
        return last_position == 0


sol = Solution()
nums = [3, 2, 1, 0, 4]
output = sol.can_jump(nums)
print('res: ', output)
# The major optimization is to do arithmetic in base 10 in the main loop, avoiding division and modulo def compute(): # Initialize n = 1000000000 # The pattern is greater than 10^18, so start searching at 10^9 ndigits = [0] * 10 # In base 10, little-endian temp = n for i in range(len(ndigits)): ndigits[i] = temp % 10 temp //= 10 n2digits = [0] * 19 # Based on length of pattern temp = n * n for i in range(len(n2digits)): n2digits[i] = temp % 10 temp //= 10 # Increment and search while not is_concealed_square(n2digits): # Add 20n + 100 so that n2digits = (n + 10)^2 add_20n(ndigits, n2digits) add_10pow(n2digits, 2) # Since n^2 ends with 0, n must end with 0 n += 10 add_10pow(ndigits, 1) # Now n2digits = n^2 return str(n) def is_concealed_square(n): for i in range(1, 10): # Scan for 1 to 9 if n[20 - i * 2] != i: return False return n[0] == 0 # Special case for 0 def add_10pow(n, i): while n[i] == 9: n[i] = 0 i += 1 n[i] += 1 def add_20n(n, n2): carry = 0 i = 0 while i < len(n): sum = n[i] * 2 + n2[i + 1] + carry n2[i + 1] = sum % 10 carry = sum // 10 i += 1 i += 1 while carry > 0: sum = n2[i] + carry n2[i] = sum % 10 carry = sum // 10 i += 1 if __name__ == "__main__": print(compute())
def compute():
    """Find the smallest n whose square matches the digit pattern checked
    by is_concealed_square, doing all arithmetic in base 10 on
    little-endian digit lists to avoid division/modulo in the main loop.
    """
    n = 1000000000  # The target square has 19 digits, so start at 10^9.
    ndigits = [0] * 10  # Digits of n, little-endian.
    temp = n
    for i in range(len(ndigits)):
        ndigits[i] = temp % 10
        temp //= 10
    n2digits = [0] * 19  # Digits of n^2, little-endian.
    temp = n * n
    for i in range(len(n2digits)):
        n2digits[i] = temp % 10
        temp //= 10
    while not is_concealed_square(n2digits):
        # (n+10)^2 = n^2 + 20n + 100: update the square incrementally.
        add_20n(ndigits, n2digits)
        add_10pow(n2digits, 2)
        # The square ends with 0, so n must end with 0: step by 10.
        n += 10
        add_10pow(ndigits, 1)
    return str(n)


def is_concealed_square(n):
    # Digits at little-endian positions 18,16,...,2 must be 1..9,
    # and the last digit (position 0) must be 0.
    for i in range(1, 10):
        if n[20 - i * 2] != i:
            return False
    return n[0] == 0


def add_10pow(n, i):
    # Add 10^i to the digit list n, rippling the carry upward.
    while n[i] == 9:
        n[i] = 0
        i += 1
    n[i] += 1


def add_20n(n, n2):
    # Add 20 * value(n) into n2 in place: each digit of n contributes
    # twice itself one position higher (the position shift is the x10).
    carry = 0
    i = 0
    while i < len(n):
        sum = n[i] * 2 + n2[i + 1] + carry
        n2[i + 1] = sum % 10
        carry = sum // 10
        i += 1
    i += 1
    while carry > 0:
        sum = n2[i] + carry
        n2[i] = sum % 10
        carry = sum // 10
        i += 1


if __name__ == '__main__':
    print(compute())
def parse_string_time(input_time: str) -> float: total_amount = 0 times = _slice_input_times(input_time) for _amount, duration_type in times: amount = _to_float(_amount) multiplier = _parse_multiplier(duration_type) total_amount += amount * multiplier return total_amount def _parse_multiplier(duration_type): multiplier = 0 if 'ms' == duration_type: multiplier = .001 elif 'sec' == duration_type: multiplier = 1 elif 'min' == duration_type: multiplier = 60 elif 'hr' == duration_type: multiplier = 60 return multiplier def _slice_input_times(input_time: str) -> iter: input_time_chunks = iter(input_time.split(' ')) input_time_tuples = zip(input_time_chunks, input_time_chunks) return input_time_tuples def _to_float(amount: str) -> float: try: amount = float(amount) except: amount = 0.0 return amount
def parse_string_time(input_time: str) -> float:
    """Parse a duration string like ``"2 min 30 sec"`` into seconds.

    The input is whitespace-separated (amount, unit) pairs; unknown units
    contribute 0 and non-numeric amounts are treated as 0.
    """
    total_amount = 0
    times = _slice_input_times(input_time)
    for (_amount, duration_type) in times:
        amount = _to_float(_amount)
        multiplier = _parse_multiplier(duration_type)
        total_amount += amount * multiplier
    return total_amount


def _parse_multiplier(duration_type):
    """Map a unit name to its length in seconds (0 for unknown units)."""
    multiplier = 0
    if 'ms' == duration_type:
        multiplier = 0.001
    elif 'sec' == duration_type:
        multiplier = 1
    elif 'min' == duration_type:
        multiplier = 60
    elif 'hr' == duration_type:
        # Bug fix: was 60 (same as 'min'); an hour is 3600 seconds.
        multiplier = 3600
    return multiplier


def _slice_input_times(input_time: str) -> iter:
    """Yield (amount, unit) pairs from the space-separated input.

    A trailing token without a partner is silently dropped by zip.
    """
    input_time_chunks = iter(input_time.split(' '))
    input_time_tuples = zip(input_time_chunks, input_time_chunks)
    return input_time_tuples


def _to_float(amount: str) -> float:
    """Convert to float, returning 0.0 for unparseable input."""
    try:
        amount = float(amount)
    except (TypeError, ValueError):  # Was a bare except; narrowed.
        amount = 0.0
    return amount
"""This module contains j2cl_js_provider helpers.""" load( "@io_bazel_rules_closure//closure:defs.bzl", "CLOSURE_JS_TOOLCHAIN_ATTRS", "closure_js_binary", "create_closure_js_library", "web_library", ) def create_js_lib_struct(j2cl_info, extra_providers = []): return struct( providers = [j2cl_info] + extra_providers, closure_js_library = j2cl_info._private_.js_info.closure_js_library, exports = j2cl_info._private_.js_info.exports, ) def j2cl_js_provider(ctx, srcs = [], deps = [], exports = [], artifact_suffix = ""): """ Creates a js provider from provided sources, deps and exports. """ default_j2cl_suppresses = [ "analyzerChecks", "JSC_UNKNOWN_EXPR_TYPE", ] suppresses = default_j2cl_suppresses + getattr(ctx.attr, "js_suppress", []) js = create_closure_js_library( ctx, srcs, deps, exports, suppresses, convention = "GOOGLE", ) return struct( closure_js_library = js.closure_js_library, exports = js.exports, ) def js_devserver( name, entry_point_defs, deps, dev_resources, **kwargs): """Creates a development server target.""" closure_js_binary( name = name, compilation_level = "BUNDLE", defs = entry_point_defs, deps = deps, # For J2CL it is in impractical to embed all source into sourcemap since # it bloats sourcemaps as well as it slows down bundling. nodefs = ["--source_map_include_content"], **kwargs ) web_library( name = "%s_server" % name, srcs = dev_resources, path = "/", tags = [ "ibazel_live_reload", # Enable ibazel reload server. "ibazel_notify_changes", # Do not to restart the server on changes. ], ) js_binary = closure_js_binary J2CL_JS_TOOLCHAIN_ATTRS = CLOSURE_JS_TOOLCHAIN_ATTRS J2CL_JS_ATTRS = { "js_suppress": attr.string_list(), } JS_PROVIDER_NAME = "closure_js_library" J2CL_OPTIMIZED_DEFS = [ "--define=goog.DEBUG=false", ] # Place holder until we implement unit testing support for open-source. J2CL_TEST_DEFS = []
"""This module contains j2cl_js_provider helpers.""" load('@io_bazel_rules_closure//closure:defs.bzl', 'CLOSURE_JS_TOOLCHAIN_ATTRS', 'closure_js_binary', 'create_closure_js_library', 'web_library') def create_js_lib_struct(j2cl_info, extra_providers=[]): return struct(providers=[j2cl_info] + extra_providers, closure_js_library=j2cl_info._private_.js_info.closure_js_library, exports=j2cl_info._private_.js_info.exports) def j2cl_js_provider(ctx, srcs=[], deps=[], exports=[], artifact_suffix=''): """ Creates a js provider from provided sources, deps and exports. """ default_j2cl_suppresses = ['analyzerChecks', 'JSC_UNKNOWN_EXPR_TYPE'] suppresses = default_j2cl_suppresses + getattr(ctx.attr, 'js_suppress', []) js = create_closure_js_library(ctx, srcs, deps, exports, suppresses, convention='GOOGLE') return struct(closure_js_library=js.closure_js_library, exports=js.exports) def js_devserver(name, entry_point_defs, deps, dev_resources, **kwargs): """Creates a development server target.""" closure_js_binary(name=name, compilation_level='BUNDLE', defs=entry_point_defs, deps=deps, nodefs=['--source_map_include_content'], **kwargs) web_library(name='%s_server' % name, srcs=dev_resources, path='/', tags=['ibazel_live_reload', 'ibazel_notify_changes']) js_binary = closure_js_binary j2_cl_js_toolchain_attrs = CLOSURE_JS_TOOLCHAIN_ATTRS j2_cl_js_attrs = {'js_suppress': attr.string_list()} js_provider_name = 'closure_js_library' j2_cl_optimized_defs = ['--define=goog.DEBUG=false'] j2_cl_test_defs = []
class Solution(object): def getSum(self, a, b): """ :type a: int :type b: int :rtype: int """ MAX, MASK = 0x7FFFFFFF, 0xFFFFFFFF while b != 0: a, b = (a ^ b) & MASK, ((a & b) << 1) & MASK return a if a <= MAX else ~(a ^ MASK)
class Solution(object): def get_sum(self, a, b): """ :type a: int :type b: int :rtype: int """ (max, mask) = (2147483647, 4294967295) while b != 0: (a, b) = ((a ^ b) & MASK, (a & b) << 1 & MASK) return a if a <= MAX else ~(a ^ MASK)
HITBOX_OCTANE = 0 HITBOX_DOMINUS = 1 HITBOX_PLANK = 2 HITBOX_BREAKOUT = 3 HITBOX_HYBRID = 4 HITBOX_BATMOBILE = 5
hitbox_octane = 0 hitbox_dominus = 1 hitbox_plank = 2 hitbox_breakout = 3 hitbox_hybrid = 4 hitbox_batmobile = 5
N, M = map(int, input().split()) A = list(map(int, input().split())) threshold = sum(A) / (4 * M) if len([a for a in A if a >= threshold]) >= M: print('Yes') else: print('No')
(n, m) = map(int, input().split()) a = list(map(int, input().split())) threshold = sum(A) / (4 * M) if len([a for a in A if a >= threshold]) >= M: print('Yes') else: print('No')
class Solution: def longestCommonPrefix(self, strs: List[str]) -> str: # exception if len(strs) == 0: return "" # sort first! strs.sort() # strategy # compare first and last string in sorted strings! # pick first element in strs pick = strs[0] prefix = '' for i in range (len(pick)): if strs[len(strs)-1][i] == pick[i]: prefix += strs[len(strs)-1][i] else: break return prefix
class Solution: def longest_common_prefix(self, strs: List[str]) -> str: if len(strs) == 0: return '' strs.sort() pick = strs[0] prefix = '' for i in range(len(pick)): if strs[len(strs) - 1][i] == pick[i]: prefix += strs[len(strs) - 1][i] else: break return prefix
class Node: def __init__(self, data): self.data = data self.left = None self.right = None class BST: def __init__(self): self.root = None def _add(self, node, data): if data <= node.data: if node.left: self._add(node.left, data) else: node.left = Node(data) else: if node.right: self._add(node.right, data) else: node.right = Node(data) def add(self, data): if self.root: self._add(self.root, data) else: self.root = Node(data) def same_tree(self, t1, t2): if not t1 and not t2: return True if not t1 or not t2: return False if t1.data != t2.data: return False return self.same_tree(t1.left, t2.left) and self.same_tree(t1.right, t2.right) def main(): B1 = BST() nodes = [8, 3, 10, 1, 6, 4, 7, 13, 14] for n in nodes: B1.add(n) B2 = BST() nodes = [8, 3, 10, 1, 6, 4, 7, 13, 14] for n in nodes: B2.add(n) print(B1.same_tree(B1.root, B2.root)) if __name__ == '__main__': main()
class Node: def __init__(self, data): self.data = data self.left = None self.right = None class Bst: def __init__(self): self.root = None def _add(self, node, data): if data <= node.data: if node.left: self._add(node.left, data) else: node.left = node(data) elif node.right: self._add(node.right, data) else: node.right = node(data) def add(self, data): if self.root: self._add(self.root, data) else: self.root = node(data) def same_tree(self, t1, t2): if not t1 and (not t2): return True if not t1 or not t2: return False if t1.data != t2.data: return False return self.same_tree(t1.left, t2.left) and self.same_tree(t1.right, t2.right) def main(): b1 = bst() nodes = [8, 3, 10, 1, 6, 4, 7, 13, 14] for n in nodes: B1.add(n) b2 = bst() nodes = [8, 3, 10, 1, 6, 4, 7, 13, 14] for n in nodes: B2.add(n) print(B1.same_tree(B1.root, B2.root)) if __name__ == '__main__': main()
class SymbolMapper(object): def __init__(self): self.symbolmap = {0: '0', 1: '+', -1: '-'} @staticmethod def normalize(value): return 0 if value == 0 else value / abs(value) def inputs2symbols(self, inputs): return map( lambda value: self.symbolmap[SymbolMapper.normalize(value)], inputs)
class Symbolmapper(object): def __init__(self): self.symbolmap = {0: '0', 1: '+', -1: '-'} @staticmethod def normalize(value): return 0 if value == 0 else value / abs(value) def inputs2symbols(self, inputs): return map(lambda value: self.symbolmap[SymbolMapper.normalize(value)], inputs)
#!/usr/bin/env python3 # -*- coding: UTF-8 -*- # (c)2021 .direwolf <kururinmiracle@outlook.com> # Licensed under the MIT License. class AffNoteTypeError(Exception): pass class AffNoteIndexError(Exception): pass class AffNoteValueError(Exception): pass class AffSceneTypeError(Exception): pass class AffReadError(Exception): pass
class Affnotetypeerror(Exception): pass class Affnoteindexerror(Exception): pass class Affnotevalueerror(Exception): pass class Affscenetypeerror(Exception): pass class Affreaderror(Exception): pass
class RC4 : def __init__(self): self.S = [] def preprocess_hex_chars(self, text) : """ Preprocess text by decoding hex characters into ASCII characters """ preprocessed_text = '' i = 0 while i < len(text) : if '\\x' == text[i:i+2] : c = int(text[i+2:i+4], base=16) preprocessed_text += chr(c) i += 4 else : preprocessed_text += text[i] i += 1 return preprocessed_text def ksa(self, key) : """ Key-Scheduling Algorithm Initialize S array, and then permute it using a key """ if len(key) < 2 : raise Exception("Key must be at least 2 characters long") self.S = [i for i in range(256)] j = 0 for i in range(256) : j = (j + self.S[i] + ord(key[i % len(key)])) % 256 self.S[i], self.S[j] = self.S[j], self.S[i] def prga(self, plaintext) : """ Pseudo-Random Generation Algorithm Generate keystream by swapping S[i] and S[j], then summing them """ if len(plaintext) == 0 : raise Exception("Plaintext cannot be empty") keystream = '' i = 0; j = 0 for idx in range(len(plaintext)) : i = (i + 1) % 256 j = (j + self.S[i]) % 256 self.S[i], self.S[j] = self.S[j], self.S[i] t = (self.S[i] + self.S[j]) % 256 keystream += chr(self.S[t]) return keystream def encrypt(self, plaintext, key) : """ Encrypt plaintext by given key using RC4 algorithm """ if len(plaintext) == 0 : raise Exception("Plaintext cannot be empty") if len(key) < 2 : raise Exception("Key must be at least 2 characters long") ciphertext = '' self.ksa(key) keystream = self.prga(plaintext) for idx in range(len(plaintext)) : c = chr(ord(keystream[idx]) ^ ord(plaintext[idx])) ciphertext += c if c.isprintable() else r'\x{0:02x}'.format(ord(c)) return ciphertext def decrypt(self, ciphertext, key) : """ Decrypt ciphertext by given key using RC4 algorithm """ if len(ciphertext) == 0 : raise Exception("Ciphertext cannot be empty") if len(key) < 2 : raise Exception("Key must be at least 2 characters long") ciphertext = self.preprocess_hex_chars(ciphertext) plaintext = '' self.ksa(key) keystream = self.prga(ciphertext) for idx in 
range(len(ciphertext)) : p = chr(ord(keystream[idx]) ^ ord(ciphertext[idx])) plaintext += p if p.isprintable() else r'\x{0:02x}'.format(ord(p)) return plaintext # print(r"\x{0:02x}".format(ord('a'))) # print(u'\x61') # print('\x61') # rc4 = RC4() # key = "secret_key" # cip = rc4.encrypt("kriptografi sangat menyenangkan", key) # print(cip) # pla = rc4.decrypt(cip, key) # print(pla)
class Rc4: def __init__(self): self.S = [] def preprocess_hex_chars(self, text): """ Preprocess text by decoding hex characters into ASCII characters """ preprocessed_text = '' i = 0 while i < len(text): if '\\x' == text[i:i + 2]: c = int(text[i + 2:i + 4], base=16) preprocessed_text += chr(c) i += 4 else: preprocessed_text += text[i] i += 1 return preprocessed_text def ksa(self, key): """ Key-Scheduling Algorithm Initialize S array, and then permute it using a key """ if len(key) < 2: raise exception('Key must be at least 2 characters long') self.S = [i for i in range(256)] j = 0 for i in range(256): j = (j + self.S[i] + ord(key[i % len(key)])) % 256 (self.S[i], self.S[j]) = (self.S[j], self.S[i]) def prga(self, plaintext): """ Pseudo-Random Generation Algorithm Generate keystream by swapping S[i] and S[j], then summing them """ if len(plaintext) == 0: raise exception('Plaintext cannot be empty') keystream = '' i = 0 j = 0 for idx in range(len(plaintext)): i = (i + 1) % 256 j = (j + self.S[i]) % 256 (self.S[i], self.S[j]) = (self.S[j], self.S[i]) t = (self.S[i] + self.S[j]) % 256 keystream += chr(self.S[t]) return keystream def encrypt(self, plaintext, key): """ Encrypt plaintext by given key using RC4 algorithm """ if len(plaintext) == 0: raise exception('Plaintext cannot be empty') if len(key) < 2: raise exception('Key must be at least 2 characters long') ciphertext = '' self.ksa(key) keystream = self.prga(plaintext) for idx in range(len(plaintext)): c = chr(ord(keystream[idx]) ^ ord(plaintext[idx])) ciphertext += c if c.isprintable() else '\\x{0:02x}'.format(ord(c)) return ciphertext def decrypt(self, ciphertext, key): """ Decrypt ciphertext by given key using RC4 algorithm """ if len(ciphertext) == 0: raise exception('Ciphertext cannot be empty') if len(key) < 2: raise exception('Key must be at least 2 characters long') ciphertext = self.preprocess_hex_chars(ciphertext) plaintext = '' self.ksa(key) keystream = self.prga(ciphertext) for idx in 
range(len(ciphertext)): p = chr(ord(keystream[idx]) ^ ord(ciphertext[idx])) plaintext += p if p.isprintable() else '\\x{0:02x}'.format(ord(p)) return plaintext
# Ignore file list ignore_filelist = [ 'teslagun.activeitem', 'teslagun2.activeitem', ] ignore_filelist_patch = [ ]
ignore_filelist = ['teslagun.activeitem', 'teslagun2.activeitem'] ignore_filelist_patch = []
# This is input for <FileUploadToCommons.py> that actually writes the content to Wikimedia commons using the API #See https://pypi.org/project/mwtemplates/ # ===============BEGIN TEMPLETE====================== # Lets use a minimally filled {{Infomormation}} template - https://commons.wikimedia.org/wiki/Template:Information fileTemplate = """ =={{{{int:filedesc}}}}== {{{{Information |author = {author} |description = {{{{en|1=Page {page} of the Album amicorum Jacob Heyblocq KB131H26}}}} |source = https://resolver.kb.nl/resolve?urn=EuropeanaTravel:131H26:{page} }}}} =={{{{int:license-header}}}}== {{{{Koninklijke Bibliotheek}}}} {{{{PD-art|PD-old-70-1923}}}} [[Category:Album amicorum van Jacobus Heyblocq]] """ # ==============END TEMPLATE==================== def writeFileTemplate(dataframe,rowdict): # input = 1 full row from the Excel sheet, formatted as dict # Input = 1 row from Excel file, as dict # Ouput = Commons source code for a file page, based on Information-template fileText = fileTemplate.format( page = rowdict['page'], author = rowdict['contributorname'].strip() ) return fileText
file_template = '\n=={{{{int:filedesc}}}}==\n{{{{Information\n |author = {author}\n |description = {{{{en|1=Page {page} of the Album amicorum Jacob Heyblocq KB131H26}}}} \n |source = https://resolver.kb.nl/resolve?urn=EuropeanaTravel:131H26:{page}\n}}}}\n\n=={{{{int:license-header}}}}==\n{{{{Koninklijke Bibliotheek}}}}\n{{{{PD-art|PD-old-70-1923}}}}\n\n[[Category:Album amicorum van Jacobus Heyblocq]]\n' def write_file_template(dataframe, rowdict): file_text = fileTemplate.format(page=rowdict['page'], author=rowdict['contributorname'].strip()) return fileText
# funcao como parametro de funcao # so imprime se o numero estiver correto def imprime_com_condicao(num, fcond): if fcond(num): print(num) def par(x): return x % 2 == 0 def impar(x): return not par(x) # Programa Principal # neste caso nao imprimira imprime_com_condicao(5, par)
def imprime_com_condicao(num, fcond): if fcond(num): print(num) def par(x): return x % 2 == 0 def impar(x): return not par(x) imprime_com_condicao(5, par)
n = int(input().strip()) a = list(map(int, input().strip().split(' '))) swaps = 0 for i in range(n): temp=0 for j in range(n-1): if a[j] > a[j+1]: temp = a[j] a[j] = a[j+1] a[j+1] = temp swaps+=1 if swaps==0: print("Array is sorted in", swaps, "swaps.") print("First Element:", a[0]) print("Last Element:", a[-1]) else: print("Array is sorted in", swaps, "swaps.") print("First Element:", a[0]) print("Last Element:", a[-1])
n = int(input().strip()) a = list(map(int, input().strip().split(' '))) swaps = 0 for i in range(n): temp = 0 for j in range(n - 1): if a[j] > a[j + 1]: temp = a[j] a[j] = a[j + 1] a[j + 1] = temp swaps += 1 if swaps == 0: print('Array is sorted in', swaps, 'swaps.') print('First Element:', a[0]) print('Last Element:', a[-1]) else: print('Array is sorted in', swaps, 'swaps.') print('First Element:', a[0]) print('Last Element:', a[-1])
t = int(input()) for i in range(t): r = int(input()) length = r*5 width = length*0.6 left = -1*length*0.45 right = length*0.55 w = width/2 # print coordinates print('Case '+str(i+1)+':') # upper left print("%.0f %.0f" % (left, w)) # upper right print("%.0f %.0f" % (right, w)) # lower right print("%.0f %.0f" % (right, -1*w)) # lower left print("%.0f %.0f" % (left, -1*w))
t = int(input()) for i in range(t): r = int(input()) length = r * 5 width = length * 0.6 left = -1 * length * 0.45 right = length * 0.55 w = width / 2 print('Case ' + str(i + 1) + ':') print('%.0f %.0f' % (left, w)) print('%.0f %.0f' % (right, w)) print('%.0f %.0f' % (right, -1 * w)) print('%.0f %.0f' % (left, -1 * w))
myString = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj." inTab = "yzabcdefghijklmnopqrstuvwx" outTab = "abcdefghijklmnopqrstuvwxyz" transTab = str.maketrans(inTab, outTab) outString = myString.translate(transTab) print(outString) urlStr = "map" outUrl = urlStr.translate(transTab) print(outUrl)
my_string = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj." in_tab = 'yzabcdefghijklmnopqrstuvwx' out_tab = 'abcdefghijklmnopqrstuvwxyz' trans_tab = str.maketrans(inTab, outTab) out_string = myString.translate(transTab) print(outString) url_str = 'map' out_url = urlStr.translate(transTab) print(outUrl)
def sortByHeight(a): heights = [] # Store all the heights in a list for i in range(len(a)): if a[i] != -1: heights.append(a[i]) # Sort the heights heights = sorted(heights) # Replace the heights in the original list j = 0 for i in range(len(a)): if a[i] != -1: a[i] = heights[j] j += 1 return a
def sort_by_height(a): heights = [] for i in range(len(a)): if a[i] != -1: heights.append(a[i]) heights = sorted(heights) j = 0 for i in range(len(a)): if a[i] != -1: a[i] = heights[j] j += 1 return a
# -*- coding: utf-8 -*- """ Created on Sat Feb 29 09:54:51 2020 @author: bruger """
""" Created on Sat Feb 29 09:54:51 2020 @author: bruger """
class Solution: def reverseWords(self, set): return ' '.join(set.split()[::-1]) if __name__ == "__main__": solution = Solution() print(solution.reverseWords("the sky is blue")) print(solution.reverseWords(" hello world! "))
class Solution: def reverse_words(self, set): return ' '.join(set.split()[::-1]) if __name__ == '__main__': solution = solution() print(solution.reverseWords('the sky is blue')) print(solution.reverseWords(' hello world! '))
class SleuthError(Exception): pass class SleuthNotFoundError(SleuthError): pass
class Sleutherror(Exception): pass class Sleuthnotfounderror(SleuthError): pass
def insertion_sort(nums: list[float]) -> list[float]: for start in range(1, len(nums)): index = start while nums[index] < nums[index - 1] and index > 0: nums[index], nums[index - 1] = nums[index - 1], nums[index] index -= 1 return nums
def insertion_sort(nums: list[float]) -> list[float]: for start in range(1, len(nums)): index = start while nums[index] < nums[index - 1] and index > 0: (nums[index], nums[index - 1]) = (nums[index - 1], nums[index]) index -= 1 return nums
# coding:utf-8 class FakeBot(object): """Fake Bot object """ def __init__(self): self.msg = '' def send_message(self, text='', **kwargs): self.msg = text class FakeUpdate(object): def __init__(self): self.message = FakeMessage() class FakeMessage(object): """Docstring for FakeMessage. """ def __init__(self): """TODO: to be defined1. """ self.chat_id = 1
class Fakebot(object): """Fake Bot object """ def __init__(self): self.msg = '' def send_message(self, text='', **kwargs): self.msg = text class Fakeupdate(object): def __init__(self): self.message = fake_message() class Fakemessage(object): """Docstring for FakeMessage. """ def __init__(self): """TODO: to be defined1. """ self.chat_id = 1
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. bing_edge = { 'color': { 'color': '#228372' }, 'title': 'Bing-related Parsing Functions', 'label': 'B' } def run(unfurl, node): if node.data_type == 'url.query.pair': if 'bing' in unfurl.find_preceding_domain(node): if node.key == 'pq': unfurl.add_to_queue( data_type='descriptor', key=None, value=f'"Previous" Search Query: {node.value}', hover='Previous terms entered by the user; auto-complete or suggestions <br>' 'may have been used to reach the actual search terms (in <b>q</b>)', parent_id=node.node_id, incoming_edge_config=bing_edge) elif node.key == 'q': unfurl.add_to_queue( data_type='descriptor', key=None, value=f'Search Query: {node.value}', hover='Terms used in the Bing search', parent_id=node.node_id, incoming_edge_config=bing_edge) elif node.key == 'first': unfurl.add_to_queue( data_type='descriptor', key=None, value=f'Starting Result: {node.value}', hover='Bing search by default shows 8 results per page; higher <br>' '"first" values may indicate browsing more subsequent results pages.', parent_id=node.node_id, incoming_edge_config=bing_edge)
bing_edge = {'color': {'color': '#228372'}, 'title': 'Bing-related Parsing Functions', 'label': 'B'} def run(unfurl, node): if node.data_type == 'url.query.pair': if 'bing' in unfurl.find_preceding_domain(node): if node.key == 'pq': unfurl.add_to_queue(data_type='descriptor', key=None, value=f'"Previous" Search Query: {node.value}', hover='Previous terms entered by the user; auto-complete or suggestions <br>may have been used to reach the actual search terms (in <b>q</b>)', parent_id=node.node_id, incoming_edge_config=bing_edge) elif node.key == 'q': unfurl.add_to_queue(data_type='descriptor', key=None, value=f'Search Query: {node.value}', hover='Terms used in the Bing search', parent_id=node.node_id, incoming_edge_config=bing_edge) elif node.key == 'first': unfurl.add_to_queue(data_type='descriptor', key=None, value=f'Starting Result: {node.value}', hover='Bing search by default shows 8 results per page; higher <br>"first" values may indicate browsing more subsequent results pages.', parent_id=node.node_id, incoming_edge_config=bing_edge)
"""Module for user fixtures""" USER = { 'email': 'test_user@example.com', 'password': 'Password@1234', } USER_INVALID = {'email': '', 'password': ''} SUPERUSER = { 'email': 'test_userII@example.com', 'password': 'password1234', } UNREGISTERED_USER = { 'email': 'unregistered@example1.com', 'password': 'Password@1234' } TEST_AUTH_USER = { 'email': 'test_auth_user@example.com', 'password': 'Password@12345', }
"""Module for user fixtures""" user = {'email': 'test_user@example.com', 'password': 'Password@1234'} user_invalid = {'email': '', 'password': ''} superuser = {'email': 'test_userII@example.com', 'password': 'password1234'} unregistered_user = {'email': 'unregistered@example1.com', 'password': 'Password@1234'} test_auth_user = {'email': 'test_auth_user@example.com', 'password': 'Password@12345'}
class Term(str): @property def is_variable(self): return self[0] == "?" @property def is_constant(self): return self[0] != "?" @property def arity(self): return 0 class ListTerm(tuple): def __init__(self, *args): self.is_function = None tuple.__init__(self, *args) def function(self): ' must *only* be one level deep ' if self.is_function is None: self.is_function = True for term in self: if not isinstance(term, Term): self.is_function = False break return self.is_function @property def is_constant(self): ' all elements are constant (recursive definition) ' for term in self: if not term.is_constant: return False return True @property def arity(self): return len(self) def __str__(self): return "(%s)" % " ".join(str(x) for x in self) __repr__ = __str__ ############################################################################### def is_function(term): if isinstance(term, ListTerm): return term.function() return False ############################################################################### def tokenize(s): return s.replace('(', ' ( ').replace(')', ' ) ').split() class SymbolFactory(object): def __init__(self): self.symbol_pool = dict() def create(self, clz, *args): # make args hashable new_args = [] for arg in args: if isinstance(arg, list): arg = tuple(arg) new_args.append(arg) args = tuple(new_args) # symbol[clz] -> clz_pool[args] -> instance try: clz_pool = self.symbol_pool[clz] except KeyError: clz_pool = self.symbol_pool[clz] = {} try: instance = clz_pool[args] except KeyError: instance = clz_pool[args] = clz(*args) return instance def to_symbols(self, s): stack = [] current_list = [] # strip comment lines lines = [] for line in s.splitlines(): line = line.strip() useful = line.split(";") line = useful[0].strip() if line: lines.append(line) s = " ".join(lines) for token in tokenize(s): if token == '(': stack.append(current_list) current_list = [] elif token == ')': list_term = self.create(ListTerm, current_list) current_list = stack.pop() 
current_list.append(list_term) else: current_list.append(self.create(Term, token)) for sexpr in current_list: assert isinstance(sexpr, (Term, ListTerm)) yield sexpr def symbolize(self, string): ' takes a single symbol as a string and internalises ' line = list(self.to_symbols(string)) assert len(line) == 1 return line[0]
class Term(str): @property def is_variable(self): return self[0] == '?' @property def is_constant(self): return self[0] != '?' @property def arity(self): return 0 class Listterm(tuple): def __init__(self, *args): self.is_function = None tuple.__init__(self, *args) def function(self): """ must *only* be one level deep """ if self.is_function is None: self.is_function = True for term in self: if not isinstance(term, Term): self.is_function = False break return self.is_function @property def is_constant(self): """ all elements are constant (recursive definition) """ for term in self: if not term.is_constant: return False return True @property def arity(self): return len(self) def __str__(self): return '(%s)' % ' '.join((str(x) for x in self)) __repr__ = __str__ def is_function(term): if isinstance(term, ListTerm): return term.function() return False def tokenize(s): return s.replace('(', ' ( ').replace(')', ' ) ').split() class Symbolfactory(object): def __init__(self): self.symbol_pool = dict() def create(self, clz, *args): new_args = [] for arg in args: if isinstance(arg, list): arg = tuple(arg) new_args.append(arg) args = tuple(new_args) try: clz_pool = self.symbol_pool[clz] except KeyError: clz_pool = self.symbol_pool[clz] = {} try: instance = clz_pool[args] except KeyError: instance = clz_pool[args] = clz(*args) return instance def to_symbols(self, s): stack = [] current_list = [] lines = [] for line in s.splitlines(): line = line.strip() useful = line.split(';') line = useful[0].strip() if line: lines.append(line) s = ' '.join(lines) for token in tokenize(s): if token == '(': stack.append(current_list) current_list = [] elif token == ')': list_term = self.create(ListTerm, current_list) current_list = stack.pop() current_list.append(list_term) else: current_list.append(self.create(Term, token)) for sexpr in current_list: assert isinstance(sexpr, (Term, ListTerm)) yield sexpr def symbolize(self, string): """ takes a single symbol as a string and internalises """ 
line = list(self.to_symbols(string)) assert len(line) == 1 return line[0]
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # def f_gold ( a , b , k ) : c1 = ( b - a ) - 1 c2 = ( k - b ) + ( a - 1 ) if ( c1 == c2 ) : return 0 return min ( c1 , c2 ) #TOFILL if __name__ == '__main__': param = [ (83,98,86,), (3,39,87,), (11,96,30,), (50,67,48,), (40,16,32,), (62,86,76,), (40,78,71,), (66,11,74,), (6,9,19,), (25,5,5,) ] n_success = 0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print("#Results: %i, %i" % (n_success, len(param)))
def f_gold(a, b, k): c1 = b - a - 1 c2 = k - b + (a - 1) if c1 == c2: return 0 return min(c1, c2) if __name__ == '__main__': param = [(83, 98, 86), (3, 39, 87), (11, 96, 30), (50, 67, 48), (40, 16, 32), (62, 86, 76), (40, 78, 71), (66, 11, 74), (6, 9, 19), (25, 5, 5)] n_success = 0 for (i, parameters_set) in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success += 1 print('#Results: %i, %i' % (n_success, len(param)))
"""Exceptions of the pygmx package.""" class InvalidMagicException(Exception): pass class InvalidIndexException(Exception): pass class UnknownLenError(Exception): pass class FileTypeError(Exception): pass class XTCError(Exception): pass
"""Exceptions of the pygmx package.""" class Invalidmagicexception(Exception): pass class Invalidindexexception(Exception): pass class Unknownlenerror(Exception): pass class Filetypeerror(Exception): pass class Xtcerror(Exception): pass
def get_locale_name(something, lang): if type(something) == str: pass if type(something) == list: pass if type(something) == dict: pass
def get_locale_name(something, lang): if type(something) == str: pass if type(something) == list: pass if type(something) == dict: pass
phoneNumber = {} x = int(input()) for i in range(x): name, number = input().split() phoneNumber[name] = number for i in range(x): query_name = input() if query_name in phoneNumber.keys(): print(query_name + "=" + phoneNumber[query_name]) else: print("Not found")
phone_number = {} x = int(input()) for i in range(x): (name, number) = input().split() phoneNumber[name] = number for i in range(x): query_name = input() if query_name in phoneNumber.keys(): print(query_name + '=' + phoneNumber[query_name]) else: print('Not found')
#!/usr/bin/python # -*- coding: utf-8 -*- # # Control.py # class Control: def __init__(self, TagName): self._hosted = False # Is added to Form self._script = '' # Javascript code (Used before added to Form) self._id = '' # Used By Form to identify control Dont change self._script += 'var Control = document.createElement("' \ + TagName + '");' self.events = {} self.Controls = [] pass # Below function is only used by Form ! def initialize(self, Id, parent): self._id = Id self.parent = parent self._hosted = True return self._script + 'Control.id = "' + self._id + '";' def child_handler(self): for Control in self.Controls: self.parent.Add(Control) self.send('appendChild(document.getElementById("' + Control.Id + '"));') @property def Id(self, Id=None): if self.Id == None: self.Id = Id return self.Id @Id.getter def Id(self): return self._id def send(self, message, evaluate=False): """This will change script(Javascript) """ if self._hosted == True: # check if control is added to Form if evaluate: return self.parent.evaluate('document.getElementById("' + self._id + '").' + message) else: self.parent.run('document.getElementById("' + self._id + '").' + message) else: self._script += 'Control.' 
self._script += message def run(self, script): self.send(script) def evaluate(self, script): self.send(script, True) def innerHTML(self, html): """The innerHTML property sets or returns the HTML content (inner HTML) of an element.""" self.send('innerHTML;', True) def innerHTML(self, html): self.send('innerHTML="' + html + '";') def setAttribute(self, attributename, attributevalue): """The setAttribute() method adds the specified attribute to an element, and gives it the specified value.""" self.send('setAttribute("' + attributename + '", "' + attributevalue + '");') def addEventListener( self, event, function, useCapture=False, ): """The addEventListener() method attaches an event handler to the specified element.""" self.events[str(event)] = function self.send('addEventListener("' + event + '", notify_server, ' + str(useCapture).lower() + ');') def fire_event(self, EventData): for event in self.events: if event == EventData['type']: f = self.events[event] f(self, EventData) def Width(self, w): self.send('width="' + str(w) + '";') def Height(self, h): self.send('height="' + str(h) + '";') def Size(self, w, h): self.Width(w) self.Height(h) def appendChild(self, node): if self._hosted: self.parent.Add(node) self.send('appendChild(document.getElementById("' + node.Id + '"));') else: self.Controls.append(node) def style(self, prop, style): self.send('style.' + prop + '="' + style + '";') def classList(self): def add(self, classname): self.send('classList.add("' + classname + '");')
class Control: def __init__(self, TagName): self._hosted = False self._script = '' self._id = '' self._script += 'var Control = document.createElement("' + TagName + '");' self.events = {} self.Controls = [] pass def initialize(self, Id, parent): self._id = Id self.parent = parent self._hosted = True return self._script + 'Control.id = "' + self._id + '";' def child_handler(self): for control in self.Controls: self.parent.Add(Control) self.send('appendChild(document.getElementById("' + Control.Id + '"));') @property def id(self, Id=None): if self.Id == None: self.Id = Id return self.Id @Id.getter def id(self): return self._id def send(self, message, evaluate=False): """This will change script(Javascript) """ if self._hosted == True: if evaluate: return self.parent.evaluate('document.getElementById("' + self._id + '").' + message) else: self.parent.run('document.getElementById("' + self._id + '").' + message) else: self._script += 'Control.' self._script += message def run(self, script): self.send(script) def evaluate(self, script): self.send(script, True) def inner_html(self, html): """The innerHTML property sets or returns the HTML content (inner HTML) of an element.""" self.send('innerHTML;', True) def inner_html(self, html): self.send('innerHTML="' + html + '";') def set_attribute(self, attributename, attributevalue): """The setAttribute() method adds the specified attribute to an element, and gives it the specified value.""" self.send('setAttribute("' + attributename + '", "' + attributevalue + '");') def add_event_listener(self, event, function, useCapture=False): """The addEventListener() method attaches an event handler to the specified element.""" self.events[str(event)] = function self.send('addEventListener("' + event + '", notify_server, ' + str(useCapture).lower() + ');') def fire_event(self, EventData): for event in self.events: if event == EventData['type']: f = self.events[event] f(self, EventData) def width(self, w): self.send('width="' + str(w) + 
'";') def height(self, h): self.send('height="' + str(h) + '";') def size(self, w, h): self.Width(w) self.Height(h) def append_child(self, node): if self._hosted: self.parent.Add(node) self.send('appendChild(document.getElementById("' + node.Id + '"));') else: self.Controls.append(node) def style(self, prop, style): self.send('style.' + prop + '="' + style + '";') def class_list(self): def add(self, classname): self.send('classList.add("' + classname + '");')
AVAILABLE = [ { 'account_name': 'ExampleTwitterMarkovBot', # derived from https://twitter.com/ExampleTwitterMarkovBot 'corpora': ('example_corpus1', 'example_corpus2'), 'description': 'An example configuration for a bot instance', 'twitter_key': '', # twitter app API key 'twitter_secret': '' # twitter app API secret } ]
available = [{'account_name': 'ExampleTwitterMarkovBot', 'corpora': ('example_corpus1', 'example_corpus2'), 'description': 'An example configuration for a bot instance', 'twitter_key': '', 'twitter_secret': ''}]
if __name__ == "__main__": with open("input.txt", "r") as f: input_list = f.readlines() x = 0 y = 0 for input in input_list: command, string_val = input.split(" ") val = int(string_val) if command == "forward": x += val elif command == "down": y += val elif command == "up": y -= val else: raise ValueError(f"{command} is not a valid command") print(x * y)
if __name__ == '__main__': with open('input.txt', 'r') as f: input_list = f.readlines() x = 0 y = 0 for input in input_list: (command, string_val) = input.split(' ') val = int(string_val) if command == 'forward': x += val elif command == 'down': y += val elif command == 'up': y -= val else: raise value_error(f'{command} is not a valid command') print(x * y)
class TransactionError(Exception): pass class TransactionTimeoutError(Exception): pass class TransactionFinished(Exception): pass
class Transactionerror(Exception): pass class Transactiontimeouterror(Exception): pass class Transactionfinished(Exception): pass
class AdvancedArithmetic(object): def divisorSum(n): raise NotImplementedError class Calculator(AdvancedArithmetic): def divisorSum(self, n): divisor_sum = 0 for divisor in range(2, n): if n % divisor == 0: divisor_sum += divisor return divisor_sum + n + (0 if n is 1 else 1)
class Advancedarithmetic(object): def divisor_sum(n): raise NotImplementedError class Calculator(AdvancedArithmetic): def divisor_sum(self, n): divisor_sum = 0 for divisor in range(2, n): if n % divisor == 0: divisor_sum += divisor return divisor_sum + n + (0 if n is 1 else 1)
# Copyright 2018 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of resource bundle/importing rules.""" load( "@bazel_skylib//lib:partial.bzl", "partial", ) load( "@build_bazel_rules_apple//apple:providers.bzl", "AppleResourceBundleInfo", ) load( "@build_bazel_rules_apple//apple:utils.bzl", "group_files_by_directory", ) load( "@build_bazel_rules_apple//apple/internal:resources.bzl", "NewAppleResourceInfo", "resources", ) def _apple_bundle_import_impl(ctx): """Implementation of the apple_bundle_import rule.""" bundle_groups = group_files_by_directory( ctx.files.bundle_imports, ["bundle"], attr = "bundle_imports", ) if len(bundle_groups) != 1: fail( "There has to be exactly 1 imported bundle. Found:\n{}".format( "\n".join(bundle_groups.keys()), ), ) parent_dir_param = partial.make( resources.bundle_relative_parent_dir, extension = "bundle", ) resource_provider = resources.bucketize( ctx.files.bundle_imports, parent_dir_param = parent_dir_param, ) return [ AppleResourceBundleInfo(), resource_provider, ] apple_bundle_import = rule( implementation = _apple_bundle_import_impl, attrs = { "bundle_imports": attr.label_list( allow_empty = False, allow_files = True, mandatory = True, doc = """ The list of files under a .bundle directory to be propagated to the top-level bundling target. """, ), }, doc = """ This rule encapsulates an already-built bundle. It is defined by a list of files in exactly one .bundle directory. 
apple_bundle_import targets need to be added to library targets through the data attribute, or to other resource targets (i.e. apple_resource_bundle) through the resources attribute. """, ) def _apple_resource_bundle_impl(ctx): providers = [] bundle_name = "{}.bundle".format(ctx.attr.bundle_name or ctx.label.name) infoplists = resources.collect(ctx.attr, res_attrs = ["infoplists"]) if infoplists: providers.append( resources.bucketize_typed( infoplists, "infoplists", parent_dir_param = bundle_name, ), ) resource_files = resources.collect(ctx.attr, res_attrs = ["resources"]) if resource_files: providers.append( resources.bucketize( resource_files, parent_dir_param = bundle_name, ), ) if ctx.attr.structured_resources: # Avoid processing PNG files that are referenced through the structured_resources # attribute. This is mostly for legacy reasons and should get cleaned up in the future. providers.append( resources.bucketize( resources.collect(ctx.attr, res_attrs = ["structured_resources"]), parent_dir_param = partial.make( resources.structured_resources_parent_dir, parent_dir = bundle_name, ), avoid_buckets = ["pngs"], ), ) # Find any targets added through resources which might propagate the NewAppleResourceInfo # provider, for example, apple_resource_bundle or apple_bundle_import targets. resource_providers = [ x[NewAppleResourceInfo] for x in ctx.attr.resources if NewAppleResourceInfo in x ] if resource_providers: resources_merged_provider = resources.merge_providers(resource_providers) providers.append(resources.nest_in_bundle(resources_merged_provider, bundle_name)) return [ AppleResourceBundleInfo(), resources.merge_providers(providers), ] apple_resource_bundle = rule( implementation = _apple_resource_bundle_impl, attrs = { "bundle_name": attr.string( doc = """ The desired name of the bundle (without the `.bundle` extension). If this attribute is not set, then the `name` of the target will be used instead. 
""", ), "infoplists": attr.label_list( allow_empty = True, allow_files = True, doc = """ Infoplist files to be merged into the bundle's Info.plist. Duplicate keys between infoplist files will cause an error if and only if the values conflict. Bazel will perform variable substitution on the Info.plist file for the following values (if they are strings in the top-level dict of the plist): ${BUNDLE_NAME}: This target's name and bundle suffix (.bundle or .app) in the form name.suffix. ${PRODUCT_NAME}: This target's name. ${TARGET_NAME}: This target's name. The key in ${} may be suffixed with :rfc1034identifier (for example ${PRODUCT_NAME::rfc1034identifier}) in which case Bazel will replicate Xcode's behavior and replace non-RFC1034-compliant characters with -. """, ), "resources": attr.label_list( allow_empty = True, allow_files = True, doc = """ Files to include in the resource bundle. Files that are processable resources, like .xib, .storyboard, .strings, .png, and others, will be processed by the Apple bundling rules that have those files as dependencies. Other file types that are not processed will be copied verbatim. These files are placed in the root of the resource bundle (e.g. Payload/foo.app/bar.bundle/...) in most cases. However, if they appear to be localized (i.e. are contained in a directory called *.lproj), they will be placed in a directory of the same name in the app bundle. You can also add other `apple_resource_bundle` and `apple_bundle_import` targets into `resources`, and the resource bundle structures will be propagated into the final bundle. """, ), "structured_resources": attr.label_list( allow_empty = True, allow_files = True, doc = """ Files to include in the final resource bundle. They are not processed or compiled in any way besides the processing done by the rules that actually generate them. 
These files are placed in the bundle root in the same structure passed to this argument, so ["res/foo.png"] will end up in res/foo.png inside the bundle. """, ), }, doc = """ This rule encapsulates a target which is provided to dependers as a bundle. An apple_resource_bundle's resources are put in a resource bundle in the top level Apple bundle dependent. apple_resource_bundle targets need to be added to library targets through the data attribute. """, ) def _apple_resource_group_impl(ctx): """Implementation of the apple_resource_group rule.""" resource_providers = [] if ctx.attr.resources: resource_files = resources.collect(ctx.attr, res_attrs = ["resources"]) if resource_files: resource_providers.append( resources.bucketize(resource_files), ) if ctx.attr.structured_resources: # TODO(kaipi): Validate that structured_resources doesn't have processable resources, # e.g. we shouldn't accept xib files that should be compiled before bundling. structured_files = resources.collect( ctx.attr, res_attrs = ["structured_resources"], ) # Avoid processing PNG files that are referenced through the structured_resources # attribute. This is mostly for legacy reasons and should get cleaned up in the future. resource_providers.append( resources.bucketize( structured_files, parent_dir_param = partial.make( resources.structured_resources_parent_dir, ), avoid_buckets = ["pngs"], ), ) # Find any targets added through resources which might propagate the NewAppleResourceInfo # provider, for example, other apple_resource_group and apple_resource_bundle targets. resource_providers.extend([ x[NewAppleResourceInfo] for x in ctx.attr.resources if NewAppleResourceInfo in x ]) if resource_providers: # If any providers were collected, merge them. 
return [resources.merge_providers(resource_providers)] return [] apple_resource_group = rule( implementation = _apple_resource_group_impl, attrs = { "resources": attr.label_list( allow_empty = True, allow_files = True, doc = """ Files to include in the final bundle that depends on this target. Files that are processable resources, like .xib, .storyboard, .strings, .png, and others, will be processed by the Apple bundling rules that have those files as dependencies. Other file types that are not processed will be copied verbatim. These files are placed in the root of the final bundle (e.g. Payload/foo.app/...) in most cases. However, if they appear to be localized (i.e. are contained in a directory called *.lproj), they will be placed in a directory of the same name in the app bundle. You can also add apple_resource_bundle and apple_bundle_import targets into `resources`, and the resource bundle structures will be propagated into the final bundle. """, ), "structured_resources": attr.label_list( allow_empty = True, allow_files = True, doc = """ Files to include in the final application bundle. They are not processed or compiled in any way besides the processing done by the rules that actually generate them. These files are placed in the bundle root in the same structure passed to this argument, so ["res/foo.png"] will end up in res/foo.png inside the bundle. """, ), }, doc = """ This rule encapsulates a target which provides resources to dependents. An apple_resource_group's resources are put in the top-level Apple bundle dependent. apple_resource_group targets need to be added to library targets through the data attribute. If `apple_resource_bundle` or `apple_bundle_import` dependencies are added to `resources`, the resource bundle structures are maintained at the final top-level bundle. """, )
"""Implementation of resource bundle/importing rules.""" load('@bazel_skylib//lib:partial.bzl', 'partial') load('@build_bazel_rules_apple//apple:providers.bzl', 'AppleResourceBundleInfo') load('@build_bazel_rules_apple//apple:utils.bzl', 'group_files_by_directory') load('@build_bazel_rules_apple//apple/internal:resources.bzl', 'NewAppleResourceInfo', 'resources') def _apple_bundle_import_impl(ctx): """Implementation of the apple_bundle_import rule.""" bundle_groups = group_files_by_directory(ctx.files.bundle_imports, ['bundle'], attr='bundle_imports') if len(bundle_groups) != 1: fail('There has to be exactly 1 imported bundle. Found:\n{}'.format('\n'.join(bundle_groups.keys()))) parent_dir_param = partial.make(resources.bundle_relative_parent_dir, extension='bundle') resource_provider = resources.bucketize(ctx.files.bundle_imports, parent_dir_param=parent_dir_param) return [apple_resource_bundle_info(), resource_provider] apple_bundle_import = rule(implementation=_apple_bundle_import_impl, attrs={'bundle_imports': attr.label_list(allow_empty=False, allow_files=True, mandatory=True, doc='\nThe list of files under a .bundle directory to be propagated to the top-level bundling target.\n')}, doc='\nThis rule encapsulates an already-built bundle. It is defined by a list of files in exactly one\n.bundle directory. apple_bundle_import targets need to be added to library targets through the\ndata attribute, or to other resource targets (i.e. 
apple_resource_bundle) through the resources\nattribute.\n') def _apple_resource_bundle_impl(ctx): providers = [] bundle_name = '{}.bundle'.format(ctx.attr.bundle_name or ctx.label.name) infoplists = resources.collect(ctx.attr, res_attrs=['infoplists']) if infoplists: providers.append(resources.bucketize_typed(infoplists, 'infoplists', parent_dir_param=bundle_name)) resource_files = resources.collect(ctx.attr, res_attrs=['resources']) if resource_files: providers.append(resources.bucketize(resource_files, parent_dir_param=bundle_name)) if ctx.attr.structured_resources: providers.append(resources.bucketize(resources.collect(ctx.attr, res_attrs=['structured_resources']), parent_dir_param=partial.make(resources.structured_resources_parent_dir, parent_dir=bundle_name), avoid_buckets=['pngs'])) resource_providers = [x[NewAppleResourceInfo] for x in ctx.attr.resources if NewAppleResourceInfo in x] if resource_providers: resources_merged_provider = resources.merge_providers(resource_providers) providers.append(resources.nest_in_bundle(resources_merged_provider, bundle_name)) return [apple_resource_bundle_info(), resources.merge_providers(providers)] apple_resource_bundle = rule(implementation=_apple_resource_bundle_impl, attrs={'bundle_name': attr.string(doc='\nThe desired name of the bundle (without the `.bundle` extension). If this attribute is not set,\nthen the `name` of the target will be used instead.\n'), 'infoplists': attr.label_list(allow_empty=True, allow_files=True, doc="\nInfoplist files to be merged into the bundle's Info.plist. 
Duplicate keys between infoplist files\nwill cause an error if and only if the values conflict.\nBazel will perform variable substitution on the Info.plist file for the following values (if they\nare strings in the top-level dict of the plist):\n\n${BUNDLE_NAME}: This target's name and bundle suffix (.bundle or .app) in the form name.suffix.\n${PRODUCT_NAME}: This target's name.\n${TARGET_NAME}: This target's name.\nThe key in ${} may be suffixed with :rfc1034identifier (for example\n${PRODUCT_NAME::rfc1034identifier}) in which case Bazel will replicate Xcode's behavior and replace\nnon-RFC1034-compliant characters with -.\n"), 'resources': attr.label_list(allow_empty=True, allow_files=True, doc='\nFiles to include in the resource bundle. Files that are processable resources, like .xib,\n.storyboard, .strings, .png, and others, will be processed by the Apple bundling rules that have\nthose files as dependencies. Other file types that are not processed will be copied verbatim. These\nfiles are placed in the root of the resource bundle (e.g. Payload/foo.app/bar.bundle/...) in most\ncases. However, if they appear to be localized (i.e. are contained in a directory called *.lproj),\nthey will be placed in a directory of the same name in the app bundle.\n\nYou can also add other `apple_resource_bundle` and `apple_bundle_import` targets into `resources`,\nand the resource bundle structures will be propagated into the final bundle.\n'), 'structured_resources': attr.label_list(allow_empty=True, allow_files=True, doc='\nFiles to include in the final resource bundle. They are not processed or compiled in any way\nbesides the processing done by the rules that actually generate them. These files are placed in the\nbundle root in the same structure passed to this argument, so ["res/foo.png"] will end up in\nres/foo.png inside the bundle.\n')}, doc="\nThis rule encapsulates a target which is provided to dependers as a bundle. 
An\napple_resource_bundle's resources are put in a resource bundle in the top level Apple bundle\ndependent. apple_resource_bundle targets need to be added to library targets through the\ndata attribute.\n") def _apple_resource_group_impl(ctx): """Implementation of the apple_resource_group rule.""" resource_providers = [] if ctx.attr.resources: resource_files = resources.collect(ctx.attr, res_attrs=['resources']) if resource_files: resource_providers.append(resources.bucketize(resource_files)) if ctx.attr.structured_resources: structured_files = resources.collect(ctx.attr, res_attrs=['structured_resources']) resource_providers.append(resources.bucketize(structured_files, parent_dir_param=partial.make(resources.structured_resources_parent_dir), avoid_buckets=['pngs'])) resource_providers.extend([x[NewAppleResourceInfo] for x in ctx.attr.resources if NewAppleResourceInfo in x]) if resource_providers: return [resources.merge_providers(resource_providers)] return [] apple_resource_group = rule(implementation=_apple_resource_group_impl, attrs={'resources': attr.label_list(allow_empty=True, allow_files=True, doc='\nFiles to include in the final bundle that depends on this target. Files that are processable\nresources, like .xib, .storyboard, .strings, .png, and others, will be processed by the Apple\nbundling rules that have those files as dependencies. Other file types that are not processed will\nbe copied verbatim. These files are placed in the root of the final bundle (e.g.\nPayload/foo.app/...) in most cases. However, if they appear to be localized (i.e. 
are contained in a\ndirectory called *.lproj), they will be placed in a directory of the same name in the app bundle.\n\nYou can also add apple_resource_bundle and apple_bundle_import targets into `resources`, and the\nresource bundle structures will be propagated into the final bundle.\n'), 'structured_resources': attr.label_list(allow_empty=True, allow_files=True, doc='\nFiles to include in the final application bundle. They are not processed or compiled in any way\nbesides the processing done by the rules that actually generate them. These files are placed in the\nbundle root in the same structure passed to this argument, so ["res/foo.png"] will end up in\nres/foo.png inside the bundle.\n')}, doc="\nThis rule encapsulates a target which provides resources to dependents. An\napple_resource_group's resources are put in the top-level Apple bundle dependent.\napple_resource_group targets need to be added to library targets through the data attribute. If\n`apple_resource_bundle` or `apple_bundle_import` dependencies are added to `resources`, the resource\nbundle structures are maintained at the final top-level bundle.\n")
def gcd(a: int, b: int) -> int: # supposed a >= b if b > a: return gcd(b, a) elif a % b == 0: return b return gcd(b, a % b)
def gcd(a: int, b: int) -> int: if b > a: return gcd(b, a) elif a % b == 0: return b return gcd(b, a % b)
def data_loader(f_name, l_name): with open(f_name, mode='r', encoding='utf-8') as f: data = list(set(f.readlines())) label = [l_name for i in range(len(data))] return data, label
def data_loader(f_name, l_name): with open(f_name, mode='r', encoding='utf-8') as f: data = list(set(f.readlines())) label = [l_name for i in range(len(data))] return (data, label)
# TODO: maybe store them .sql files and read them as string # example: https://cloud.google.com/blog/products/application-development/how-to-schedule-a-recurring-python-script-on-gcp # def file_to_string(sql_path): # """Converts a SQL file holding a SQL query to a string. # Args: # sql_path: String containing a file path # Returns: # String representation of a file's contents # """ # with open(sql_path, 'r') as sql_file: # return sql_file.read() sample_events_query = """ SELECT CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id, event_date AS event_date, event_timestamp AS event_timestamp, event_name AS event_name, event_previous_timestamp AS event_previous_timestamp, event_value_in_usd AS event_value_in_usd, event_bundle_sequence_id AS event_bundle_sequence_id, event_server_timestamp_offset AS event_server_timestamp_offset, user_id AS user_id, user_pseudo_id AS user_pseudo_id, privacy_info.analytics_storage AS privacy_info_analytics_storage, privacy_info.ads_storage AS privacy_info_ads_storage, privacy_info.uses_transient_token AS privacy_info_uses_transient_token, user_first_touch_timestamp AS user_first_touch_timestamp, user_ltv.revenue AS user_ltv_revenue, user_ltv.currency AS user_ltv_currency, device.category AS device_category, device.mobile_brand_name AS device_mobile_brand_name, device.mobile_model_name AS device_mobile_model_name, device.mobile_marketing_name AS device_mobile_marketing_name, device.mobile_os_hardware_model AS device_mobile_os_hardware_model, device.operating_system AS device_operating_system, device.operating_system_version AS device_operating_system_version, device.vendor_id AS device_vendor_id, device.advertising_id AS device_advertising_id, device.language AS device_language, device.is_limited_ad_tracking AS device_is_limited_ad_tracking, device.time_zone_offset_seconds AS device_time_zone_offset_seconds, device.browser AS device_browser, device.browser_version AS device_browser_version, 
device.web_info.browser AS device_web_info_browser, device.web_info.browser_version AS device_web_info_browser_version, device.web_info.hostname AS device_web_info_hostname, geo.continent AS geo_continent, geo.country AS geo_country, geo.region AS geo_region, geo.city AS geo_city, geo.sub_continent AS geo_sub_continent, geo.metro AS geo_metro, app_info.id AS app_info_id, app_info.version AS app_info_version, app_info.install_store AS app_info_install_store, app_info.firebase_app_id AS app_info_firebase_app_id, app_info.install_source AS app_info_install_source, traffic_source.name AS traffic_source_name, traffic_source.medium AS traffic_source_medium, traffic_source.source AS traffic_source_source, stream_id AS stream_id, platform AS platform, event_dimensions.hostname AS event_dimensions_hostname, ecommerce.total_item_quantity AS ecommerce_total_item_quantity, ecommerce.purchase_revenue_in_usd AS ecommerce_purchase_revenue_in_usd, ecommerce.purchase_revenue AS ecommerce_purchase_revenue, ecommerce.refund_value_in_usd AS ecommerce_refund_value_in_usd, ecommerce.refund_value AS ecommerce_refund_value, ecommerce.shipping_value_in_usd AS ecommerce_shipping_value_in_usd, ecommerce.shipping_value AS ecommerce_shipping_value, ecommerce.tax_value_in_usd AS ecommerce_tax_value_in_usd, ecommerce.tax_value AS ecommerce_tax_value, ecommerce.unique_items AS ecommerce_unique_items, ecommerce.transaction_id AS ecommerce_transaction_id FROM `gcp-project.dataset.events_date_shard` """ sample_event_params_query = """ SELECT CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id, event_params.key as event_params_key, CONCAT(IFNULL(event_params.value.string_value, ''), IFNULL(CAST(event_params.value.int_value AS STRING), ''), IFNULL(CAST(event_params.value.float_value AS STRING), ''), IFNULL(CAST(event_params.value.double_value AS STRING), '')) AS event_params_value FROM `gcp-project.dataset.events_date_shard` ,UNNEST (event_params) AS 
event_params """ sample_user_properties_query = """ SELECT CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id, user_properties.key as user_properties_key , CONCAT(IFNULL(user_properties.value.string_value, ''), IFNULL(CAST(user_properties.value.int_value AS STRING), ''), IFNULL(CAST(user_properties.value.float_value AS STRING), ''), IFNULL(CAST(user_properties.value.double_value AS STRING), '')) AS user_properties_value, user_properties.value.set_timestamp_micros as user_properties_value_set_timestamp_micros FROM `gcp-project.dataset.events_date_shard` ,UNNEST (user_properties) AS user_properties """ sample_items_query = """ SELECT CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id, items.item_id AS items_item_id, items.item_name AS items_item_name, items.item_brand AS items_item_brand, items.item_variant AS items_item_variant, items.item_category AS items_item_category, items.item_category2 AS items_item_category2, items.item_category3 AS items_item_category3, items.item_category4 AS items_item_category4, items.item_category5 AS items_item_category5, items.price_in_usd AS items_price_in_usd, items.price AS items_price, items.quantity AS items_quantity, items.item_revenue_in_usd AS items_item_revenue_in_usd, items.item_revenue AS items_item_revenue, items.item_refund_in_usd AS items_item_refund_in_usd, items.item_refund AS items_item_refund, items.coupon AS items_coupon, items.affiliation AS items_affiliation, items.location_id AS items_location_id, items.item_list_id AS items_item_list_id, items.item_list_name AS items_item_list_name, items.item_list_index AS items_item_list_index, items.promotion_id AS items_promotion_id, items.promotion_name AS items_promotion_name, items.creative_name AS items_creative_name, items.creative_slot AS items_creative_slot FROM `gcp-project.dataset.events_date_shard` ,UNNEST(items) AS items """
sample_events_query = '\nSELECT \nCONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,\n\nevent_date AS event_date,\nevent_timestamp AS event_timestamp,\nevent_name AS event_name,\nevent_previous_timestamp AS event_previous_timestamp,\nevent_value_in_usd AS event_value_in_usd,\nevent_bundle_sequence_id AS event_bundle_sequence_id,\nevent_server_timestamp_offset AS event_server_timestamp_offset,\nuser_id AS user_id,\nuser_pseudo_id AS user_pseudo_id,\n\nprivacy_info.analytics_storage AS privacy_info_analytics_storage,\nprivacy_info.ads_storage AS privacy_info_ads_storage,\nprivacy_info.uses_transient_token AS privacy_info_uses_transient_token,\nuser_first_touch_timestamp AS user_first_touch_timestamp,\n\nuser_ltv.revenue AS user_ltv_revenue,\nuser_ltv.currency AS user_ltv_currency,\n\ndevice.category AS device_category,\ndevice.mobile_brand_name AS device_mobile_brand_name,\ndevice.mobile_model_name AS device_mobile_model_name,\ndevice.mobile_marketing_name AS device_mobile_marketing_name,\ndevice.mobile_os_hardware_model AS device_mobile_os_hardware_model,\ndevice.operating_system AS device_operating_system,\ndevice.operating_system_version AS device_operating_system_version,\ndevice.vendor_id AS device_vendor_id,\ndevice.advertising_id AS device_advertising_id,\ndevice.language AS device_language,\ndevice.is_limited_ad_tracking AS device_is_limited_ad_tracking,\ndevice.time_zone_offset_seconds AS device_time_zone_offset_seconds,\ndevice.browser AS device_browser,\ndevice.browser_version AS device_browser_version,\n\ndevice.web_info.browser AS device_web_info_browser,\ndevice.web_info.browser_version AS device_web_info_browser_version,\ndevice.web_info.hostname AS device_web_info_hostname,\n\ngeo.continent AS geo_continent,\ngeo.country AS geo_country,\ngeo.region AS geo_region,\ngeo.city AS geo_city,\ngeo.sub_continent AS geo_sub_continent,\ngeo.metro AS geo_metro,\n\napp_info.id AS app_info_id,\napp_info.version AS 
app_info_version,\napp_info.install_store AS app_info_install_store,\napp_info.firebase_app_id AS app_info_firebase_app_id,\napp_info.install_source AS app_info_install_source,\n\ntraffic_source.name AS traffic_source_name,\ntraffic_source.medium AS traffic_source_medium,\ntraffic_source.source AS traffic_source_source,\nstream_id AS stream_id,\nplatform AS platform,\n\nevent_dimensions.hostname AS event_dimensions_hostname,\n\necommerce.total_item_quantity AS ecommerce_total_item_quantity,\necommerce.purchase_revenue_in_usd AS ecommerce_purchase_revenue_in_usd,\necommerce.purchase_revenue AS ecommerce_purchase_revenue,\necommerce.refund_value_in_usd AS ecommerce_refund_value_in_usd,\necommerce.refund_value AS ecommerce_refund_value,\necommerce.shipping_value_in_usd AS ecommerce_shipping_value_in_usd,\necommerce.shipping_value AS ecommerce_shipping_value,\necommerce.tax_value_in_usd AS ecommerce_tax_value_in_usd,\necommerce.tax_value AS ecommerce_tax_value,\necommerce.unique_items AS ecommerce_unique_items,\necommerce.transaction_id AS ecommerce_transaction_id\n FROM `gcp-project.dataset.events_date_shard` \n' sample_event_params_query = '\nSELECT \nCONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,\nevent_params.key as event_params_key,\nCONCAT(IFNULL(event_params.value.string_value, \'\'), \nIFNULL(CAST(event_params.value.int_value AS STRING), \'\'), \nIFNULL(CAST(event_params.value.float_value AS STRING), \'\'), \nIFNULL(CAST(event_params.value.double_value AS STRING), \'\'))\n\nAS event_params_value\n\nFROM `gcp-project.dataset.events_date_shard` \n,UNNEST (event_params) AS event_params\n' sample_user_properties_query = '\nSELECT \nCONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,\nuser_properties.key\tas user_properties_key\t,\nCONCAT(IFNULL(user_properties.value.string_value, \'\'), \nIFNULL(CAST(user_properties.value.int_value AS STRING), \'\'), 
\nIFNULL(CAST(user_properties.value.float_value AS STRING), \'\'), \nIFNULL(CAST(user_properties.value.double_value AS STRING), \'\'))\nAS user_properties_value,\nuser_properties.value.set_timestamp_micros\tas user_properties_value_set_timestamp_micros\n FROM `gcp-project.dataset.events_date_shard` \n ,UNNEST (user_properties) AS user_properties\n' sample_items_query = '\nSELECT \nCONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,\n\nitems.item_id AS items_item_id,\nitems.item_name AS items_item_name,\nitems.item_brand AS items_item_brand,\nitems.item_variant AS items_item_variant,\nitems.item_category AS items_item_category,\nitems.item_category2 AS items_item_category2,\nitems.item_category3 AS items_item_category3,\nitems.item_category4 AS items_item_category4,\nitems.item_category5 AS items_item_category5,\nitems.price_in_usd AS items_price_in_usd,\nitems.price AS items_price,\nitems.quantity AS items_quantity,\nitems.item_revenue_in_usd AS items_item_revenue_in_usd,\nitems.item_revenue AS items_item_revenue,\nitems.item_refund_in_usd AS items_item_refund_in_usd,\nitems.item_refund AS items_item_refund,\nitems.coupon AS items_coupon,\nitems.affiliation AS items_affiliation,\nitems.location_id AS items_location_id,\nitems.item_list_id AS items_item_list_id,\nitems.item_list_name AS items_item_list_name,\nitems.item_list_index AS items_item_list_index,\nitems.promotion_id AS items_promotion_id,\nitems.promotion_name AS items_promotion_name,\nitems.creative_name AS items_creative_name,\nitems.creative_slot AS items_creative_slot\n\n FROM `gcp-project.dataset.events_date_shard` \n ,UNNEST(items) AS items\n'
def in_order_traversal(node, visit_func):
    """Left subtree, then node, then right subtree (LNR)."""
    if node is None:
        return
    in_order_traversal(node.left, visit_func)
    visit_func(node.data)
    in_order_traversal(node.right, visit_func)


def pre_order_traversal(node, visit_func):
    """Node first, then left subtree, then right subtree (NLR)."""
    if node is None:
        return
    visit_func(node.data)
    pre_order_traversal(node.left, visit_func)
    pre_order_traversal(node.right, visit_func)


def post_order_traversal(node, visit_func):
    """Left subtree, then right subtree, then node (LRN)."""
    if node is None:
        return
    post_order_traversal(node.left, visit_func)
    post_order_traversal(node.right, visit_func)
    visit_func(node.data)
def in_order_traversal(node, visit_func):
    """Recursively apply visit_func to every data value in sorted-tree order."""
    if node is None:
        return
    in_order_traversal(node.left, visit_func)
    visit_func(node.data)
    in_order_traversal(node.right, visit_func)


def pre_order_traversal(node, visit_func):
    """Recursively apply visit_func, visiting each node before its children."""
    if node is None:
        return
    visit_func(node.data)
    pre_order_traversal(node.left, visit_func)
    pre_order_traversal(node.right, visit_func)


def post_order_traversal(node, visit_func):
    """Recursively apply visit_func, visiting each node after its children."""
    if node is None:
        return
    post_order_traversal(node.left, visit_func)
    post_order_traversal(node.right, visit_func)
    visit_func(node.data)
"""
Ask for the user's birth year and the current year, compute the age
(day and month are deliberately ignored), print it, and — when the age
is 18 or more — report that a driver's licence can already be obtained.
"""
birth_year = int(input("Digite seu ano de nacimento: \n"))
current_year = int(input('Digite o ano atual: \n'))

age = current_year - birth_year
print('Voce tem {} anos de idade'.format(age))

if age >= 18:
    print("Ual vc ja pode tirar sua carteira com {} anos ".format(age))
""" Desenvolva um algoritmo que solicite seu ano de nacimento e o ano anoAtual Calcule a idade e apresente na tela. Para fins de simplificacao, despreze o dia e mes do ano. Apos o calculo verifique se a idade e maior ou igual a 18 anos e apresente na tela a mensagen informando que ja e possivel tirar a carteira de motorista caso seja de maior. """ nasc = int(input('Digite seu ano de nacimento: \n')) ano_atual = int(input('Digite o ano atual: \n')) r = anoAtual - nasc print('Voce tem {} anos de idade'.format(r)) if r >= 18: print('Ual vc ja pode tirar sua carteira com {} anos '.format(r))
class Feature:
    """A single named field to extract: its selector, the regex pattern
    chosen for its declared data type, and whether it may hold multiple
    values."""

    def __init__(self, name, selector, data_type, number_of_values, patterns):
        self.name = name
        self.selector = selector
        # Resolve the concrete pattern for this field's data type up front.
        self.pattern = patterns[data_type]
        # Anything other than the literal 'single' means multi-valued.
        self.multiple_values = not (number_of_values == 'single')
class Feature:
    """Describes one field to capture: name, selector, the type-specific
    pattern looked up from *patterns*, and a single- vs multi-value flag."""

    def __init__(self, name, selector, data_type, number_of_values, patterns):
        self.name = name
        self.selector = selector
        chosen_pattern = patterns[data_type]
        self.pattern = chosen_pattern
        self.multiple_values = number_of_values != 'single'
## rolling mean & variance
class OnlineStats:
    """Running mean/variance accumulator (Welford's online algorithm).

    Call update() once per observation; mean, var, std and precision are
    available at any point without storing the samples.
    """

    def __init__(self):
        self.reset()

    def reset(self) -> None:
        """Clear all accumulated state."""
        self.n = 0       # number of observations seen
        self.mean = 0.0  # running mean
        self.m2 = 0.0    # running sum of squared deviations from the mean

    def update(self, x: float) -> None:
        """Update stats for new observation."""
        self.n += 1
        new_mean = self.mean + (x - self.mean) / self.n
        # Welford update: uses both the old and the new mean.
        self.m2 += (x - self.mean) * (x - new_mean)
        self.mean = new_mean

    @property
    def var(self) -> float:
        """Sample variance (n-1 denominator); 0.0 with fewer than 2 points."""
        if self.n > 1:
            return self.m2 / (self.n - 1)
        else:
            return 0.0

    @property
    def precision(self) -> float:
        """Inverse of variance."""
        if self.n > 1:
            # m2 is clamped to avoid dividing by ~0 for near-constant data.
            return (self.n - 1) / max(self.m2, 1.0e-6)
        else:
            return 1.0

    @property
    def std(self) -> float:
        """Sample standard deviation."""
        return self.var ** 0.5

    def __repr__(self):
        # Bug fix: the closing '>' was missing from the repr string.
        return f'<OnlineStats mean={self.mean} std={self.std}>'
class Onlinestats:
    """Running mean/variance accumulator (Welford's online algorithm).

    Call update() once per observation; mean, var, std and precision are
    available at any point without storing the samples.
    """

    def __init__(self):
        self.reset()

    def reset(self) -> None:
        """Clear all accumulated state."""
        self.n = 0       # number of observations seen
        self.mean = 0.0  # running mean
        self.m2 = 0.0    # running sum of squared deviations from the mean

    def update(self, x: float) -> None:
        """Update stats for new observation."""
        self.n += 1
        new_mean = self.mean + (x - self.mean) / self.n
        # Welford update: uses both the old and the new mean.
        self.m2 += (x - self.mean) * (x - new_mean)
        self.mean = new_mean

    @property
    def var(self) -> float:
        """Sample variance (n-1 denominator); 0.0 with fewer than 2 points."""
        if self.n > 1:
            return self.m2 / (self.n - 1)
        else:
            return 0.0

    @property
    def precision(self) -> float:
        """Inverse of variance."""
        if self.n > 1:
            # m2 is clamped to avoid dividing by ~0 for near-constant data.
            return (self.n - 1) / max(self.m2, 1e-06)
        else:
            return 1.0

    @property
    def std(self) -> float:
        """Sample standard deviation."""
        return self.var ** 0.5

    def __repr__(self):
        # Bug fix: the repr string was missing its closing '>' and hard-coded
        # a different class name; derive the name from the type instead.
        return f'<{type(self).__name__} mean={self.mean} std={self.std}>'
def get_multiples(num=1, c=10):
    """Yield the first *c* positive multiples of *num*.

    num -- base value (default 1); c -- how many multiples to yield.

    Rewritten from the original scan-and-test loop: generating num*k
    directly is O(c), and it also fixes the negative-num case the original
    comment flagged (the scan loop yielded e.g. -2, 0, 2, ... instead of
    -2, -4, -6, ... because step-by-one scanning crosses zero).
    """
    for k in range(1, c + 1):
        yield num * k


multiples_two = get_multiples(2, 3)
for i in multiples_two:
    print(i)

default_multiples = get_multiples()

multiples_5 = get_multiples(5, 6)
l = []
for i in range(6):
    l.append(next(multiples_5))
print(l)

# OR -- the original demo called next() 11 times on a 10-value generator and
# crashed with an uncaught StopIteration; next()'s default argument lets the
# loop stop cleanly when the generator is exhausted.
for i in range(11):
    value = next(default_multiples, None)
    if value is None:
        break
    print(value)
def get_multiples(num=1, c=10):
    """Yield the first *c* positive multiples of *num*.

    Rewritten from the scan-and-test loop: generating num*k directly is
    O(c) and behaves sensibly for negative num as well (the scan loop
    stepped by +1 and so crossed zero, yielding -2, 0, 2, ...).
    """
    for k in range(1, c + 1):
        yield num * k


multiples_two = get_multiples(2, 3)
for i in multiples_two:
    print(i)

default_multiples = get_multiples()

multiples_5 = get_multiples(5, 6)
l = []
for i in range(6):
    l.append(next(multiples_5))
print(l)

# Bug fix: the demo called next() 11 times on a 10-value generator, which
# crashed with an uncaught StopIteration.  Use next()'s default argument to
# stop cleanly at exhaustion instead.
for i in range(11):
    value = next(default_multiples, None)
    if value is None:
        break
    print(value)
# Copyright 2010-2011, RTLCores. All rights reserved.
# http://rtlcores.com
# See LICENSE.txt

class CmdArgs(list):
    """A list of command arguments with an optional conversion callback.

    cmd, when given, is a callable applied to the whole list by conv().
    """

    def __init__(self, value=(), cmd=None):
        # Default changed from a shared mutable [] to an immutable tuple
        # (best practice); list.__init__ accepts any iterable, so
        # CmdArgs() still constructs an empty list.
        list.__init__(self, value)
        self.cmd = cmd

    def conv(self):
        """Return self unchanged, or the result of applying self.cmd to it."""
        if self.cmd is None:  # idiomatic identity check (was `== None`)
            return self
        else:
            return self.cmd(self)
class Cmdargs(list):
    """A list of command arguments with an optional conversion callback.

    cmd, when given, is a callable applied to the whole list by conv().
    """

    def __init__(self, value=(), cmd=None):
        # Default changed from a shared mutable [] to an immutable tuple
        # (best practice); list.__init__ accepts any iterable, so
        # Cmdargs() still constructs an empty list.
        list.__init__(self, value)
        self.cmd = cmd

    def conv(self):
        """Return self unchanged, or the result of applying self.cmd to it."""
        if self.cmd is None:  # idiomatic identity check (was `== None`)
            return self
        else:
            return self.cmd(self)
def main() -> None:
    # Read an undirected graph from stdin: n vertices, m edges,
    # endpoints given 1-indexed and converted to 0-indexed here.
    n, m = map(int, input().split())
    g = [[] for _ in range(n)]
    for _ in range(m):
        u, v = map(int, input().split())
        u -= 1
        v -= 1
        g[u].append(v)
        g[v].append(u)
    INF = 1 << 60
    # min_length[s] = fewest edge-steps needed to realize bitmask s
    # (INF = not yet realized); remain counts masks still at INF.
    min_length = [INF] * (1 << n)
    remain = 1 << n
    min_length[0] = 0
    remain -= 1
    # bfs over states (s, i): i is the current vertex and each move along an
    # edge (i, j) toggles bit j of s, i.e. t = s ^ (1 << j).
    # NOTE(review): so s appears to be the set of vertices entered an odd
    # number of times -- inferred from the XOR update, confirm against the
    # problem statement.
    added_to_que = [[False] * n for _ in range(1 << n)]
    for i in range(n):
        added_to_que[1 << i][i] = True
    current_length = 1
    que = [(1 << i, i) for i in range(n)]
    while remain:
        new_que = []
        for s, i in que:
            # First time any walk of the current length realizes mask s.
            if min_length[s] == INF:
                min_length[s] = current_length
                remain -= 1
            for j in g[i]:
                t = s ^ (1 << j)
                if added_to_que[t][j]:
                    continue
                added_to_que[t][j] = True
                new_que.append((t, j))
        current_length += 1
        que = new_que
    # Answer: sum of minimum walk lengths over all 2^n masks.
    print(sum(min_length))


if __name__ == "__main__":
    main()
def main() -> None:
    """Read an undirected graph from stdin and run a BFS over
    (bitmask, vertex) states; prints the sum over all 2^n masks of the
    minimum number of edge-steps needed to realize each mask, where each
    move along an edge (i, j) toggles bit j (t = s ^ (1 << j))."""
    n, m = map(int, input().split())
    g = [[] for _ in range(n)]
    for _ in range(m):
        u, v = map(int, input().split())
        u -= 1
        v -= 1
        g[u].append(v)
        g[v].append(u)
    # Bug fix: the sentinel was bound as `inf` but read as `INF`,
    # raising NameError on first use; one consistent name is used now.
    INF = 1 << 60
    min_length = [INF] * (1 << n)
    remain = 1 << n
    min_length[0] = 0
    remain -= 1
    # BFS frontier bookkeeping: added_to_que[s][i] marks state (s, i) queued.
    added_to_que = [[False] * n for _ in range(1 << n)]
    for i in range(n):
        added_to_que[1 << i][i] = True
    current_length = 1
    que = [(1 << i, i) for i in range(n)]
    while remain:
        new_que = []
        for s, i in que:
            # First time any walk of the current length realizes mask s.
            if min_length[s] == INF:
                min_length[s] = current_length
                remain -= 1
            for j in g[i]:
                t = s ^ (1 << j)
                if added_to_que[t][j]:
                    continue
                added_to_que[t][j] = True
                new_que.append((t, j))
        current_length += 1
        que = new_que
    print(sum(min_length))


if __name__ == '__main__':
    main()
"""A rich command line interface for PyPI.""" __name__ = "pypi-command-line" __title__ = __name__ __license__ = "MIT" __version__ = "0.4.0" __author__ = "Arian Mollik Wasi" __github__ = "https://github.com/wasi-master/pypi-cli"
"""A rich command line interface for PyPI.""" __name__ = 'pypi-command-line' __title__ = __name__ __license__ = 'MIT' __version__ = '0.4.0' __author__ = 'Arian Mollik Wasi' __github__ = 'https://github.com/wasi-master/pypi-cli'
""" Name: MultiSURF.py Authors: Gediminas Bertasius and Ryan Urbanowicz - Written at Dartmouth College, Hanover, NH, USA Contact: ryan.j.urbanowicz@darmouth.edu Created: December 4, 2013 Modified: August 25,2014 Description: --------------------------------------------------------------------------------------------------------------------------------------------------------- ReBATE V1.0: includes stand-alone Python code to run any of the included/available Relief-Based algorithms designed for attribute filtering/ranking. These algorithms are a quick way to identify attributes in the dataset that may be most important to predicting some phenotypic endpoint. These scripts output an ordered set of attribute names, along with respective scores (uniquely determined by the particular algorithm selected). Certain algorithms require key run parameters to be specified. This code is largely based on the Relief-Based algorithms implemented in the Multifactor Dimensionality Reduction (MDR) software. However these implementations have been expanded to accomodate continuous attributes (and continuous attributes mixed with discrete attributes) as well as a continuous endpoint. This code also accomodates missing data points. Built into this code, is a strategy to automatically detect from the loaded data, these relevant characteristics. Copyright (C) 2013 Ryan Urbanowicz This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABLILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA --------------------------------------------------------------------------------------------------------------------------------------------------------- """ def Run_MultiSURF(data): """ Called to run the MultiSURF algorithm. #PARAM x- is a matrix containing the attributes of all instances in the dataset #PARAM y- is a matrix containing the class of a data instance """ x = [ row[0] for row in data.trainFormatted ] y = [ row[1] for row in data.trainFormatted ] print("Running MultiSURF Algorithm...") scores=MultiSURF(x,y,data) print("MultiSURF run complete.") return scores def MultiSURF(x,y,data): """ Controls major MultiSURF loops. """ ScoreList=[] for i in range(data.numAttributes): #initializing attributes' scores to 0 ScoreList.append(0) #Precompute distances between all unique instance pairs within the dataset. 
print("Precomputing Distance Array") distanceArray = calculateDistanceArray(x,data) print("Computed") same_class_bound = data.phenSD D=[] avg_distances=[] for i in range(data.numTrainInstances): dist_vector=[] dist_vector=get_individual_distances(i,data,distanceArray) avg_distances.append(get_average(dist_vector)) std_dev=get_std_dev(dist_vector,avg_distances[i]) D.append(std_dev/2.0) for k in range(data.numAttributes): #looping through attributes if data.attributeInfo[k][0]: #Continuous Attribute minA=data.attributeInfo[k][1][0] maxA=data.attributeInfo[k][1][1] count_hit_near=0 count_miss_near=0 count_hit_far=0 count_miss_far=0 diff_hit_near=0 #initializing the score to 0 diff_miss_near=0 diff_hit_far=0 diff_miss_far=0 for i in range(data.numTrainInstances): for j in range(i,data.numTrainInstances): if i!=j and x[i][k]!=data.labelMissingData and x[j][k]!=data.labelMissingData: locator = [i,j] locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy) d = distanceArray[locator[0]][locator[1]] if (d<avg_distances[i]-D[i]): #Near if data.discretePhenotype: #discrete endpoint if y[i]==y[j]: #Same Endpoint count_hit_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute (closer att scores for near same phen should yield larger att penalty) diff_hit_near-=(abs(x[i][k]-x[j][k])/(maxA-minA)) else:#Discrete diff_hit_near-=1 else: #Different Endpoint count_miss_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute (farther att scores for near diff phen should yield larger att bonus) diff_miss_near+=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete diff_miss_near+=1 else:#continuous endpoint if abs(y[i]-y[j])<same_class_bound: count_hit_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_near-=(abs(x[i][k]-x[j][k])/(maxA-minA)) else:#Discrete diff_hit_near-=1 else: count_miss_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous 
Attribute diff_miss_near+=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete diff_miss_near+=1 if (d>avg_distances[i]+D[i]): #Far if data.discretePhenotype: #discrete endpoint if y[i]==y[j]: count_hit_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_far-=(abs(x[i][k]-x[j][k]))/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_hit_far-=1 else: count_miss_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_miss_far+=abs(x[i][k]-x[j][k])/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_miss_far+=1 else:#continuous endpoint if abs(y[i]-y[j])<same_class_bound: count_hit_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_far-=(abs(x[i][k]-x[j][k]))/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_hit_far-=1 else: count_miss_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_miss_far+=abs(x[i][k]-x[j][k])/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_miss_far+=1 hit_proportion=count_hit_near/float(count_hit_near+count_miss_near) miss_proportion=count_miss_near/float(count_hit_near+count_miss_near) diff=diff_hit_near*miss_proportion+diff_miss_near*hit_proportion #applying weighting scheme to balance the scores hit_proportion=count_hit_far/float(count_hit_far+count_miss_far) miss_proportion=count_miss_far/float(count_hit_far+count_miss_far) diff+=diff_hit_far*miss_proportion+diff_miss_far*hit_proportion #applying weighting scheme to balance the scores ScoreList[k]+=diff return ScoreList def multiClassMultiSURF(x,y,data): """ Controls major MultiSURF loops. """ ScoreList=[] for i in range(data.numAttributes): #initializing attributes' scores to 0 ScoreList.append(0) #Precompute distances between all unique instance pairs within the dataset. 
print("Precomputing Distance Array") distanceArray = calculateDistanceArray(x,data) print("Computed") #For MulitClass Array Only multiclass_map = None if data.discretePhenotype and len(data.phenotypeList) > 2: multiclass_map = makeMultiClassMap(y,data) D=[] avg_distances=[] for i in range(data.numTrainInstances): dist_vector=[] dist_vector=get_individual_distances(i,data,distanceArray) avg_distances.append(get_average(dist_vector)) std_dev=get_std_dev(dist_vector,avg_distances[i]) D.append(std_dev/2.0) for k in range(data.numAttributes): #looping through attributes if data.attributeInfo[k][0]: #Continuous Attribute minA=data.attributeInfo[k][1][0] maxA=data.attributeInfo[k][1][1] count_hit_near=0 count_miss_near=0 count_hit_far=0 count_miss_far=0 diff_hit_near=0 #initializing the score to 0 diff_miss_near=0 diff_hit_far=0 diff_miss_far=0 class_Store_near = makeClassPairMap(multiclass_map) class_Store_far = makeClassPairMap(multiclass_map) for i in range(data.numTrainInstances): for j in range(i,data.numTrainInstances): if i!=j and x[i][k]!=data.labelMissingData and x[j][k]!=data.labelMissingData: locator = [i,j] locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy) d = distanceArray[locator[0]][locator[1]] if (d<avg_distances[i]-D[i]): #Near if y[i]==y[j]: count_hit_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_near-=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete diff_hit_near-=1 else: count_miss_near+=1 locator = [y[i],y[j]] locator = sorted(locator, reverse = True) tempString = str(locator[0])+str(locator[1]) class_Store_near[tempString][0] += 1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute class_Store_near[tempString][1]+=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete class_Store_near[tempString][1]+=1 if (d>avg_distances[i]+D[i]): #Far if y[i]==y[j]: count_hit_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute 
diff_hit_far-=(1-abs(x[i][k]-x[j][k]))/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_hit_far-=1 else: count_miss_far+=1 locator = [y[i],y[j]] locator = sorted(locator, reverse = True) tempString = str(locator[0])+str(locator[1]) class_Store_far[tempString][0] += 1 if data.attributeInfo[k][0]: #Continuous Attribute class_Store_far[tempString][1]+=abs(x[i][k]-x[j][k])/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: class_Store_far[tempString][1]+=1 #Near missSum = 0 for each in class_Store_near: missSum += class_Store_near[each][0] hit_proportion=count_hit_near/float(count_hit_near+count_miss_near) #Correcting for Missing Data. miss_proportion=count_miss_near/float(count_hit_near+count_miss_near) for each in class_Store_near: diff_miss_near += (class_Store_near[each][0]/float(missSum))*class_Store_near[each][1] diff_miss_near = diff_miss_near * float(len(class_Store_near)) diff = diff_miss_near*hit_proportion + diff_hit_near*miss_proportion #Far missSum = 0 for each in class_Store_far: missSum += class_Store_far[each][0] hit_proportion=count_hit_far/float(count_hit_far+count_miss_far) #Correcting for Missing Data. 
miss_proportion=count_miss_far/float(count_hit_far+count_miss_far) for each in class_Store_far: diff_miss_far += (class_Store_far[each][0]/float(missSum))*class_Store_far[each][1] diff_miss_far = diff_miss_far * float(len(class_Store_far)) diff += diff_miss_far*hit_proportion + diff_hit_far*miss_proportion ScoreList[k]+=diff return ScoreList def get_std_dev(dist_vector,avg): sum=0; for i in range(len(dist_vector)): sum+=(dist_vector[i]-avg)**2 sum=sum/float(len(dist_vector)) return (sum**0.5) def get_average(dist_vector): sum=0 for i in range(len(dist_vector)): sum+=dist_vector[i]; return sum/float(len(dist_vector)) def get_individual_distances(i,data,distanceArray): d=[] for j in range(data.numTrainInstances): if (i!=j): locator = [i,j] locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy) d.append(distanceArray[locator[0]][locator[1]]) return d def calculateDistanceArray(x,data): #make empty distance array container (we will only fill up the non redundant half of the array distArray = [] for i in range(data.numTrainInstances): distArray.append([]) for j in range(data.numTrainInstances): distArray[i].append(None) for i in range(1, data.numTrainInstances): for j in range(0,i): distArray[i][j] = calculate_distance(x[i],x[j],data) return distArray def makeMultiClassMap(y, data): #finding number of classes in the dataset and storing them into the map multiclass_map={} for i in range(data.numTrainInstances): if (y[i] not in multiclass_map): multiclass_map[y[i]]=0 else: multiclass_map[y[i]]+=1 for each in data.phenotypeList: #For each class store probability of class occurrence in dataset. 
multiclass_map[each] = multiclass_map[each]/float(data.numTrainInstances) return multiclass_map def makeClassPairMap(multiclass_map): #finding number of classes in the dataset and storing them into the map classPair_map={} for each in multiclass_map: for other in multiclass_map: if each != other: locator = [each,other] locator = sorted(locator, reverse = True) tempString = str(locator[0])+str(locator[1]) if (tempString not in classPair_map): classPair_map[tempString] = [0,0] return classPair_map def calculate_distance(a,b,data): """ Calculates the distance between two instances in the dataset. Handles discrete and continuous attributes. Continuous attributes are accomodated by scaling the distance difference within the context of the observed attribute range. If a respective data point is missing from either instance, it is left out of the distance calculation. """ d=0 #distance for i in range(data.numAttributes): if a[i]!=data.labelMissingData and b[i]!=data.labelMissingData: if not data.attributeInfo[i][0]: #Discrete Attribute if a[i] != b[i]: d+=1 else: #Continuous Attribute min_bound=float(data.attributeInfo[i][1][0]) max_bound=float(data.attributeInfo[i][1][1]) d+=abs(float(a[i])-float(b[i]))/float(max_bound-min_bound) #Kira & Rendell, 1992 -handling continiuous attributes return d
""" Name: MultiSURF.py Authors: Gediminas Bertasius and Ryan Urbanowicz - Written at Dartmouth College, Hanover, NH, USA Contact: ryan.j.urbanowicz@darmouth.edu Created: December 4, 2013 Modified: August 25,2014 Description: --------------------------------------------------------------------------------------------------------------------------------------------------------- ReBATE V1.0: includes stand-alone Python code to run any of the included/available Relief-Based algorithms designed for attribute filtering/ranking. These algorithms are a quick way to identify attributes in the dataset that may be most important to predicting some phenotypic endpoint. These scripts output an ordered set of attribute names, along with respective scores (uniquely determined by the particular algorithm selected). Certain algorithms require key run parameters to be specified. This code is largely based on the Relief-Based algorithms implemented in the Multifactor Dimensionality Reduction (MDR) software. However these implementations have been expanded to accomodate continuous attributes (and continuous attributes mixed with discrete attributes) as well as a continuous endpoint. This code also accomodates missing data points. Built into this code, is a strategy to automatically detect from the loaded data, these relevant characteristics. Copyright (C) 2013 Ryan Urbanowicz This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABLILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA --------------------------------------------------------------------------------------------------------------------------------------------------------- """ def run__multi_surf(data): """ Called to run the MultiSURF algorithm. #PARAM x- is a matrix containing the attributes of all instances in the dataset #PARAM y- is a matrix containing the class of a data instance """ x = [row[0] for row in data.trainFormatted] y = [row[1] for row in data.trainFormatted] print('Running MultiSURF Algorithm...') scores = multi_surf(x, y, data) print('MultiSURF run complete.') return scores def multi_surf(x, y, data): """ Controls major MultiSURF loops. """ score_list = [] for i in range(data.numAttributes): ScoreList.append(0) print('Precomputing Distance Array') distance_array = calculate_distance_array(x, data) print('Computed') same_class_bound = data.phenSD d = [] avg_distances = [] for i in range(data.numTrainInstances): dist_vector = [] dist_vector = get_individual_distances(i, data, distanceArray) avg_distances.append(get_average(dist_vector)) std_dev = get_std_dev(dist_vector, avg_distances[i]) D.append(std_dev / 2.0) for k in range(data.numAttributes): if data.attributeInfo[k][0]: min_a = data.attributeInfo[k][1][0] max_a = data.attributeInfo[k][1][1] count_hit_near = 0 count_miss_near = 0 count_hit_far = 0 count_miss_far = 0 diff_hit_near = 0 diff_miss_near = 0 diff_hit_far = 0 diff_miss_far = 0 for i in range(data.numTrainInstances): for j in range(i, data.numTrainInstances): if i != j and x[i][k] != data.labelMissingData and (x[j][k] != data.labelMissingData): locator = [i, j] locator = sorted(locator, reverse=True) d = distanceArray[locator[0]][locator[1]] if d < avg_distances[i] - D[i]: if data.discretePhenotype: if y[i] == y[j]: count_hit_near += 1 if x[i][k] != 
x[j][k]: if data.attributeInfo[k][0]: diff_hit_near -= abs(x[i][k] - x[j][k]) / (maxA - minA) else: diff_hit_near -= 1 else: count_miss_near += 1 if x[i][k] != x[j][k]: if data.attributeInfo[k][0]: diff_miss_near += abs(x[i][k] - x[j][k]) / (maxA - minA) else: diff_miss_near += 1 elif abs(y[i] - y[j]) < same_class_bound: count_hit_near += 1 if x[i][k] != x[j][k]: if data.attributeInfo[k][0]: diff_hit_near -= abs(x[i][k] - x[j][k]) / (maxA - minA) else: diff_hit_near -= 1 else: count_miss_near += 1 if x[i][k] != x[j][k]: if data.attributeInfo[k][0]: diff_miss_near += abs(x[i][k] - x[j][k]) / (maxA - minA) else: diff_miss_near += 1 if d > avg_distances[i] + D[i]: if data.discretePhenotype: if y[i] == y[j]: count_hit_far += 1 if data.attributeInfo[k][0]: diff_hit_far -= abs(x[i][k] - x[j][k]) / (maxA - minA) elif x[i][k] == x[j][k]: diff_hit_far -= 1 else: count_miss_far += 1 if data.attributeInfo[k][0]: diff_miss_far += abs(x[i][k] - x[j][k]) / (maxA - minA) elif x[i][k] == x[j][k]: diff_miss_far += 1 elif abs(y[i] - y[j]) < same_class_bound: count_hit_far += 1 if data.attributeInfo[k][0]: diff_hit_far -= abs(x[i][k] - x[j][k]) / (maxA - minA) elif x[i][k] == x[j][k]: diff_hit_far -= 1 else: count_miss_far += 1 if data.attributeInfo[k][0]: diff_miss_far += abs(x[i][k] - x[j][k]) / (maxA - minA) elif x[i][k] == x[j][k]: diff_miss_far += 1 hit_proportion = count_hit_near / float(count_hit_near + count_miss_near) miss_proportion = count_miss_near / float(count_hit_near + count_miss_near) diff = diff_hit_near * miss_proportion + diff_miss_near * hit_proportion hit_proportion = count_hit_far / float(count_hit_far + count_miss_far) miss_proportion = count_miss_far / float(count_hit_far + count_miss_far) diff += diff_hit_far * miss_proportion + diff_miss_far * hit_proportion ScoreList[k] += diff return ScoreList def multi_class_multi_surf(x, y, data): """ Controls major MultiSURF loops. 
""" score_list = [] for i in range(data.numAttributes): ScoreList.append(0) print('Precomputing Distance Array') distance_array = calculate_distance_array(x, data) print('Computed') multiclass_map = None if data.discretePhenotype and len(data.phenotypeList) > 2: multiclass_map = make_multi_class_map(y, data) d = [] avg_distances = [] for i in range(data.numTrainInstances): dist_vector = [] dist_vector = get_individual_distances(i, data, distanceArray) avg_distances.append(get_average(dist_vector)) std_dev = get_std_dev(dist_vector, avg_distances[i]) D.append(std_dev / 2.0) for k in range(data.numAttributes): if data.attributeInfo[k][0]: min_a = data.attributeInfo[k][1][0] max_a = data.attributeInfo[k][1][1] count_hit_near = 0 count_miss_near = 0 count_hit_far = 0 count_miss_far = 0 diff_hit_near = 0 diff_miss_near = 0 diff_hit_far = 0 diff_miss_far = 0 class__store_near = make_class_pair_map(multiclass_map) class__store_far = make_class_pair_map(multiclass_map) for i in range(data.numTrainInstances): for j in range(i, data.numTrainInstances): if i != j and x[i][k] != data.labelMissingData and (x[j][k] != data.labelMissingData): locator = [i, j] locator = sorted(locator, reverse=True) d = distanceArray[locator[0]][locator[1]] if d < avg_distances[i] - D[i]: if y[i] == y[j]: count_hit_near += 1 if x[i][k] != x[j][k]: if data.attributeInfo[k][0]: diff_hit_near -= abs(x[i][k] - x[j][k]) / (maxA - minA) else: diff_hit_near -= 1 else: count_miss_near += 1 locator = [y[i], y[j]] locator = sorted(locator, reverse=True) temp_string = str(locator[0]) + str(locator[1]) class_Store_near[tempString][0] += 1 if x[i][k] != x[j][k]: if data.attributeInfo[k][0]: class_Store_near[tempString][1] += abs(x[i][k] - x[j][k]) / (maxA - minA) else: class_Store_near[tempString][1] += 1 if d > avg_distances[i] + D[i]: if y[i] == y[j]: count_hit_far += 1 if data.attributeInfo[k][0]: diff_hit_far -= (1 - abs(x[i][k] - x[j][k])) / (maxA - minA) elif x[i][k] == x[j][k]: diff_hit_far -= 1 else: 
count_miss_far += 1 locator = [y[i], y[j]] locator = sorted(locator, reverse=True) temp_string = str(locator[0]) + str(locator[1]) class_Store_far[tempString][0] += 1 if data.attributeInfo[k][0]: class_Store_far[tempString][1] += abs(x[i][k] - x[j][k]) / (maxA - minA) elif x[i][k] == x[j][k]: class_Store_far[tempString][1] += 1 miss_sum = 0 for each in class_Store_near: miss_sum += class_Store_near[each][0] hit_proportion = count_hit_near / float(count_hit_near + count_miss_near) miss_proportion = count_miss_near / float(count_hit_near + count_miss_near) for each in class_Store_near: diff_miss_near += class_Store_near[each][0] / float(missSum) * class_Store_near[each][1] diff_miss_near = diff_miss_near * float(len(class_Store_near)) diff = diff_miss_near * hit_proportion + diff_hit_near * miss_proportion miss_sum = 0 for each in class_Store_far: miss_sum += class_Store_far[each][0] hit_proportion = count_hit_far / float(count_hit_far + count_miss_far) miss_proportion = count_miss_far / float(count_hit_far + count_miss_far) for each in class_Store_far: diff_miss_far += class_Store_far[each][0] / float(missSum) * class_Store_far[each][1] diff_miss_far = diff_miss_far * float(len(class_Store_far)) diff += diff_miss_far * hit_proportion + diff_hit_far * miss_proportion ScoreList[k] += diff return ScoreList def get_std_dev(dist_vector, avg): sum = 0 for i in range(len(dist_vector)): sum += (dist_vector[i] - avg) ** 2 sum = sum / float(len(dist_vector)) return sum ** 0.5 def get_average(dist_vector): sum = 0 for i in range(len(dist_vector)): sum += dist_vector[i] return sum / float(len(dist_vector)) def get_individual_distances(i, data, distanceArray): d = [] for j in range(data.numTrainInstances): if i != j: locator = [i, j] locator = sorted(locator, reverse=True) d.append(distanceArray[locator[0]][locator[1]]) return d def calculate_distance_array(x, data): dist_array = [] for i in range(data.numTrainInstances): distArray.append([]) for j in 
range(data.numTrainInstances): distArray[i].append(None) for i in range(1, data.numTrainInstances): for j in range(0, i): distArray[i][j] = calculate_distance(x[i], x[j], data) return distArray def make_multi_class_map(y, data): multiclass_map = {} for i in range(data.numTrainInstances): if y[i] not in multiclass_map: multiclass_map[y[i]] = 0 else: multiclass_map[y[i]] += 1 for each in data.phenotypeList: multiclass_map[each] = multiclass_map[each] / float(data.numTrainInstances) return multiclass_map def make_class_pair_map(multiclass_map): class_pair_map = {} for each in multiclass_map: for other in multiclass_map: if each != other: locator = [each, other] locator = sorted(locator, reverse=True) temp_string = str(locator[0]) + str(locator[1]) if tempString not in classPair_map: classPair_map[tempString] = [0, 0] return classPair_map def calculate_distance(a, b, data): """ Calculates the distance between two instances in the dataset. Handles discrete and continuous attributes. Continuous attributes are accomodated by scaling the distance difference within the context of the observed attribute range. If a respective data point is missing from either instance, it is left out of the distance calculation. """ d = 0 for i in range(data.numAttributes): if a[i] != data.labelMissingData and b[i] != data.labelMissingData: if not data.attributeInfo[i][0]: if a[i] != b[i]: d += 1 else: min_bound = float(data.attributeInfo[i][1][0]) max_bound = float(data.attributeInfo[i][1][1]) d += abs(float(a[i]) - float(b[i])) / float(max_bound - min_bound) return d
# provide a class storing the configuration of the back-end engine
class LXMconfig:
    """Configuration of the back-end engine.

    Defaults are established in ``__init__``; ``getConfiguration`` overrides
    them from a ``key = value`` text file and ``postConfiguration`` echoes
    the active settings when debug mode is on.
    """

    # config-file keys stored verbatim as strings (file key -> lxm_conf key)
    _STR_KEYS = {
        "influx_server": "influx_server",
        "service_ip": "service_ip",
        "influx_db1": "lxm_db1",
        "influx_db2": "lxm_db2",
        "influx_db3": "lxm_db3",
        "hpc_centers": "hpc_centers",
        "transfer_sizes": "transfer_sizes",
        "transfer_speeds": "transfer_speeds",
        "backend_URL": "backend_URL",
        "keycloak_URL": "keycloak_URL",
        "KC_REALM": "KC_REALM",
        "KC_CLID": "KC_CLID",
        "KC_SECRET": "KC_SECRET",
    }
    # config-file keys parsed as integers
    _INT_KEYS = {
        "influx_port": "influx_port",
        "debug": "debug",
        "service_port": "service_port",
        "cleanup_maintenance": "cleanup_maintenance",
        "header_request_id": "header_request_id",
        "header_forwarded_for": "header_forwarded_for",
        "heappe_middleware_available": "heappe_middleware_available",
        "openstack_available": "openstack_available",
    }
    # config-file keys parsed as 1/0 booleans
    _BOOL_KEYS = {"hard_exit": "hard_exit", "hard_startup": "hard_startup"}

    def __init__(self):
        """Set every configuration entry to its built-in default."""
        # influxDB client handles, populated later by the engine
        self.idb_c1 = None
        self.idb_c2 = None
        self.idb_c3 = None
        self.program_name = "lx_allocator"
        self.lxm_conf = {
            "header_request_id": 0,
            "header_forwarded_for": 1,
            "service_ip": "0.0.0.0",
            "service_port": 9000,
            "influx_server": "0.0.0.0",
            "influx_port": 8086,
            "debug": 1,
            "hard_exit": True,
            "hard_startup": True,
            "lxm_db1": "lxm_ddi_performance",
            "lxm_db2": "lxm_allocation",
            "lxm_db3": "lxm_maintenance",
            "cleanup_maintenance": 1,
            "backend_URL": None,
            "keycloak_URL": None,
            "KC_REALM": None,
            "KC_CLID": None,
            "KC_SECRET": None,
            "heappe_middleware_available": 0,
            "openstack_available": 0,
            "hpc_centers": None,
            "heappe_service_URLs": None,
            "transfer_sizes": "",
            "transfer_speeds": "",
        }

    # define configuration routines
    def getConfiguration(self, conf_path):
        """Read ``key = value`` lines from *conf_path* into ``lxm_conf``.

        Lines starting with '#', a newline or a tab are skipped.
        Unrecognized options are reported on stdout.  Returns 0.
        """
        # bug fix: 'with' guarantees the handle is closed even on error
        with open(conf_path, "r") as conf:
            for line in conf:
                # the character '#' is used for line comments in the file
                if line[0] in ("#", "\n", "\t"):
                    continue
                # bug fix: split only on the FIRST '=' so values such as
                # URLs containing '=' are no longer truncated
                param, sep, value = line.partition("=")
                param = param.strip("\n ")
                value = value.strip("\n ")
                if not sep:
                    # a line with no '=' is not a valid option
                    print(" error - unrecognized option (%s)" % (param))
                    continue
                if param in self._STR_KEYS:
                    self.lxm_conf[self._STR_KEYS[param]] = value
                elif param in self._INT_KEYS:
                    self.lxm_conf[self._INT_KEYS[param]] = int(value)
                elif param in self._BOOL_KEYS:
                    self.lxm_conf[self._BOOL_KEYS[param]] = int(value) == 1
                elif (
                    param == "heappe_service_URLs"
                    and self.lxm_conf["heappe_middleware_available"] == 1
                ):
                    # only honoured when the middleware was enabled on an
                    # earlier line of the same file (order-dependent)
                    self.lxm_conf["heappe_service_URLs"] = value
                else:
                    print(" error - unrecognized option (%s)" % (param))
        return 0

    # print out the current configuration
    def postConfiguration(self):
        """Print the active configuration when debug mode is on; return 0."""
        if self.lxm_conf["debug"] == 1:
            print(" --------------------------------------------------------")
            print(" Backend Service Configuration:")
            print(" --------------------------------------------------------")
            print(" hard-exit mode : %s" % (self.lxm_conf["hard_exit"]))
            print(" hard-startup mode : %s" % (self.lxm_conf["hard_startup"]))
            print(" debug mode : %d" % (self.lxm_conf["debug"]))
            print(" service address : %s " % (self.lxm_conf["service_ip"]))
            print(" service port : %-5d " % (self.lxm_conf["service_port"]))
            print(" influxDB address : %s " % (self.lxm_conf["influx_server"]))
            print(" influxDB port : %-5d " % (self.lxm_conf["influx_port"]))
            print(" db_client_1 : %s " % (self.lxm_conf["lxm_db1"]))
            print(" db_client_2 : %s " % (self.lxm_conf["lxm_db2"]))
            print(" db_client_3 : %s " % (self.lxm_conf["lxm_db3"]))
            print(" --------------------------------------------------------")
        # executing some action before serving the routes (initialization)
        if self.lxm_conf["debug"] == 1:
            print(" (dbg) the webapp is more verbose to support debugging")
        return 0
class Lxmconfig:
    """Back-end engine configuration: defaults, file parsing, reporting."""

    # file keys copied verbatim as strings
    _AS_STR = ('influx_server', 'service_ip', 'hpc_centers', 'transfer_sizes',
               'transfer_speeds', 'backend_URL', 'keycloak_URL', 'KC_REALM',
               'KC_CLID', 'KC_SECRET')
    # file keys converted with int()
    _AS_INT = ('influx_port', 'debug', 'service_port', 'cleanup_maintenance',
               'header_request_id', 'header_forwarded_for',
               'heappe_middleware_available', 'openstack_available')
    # file keys stored as 1/0 booleans
    _AS_BOOL = ('hard_exit', 'hard_startup')
    # file keys whose lxm_conf key differs from the file key
    _RENAMED = {'influx_db1': 'lxm_db1', 'influx_db2': 'lxm_db2',
                'influx_db3': 'lxm_db3'}

    def __init__(self):
        """Initialise every configuration entry to its default."""
        # influxDB client handles, filled in later by the engine
        self.idb_c1 = None
        self.idb_c2 = None
        self.idb_c3 = None
        self.program_name = 'lx_allocator'
        self.lxm_conf = {
            'header_request_id': 0,
            'header_forwarded_for': 1,
            'service_ip': '0.0.0.0',
            'service_port': 9000,
            'influx_server': '0.0.0.0',
            'influx_port': 8086,
            'debug': 1,
            'hard_exit': True,
            'hard_startup': True,
            'lxm_db1': 'lxm_ddi_performance',
            'lxm_db2': 'lxm_allocation',
            'lxm_db3': 'lxm_maintenance',
            'cleanup_maintenance': 1,
            'backend_URL': None,
            'keycloak_URL': None,
            'KC_REALM': None,
            'KC_CLID': None,
            'KC_SECRET': None,
            'heappe_middleware_available': 0,
            'openstack_available': 0,
            'hpc_centers': None,
            'heappe_service_URLs': None,
            'transfer_sizes': '',
            'transfer_speeds': '',
        }

    def get_configuration(self, conf_path):
        """Parse ``key = value`` lines from *conf_path*; return 0.

        Lines starting with '#', a newline or a tab are ignored.
        """
        # bug fix: use a context manager so the file is always closed
        with open(conf_path, 'r') as conf:
            for line in conf:
                if line[0] in ('#', '\n', '\t'):
                    continue
                # bug fix: partition on the first '=' only, so values that
                # themselves contain '=' (e.g. URLs) are kept intact
                param, sep, value = line.partition('=')
                param = param.strip('\n ')
                value = value.strip('\n ')
                if not sep:
                    print(' error - unrecognized option (%s)' % param)
                    continue
                if param in self._RENAMED:
                    self.lxm_conf[self._RENAMED[param]] = value
                elif param in self._AS_STR:
                    self.lxm_conf[param] = value
                elif param in self._AS_INT:
                    self.lxm_conf[param] = int(value)
                elif param in self._AS_BOOL:
                    self.lxm_conf[param] = int(value) == 1
                elif (param == 'heappe_service_URLs'
                      and self.lxm_conf['heappe_middleware_available'] == 1):
                    # only accepted when the middleware flag was already
                    # enabled earlier in the same file
                    self.lxm_conf['heappe_service_URLs'] = value
                else:
                    print(' error - unrecognized option (%s)' % param)
        return 0

    def post_configuration(self):
        """Print the current configuration in debug mode; return 0."""
        if self.lxm_conf['debug'] == 1:
            print(' --------------------------------------------------------')
            print(' Backend Service Configuration:')
            print(' --------------------------------------------------------')
            print(' hard-exit mode : %s' % self.lxm_conf['hard_exit'])
            print(' hard-startup mode : %s' % self.lxm_conf['hard_startup'])
            print(' debug mode : %d' % self.lxm_conf['debug'])
            print(' service address : %s ' % self.lxm_conf['service_ip'])
            print(' service port : %-5d ' % self.lxm_conf['service_port'])
            print(' influxDB address : %s ' % self.lxm_conf['influx_server'])
            print(' influxDB port : %-5d ' % self.lxm_conf['influx_port'])
            print(' db_client_1 : %s ' % self.lxm_conf['lxm_db1'])
            print(' db_client_2 : %s ' % self.lxm_conf['lxm_db2'])
            print(' db_client_3 : %s ' % self.lxm_conf['lxm_db3'])
            print(' --------------------------------------------------------')
        if self.lxm_conf['debug'] == 1:
            print(' (dbg) the webapp is more verbose to support debugging')
        return 0
{ "targets": [ { "target_name": "priorityqueue_native", "sources": [ "src/priorityqueue_native.cpp", "src/ObjectHolder.cpp", "src/index.d.ts"], "cflags": ["-Wall", "-std=c++11"], 'xcode_settings': { 'OTHER_CFLAGS': [ '-std=c++11' ], }, "conditions": [ [ 'OS=="mac"', { "xcode_settings": { 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 'OTHER_CPLUSPLUSFLAGS' : ['-std=c++11','-stdlib=libc++'], 'OTHER_LDFLAGS': ['-stdlib=libc++'], 'MACOSX_DEPLOYMENT_TARGET': '10.7' } } ] ] } ] }
{'targets': [{'target_name': 'priorityqueue_native', 'sources': ['src/priorityqueue_native.cpp', 'src/ObjectHolder.cpp', 'src/index.d.ts'], 'cflags': ['-Wall', '-std=c++11'], 'xcode_settings': {'OTHER_CFLAGS': ['-std=c++11']}, 'conditions': [['OS=="mac"', {'xcode_settings': {'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 'OTHER_CPLUSPLUSFLAGS': ['-std=c++11', '-stdlib=libc++'], 'OTHER_LDFLAGS': ['-stdlib=libc++'], 'MACOSX_DEPLOYMENT_TARGET': '10.7'}}]]}]}
class User: """ User class for creating password locker account and logging in """ user_credentials = [] def __init__(self, fullname, username, password): self.fullname = fullname self.username = username self.password = password def save_user(self): """ a funtion for saving user credentials after creating a account """ User.user_credentials.append(self) @classmethod def verify_user(cls, user_name, user_password): """ verify is the user has created an account and exists in the list.Returns a boolean value """ if len(cls.user_credentials) == 0: return False else: for user in cls.user_credentials: if user.username == user_name and user.password == user_password: return True return False
class User: """ User class for creating password locker account and logging in """ user_credentials = [] def __init__(self, fullname, username, password): self.fullname = fullname self.username = username self.password = password def save_user(self): """ a funtion for saving user credentials after creating a account """ User.user_credentials.append(self) @classmethod def verify_user(cls, user_name, user_password): """ verify is the user has created an account and exists in the list.Returns a boolean value """ if len(cls.user_credentials) == 0: return False else: for user in cls.user_credentials: if user.username == user_name and user.password == user_password: return True return False
"""Breadth-first search shortest path implementations.""" def bfs_shortest_path(graph, x, y): """Find shortest number of edges between nodes x and y. :x: a node :y: a node :Returns: shortest number of edges from node x to y or -1 if none exists """ if x == y: return 0 visited = [x] q = [x] # keep tab on distances from `x` dist = {x: 0, y: -1} while q: v = q.pop(0) if v == y: dist[y] = dist[v] if dist[y] == -1 else min(dist[y], dist[v]) for a in graph[v]: if a not in visited: visited.append(a) dist[a] = dist[v] + 1 q.append(a) return dist[y] def bfs_shortest_path_print(graph, x, y): """Return shortest path between nodes x and y.""" visited = [] q = [[x]] while q: path = q.pop(0) node = path[-1] if node not in visited: for adjacent in graph[node]: new_path = list(path) new_path.append(adjacent) if adjacent == y: return new_path q.append(new_path) visited.append(node) return f'No path from {x} to {y}.' if __name__ == '__main__': graph = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } print(bfs_shortest_path(graph, 'G', 'D')) print(bfs_shortest_path_print(graph, 'G', 'D'))
"""Breadth-first search shortest path implementations.""" def bfs_shortest_path(graph, x, y): """Find shortest number of edges between nodes x and y. :x: a node :y: a node :Returns: shortest number of edges from node x to y or -1 if none exists """ if x == y: return 0 visited = [x] q = [x] dist = {x: 0, y: -1} while q: v = q.pop(0) if v == y: dist[y] = dist[v] if dist[y] == -1 else min(dist[y], dist[v]) for a in graph[v]: if a not in visited: visited.append(a) dist[a] = dist[v] + 1 q.append(a) return dist[y] def bfs_shortest_path_print(graph, x, y): """Return shortest path between nodes x and y.""" visited = [] q = [[x]] while q: path = q.pop(0) node = path[-1] if node not in visited: for adjacent in graph[node]: new_path = list(path) new_path.append(adjacent) if adjacent == y: return new_path q.append(new_path) visited.append(node) return f'No path from {x} to {y}.' if __name__ == '__main__': graph = {'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C']} print(bfs_shortest_path(graph, 'G', 'D')) print(bfs_shortest_path_print(graph, 'G', 'D'))
''' ''' print('\nNested Function\n') def function_1(text): text = text def function_2(): print(text) function_2() if __name__ == '__main__': function_1('Welcome') print('\n closure Function \n') def function_1(text): text = text def function_2(): print(text) return function_2 if __name__ == '__main__': myFunction = function_1('Thanks !') myFunction()
""" """ print('\nNested Function\n') def function_1(text): text = text def function_2(): print(text) function_2() if __name__ == '__main__': function_1('Welcome') print('\n closure Function \n') def function_1(text): text = text def function_2(): print(text) return function_2 if __name__ == '__main__': my_function = function_1('Thanks !') my_function()
# def mul(a=1,b=3): # c=a*b # return c # def add(a=1,b=2): # c=a+b # return c class Student: def __init__(self,first,last,kid): self.fname = first self.lname = last self.kid = kid self.email = first + '.' + last +'@tamuk.edu' def firstname(self): return self.fname stu_1 = Student('ashwitha','devireddy','k00442409') stu_2 = Student('santosh','kesireddy','k00442410') print(stu_1.email) print(stu_2.email) print(stu_1.firstname())
class Student: def __init__(self, first, last, kid): self.fname = first self.lname = last self.kid = kid self.email = first + '.' + last + '@tamuk.edu' def firstname(self): return self.fname stu_1 = student('ashwitha', 'devireddy', 'k00442409') stu_2 = student('santosh', 'kesireddy', 'k00442410') print(stu_1.email) print(stu_2.email) print(stu_1.firstname())
def get_initial(name): initial = name[0:1].upper() return initial first_name = input('Enter your first name: ') first_name_initial = get_initial (first_name) middle_name = input('Enter your middle name: ') middle_name_initial = get_initial (middle_name) last_name = input('Enter your last name: ') last_name_initial = get_initial (last_name) print('You initials are: ' + first_name_initial + middle_name_initial + last_name_initial)
def get_initial(name): initial = name[0:1].upper() return initial first_name = input('Enter your first name: ') first_name_initial = get_initial(first_name) middle_name = input('Enter your middle name: ') middle_name_initial = get_initial(middle_name) last_name = input('Enter your last name: ') last_name_initial = get_initial(last_name) print('You initials are: ' + first_name_initial + middle_name_initial + last_name_initial)
class Password: ''' class of the password file ''' def __init__(self, page, password): self.page = page self.password = password ''' function for class properties ''' ''' user properties ''' user_password = [] def save_page(self): Password.user_passwords.append(self) ''' save password created by new user ''' def delete_page(self): Password.user_passwords.remove(self) ''' deletes password created by new user ''' def display_page(cls): return cls.user_passwords ''' displays new user passwords generated ''' def find_by_page(cls, pager): for pagy in cls.user_passwords: if pagy.page == pager: return pagy ''' function generates new user generated passwords ''' def page_exists(cls, pager): for pagy in cls.user_passwords: if pagy.page == pager: return pagy return False ''' functions displays already generated account credentials '''
class Password: """ class of the password file """ def __init__(self, page, password): self.page = page self.password = password '\nfunction for class properties\n' '\nuser properties\n' user_password = [] def save_page(self): Password.user_passwords.append(self) '\n save password created by new user\n ' def delete_page(self): Password.user_passwords.remove(self) '\n deletes password created by new user\n ' def display_page(cls): return cls.user_passwords '\ndisplays new user passwords generated\n' def find_by_page(cls, pager): for pagy in cls.user_passwords: if pagy.page == pager: return pagy '\nfunction generates new user generated passwords\n' def page_exists(cls, pager): for pagy in cls.user_passwords: if pagy.page == pager: return pagy return False '\nfunctions displays already generated account credentials \n'
# Main settings for package setup and PyPI deployment.
# (Sphinx configuration lives in the docsrc folder.)

# ---- identity -------------------------------------------------------------
PACKAGE_NAME = 'dstream_excel'
# version as (major, minor, release)
PACKAGE_VERSION_TUPLE = (0, 4, 6)

# ---- descriptions ---------------------------------------------------------
PACKAGE_SHORT_DESCRIPTION = (
    'Automate data collection from Thompson Reuters Datastream '
    'using the excel plugin'
)
PACKAGE_DESCRIPTION = """
Use this tool to drive Excel using the Thompson Reuters Eikon plugin to download Datastream data.
See more at the repo page: https://github.com/nickderobertis/datastream-excel-downloader-py
"""

# ---- authorship and license -----------------------------------------------
PACKAGE_AUTHOR = "Nick DeRobertis"
PACKAGE_AUTHOR_EMAIL = 'whoopnip@gmail.com'
PACKAGE_LICENSE = 'MIT'

# ---- PyPI classifiers -----------------------------------------------------
PACKAGE_CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Operating System :: Microsoft :: Windows',
]

# ---- runtime dependencies -------------------------------------------------
PACKAGE_INSTALL_REQUIRES = [
    'pypiwin32',
    'pandas',
    'numpy',
    'openpyxl',
    'xlrd',
    'exceldriver',
    'processfiles',
    'xlwings',
]

# Sphinx mock imports beyond PACKAGE_INSTALL_REQUIRES (sub-packages of
# installed distributions that must be skipped while building docs).
DOCS_OTHER_MOCK_IMPORTS = [
    'pythoncom',
    'win32com',
    'pywintypes',
    'winreg',
]

# ---- project links --------------------------------------------------------
PACKAGE_URLS = {
    'Code': 'https://github.com/nickderobertis/datastream-excel-downloader-py',
    'Documentation': 'https://nickderobertis.github.io/datastream-excel-downloader-py/',
}
package_name = 'dstream_excel' package_version_tuple = (0, 4, 6) package_short_description = 'Automate data collection from Thompson Reuters Datastream using the excel plugin' package_description = '\nUse this tool to drive Excel using the Thompson Reuters Eikon plugin to download Datastream data.\nSee more at the repo page: https://github.com/nickderobertis/datastream-excel-downloader-py\n' package_author = 'Nick DeRobertis' package_author_email = 'whoopnip@gmail.com' package_license = 'MIT' package_classifiers = ['Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Operating System :: Microsoft :: Windows'] package_install_requires = ['pypiwin32', 'pandas', 'numpy', 'openpyxl', 'xlrd', 'exceldriver', 'processfiles', 'xlwings'] docs_other_mock_imports = ['pythoncom', 'win32com', 'pywintypes', 'winreg'] package_urls = {'Code': 'https://github.com/nickderobertis/datastream-excel-downloader-py', 'Documentation': 'https://nickderobertis.github.io/datastream-excel-downloader-py/'}
while True: try: e = str(input()).strip() c = e.replace('.', '').replace('-', '') s = 0 for i in range(9): s += int(c[i]) * (i + 1) b1 = s % 11 b1 = 0 if b1 == 10 else b1 s = 0 if b1 == int(e[-2]): for i in range(9): s += int(c[i]) * (9 - i) b2 = s % 11 b2 = 0 if b2 == 10 else b2 if b2 == int(e[-1]): print('CPF valido') else: print('CPF invalido') else: print('CPF invalido') except EOFError: break
while True: try: e = str(input()).strip() c = e.replace('.', '').replace('-', '') s = 0 for i in range(9): s += int(c[i]) * (i + 1) b1 = s % 11 b1 = 0 if b1 == 10 else b1 s = 0 if b1 == int(e[-2]): for i in range(9): s += int(c[i]) * (9 - i) b2 = s % 11 b2 = 0 if b2 == 10 else b2 if b2 == int(e[-1]): print('CPF valido') else: print('CPF invalido') else: print('CPF invalido') except EOFError: break
# Intersection of 2 arrays class Solution: def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]: counter = None result = [] if len(nums1) < len(nums2): counter = collections.Counter(nums1) for n in nums2: if n in counter and counter[n] > 0: counter[n] -= 1 result.append(n) else: counter = collections.Counter(nums2) for n in nums1: if n in counter and counter[n] > 0: counter[n] -= 1 result.append(n) return result
class Solution: def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]: counter = None result = [] if len(nums1) < len(nums2): counter = collections.Counter(nums1) for n in nums2: if n in counter and counter[n] > 0: counter[n] -= 1 result.append(n) else: counter = collections.Counter(nums2) for n in nums1: if n in counter and counter[n] > 0: counter[n] -= 1 result.append(n) return result
class propertyDecorator(object): def __init__(self, x): self._x = x @property def x(self): return self._x @x.setter def x(self, value): self._x = value pd = propertyDecorator(100) print(pd.x) pd.x = 10 print(pd.x)
class Propertydecorator(object): def __init__(self, x): self._x = x @property def x(self): return self._x @x.setter def x(self, value): self._x = value pd = property_decorator(100) print(pd.x) pd.x = 10 print(pd.x)
countries = ["USA", "Spain", "France", "Canada"] for country in countries: print(country) data = "Hello from python" for out in data: print(out) for numero in range(8): print(numero)
countries = ['USA', 'Spain', 'France', 'Canada'] for country in countries: print(country) data = 'Hello from python' for out in data: print(out) for numero in range(8): print(numero)
# MIDI meta-event (0xFF) specification table, keyed by event type id.
# Per event: value 'dtype', payload 'length' (-1 = variable), per-byte
# 'mask' values, parameter names and a 'default' payload.
# (A ~150-line commented-out JSON draft of this same table was removed as
# dead code; entry 88 gained the 'params' names that draft specified.)
X = {
    255: {
        0: {'dtype': 'int', 'length': 2, 'mask': (255, 255),
            'params': ('nmsb', 'nlsb'), 'type_id': 0,
            'type_name': 'sequence_number', 'default': [0, 0]},
        1: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 1, 'type_name': 'text_event',
            'default': 'Enter the Text'},
        2: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 2, 'type_name': 'copyright_notice',
            'default': 'No Copyright'},
        3: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 3, 'type_name': 'track_name', 'default': 'Track 0'},
        4: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 4, 'type_name': 'instrument_name', 'default': 'Piano'},
        5: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 5, 'type_name': 'lyrics', 'default': 'LYRICS'},
        6: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 6, 'type_name': 'marker', 'default': '*'},
        7: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 7, 'type_name': 'cue_point', 'default': '-'},
        32: {'dtype': 'int', 'length': 1, 'mask': (15,),
             'params': ('channel',), 'type_id': 32,
             'type_name': 'midi_ch_prefix', 'default': [0]},
        33: {'dtype': 'int', 'length': 1, 'mask': (15,),
             'params': ('port_no',), 'type_id': 33,
             'type_name': 'midi_port', 'default': [0]},
        47: {'dtype': 'None', 'length': 0, 'mask': 127, 'params': None,
             'type_id': 47, 'type_name': 'end_of_track', 'default': None},
        81: {'dtype': 'int', 'length': 3, 'mask': (127, 127, 127),
             'params': ('musec_per_quat_note', 'musec_per_quat_note',
                        'musec_per_quat_note'),
             'type_id': 81, 'type_name': 'set_tempo', 'default': [0, 0, 0]},
        84: {'dtype': 'int', 'length': 5, 'mask': (24, 60, 60, 30, 100),
             'params': ('hr', 'min', 'sec', 'fr', 'subfr'), 'type_id': 84,
             'type_name': 'smpte_offset', 'default': [0, 0, 0, 0, 0]},
        # bug fix: this was the only fixed-length event missing its
        # 'params' tuple; names restored from the removed draft spec
        88: {'dtype': 'int', 'length': 4, 'mask': (255, 255, 255, 255),
             'params': ('numer', 'denom', 'metro', '32nds'), 'type_id': 88,
             'type_name': 'time_sig', 'default': [0, 0, 0, 0]},
        89: {'dtype': 'int', 'length': 2, 'mask': (15, 1),
             'params': ('key', 'scale'), 'type_id': 89,
             'type_name': 'key_sig', 'default': [0, 0]},
        127: {'dtype': 'any', 'length': -1, 'mask': 127, 'params': 'text',
              'type_id': 127, 'type_name': 'sequence_specifier',
              'default': [0]},
        'id': 255,
        'name': 'meta',
    }
}
# Lower-case copy of the MIDI meta-event (0xFF) specification table.
x = {
    255: {
        0: {'dtype': 'int', 'length': 2, 'mask': (255, 255),
            'params': ('nmsb', 'nlsb'), 'type_id': 0,
            'type_name': 'sequence_number', 'default': [0, 0]},
        1: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 1, 'type_name': 'text_event',
            'default': 'Enter the Text'},
        2: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 2, 'type_name': 'copyright_notice',
            'default': 'No Copyright'},
        3: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 3, 'type_name': 'track_name', 'default': 'Track 0'},
        4: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 4, 'type_name': 'instrument_name', 'default': 'Piano'},
        5: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 5, 'type_name': 'lyrics', 'default': 'LYRICS'},
        6: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 6, 'type_name': 'marker', 'default': '*'},
        7: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 7, 'type_name': 'cue_point', 'default': '-'},
        32: {'dtype': 'int', 'length': 1, 'mask': (15,),
             'params': ('channel',), 'type_id': 32,
             'type_name': 'midi_ch_prefix', 'default': [0]},
        33: {'dtype': 'int', 'length': 1, 'mask': (15,),
             'params': ('port_no',), 'type_id': 33,
             'type_name': 'midi_port', 'default': [0]},
        47: {'dtype': 'None', 'length': 0, 'mask': 127, 'params': None,
             'type_id': 47, 'type_name': 'end_of_track', 'default': None},
        81: {'dtype': 'int', 'length': 3, 'mask': (127, 127, 127),
             'params': ('musec_per_quat_note', 'musec_per_quat_note',
                        'musec_per_quat_note'),
             'type_id': 81, 'type_name': 'set_tempo', 'default': [0, 0, 0]},
        84: {'dtype': 'int', 'length': 5, 'mask': (24, 60, 60, 30, 100),
             'params': ('hr', 'min', 'sec', 'fr', 'subfr'), 'type_id': 84,
             'type_name': 'smpte_offset', 'default': [0, 0, 0, 0, 0]},
        # bug fix: entry 88 was the only fixed-length event without a
        # 'params' tuple; names match the other copy of this table
        88: {'dtype': 'int', 'length': 4, 'mask': (255, 255, 255, 255),
             'params': ('numer', 'denom', 'metro', '32nds'), 'type_id': 88,
             'type_name': 'time_sig', 'default': [0, 0, 0, 0]},
        89: {'dtype': 'int', 'length': 2, 'mask': (15, 1),
             'params': ('key', 'scale'), 'type_id': 89,
             'type_name': 'key_sig', 'default': [0, 0]},
        127: {'dtype': 'any', 'length': -1, 'mask': 127, 'params': 'text',
              'type_id': 127, 'type_name': 'sequence_specifier',
              'default': [0]},
        'id': 255,
        'name': 'meta',
    }
}
def make_car(car_manufacturer,car_model,**Other_attributes): Other_attributes['car_model']=car_model Other_attributes['car_manufacturer']=car_manufacturer return Other_attributes print(make_car('Toyota','Rav 4',color='blue'))
def make_car(car_manufacturer, car_model, **Other_attributes): Other_attributes['car_model'] = car_model Other_attributes['car_manufacturer'] = car_manufacturer return Other_attributes print(make_car('Toyota', 'Rav 4', color='blue'))
# -*- coding: utf-8 -*- """ Created on Fri Feb 22 16:21:21 2019 @author: x """
""" Created on Fri Feb 22 16:21:21 2019 @author: x """
def find_largest_palindrome(range_start: int, range_end: int) -> int: largest: int = 0 product: int i: int for i in range(range_start, range_end): j: int for j in range(range_start, range_end): product = i * j if is_palindrome(str(product)) and product > largest: largest = product return largest def is_palindrome(input_string: str) -> bool: input_length: int = len(input_string) direct: int index: int for index in range(0, input_length): direct = input_length - index - 1 if direct <= index: return True elif input_string[direct] != input_string[index]: return False return False
def find_largest_palindrome(range_start: int, range_end: int) -> int: largest: int = 0 product: int i: int for i in range(range_start, range_end): j: int for j in range(range_start, range_end): product = i * j if is_palindrome(str(product)) and product > largest: largest = product return largest def is_palindrome(input_string: str) -> bool: input_length: int = len(input_string) direct: int index: int for index in range(0, input_length): direct = input_length - index - 1 if direct <= index: return True elif input_string[direct] != input_string[index]: return False return False
# SPDX-License-Identifier: MIT # Copyright (c) 2020 Akumatic # # https://adventofcode.com/2020/day/18 def read_file() -> list: with open(f"{__file__.rstrip('code.py')}input.txt", "r") as f: return [line.strip() for line in f.readlines()] def evaluate(values: list, operators: list, precedence: bool) -> int: if not precedence: # "+" and "*" have same precedence levels result = int(values[0]) for i in range(len(operators)): if operators[i] == "+": result += int(values[i+1]) else: # operators[i] == "*" result *= int(values[i+1]) else: # "+" and "*" have different precedence levels; "+" evaluated before "*" while True: try: idx = operators.index("+") values = values[:idx] + [values[idx] + values[idx+1]] + values[idx+2:] operators = operators[:idx] + operators[idx+1:] except ValueError: break result = 1 for factor in values: result *= factor return result def parse(expression: str, precedence: bool = False) -> int: expression = expression.replace(" ", "") values = list() operators = list() i = 0 while i < len(expression): if expression[i] == "+": operators.append("+") i += 1 elif expression[i] == "*": operators.append("*") i += 1 elif expression[i] == "(": # find correct closing bracket layer = 1 j = i + 1 while j < len(expression): if expression[j] == "(": layer += 1 elif expression[j] == ")": if layer == 1: break layer -= 1 j += 1 # evaluate expression between brackets values.append(parse(expression[i+1:j], precedence)) i += j - i + 1 else: # numbers j = i value = 0 while j < len(expression) and expression[j].isnumeric(): value = value * 10 + int(expression[j]) j += 1 values.append(value) i += j - i return evaluate(values, operators, precedence) def part1(input: list) -> int: return sum([parse(line) for line in input]) def part2(input: list) -> int: return sum([parse(line, precedence=True) for line in input]) if __name__ == "__main__": input = read_file() print(f"Part 1: {part1(input)}") print(f"Part 2: {part2(input)}")
def read_file() -> list:
    """Return the stripped lines of the input file next to this script.

    Bug fix: str.rstrip('code.py') removes a character *set*, not a
    suffix, and could eat trailing characters of the directory name.
    Derive the directory with pathlib instead.
    """
    from pathlib import Path
    path = Path(__file__).resolve().parent / 'input.txt'
    return [line.strip() for line in path.read_text().splitlines()]

def evaluate(values: list, operators: list, precedence: bool) -> int:
    """Reduce values/operators to a single int.

    precedence=False: strict left-to-right; precedence=True: all "+"
    applications happen before the multiplications.
    """
    if not precedence:
        result = int(values[0])
        for i in range(len(operators)):
            if operators[i] == '+':
                result += int(values[i + 1])
            else:
                result *= int(values[i + 1])
    else:
        # Fold every addition first; ValueError means no "+" remains.
        while True:
            try:
                idx = operators.index('+')
                values = values[:idx] + [values[idx] + values[idx + 1]] + values[idx + 2:]
                operators = operators[:idx] + operators[idx + 1:]
            except ValueError:
                break
        result = 1
        for factor in values:
            result *= factor
    return result

def parse(expression: str, precedence: bool=False) -> int:
    """Evaluate an arithmetic expression of +, * and parentheses."""
    expression = expression.replace(' ', '')
    values = list()
    operators = list()
    i = 0
    while i < len(expression):
        if expression[i] == '+':
            operators.append('+')
            i += 1
        elif expression[i] == '*':
            operators.append('*')
            i += 1
        elif expression[i] == '(':
            # Scan for the matching close bracket, tracking nesting depth.
            layer = 1
            j = i + 1
            while j < len(expression):
                if expression[j] == '(':
                    layer += 1
                elif expression[j] == ')':
                    if layer == 1:
                        break
                    layer -= 1
                j += 1
            # The bracketed slice is evaluated recursively.
            values.append(parse(expression[i + 1:j], precedence))
            i += j - i + 1
        else:
            # Read a run of digits as one number.
            j = i
            value = 0
            while j < len(expression) and expression[j].isnumeric():
                value = value * 10 + int(expression[j])
                j += 1
            values.append(value)
            i += j - i
    return evaluate(values, operators, precedence)

def part1(input: list) -> int:
    """Sum each line with flat precedence."""
    return sum([parse(line) for line in input])

def part2(input: list) -> int:
    """Sum each line with "+" evaluated before "*"."""
    return sum([parse(line, precedence=True) for line in input])

if __name__ == '__main__':
    input = read_file()
    print(f'Part 1: {part1(input)}')
    print(f'Part 2: {part2(input)}')
class AuthenticationError(Exception):
    """Signals an authentication failure."""


class MarketClosedError(Exception):
    """Signals that the market is closed."""


class MarketEmptyError(Exception):
    """Signals that the market holds no entries."""


class InternalStateBotError(Exception):
    """Signals an inconsistency in the bot's internal state."""
# NOTE(review): these class names violate PEP 8 CapWords (AuthenticationError,
# MarketClosedError, ...), but renaming would break existing callers, so the
# spellings are kept as-is.
class Authenticationerror(Exception):
    """Signals an authentication failure."""


class Marketclosederror(Exception):
    """Signals that the market is closed."""


class Marketemptyerror(Exception):
    """Signals that the market holds no entries."""


class Internalstateboterror(Exception):
    """Signals an inconsistency in the bot's internal state."""
class Database:
    """Minimal in-memory store mapping an id to a timestamp/count record."""

    def __init__(self):
        # id -> {'timestamp': ..., 'count': ...}
        self.stuff = {}

    def cleanup(self):
        """Discard every stored record."""
        self.stuff = {}

    def save(self, id, timestamp, count):
        """Store (timestamp, count) under id, replacing any previous record."""
        record = {'timestamp': timestamp, 'count': count}
        self.stuff[id] = record

    def find_all(self, id):
        """Return the record stored under id, wrapped in a one-element list."""
        return [self.stuff[id]]

    def count(self):
        """Return how many ids currently have a record."""
        return len(self.stuff)
class Database:
    """Tiny dict-backed stand-in for a real database."""

    def __init__(self):
        self.stuff = {}  # keyed by id

    def cleanup(self):
        """Reset the store to empty."""
        self.stuff = {}

    def save(self, id, timestamp, count):
        """Upsert the record for id with the given timestamp and count."""
        self.stuff[id] = dict(timestamp=timestamp, count=count)

    def find_all(self, id):
        """Look up id and return its single record as a list."""
        found = self.stuff[id]
        return [found]

    def count(self):
        """Number of distinct ids stored."""
        return len(self.stuff)
def assemble_message(message: str, error: bool = False) -> str:
    """Prefix message with a status token: '-ERR' on error, '+OK' otherwise."""
    print("Assembling Message")
    prefix = "-ERR" if error else "+OK"
    return f"{prefix} {message}"
def assemble_message(message: str, error: bool=False) -> str:
    """Wrap message with the appropriate status prefix.

    error=True yields '-ERR <message>'; otherwise '+OK <message>'.
    """
    print('Assembling Message')
    if error:
        result = '-ERR {0}'.format(message)
    else:
        result = '+OK {0}'.format(message)
    return result
def longest_special_subseq(_n, dist, chars):
    """Return the length of the longest subsequence of chars in which every
    pair of consecutive letters is at most dist apart in the alphabet.

    _n is the declared (unused) length of chars; dist is the maximum
    allowed alphabet distance between neighbouring subsequence letters.

    Bug fix: removed leftover debug print() calls that polluted stdout
    (fatal for judged output).
    """
    codes = tuple(ord(char) - ord("a") for char in chars)
    # lengths[c] = longest valid subsequence seen so far that ends in letter c.
    lengths = [0] * 26
    for code in codes:
        c_from = max(code - dist, 0)
        c_to = min(code + dist, 25)
        # Extend the best subsequence whose final letter is within reach.
        lengths[code] = max(lengths[c_from:c_to + 1]) + 1
    return max(lengths)


# Guarding the I/O loop keeps the module importable without stdin.
if __name__ == "__main__":
    t = int(input())
    for tc in range(t):
        n, k, s = input().split()
        n = int(n)
        k = int(k)
        print(longest_special_subseq(n, k, s))
def longest_special_subseq(_n, dist, chars):
    """Length of the longest subsequence whose consecutive letters differ
    by at most dist alphabet positions (a=0 .. z=25).

    Bug fix: stray debug print() statements removed — they corrupted the
    program's judged stdout.
    """
    # best_ending_in[c]: best subsequence length ending in letter c so far.
    best_ending_in = [0] * 26
    for ch in chars:
        c = ord(ch) - ord('a')
        reachable = best_ending_in[max(c - dist, 0):min(c + dist, 25) + 1]
        best_ending_in[c] = max(reachable) + 1
    return max(best_ending_in)


# I/O is guarded so importing this module does not consume stdin.
if __name__ == '__main__':
    for _tc in range(int(input())):
        (n, k, s) = input().split()
        print(longest_special_subseq(int(n), int(k), s))