content
stringlengths
7
1.05M
fixed_cases
stringlengths
1
1.28M
class BaseError(Exception): pass class AddQueryInvalid(Exception): def __init__(self, exception, message="Parameter Add condition is invalid"): self.message = message self.exception = exception super().__init__(self.message) def __str__(self): return f'{self.message}: {self.exception}' class DataValueInvalid(BaseError): def __init__(self, exception, message="Invalid value data given"): self.message = message self.exception = exception super().__init__(self.message) def __str__(self): return f'{self.message}: {self.exception}' class NoneTypeValue(BaseError): def __init__(self, exception, message="The value is empty or none"): self.message = message self.exception = exception super().__init__(self.message) def __str__(self): return f'{self.message}: {self.exception}'
class Baseerror(Exception): pass class Addqueryinvalid(Exception): def __init__(self, exception, message='Parameter Add condition is invalid'): self.message = message self.exception = exception super().__init__(self.message) def __str__(self): return f'{self.message}: {self.exception}' class Datavalueinvalid(BaseError): def __init__(self, exception, message='Invalid value data given'): self.message = message self.exception = exception super().__init__(self.message) def __str__(self): return f'{self.message}: {self.exception}' class Nonetypevalue(BaseError): def __init__(self, exception, message='The value is empty or none'): self.message = message self.exception = exception super().__init__(self.message) def __str__(self): return f'{self.message}: {self.exception}'
""" Django Extras ~~~~~~~~~~~~~ Extensions for Django to solve common development situations not (or not yet) covered by the core Django framework. """ __version__ = '0.3'
""" Django Extras ~~~~~~~~~~~~~ Extensions for Django to solve common development situations not (or not yet) covered by the core Django framework. """ __version__ = '0.3'
lang1 = {'Hindi','Urdu'} lang2= {'Punjabi'} print("Original Set: ", lang2) lang2 = lang1.copy() print("Copied Set: ", lang2)
lang1 = {'Hindi', 'Urdu'} lang2 = {'Punjabi'} print('Original Set: ', lang2) lang2 = lang1.copy() print('Copied Set: ', lang2)
def datacfg(): datacfg = dict() datacfg['abalone'] = dict() datacfg['abalone']['filepath'] = '.\\data\\abalone.pkl' datacfg['abalone']['targets'] = ['rings'] datacfg['abalone']['probtype'] = ['regression', 'classification'] datacfg['autoMpg'] = dict() datacfg['autoMpg']['filepath'] = '.\\data\\autoMpg.pkl' datacfg['autoMpg']['targets'] = ['mpg'] datacfg['autoMpg']['probtype'] = ['regression'] datacfg['bankMarketing'] = dict() datacfg['bankMarketing']['filepath'] = '.\\data\\bankMarketing.pkl' datacfg['bankMarketing']['targets'] = ['y'] datacfg['bankMarketing']['probtype'] = ['classification'] datacfg['bostonHousing'] = dict() datacfg['bostonHousing']['filepath'] = '.\\data\\bostonHousing.pkl' datacfg['bostonHousing']['targets'] = ['PRICE'] datacfg['bostonHousing']['probtype'] = ['regression'] datacfg['heartDiseaseWisconsin'] = dict() datacfg['heartDiseaseWisconsin']['filepath'] = '.\\data\\heartDiseaseWisconsin.pkl' datacfg['heartDiseaseWisconsin']['targets'] = ['num'] datacfg['heartDiseaseWisconsin']['probtype'] = ['regression'] datacfg['wineCultivar'] = dict() datacfg['wineCultivar']['filepath'] = '.\\data\\wineCultivar.pkl' datacfg['wineCultivar']['targets'] = ['cultivar'] datacfg['wineCultivar']['probtype'] = ['classification'] datacfg['winequality-red'] = dict() datacfg['winequality-red']['filepath'] = '.\\data\\winequality-red.pkl' datacfg['winequality-red']['targets'] = ['quality'] datacfg['winequality-red']['probtype'] = ['regression', 'classification'] datacfg['winequality-white'] = dict() datacfg['winequality-white']['filepath'] = '.\\data\\winequality-white.pkl' datacfg['winequality-white']['targets'] = ['quality'] datacfg['winequality-white']['probtype'] = ['regression', 'classification'] datacfg['iris'] = dict() datacfg['iris']['filepath'] = '.\\data\\iris.pkl' datacfg['iris']['targets'] = ['class'] datacfg['iris']['probtype'] = ['classification'] datacfg['pima'] = dict() datacfg['pima']['filepath'] = '.\\data\\pima.pkl' datacfg['pima']['targets'] = 
['class'] datacfg['pima']['probtype'] = ['classification'] datacfg['adult'] = dict() datacfg['adult']['filepath'] = '.\\data\\adult.pkl' datacfg['adult']['targets'] = ['class'] datacfg['adult']['probtype'] = ['classification'] return datacfg
def datacfg(): datacfg = dict() datacfg['abalone'] = dict() datacfg['abalone']['filepath'] = '.\\data\\abalone.pkl' datacfg['abalone']['targets'] = ['rings'] datacfg['abalone']['probtype'] = ['regression', 'classification'] datacfg['autoMpg'] = dict() datacfg['autoMpg']['filepath'] = '.\\data\\autoMpg.pkl' datacfg['autoMpg']['targets'] = ['mpg'] datacfg['autoMpg']['probtype'] = ['regression'] datacfg['bankMarketing'] = dict() datacfg['bankMarketing']['filepath'] = '.\\data\\bankMarketing.pkl' datacfg['bankMarketing']['targets'] = ['y'] datacfg['bankMarketing']['probtype'] = ['classification'] datacfg['bostonHousing'] = dict() datacfg['bostonHousing']['filepath'] = '.\\data\\bostonHousing.pkl' datacfg['bostonHousing']['targets'] = ['PRICE'] datacfg['bostonHousing']['probtype'] = ['regression'] datacfg['heartDiseaseWisconsin'] = dict() datacfg['heartDiseaseWisconsin']['filepath'] = '.\\data\\heartDiseaseWisconsin.pkl' datacfg['heartDiseaseWisconsin']['targets'] = ['num'] datacfg['heartDiseaseWisconsin']['probtype'] = ['regression'] datacfg['wineCultivar'] = dict() datacfg['wineCultivar']['filepath'] = '.\\data\\wineCultivar.pkl' datacfg['wineCultivar']['targets'] = ['cultivar'] datacfg['wineCultivar']['probtype'] = ['classification'] datacfg['winequality-red'] = dict() datacfg['winequality-red']['filepath'] = '.\\data\\winequality-red.pkl' datacfg['winequality-red']['targets'] = ['quality'] datacfg['winequality-red']['probtype'] = ['regression', 'classification'] datacfg['winequality-white'] = dict() datacfg['winequality-white']['filepath'] = '.\\data\\winequality-white.pkl' datacfg['winequality-white']['targets'] = ['quality'] datacfg['winequality-white']['probtype'] = ['regression', 'classification'] datacfg['iris'] = dict() datacfg['iris']['filepath'] = '.\\data\\iris.pkl' datacfg['iris']['targets'] = ['class'] datacfg['iris']['probtype'] = ['classification'] datacfg['pima'] = dict() datacfg['pima']['filepath'] = '.\\data\\pima.pkl' datacfg['pima']['targets'] = 
['class'] datacfg['pima']['probtype'] = ['classification'] datacfg['adult'] = dict() datacfg['adult']['filepath'] = '.\\data\\adult.pkl' datacfg['adult']['targets'] = ['class'] datacfg['adult']['probtype'] = ['classification'] return datacfg
# Define echo def echo(n): """Return the inner_echo function.""" # Define inner_echo def inner_echo(word1): """Concatenate n copies of word1.""" echo_word = word1 * n return echo_word # Return inner_echo return inner_echo # Call echo: twice twice = echo(2) # Call echo: thrice thrice = echo(3) # Call twice() and thrice() then print print(twice('hello'), thrice('hello'))
def echo(n): """Return the inner_echo function.""" def inner_echo(word1): """Concatenate n copies of word1.""" echo_word = word1 * n return echo_word return inner_echo twice = echo(2) thrice = echo(3) print(twice('hello'), thrice('hello'))
def my_function(function): function() print(my_function(lambda : 99))
def my_function(function): function() print(my_function(lambda : 99))
class Tokenizer: def tokenize(self, text): # remove comments but keep empty lines instead # to preserve line numbers lines = text.splitlines() lines = [('' if line.strip().startswith(';') else line) for line in lines] t = '\n'.join(lines) # expand symbols for easier tokenization t = t.replace('(', ' ( ') t = t.replace(')', ' ) ') tokens = t.split() return tokens
class Tokenizer: def tokenize(self, text): lines = text.splitlines() lines = ['' if line.strip().startswith(';') else line for line in lines] t = '\n'.join(lines) t = t.replace('(', ' ( ') t = t.replace(')', ' ) ') tokens = t.split() return tokens
#!/usr/bin/env python3 bicycles = ["trek", "cannondale", "redline", "specialized"] print("{}\n".format(bicycles)) print("{} Object type: {}".format("bicycles", type(bicycles))) print(bicycles[0]) for bike in bicycles: print(bike.title())
bicycles = ['trek', 'cannondale', 'redline', 'specialized'] print('{}\n'.format(bicycles)) print('{} Object type: {}'.format('bicycles', type(bicycles))) print(bicycles[0]) for bike in bicycles: print(bike.title())
def merge(a: list, b: list) -> list: i = 0 j = 0 c = [0 for _ in range(len(a) + len(b))] while i < len(a) or j < len(b): if j == len(b) or i < len(a) and a[i] < b[j]: c[i + j] = a[i] i += 1 else: c[i + j] = b[j] j += 1 return c def sort(a: list) -> list: n = len(a) if n <= 1: return a l = a[:n//2] r = a[n//2:] l = sort(l) r = sort(r) return merge(l, r) a = [1, 4, 5, 7, 2, 3, 6] print(sort(a))
def merge(a: list, b: list) -> list: i = 0 j = 0 c = [0 for _ in range(len(a) + len(b))] while i < len(a) or j < len(b): if j == len(b) or (i < len(a) and a[i] < b[j]): c[i + j] = a[i] i += 1 else: c[i + j] = b[j] j += 1 return c def sort(a: list) -> list: n = len(a) if n <= 1: return a l = a[:n // 2] r = a[n // 2:] l = sort(l) r = sort(r) return merge(l, r) a = [1, 4, 5, 7, 2, 3, 6] print(sort(a))
class CORSMiddleware: def process_response(self, request, response): response['Access-Control-Allow-Origin'] = "*" return response
class Corsmiddleware: def process_response(self, request, response): response['Access-Control-Allow-Origin'] = '*' return response
# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com # # Python test originally created or extracted from other peoples work. The # parts from me are licensed as below. It is at least Free Software where # it's copied from other people. In these cases, that will normally be # indicated. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This taken from CPython's pystone test, and is an extract of it I made to analyse the # differences between CPython and Nuitka performance. It was under PSF 2 license. It's not # very useful anymore, but it is under that license still. LOOPS = 50000 class Record: def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0, IntComp = 0, StringComp = 0): self.PtrComp = PtrComp self.Discr = Discr self.EnumComp = EnumComp self.IntComp = IntComp self.StringComp = StringComp def copy(self): return Record(self.PtrComp, self.Discr, self.EnumComp, self.IntComp, self.StringComp) def Proc1(PtrParIn): PtrParIn.PtrComp = NextRecord = PtrGlb.copy() PtrParIn.IntComp = 5 NextRecord.IntComp = PtrParIn.IntComp NextRecord.PtrComp = PtrParIn.PtrComp if NextRecord.Discr == 17: NextRecord.IntComp = 6 NextRecord.EnumComp = Proc6(PtrParIn.EnumComp) NextRecord.PtrComp = PtrGlb.PtrComp NextRecord.IntComp = Proc7(NextRecord.IntComp, 10) else: PtrParIn = NextRecord.copy() NextRecord.PtrComp = None return PtrParIn def benchmark(loops): global PtrGlb global PtrGlbNext PtrGlb = Record() for i in xrange(loops): PtrGlb = Proc1(PtrGlb) if __name__ == "__main__": benchmark(LOOPS)
loops = 50000 class Record: def __init__(self, PtrComp=None, Discr=0, EnumComp=0, IntComp=0, StringComp=0): self.PtrComp = PtrComp self.Discr = Discr self.EnumComp = EnumComp self.IntComp = IntComp self.StringComp = StringComp def copy(self): return record(self.PtrComp, self.Discr, self.EnumComp, self.IntComp, self.StringComp) def proc1(PtrParIn): PtrParIn.PtrComp = next_record = PtrGlb.copy() PtrParIn.IntComp = 5 NextRecord.IntComp = PtrParIn.IntComp NextRecord.PtrComp = PtrParIn.PtrComp if NextRecord.Discr == 17: NextRecord.IntComp = 6 NextRecord.EnumComp = proc6(PtrParIn.EnumComp) NextRecord.PtrComp = PtrGlb.PtrComp NextRecord.IntComp = proc7(NextRecord.IntComp, 10) else: ptr_par_in = NextRecord.copy() NextRecord.PtrComp = None return PtrParIn def benchmark(loops): global PtrGlb global PtrGlbNext ptr_glb = record() for i in xrange(loops): ptr_glb = proc1(PtrGlb) if __name__ == '__main__': benchmark(LOOPS)
# Write a Python program to # input a string and change its # case to Sentence case using capitalize() # function. It will convert only the first # (beginning)lower case letter to upper case. # it will capitalize only first letter # of a Sentence. # The user will enter String # during program execution # Start of the Python program # display message to enter string # and get input string str1 = input("Enter a string to convert into Sentence case:") # use capitalize() method sentence_string = str1.capitalize() # now print UPPER CASE string print(str1, "In sentence case =", sentence_string)
str1 = input('Enter a string to convert into Sentence case:') sentence_string = str1.capitalize() print(str1, 'In sentence case =', sentence_string)
# SPDX-License-Identifier: BSD-2-Clause QEMU_SPECIAL_KEYS = { " " : "spc", "!" : "shift-1", '"' : "shift-apostrophe", "#" : "shift-3", "$" : "shift-4", "%" : "shift-5", "&" : "shift-7", "'" : "apostrophe", "(" : "shift-9", ")" : "shift-0", "*" : "shift-8", "+" : "shift-equal", "," : "comma", "-" : "minus", "." : "dot", "/" : "slash", ":" : "shift-semicolon", ";" : "semicolon", "<" : "shift-comma", "=" : "equal", ">" : "shift-dot", "?" : "shift-slash", "@" : "shift-2", "[" : "bracket_left", "\\" : "backslash", "]" : "bracket_right", "^" : "shift-6", "_" : "shift-minus", "`" : "grave_accent", "{" : "shift-bracket_left", "|" : "shift-backslash", "}" : "shift-bracket_right", "~" : "shift-grave_accent" } KEYS_MAP = { chr(x): chr(x) if x in range(ord('a'), ord('z')+1) or x in range(ord('0'),ord('9')+1) else 'shift-' + chr(x).lower() if x in range(ord('A'), ord('Z')+1) else QEMU_SPECIAL_KEYS[chr(x)] if chr(x) in QEMU_SPECIAL_KEYS else None for x in range(27, 128) }
qemu_special_keys = {' ': 'spc', '!': 'shift-1', '"': 'shift-apostrophe', '#': 'shift-3', '$': 'shift-4', '%': 'shift-5', '&': 'shift-7', "'": 'apostrophe', '(': 'shift-9', ')': 'shift-0', '*': 'shift-8', '+': 'shift-equal', ',': 'comma', '-': 'minus', '.': 'dot', '/': 'slash', ':': 'shift-semicolon', ';': 'semicolon', '<': 'shift-comma', '=': 'equal', '>': 'shift-dot', '?': 'shift-slash', '@': 'shift-2', '[': 'bracket_left', '\\': 'backslash', ']': 'bracket_right', '^': 'shift-6', '_': 'shift-minus', '`': 'grave_accent', '{': 'shift-bracket_left', '|': 'shift-backslash', '}': 'shift-bracket_right', '~': 'shift-grave_accent'} keys_map = {chr(x): chr(x) if x in range(ord('a'), ord('z') + 1) or x in range(ord('0'), ord('9') + 1) else 'shift-' + chr(x).lower() if x in range(ord('A'), ord('Z') + 1) else QEMU_SPECIAL_KEYS[chr(x)] if chr(x) in QEMU_SPECIAL_KEYS else None for x in range(27, 128)}
class XSDSimpleTypeFontSize(XSDSimpleType): """The font-size can be one of the CSS font sizes (xx-small, x-small, small, medium, large, x-large, xx-large) or a numeric point size. .. todo:: Better documentation. """ _UNION = [XSDSimpleTypeCssFontSize, XSDSimpleTypeDecimal] XSD_TREE = XSDTree(ET.fromstring(""" <xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="font-size"> <xs:annotation> <xs:documentation>The font-size can be one of the CSS font sizes (xx-small, x-small, small, medium, large, x-large, xx-large) or a numeric point size.</xs:documentation> </xs:annotation> <xs:union memberTypes="xs:decimal css-font-size" /> </xs:simpleType> """ )) class XSDSimpleTypeYesNoNumber(XSDSimpleType): """The yes-no-number type is used for attributes that can be either boolean or numeric values. .. todo:: Better documentation. """ _UNION = [XSDSimpleTypeYesNo, XSDSimpleTypeDecimal] XSD_TREE = XSDTree(ET.fromstring(""" <xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="yes-no-number"> <xs:annotation> <xs:documentation>The yes-no-number type is used for attributes that can be either boolean or numeric values.</xs:documentation> </xs:annotation> <xs:union memberTypes="yes-no xs:decimal" /> </xs:simpleType> """ )) class XSDSimpleTypePositiveIntegerOrEmpty(XSDSimpleTypePositiveInteger): """The positive-integer-or-empty values can be either a positive integer or an empty string. .. todo:: Better documentation. 
""" _FORCED_PERMITTED = [''] XSD_TREE = XSDTree(ET.fromstring(""" <xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="positive-integer-or-empty"> <xs:annotation> <xs:documentation>The positive-integer-or-empty values can be either a positive integer or an empty string.</xs:documentation> </xs:annotation> <xs:union memberTypes="xs:positiveInteger"> <xs:simpleType> <xs:restriction base="xs:string"> <xs:enumeration value="" /> </xs:restriction> </xs:simpleType> </xs:union> </xs:simpleType> """ )) def __init__(self, value='', *args, **kwargs): super().__init__(value=value, *args, **kwargs) class XSDSimpleTypeNumberOrNormal(XSDSimpleTypeDecimal): """The number-or-normal values can be either a decimal number or the string "normal". This is used by the line-height and letter-spacing attributes. .. todo:: Better documentation. """ _FORCED_PERMITTED = ['normal'] XSD_TREE = XSDTree(ET.fromstring(""" <xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="number-or-normal"> <xs:annotation> <xs:documentation>The number-or-normal values can be either a decimal number or the string "normal". This is used by the line-height and letter-spacing attributes.</xs:documentation> </xs:annotation> <xs:union memberTypes="xs:decimal"> <xs:simpleType> <xs:restriction base="xs:token"> <xs:enumeration value="normal" /> </xs:restriction> </xs:simpleType> </xs:union> </xs:simpleType> """ ))
class Xsdsimpletypefontsize(XSDSimpleType): """The font-size can be one of the CSS font sizes (xx-small, x-small, small, medium, large, x-large, xx-large) or a numeric point size. .. todo:: Better documentation. """ _union = [XSDSimpleTypeCssFontSize, XSDSimpleTypeDecimal] xsd_tree = xsd_tree(ET.fromstring('\n<xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="font-size">\n <xs:annotation>\n <xs:documentation>The font-size can be one of the CSS font sizes (xx-small, x-small, small, medium, large, x-large, xx-large) or a numeric point size.</xs:documentation>\n </xs:annotation>\n <xs:union memberTypes="xs:decimal css-font-size" />\n</xs:simpleType>\n')) class Xsdsimpletypeyesnonumber(XSDSimpleType): """The yes-no-number type is used for attributes that can be either boolean or numeric values. .. todo:: Better documentation. """ _union = [XSDSimpleTypeYesNo, XSDSimpleTypeDecimal] xsd_tree = xsd_tree(ET.fromstring('\n<xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="yes-no-number">\n <xs:annotation>\n <xs:documentation>The yes-no-number type is used for attributes that can be either boolean or numeric values.</xs:documentation>\n </xs:annotation>\n <xs:union memberTypes="yes-no xs:decimal" />\n</xs:simpleType>\n')) class Xsdsimpletypepositiveintegerorempty(XSDSimpleTypePositiveInteger): """The positive-integer-or-empty values can be either a positive integer or an empty string. .. todo:: Better documentation. 
""" _forced_permitted = [''] xsd_tree = xsd_tree(ET.fromstring('\n<xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="positive-integer-or-empty">\n <xs:annotation>\n <xs:documentation>The positive-integer-or-empty values can be either a positive integer or an empty string.</xs:documentation>\n </xs:annotation>\n <xs:union memberTypes="xs:positiveInteger">\n <xs:simpleType>\n <xs:restriction base="xs:string">\n <xs:enumeration value="" />\n </xs:restriction>\n </xs:simpleType>\n </xs:union>\n</xs:simpleType>\n')) def __init__(self, value='', *args, **kwargs): super().__init__(*args, value=value, **kwargs) class Xsdsimpletypenumberornormal(XSDSimpleTypeDecimal): """The number-or-normal values can be either a decimal number or the string "normal". This is used by the line-height and letter-spacing attributes. .. todo:: Better documentation. """ _forced_permitted = ['normal'] xsd_tree = xsd_tree(ET.fromstring('\n<xs:simpleType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="number-or-normal">\n <xs:annotation>\n <xs:documentation>The number-or-normal values can be either a decimal number or the string "normal". This is used by the line-height and letter-spacing attributes.</xs:documentation>\n </xs:annotation>\n <xs:union memberTypes="xs:decimal">\n <xs:simpleType>\n <xs:restriction base="xs:token">\n <xs:enumeration value="normal" />\n </xs:restriction>\n </xs:simpleType>\n </xs:union>\n</xs:simpleType>\n'))
# You are given two arrays (no duplicates): arr1 and arr2 where arr1 is a subset of arr2. # For each item x in arr1 find the first proceeding number, y, in arr2 such that y > x and # is to the right hand side of x in arr2. If such number does not exist, return -1. # # Ex. # arr1 = [4, 1, 2] # arr2 = [1, 3, 4, 2] # output = [-1, 3, -1] # the first number in arr1, 4, all numbers to the right of 4 in arr2 are less than 4. Thus return -1 # the second number in arr1, 1, the first number greater than 1 in arr2 is 3 # the third number in arr1, 2, there are no more numbers to the right of 2 in arr2, so return -1 # the basic idea is to iterate through arr2 once while maintaining a stack # for the i'th iteration. If the last item placed on the stack is less than # arr2[i], then we know that arr[i] is the next largest element of arr2[i-1] # # once we have determined that arr2[i] is greater than the previous element, # map that previous element to arr2[i] by popping the stack. if the new top # value on the stack is still less than arr2[i], map that one too, etc. # if, however, arr2[i] is not greater than the last element on the stack, push # arr[2] onto the stack. # finally, iterate through arr1 and replace each element in arr1 with the key/value # counterpart in the map. If we never mapped it in map, then there is no next # greater element, return -1 # RUN TIME ANALYSIS # this algorithm takes two parses hence it is O(n) runtime. def nextGreaterElement(targetArr, numArr): dict = {} #hashmap stack = [] #stack using a list for num in numArr: while stack and stack[len(stack) - 1] < num: dict.update({stack.pop() : num}) stack.append(num) for i in range(len(targetArr)): targetArr[i] = dict.get(targetArr[i], -1) return targetArr #driver code arr1 = [4,1,2] arr2 = [1,3,4,2] print(nextGreaterElement(arr1,arr2))
def next_greater_element(targetArr, numArr): dict = {} stack = [] for num in numArr: while stack and stack[len(stack) - 1] < num: dict.update({stack.pop(): num}) stack.append(num) for i in range(len(targetArr)): targetArr[i] = dict.get(targetArr[i], -1) return targetArr arr1 = [4, 1, 2] arr2 = [1, 3, 4, 2] print(next_greater_element(arr1, arr2))
def solve(floor, room): pass if __name__ == '__main__': num_tc = int(input()) for _ in range(num_tc): k = int(input()) n = int(input()) print(solve(k, n))
def solve(floor, room): pass if __name__ == '__main__': num_tc = int(input()) for _ in range(num_tc): k = int(input()) n = int(input()) print(solve(k, n))
def getData(self, device): """ Returns a list of tuples like {controller, device, data} with data elements """ cam = self.Devices[device["id"]]['objOfCapture'] _, frame = cam.read() if frame is None: return [] height = np.size(frame, 0) width = np.size(frame, 1) deviceName = Misc.hasKey(device, 'name', device["id"]) dataReturn = [] auxData = '"t":"{}", "ext":"{}", "W":"{}", "H":"{}"' if self.getRGB: dataRgb = Data() dataRgb.source_type = self.ME_TYPE dataRgb.source_name = 'CamController' dataRgb.source_item = deviceName dataRgb.data = frame dataRgb.aux = '{' + auxData.format('image', 'png', width, height) + '}' dataReturn.append(dataRgb) return dataReturn
def get_data(self, device): """ Returns a list of tuples like {controller, device, data} with data elements """ cam = self.Devices[device['id']]['objOfCapture'] (_, frame) = cam.read() if frame is None: return [] height = np.size(frame, 0) width = np.size(frame, 1) device_name = Misc.hasKey(device, 'name', device['id']) data_return = [] aux_data = '"t":"{}", "ext":"{}", "W":"{}", "H":"{}"' if self.getRGB: data_rgb = data() dataRgb.source_type = self.ME_TYPE dataRgb.source_name = 'CamController' dataRgb.source_item = deviceName dataRgb.data = frame dataRgb.aux = '{' + auxData.format('image', 'png', width, height) + '}' dataReturn.append(dataRgb) return dataReturn
class Solution: def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]: n = len(buildings) if n == 0: return [] if n == 1: xLeft,xRight,y = buildings[0] return [[xLeft,y],[xRight,0]] leftSky = self.getSkyline(buildings[: n//2]) rightSky = self.getSkyline(buildings[n//2 :]) print('left',leftSky,'rihgt',rightSky) return self.merge(leftSky,rightSky) def merge(self,left,right): leftLen = len(left) rightLen = len(right) pl,pr = 0,0 currY = leftY = rightY= 0 result = [] while pl < leftLen and pr < rightLen: leftPoint,rightPoint = left[pl],right[pr] if leftPoint[0] < rightPoint[0]: x,leftY = leftPoint pl += 1 else: x,rightY = rightPoint pr += 1 maxY = max(leftY,rightY) if currY != maxY: self.update(x,maxY,result) currY = maxY print('first merge',result) self.appendSky(pl,left,leftLen,leftY,currY,result) self.appendSky(pr,right,rightLen,rightY,currY,result) return result def update(self,x,y,result): if not result or result[-1][0] != x: result.append([x,y]) else: result[-1][1] = y def appendSky(self,p,num,n,y,currY,result): while p <n: x,y = num[p] p+=1 if currY != y: self.update(x,y,result) currY = y
class Solution: def get_skyline(self, buildings: List[List[int]]) -> List[List[int]]: n = len(buildings) if n == 0: return [] if n == 1: (x_left, x_right, y) = buildings[0] return [[xLeft, y], [xRight, 0]] left_sky = self.getSkyline(buildings[:n // 2]) right_sky = self.getSkyline(buildings[n // 2:]) print('left', leftSky, 'rihgt', rightSky) return self.merge(leftSky, rightSky) def merge(self, left, right): left_len = len(left) right_len = len(right) (pl, pr) = (0, 0) curr_y = left_y = right_y = 0 result = [] while pl < leftLen and pr < rightLen: (left_point, right_point) = (left[pl], right[pr]) if leftPoint[0] < rightPoint[0]: (x, left_y) = leftPoint pl += 1 else: (x, right_y) = rightPoint pr += 1 max_y = max(leftY, rightY) if currY != maxY: self.update(x, maxY, result) curr_y = maxY print('first merge', result) self.appendSky(pl, left, leftLen, leftY, currY, result) self.appendSky(pr, right, rightLen, rightY, currY, result) return result def update(self, x, y, result): if not result or result[-1][0] != x: result.append([x, y]) else: result[-1][1] = y def append_sky(self, p, num, n, y, currY, result): while p < n: (x, y) = num[p] p += 1 if currY != y: self.update(x, y, result) curr_y = y
for i in range(1, 100 + 1): text = '' if i % 3 == 0: text += 'Fizz' if i % 5 == 0: text += 'Buzz' if text == '': print(i) else: print(text)
for i in range(1, 100 + 1): text = '' if i % 3 == 0: text += 'Fizz' if i % 5 == 0: text += 'Buzz' if text == '': print(i) else: print(text)
def is_palindrome(n): if str(n) == str(n)[::-1]: return True return False # Examples: is_palindrome(101) # True is_palindrome(147) # False
def is_palindrome(n): if str(n) == str(n)[::-1]: return True return False is_palindrome(101) is_palindrome(147)
def rest_error_response(Status,Message,Format = 'XML',TwilioCode = None,TwilioMessage = None): response = { "Status" : Status, "Message" : Message } if TwilioCode is not None: response['Code'] = TwilioCode if TwilioMessage is not None: response['MoreInfo'] = TwilioMessage if Format == '' or Format == 'XML' or Format == 'HTML': response = { 'TwilioResponse' : { 'RestException' : response } } return response
def rest_error_response(Status, Message, Format='XML', TwilioCode=None, TwilioMessage=None): response = {'Status': Status, 'Message': Message} if TwilioCode is not None: response['Code'] = TwilioCode if TwilioMessage is not None: response['MoreInfo'] = TwilioMessage if Format == '' or Format == 'XML' or Format == 'HTML': response = {'TwilioResponse': {'RestException': response}} return response
# -*- coding: utf-8 -*- # Copyright 2019 Cohesity Inc. class ObjectClassEnum(object): """Implementation of the 'ObjectClass' enum. Specifies the object class of the principal (either 'kGroup' or 'kUser'). 'kUser' specifies a user object class. 'kGroup' specifies a group object class. 'kComputer' specifies a computer object class. Attributes: KUSER: TODO: type description here. KGROUP: TODO: type description here. KCOMPUTER: TODO: type description here. """ KUSER = 'kUser' KGROUP = 'kGroup' KCOMPUTER = 'kComputer'
class Objectclassenum(object): """Implementation of the 'ObjectClass' enum. Specifies the object class of the principal (either 'kGroup' or 'kUser'). 'kUser' specifies a user object class. 'kGroup' specifies a group object class. 'kComputer' specifies a computer object class. Attributes: KUSER: TODO: type description here. KGROUP: TODO: type description here. KCOMPUTER: TODO: type description here. """ kuser = 'kUser' kgroup = 'kGroup' kcomputer = 'kComputer'
# Copyright 2020 AUI, Inc. Washington DC, USA # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def shadow(vis_dataset, shadow_parms, storage_parms): """ .. todo:: This function is not yet implemented Flag all baselines for antennas that are shadowed beyond the specified tolerance. All antennas in the zarr-file metadata (and their corresponding diameters) will be considered for shadow-flag calculations. For a given timestep, an antenna is flagged if any of its baselines (projected onto the uv-plane) is shorter than radius_1 + radius_2 - tolerance. The value of 'w' is used to determine which antenna is behind the other. The phase-reference center is used for antenna-pointing direction. Antennas that are not part of the observation, but still physically present and shadowing other antennas that are being used, must be added to the meta-data list in the zarr prior to calling this method. Inputs : (1) shadowlimit or tolerance (in m) (2) array name for output flags. Default = FLAG Returns ------- vis_dataset : xarray.core.dataset.Dataset """
def shadow(vis_dataset, shadow_parms, storage_parms): """ .. todo:: This function is not yet implemented Flag all baselines for antennas that are shadowed beyond the specified tolerance. All antennas in the zarr-file metadata (and their corresponding diameters) will be considered for shadow-flag calculations. For a given timestep, an antenna is flagged if any of its baselines (projected onto the uv-plane) is shorter than radius_1 + radius_2 - tolerance. The value of 'w' is used to determine which antenna is behind the other. The phase-reference center is used for antenna-pointing direction. Antennas that are not part of the observation, but still physically present and shadowing other antennas that are being used, must be added to the meta-data list in the zarr prior to calling this method. Inputs : (1) shadowlimit or tolerance (in m) (2) array name for output flags. Default = FLAG Returns ------- vis_dataset : xarray.core.dataset.Dataset """
""" Simple BMW ConnectedDrive API. init file for backward compatibility """ # empty
""" Simple BMW ConnectedDrive API. init file for backward compatibility """
# -*- coding:utf-8 -*- class ListNode: def __init__(self, x): self.val = x self.next = None class Solution: def deleteNode(self, listNode, toDeleteNode): if listNode == None or toDeleteNode == None: return listNode if listNode == toDeleteNode: return listNode.next if toDeleteNode.next == None: while(listNode.next != toDeleteNode): listNode = listNode.next listNode.next = None return listNode toDeleteNode.val = toDeleteNode.next.val toDeleteNode.next = toDeleteNode.next.next return listNode if __name__ == "__main__": listNode = ListNode(1) listNode.next = ListNode(2) listNode.next.next = ListNode(3) solution = Solution() headNode = ListNode(1) headNode.next = listNode.next toDeleteNode = listNode.next toDeleteNode.next = listNode.next.next result = solution.deleteNode(listNode, toDeleteNode) print(result)
class Listnode: def __init__(self, x): self.val = x self.next = None class Solution: def delete_node(self, listNode, toDeleteNode): if listNode == None or toDeleteNode == None: return listNode if listNode == toDeleteNode: return listNode.next if toDeleteNode.next == None: while listNode.next != toDeleteNode: list_node = listNode.next listNode.next = None return listNode toDeleteNode.val = toDeleteNode.next.val toDeleteNode.next = toDeleteNode.next.next return listNode if __name__ == '__main__': list_node = list_node(1) listNode.next = list_node(2) listNode.next.next = list_node(3) solution = solution() head_node = list_node(1) headNode.next = listNode.next to_delete_node = listNode.next toDeleteNode.next = listNode.next.next result = solution.deleteNode(listNode, toDeleteNode) print(result)
def build_filter(args): return Filter(args) class Filter: def __init__(self, args): if args == '': message = b'<empty commit message>' else: message = args.encode('utf8') self.message = message def commit_message_filter(self,commit_data): # Only write the commit message if the recorded commit # message is null. if commit_data['desc'] == b'\x00': commit_data['desc'] = self.message
def build_filter(args): return filter(args) class Filter: def __init__(self, args): if args == '': message = b'<empty commit message>' else: message = args.encode('utf8') self.message = message def commit_message_filter(self, commit_data): if commit_data['desc'] == b'\x00': commit_data['desc'] = self.message
# Calculates the total cost of a restaurant bill # including the meal cost, tip amount, and sales tax # Declare variables tipRate = 0.18 # 18% salesTaxRate = 0.07 # 7% # Prompt user for cost of meal mealCost = float(input('\nEnter total cost of meal: $')) # Calculate and display total tip, # total sales tax, and bill total tipCost = mealCost * tipRate salesTaxCost = mealCost * salesTaxRate totalBill = mealCost + tipCost + salesTaxCost print('\nWith 18% tip: $', format(tipCost, ',.2f'), sep='') print('With 7% sales tax: $', format(salesTaxCost, ',.2f'), sep='') print('Restaurant bill total: $', format(totalBill, ',.2f'), '\n', sep='')
tip_rate = 0.18 sales_tax_rate = 0.07 meal_cost = float(input('\nEnter total cost of meal: $')) tip_cost = mealCost * tipRate sales_tax_cost = mealCost * salesTaxRate total_bill = mealCost + tipCost + salesTaxCost print('\nWith 18% tip: $', format(tipCost, ',.2f'), sep='') print('With 7% sales tax: $', format(salesTaxCost, ',.2f'), sep='') print('Restaurant bill total: $', format(totalBill, ',.2f'), '\n', sep='')
#!/usr/bin/env python __author__ = "yangyanzhan" __email__ = "yangyanzhan@gmail.com" __url__ = "https://github.com/yangyanzhan/code-camp" __blog__ = "https://yanzhan.site" __youtube__ = "https://www.youtube.com/channel/UCDkz-__gl3frqLexukpG0DA?view_as=subscriber" __twitter__ = "https://twitter.com/YangYanzhan" mapping = {} class Solution: def generateParenthesis(self, n: int) -> List[str]: if n == 0: return [] if n in mapping: return mapping[n] solutions = [] for i in range(n): parts1 = self.generateParenthesis(i) parts2 = self.generateParenthesis(n - 1 - i) if len(parts1) == 0: parts1 = [""] if len(parts2) == 0: parts2 = [""] for part1 in parts1: for part2 in parts2: solution = "(" + part1 + ")" + part2 solutions.append(solution) mapping[n] = solutions return solutions
__author__ = 'yangyanzhan' __email__ = 'yangyanzhan@gmail.com' __url__ = 'https://github.com/yangyanzhan/code-camp' __blog__ = 'https://yanzhan.site' __youtube__ = 'https://www.youtube.com/channel/UCDkz-__gl3frqLexukpG0DA?view_as=subscriber' __twitter__ = 'https://twitter.com/YangYanzhan' mapping = {} class Solution: def generate_parenthesis(self, n: int) -> List[str]: if n == 0: return [] if n in mapping: return mapping[n] solutions = [] for i in range(n): parts1 = self.generateParenthesis(i) parts2 = self.generateParenthesis(n - 1 - i) if len(parts1) == 0: parts1 = [''] if len(parts2) == 0: parts2 = [''] for part1 in parts1: for part2 in parts2: solution = '(' + part1 + ')' + part2 solutions.append(solution) mapping[n] = solutions return solutions
# Copyright (c) 2016 Ericsson AB. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Copyright (c) 2017 Wind River Systems, Inc. # RPC_API_VERSION = "1.0" TOPIC_DC_MANAGER = "dcmanager" PATCH_VAULT_DIR = "/opt/patch-vault" # Well known region names SYSTEM_CONTROLLER_NAME = "SystemController" DEFAULT_REGION_NAME = "RegionOne" # Subcloud management state MANAGEMENT_UNMANAGED = "unmanaged" MANAGEMENT_MANAGED = "managed" # Subcloud availability status AVAILABILITY_OFFLINE = "offline" AVAILABILITY_ONLINE = "online" # Subcloud sync status SYNC_STATUS_UNKNOWN = "unknown" SYNC_STATUS_IN_SYNC = "in-sync" SYNC_STATUS_OUT_OF_SYNC = "out-of-sync" # Subcloud endpoint related database fields ENDPOINT_SYNC_STATUS = "endpoint_sync_status" SYNC_STATUS = "sync_status" ENDPOINT_TYPE = "endpoint_type" # Service group status SERVICE_GROUP_STATUS_ACTIVE = "active" # Availability fail count AVAIL_FAIL_COUNT_TO_ALARM = 2 AVAIL_FAIL_COUNT_MAX = 9999 # Software update type SW_UPDATE_TYPE_PATCH = "patch" SW_UPDATE_TYPE_UPGRADE = "upgrade" # Software update states SW_UPDATE_STATE_INITIAL = "initial" SW_UPDATE_STATE_APPLYING = "applying" SW_UPDATE_STATE_ABORT_REQUESTED = "abort requested" SW_UPDATE_STATE_ABORTING = "aborting" SW_UPDATE_STATE_COMPLETE = "complete" SW_UPDATE_STATE_ABORTED = "aborted" SW_UPDATE_STATE_FAILED = "failed" SW_UPDATE_STATE_DELETING = "deleting" SW_UPDATE_STATE_DELETED = "deleted" # Software update actions SW_UPDATE_ACTION_APPLY = "apply" SW_UPDATE_ACTION_ABORT = "abort" # Subcloud 
apply types SUBCLOUD_APPLY_TYPE_PARALLEL = "parallel" SUBCLOUD_APPLY_TYPE_SERIAL = "serial" # Strategy step states STRATEGY_STATE_INITIAL = "initial" STRATEGY_STATE_UPDATING_PATCHES = "updating patches" STRATEGY_STATE_CREATING_STRATEGY = "creating strategy" STRATEGY_STATE_APPLYING_STRATEGY = "applying strategy" STRATEGY_STATE_FINISHING = "finishing" STRATEGY_STATE_COMPLETE = "complete" STRATEGY_STATE_ABORTED = "aborted" STRATEGY_STATE_FAILED = "failed" SW_UPDATE_DEFAULT_TITLE = "all clouds default"
rpc_api_version = '1.0' topic_dc_manager = 'dcmanager' patch_vault_dir = '/opt/patch-vault' system_controller_name = 'SystemController' default_region_name = 'RegionOne' management_unmanaged = 'unmanaged' management_managed = 'managed' availability_offline = 'offline' availability_online = 'online' sync_status_unknown = 'unknown' sync_status_in_sync = 'in-sync' sync_status_out_of_sync = 'out-of-sync' endpoint_sync_status = 'endpoint_sync_status' sync_status = 'sync_status' endpoint_type = 'endpoint_type' service_group_status_active = 'active' avail_fail_count_to_alarm = 2 avail_fail_count_max = 9999 sw_update_type_patch = 'patch' sw_update_type_upgrade = 'upgrade' sw_update_state_initial = 'initial' sw_update_state_applying = 'applying' sw_update_state_abort_requested = 'abort requested' sw_update_state_aborting = 'aborting' sw_update_state_complete = 'complete' sw_update_state_aborted = 'aborted' sw_update_state_failed = 'failed' sw_update_state_deleting = 'deleting' sw_update_state_deleted = 'deleted' sw_update_action_apply = 'apply' sw_update_action_abort = 'abort' subcloud_apply_type_parallel = 'parallel' subcloud_apply_type_serial = 'serial' strategy_state_initial = 'initial' strategy_state_updating_patches = 'updating patches' strategy_state_creating_strategy = 'creating strategy' strategy_state_applying_strategy = 'applying strategy' strategy_state_finishing = 'finishing' strategy_state_complete = 'complete' strategy_state_aborted = 'aborted' strategy_state_failed = 'failed' sw_update_default_title = 'all clouds default'
#!/usr/bin/python3 """Student to JSON""" class Student: """representation of a student""" def __init__(self, first_name, last_name, age): """instantiation of the student""" self.first_name = first_name self.last_name = last_name self.age = age def to_json(self): """retrieves a dictionary representation of a Student instance""" return self.__dict__
"""Student to JSON""" class Student: """representation of a student""" def __init__(self, first_name, last_name, age): """instantiation of the student""" self.first_name = first_name self.last_name = last_name self.age = age def to_json(self): """retrieves a dictionary representation of a Student instance""" return self.__dict__
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- def get_package_data(): # pragma: no cover return { str(_PACKAGE_NAME_ + '.tags.core.tests'): ['data/*.yaml']}
def get_package_data(): return {str(_PACKAGE_NAME_ + '.tags.core.tests'): ['data/*.yaml']}
t = ( 10, 11, 12, 34, 99, 4, 98) print (t[0]) t1 = (1, 1, 1, 2, 3, 4, 65, 65, 3, 2) #tuple with single element print (t1.count(1)) print (t1.index(65))
t = (10, 11, 12, 34, 99, 4, 98) print(t[0]) t1 = (1, 1, 1, 2, 3, 4, 65, 65, 3, 2) print(t1.count(1)) print(t1.index(65))
connChoices=( {'name':'automatic', 'rate':{'min':0, 'max':5000, 'def': 0}, 'conn':{'min':0, 'max':100, 'def': 0}, 'automatic':1}, {'name':'unlimited', 'rate':{'min':0, 'max':5000, 'def': 0, 'div': 50}, 'conn':{'min':4, 'max':100, 'def': 4}}, {'name':'dialup/isdn', 'rate':{'min':3, 'max': 8, 'def': 5}, 'conn':{'min':2, 'max': 3, 'def': 2}, 'initiate': 12}, {'name':'dsl/cable slow', 'rate':{'min':10, 'max': 48, 'def': 13}, 'conn':{'min':4, 'max': 20, 'def': 4}}, {'name':'dsl/cable fast', 'rate':{'min':20, 'max': 100, 'def': 40}, 'conn':{'min':4, 'max': 30, 'def': 6}}, {'name':'T1', 'rate':{'min':100, 'max': 300, 'def':150}, 'conn':{'min':4, 'max': 40, 'def':10}}, {'name':'T3+', 'rate':{'min':400, 'max':2000, 'def':500}, 'conn':{'min':4, 'max':100, 'def':20}}, {'name':'seeder', 'rate':{'min':0, 'max':5000, 'def':0, 'div': 50}, 'conn':{'min':1, 'max':100, 'def':1}}, {'name':'SUPER-SEED', 'super-seed':1} ) connChoiceList = map(lambda x:x['name'], connChoices)
conn_choices = ({'name': 'automatic', 'rate': {'min': 0, 'max': 5000, 'def': 0}, 'conn': {'min': 0, 'max': 100, 'def': 0}, 'automatic': 1}, {'name': 'unlimited', 'rate': {'min': 0, 'max': 5000, 'def': 0, 'div': 50}, 'conn': {'min': 4, 'max': 100, 'def': 4}}, {'name': 'dialup/isdn', 'rate': {'min': 3, 'max': 8, 'def': 5}, 'conn': {'min': 2, 'max': 3, 'def': 2}, 'initiate': 12}, {'name': 'dsl/cable slow', 'rate': {'min': 10, 'max': 48, 'def': 13}, 'conn': {'min': 4, 'max': 20, 'def': 4}}, {'name': 'dsl/cable fast', 'rate': {'min': 20, 'max': 100, 'def': 40}, 'conn': {'min': 4, 'max': 30, 'def': 6}}, {'name': 'T1', 'rate': {'min': 100, 'max': 300, 'def': 150}, 'conn': {'min': 4, 'max': 40, 'def': 10}}, {'name': 'T3+', 'rate': {'min': 400, 'max': 2000, 'def': 500}, 'conn': {'min': 4, 'max': 100, 'def': 20}}, {'name': 'seeder', 'rate': {'min': 0, 'max': 5000, 'def': 0, 'div': 50}, 'conn': {'min': 1, 'max': 100, 'def': 1}}, {'name': 'SUPER-SEED', 'super-seed': 1}) conn_choice_list = map(lambda x: x['name'], connChoices)
# apis_v1/documentation_source/voter_ballot_list_retrieve_doc.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- def voter_ballot_list_retrieve_doc_template_values(url_root): """ Show documentation about voterBallotListRetrieve """ required_query_parameter_list = [ { 'name': 'voter_device_id', 'value': 'string', # boolean, integer, long, string 'description': 'An 88 character unique identifier linked to a voter record on the server', }, { 'name': 'api_key', 'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string 'description': 'The unique key provided to any organization using the WeVoteServer APIs', }, ] optional_query_parameter_list = [ ] potential_status_codes_list = [ { 'code': 'VALID_VOTER_DEVICE_ID_MISSING', 'description': 'A valid voter_device_id parameter was not included. Cannot proceed.', }, { 'code': 'VALID_VOTER_ID_MISSING', 'description': 'A valid voter_id was not found from voter_device_id. Cannot proceed.', }, ] try_now_link_variables_dict = { } api_response = '{\n' \ ' "status": string,\n' \ ' "success": boolean,\n' \ ' "voter_device_id": string (88 characters long),\n' \ ' "voter_ballot_list": list\n' \ ' [\n' \ ' "ballot_returned_we_vote_id": string,\n' \ ' "ballot_location_shortcut": string,\n' \ ' "election_description_text": string,\n' \ ' "election_day_text": string,\n' \ ' "google_civic_election_id": integer,\n' \ ' "original_text_for_map_search": string,\n' \ ' "state_code_list": list\n' \ ' [],\n' \ ' ],\n' \ '}' template_values = { 'api_name': 'voterBallotListRetrieve', 'api_slug': 'voterBallotListRetrieve', 'api_introduction': "Retrieve a list of ballots per voter_id", 'try_now_link': 'apis_v1:voterBallotListRetrieveView', 'try_now_link_variables_dict': try_now_link_variables_dict, 'url_root': url_root, 'get_or_post': 'GET', 'required_query_parameter_list': required_query_parameter_list, 'optional_query_parameter_list': optional_query_parameter_list, 'api_response': api_response, 
'api_response_notes': "", 'potential_status_codes_list': potential_status_codes_list, } return template_values
def voter_ballot_list_retrieve_doc_template_values(url_root): """ Show documentation about voterBallotListRetrieve """ required_query_parameter_list = [{'name': 'voter_device_id', 'value': 'string', 'description': 'An 88 character unique identifier linked to a voter record on the server'}, {'name': 'api_key', 'value': 'string (from post, cookie, or get (in that order))', 'description': 'The unique key provided to any organization using the WeVoteServer APIs'}] optional_query_parameter_list = [] potential_status_codes_list = [{'code': 'VALID_VOTER_DEVICE_ID_MISSING', 'description': 'A valid voter_device_id parameter was not included. Cannot proceed.'}, {'code': 'VALID_VOTER_ID_MISSING', 'description': 'A valid voter_id was not found from voter_device_id. Cannot proceed.'}] try_now_link_variables_dict = {} api_response = '{\n "status": string,\n "success": boolean,\n "voter_device_id": string (88 characters long),\n "voter_ballot_list": list\n [\n "ballot_returned_we_vote_id": string,\n "ballot_location_shortcut": string,\n "election_description_text": string,\n "election_day_text": string,\n "google_civic_election_id": integer,\n "original_text_for_map_search": string,\n "state_code_list": list\n [],\n ],\n}' template_values = {'api_name': 'voterBallotListRetrieve', 'api_slug': 'voterBallotListRetrieve', 'api_introduction': 'Retrieve a list of ballots per voter_id', 'try_now_link': 'apis_v1:voterBallotListRetrieveView', 'try_now_link_variables_dict': try_now_link_variables_dict, 'url_root': url_root, 'get_or_post': 'GET', 'required_query_parameter_list': required_query_parameter_list, 'optional_query_parameter_list': optional_query_parameter_list, 'api_response': api_response, 'api_response_notes': '', 'potential_status_codes_list': potential_status_codes_list} return template_values
# "sb" pytest fixture test in a method with no class def test_sb_fixture_with_no_class(sb): sb.open("https://google.com/ncr") sb.type('input[title="Search"]', "SeleniumBase GitHub\n") sb.click('a[href*="github.com/seleniumbase/SeleniumBase"]') sb.click('a[title="seleniumbase"]') # "sb" pytest fixture test in a method inside a class class Test_SB_Fixture: def test_sb_fixture_inside_class(self, sb): sb.open("https://google.com/ncr") sb.type('input[title="Search"]', "SeleniumBase GitHub\n") sb.click('a[href*="github.com/seleniumbase/SeleniumBase"]') sb.click('a[title="examples"]')
def test_sb_fixture_with_no_class(sb): sb.open('https://google.com/ncr') sb.type('input[title="Search"]', 'SeleniumBase GitHub\n') sb.click('a[href*="github.com/seleniumbase/SeleniumBase"]') sb.click('a[title="seleniumbase"]') class Test_Sb_Fixture: def test_sb_fixture_inside_class(self, sb): sb.open('https://google.com/ncr') sb.type('input[title="Search"]', 'SeleniumBase GitHub\n') sb.click('a[href*="github.com/seleniumbase/SeleniumBase"]') sb.click('a[title="examples"]')
# Copyright 2017 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=line-too-long,g-explicit-length-test """A convenience class replicating some lua table syntax with a python dict. In general, should behave like a dictionary except that we can use dot notation to access keys. Users should be careful to only provide keys suitable for instance variable names. Nota bene: do not use the key "keys" since it will collide with the method keys. 
Usage example: >>> t = T(a=5,b='kaw', c=T(v=[],x=33)) >>> t.a 5 >>> t.z = None >>> print t T(a=5, z=None, c=T(x=33, v=[]), b='kaw') >>> t2 = T({'h':'f','x':4}) >>> t2 T(h='f', x=4) >>> t2['x'] 4 """ class T(object): """Class for emulating lua tables.""" def __init__(self, *args, **kwargs): if len(args) > 1 or (len(args) == 1 and len(kwargs) > 0): errmsg = '''constructor only allows a single dict as a positional argument or keyword arguments''' raise ValueError(errmsg) if len(args) == 1 and isinstance(args[0], dict): self.__dict__.update(args[0]) else: self.__dict__.update(kwargs) def __repr__(self): fmt = ', '.join('%s=%s' for i in range(len(self.__dict__))) kwargstr = fmt % tuple( x for tup in self.__dict__.items() for x in [str(tup[0]), repr(tup[1])]) return 'T(' + kwargstr + ')' def __getitem__(self, key): return self.__dict__[key] def __setitem__(self, key, val): self.__dict__[key] = val def __delitem__(self, key): del self.__dict__[key] def __iter__(self): return iter(self.__dict__) def __len__(self): return len(self.__dict__) def keys(self): # Needed for dict(T( ... )) to work. return self.__dict__.keys() def iteritems(self): return [(k, self.__dict__.get(k)) for k in self.__dict__]
"""A convenience class replicating some lua table syntax with a python dict. In general, should behave like a dictionary except that we can use dot notation to access keys. Users should be careful to only provide keys suitable for instance variable names. Nota bene: do not use the key "keys" since it will collide with the method keys. Usage example: >>> t = T(a=5,b='kaw', c=T(v=[],x=33)) >>> t.a 5 >>> t.z = None >>> print t T(a=5, z=None, c=T(x=33, v=[]), b='kaw') >>> t2 = T({'h':'f','x':4}) >>> t2 T(h='f', x=4) >>> t2['x'] 4 """ class T(object): """Class for emulating lua tables.""" def __init__(self, *args, **kwargs): if len(args) > 1 or (len(args) == 1 and len(kwargs) > 0): errmsg = 'constructor only allows a single dict as a positional\n argument or keyword arguments' raise value_error(errmsg) if len(args) == 1 and isinstance(args[0], dict): self.__dict__.update(args[0]) else: self.__dict__.update(kwargs) def __repr__(self): fmt = ', '.join(('%s=%s' for i in range(len(self.__dict__)))) kwargstr = fmt % tuple((x for tup in self.__dict__.items() for x in [str(tup[0]), repr(tup[1])])) return 'T(' + kwargstr + ')' def __getitem__(self, key): return self.__dict__[key] def __setitem__(self, key, val): self.__dict__[key] = val def __delitem__(self, key): del self.__dict__[key] def __iter__(self): return iter(self.__dict__) def __len__(self): return len(self.__dict__) def keys(self): return self.__dict__.keys() def iteritems(self): return [(k, self.__dict__.get(k)) for k in self.__dict__]
template = ' <!DOCTYPE html> \ <html lang="en"> \ <head> \ <title>AssiStudy</title> \ <meta charset="UTF-8"> \ <meta name="viewport" co ntent="width=device-width, initial-scale=1"> \ <!--===============================================================================================--> \ <link rel="icon" type="image/png" href="/static/images/icons/favicon.ico"/> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/vendor/bootstrap/css/bootstrap.min.css"> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/fonts/font-awesome-4.7.0/css/font-awesome.min.css"> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/vendor/animate/animate.css"> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/vendor/css-hamburgers/hamburgers.min.css"> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/vendor/animsition/css/animsition.min.css"> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/vendor/select2/select2.min.css"> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/vendor/daterangepicker/daterangepicker.css"> \ <!--===============================================================================================--> \ <link rel="stylesheet" type="text/css" href="/static/css/util.css"> \ <link rel="stylesheet" type="text/css" 
href="/static/css/main.css"> \ <!--===============================================================================================--> \ </head> \ <body> \ <div class="container-contact100"> \ <div class="wrap-contact100"> \ <div class="wrap-input100 validate-input" data-validate = "abstract is required"> \ <span class="label-input100">{}</span> \ <textarea class="input100" name="message" id="abstract" placeholder="Your abstract here..."></textarea> \ <span class="focus-input100"></span> \ </div> \ </div> \ </div> \ <div id="dropDownSelect1"></div> \ <!--===============================================================================================--> \ <script src="https://code.jquery.com/jquery-3.2.1.min.js"></script> \ <script src="/static/vendor/jquery/jquery-3.2.1.min.js"></script> \ <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script> \ <!--===============================================================================================--> \ <script src="/static/vendor/animsition/js/animsition.min.js"></script> \ <!--===============================================================================================--> \ <script src="/static/vendor/bootstrap/js/popper.js"></script> \ <script src="/static/vendor/bootstrap/js/bootstrap.min.js"></script> \ <!--===============================================================================================--> \ <script src="/static/vendor/select2/select2.min.js"></script> \ <script> \ $(".selection-2").select2({ \ minimumResultsForSearch: 20, \ dropdownParent: $("#dropDownSelect1") \ }); \ </script> \ <!--===============================================================================================--> \ <script src="/static/vendor/daterangepicker/moment.min.js"></script> \ <script src="/static/vendor/daterangepicker/daterangepicker.js"></script> \ 
<!--===============================================================================================--> \ <script src="/static/vendor/countdowntime/countdowntime.js"></script> \ <!-- <script src="js/main.js"></script> --> \ <!--===============================================================================================--> \ </body> \ </html> '.format("wowowowowowo")
template = ' <!DOCTYPE html> <html lang="en"> <head> \t<title>AssiStudy</title> \t<meta charset="UTF-8"> \t<meta name="viewport" co ntent="width=device-width, initial-scale=1"> <!--===============================================================================================--> \t<link rel="icon" type="image/png" href="/static/images/icons/favicon.ico"/> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/vendor/bootstrap/css/bootstrap.min.css"> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/fonts/font-awesome-4.7.0/css/font-awesome.min.css"> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/vendor/animate/animate.css"> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/vendor/css-hamburgers/hamburgers.min.css"> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/vendor/animsition/css/animsition.min.css"> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/vendor/select2/select2.min.css"> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/vendor/daterangepicker/daterangepicker.css"> <!--===============================================================================================--> \t<link rel="stylesheet" type="text/css" href="/static/css/util.css"> \t<link rel="stylesheet" type="text/css" href="/static/css/main.css"> 
<!--===============================================================================================--> </head> <body> \t<div class="container-contact100"> \t\t<div class="wrap-contact100"> \t\t\t<div class="wrap-input100 validate-input" data-validate = "abstract is required"> \t\t\t\t\t<span class="label-input100">{}</span> \t\t\t\t\t<textarea class="input100" name="message" id="abstract" placeholder="Your abstract here..."></textarea> \t\t\t\t\t<span class="focus-input100"></span> \t\t\t</div> \t\t</div> \t</div> \t<div id="dropDownSelect1"></div> <!--===============================================================================================--> \t<script src="https://code.jquery.com/jquery-3.2.1.min.js"></script> \t<script src="/static/vendor/jquery/jquery-3.2.1.min.js"></script> \t<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" crossorigin="anonymous"></script> <!--===============================================================================================--> \t<script src="/static/vendor/animsition/js/animsition.min.js"></script> <!--===============================================================================================--> \t<script src="/static/vendor/bootstrap/js/popper.js"></script> \t<script src="/static/vendor/bootstrap/js/bootstrap.min.js"></script> <!--===============================================================================================--> \t<script src="/static/vendor/select2/select2.min.js"></script> \t<script> \t\t$(".selection-2").select2({ \t\t\tminimumResultsForSearch: 20, \t\t\tdropdownParent: $("#dropDownSelect1") \t\t}); \t</script> <!--===============================================================================================--> \t<script src="/static/vendor/daterangepicker/moment.min.js"></script> \t<script src="/static/vendor/daterangepicker/daterangepicker.js"></script> 
<!--===============================================================================================--> \t<script src="/static/vendor/countdowntime/countdowntime.js"></script> <!--\t<script src="js/main.js"></script> --> <!--===============================================================================================--> </body> </html> '.format('wowowowowowo')
class Config: # dataset related exemplar_size = 127 # exemplar size instance_size = 255 # instance size context_amount = 0.5 # context amount # training related num_per_epoch = 53200 # num of samples per epoch train_ratio = 0.9 # training ratio of VID dataset frame_range = 100 # frame range of choosing the instance train_batch_size = 8 # training batch size valid_batch_size = 8 # validation batch size train_num_workers = 8 # number of workers of train dataloader valid_num_workers = 8 # number of workers of validation dataloader lr = 1e-2 # learning rate of SGD momentum = 0.0 # momentum of SGD weight_decay = 0.0 # weight decay of optimizator step_size = 25 # step size of LR_Schedular gamma = 0.1 # decay rate of LR_Schedular epoch = 30 # total epoch seed = 1234 # seed to sample training videos log_dir = './models/logs' # log dirs radius = 16 # radius of positive label response_scale = 1e-3 # normalize of response max_translate = 3 # max translation of random shift # tracking related scale_step = 1.0375 # scale step of instance image num_scale = 3 # number of scales scale_lr = 0.59 # scale learning rate response_up_stride = 16 # response upsample stride response_sz = 17 # response size train_response_sz = 15 # train response size window_influence = 0.176 # window influence scale_penalty = 0.9745 # scale penalty total_stride = 8 # total stride of backbone sample_type = 'uniform' gray_ratio = 0.25 blur_ratio = 0.15 config = Config()
class Config:
    """Hyper-parameter container for a SiamFC-style tracker.

    Dataset, training, tracking and augmentation settings, exposed as class
    attributes; a module-level ``config`` instance is created below.
    """

    # dataset
    exemplar_size = 127       # exemplar (template) crop size, px
    instance_size = 255       # search-region crop size, px
    context_amount = 0.5      # context margin around the target
    # training
    num_per_epoch = 53200     # samples per epoch
    train_ratio = 0.9         # train/valid split ratio
    frame_range = 100         # frame range when choosing the instance
    train_batch_size = 8
    valid_batch_size = 8
    train_num_workers = 8
    valid_num_workers = 8
    lr = 0.01                 # SGD learning rate
    momentum = 0.0
    weight_decay = 0.0
    step_size = 25            # LR scheduler step period
    gamma = 0.1               # LR scheduler decay factor
    epoch = 30
    seed = 1234
    log_dir = './models/logs'
    radius = 16               # radius of the positive-label region
    response_scale = 0.001    # response-map normalization
    max_translate = 3         # max random-shift translation, px
    # tracking
    scale_step = 1.0375
    num_scale = 3
    scale_lr = 0.59
    response_up_stride = 16
    response_sz = 17
    train_response_sz = 15
    window_influence = 0.176
    scale_penalty = 0.9745
    total_stride = 8
    # augmentation
    sample_type = 'uniform'
    gray_ratio = 0.25
    blur_ratio = 0.15


# BUG FIX: was `config = config()` — the lowercase name is undefined at this
# point, so importing the module raised NameError.
config = Config()
__all__ = ('indent_string',)


def indent_string(s, num_spaces):
    """Return *s* with every line prefixed by ``num_spaces`` spaces.

    A single trailing newline, if present, is preserved (the prefix is not
    applied to the empty final segment).

    Bug fix: the previous version indexed ``s[-1]`` unconditionally and
    raised IndexError for the empty string; '' is now returned unchanged.
    """
    if not s:
        return s
    add_newline = s.endswith('\n')
    if add_newline:
        s = s[:-1]
    s = '\n'.join(num_spaces * ' ' + line for line in s.split('\n'))
    if add_newline:
        s += '\n'
    return s
__all__ = ('indent_string',)


def indent_string(s, num_spaces):
    """Indent each line of *s* by ``num_spaces`` spaces.

    Preserves a single trailing newline if the input ends with one.

    Bug fix: guard the empty string — the old code evaluated ``s[-1]``
    first, which raises IndexError when ``s == ''``.
    """
    if not s:
        return s
    add_newline = s.endswith('\n')
    if add_newline:
        s = s[:-1]
    s = '\n'.join((num_spaces * ' ' + line for line in s.split('\n')))
    if add_newline:
        s += '\n'
    return s
# Model settings: MAE self-supervised pre-training with a ViT-small encoder
# and a lightweight transformer decoder.
model = dict(
    type='MAE',
    backbone=dict(
        type='MAEViT',
        arch='small',
        patch_size=16,
        mask_ratio=0.75,   # fraction of patches masked out during pre-training
    ),
    neck=dict(
        type='MAEPretrainDecoder',
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        decoder_embed_dim=512,
        decoder_depth=6,   # 3/4 of the encoder depth
        decoder_num_heads=16,
        mlp_ratio=4.,
    ),
    head=dict(type='MAEPretrainHead', norm_pix=True, patch_size=16),
)
# MAE pre-training configuration (ViT-small encoder, 6-layer decoder),
# written with brace literals instead of dict() calls.
model = {
    'type': 'MAE',
    'backbone': {
        'type': 'MAEViT',
        'arch': 'small',
        'patch_size': 16,
        'mask_ratio': 0.75,
    },
    'neck': {
        'type': 'MAEPretrainDecoder',
        'patch_size': 16,
        'in_chans': 3,
        'embed_dim': 768,
        'decoder_embed_dim': 512,
        'decoder_depth': 6,
        'decoder_num_heads': 16,
        'mlp_ratio': 4.0,
    },
    'head': {'type': 'MAEPretrainHead', 'norm_pix': True, 'patch_size': 16},
}
# Expected parser output: one licensing usage report, keyed by its report id.
expected_output = {
    "report_id": {
        "1634211151": {
            "metric_name": "ENTITLEMENT",
            "feature_name": "dna-advantage",
            "metric_value": "regid.2017-05.com.cisco.c9300_dna_advantage,1.0_411773c3-2116-4c10-94a4-5d357fe6ff18",
            # unique device identifier of the reporting switch
            "udi": {"pid": "C9300-24UX", "sn": "FCW2303D16Y"},
            "previous_report_id": "0",
            "next_report_id": "1634211155",
            "state": "ACK",
            "state_change_reason": "ACKED",
            "start_time": "Oct 14 11:32:40 2021 UTC",
            "end_time": "Oct 14 11:32:48 2021 UTC",
            "storage_state": "DELETED",
            "transaction_id": "0",
            "transaction_message": "<none>",
        }
    }
}
# Expected parser output for entitlement usage report 1634211151.
# The report body is built separately for readability, then nested.
_report_1634211151 = {
    'metric_name': 'ENTITLEMENT',
    'feature_name': 'dna-advantage',
    'metric_value': 'regid.2017-05.com.cisco.c9300_dna_advantage,1.0_411773c3-2116-4c10-94a4-5d357fe6ff18',
    'udi': {'pid': 'C9300-24UX', 'sn': 'FCW2303D16Y'},
    'previous_report_id': '0',
    'next_report_id': '1634211155',
    'state': 'ACK',
    'state_change_reason': 'ACKED',
    'start_time': 'Oct 14 11:32:40 2021 UTC',
    'end_time': 'Oct 14 11:32:48 2021 UTC',
    'storage_state': 'DELETED',
    'transaction_id': '0',
    'transaction_message': '<none>',
}
expected_output = {'report_id': {'1634211151': _report_1634211151}}
""" Technique - Sum of all multiples of 3 + Sum of all those multiples of 5 that are not multiple of 3 - This Multiples of 15 should be subtracted as that has been added twice. - A Pythonic implementation which uses a list comprehension. Note - The optimisation that only those multiples of 5 are added which are not the multiple of 3 eliminates the need to subtract the multiples of 15. - Among the slowest algorithms since lists are not very fast. Instrumentation - System Details: 8x Intel Core i7-3630QM CPU @ 2.40GHz, 16GB RAM, Ubuntu 14.04 - Input Details: NUMBERS_BELOW_N = 10 million - Time for 10 runs: Minimum - 0.9 sec, Average - 1.545 sec, Maximum 1.96 sec - Not among the fastest or the slowest algorithms. """ def answer(n): result = sum([x for x in range(3, n, 3)]) result = result + sum([x for x in range(5, n, 5) if x % 3 != 0]) return result
""" Technique - Sum of all multiples of 3 + Sum of all those multiples of 5 that are not multiple of 3 - This Multiples of 15 should be subtracted as that has been added twice. - A Pythonic implementation which uses a list comprehension. Note - The optimisation that only those multiples of 5 are added which are not the multiple of 3 eliminates the need to subtract the multiples of 15. - Among the slowest algorithms since lists are not very fast. Instrumentation - System Details: 8x Intel Core i7-3630QM CPU @ 2.40GHz, 16GB RAM, Ubuntu 14.04 - Input Details: NUMBERS_BELOW_N = 10 million - Time for 10 runs: Minimum - 0.9 sec, Average - 1.545 sec, Maximum 1.96 sec - Not among the fastest or the slowest algorithms. """ def answer(n): result = sum([x for x in range(3, n, 3)]) result = result + sum([x for x in range(5, n, 5) if x % 3 != 0]) return result
"""
Encrypt and decrypt a string (hex encoding).

Bug fix: ``str.encode('hex')`` was a Python-2-only codec and fails on
Python 3; the rewrite uses ``bytes.hex`` / ``bytes.fromhex`` instead.
"""


def encrypt(s):
    """Return the hexadecimal encoding of *s* (UTF-8 bytes -> hex string)."""
    return s.encode().hex()


def decrypt(s):
    """Inverse of :func:`encrypt`: decode a hex string back to text."""
    return bytes.fromhex(s).decode()
""" Encrypt and decrypt a string """ def encrypt(s): return s.encode('hex') def decrypt(s): return s.decode('hex')
# okpy ("OK") autograder test spec: locked "What would Python print?" (wwpp)
# cases on list indexing.  The hex blobs are hashed answers for locked cases.
test = { 'name': 'List Indexing', 'points': 0, 'suites': [ { 'cases': [ { 'code': r""" >>> x = [1, 3, [5, 7], 9] # Write the expression that indexes into x to output the 7 3450d5df7f6d639c9dc883cf31cc62bd # locked >>> x = [[7]] # Write the expression that indexes into x to output the 7 03e2a566fa83f164d9923dd9c2392471 # locked >>> x = [3, 2, 1, [9, 8, 7]] # Write the expression that indexes into x to output the 7 d24c8f2d11e78746dea49a7fde70642f # locked >>> x = [[3, [5, 7], 9]] # Write the expression that indexes into x to output the 7 9e7bc1866151855ea8424d3c51f4dbe6 # locked """, 'hidden': False, 'locked': True } ], 'scored': False, 'type': 'wwpp' }, { 'cases': [ { 'code': r""" >>> lst = [3, 2, 7, [84, 83, 82]] >>> lst[4] 8dfecce35cfbb620490b1aa9637bdafd # locked >>> lst[3][0] 89d2c4e2851d68c81d820360eb31bc36 # locked """, 'hidden': False, 'locked': True } ], 'scored': False, 'type': 'wwpp' } ] }
# okpy ("OK") autograder test spec (flattened form): locked "What would
# Python print?" (wwpp) list-indexing cases; hex blobs are hashed answers.
test = {'name': 'List Indexing', 'points': 0, 'suites': [{'cases': [{'code': '\n >>> x = [1, 3, [5, 7], 9] # Write the expression that indexes into x to output the 7\n 3450d5df7f6d639c9dc883cf31cc62bd\n # locked\n >>> x = [[7]] # Write the expression that indexes into x to output the 7\n 03e2a566fa83f164d9923dd9c2392471\n # locked\n >>> x = [3, 2, 1, [9, 8, 7]] # Write the expression that indexes into x to output the 7\n d24c8f2d11e78746dea49a7fde70642f\n # locked\n >>> x = [[3, [5, 7], 9]] # Write the expression that indexes into x to output the 7\n 9e7bc1866151855ea8424d3c51f4dbe6\n # locked\n ', 'hidden': False, 'locked': True}], 'scored': False, 'type': 'wwpp'}, {'cases': [{'code': '\n >>> lst = [3, 2, 7, [84, 83, 82]]\n >>> lst[4]\n 8dfecce35cfbb620490b1aa9637bdafd\n # locked\n >>> lst[3][0]\n 89d2c4e2851d68c81d820360eb31bc36\n # locked\n ', 'hidden': False, 'locked': True}], 'scored': False, 'type': 'wwpp'}]}
class NotFoundError(Exception):
    """Raised when a named resource cannot be located.

    Args:
        data: the value that could not be found.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        return 'Not Found {}, please check the name again'.format(self.data)


class CollisionError(Exception):
    """Raised when a collision is detected.

    Args:
        data: the colliding value.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        return 'Check the collision.. {}, please check settings again'.format(self.data)


class LimitJointError(Exception):
    """Raised when joint limits are violated.

    Args:
        *data: the offending joint values (stored as a tuple).
    """

    def __init__(self, *data):
        self.data = data

    def __str__(self):
        return 'Check the joints.. {}, please check current joints setting again'.format(self.data)


class OriValueError(Exception):
    """Raised for an orientation value of unexpected shape.

    Args:
        data: the value whose shape was rejected.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        return 'Expecting the shape of the orientation to be (3,), (3,3), or (4,), instead got:{}'.format(self.data)
# NOTE(review): class names below deviate from CapWords (e.g. Notfounderror);
# they are kept as-is because renaming would break any `except` clause that
# already catches them.
class Notfounderror(Exception):
    """Raised when a named resource cannot be located.

    Args:
        data: the value that could not be found.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        return 'Not Found {}, please check the name again'.format(self.data)


class Collisionerror(Exception):
    """Raised when a collision is detected.

    Args:
        data: the colliding value.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        return 'Check the collision.. {}, please check settings again'.format(self.data)


class Limitjointerror(Exception):
    """Raised when joint limits are violated.

    Args:
        *data: the offending joint values (stored as a tuple).
    """

    def __init__(self, *data):
        self.data = data

    def __str__(self):
        return 'Check the joints.. {}, please check current joints setting again'.format(self.data)


class Orivalueerror(Exception):
    """Raised for an orientation value of unexpected shape.

    Args:
        data: the value whose shape was rejected.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        return 'Expecting the shape of the orientation to be (3,), (3,3), or (4,), instead got:{}'.format(self.data)
# Number of full passes over the training data.
n_epochs = 200

losses = []
for epoch in range(n_epochs):
    # One epoch: run every mini-batch through the training step and
    # record the resulting loss.
    epoch_loss = mini_batch(device, train_loader, train_step)
    losses.append(epoch_loss)
n_epochs = 200  # total number of training epochs

# Collect one aggregate loss value per epoch.
losses = []
for epoch in range(n_epochs):
    losses += [mini_batch(device, train_loader, train_step)]
# -----------------------------------------------------------------------------
# Jump Game: greedy farthest-reach scan (O(n) time, O(1) space).
# -----------------------------------------------------------------------------
class Solution:
    def canJump(self, nums: [int]) -> bool:
        """Return True when the last index of *nums* is reachable from index 0."""
        reach = 0  # farthest index reachable so far
        for idx, step in enumerate(nums):
            if idx > reach:
                # This index lies beyond every reachable position.
                return False
            reach = max(reach, idx + step)
        return True
class Solution:
    def can_jump(self, nums: [int]) -> bool:
        """Greedy reachability check for Jump Game.

        Scans left to right, tracking the farthest reachable index; fails
        as soon as the scan passes that frontier.
        """
        farthest = 0
        for position, jump in enumerate(nums):
            if position > farthest:
                return False
            if position + jump > farthest:
                farthest = position + jump
        return True
def floyd_warshall(start, num_vertices=None, adjacency=None):
    """All-pairs shortest-path distances (Floyd-Warshall).

    Args:
        start: unused; kept for backward compatibility with existing callers.
        num_vertices: vertex count; defaults to the module-level ``n``.
        adjacency: n x n cost matrix; defaults to the module-level ``graph``.

    Returns:
        n x n matrix of shortest-path distances (input is not mutated).

    Bug fix: the relaxation step used ``dist[j][k] + dist[k][j]`` instead of
    ``dist[i][k] + dist[k][j]``, producing wrong distances.
    """
    if num_vertices is None:
        num_vertices = n
    if adjacency is None:
        adjacency = graph
    # Work on a copy so the caller's matrix is left untouched.
    dist = [[adjacency[j][i] for i in range(num_vertices)] for j in range(num_vertices)]
    for k in range(num_vertices):
        for i in range(num_vertices):
            for j in range(num_vertices):
                # Relax path i -> j through intermediate vertex k.
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist


if __name__ == '__main__':
    n, m = map(int, input().split())
    graph = [[float('Inf') for i in range(n)] for j in range(n)]
    for i in range(n):
        graph[i][i] = 0  # zero-cost self loops
    for i in range(m):
        u, v, c = map(int, input().split())
        graph[u][v] = c
def floyd_warshall(start, num_vertices=None, adjacency=None):
    """Compute all-pairs shortest paths with Floyd-Warshall.

    Args:
        start: unused; retained so existing call sites keep working.
        num_vertices: number of vertices; falls back to module-level ``n``.
        adjacency: n x n cost matrix; falls back to module-level ``graph``.

    Returns:
        n x n shortest-distance matrix; the input matrix is not mutated.

    Bug fix: the inner update previously read ``dist[j][k] + dist[k][j]``;
    the correct relaxation is ``dist[i][k] + dist[k][j]``.
    """
    if num_vertices is None:
        num_vertices = n
    if adjacency is None:
        adjacency = graph
    # Copy the adjacency matrix so relaxations do not alias the input.
    dist = [[adjacency[j][i] for i in range(num_vertices)] for j in range(num_vertices)]
    for k in range(num_vertices):
        for i in range(num_vertices):
            for j in range(num_vertices):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist


if __name__ == '__main__':
    (n, m) = map(int, input().split())
    graph = [[float('Inf') for i in range(n)] for j in range(n)]
    for i in range(n):
        graph[i][i] = 0  # distance of a vertex to itself
    for i in range(m):
        (u, v, c) = map(int, input().split())
        graph[u][v] = c
__revision__ = '$Id: __init__.py,v 1.2 2006/08/12 15:56:26 jkloth Exp $'

# Public API of this package: formatter implementations re-exported here.
__all__ = [
    'XmlFormatter',
    'ApiFormatter',
    'ExtensionFormatter',
    'CommandLineFormatter',
]
__revision__ = '$Id: __init__.py,v 1.2 2006/08/12 15:56:26 jkloth Exp $'

# Names exported via ``from <package> import *``.
__all__ = ['XmlFormatter', 'ApiFormatter',
           'ExtensionFormatter', 'CommandLineFormatter']
# Advent of Code 2021, day 8: seven-segment display decoding.
# Segment letters A-G follow the canonical layout; `display` maps each digit
# to the (uppercase) segments it lights.
display = {
    0: 'ABCEFG',
    1: 'CF',
    2: 'ACDEG',
    3: 'ACDFG',
    4: 'BCDF',
    5: 'ABDFG',
    6: 'ABDEFG',
    7: 'ACF',
    8: 'ABCDEFG',
    9: 'ABCDFG'
}

# Example wire-to-segment solution (lowercase observed wire -> true segment).
solve_it = {
    'a': 'C',
    'b': 'F',
    'c': 'G',
    'd': 'A',
    'e': 'B',
    'f': 'D',
    'g': 'E'
}


def part1(input_str: str) -> None:
    """Count output digits with a unique segment count (1, 7, 4, 8)."""
    count = 0
    for line in input_str.split('\n'):
        for output_num in line.split('|')[1].strip().split(' '):
            # Lengths 2/3/4/7 can only be digits 1/7/4/8 respectively.
            if len(output_num) in (2, 3, 4, 7):
                count += 1
    print(f'Day 8 Part 1: Count: {count}')


def part2(input_str: str) -> None:
    """Decode every line's four output digits and sum the resulting values."""
    total = 0
    line_num = 1
    for line in input_str.split('\n'):
        io_parts = line.split('|')
        input_list = io_parts[0].strip().split(' ')
        output_list = io_parts[1].strip().split(' ')
        key_dict = decode_input(input_list)
        output_num = decode_output(output_list, key_dict)
        total += output_num
        print(f'Line {line_num}: {output_num}')
        line_num += 1
    print(f'Day 8 Part 2: Total = {total}')


def decode_input(input_list: list[str]) -> dict[str, str]:
    """Derive the true-segment -> observed-wire mapping for one input line.

    Works by set elimination over the uniquely-sized patterns, as sketched
    in the worked example below.
    """
    # display = {
    #     1: 'CF',
    #     7: 'ACF',
    #     4: 'BCDF',
    #     5: 'ABDFG', => ADG(BF)
    #     2: 'ACDEG', => ADG(CE)
    #     3: 'ACDFG', => ADG(CF)
    #     9: 'ABCDFG'
    #     0: 'ABCEFG',
    #     6: 'ABDEFG',
    #     8: 'ABCDEFG',
    # }
    # 'ab', 'abd', 'abef', 'bcdef', 'acdfg', 'abcdf', 'abcdef', 'bcdefg', 'abcdeg', 'abcdefg'
    # 1.
    # ab -> CF => (ab ~ CF)
    # abd -> ACF => d = 'A'
    # 2.
    # abef -> BCDF => (ef ~ BD)
    # cdf -> ADG => (cf ~ DG)
    # f = 'D', e = 'B', c = 'G'
    # 3.
    # bcdef -> (ABDG)b => b = 'F', a = 'C'
    # acdfg -> (ADG)ag => g = 'E'
    sorted_list = [''.join(sorted(list_num)) for list_num in sorted(input_list, key=len)]
    tmp_dict = {}
    solved_dict = {}
    # Bucket patterns: unique lengths keyed directly, the three 5-segment
    # patterns collected under key 5.
    for x in sorted_list:
        if len(x) in (2, 3, 4, 7):
            tmp_dict.update({len(x): x})
        elif len(x) == 5:
            my_list = tmp_dict.get(5, [])
            my_list.append(x)
            tmp_dict.update({5: sorted(my_list)})
    # 1.  Segment A is the wire in '7' (len 3) but not in '1' (len 2).
    my_a = ''.join(set(tmp_dict.get(3)) - set(tmp_dict.get(2)))
    solved_dict.update({'A': my_a})
    # 2.  Intersections of '4' minus '1' with the common core of the three
    #     5-segment digits pin down D, then B and G fall out by elimination.
    four_and_two = set(tmp_dict.get(4)) - set(tmp_dict.get(2))
    five_and_three = set.intersection(set(tmp_dict.get(5)[0]), set(tmp_dict.get(5)[1]), set(tmp_dict.get(5)[2]))
    five_and_two = copy_set(five_and_three)
    five_and_two.discard(solved_dict.get('A'))
    tmp_d = four_and_two.intersection(five_and_two)
    my_d = ''.join(tmp_d)
    tmp_b = copy_set(four_and_two)
    tmp_b.discard(my_d)
    my_b = ''.join(tmp_b)
    tmp_g = copy_set(five_and_two)
    tmp_g.discard(my_d)
    my_g = ''.join(tmp_g)
    solved_dict.update({'D': my_d})
    solved_dict.update({'B': my_b})
    solved_dict.update({'G': my_g})
    # 3.  The 5-segment pattern that reduces to a single unknown wire gives
    #     F, and then C is the remaining wire of digit '1'.
    for tmp_5 in tmp_dict.get(5):
        tmp_5_set = set(tmp_5)
        for v in solved_dict.values():
            tmp_5_set.discard(v)
        if len(tmp_5_set) == 1:
            my_f = ''.join(tmp_5_set)
            solved_dict.update({'F': my_f})
            tmp_2 = set(tmp_dict.get(2))
            tmp_2.discard(my_f)
            my_c = ''.join(tmp_2)
            solved_dict.update({'C': my_c})
            break
    # 4.  E is whatever wire of '8' (all seven) is still unassigned.
    tmp_7_set = set(tmp_dict.get(7))
    for v in solved_dict.values():
        tmp_7_set.discard(v)
    my_e = ''.join(tmp_7_set)
    solved_dict.update({'E': my_e})
    print(sorted_list)
    print(solved_dict)
    return solved_dict


def copy_set(my_set: set) -> set:
    """Return a shallow copy of *my_set*."""
    return set([x for x in my_set])


def find_intersections(master_list: list[list[str]]) -> set[str]:
    """Collect every element shared by at least two of the given lists."""
    result = []
    for idx1 in range(0, len(master_list)):
        s1 = set(master_list[idx1])
        for idx2 in range(0, len(master_list)):
            if idx2 == idx1:
                continue
            else:
                s2 = set(master_list[idx2])
                s3 = s1.intersection(s2)
                result.extend(s3)
    return set(result)


def decode_output(output_list: list[str], key_dict: dict[str, str]) -> int:
    """Translate the four output patterns into a single 4-digit integer."""
    # Invert segment->wire into wire->segment for lookups.
    my_key_dict = dict((v, k) for k, v in key_dict.items())
    str_value = ''
    for output_num in output_list:
        decode = ''
        for char in sorted(output_num):
            decode += my_key_dict.get(char).upper()
        decode = ''.join(sorted(decode))
        # Match the normalized segment string against the digit table.
        for k, v in display.items():
            if v == decode:
                str_value += str(k)
                break
    return int(str_value)


if __name__ == '__main__':
    with open('../../resources/2021/inputd8a.txt', 'r') as f:
        test_string = f.read()
    part1(test_string)
    part2(test_string)
    with open('../../resources/2021/inputd8.txt', 'r') as f:
        test_input = f.read()
    part1(test_input)
    part2(test_input)
# Advent of Code 2021, day 8: seven-segment display decoding.
# `display` maps each digit to the uppercase segment letters it lights.
display = {0: 'ABCEFG', 1: 'CF', 2: 'ACDEG', 3: 'ACDFG', 4: 'BCDF', 5: 'ABDFG', 6: 'ABDEFG', 7: 'ACF', 8: 'ABCDEFG', 9: 'ABCDFG'}
# Example wire-to-segment solution (observed lowercase wire -> true segment).
solve_it = {'a': 'C', 'b': 'F', 'c': 'G', 'd': 'A', 'e': 'B', 'f': 'D', 'g': 'E'}


def part1(input_str: str) -> None:
    """Count output digits whose segment count is unique (digits 1, 7, 4, 8)."""
    count = 0
    for line in input_str.split('\n'):
        for output_num in line.split('|')[1].strip().split(' '):
            # Lengths 2/3/4/7 correspond uniquely to digits 1/7/4/8.
            if len(output_num) in (2, 3, 4, 7):
                count += 1
    print(f'Day 8 Part 1: Count: {count}')


def part2(input_str: str) -> None:
    """Decode each line's four output digits and sum the decoded values."""
    total = 0
    line_num = 1
    for line in input_str.split('\n'):
        io_parts = line.split('|')
        input_list = io_parts[0].strip().split(' ')
        output_list = io_parts[1].strip().split(' ')
        key_dict = decode_input(input_list)
        output_num = decode_output(output_list, key_dict)
        total += output_num
        print(f'Line {line_num}: {output_num}')
        line_num += 1
    print(f'Day 8 Part 2: Total = {total}')


def decode_input(input_list: list[str]) -> dict[str, str]:
    """Derive the true-segment -> observed-wire mapping for one input line
    by set elimination over the uniquely-sized patterns.
    """
    sorted_list = [''.join(sorted(list_num)) for list_num in sorted(input_list, key=len)]
    tmp_dict = {}
    solved_dict = {}
    # Bucket patterns: unique lengths keyed directly; the three 5-segment
    # patterns are collected in a sorted list under key 5.
    for x in sorted_list:
        if len(x) in (2, 3, 4, 7):
            tmp_dict.update({len(x): x})
        elif len(x) == 5:
            my_list = tmp_dict.get(5, [])
            my_list.append(x)
            tmp_dict.update({5: sorted(my_list)})
    # Segment A: in '7' (len 3) but not in '1' (len 2).
    my_a = ''.join(set(tmp_dict.get(3)) - set(tmp_dict.get(2)))
    solved_dict.update({'A': my_a})
    # D from ('4' minus '1') intersected with the 5-segment common core
    # (minus A); B and G then fall out by elimination.
    four_and_two = set(tmp_dict.get(4)) - set(tmp_dict.get(2))
    five_and_three = set.intersection(set(tmp_dict.get(5)[0]), set(tmp_dict.get(5)[1]), set(tmp_dict.get(5)[2]))
    five_and_two = copy_set(five_and_three)
    five_and_two.discard(solved_dict.get('A'))
    tmp_d = four_and_two.intersection(five_and_two)
    my_d = ''.join(tmp_d)
    tmp_b = copy_set(four_and_two)
    tmp_b.discard(my_d)
    my_b = ''.join(tmp_b)
    tmp_g = copy_set(five_and_two)
    tmp_g.discard(my_d)
    my_g = ''.join(tmp_g)
    solved_dict.update({'D': my_d})
    solved_dict.update({'B': my_b})
    solved_dict.update({'G': my_g})
    # The 5-segment pattern with exactly one unresolved wire yields F,
    # and C is the remaining wire of digit '1'.
    for tmp_5 in tmp_dict.get(5):
        tmp_5_set = set(tmp_5)
        for v in solved_dict.values():
            tmp_5_set.discard(v)
        if len(tmp_5_set) == 1:
            my_f = ''.join(tmp_5_set)
            solved_dict.update({'F': my_f})
            tmp_2 = set(tmp_dict.get(2))
            tmp_2.discard(my_f)
            my_c = ''.join(tmp_2)
            solved_dict.update({'C': my_c})
            break
    # E: the single wire of '8' (all seven) still unassigned.
    tmp_7_set = set(tmp_dict.get(7))
    for v in solved_dict.values():
        tmp_7_set.discard(v)
    my_e = ''.join(tmp_7_set)
    solved_dict.update({'E': my_e})
    print(sorted_list)
    print(solved_dict)
    return solved_dict


def copy_set(my_set: set) -> set:
    """Return a shallow copy of *my_set*."""
    return set([x for x in my_set])


def find_intersections(master_list: list[list[str]]) -> set[str]:
    """Collect every element shared by at least two of the given lists."""
    result = []
    for idx1 in range(0, len(master_list)):
        s1 = set(master_list[idx1])
        for idx2 in range(0, len(master_list)):
            if idx2 == idx1:
                continue
            else:
                s2 = set(master_list[idx2])
                s3 = s1.intersection(s2)
                result.extend(s3)
    return set(result)


def decode_output(output_list: list[str], key_dict: dict[str, str]) -> int:
    """Translate the four output patterns into a single 4-digit integer."""
    # Invert segment->wire into wire->segment for per-character lookups.
    my_key_dict = dict(((v, k) for (k, v) in key_dict.items()))
    str_value = ''
    for output_num in output_list:
        decode = ''
        for char in sorted(output_num):
            decode += my_key_dict.get(char).upper()
        decode = ''.join(sorted(decode))
        # Match the normalized segment string against the digit table.
        for (k, v) in display.items():
            if v == decode:
                str_value += str(k)
                break
    return int(str_value)


if __name__ == '__main__':
    with open('../../resources/2021/inputd8a.txt', 'r') as f:
        test_string = f.read()
    part1(test_string)
    part2(test_string)
    with open('../../resources/2021/inputd8.txt', 'r') as f:
        test_input = f.read()
    part1(test_input)
    part2(test_input)
# Read an integer from the user and report how it compares with 5.
n = int(input("value of n\n"))
message = "n is less than 5" if n < 5 else ("n is equal to 5" if n == 5 else "n is greater than 5")
print(message)
# Compare a user-supplied integer against 5 and print the relation.
n = int(input('value of n\n'))
if n == 5:
    print('n is equal to 5')
elif n < 5:
    print('n is less than 5')
else:
    print('n is greater than 5')
""" Dictionary of supported JIRA events and output friendly format """ jira_events = { "project_created": "New Project Created", "jira:issue_created": "New Issue Created", "jira:issue_updated": "Issue Updated" } issue_events = { "issue_commented": "Comment Added", "issue_comment_edited": "Comment Edited", "issue_comment_deleted": "Comment Deleted" }
""" Dictionary of supported JIRA events and output friendly format """ jira_events = {'project_created': 'New Project Created', 'jira:issue_created': 'New Issue Created', 'jira:issue_updated': 'Issue Updated'} issue_events = {'issue_commented': 'Comment Added', 'issue_comment_edited': 'Comment Edited', 'issue_comment_deleted': 'Comment Deleted'}
"""Practice question from Chapter 1, Module 5 of the IBM Digital Nation
courses, by Aashik J Krishnan / Aash Gates."""

# A name can be rebound to values of different types.
x = 8
print(x)

x = "seven"  # rebound to a string
print(x)

x = 6  # final value; end of the program
""" practice qusestion from chapter 1 Module 5 of IBM Digital Nation Courses by Aashik J Krishnan/Aash Gates """ x = 8 print(x) x = 'seven' print(x) x = 6
class ApiKeyManager(object):
    """Creates and looks up per-user API keys via the app's model layer."""

    def __init__(self, app):
        self.app = app

    def create_api_key(self, user):
        """Mint a fresh GUID-backed key for *user* and persist it."""
        guid = self.app.security.get_new_guid()
        api_key = self.app.model.APIKeys()
        api_key.user_id = user.id
        api_key.key = guid
        session = self.app.model.context
        session.add(api_key)
        session.flush()
        return guid

    def get_or_create_api_key(self, user):
        """Return the user's first key, creating one when none exists.

        This is the logic Galaxy has always used — but it would appear to
        have a race condition; fixing it in multi-process mode would need
        something like a message queue.
        """
        if user.api_keys:
            return user.api_keys[0].key
        return self.create_api_key(user)
# NOTE(review): class name deviates from CapWords (Apikeymanager); it is
# kept unchanged because renaming would break importers.
class Apikeymanager(object):
    """Creates and looks up per-user API keys via the app's model layer."""

    def __init__(self, app):
        self.app = app

    def create_api_key(self, user):
        """Generate a GUID key for *user*, persist it, and return the GUID."""
        guid = self.app.security.get_new_guid()
        api_key = self.app.model.APIKeys()
        api_key.user_id = user.id
        api_key.key = guid
        session = self.app.model.context
        session.add(api_key)
        session.flush()
        return guid

    def get_or_create_api_key(self, user):
        """Return the user's first existing key, or create a new one.

        NOTE(review): check-then-create is racy under concurrent requests.
        """
        if user.api_keys:
            return user.api_keys[0].key
        return self.create_api_key(user)
class Solution(object):
    def uniquePathsWithObstacles(self, obstacleGrid):
        """
        :type obstacleGrid: List[List[int]]
        :rtype: int

        Tabular DP: each cell's path count is the sum of the counts from
        the cell above and the cell to the left; obstacle cells count 0.
        """
        paths = []
        for row_idx, row in enumerate(obstacleGrid):
            current = []
            for col_idx, cell in enumerate(row):
                if cell == 1:
                    current.append(0)                 # obstacle: unreachable
                elif row_idx == 0 and col_idx == 0:
                    current.append(1)                 # start cell
                elif row_idx == 0:
                    current.append(current[col_idx - 1])        # only from the left
                elif col_idx == 0:
                    current.append(paths[row_idx - 1][col_idx])  # only from above
                else:
                    current.append(current[col_idx - 1] + paths[row_idx - 1][col_idx])
            paths.append(current)
        return paths[-1][-1]


if __name__ == '__main__':
    sol = Solution()
    print(sol.uniquePathsWithObstacles([[0, 0, 0], [0, 1, 0], [0, 0, 0]]))
class Solution(object):
    def unique_paths_with_obstacles(self, obstacleGrid):
        """
        :type obstacleGrid: List[List[int]]
        :rtype: int

        Dynamic programming over the grid: a cell's path count is the sum
        of the counts above and to the left; obstacle cells contribute 0.
        """
        rslt = []
        for i in range(len(obstacleGrid)):
            r = []
            for j in range(len(obstacleGrid[i])):
                if obstacleGrid[i][j] == 1:
                    r.append(0)                # obstacle: unreachable
                elif i == 0 and j == 0:
                    r.append(1)                # start cell
                elif i == 0:
                    r.append(r[j - 1])         # first row: only from the left
                elif j == 0:
                    r.append(rslt[i - 1][j])   # first column: only from above
                else:
                    r.append(r[j - 1] + rslt[i - 1][j])
            rslt.append(r)
        return rslt[-1][-1]


if __name__ == '__main__':
    # BUG FIX: was `sol = solution()` (undefined lowercase name) followed by a
    # call to the old method name `uniquePathsWithObstacles` (AttributeError).
    sol = Solution()
    print(sol.unique_paths_with_obstacles([[0, 0, 0], [0, 1, 0], [0, 0, 0]]))
# Convert a total number of seconds read from stdin into H:M:S parts.
totalSegundos = int(input())

quantidadeHoras = totalSegundos // 3600            # whole hours
segundosHoras = quantidadeHoras * 3600             # seconds accounted for by hours
restante = totalSegundos - segundosHoras           # leftover after hours

quantidadeMinutos = restante // 60                 # whole minutes
segundosMinutos = quantidadeMinutos * 60           # seconds accounted for by minutes
quantidadeSegundos = restante - segundosMinutos    # leftover seconds

print('{}:{}:{}'.format(quantidadeHoras, quantidadeMinutos, quantidadeSegundos))
# Convert a total number of seconds read from stdin into H:M:S parts.
# BUG FIX: the assignments used snake_case names but every right-hand side
# still referenced the old camelCase names (totalSegundos, quantidadeHoras,
# ...), so the script raised NameError on the second line.  All references
# now consistently use the snake_case names.
total_segundos = int(input())

quantidade_horas = total_segundos // 60 // 60       # whole hours
segundos_horas = quantidade_horas * 60 * 60         # seconds covered by hours
restante = total_segundos - segundos_horas          # leftover after hours

quantidade_minutos = restante // 60                 # whole minutes
segundos_minutos = quantidade_minutos * 60          # seconds covered by minutes
quantidade_segundos = restante - segundos_minutos   # leftover seconds

print('{}:{}:{}'.format(quantidade_horas, quantidade_minutos, quantidade_segundos))
class Solution:
    def threeSumClosest(self, nums, target):
        """Return the sum of three elements of *nums* closest to *target*.

        Sorts *nums* in place, then runs a two-pointer scan for each left
        anchor; the very first triple seeds the running best.
        """
        nums.sort()
        out = 0
        for left in range(len(nums) - 1):
            lo, hi = left + 1, len(nums) - 1
            while lo < hi:
                total = nums[left] + nums[lo] + nums[hi]
                # Seed unconditionally on the first triple; afterwards keep
                # whichever sum is closest to the target.
                first_triple = left == 0 and lo == 1 and hi == len(nums) - 1
                if first_triple or abs(total - target) < abs(out - target):
                    out = total
                if total < target:
                    lo += 1
                else:
                    hi -= 1
        return out


d = Solution()
d = d.threeSumClosest([1, 1, -1, -1, 3], -1)
print(d)
class Solution:
    def three_sum_closest(self, nums, target):
        """Return the sum of three elements of *nums* closest to *target*.

        Two-pointer scan over the sorted list.  Note: sorts *nums* in place.
        """
        nums.sort()
        out = 0
        for i in range(len(nums) - 1):
            j = i + 1
            k = len(nums) - 1
            while j < k:
                if i == 0 and j == 1 and (k == len(nums) - 1):
                    # Very first triple: seed `out` unconditionally.
                    out = nums[i] + nums[j] + nums[k]
                    if nums[i] + nums[j] + nums[k] < target:
                        j += 1
                    else:
                        k -= 1
                else:
                    if abs(nums[i] + nums[j] + nums[k] - target) < abs(out - target):
                        out = nums[i] + nums[j] + nums[k]
                    if nums[i] + nums[j] + nums[k] < target:
                        j += 1
                    else:
                        k -= 1
        return out


# BUG FIX: was `d = solution()` (undefined lowercase name) followed by a call
# via the old method name `threeSumClosest` (the method is three_sum_closest).
d = Solution()
d = d.three_sum_closest([1, 1, -1, -1, 3], -1)
print(d)
""" Test the health check endpoints """ def test_live(mini_sentry, relay): """Internal endpoint used by kubernetes """ relay = relay(mini_sentry) response = relay.get("/api/relay/healthcheck/live/") assert response.status_code == 200 def test_external_live(mini_sentry, relay): """Endpoint called by a downstream to see if it has network connection to the upstream. """ relay = relay(mini_sentry) response = relay.get("/api/0/relays/live/") assert response.status_code == 200 def test_is_healthy(mini_sentry, relay): """Internal endpoint used by kubernetes """ relay = relay(mini_sentry) # NOTE this is redundant but palced here to clearly show the exposed endpoint # (internally the relay fixture waits for the ready health check anyway) response = relay.get("/api/relay/healthcheck/ready/") assert response.status_code == 200
""" Test the health check endpoints """ def test_live(mini_sentry, relay): """Internal endpoint used by kubernetes """ relay = relay(mini_sentry) response = relay.get('/api/relay/healthcheck/live/') assert response.status_code == 200 def test_external_live(mini_sentry, relay): """Endpoint called by a downstream to see if it has network connection to the upstream. """ relay = relay(mini_sentry) response = relay.get('/api/0/relays/live/') assert response.status_code == 200 def test_is_healthy(mini_sentry, relay): """Internal endpoint used by kubernetes """ relay = relay(mini_sentry) response = relay.get('/api/relay/healthcheck/ready/') assert response.status_code == 200
# -*- coding: utf-8 -*-

# Brazilian area (DDD) codes mapped to their city names.
DDD_TABLE = {
    '61': 'Brasilia',
    '71': 'Salvador',
    '11': 'Sao Paulo',
    '21': 'Rio de Janeiro',
    '32': 'Juiz de Fora',
    '19': 'Campinas',
    '27': 'Vitoria',
    '31': 'Belo Horizonte',
}


def main():
    """Read a DDD code from stdin and print its city, or a fallback message."""
    ddd = input()
    print(DDD_TABLE.get(ddd, 'DDD nao cadastrado'))


if __name__ == '__main__':
    main()
# Brazilian area (DDD) codes mapped to their city names.
ddd_table = {'61': 'Brasilia', '71': 'Salvador', '11': 'Sao Paulo', '21': 'Rio de Janeiro', '32': 'Juiz de Fora', '19': 'Campinas', '27': 'Vitoria', '31': 'Belo Horizonte'}


def main():
    """Read a DDD code from stdin and print the city or a not-found message."""
    ddd = input()
    # BUG FIX: the lookups referenced the undefined upper-case name
    # `DDD_TABLE`; the dict defined above is `ddd_table`.
    if ddd in ddd_table:
        print(ddd_table[ddd])
    else:
        print('DDD nao cadastrado')


if __name__ == '__main__':
    main()
minimal_message = """ { "$schema": "../../harmony/app/schemas/data-operation/0.7.0/data-operation-v0.7.0.json", "version": "0.7.0", "callback": "http://localhost/some-path", "stagingLocation": "s3://example-bucket/public/some-org/some-service/some-uuid/", "user": "jdoe", "client": "curl", "requestId": "00001111-2222-3333-4444-555566667777", "sources": [ ], "format": { }, "subset": { } } """ minimal_source_message = """ { "$schema": "../../harmony/app/schemas/data-operation/0.7.0/data-operation-v0.7.0.json", "version": "0.7.0", "callback": "http://localhost/some-path", "stagingLocation": "s3://example-bucket/public/some-org/some-service/some-uuid/", "user": "jdoe", "client": "curl", "requestId": "00001111-2222-3333-4444-555566667777", "sources": [ { "collection": "C0001-EXAMPLE", "variables": [], "granules": [] } ], "format": { }, "subset": { } } """ full_message = """ { "$schema": "../../harmony/app/schemas/data-operation/0.7.0/data-operation-v0.7.0.json", "version": "0.7.0", "callback": "http://localhost/some-path", "stagingLocation": "s3://example-bucket/public/some-org/some-service/some-uuid/", "user": "jdoe", "client": "curl", "requestId": "00001111-2222-3333-4444-555566667777", "isSynchronous": true, "sources": [ { "collection": "C0001-EXAMPLE", "variables": [ { "id": "V0001-EXAMPLE", "name": "ExampleVar1", "fullPath": "example/path/ExampleVar1" } ], "granules": [ { "id": "G0001-EXAMPLE", "name": "Example1", "url": "file://example/example_granule_1.txt", "temporal": { "start": "2001-01-01T01:01:01Z", "end": "2002-02-02T02:02:02Z" }, "bbox": [-1, -2, 3, 4] }, { "id": "G0002-EXAMPLE", "name": "Example2", "url": "file://example/example_granule_2.txt", "temporal": { "start": "2003-03-03T03:03:03Z", "end": "2004-04-04T04:04:04Z" }, "bbox": [-5, -6, 7, 8] } ]}, { "collection": "C0002-EXAMPLE", "variables": [ { "id": "V0002-EXAMPLE", "name": "ExampleVar2", "fullPath": "example/path/ExampleVar2" } ], "granules": [ { "id": "G0003-EXAMPLE", "name": "Example3", "url": 
"file://example/example_granule_3.txt", "temporal": { "start": "2005-05-05T05:05:05Z", "end": "2006-06-06T06:06:06Z" }, "bbox": [-9, -10, 11, 12] }, { "id": "G0004-EXAMPLE", "name": "Example4", "url": "file://example/example_granule_4.txt", "temporal": { "start": "2007-07-07T07:07:07Z", "end": "2008-08-08T08:08:08Z" }, "bbox": [-13, -14, 15, 16] } ] } ], "format": { "crs": "CRS:84", "srs": { "proj4": "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs", "wkt": "PROJCS[ ... ]", "epsg": "EPSG:7030" }, "isTransparent": true, "mime": "image/tiff", "width": 800, "height": 600, "dpi": 72, "interpolation": "near", "scaleExtent": { "x": { "min": 0.5, "max": 125 }, "y": { "min": 52, "max": 75.22 } }, "scaleSize": { "x": 14.2, "y": 35 } }, "temporal": { "start": "1999-01-01T10:00:00Z", "end": "2020-02-20T15:00:00Z" }, "subset": { "bbox": [ -91.1, -45.0, 91.1, 45.0 ], "shape": { "href": "s3://example-bucket/shapefiles/abcd.json", "type": "application/geo+json" } } } """
# JSON fixtures for Harmony data-operation messages (schema v0.7.0),
# stored with explicit \n escapes.  Smallest valid message first.
minimal_message = '\n {\n "$schema": "../../harmony/app/schemas/data-operation/0.7.0/data-operation-v0.7.0.json",\n "version": "0.7.0",\n "callback": "http://localhost/some-path",\n "stagingLocation": "s3://example-bucket/public/some-org/some-service/some-uuid/",\n "user": "jdoe",\n "client": "curl",\n "requestId": "00001111-2222-3333-4444-555566667777",\n "sources": [\n ],\n "format": {\n },\n "subset": {\n }\n }\n'
# One source with empty variables and granules lists.
minimal_source_message = '\n {\n "$schema": "../../harmony/app/schemas/data-operation/0.7.0/data-operation-v0.7.0.json",\n "version": "0.7.0",\n "callback": "http://localhost/some-path",\n "stagingLocation": "s3://example-bucket/public/some-org/some-service/some-uuid/",\n "user": "jdoe",\n "client": "curl",\n "requestId": "00001111-2222-3333-4444-555566667777",\n "sources": [\n {\n "collection": "C0001-EXAMPLE",\n "variables": [],\n "granules": []\n }\n ],\n "format": {\n },\n "subset": {\n }\n }\n'
# Fully populated request: two collections with variables/granules, output
# format/CRS, temporal range and spatial subset (bbox + shapefile).
full_message = '\n {\n "$schema": "../../harmony/app/schemas/data-operation/0.7.0/data-operation-v0.7.0.json",\n "version": "0.7.0",\n "callback": "http://localhost/some-path",\n "stagingLocation": "s3://example-bucket/public/some-org/some-service/some-uuid/",\n "user": "jdoe",\n "client": "curl",\n "requestId": "00001111-2222-3333-4444-555566667777",\n "isSynchronous": true,\n "sources": [\n {\n "collection": "C0001-EXAMPLE",\n "variables": [\n {\n "id": "V0001-EXAMPLE",\n "name": "ExampleVar1",\n "fullPath": "example/path/ExampleVar1"\n }\n ],\n "granules": [\n {\n "id": "G0001-EXAMPLE",\n "name": "Example1",\n "url": "file://example/example_granule_1.txt",\n "temporal": {\n "start": "2001-01-01T01:01:01Z",\n "end": "2002-02-02T02:02:02Z"\n },\n "bbox": [-1, -2, 3, 4]\n },\n {\n "id": "G0002-EXAMPLE",\n "name": "Example2",\n "url": "file://example/example_granule_2.txt",\n "temporal": {\n "start": "2003-03-03T03:03:03Z",\n "end": "2004-04-04T04:04:04Z"\n },\n "bbox": [-5, -6, 7, 8]\n }\n ]}, {\n "collection": "C0002-EXAMPLE",\n "variables": [\n {\n "id": "V0002-EXAMPLE",\n "name": "ExampleVar2",\n "fullPath": "example/path/ExampleVar2"\n }\n ],\n "granules": [\n {\n "id": "G0003-EXAMPLE",\n "name": "Example3",\n "url": "file://example/example_granule_3.txt",\n "temporal": {\n "start": "2005-05-05T05:05:05Z",\n "end": "2006-06-06T06:06:06Z"\n },\n "bbox": [-9, -10, 11, 12]\n },\n {\n "id": "G0004-EXAMPLE",\n "name": "Example4",\n "url": "file://example/example_granule_4.txt",\n "temporal": {\n "start": "2007-07-07T07:07:07Z",\n "end": "2008-08-08T08:08:08Z"\n },\n "bbox": [-13, -14, 15, 16]\n }\n ]\n }\n ],\n "format": {\n "crs": "CRS:84",\n "srs": {\n "proj4": "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs",\n "wkt": "PROJCS[ ... ]",\n "epsg": "EPSG:7030"\n },\n "isTransparent": true,\n "mime": "image/tiff",\n "width": 800,\n "height": 600,\n "dpi": 72,\n "interpolation": "near",\n "scaleExtent": { "x": { "min": 0.5, "max": 125 }, "y": { "min": 52, "max": 75.22 } },\n "scaleSize": { "x": 14.2, "y": 35 }\n },\n "temporal": {\n "start": "1999-01-01T10:00:00Z",\n "end": "2020-02-20T15:00:00Z"\n },\n "subset": {\n "bbox": [\n -91.1,\n -45.0,\n 91.1,\n 45.0\n ],\n "shape": {\n "href": "s3://example-bucket/shapefiles/abcd.json",\n "type": "application/geo+json"\n }\n }\n }\n'
# Parse a character stream of nested groups '{...}' containing garbage
# sections '<...>'; '!' inside garbage cancels the next character.
with open('09.txt') as fd:
    data = fd.readline().strip()

pos = 0
current = []        # group currently being filled
stack = []          # enclosing (parent) groups
in_garbage = False  # inside a '<...>' garbage section
garbage = 0         # count of non-cancelled garbage characters
while pos < len(data):
    c = data[pos]
    pos += 1
    if in_garbage:
        if c == '!':
            pos += 1          # '!' cancels the next character
        elif c == '>':
            in_garbage = False
        else:
            garbage += 1
    else:
        if c == '{':
            # Open a nested group: descend into a fresh child list.
            child = []
            current.append(child)
            stack.append(current)
            current = child
        elif c == '<':
            in_garbage = True
        elif c == '}':
            assert len(stack) > 0, 'unbalanced parens, too many closing'
            current = stack.pop()
        elif c == ',':
            pass
        else:
            raise Exception('unknown char ' + c)
assert len(stack) == 0, 'unbalanced parens, too few closing'


def score(node, depth=0):
    # Each group scores its nesting depth; children are one level deeper.
    return depth + sum(score(n, depth + 1) for n in node)


print(current)
print(score(current))
print(garbage)
with open('09.txt') as fd: data = fd.readline().strip() pos = 0 current = [] stack = [] in_garbage = False garbage = 0 while pos < len(data): c = data[pos] pos += 1 if in_garbage: if c == '!': pos += 1 elif c == '>': in_garbage = False else: garbage += 1 elif c == '{': child = [] current.append(child) stack.append(current) current = child elif c == '<': in_garbage = True elif c == '}': assert len(stack) > 0, 'unbalanced parens, too many closing' current = stack.pop() elif c == ',': pass else: raise exception('unknown char ' + c) assert len(stack) == 0, 'unbalanced parens, too few closing' def score(node, depth=0): return depth + sum((score(n, depth + 1) for n in node)) print(current) print(score(current)) print(garbage)
""" Test Case 1 def main(): LL = [] print('Original List: ', LL) LL.reverse() print('Reversed List: ', LL) """ """ Test Case 1 - Results Original List: [] Reversed List: [] """ """ Test Case 2 def main(): LL = [1, 2, 3] print('Original List:', LL) LL.reverse() print('Reversed List:', LL) """ """ Test Case 2 - Results Original List: [1, 2, 3] Reversed List: [1, 2, 3] """ """ Test Case 3 def main(): LL = [9, 8, 7, 6, 5, 4, 3, 2, 1] print('Original List:', LL) LL.reverse() print('Reversed List:', LL) """ """ Test Case 3 - Results Original List: [9, 8, 7, 6, 5, 4, 3, 2, 1] Reversed List: [1, 2, 3, 4, 5, 6, 7, 8, 9] """
""" Test Case 1 def main(): LL = [] print('Original List: ', LL) LL.reverse() print('Reversed List: ', LL) """ '\nTest Case 1 - Results\nOriginal List: []\nReversed List: []\n' "\nTest Case 2\ndef main():\n LL = [1, 2, 3]\n print('Original List:', LL)\n LL.reverse()\n print('Reversed List:', LL)\n" '\nTest Case 2 - Results\nOriginal List: [1, 2, 3]\nReversed List: [1, 2, 3]\n' "\nTest Case 3\ndef main():\n LL = [9, 8, 7, 6, 5, 4, 3, 2, 1]\n print('Original List:', LL)\n LL.reverse()\n print('Reversed List:', LL)\n" '\nTest Case 3 - Results\nOriginal List: [9, 8, 7, 6, 5, 4, 3, 2, 1]\nReversed List: [1, 2, 3, 4, 5, 6, 7, 8, 9]\n'
f""" Temperature Conversions - SOLUTIONS """ # You're studying climate change, and over the last 3 years, you've recorded the temperature at noon every day in degrees Fahrenheit (F). The var sampleF holds a portion of those recordings. sampleF = [91.4, 82.4, 71.6, 107.6, 115.6] # Convert each item in this list into degrees Celsius and add the results to a dict called sample_temps so that the conversion of each day's temperature is easily accessible (no need to round). For reference, the conversion equation between F and C is: # Celsius = (Fahrenheit - 32) * 5.0/9.0 sample_temps = {} for f in sampleF: c = (f - 32)*(5/9) sample_temps.update({f: format(c, '.2f')}) for k, v in sample_temps.items(): print(f'{k} F -> {v} C') """ 91.4 F -> 33.00 C 82.4 F -> 28.00 C 71.6 F -> 22.00 C 107.6 F -> 42.00 C 115.6 F -> 46.44 C """
f'\nTemperature Conversions - SOLUTIONS\n' sample_f = [91.4, 82.4, 71.6, 107.6, 115.6] sample_temps = {} for f in sampleF: c = (f - 32) * (5 / 9) sample_temps.update({f: format(c, '.2f')}) for (k, v) in sample_temps.items(): print(f'{k} F -> {v} C') '\n91.4 F -> 33.00 C\n82.4 F -> 28.00 C\n71.6 F -> 22.00 C\n107.6 F -> 42.00 C\n115.6 F -> 46.44 C\n'
# Use the range function to loop through a code set 6 times. for x in range(6): print(x)
for x in range(6): print(x)
""" File for providing static messages or data """ def get_app_message(key): all_messages = { 'register_success': 'We will be working hard to process your request.', 'register_error': 'Invalid Data', 'register_error_message': 'Sorry, we are unable to process your request. Please make sure you fill out all required fields and try again.', 'approval_error': 'Management Error', 'approval_error_message': 'There is an issue with your approval request. Please contact a system administrator.', 'enroll_error': 'Enrollment Failure', 'enroll_error_message': 'An error occurred during customer enrollment. Please contact a system administrator.', 'enroll_success': 'A new customer has been successfully enrolled. We look forward to taking their orders.', 'oppo_error': 'Acknowledgment Failed', 'oppo_error_message': 'Sorry, we are unable to create this opportunity. Please contact a system administrator.', 'oppo_can_error': 'Annulment Failure', 'oppo_can_message': 'Internal error occurred for the request opportunity. Please contact a system administrator.', 'catalog_error': 'Business Catalog Error', 'catalog_error_message': 'We have encountered an error while retrieving data from the current client partner. This might be caused by incomplete catalog definition. Please contact your business client.', 'order_submit_message': 'Your order has been submitted successfully. 
Please keep in touch with the vendor.', 'order_cancel_message': 'Your order has been cancelled.', '': '', } message = all_messages[key] if message is None or message == '': return 'Message not found' else: return message def addSnackDataToContext(context, message): if context is None: context = {} if (message == 'ERR01'): context['snack_data'] = 'Internal server failure' elif (message == 'ERR02'): pass else: context['snack_data'] = message return context def getNewOppoMessage(oppo_number): if not oppo_number: return '' oppo_number = str(oppo_number).replace('-', '') return 'Opportunity number <span class="imp-num">{0}</span> has been created successfully. Please share this number with the customer.'.format(oppo_number)
""" File for providing static messages or data """ def get_app_message(key): all_messages = {'register_success': 'We will be working hard to process your request.', 'register_error': 'Invalid Data', 'register_error_message': 'Sorry, we are unable to process your request. Please make sure you fill out all required fields and try again.', 'approval_error': 'Management Error', 'approval_error_message': 'There is an issue with your approval request. Please contact a system administrator.', 'enroll_error': 'Enrollment Failure', 'enroll_error_message': 'An error occurred during customer enrollment. Please contact a system administrator.', 'enroll_success': 'A new customer has been successfully enrolled. We look forward to taking their orders.', 'oppo_error': 'Acknowledgment Failed', 'oppo_error_message': 'Sorry, we are unable to create this opportunity. Please contact a system administrator.', 'oppo_can_error': 'Annulment Failure', 'oppo_can_message': 'Internal error occurred for the request opportunity. Please contact a system administrator.', 'catalog_error': 'Business Catalog Error', 'catalog_error_message': 'We have encountered an error while retrieving data from the current client partner. This might be caused by incomplete catalog definition. Please contact your business client.', 'order_submit_message': 'Your order has been submitted successfully. 
Please keep in touch with the vendor.', 'order_cancel_message': 'Your order has been cancelled.', '': ''} message = all_messages[key] if message is None or message == '': return 'Message not found' else: return message def add_snack_data_to_context(context, message): if context is None: context = {} if message == 'ERR01': context['snack_data'] = 'Internal server failure' elif message == 'ERR02': pass else: context['snack_data'] = message return context def get_new_oppo_message(oppo_number): if not oppo_number: return '' oppo_number = str(oppo_number).replace('-', '') return 'Opportunity number <span class="imp-num">{0}</span> has been created successfully. Please share this number with the customer.'.format(oppo_number)
def _kubectl_impl(ctx): executable = ctx.actions.declare_file(ctx.attr.name) contents = """ set -o errexit export KUBECTL="{kubectl}" export RESOURCE="{resource}" export NAMESPACE="{namespace}" "{script}" """.format( kubectl = ctx.executable._kubectl.short_path, resource = ctx.file.resource.short_path, namespace = ctx.attr.namespace, script = ctx.executable._script.path, ) ctx.actions.write(executable, contents, is_executable = True) runfiles = [ ctx.executable._kubectl, ctx.executable._script, ctx.file.resource, ] return [DefaultInfo( executable = executable, runfiles = ctx.runfiles(files = runfiles), )] _kubectl_attr = { "_kubectl": attr.label( allow_single_file = True, cfg = "host", default = "@kubectl//:binary", executable = True, ), } _attrs = dict({ "namespace": attr.string( mandatory = False, ), "resource": attr.label( mandatory = True, allow_single_file = True, ), }, **_kubectl_attr) apply = rule( implementation = _kubectl_impl, attrs = dict({ "_script": attr.label( allow_single_file = True, cfg = "host", default = "//rules/kubectl:apply.sh", executable = True, ), }, **_attrs), executable = True, ) delete = rule( implementation = _kubectl_impl, attrs = dict({ "_script": attr.label( allow_single_file = True, cfg = "host", default = "//rules/kubectl:delete.sh", executable = True, ), }, **_attrs), executable = True, ) def _kubectl_patch_impl(ctx): executable = ctx.actions.declare_file(ctx.attr.name) contents = """ set -o errexit export KUBECTL="{kubectl}" export NAMESPACE="{namespace}" export RESOURCE_TYPE="{resource_type}" export RESOURCE_NAME="{resource_name}" export PATCH_TYPE="{patch_type}" export PATCH_FILE="{patch_file}" "{script}" """.format( kubectl = ctx.executable._kubectl.short_path, namespace = ctx.attr.namespace, resource_type = ctx.attr.resource_type, resource_name = ctx.attr.resource_name, patch_type = ctx.attr.patch_type, patch_file = ctx.file.patch_file.short_path, script = ctx.executable._script.path, ) ctx.actions.write(executable, contents, 
is_executable = True) runfiles = [ ctx.executable._kubectl, ctx.executable._script, ctx.file.patch_file, ] return [DefaultInfo( executable = executable, runfiles = ctx.runfiles(files = runfiles), )] patch = rule( implementation = _kubectl_patch_impl, attrs = dict({ "namespace": attr.string( mandatory = False, ), "resource_type": attr.string( mandatory = True, ), "resource_name": attr.string( mandatory = True, ), "patch_type": attr.string( mandatory = True, ), "patch_file": attr.label( mandatory = True, allow_single_file = True, ), "_script": attr.label( allow_single_file = True, cfg = "host", default = "//rules/kubectl:patch.sh", executable = True, ), }, **_kubectl_attr), executable = True, )
def _kubectl_impl(ctx): executable = ctx.actions.declare_file(ctx.attr.name) contents = '\n set -o errexit\n export KUBECTL="{kubectl}"\n export RESOURCE="{resource}"\n export NAMESPACE="{namespace}"\n "{script}"\n '.format(kubectl=ctx.executable._kubectl.short_path, resource=ctx.file.resource.short_path, namespace=ctx.attr.namespace, script=ctx.executable._script.path) ctx.actions.write(executable, contents, is_executable=True) runfiles = [ctx.executable._kubectl, ctx.executable._script, ctx.file.resource] return [default_info(executable=executable, runfiles=ctx.runfiles(files=runfiles))] _kubectl_attr = {'_kubectl': attr.label(allow_single_file=True, cfg='host', default='@kubectl//:binary', executable=True)} _attrs = dict({'namespace': attr.string(mandatory=False), 'resource': attr.label(mandatory=True, allow_single_file=True)}, **_kubectl_attr) apply = rule(implementation=_kubectl_impl, attrs=dict({'_script': attr.label(allow_single_file=True, cfg='host', default='//rules/kubectl:apply.sh', executable=True)}, **_attrs), executable=True) delete = rule(implementation=_kubectl_impl, attrs=dict({'_script': attr.label(allow_single_file=True, cfg='host', default='//rules/kubectl:delete.sh', executable=True)}, **_attrs), executable=True) def _kubectl_patch_impl(ctx): executable = ctx.actions.declare_file(ctx.attr.name) contents = '\n set -o errexit\n export KUBECTL="{kubectl}"\n export NAMESPACE="{namespace}"\n export RESOURCE_TYPE="{resource_type}"\n export RESOURCE_NAME="{resource_name}"\n export PATCH_TYPE="{patch_type}"\n export PATCH_FILE="{patch_file}"\n "{script}"\n '.format(kubectl=ctx.executable._kubectl.short_path, namespace=ctx.attr.namespace, resource_type=ctx.attr.resource_type, resource_name=ctx.attr.resource_name, patch_type=ctx.attr.patch_type, patch_file=ctx.file.patch_file.short_path, script=ctx.executable._script.path) ctx.actions.write(executable, contents, is_executable=True) runfiles = [ctx.executable._kubectl, ctx.executable._script, 
ctx.file.patch_file] return [default_info(executable=executable, runfiles=ctx.runfiles(files=runfiles))] patch = rule(implementation=_kubectl_patch_impl, attrs=dict({'namespace': attr.string(mandatory=False), 'resource_type': attr.string(mandatory=True), 'resource_name': attr.string(mandatory=True), 'patch_type': attr.string(mandatory=True), 'patch_file': attr.label(mandatory=True, allow_single_file=True), '_script': attr.label(allow_single_file=True, cfg='host', default='//rules/kubectl:patch.sh', executable=True)}, **_kubectl_attr), executable=True)
# -*- coding: utf-8 -*- """ Created on Tue Feb 22 14:50:22 2022 @author: Riedel """ # ============================================================================= # # import setuptools # from readreflex import _version # # with open("README.md", "r") as fh: # long_description = fh.read() # # setuptools.setup( # name="readreflexw", # version=_version.version, # author="Paul-Benjamin Riedel", # author_email="riedel@geophysik-ggd.com", # license='MIT', # description="Python library to read and handle data written with the Software reflexW by Sandmeier Geophysical Research", # long_description=long_description, # long_description_content_type="text/markdown", # url="https://github.com/GGDRriedel/readreflexw", # packages=setuptools.find_packages(), # install_requires=['segyio', 'tqdm', 'numpy', 'obspy', 'matplotlib', 'pandas', 'h5py', 'scipy', 'librosa'], # entry_points=''' # [console_scripts] # readreflex=readreflex.readreflex:main # ''', # classifiers=[ # "Programming Language :: Python :: 3", # "License ::MIT # "Operating System :: Windows", # "Framework :: Matplotlib", # "Topic :: Scientific/Engineering :: Physics", # "Intended Audience :: Science/Research", # "Natural Language :: English", # # ], # ) # =============================================================================
""" Created on Tue Feb 22 14:50:22 2022 @author: Riedel """
#!/bin/zsh while True: OldmanAge = input() def AgetoDays(age): result = int(age) * 365 return result print(AgetoDays(OldmanAge)) break
while True: oldman_age = input() def ageto_days(age): result = int(age) * 365 return result print(ageto_days(OldmanAge)) break
class JobDoesNotExist(RuntimeError): """The reduce mode job doesn't exist (set_total has not been run). """ class JobFailed(RuntimeError): """Skip, the reduce mode job has already been marked as failed. """ class ProgressTypeError(TypeError): """Progress argument is not of the correct type"""
class Jobdoesnotexist(RuntimeError): """The reduce mode job doesn't exist (set_total has not been run). """ class Jobfailed(RuntimeError): """Skip, the reduce mode job has already been marked as failed. """ class Progresstypeerror(TypeError): """Progress argument is not of the correct type"""
# -*- coding: utf-8 -*- class Solution: INTEGER_TO_ALPHABET = {n: chr(ord('a') + n - 1) for n in range(1, 27)} def freqAlphabets(self, s: str) -> str: i, result = 0, [] while i < len(s): if i + 2 < len(s) and s[i + 2] == '#': result.append(self.INTEGER_TO_ALPHABET[int(s[i:i + 2])]) i += 3 else: result.append(self.INTEGER_TO_ALPHABET[int(s[i:i + 1])]) i += 1 return ''.join(result) if __name__ == '__main__': solution = Solution() assert 'jkab' == solution.freqAlphabets('10#11#12') assert 'acz' == solution.freqAlphabets('1326#') assert 'y' == solution.freqAlphabets('25#') assert 'abcdefghijklmnopqrstuvwxyz' == solution.freqAlphabets('12345678910#11#12#13#14#15#16#17#18#19#20#21#22#23#24#25#26#')
class Solution: integer_to_alphabet = {n: chr(ord('a') + n - 1) for n in range(1, 27)} def freq_alphabets(self, s: str) -> str: (i, result) = (0, []) while i < len(s): if i + 2 < len(s) and s[i + 2] == '#': result.append(self.INTEGER_TO_ALPHABET[int(s[i:i + 2])]) i += 3 else: result.append(self.INTEGER_TO_ALPHABET[int(s[i:i + 1])]) i += 1 return ''.join(result) if __name__ == '__main__': solution = solution() assert 'jkab' == solution.freqAlphabets('10#11#12') assert 'acz' == solution.freqAlphabets('1326#') assert 'y' == solution.freqAlphabets('25#') assert 'abcdefghijklmnopqrstuvwxyz' == solution.freqAlphabets('12345678910#11#12#13#14#15#16#17#18#19#20#21#22#23#24#25#26#')
def test_get_all_offices(fineract): offices = [office for office in fineract.get_offices()] assert len(offices) == 3 def test_get_single_office(fineract): office = fineract.get_offices(2) assert office assert office.name == 'Merida' def test_get_all_staff(fineract): staff = [staff for staff in fineract.get_staff()] assert len(staff) == 3 def test_get_single_staff(fineract): staff = fineract.get_staff(2) assert staff assert staff.display_name == 'M, Mary'
def test_get_all_offices(fineract): offices = [office for office in fineract.get_offices()] assert len(offices) == 3 def test_get_single_office(fineract): office = fineract.get_offices(2) assert office assert office.name == 'Merida' def test_get_all_staff(fineract): staff = [staff for staff in fineract.get_staff()] assert len(staff) == 3 def test_get_single_staff(fineract): staff = fineract.get_staff(2) assert staff assert staff.display_name == 'M, Mary'
print("\n\t\t-----> Welcome to Matias list changer <-----\t\t\n") listn = [1,2,3,4,5,6,7,8,9,10] print(f"\nThis is the list without changes ===> {listn}\n") listn[4] *= 2 listn[7] *= 2 listn[9] *= 2 print(f"\nThis is the modified list ===> {listn}\n")
print('\n\t\t-----> Welcome to Matias list changer <-----\t\t\n') listn = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] print(f'\nThis is the list without changes ===> {listn}\n') listn[4] *= 2 listn[7] *= 2 listn[9] *= 2 print(f'\nThis is the modified list ===> {listn}\n')
def shiftCalc(n): if n < 0x40: n = n - 1 n = n ^ 0x10 n = n + 1 else: n = n ^ 0x20 return n keys = """ 1 ! 2 " 3 # 4 $ 5 % 6 & 7 ' 8 ( 9 ) 0 @ : * - = ; + , < . > / ? """.replace("\t"," ").split("\n") keys = [x.strip() for x in keys if x.strip() != ""] for i in range(0,26): keys.append("{0} {1}".format(chr(i+97),chr(i+65))) keymap = [] for k in keys: keymap.append([ord(k[0]),ord(k[-1]),k[0],k[-1]]) keymap.sort(key = lambda x:x[0]) for m in keymap: if shiftCalc(m[0]) != m[1]: print("Unshifted {0} Shifted {1} [${2:02x},${3:02x}] offset {4}".format(m[2],m[3],m[0],m[1],m[1]-m[0])) print("${0:02x} ${1:02x} {2}".format(m[0],shiftCalc(m[0]),"ERR" if shiftCalc(m[0]) != m[1] else "ok"))
def shift_calc(n): if n < 64: n = n - 1 n = n ^ 16 n = n + 1 else: n = n ^ 32 return n keys = '\n1\t!\n2\t"\n3\t#\n4\t$\n5\t%\n6\t&\n7\t\'\n8\t(\n9 \t)\n0 \t@\n:\t*\n- \t=\n; \t+\n,\t<\n. \t>\n/\t?\n'.replace('\t', ' ').split('\n') keys = [x.strip() for x in keys if x.strip() != ''] for i in range(0, 26): keys.append('{0} {1}'.format(chr(i + 97), chr(i + 65))) keymap = [] for k in keys: keymap.append([ord(k[0]), ord(k[-1]), k[0], k[-1]]) keymap.sort(key=lambda x: x[0]) for m in keymap: if shift_calc(m[0]) != m[1]: print('Unshifted {0} Shifted {1} [${2:02x},${3:02x}] offset {4}'.format(m[2], m[3], m[0], m[1], m[1] - m[0])) print('${0:02x} ${1:02x} {2}'.format(m[0], shift_calc(m[0]), 'ERR' if shift_calc(m[0]) != m[1] else 'ok'))
class PizzaDelivery: def __init__(self, name, price, ingredients): self.name = name self.price = price self.ingredients = ingredients self.ordered = False def add_extra(self, ingredient, quantity, price_per_ingredient): if self.ordered: return f"Pizza {self.name} already prepared, and we can't make any changes!" if ingredient in self.ingredients: self.ingredients[ingredient] += quantity else: self.ingredients[ingredient] = quantity self.price += price_per_ingredient * quantity def remove_ingredient(self, ingredient, quantity, price_per_ingredient): if self.ordered: return f"Pizza {self.name} already prepared, and we can't make any changes!" if ingredient not in self.ingredients: return f"Wrong ingredient selected! We do not use {ingredient} in {self.name}!" if self.ingredients[ingredient] < quantity: return f"Please check again the desired quantity of {ingredient}!" self.ingredients[ingredient] -= quantity self.price -= quantity * price_per_ingredient def make_order(self): if not self.ordered: self.ordered = True ingredients = ', '.join([f'{key}: {value}' for key, value in self.ingredients.items()]) return f"You've ordered pizza {self.name} prepared with {ingredients} and the price will be {self.price}lv." return f"Pizza {self.name} already prepared, and we can't make any changes!" margarita = PizzaDelivery('Margarita', 11, {'cheese': 2, 'tomatoes': 1}) margarita.add_extra('mozzarella', 1, 0.5) margarita.add_extra('cheese', 1, 1) margarita.remove_ingredient('cheese', 1, 1) print(margarita.remove_ingredient('bacon', 1, 2.5)) print(margarita.remove_ingredient('tomatoes', 2, 0.5)) margarita.remove_ingredient('cheese', 2, 1) print(margarita.make_order()) print(margarita.add_extra('cheese', 1, 1))
class Pizzadelivery: def __init__(self, name, price, ingredients): self.name = name self.price = price self.ingredients = ingredients self.ordered = False def add_extra(self, ingredient, quantity, price_per_ingredient): if self.ordered: return f"Pizza {self.name} already prepared, and we can't make any changes!" if ingredient in self.ingredients: self.ingredients[ingredient] += quantity else: self.ingredients[ingredient] = quantity self.price += price_per_ingredient * quantity def remove_ingredient(self, ingredient, quantity, price_per_ingredient): if self.ordered: return f"Pizza {self.name} already prepared, and we can't make any changes!" if ingredient not in self.ingredients: return f'Wrong ingredient selected! We do not use {ingredient} in {self.name}!' if self.ingredients[ingredient] < quantity: return f'Please check again the desired quantity of {ingredient}!' self.ingredients[ingredient] -= quantity self.price -= quantity * price_per_ingredient def make_order(self): if not self.ordered: self.ordered = True ingredients = ', '.join([f'{key}: {value}' for (key, value) in self.ingredients.items()]) return f"You've ordered pizza {self.name} prepared with {ingredients} and the price will be {self.price}lv." return f"Pizza {self.name} already prepared, and we can't make any changes!" margarita = pizza_delivery('Margarita', 11, {'cheese': 2, 'tomatoes': 1}) margarita.add_extra('mozzarella', 1, 0.5) margarita.add_extra('cheese', 1, 1) margarita.remove_ingredient('cheese', 1, 1) print(margarita.remove_ingredient('bacon', 1, 2.5)) print(margarita.remove_ingredient('tomatoes', 2, 0.5)) margarita.remove_ingredient('cheese', 2, 1) print(margarita.make_order()) print(margarita.add_extra('cheese', 1, 1))
## CITIES cities = ['London', 'Constantinople', 'Sydney', 'Leningrad', 'Peking'] # Use bracket notation to change: # Constantinople to Istanbul # Leningrad to Saint Petersburg # Peking to Beijing cities[1] = 'Istanbul' cities[3] = 'Saint Petersburg' cities[4] = 'Beijing' ## DINOSAURS dinos = ['Tyrannosaurus rex', 'Torosaurus', 'Stegosaurus', 'Brontosaurus'] # Use bracket notation to change: # Torosaurus to Triceratops # Brontosaurus to Apatosaurus dinos[1] = 'Triceratops' dinos[3] = 'Apatosaurus' ## PLANETS planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune', 'Pluto'] # Remove the last planet using a list method # and store it in this variable: not_actually_a_planet = planets.pop() ## GYMNASTS team_usa = ['Simone Biles'] original_length = len(team_usa) # Use a list method to add more gymnasts to the list: # Sunisa Lee, Jordan Chiles, Grace McCallum, MyKayla Skinner team_usa.append("Sunisa Lee") team_usa.append("Jordan Chiles") team_usa.append("Grace McCallum") team_usa.append("MyKayla Skinner") # When you're done, there should be 5 gymnasts in the list new_length = len(team_usa)
cities = ['London', 'Constantinople', 'Sydney', 'Leningrad', 'Peking'] cities[1] = 'Istanbul' cities[3] = 'Saint Petersburg' cities[4] = 'Beijing' dinos = ['Tyrannosaurus rex', 'Torosaurus', 'Stegosaurus', 'Brontosaurus'] dinos[1] = 'Triceratops' dinos[3] = 'Apatosaurus' planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune', 'Pluto'] not_actually_a_planet = planets.pop() team_usa = ['Simone Biles'] original_length = len(team_usa) team_usa.append('Sunisa Lee') team_usa.append('Jordan Chiles') team_usa.append('Grace McCallum') team_usa.append('MyKayla Skinner') new_length = len(team_usa)
class Constants: FW_VERSION = 4 LIGHT_TRANSITION_DURATION = 320 FAST_RECONNECT_WAIT_TIME_BEFORE_RESETING_LIGHTS = 5 HEARTBEAT_INTERVAL = 120 HEARTBEAT_MAX_RESPONSE_TIME = 5
class Constants: fw_version = 4 light_transition_duration = 320 fast_reconnect_wait_time_before_reseting_lights = 5 heartbeat_interval = 120 heartbeat_max_response_time = 5
class GoogleAnalyticsClientError(Exception): """ General Google Analytics error (error accessing GA) """ def __init__(self, reason): self.reason = reason def __repr__(self): return 'GAError: %s' % self.reason def __str__(self): return 'GAError: %s' % self.reason
class Googleanalyticsclienterror(Exception): """ General Google Analytics error (error accessing GA) """ def __init__(self, reason): self.reason = reason def __repr__(self): return 'GAError: %s' % self.reason def __str__(self): return 'GAError: %s' % self.reason
class UI_Draw: def draw(self, context): layout = self.layout layout.prop(self, 'auto_presets') layout.separator() split = layout.split() split.prop(self, 'handle', text='Handle') col = split.column(align=True) col.prop(self, 'handle_z_top', text='Top') if self.shape_rnd or self.shape_sq: col.prop(self, 'handle_l_size', text='Size') else: col.prop(self, 'handle_l_size', text='Length') col.prop(self, 'handle_w_size', text='Width') col.prop(self, 'handle_z_btm', text='Bottom') layout.separator() split = layout.split() split.label('Girdle') col = split.column(align=True) col.prop(self, 'girdle_z_top', text='Top') if not self.shape_tri: col.prop(self, 'girdle_l_ofst', text='Size Offset') else: col.prop(self, 'girdle_l_ofst', text='Length Offset') col.prop(self, 'girdle_w_ofst', text='Width Offset') col.prop(self, 'girdle_z_btm', text='Bottom') layout.separator() split = layout.split() split.prop(self, 'hole', text='Hole') col = split.column(align=True) col.prop(self, 'hole_z_top', text='Top/Culet') if self.shape_rnd or self.shape_sq: col.prop(self, 'hole_l_size', text='Size') else: col.prop(self, 'hole_l_size', text='Length') col.prop(self, 'hole_w_size', text='Width') col.prop(self, 'hole_z_btm', text='Bottom') if self.shape_fant and self.cut in {'PEAR', 'HEART'}: col.prop(self, 'hole_pos_ofst', text='Position Offset') if not self.shape_rnd: layout.separator() split = layout.split() split.prop(self, 'curve_seat', text='Curve Seat') col = split.column(align=True) col.prop(self, 'curve_seat_segments', text='Segments') col.prop(self, 'curve_seat_profile', text='Profile') if self.shape_tri: layout.separator() split = layout.split() split.prop(self, 'curve_profile', text='Curve Profile') col = split.column(align=True) col.prop(self, 'curve_profile_segments', text='Segments') col.prop(self, 'curve_profile_factor', text='Factor') elif self.cut == 'MARQUISE': layout.separator() split = layout.split() split.label('Profile') col = split.column(align=True) col.prop(self, 'mul_1', 
text='Factor 1') col.prop(self, 'mul_2', text='Factor 2') if not self.shape_fant: layout.separator() split = layout.split() split.prop(self, 'bevel_corners', text='Bevel Corners') col = split.column(align=True) if self.shape_rect: col.prop(self, 'bevel_corners_width', text='Width') else: col.prop(self, 'bevel_corners_percent', text='Width') col.prop(self, 'bevel_corners_segments', text='Segments') col.prop(self, 'bevel_corners_profile', text='Profile') if self.shape_rnd or self.cut in {'OVAL', 'MARQUISE'}: layout.separator() split = layout.split() split.label('Detalization') split.prop(self, 'detalization', text='')
class Ui_Draw:
    """UI mix-in: renders the cutter settings in the operator's redo panel.

    Relies on the host operator exposing the referenced properties
    (handle/girdle/hole dimensions, shape_* flags, `cut`).
    """

    def draw(self, context):
        lay = self.layout
        lay.prop(self, 'auto_presets')

        def section():
            # Each settings group starts with a separator followed by a
            # two-column split (left: toggle/label, right: values).
            lay.separator()
            return lay.split()

        # Handle
        row = section()
        row.prop(self, 'handle', text='Handle')
        col = row.column(align=True)
        col.prop(self, 'handle_z_top', text='Top')
        if self.shape_rnd or self.shape_sq:
            col.prop(self, 'handle_l_size', text='Size')
        else:
            col.prop(self, 'handle_l_size', text='Length')
            col.prop(self, 'handle_w_size', text='Width')
        col.prop(self, 'handle_z_btm', text='Bottom')

        # Girdle
        row = section()
        row.label('Girdle')
        col = row.column(align=True)
        col.prop(self, 'girdle_z_top', text='Top')
        if not self.shape_tri:
            col.prop(self, 'girdle_l_ofst', text='Size Offset')
        else:
            col.prop(self, 'girdle_l_ofst', text='Length Offset')
            col.prop(self, 'girdle_w_ofst', text='Width Offset')
        col.prop(self, 'girdle_z_btm', text='Bottom')

        # Hole
        row = section()
        row.prop(self, 'hole', text='Hole')
        col = row.column(align=True)
        col.prop(self, 'hole_z_top', text='Top/Culet')
        if self.shape_rnd or self.shape_sq:
            col.prop(self, 'hole_l_size', text='Size')
        else:
            col.prop(self, 'hole_l_size', text='Length')
            col.prop(self, 'hole_w_size', text='Width')
        col.prop(self, 'hole_z_btm', text='Bottom')
        if self.shape_fant and self.cut in {'PEAR', 'HEART'}:
            col.prop(self, 'hole_pos_ofst', text='Position Offset')

        # Curve Seat — every shape except round
        if not self.shape_rnd:
            row = section()
            row.prop(self, 'curve_seat', text='Curve Seat')
            col = row.column(align=True)
            col.prop(self, 'curve_seat_segments', text='Segments')
            col.prop(self, 'curve_seat_profile', text='Profile')

        # Shape-specific profile settings
        if self.shape_tri:
            row = section()
            row.prop(self, 'curve_profile', text='Curve Profile')
            col = row.column(align=True)
            col.prop(self, 'curve_profile_segments', text='Segments')
            col.prop(self, 'curve_profile_factor', text='Factor')
        elif self.cut == 'MARQUISE':
            row = section()
            row.label('Profile')
            col = row.column(align=True)
            col.prop(self, 'mul_1', text='Factor 1')
            col.prop(self, 'mul_2', text='Factor 2')

        # Bevel Corners — not applicable to fantasy shapes
        if not self.shape_fant:
            row = section()
            row.prop(self, 'bevel_corners', text='Bevel Corners')
            col = row.column(align=True)
            if self.shape_rect:
                col.prop(self, 'bevel_corners_width', text='Width')
            else:
                col.prop(self, 'bevel_corners_percent', text='Width')
            col.prop(self, 'bevel_corners_segments', text='Segments')
            col.prop(self, 'bevel_corners_profile', text='Profile')

        # Detalization — curved outlines only
        if self.shape_rnd or self.cut in {'OVAL', 'MARQUISE'}:
            row = section()
            row.label('Detalization')
            row.prop(self, 'detalization', text='')
""" Basic Configurations """ def configs(): configs_dict = {} configs_dict['cdsign'] = '/' configs_dict['path_root'] = './VQA_Project/' configs_dict['path_datasets'] = configs_dict['path_root'] + 'Datasets' + configs_dict['cdsign'] configs_dict['datasets'] = {} ##################################################################### VQAv1 ######################################################################### configs_dict['datasets']['VQAv1'] = {} configs_dict['datasets']['VQAv1']['Train'] = {} configs_dict['datasets']['VQAv1']['Train']['Questions'] = {} configs_dict['datasets']['VQAv1']['Train']['Questions']['File'] = 'OpenEnded_mscoco_train2014_questions.json' configs_dict['datasets']['VQAv1']['Train']['Questions']['Zip_File'] = 'Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Train']['Questions']['File'] configs_dict['datasets']['VQAv1']['Train']['Annotations'] = {} configs_dict['datasets']['VQAv1']['Train']['Annotations']['File'] = 'mscoco_train2014_annotations.json' configs_dict['datasets']['VQAv1']['Train']['Annotations']['Zip_File'] = 'Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Train']['Annotations']['File'] configs_dict['datasets']['VQAv1']['Train']['Images'] = {} configs_dict['datasets']['VQAv1']['Train']['Images']['File'] = 'train2014' 
configs_dict['datasets']['VQAv1']['Train']['Images']['Zip_File'] = 'train2014.zip' configs_dict['datasets']['VQAv1']['Train']['Images']['Link'] = 'http://images.cocodataset.org/zips/train2014.zip' configs_dict['datasets']['VQAv1']['Train']['Images']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Train']['Images']['File'] + configs_dict['cdsign'] configs_dict['datasets']['VQAv1']['Val'] = {} configs_dict['datasets']['VQAv1']['Val']['Questions'] = {} configs_dict['datasets']['VQAv1']['Val']['Questions']['File'] = 'OpenEnded_mscoco_val2014_questions.json' configs_dict['datasets']['VQAv1']['Val']['Questions']['Zip_File'] = 'Questions_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Questions_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Val']['Questions']['File'] configs_dict['datasets']['VQAv1']['Val']['Annotations'] = {} configs_dict['datasets']['VQAv1']['Val']['Annotations']['File'] = 'mscoco_val2014_annotations.json' configs_dict['datasets']['VQAv1']['Val']['Annotations']['Zip_File'] = 'Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Val']['Annotations']['File'] configs_dict['datasets']['VQAv1']['Val']['Images'] = {} configs_dict['datasets']['VQAv1']['Val']['Images']['File'] = 'val2014' configs_dict['datasets']['VQAv1']['Val']['Images']['Zip_File'] = 'val2014.zip' 
configs_dict['datasets']['VQAv1']['Val']['Images']['Link'] = 'http://images.cocodataset.org/zips/val2014.zip' configs_dict['datasets']['VQAv1']['Val']['Images']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Val']['Images']['File'] + configs_dict['cdsign'] ################################################################################################################################################### ##################################################################### VQAv2 ######################################################################### configs_dict['datasets']['VQAv2'] = {} configs_dict['datasets']['VQAv2']['Train'] = {} configs_dict['datasets']['VQAv2']['Train']['Questions'] = {} configs_dict['datasets']['VQAv2']['Train']['Questions']['File'] = 'v2_OpenEnded_mscoco_train2014_questions.json' configs_dict['datasets']['VQAv2']['Train']['Questions']['Zip_File'] = 'v2_Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv2']['Train']['Questions']['File'] configs_dict['datasets']['VQAv2']['Train']['Annotations'] = {} configs_dict['datasets']['VQAv2']['Train']['Annotations']['File'] = 'v2_mscoco_train2014_annotations.json' configs_dict['datasets']['VQAv2']['Train']['Annotations']['Zip_File'] = 'v2_Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + 
configs_dict['datasets']['VQAv2']['Train']['Annotations']['File'] configs_dict['datasets']['VQAv2']['Train']['Images'] = configs_dict['datasets']['VQAv1']['Train']['Images'] configs_dict['datasets']['VQAv2']['Val'] = {} configs_dict['datasets']['VQAv2']['Val']['Questions'] = {} configs_dict['datasets']['VQAv2']['Val']['Questions']['File'] = 'v2_OpenEnded_mscoco_val2014_questions.json' configs_dict['datasets']['VQAv2']['Val']['Questions']['Zip_File'] = 'v2_Questions_Val_mscoco.zip' configs_dict['datasets']['VQAv2']['Val']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Val_mscoco.zip' configs_dict['datasets']['VQAv2']['Val']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv2']['Val']['Questions']['File'] configs_dict['datasets']['VQAv2']['Val']['Annotations'] = {} configs_dict['datasets']['VQAv2']['Val']['Annotations']['File'] = 'v2_mscoco_val2014_annotations.json' configs_dict['datasets']['VQAv2']['Val']['Annotations']['Zip_File'] = 'v2_Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv2']['Val']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv2']['Val']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv2']['Val']['Annotations']['File'] configs_dict['datasets']['VQAv2']['Val']['Images'] = configs_dict['datasets']['VQAv1']['Val']['Images'] ################################################################################################################################################### ##################################################################### VGv1.0 ######################################################################### configs_dict['datasets']['VGv1.0'] = {} configs_dict['datasets']['VGv1.0']['Images'] = {} 
configs_dict['datasets']['VGv1.0']['Images']['Part_1'] = {} configs_dict['datasets']['VGv1.0']['Images']['Part_1']['File'] = 'VG_100K' configs_dict['datasets']['VGv1.0']['Images']['Part_1']['Zip_File'] = 'images.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_1']['Link'] = 'https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_1']['Path'] = configs_dict['path_datasets'] + 'VGv1.0' + configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VGv1.0']['Images']['Part_1']['File'] + configs_dict['cdsign'] configs_dict['datasets']['VGv1.0']['Images']['Part_2'] = {} configs_dict['datasets']['VGv1.0']['Images']['Part_2']['File'] = 'VG_100K_2' configs_dict['datasets']['VGv1.0']['Images']['Part_2']['Zip_File'] = 'images2.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_2']['Link'] = 'https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_2']['Path'] = configs_dict['path_datasets'] + 'VGv1.0' + configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VGv1.0']['Images']['Part_2']['File'] + configs_dict['cdsign'] configs_dict['datasets']['VGv1.0']['Questions_Answers'] = {} configs_dict['datasets']['VGv1.0']['Questions_Answers']['File'] = 'question_answers.json' configs_dict['datasets']['VGv1.0']['Questions_Answers']['Zip_File'] = 'question_answers.json.zip' configs_dict['datasets']['VGv1.0']['Questions_Answers']['Link'] = 'https://visualgenome.org/static/data/dataset/question_answers.json.zip' configs_dict['datasets']['VGv1.0']['Questions_Answers']['Path'] = configs_dict['path_datasets'] + 'VGv1.0' + configs_dict['cdsign'] + 'Questions_Answers' + configs_dict['cdsign'] + configs_dict['datasets']['VGv1.0']['Questions_Answers']['File'] ####################################################### VGv1.2##### ############################################################################### 
configs_dict['datasets']['VGv1.2'] = configs_dict['datasets']['VGv1.0'] ################################################################################################################################################### configs_dict['chosen_datasets_list'] = ['VQAv1'] #['VQAv1', 'VQAv2', 'VGv1.0', 'VGv1.2'] configs_dict['chosen_datasets_str'] = '_'.join(configs_dict['chosen_datasets_list']) configs_dict['image_model'] = 'YOLOv4-448-1024' # ['ResNet152-448-2048', 'YOLOv4-448-1024'] configs_dict['random_seed'] = 100 configs_dict['path_image_model'] = configs_dict['path_root'] + 'Image_Models' + configs_dict['cdsign'] + configs_dict['image_model'] + configs_dict['cdsign'] configs_dict['path_images_features'] = configs_dict['path_image_model'] + 'Images_Features' + configs_dict['cdsign'] configs_dict['path_histories'] = configs_dict['path_root'] + 'Histories' + configs_dict['cdsign'] + configs_dict['chosen_datasets_str'] + '_' + configs_dict['image_model'] + configs_dict['cdsign'] configs_dict['path_plots'] = configs_dict['path_root'] + 'Plots' + configs_dict['cdsign'] + configs_dict['chosen_datasets_str'] + '_' + configs_dict['image_model'] + configs_dict['cdsign'] configs_dict['path_test_images'] = configs_dict['path_root'] + 'Test_Images' + configs_dict['cdsign'] configs_dict['val_percent'] = 0.10 configs_dict['train_percent'] = 1 - configs_dict['val_percent'] configs_dict['que_max_length'] = 17 configs_dict['num_tokens'] = 10000 # + 1 'UNK' token return configs_dict
""" Basic Configurations """ def configs(): configs_dict = {} configs_dict['cdsign'] = '/' configs_dict['path_root'] = './VQA_Project/' configs_dict['path_datasets'] = configs_dict['path_root'] + 'Datasets' + configs_dict['cdsign'] configs_dict['datasets'] = {} configs_dict['datasets']['VQAv1'] = {} configs_dict['datasets']['VQAv1']['Train'] = {} configs_dict['datasets']['VQAv1']['Train']['Questions'] = {} configs_dict['datasets']['VQAv1']['Train']['Questions']['File'] = 'OpenEnded_mscoco_train2014_questions.json' configs_dict['datasets']['VQAv1']['Train']['Questions']['Zip_File'] = 'Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Train']['Questions']['File'] configs_dict['datasets']['VQAv1']['Train']['Annotations'] = {} configs_dict['datasets']['VQAv1']['Train']['Annotations']['File'] = 'mscoco_train2014_annotations.json' configs_dict['datasets']['VQAv1']['Train']['Annotations']['Zip_File'] = 'Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv1']['Train']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Train']['Annotations']['File'] configs_dict['datasets']['VQAv1']['Train']['Images'] = {} configs_dict['datasets']['VQAv1']['Train']['Images']['File'] = 'train2014' configs_dict['datasets']['VQAv1']['Train']['Images']['Zip_File'] = 'train2014.zip' configs_dict['datasets']['VQAv1']['Train']['Images']['Link'] = 'http://images.cocodataset.org/zips/train2014.zip' 
configs_dict['datasets']['VQAv1']['Train']['Images']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Train']['Images']['File'] + configs_dict['cdsign'] configs_dict['datasets']['VQAv1']['Val'] = {} configs_dict['datasets']['VQAv1']['Val']['Questions'] = {} configs_dict['datasets']['VQAv1']['Val']['Questions']['File'] = 'OpenEnded_mscoco_val2014_questions.json' configs_dict['datasets']['VQAv1']['Val']['Questions']['Zip_File'] = 'Questions_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Questions_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Val']['Questions']['File'] configs_dict['datasets']['VQAv1']['Val']['Annotations'] = {} configs_dict['datasets']['VQAv1']['Val']['Annotations']['File'] = 'mscoco_val2014_annotations.json' configs_dict['datasets']['VQAv1']['Val']['Annotations']['Zip_File'] = 'Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv1']['Val']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Val']['Annotations']['File'] configs_dict['datasets']['VQAv1']['Val']['Images'] = {} configs_dict['datasets']['VQAv1']['Val']['Images']['File'] = 'val2014' configs_dict['datasets']['VQAv1']['Val']['Images']['Zip_File'] = 'val2014.zip' configs_dict['datasets']['VQAv1']['Val']['Images']['Link'] = 'http://images.cocodataset.org/zips/val2014.zip' configs_dict['datasets']['VQAv1']['Val']['Images']['Path'] = configs_dict['path_datasets'] + 'VQAv1' + 
configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv1']['Val']['Images']['File'] + configs_dict['cdsign'] configs_dict['datasets']['VQAv2'] = {} configs_dict['datasets']['VQAv2']['Train'] = {} configs_dict['datasets']['VQAv2']['Train']['Questions'] = {} configs_dict['datasets']['VQAv2']['Train']['Questions']['File'] = 'v2_OpenEnded_mscoco_train2014_questions.json' configs_dict['datasets']['VQAv2']['Train']['Questions']['Zip_File'] = 'v2_Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv2']['Train']['Questions']['File'] configs_dict['datasets']['VQAv2']['Train']['Annotations'] = {} configs_dict['datasets']['VQAv2']['Train']['Annotations']['File'] = 'v2_mscoco_train2014_annotations.json' configs_dict['datasets']['VQAv2']['Train']['Annotations']['Zip_File'] = 'v2_Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Train_mscoco.zip' configs_dict['datasets']['VQAv2']['Train']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv2']['Train']['Annotations']['File'] configs_dict['datasets']['VQAv2']['Train']['Images'] = configs_dict['datasets']['VQAv1']['Train']['Images'] configs_dict['datasets']['VQAv2']['Val'] = {} configs_dict['datasets']['VQAv2']['Val']['Questions'] = {} configs_dict['datasets']['VQAv2']['Val']['Questions']['File'] = 'v2_OpenEnded_mscoco_val2014_questions.json' configs_dict['datasets']['VQAv2']['Val']['Questions']['Zip_File'] = 'v2_Questions_Val_mscoco.zip' 
configs_dict['datasets']['VQAv2']['Val']['Questions']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Val_mscoco.zip' configs_dict['datasets']['VQAv2']['Val']['Questions']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Questions' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv2']['Val']['Questions']['File'] configs_dict['datasets']['VQAv2']['Val']['Annotations'] = {} configs_dict['datasets']['VQAv2']['Val']['Annotations']['File'] = 'v2_mscoco_val2014_annotations.json' configs_dict['datasets']['VQAv2']['Val']['Annotations']['Zip_File'] = 'v2_Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv2']['Val']['Annotations']['Link'] = 'https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Val_mscoco.zip' configs_dict['datasets']['VQAv2']['Val']['Annotations']['Path'] = configs_dict['path_datasets'] + 'VQAv2' + configs_dict['cdsign'] + 'Annotations' + configs_dict['cdsign'] + configs_dict['datasets']['VQAv2']['Val']['Annotations']['File'] configs_dict['datasets']['VQAv2']['Val']['Images'] = configs_dict['datasets']['VQAv1']['Val']['Images'] configs_dict['datasets']['VGv1.0'] = {} configs_dict['datasets']['VGv1.0']['Images'] = {} configs_dict['datasets']['VGv1.0']['Images']['Part_1'] = {} configs_dict['datasets']['VGv1.0']['Images']['Part_1']['File'] = 'VG_100K' configs_dict['datasets']['VGv1.0']['Images']['Part_1']['Zip_File'] = 'images.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_1']['Link'] = 'https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_1']['Path'] = configs_dict['path_datasets'] + 'VGv1.0' + configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VGv1.0']['Images']['Part_1']['File'] + configs_dict['cdsign'] configs_dict['datasets']['VGv1.0']['Images']['Part_2'] = {} configs_dict['datasets']['VGv1.0']['Images']['Part_2']['File'] = 'VG_100K_2' 
configs_dict['datasets']['VGv1.0']['Images']['Part_2']['Zip_File'] = 'images2.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_2']['Link'] = 'https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip' configs_dict['datasets']['VGv1.0']['Images']['Part_2']['Path'] = configs_dict['path_datasets'] + 'VGv1.0' + configs_dict['cdsign'] + 'Images' + configs_dict['cdsign'] + configs_dict['datasets']['VGv1.0']['Images']['Part_2']['File'] + configs_dict['cdsign'] configs_dict['datasets']['VGv1.0']['Questions_Answers'] = {} configs_dict['datasets']['VGv1.0']['Questions_Answers']['File'] = 'question_answers.json' configs_dict['datasets']['VGv1.0']['Questions_Answers']['Zip_File'] = 'question_answers.json.zip' configs_dict['datasets']['VGv1.0']['Questions_Answers']['Link'] = 'https://visualgenome.org/static/data/dataset/question_answers.json.zip' configs_dict['datasets']['VGv1.0']['Questions_Answers']['Path'] = configs_dict['path_datasets'] + 'VGv1.0' + configs_dict['cdsign'] + 'Questions_Answers' + configs_dict['cdsign'] + configs_dict['datasets']['VGv1.0']['Questions_Answers']['File'] configs_dict['datasets']['VGv1.2'] = configs_dict['datasets']['VGv1.0'] configs_dict['chosen_datasets_list'] = ['VQAv1'] configs_dict['chosen_datasets_str'] = '_'.join(configs_dict['chosen_datasets_list']) configs_dict['image_model'] = 'YOLOv4-448-1024' configs_dict['random_seed'] = 100 configs_dict['path_image_model'] = configs_dict['path_root'] + 'Image_Models' + configs_dict['cdsign'] + configs_dict['image_model'] + configs_dict['cdsign'] configs_dict['path_images_features'] = configs_dict['path_image_model'] + 'Images_Features' + configs_dict['cdsign'] configs_dict['path_histories'] = configs_dict['path_root'] + 'Histories' + configs_dict['cdsign'] + configs_dict['chosen_datasets_str'] + '_' + configs_dict['image_model'] + configs_dict['cdsign'] configs_dict['path_plots'] = configs_dict['path_root'] + 'Plots' + configs_dict['cdsign'] + configs_dict['chosen_datasets_str'] + '_' + 
configs_dict['image_model'] + configs_dict['cdsign'] configs_dict['path_test_images'] = configs_dict['path_root'] + 'Test_Images' + configs_dict['cdsign'] configs_dict['val_percent'] = 0.1 configs_dict['train_percent'] = 1 - configs_dict['val_percent'] configs_dict['que_max_length'] = 17 configs_dict['num_tokens'] = 10000 return configs_dict
TWITTER_DIR = "twitter_data" TWITTER_RAW_DIR = "raw" TWITTER_PARQUET_DIR = "parquet" TWITTER_RAW_SCHEMA = "twitter_schema.json"
# Twitter data store locations (lower-case variant).
# NOTE(review): module-level constants are conventionally UPPER_SNAKE_CASE;
# renaming would break existing importers, so the names are left unchanged.
twitter_dir = 'twitter_data'  # root directory for all Twitter data
twitter_parquet_dir = 'parquet'  # parquet output (presumably under twitter_dir — TODO confirm)
twitter_raw_dir = 'raw'  # raw dumps (same nesting assumption)
twitter_raw_schema = 'twitter_schema.json'  # schema file for the raw records
# Read a 2-row integer matrix from stdin (one whitespace-separated row per
# line) and report the 1-based position and value of its largest element.
matriz = [list(map(int, input().split())) for _ in range(2)]

linhaMaior, colMaior = 0, 0
for i, linha in enumerate(matriz):
    for j, valor in enumerate(linha):
        if valor > matriz[linhaMaior][colMaior]:
            linhaMaior, colMaior = i, j

print(f"Maior elemento: M[{linhaMaior + 1}][{colMaior + 1}] = {matriz[linhaMaior][colMaior]}")
# Read a 2-row integer matrix from stdin (one whitespace-separated row per
# line) and report the 1-based position and value of its largest element.
matriz = []
for i in range(2):
    matriz.append(list(map(int, input().split())))

linha_maior, col_maior = 0, 0
for linha in range(len(matriz)):
    for elemento in range(len(matriz[linha])):
        # BUG FIX: this copy assigned linha_maior/col_maior but compared
        # (and printed) the undefined camelCase names linhaMaior/colMaior,
        # which raised NameError on the very first comparison.
        if matriz[linha][elemento] > matriz[linha_maior][col_maior]:
            linha_maior, col_maior = linha, elemento

print(f'Maior elemento: M[{linha_maior + 1}][{col_maior + 1}] = {matriz[linha_maior][col_maior]}')
SLIP39_WORDS = [ "academic", "acid", "acne", "acquire", "acrobat", "activity", "actress", "adapt", "adequate", "adjust", "admit", "adorn", "adult", "advance", "advocate", "afraid", "again", "agency", "agree", "aide", "aircraft", "airline", "airport", "ajar", "alarm", "album", "alcohol", "alien", "alive", "alpha", "already", "alto", "aluminum", "always", "amazing", "ambition", "amount", "amuse", "analysis", "anatomy", "ancestor", "ancient", "angel", "angry", "animal", "answer", "antenna", "anxiety", "apart", "aquatic", "arcade", "arena", "argue", "armed", "artist", "artwork", "aspect", "auction", "august", "aunt", "average", "aviation", "avoid", "award", "away", "axis", "axle", "beam", "beard", "beaver", "become", "bedroom", "behavior", "being", "believe", "belong", "benefit", "best", "beyond", "bike", "biology", "birthday", "bishop", "black", "blanket", "blessing", "blimp", "blind", "blue", "body", "bolt", "boring", "born", "both", "boundary", "bracelet", "branch", "brave", "breathe", "briefing", "broken", "brother", "browser", "bucket", "budget", "building", "bulb", "bulge", "bumpy", "bundle", "burden", "burning", "busy", "buyer", "cage", "calcium", "camera", "campus", "canyon", "capacity", "capital", "capture", "carbon", "cards", "careful", "cargo", "carpet", "carve", "category", "cause", "ceiling", "center", "ceramic", "champion", "change", "charity", "check", "chemical", "chest", "chew", "chubby", "cinema", "civil", "class", "clay", "cleanup", "client", "climate", "clinic", "clock", "clogs", "closet", "clothes", "club", "cluster", "coal", "coastal", "coding", "column", "company", "corner", "costume", "counter", "course", "cover", "cowboy", "cradle", "craft", "crazy", "credit", "cricket", "criminal", "crisis", "critical", "crowd", "crucial", "crunch", "crush", "crystal", "cubic", "cultural", "curious", "curly", "custody", "cylinder", "daisy", "damage", "dance", "darkness", "database", "daughter", "deadline", "deal", "debris", "debut", "decent", "decision", 
"declare", "decorate", "decrease", "deliver", "demand", "density", "deny", "depart", "depend", "depict", "deploy", "describe", "desert", "desire", "desktop", "destroy", "detailed", "detect", "device", "devote", "diagnose", "dictate", "diet", "dilemma", "diminish", "dining", "diploma", "disaster", "discuss", "disease", "dish", "dismiss", "display", "distance", "dive", "divorce", "document", "domain", "domestic", "dominant", "dough", "downtown", "dragon", "dramatic", "dream", "dress", "drift", "drink", "drove", "drug", "dryer", "duckling", "duke", "duration", "dwarf", "dynamic", "early", "earth", "easel", "easy", "echo", "eclipse", "ecology", "edge", "editor", "educate", "either", "elbow", "elder", "election", "elegant", "element", "elephant", "elevator", "elite", "else", "email", "emerald", "emission", "emperor", "emphasis", "employer", "empty", "ending", "endless", "endorse", "enemy", "energy", "enforce", "engage", "enjoy", "enlarge", "entrance", "envelope", "envy", "epidemic", "episode", "equation", "equip", "eraser", "erode", "escape", "estate", "estimate", "evaluate", "evening", "evidence", "evil", "evoke", "exact", "example", "exceed", "exchange", "exclude", "excuse", "execute", "exercise", "exhaust", "exotic", "expand", "expect", "explain", "express", "extend", "extra", "eyebrow", "facility", "fact", "failure", "faint", "fake", "false", "family", "famous", "fancy", "fangs", "fantasy", "fatal", "fatigue", "favorite", "fawn", "fiber", "fiction", "filter", "finance", "findings", "finger", "firefly", "firm", "fiscal", "fishing", "fitness", "flame", "flash", "flavor", "flea", "flexible", "flip", "float", "floral", "fluff", "focus", "forbid", "force", "forecast", "forget", "formal", "fortune", "forward", "founder", "fraction", "fragment", "frequent", "freshman", "friar", "fridge", "friendly", "frost", "froth", "frozen", "fumes", "funding", "furl", "fused", "galaxy", "game", "garbage", "garden", "garlic", "gasoline", "gather", "general", "genius", "genre", "genuine", 
"geology", "gesture", "glad", "glance", "glasses", "glen", "glimpse", "goat", "golden", "graduate", "grant", "grasp", "gravity", "gray", "greatest", "grief", "grill", "grin", "grocery", "gross", "group", "grownup", "grumpy", "guard", "guest", "guilt", "guitar", "gums", "hairy", "hamster", "hand", "hanger", "harvest", "have", "havoc", "hawk", "hazard", "headset", "health", "hearing", "heat", "helpful", "herald", "herd", "hesitate", "hobo", "holiday", "holy", "home", "hormone", "hospital", "hour", "huge", "human", "humidity", "hunting", "husband", "hush", "husky", "hybrid", "idea", "identify", "idle", "image", "impact", "imply", "improve", "impulse", "include", "income", "increase", "index", "indicate", "industry", "infant", "inform", "inherit", "injury", "inmate", "insect", "inside", "install", "intend", "intimate", "invasion", "involve", "iris", "island", "isolate", "item", "ivory", "jacket", "jerky", "jewelry", "join", "judicial", "juice", "jump", "junction", "junior", "junk", "jury", "justice", "kernel", "keyboard", "kidney", "kind", "kitchen", "knife", "knit", "laden", "ladle", "ladybug", "lair", "lamp", "language", "large", "laser", "laundry", "lawsuit", "leader", "leaf", "learn", "leaves", "lecture", "legal", "legend", "legs", "lend", "length", "level", "liberty", "library", "license", "lift", "likely", "lilac", "lily", "lips", "liquid", "listen", "literary", "living", "lizard", "loan", "lobe", "location", "losing", "loud", "loyalty", "luck", "lunar", "lunch", "lungs", "luxury", "lying", "lyrics", "machine", "magazine", "maiden", "mailman", "main", "makeup", "making", "mama", "manager", "mandate", "mansion", "manual", "marathon", "march", "market", "marvel", "mason", "material", "math", "maximum", "mayor", "meaning", "medal", "medical", "member", "memory", "mental", "merchant", "merit", "method", "metric", "midst", "mild", "military", "mineral", "minister", "miracle", "mixed", "mixture", "mobile", "modern", "modify", "moisture", "moment", "morning", 
"mortgage", "mother", "mountain", "mouse", "move", "much", "mule", "multiple", "muscle", "museum", "music", "mustang", "nail", "national", "necklace", "negative", "nervous", "network", "news", "nuclear", "numb", "numerous", "nylon", "oasis", "obesity", "object", "observe", "obtain", "ocean", "often", "olympic", "omit", "oral", "orange", "orbit", "order", "ordinary", "organize", "ounce", "oven", "overall", "owner", "paces", "pacific", "package", "paid", "painting", "pajamas", "pancake", "pants", "papa", "paper", "parcel", "parking", "party", "patent", "patrol", "payment", "payroll", "peaceful", "peanut", "peasant", "pecan", "penalty", "pencil", "percent", "perfect", "permit", "petition", "phantom", "pharmacy", "photo", "phrase", "physics", "pickup", "picture", "piece", "pile", "pink", "pipeline", "pistol", "pitch", "plains", "plan", "plastic", "platform", "playoff", "pleasure", "plot", "plunge", "practice", "prayer", "preach", "predator", "pregnant", "premium", "prepare", "presence", "prevent", "priest", "primary", "priority", "prisoner", "privacy", "prize", "problem", "process", "profile", "program", "promise", "prospect", "provide", "prune", "public", "pulse", "pumps", "punish", "puny", "pupal", "purchase", "purple", "python", "quantity", "quarter", "quick", "quiet", "race", "racism", "radar", "railroad", "rainbow", "raisin", "random", "ranked", "rapids", "raspy", "reaction", "realize", "rebound", "rebuild", "recall", "receiver", "recover", "regret", "regular", "reject", "relate", "remember", "remind", "remove", "render", "repair", "repeat", "replace", "require", "rescue", "research", "resident", "response", "result", "retailer", "retreat", "reunion", "revenue", "review", "reward", "rhyme", "rhythm", "rich", "rival", "river", "robin", "rocky", "romantic", "romp", "roster", "round", "royal", "ruin", "ruler", "rumor", "sack", "safari", "salary", "salon", "salt", "satisfy", "satoshi", "saver", "says", "scandal", "scared", "scatter", "scene", "scholar", "science", 
"scout", "scramble", "screw", "script", "scroll", "seafood", "season", "secret", "security", "segment", "senior", "shadow", "shaft", "shame", "shaped", "sharp", "shelter", "sheriff", "short", "should", "shrimp", "sidewalk", "silent", "silver", "similar", "simple", "single", "sister", "skin", "skunk", "slap", "slavery", "sled", "slice", "slim", "slow", "slush", "smart", "smear", "smell", "smirk", "smith", "smoking", "smug", "snake", "snapshot", "sniff", "society", "software", "soldier", "solution", "soul", "source", "space", "spark", "speak", "species", "spelling", "spend", "spew", "spider", "spill", "spine", "spirit", "spit", "spray", "sprinkle", "square", "squeeze", "stadium", "staff", "standard", "starting", "station", "stay", "steady", "step", "stick", "stilt", "story", "strategy", "strike", "style", "subject", "submit", "sugar", "suitable", "sunlight", "superior", "surface", "surprise", "survive", "sweater", "swimming", "swing", "switch", "symbolic", "sympathy", "syndrome", "system", "tackle", "tactics", "tadpole", "talent", "task", "taste", "taught", "taxi", "teacher", "teammate", "teaspoon", "temple", "tenant", "tendency", "tension", "terminal", "testify", "texture", "thank", "that", "theater", "theory", "therapy", "thorn", "threaten", "thumb", "thunder", "ticket", "tidy", "timber", "timely", "ting", "tofu", "together", "tolerate", "total", "toxic", "tracks", "traffic", "training", "transfer", "trash", "traveler", "treat", "trend", "trial", "tricycle", "trip", "triumph", "trouble", "true", "trust", "twice", "twin", "type", "typical", "ugly", "ultimate", "umbrella", "uncover", "undergo", "unfair", "unfold", "unhappy", "union", "universe", "unkind", "unknown", "unusual", "unwrap", "upgrade", "upstairs", "username", "usher", "usual", "valid", "valuable", "vampire", "vanish", "various", "vegan", "velvet", "venture", "verdict", "verify", "very", "veteran", "vexed", "victim", "video", "view", "vintage", "violence", "viral", "visitor", "visual", "vitamins", "vocal", 
"voice", "volume", "voter", "voting", "walnut", "warmth", "warn", "watch", "wavy", "wealthy", "weapon", "webcam", "welcome", "welfare", "western", "width", "wildlife", "window", "wine", "wireless", "wisdom", "withdraw", "wits", "wolf", "woman", "work", "worthy", "wrap", "wrist", "writing", "wrote", "year", "yelp", "yield", "yoga", "zero", ]
slip39_words = ['academic', 'acid', 'acne', 'acquire', 'acrobat', 'activity', 'actress', 'adapt', 'adequate', 'adjust', 'admit', 'adorn', 'adult', 'advance', 'advocate', 'afraid', 'again', 'agency', 'agree', 'aide', 'aircraft', 'airline', 'airport', 'ajar', 'alarm', 'album', 'alcohol', 'alien', 'alive', 'alpha', 'already', 'alto', 'aluminum', 'always', 'amazing', 'ambition', 'amount', 'amuse', 'analysis', 'anatomy', 'ancestor', 'ancient', 'angel', 'angry', 'animal', 'answer', 'antenna', 'anxiety', 'apart', 'aquatic', 'arcade', 'arena', 'argue', 'armed', 'artist', 'artwork', 'aspect', 'auction', 'august', 'aunt', 'average', 'aviation', 'avoid', 'award', 'away', 'axis', 'axle', 'beam', 'beard', 'beaver', 'become', 'bedroom', 'behavior', 'being', 'believe', 'belong', 'benefit', 'best', 'beyond', 'bike', 'biology', 'birthday', 'bishop', 'black', 'blanket', 'blessing', 'blimp', 'blind', 'blue', 'body', 'bolt', 'boring', 'born', 'both', 'boundary', 'bracelet', 'branch', 'brave', 'breathe', 'briefing', 'broken', 'brother', 'browser', 'bucket', 'budget', 'building', 'bulb', 'bulge', 'bumpy', 'bundle', 'burden', 'burning', 'busy', 'buyer', 'cage', 'calcium', 'camera', 'campus', 'canyon', 'capacity', 'capital', 'capture', 'carbon', 'cards', 'careful', 'cargo', 'carpet', 'carve', 'category', 'cause', 'ceiling', 'center', 'ceramic', 'champion', 'change', 'charity', 'check', 'chemical', 'chest', 'chew', 'chubby', 'cinema', 'civil', 'class', 'clay', 'cleanup', 'client', 'climate', 'clinic', 'clock', 'clogs', 'closet', 'clothes', 'club', 'cluster', 'coal', 'coastal', 'coding', 'column', 'company', 'corner', 'costume', 'counter', 'course', 'cover', 'cowboy', 'cradle', 'craft', 'crazy', 'credit', 'cricket', 'criminal', 'crisis', 'critical', 'crowd', 'crucial', 'crunch', 'crush', 'crystal', 'cubic', 'cultural', 'curious', 'curly', 'custody', 'cylinder', 'daisy', 'damage', 'dance', 'darkness', 'database', 'daughter', 'deadline', 'deal', 'debris', 'debut', 'decent', 'decision', 
'declare', 'decorate', 'decrease', 'deliver', 'demand', 'density', 'deny', 'depart', 'depend', 'depict', 'deploy', 'describe', 'desert', 'desire', 'desktop', 'destroy', 'detailed', 'detect', 'device', 'devote', 'diagnose', 'dictate', 'diet', 'dilemma', 'diminish', 'dining', 'diploma', 'disaster', 'discuss', 'disease', 'dish', 'dismiss', 'display', 'distance', 'dive', 'divorce', 'document', 'domain', 'domestic', 'dominant', 'dough', 'downtown', 'dragon', 'dramatic', 'dream', 'dress', 'drift', 'drink', 'drove', 'drug', 'dryer', 'duckling', 'duke', 'duration', 'dwarf', 'dynamic', 'early', 'earth', 'easel', 'easy', 'echo', 'eclipse', 'ecology', 'edge', 'editor', 'educate', 'either', 'elbow', 'elder', 'election', 'elegant', 'element', 'elephant', 'elevator', 'elite', 'else', 'email', 'emerald', 'emission', 'emperor', 'emphasis', 'employer', 'empty', 'ending', 'endless', 'endorse', 'enemy', 'energy', 'enforce', 'engage', 'enjoy', 'enlarge', 'entrance', 'envelope', 'envy', 'epidemic', 'episode', 'equation', 'equip', 'eraser', 'erode', 'escape', 'estate', 'estimate', 'evaluate', 'evening', 'evidence', 'evil', 'evoke', 'exact', 'example', 'exceed', 'exchange', 'exclude', 'excuse', 'execute', 'exercise', 'exhaust', 'exotic', 'expand', 'expect', 'explain', 'express', 'extend', 'extra', 'eyebrow', 'facility', 'fact', 'failure', 'faint', 'fake', 'false', 'family', 'famous', 'fancy', 'fangs', 'fantasy', 'fatal', 'fatigue', 'favorite', 'fawn', 'fiber', 'fiction', 'filter', 'finance', 'findings', 'finger', 'firefly', 'firm', 'fiscal', 'fishing', 'fitness', 'flame', 'flash', 'flavor', 'flea', 'flexible', 'flip', 'float', 'floral', 'fluff', 'focus', 'forbid', 'force', 'forecast', 'forget', 'formal', 'fortune', 'forward', 'founder', 'fraction', 'fragment', 'frequent', 'freshman', 'friar', 'fridge', 'friendly', 'frost', 'froth', 'frozen', 'fumes', 'funding', 'furl', 'fused', 'galaxy', 'game', 'garbage', 'garden', 'garlic', 'gasoline', 'gather', 'general', 'genius', 'genre', 'genuine', 
'geology', 'gesture', 'glad', 'glance', 'glasses', 'glen', 'glimpse', 'goat', 'golden', 'graduate', 'grant', 'grasp', 'gravity', 'gray', 'greatest', 'grief', 'grill', 'grin', 'grocery', 'gross', 'group', 'grownup', 'grumpy', 'guard', 'guest', 'guilt', 'guitar', 'gums', 'hairy', 'hamster', 'hand', 'hanger', 'harvest', 'have', 'havoc', 'hawk', 'hazard', 'headset', 'health', 'hearing', 'heat', 'helpful', 'herald', 'herd', 'hesitate', 'hobo', 'holiday', 'holy', 'home', 'hormone', 'hospital', 'hour', 'huge', 'human', 'humidity', 'hunting', 'husband', 'hush', 'husky', 'hybrid', 'idea', 'identify', 'idle', 'image', 'impact', 'imply', 'improve', 'impulse', 'include', 'income', 'increase', 'index', 'indicate', 'industry', 'infant', 'inform', 'inherit', 'injury', 'inmate', 'insect', 'inside', 'install', 'intend', 'intimate', 'invasion', 'involve', 'iris', 'island', 'isolate', 'item', 'ivory', 'jacket', 'jerky', 'jewelry', 'join', 'judicial', 'juice', 'jump', 'junction', 'junior', 'junk', 'jury', 'justice', 'kernel', 'keyboard', 'kidney', 'kind', 'kitchen', 'knife', 'knit', 'laden', 'ladle', 'ladybug', 'lair', 'lamp', 'language', 'large', 'laser', 'laundry', 'lawsuit', 'leader', 'leaf', 'learn', 'leaves', 'lecture', 'legal', 'legend', 'legs', 'lend', 'length', 'level', 'liberty', 'library', 'license', 'lift', 'likely', 'lilac', 'lily', 'lips', 'liquid', 'listen', 'literary', 'living', 'lizard', 'loan', 'lobe', 'location', 'losing', 'loud', 'loyalty', 'luck', 'lunar', 'lunch', 'lungs', 'luxury', 'lying', 'lyrics', 'machine', 'magazine', 'maiden', 'mailman', 'main', 'makeup', 'making', 'mama', 'manager', 'mandate', 'mansion', 'manual', 'marathon', 'march', 'market', 'marvel', 'mason', 'material', 'math', 'maximum', 'mayor', 'meaning', 'medal', 'medical', 'member', 'memory', 'mental', 'merchant', 'merit', 'method', 'metric', 'midst', 'mild', 'military', 'mineral', 'minister', 'miracle', 'mixed', 'mixture', 'mobile', 'modern', 'modify', 'moisture', 'moment', 'morning', 
'mortgage', 'mother', 'mountain', 'mouse', 'move', 'much', 'mule', 'multiple', 'muscle', 'museum', 'music', 'mustang', 'nail', 'national', 'necklace', 'negative', 'nervous', 'network', 'news', 'nuclear', 'numb', 'numerous', 'nylon', 'oasis', 'obesity', 'object', 'observe', 'obtain', 'ocean', 'often', 'olympic', 'omit', 'oral', 'orange', 'orbit', 'order', 'ordinary', 'organize', 'ounce', 'oven', 'overall', 'owner', 'paces', 'pacific', 'package', 'paid', 'painting', 'pajamas', 'pancake', 'pants', 'papa', 'paper', 'parcel', 'parking', 'party', 'patent', 'patrol', 'payment', 'payroll', 'peaceful', 'peanut', 'peasant', 'pecan', 'penalty', 'pencil', 'percent', 'perfect', 'permit', 'petition', 'phantom', 'pharmacy', 'photo', 'phrase', 'physics', 'pickup', 'picture', 'piece', 'pile', 'pink', 'pipeline', 'pistol', 'pitch', 'plains', 'plan', 'plastic', 'platform', 'playoff', 'pleasure', 'plot', 'plunge', 'practice', 'prayer', 'preach', 'predator', 'pregnant', 'premium', 'prepare', 'presence', 'prevent', 'priest', 'primary', 'priority', 'prisoner', 'privacy', 'prize', 'problem', 'process', 'profile', 'program', 'promise', 'prospect', 'provide', 'prune', 'public', 'pulse', 'pumps', 'punish', 'puny', 'pupal', 'purchase', 'purple', 'python', 'quantity', 'quarter', 'quick', 'quiet', 'race', 'racism', 'radar', 'railroad', 'rainbow', 'raisin', 'random', 'ranked', 'rapids', 'raspy', 'reaction', 'realize', 'rebound', 'rebuild', 'recall', 'receiver', 'recover', 'regret', 'regular', 'reject', 'relate', 'remember', 'remind', 'remove', 'render', 'repair', 'repeat', 'replace', 'require', 'rescue', 'research', 'resident', 'response', 'result', 'retailer', 'retreat', 'reunion', 'revenue', 'review', 'reward', 'rhyme', 'rhythm', 'rich', 'rival', 'river', 'robin', 'rocky', 'romantic', 'romp', 'roster', 'round', 'royal', 'ruin', 'ruler', 'rumor', 'sack', 'safari', 'salary', 'salon', 'salt', 'satisfy', 'satoshi', 'saver', 'says', 'scandal', 'scared', 'scatter', 'scene', 'scholar', 'science', 
'scout', 'scramble', 'screw', 'script', 'scroll', 'seafood', 'season', 'secret', 'security', 'segment', 'senior', 'shadow', 'shaft', 'shame', 'shaped', 'sharp', 'shelter', 'sheriff', 'short', 'should', 'shrimp', 'sidewalk', 'silent', 'silver', 'similar', 'simple', 'single', 'sister', 'skin', 'skunk', 'slap', 'slavery', 'sled', 'slice', 'slim', 'slow', 'slush', 'smart', 'smear', 'smell', 'smirk', 'smith', 'smoking', 'smug', 'snake', 'snapshot', 'sniff', 'society', 'software', 'soldier', 'solution', 'soul', 'source', 'space', 'spark', 'speak', 'species', 'spelling', 'spend', 'spew', 'spider', 'spill', 'spine', 'spirit', 'spit', 'spray', 'sprinkle', 'square', 'squeeze', 'stadium', 'staff', 'standard', 'starting', 'station', 'stay', 'steady', 'step', 'stick', 'stilt', 'story', 'strategy', 'strike', 'style', 'subject', 'submit', 'sugar', 'suitable', 'sunlight', 'superior', 'surface', 'surprise', 'survive', 'sweater', 'swimming', 'swing', 'switch', 'symbolic', 'sympathy', 'syndrome', 'system', 'tackle', 'tactics', 'tadpole', 'talent', 'task', 'taste', 'taught', 'taxi', 'teacher', 'teammate', 'teaspoon', 'temple', 'tenant', 'tendency', 'tension', 'terminal', 'testify', 'texture', 'thank', 'that', 'theater', 'theory', 'therapy', 'thorn', 'threaten', 'thumb', 'thunder', 'ticket', 'tidy', 'timber', 'timely', 'ting', 'tofu', 'together', 'tolerate', 'total', 'toxic', 'tracks', 'traffic', 'training', 'transfer', 'trash', 'traveler', 'treat', 'trend', 'trial', 'tricycle', 'trip', 'triumph', 'trouble', 'true', 'trust', 'twice', 'twin', 'type', 'typical', 'ugly', 'ultimate', 'umbrella', 'uncover', 'undergo', 'unfair', 'unfold', 'unhappy', 'union', 'universe', 'unkind', 'unknown', 'unusual', 'unwrap', 'upgrade', 'upstairs', 'username', 'usher', 'usual', 'valid', 'valuable', 'vampire', 'vanish', 'various', 'vegan', 'velvet', 'venture', 'verdict', 'verify', 'very', 'veteran', 'vexed', 'victim', 'video', 'view', 'vintage', 'violence', 'viral', 'visitor', 'visual', 'vitamins', 'vocal', 
'voice', 'volume', 'voter', 'voting', 'walnut', 'warmth', 'warn', 'watch', 'wavy', 'wealthy', 'weapon', 'webcam', 'welcome', 'welfare', 'western', 'width', 'wildlife', 'window', 'wine', 'wireless', 'wisdom', 'withdraw', 'wits', 'wolf', 'woman', 'work', 'worthy', 'wrap', 'wrist', 'writing', 'wrote', 'year', 'yelp', 'yield', 'yoga', 'zero']
class Model: def __init__(self): self.SoC = 0.0 self.io = 0.0 def step(self, value): if self.SoC > 0.95 and self.io == 1.0: self.io = 0.0 if self.SoC < 0.05 and self.io == 0.0: self.io = 1.0
class Model: def __init__(self): self.SoC = 0.0 self.io = 0.0 def step(self, value): if self.SoC > 0.95 and self.io == 1.0: self.io = 0.0 if self.SoC < 0.05 and self.io == 0.0: self.io = 1.0
def is_valid_index(r, c, board_size): return r in range(board_size) and c in range(board_size) def calculate_kills(matrix, r, c): kills = 0 possible_moves = [ (-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (2, -1), (2, 1), (1, 2) ] for idx in range(len(possible_moves)): row = r + possible_moves[idx][0] col = c + possible_moves[idx][1] if is_valid_index(row, col, len(matrix)) and \ matrix[row][col] == "K": kills += 1 return kills size = int(input()) board = [[x for x in list(input())] for _ in range(size)] removed_knights = 0 while True: max_kills = 0 knight_position = set() for r in range(size): for c in range(size): if board[r][c] == "K": kills = calculate_kills(board, r, c) if kills > max_kills: max_kills = kills knight_position = (r, c) if not knight_position: break row, col = knight_position board[row][col] = "O" removed_knights += 1 print(removed_knights)
def is_valid_index(r, c, board_size): return r in range(board_size) and c in range(board_size) def calculate_kills(matrix, r, c): kills = 0 possible_moves = [(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (2, -1), (2, 1), (1, 2)] for idx in range(len(possible_moves)): row = r + possible_moves[idx][0] col = c + possible_moves[idx][1] if is_valid_index(row, col, len(matrix)) and matrix[row][col] == 'K': kills += 1 return kills size = int(input()) board = [[x for x in list(input())] for _ in range(size)] removed_knights = 0 while True: max_kills = 0 knight_position = set() for r in range(size): for c in range(size): if board[r][c] == 'K': kills = calculate_kills(board, r, c) if kills > max_kills: max_kills = kills knight_position = (r, c) if not knight_position: break (row, col) = knight_position board[row][col] = 'O' removed_knights += 1 print(removed_knights)
# settings.py # # aws-clean will not destroy things in the whitelist for global resources # please put under the global region. I recommend just adding to the lists provided # unless you are trying to provide a new cleaner. # # # What value do I put in for each resource> # # s3_buckets - BucketName # ec2_instances - Instance-id # rds_instances - DbIndentifier # dynamo_tables - TableName # redshift_clusters - ClusterIdentifier # ecs_clusters - Cluster ARN WHITELIST = { "global": { "s3_buckets": [ ] }, "us-east-1": { "ec2_instances": [ ], "rds_instances": [ ], "lambda_functions": [ ], "dynamo_tables": [ ], "redshift_clusters": [ ], "ecs_clusters": [ ], "efs": [ ] }, "us-east-2": { "ec2_instances": [ ], "rds_instances": [ ], "lambda_functions": [ ], "dynamo_tables": [ ], "redshift_clusters": [ ], "ecs_clusters": [ ], "efs": [ ] } } # regions that aws-clean will go through. global is things like S3 and IAM REGIONS = [ "global", "us-east-1", "us-east-2" ] RESULTS_DIR = "results" RESULTS_FILENAME = "aws_clean"
whitelist = {'global': {'s3_buckets': []}, 'us-east-1': {'ec2_instances': [], 'rds_instances': [], 'lambda_functions': [], 'dynamo_tables': [], 'redshift_clusters': [], 'ecs_clusters': [], 'efs': []}, 'us-east-2': {'ec2_instances': [], 'rds_instances': [], 'lambda_functions': [], 'dynamo_tables': [], 'redshift_clusters': [], 'ecs_clusters': [], 'efs': []}} regions = ['global', 'us-east-1', 'us-east-2'] results_dir = 'results' results_filename = 'aws_clean'
### YOUR CODE FOR openLocks() FUNCTION GOES HERE ### def openLocks(number_of_lockers , number_of_students): if type(number_of_lockers) == str or type(number_of_students) == str or number_of_lockers < 0 or number_of_students <0: return None if number_of_lockers == 0 or number_of_students == 0: return 0 locks = [1] * number_of_lockers # in this closed locker means lock[0] and open mean lock[1] open_locks = 0 for students in range(1 ,number_of_students+1): for lockers in range(1 , number_of_lockers+1): if lockers % students == 0: if students > 1: if locks[lockers - 1] == 1: locks[lockers - 1] = 0 else: locks[lockers - 1] = 1 for openlocks in range(1 , number_of_lockers+1): if locks[openlocks-1] == 1: open_locks += 1 return open_locks #### End OF MARKER ### YOUR CODE FOR mostTouchableLocker() FUNCTION GOES HERE ### def mostTouchableLocker(number_of_lockers , number_of_students): x = 0 z = 0 if number_of_lockers < 0 or number_of_students < 0: return None if number_of_lockers == 0 or number_of_students == 0 : return 0 if number_of_lockers < number_of_students or number_of_lockers%number_of_students == 0: for lockers in range(1 , number_of_lockers+1): y = 0 for i in range(1 , lockers+1): if lockers % i == 0: y+=1 if y >= x: x = y if y >= x: z = lockers return z else: for lockers in range(1 , number_of_students+1): y = 0 for i in range(1 , lockers+1): if lockers % i == 0: y+=1 if y >= x: x = y if y >= x: z = lockers return z #### End OF MARKER
def open_locks(number_of_lockers, number_of_students): if type(number_of_lockers) == str or type(number_of_students) == str or number_of_lockers < 0 or (number_of_students < 0): return None if number_of_lockers == 0 or number_of_students == 0: return 0 locks = [1] * number_of_lockers open_locks = 0 for students in range(1, number_of_students + 1): for lockers in range(1, number_of_lockers + 1): if lockers % students == 0: if students > 1: if locks[lockers - 1] == 1: locks[lockers - 1] = 0 else: locks[lockers - 1] = 1 for openlocks in range(1, number_of_lockers + 1): if locks[openlocks - 1] == 1: open_locks += 1 return open_locks def most_touchable_locker(number_of_lockers, number_of_students): x = 0 z = 0 if number_of_lockers < 0 or number_of_students < 0: return None if number_of_lockers == 0 or number_of_students == 0: return 0 if number_of_lockers < number_of_students or number_of_lockers % number_of_students == 0: for lockers in range(1, number_of_lockers + 1): y = 0 for i in range(1, lockers + 1): if lockers % i == 0: y += 1 if y >= x: x = y if y >= x: z = lockers return z else: for lockers in range(1, number_of_students + 1): y = 0 for i in range(1, lockers + 1): if lockers % i == 0: y += 1 if y >= x: x = y if y >= x: z = lockers return z
test = { 'name': 'Question32', 'points': 1, 'suites': [ { 'cases': [ { 'code': r""" >>> from datascience import * >>> retweets_likes_age.num_rows 7 """, 'hidden': False, 'locked': False }, { 'code': r""" >>> from datascience import * >>> retweets_likes_age.num_columns 4 """, 'hidden': False, 'locked': False }, ], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest' } ] }
test = {'name': 'Question32', 'points': 1, 'suites': [{'cases': [{'code': '\n >>> from datascience import *\n >>> retweets_likes_age.num_rows\n 7\n ', 'hidden': False, 'locked': False}, {'code': '\n >>> from datascience import *\n >>> retweets_likes_age.num_columns\n 4\n ', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
if __name__ == '__main__': n = int(input()) student_marks = {} for _ in range(n): name, *line = input().split() scores = list(map(float, line)) student_marks[name] = scores query_name = str(input()) if query_name in student_marks: l=list(student_marks[query_name]) sum=0 for i in range(len(l)): sum= l[i]+sum print("{:.2f}".format(sum/3))
if __name__ == '__main__': n = int(input()) student_marks = {} for _ in range(n): (name, *line) = input().split() scores = list(map(float, line)) student_marks[name] = scores query_name = str(input()) if query_name in student_marks: l = list(student_marks[query_name]) sum = 0 for i in range(len(l)): sum = l[i] + sum print('{:.2f}'.format(sum / 3))
fname = input("Enter the file name: ") fh = open(fname) count = 0 for line in fh: line = line.rstrip() if line.startswith('From '): count = count + 1 words = line.split() emails = words[1] print(emails) print("There were",count, "lines in the file with From as the first word")
fname = input('Enter the file name: ') fh = open(fname) count = 0 for line in fh: line = line.rstrip() if line.startswith('From '): count = count + 1 words = line.split() emails = words[1] print(emails) print('There were', count, 'lines in the file with From as the first word')
def add_title(self): square = Square(side_length=2 * self.L) title = TextMobject("Brownian motion") title.scale(1.5) title.next_to(square, UP) self.add(square) self.add(title)
def add_title(self): square = square(side_length=2 * self.L) title = text_mobject('Brownian motion') title.scale(1.5) title.next_to(square, UP) self.add(square) self.add(title)
class Solution(object): def reverseBitsBitManipulation(self, n): """ Time - O(length(n)) = O(32) Space - O(1) :type n: integer :rtype: integer """ result = 0 for _ in range(32): result <<= 1 result |= n & 1 n >>= 1 return result
class Solution(object): def reverse_bits_bit_manipulation(self, n): """ Time - O(length(n)) = O(32) Space - O(1) :type n: integer :rtype: integer """ result = 0 for _ in range(32): result <<= 1 result |= n & 1 n >>= 1 return result
## PETRglobals.py [module] ## # Global variable initializations for the PETRARCH event coder # # SYSTEM REQUIREMENTS # This program has been successfully run under Mac OS 10.10; it is standard Python 2.7 # so it should also run in Unix or Windows. # # INITIAL PROVENANCE: # Programmer: Philip A. Schrodt # Parus Analytics # Charlottesville, VA, 22901 U.S.A. # http://eventdata.parusanalytics.com # # GitHub repository: https://github.com/openeventdata/petrarch # # Copyright (c) 2014 Philip A. Schrodt. All rights reserved. # # This project is part of the Open Event Data Alliance tool set; earlier developments # were funded in part by National Science Foundation grant SES-1259190 # # This code is covered under the MIT license # # REVISION HISTORY: # 22-Nov-13: Initial version -- ptab.verbsonly.py # 28-Apr-14: Latest version # 20-Nov-14: WriteActorRoot/Text added # ------------------------------------------------------------------------ # Global variables are listed below: additional details on their structure can # be found in various function definitions. The various options are described # in more detail in the config.ini file. 
VerbDict = {'verbs':{}, 'phrases':{}, 'transformations' : {}} # verb dictionary ActorDict = {} # actor dictionary ActorCodes = [] # actor code list AgentDict = {} # agent dictionary DiscardList = {} # discard list IssueList = [] IssueCodes = [] ConfigFileName = "PETR_config.ini" VerbFileName = "" # verb dictionary ActorFileList = [] # actor dictionary AgentFileName = "" # agent dictionary DiscardFileName = "" # discard list TextFileList = [] # current text or validation file EventFileName = "" # event output file IssueFileName = "" # issues list # element followed by attribute and content pairs for XML line AttributeList = [] # CODING OPTIONS # Defaults are more or less equivalent to TABARI NewActorLength = 0 # Maximum length for new actors extracted from noun phrases RequireDyad = True # Events require a non-null source and target StoponError = False # Raise stop exception on errors rather than recovering # OUTPUT OPTIONS WriteActorRoot = False # Include actor root in event record :: currently not implemented WriteActorText = False # Include actor text in event record WriteEventText = False # Include event text in event record RunTimeString = '' # used in error and debugging files -- just set it once # INTERFACE OPTIONS: these can be changed in config.ini # The default -- all false -- is equivalent to an A)utocode in TABARI CodeBySentence = False PauseBySentence = False PauseByStory = False # COMMA OPTION : These adjust the length (in words) of comma-delimited clauses # that are eliminated from the parse. To deactivate, set the max to zero. 
# Defaults, based on TABARI, are in () # comma_min : internal clause minimum length [2] # comma_max : internal clause maximum length [8] # comma_bmin : initial ("begin") clause minimum length [0] # comma_bmax : initial clause maximum length [0 : deactivated by default] # comma_emin : terminal ("end") clause minimum length [2] # comma_emax : terminal clause maximum length [8] CommaMin = 2 CommaMax = 8 CommaBMin = 0 CommaBMax = 0 CommaEMin = 2 CommaEMax = 8 stanfordnlp = '' # TEMPORARY VARIABLES # <14.11.20> Temporary in the sense that these won't be needed when we eventually # refactor so that codes are some sort of structure other than a string CodePrimer = '=#=' # separates actor code from root and text strings RootPrimer = CodePrimer + ':' # start of root string TextPrimer = CodePrimer + '+' # start of text string
verb_dict = {'verbs': {}, 'phrases': {}, 'transformations': {}} actor_dict = {} actor_codes = [] agent_dict = {} discard_list = {} issue_list = [] issue_codes = [] config_file_name = 'PETR_config.ini' verb_file_name = '' actor_file_list = [] agent_file_name = '' discard_file_name = '' text_file_list = [] event_file_name = '' issue_file_name = '' attribute_list = [] new_actor_length = 0 require_dyad = True stopon_error = False write_actor_root = False write_actor_text = False write_event_text = False run_time_string = '' code_by_sentence = False pause_by_sentence = False pause_by_story = False comma_min = 2 comma_max = 8 comma_b_min = 0 comma_b_max = 0 comma_e_min = 2 comma_e_max = 8 stanfordnlp = '' code_primer = '=#=' root_primer = CodePrimer + ':' text_primer = CodePrimer + '+'
# pylint: disable=W0622 def sum(arg): total = 0 for val in arg: total += val return total
def sum(arg): total = 0 for val in arg: total += val return total
class Wallet(): def __init__(self, initial_amount = 0): self.balance = initial_amount def spend_cash(self, amount): if self.balance < amount: print("insuffienct amount") else: self.balance -= amount def add_cash(self, amount): self.balance += amount
class Wallet: def __init__(self, initial_amount=0): self.balance = initial_amount def spend_cash(self, amount): if self.balance < amount: print('insuffienct amount') else: self.balance -= amount def add_cash(self, amount): self.balance += amount
a = int(input("")) b = int(input("")) c = int(input("")) d = int(input("")) x = (a*b-c*d) print("DIFERENCA = %d" %x)
a = int(input('')) b = int(input('')) c = int(input('')) d = int(input('')) x = a * b - c * d print('DIFERENCA = %d' % x)
""" CSS Grid Template """ class CSSGridMixin(object): """ A mixin for adding css grid to any standard CBV """ grid_wrapper = None grid_template_columns = None grid_template_areas = None grid_gap = None def get_context_data(self, **kwargs): """ Insert the single object into the context dict. """ context = super().get_context_data(**kwargs) context.update({ 'css-grid': { 'grid_wrapper': self.grid_wrapper, 'grid-template-columns': self.grid_template_columns, 'grid-template-areas': self.grid_template_areas, 'grid-gap': self.grid_gap } }) return context
""" CSS Grid Template """ class Cssgridmixin(object): """ A mixin for adding css grid to any standard CBV """ grid_wrapper = None grid_template_columns = None grid_template_areas = None grid_gap = None def get_context_data(self, **kwargs): """ Insert the single object into the context dict. """ context = super().get_context_data(**kwargs) context.update({'css-grid': {'grid_wrapper': self.grid_wrapper, 'grid-template-columns': self.grid_template_columns, 'grid-template-areas': self.grid_template_areas, 'grid-gap': self.grid_gap}}) return context
# Webhook content types HTTP_CONTENT_TYPE_JSON = "application/json" # Registerable extras features EXTRAS_FEATURES = [ "config_context_owners", "custom_fields", "custom_links", "custom_validators", "export_template_owners", "export_templates", "graphql", "job_results", "relationships", "statuses", "webhooks", ] # JobLogEntry Truncation Length JOB_LOG_MAX_GROUPING_LENGTH = 100 JOB_LOG_MAX_LOG_OBJECT_LENGTH = 200 JOB_LOG_MAX_ABSOLUTE_URL_LENGTH = 255
http_content_type_json = 'application/json' extras_features = ['config_context_owners', 'custom_fields', 'custom_links', 'custom_validators', 'export_template_owners', 'export_templates', 'graphql', 'job_results', 'relationships', 'statuses', 'webhooks'] job_log_max_grouping_length = 100 job_log_max_log_object_length = 200 job_log_max_absolute_url_length = 255
def _pretty_after(a, k): positional = ", ".join(repr(arg) for arg in a) keyword = ", ".join( "{}={!r}".format(name, value) for name, value in k.items() ) if positional: if keyword: return ", {}, {}".format(positional, keyword) else: return ", {}".format(positional) else: if keyword: return ", {}".format(keyword) else: return "" class A: def __init__(self, x, y, *, z=10): print("v3: __init__({}, {!r}, {!r}, z={!r})".format(self, x, y, z)) self.x = x self.y = y self.z = z def __new__(cls, *args, **kwargs): msg = _pretty_after(args, kwargs) print("v3: __new__({}{})".format(cls.__name__, msg)) result = super(A, cls).__new__(cls) print(" -> {}".format(result)) return result def __getnewargs__(self, *args, **kwargs): template = "v3: __getnewargs__({}{})" print(template.format(self, _pretty_after(args, kwargs))) raise NotImplementedError def __getstate__(self, *args, **kwargs): template = "v3: __getstate__({}{})" print(template.format(self, _pretty_after(args, kwargs))) raise NotImplementedError def __setstate__(self, state): template = "v3: __setstate__({}, state={!r})" print(template.format(self, state)) self.x = state["x"] self.y = state["y"] self.z = 10
def _pretty_after(a, k): positional = ', '.join((repr(arg) for arg in a)) keyword = ', '.join(('{}={!r}'.format(name, value) for (name, value) in k.items())) if positional: if keyword: return ', {}, {}'.format(positional, keyword) else: return ', {}'.format(positional) elif keyword: return ', {}'.format(keyword) else: return '' class A: def __init__(self, x, y, *, z=10): print('v3: __init__({}, {!r}, {!r}, z={!r})'.format(self, x, y, z)) self.x = x self.y = y self.z = z def __new__(cls, *args, **kwargs): msg = _pretty_after(args, kwargs) print('v3: __new__({}{})'.format(cls.__name__, msg)) result = super(A, cls).__new__(cls) print(' -> {}'.format(result)) return result def __getnewargs__(self, *args, **kwargs): template = 'v3: __getnewargs__({}{})' print(template.format(self, _pretty_after(args, kwargs))) raise NotImplementedError def __getstate__(self, *args, **kwargs): template = 'v3: __getstate__({}{})' print(template.format(self, _pretty_after(args, kwargs))) raise NotImplementedError def __setstate__(self, state): template = 'v3: __setstate__({}, state={!r})' print(template.format(self, state)) self.x = state['x'] self.y = state['y'] self.z = 10
"""Global wikipedia exception and warning classes."""


class PageError(Exception):
    """Exception raised when no Wikipedia page matched a query."""

    def __init__(self, page_title):
        self.title = page_title

    def __str__(self):
        return "\"%s\" does not match any pages. Try another query!" % self.title


class DisambiguationError(Exception):
    """
    Exception raised when a page resolves to a Disambiguation page.

    The `options` property contains a list of titles
    of Wikipedia pages that the query may refer to.
    """

    def __init__(self, title, may_refer_to):
        self.title = title
        self.options = may_refer_to

    def __unicode__(self):
        # Python 2 legacy hook; ignored (harmless) under Python 3.
        return u"\"%s\" may refer to: \n%s" % (self.title, '\n'.join(self.options))

    def __str__(self):
        # BUG FIX: the original called the Python 2-only builtin `unicode`
        # (NameError under Python 3) and returned bytes from .encode().
        # Return the formatted text directly instead.
        return "\"%s\" may refer to: \n%s" % (self.title, '\n'.join(self.options))


class RedirectError(Exception):
    """Exception raised when a page title unexpectedly resolves to a redirect."""

    def __init__(self, page_title):
        self.title = page_title

    def __str__(self):
        return ("\"%s\" resulted in a redirect. Set the redirect property to True to allow automatic redirects." % self.title)
"""Global wikipedia exception and warning classes."""


class Pageerror(Exception):
    """Exception raised when no Wikipedia page matched a query."""

    def __init__(self, page_title):
        self.title = page_title

    def __str__(self):
        return '"%s" does not match any pages. Try another query!' % self.title


class Disambiguationerror(Exception):
    """
    Exception raised when a page resolves to a Disambiguation page.

    The `options` property contains a list of titles
    of Wikipedia pages that the query may refer to.
    """

    def __init__(self, title, may_refer_to):
        self.title = title
        self.options = may_refer_to

    def __unicode__(self):
        # Python 2 legacy hook; ignored (harmless) under Python 3.
        return u'"%s" may refer to: \n%s' % (self.title, '\n'.join(self.options))

    def __str__(self):
        # BUG FIX: the original called the Python 2-only builtin `unicode`
        # (NameError under Python 3) and returned bytes from .encode().
        # Return the formatted text directly instead.
        return '"%s" may refer to: \n%s' % (self.title, '\n'.join(self.options))


class Redirecterror(Exception):
    """Exception raised when a page title unexpectedly resolves to a redirect."""

    def __init__(self, page_title):
        self.title = page_title

    def __str__(self):
        return '"%s" resulted in a redirect. Set the redirect property to True to allow automatic redirects.' % self.title
"""Deployment configuration.

Values collected here are the ones expected to vary per machine,
per debugging mode, and so on.
"""

# Secret used for signing cookies.
# NOTE(review): placeholder value -- replace with real random data in production.
COOKIE_KEY = "A random string would be better"

# Enable debugging features; turn off for production deployments.
DEBUG = True

# The default Flask port; change for shared server machines.
PORT = 5000
"""Deployment configuration.

Values collected here are the ones expected to vary per machine,
per debugging mode, and so on.
"""

# Secret used for signing cookies.
# NOTE(review): placeholder value -- replace with real random data in production.
# Lower-case names are unconventional for constants but are this module's
# public interface and are preserved.
cookie_key = "A random string would be better"

# Enable debugging features; turn off for production deployments.
debug = True

# The default Flask port; change for shared server machines.
port = 5000