content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
"""
Given two binary trees, write a function to check if they are the
same or not.
Two binary trees are considered the same if they are structurally
identical and the nodes have the same value.
Example 1:
Input: 1 1
/ \\ / \\
2 3 2 3
[1,2,3], [1,2,3]
Output: true
Example 2:
Input: 1 1
/ \\
2 2
[1,2], [1,null,2]
Output: false
Example 3:
Input: 1 1
/ \\ / \\
2 1 1 2
[1,2,1], [1,1,2]
Output: false
"""
class TreeNode:
"""Definition for a binary tree node."""
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isSameTree(self, p, q):
"""DFS"""
if (not p) ^ (not q):
return False
elif not (p or q):
return True
elif p.val == q.val:
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
else:
return False
def lst2tree(l):
"""Convert list into Tree (BFS)"""
if (not l) or (l[0] == 'null'):
return None
tree = TreeNode(l[0])
i = 1
n_nodes = 2
cur_level = [tree]
while i < len(l):
nodes = []
n_null = 0
for nn in range(n_nodes):
if i + nn < len(l):
node = l[i + nn]
if (not node) or (node == 'null'):
n_null += 1
nodes.append(None)
else:
nodes.append(node)
else:
n_null += 1
nodes.append(None)
j = 0
next_level = []
for cur in cur_level:
if cur:
if nodes[j]:
cur.left = TreeNode(nodes[j])
if nodes[j + 1]:
cur.right = TreeNode(nodes[j + 1])
next_level += [cur.left, cur.right]
j += 2
i += n_nodes
n_nodes = 2 * (n_nodes - n_null)
cur_level = next_level
return tree
if __name__ == "__main__":
# try_tree_val = [1, 2, 'null', 4, 5, 'null', 7]
# try_tree = lst2tree(try_tree_val)
input_1 = [1, 2, 3]
input_2 = [1, 2, 3]
input_tree_1 = lst2tree(input_1)
input_tree_2 = lst2tree(input_2)
sol = Solution()
print(sol.isSameTree(input_tree_1, input_tree_2))
| """
Given two binary trees, write a function to check if they are the
same or not.
Two binary trees are considered the same if they are structurally
identical and the nodes have the same value.
Example 1:
Input: 1 1
/ \\ / \\
2 3 2 3
[1,2,3], [1,2,3]
Output: true
Example 2:
Input: 1 1
/ \\
2 2
[1,2], [1,null,2]
Output: false
Example 3:
Input: 1 1
/ \\ / \\
2 1 1 2
[1,2,1], [1,1,2]
Output: false
"""
class Treenode:
"""Definition for a binary tree node."""
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def is_same_tree(self, p, q):
"""DFS"""
if (not p) ^ (not q):
return False
elif not (p or q):
return True
elif p.val == q.val:
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
else:
return False
def lst2tree(l):
"""Convert list into Tree (BFS)"""
if not l or l[0] == 'null':
return None
tree = tree_node(l[0])
i = 1
n_nodes = 2
cur_level = [tree]
while i < len(l):
nodes = []
n_null = 0
for nn in range(n_nodes):
if i + nn < len(l):
node = l[i + nn]
if not node or node == 'null':
n_null += 1
nodes.append(None)
else:
nodes.append(node)
else:
n_null += 1
nodes.append(None)
j = 0
next_level = []
for cur in cur_level:
if cur:
if nodes[j]:
cur.left = tree_node(nodes[j])
if nodes[j + 1]:
cur.right = tree_node(nodes[j + 1])
next_level += [cur.left, cur.right]
j += 2
i += n_nodes
n_nodes = 2 * (n_nodes - n_null)
cur_level = next_level
return tree
if __name__ == '__main__':
input_1 = [1, 2, 3]
input_2 = [1, 2, 3]
input_tree_1 = lst2tree(input_1)
input_tree_2 = lst2tree(input_2)
sol = solution()
print(sol.isSameTree(input_tree_1, input_tree_2)) |
class Solution(object):
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
First = ['Q','W','E','R','T','Y','U','I','O','P']
SEC = ['A','S','D','F','G','H','J','K','L']
LAST= ['Z','X','C','V','B','N','M']
answer = []
for word in words:
now = -1
answer_flag = True
for chr in word:
if (now == - 1):
if ((chr.upper()) in First):
now = 1
if ((chr.upper()) in SEC):
now = 2
if ((chr.upper()) in LAST):
now = 3
else:
flag = False
if ((chr.upper()) in First and now == 1):
flag = True
if ((chr.upper()) in SEC and now == 2):
flag = True
if ((chr.upper()) in LAST and now == 3):
flag = True
if (flag == False):
answer_flag = False
break
if (answer_flag):
answer.append(word)
return answer
| class Solution(object):
def find_words(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
first = ['Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P']
sec = ['A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L']
last = ['Z', 'X', 'C', 'V', 'B', 'N', 'M']
answer = []
for word in words:
now = -1
answer_flag = True
for chr in word:
if now == -1:
if chr.upper() in First:
now = 1
if chr.upper() in SEC:
now = 2
if chr.upper() in LAST:
now = 3
else:
flag = False
if chr.upper() in First and now == 1:
flag = True
if chr.upper() in SEC and now == 2:
flag = True
if chr.upper() in LAST and now == 3:
flag = True
if flag == False:
answer_flag = False
break
if answer_flag:
answer.append(word)
return answer |
def eqindexMultiPass(data):
"Multi pass"
for i in range(len(data)):
suml, sumr = sum(data[:i]), sum(data[i+1:])
if suml == sumr:
yield i
| def eqindex_multi_pass(data):
"""Multi pass"""
for i in range(len(data)):
(suml, sumr) = (sum(data[:i]), sum(data[i + 1:]))
if suml == sumr:
yield i |
class languages():
def __init__(self, fDic):
self.fDic = fDic
self.scripts = set(self.fDic.scripts)
self.scripts.discard('dflt')
def languageSyntax(self, script, language):
return 'languagesystem %s %s;' %(script, language)
def syntax(self):
result = [self.languageSyntax('DFLT', 'dflt')]
for script in self.scripts:
result.append(self.languageSyntax(script, 'dflt'))
for script, language in self.fDic.localized.keys():
result.append(self.languageSyntax(script, language))
return '\n'.join(result)
| class Languages:
def __init__(self, fDic):
self.fDic = fDic
self.scripts = set(self.fDic.scripts)
self.scripts.discard('dflt')
def language_syntax(self, script, language):
return 'languagesystem %s %s;' % (script, language)
def syntax(self):
result = [self.languageSyntax('DFLT', 'dflt')]
for script in self.scripts:
result.append(self.languageSyntax(script, 'dflt'))
for (script, language) in self.fDic.localized.keys():
result.append(self.languageSyntax(script, language))
return '\n'.join(result) |
n = int(input())
for i in range(1,n+1):
temp = n
for j in range(1,i):
print(temp,end="")
temp = temp -1
for j in range(1,(2*n) - (2*i) + 2):
print(n-i+1,end="")
for j in range(1,i):
temp = temp+1
print(temp,end="")
print()
for i in range(n-1,0,-1):
temp = n
for j in range(1,i):
print(temp,end="")
temp = temp - 1
for j in range(1,(2*n) - (2*i) + 2):
print(n-i+1,end="")
for j in range(1,i):
temp = temp+1
print(temp,end="")
print()
| n = int(input())
for i in range(1, n + 1):
temp = n
for j in range(1, i):
print(temp, end='')
temp = temp - 1
for j in range(1, 2 * n - 2 * i + 2):
print(n - i + 1, end='')
for j in range(1, i):
temp = temp + 1
print(temp, end='')
print()
for i in range(n - 1, 0, -1):
temp = n
for j in range(1, i):
print(temp, end='')
temp = temp - 1
for j in range(1, 2 * n - 2 * i + 2):
print(n - i + 1, end='')
for j in range(1, i):
temp = temp + 1
print(temp, end='')
print() |
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
stack = []
for i in range(len(s)):
if s[i]=='(' or s[i]=='[' or s[i]=='{':
stack.append(s[i])
elif s[i] == ')':
if len(stack)>0 and stack.pop()=='(': pass
else: return False
elif s[i] == ']':
if len(stack)>0 and stack.pop()=='[': pass
else: return False
elif s[i] == '}':
if len(stack)>0 and stack.pop()=='{': pass
else: return False
if len(stack) > 0: return False
return True
#s = Solution()
#print(s.isValid("((")) | class Solution(object):
def is_valid(self, s):
"""
:type s: str
:rtype: bool
"""
stack = []
for i in range(len(s)):
if s[i] == '(' or s[i] == '[' or s[i] == '{':
stack.append(s[i])
elif s[i] == ')':
if len(stack) > 0 and stack.pop() == '(':
pass
else:
return False
elif s[i] == ']':
if len(stack) > 0 and stack.pop() == '[':
pass
else:
return False
elif s[i] == '}':
if len(stack) > 0 and stack.pop() == '{':
pass
else:
return False
if len(stack) > 0:
return False
return True |
class Solution:
def shortestPalindrome(self, s: str) -> str:
temp = s + '#' + s[::-1]
i = 1
l = 0
lps = [0] * len(temp)
while i < len(temp):
if temp[i] == temp[l]:
lps[i] = l + 1
i += 1
l += 1
elif l != 0:
l = lps[l - 1]
else:
i += 1
return s[l:][::-1] + s
| class Solution:
def shortest_palindrome(self, s: str) -> str:
temp = s + '#' + s[::-1]
i = 1
l = 0
lps = [0] * len(temp)
while i < len(temp):
if temp[i] == temp[l]:
lps[i] = l + 1
i += 1
l += 1
elif l != 0:
l = lps[l - 1]
else:
i += 1
return s[l:][::-1] + s |
class Solution:
"""
@param nums: a list of integers
@return: return a integer
"""
def singleNonDuplicate(self, nums):
n = len(nums)
l = 0
r = n - 1
while l < r:
mid = l + (r - l) // 2
p1 = p2 = mid
if mid > 0 and nums[mid] == nums[mid - 1]:
p1 = mid - 1
elif mid < n - 1 and nums[mid] == nums[mid + 1]:
p2 = mid + 1
else:
return nums[mid]
if (p1 - l) % 2 == 1:
r = p1 - 1
else:
l = p2 + 1
return nums[l] | class Solution:
"""
@param nums: a list of integers
@return: return a integer
"""
def single_non_duplicate(self, nums):
n = len(nums)
l = 0
r = n - 1
while l < r:
mid = l + (r - l) // 2
p1 = p2 = mid
if mid > 0 and nums[mid] == nums[mid - 1]:
p1 = mid - 1
elif mid < n - 1 and nums[mid] == nums[mid + 1]:
p2 = mid + 1
else:
return nums[mid]
if (p1 - l) % 2 == 1:
r = p1 - 1
else:
l = p2 + 1
return nums[l] |
l1 = [1, 3, 5, 7, 9] # list mutable (read write)
t1 = (1, 3, 5, 7, 9) # tuple imutable (read only)
def f(x):
x.append(29)
f(l1)
print(l1)
f(t1)
print(t1)
| l1 = [1, 3, 5, 7, 9]
t1 = (1, 3, 5, 7, 9)
def f(x):
x.append(29)
f(l1)
print(l1)
f(t1)
print(t1) |
"""
Format for midterms(usually) and concepts tested
1. Tracing
- higher order functions(*), function scope
2. Iteration/Recursion
- Order Of Growth, writing iteration or recursion
"""
##################
#Order of Growth #
##################
n = 100000000
k = 40000
"""
Lists
"""
x = [i for i in range(n)]
y = [i for i in range(k)]
x + [1]
#O(n)
#You might not need to know this
x.append(1)
#O(1)
#You might not need to know this
x.extend(y)
#O(length of y)
"""
Tuples
"""
x = tuple(i for i in range(n))
x + (1,)
#O(n)
"""
Strings
"""
x = "".join(str(i) for i in range(n))
x[k:]
#O(n)
x + "1"
#O(n)
"""
Loops
"""
for i in range(n):
print(i)
#Time: O(n)
#Space: O(1)
def recursion(x):
if x == 0:
return x
else:
return x + recursion(x-1)
#Time: O(n)
#Space: O(n)
i = 1
while(i<n):
print(i)
i *= 2
#Time: O(logn)
#Space: O(1)
"""
Trees
"""
def tree(x):
if x == 0:
return x
else:
return tree(x-1) + tree(x-1)
#Time: O(2^n)
#Space: O(n)
"""
Tips
1) Identify if its recursion/iteration
2) Determine number of for loops, or number of levels of recursion.
3) Determine if there is any nested complexity.
i.e.
for i in range(n):
#Some O(n) function, called at each iteration of the for loop.
The complexity of the above algorithm is O(n^2)
If all else fails,
count the number of times certain pieces of code will run and see if it scales linearly, above linear or sublinear.
"""
"""
Helpful sites (If you don't believe me):
https://stackoverflow.com/questions/35180377/time-complexity-of-string-slice
https://stackoverflow.com/questions/33191470/difference-in-complexity-of-append-and-concatenate-for-this-list-code
"""
| """
Format for midterms(usually) and concepts tested
1. Tracing
- higher order functions(*), function scope
2. Iteration/Recursion
- Order Of Growth, writing iteration or recursion
"""
n = 100000000
k = 40000
'\nLists\n'
x = [i for i in range(n)]
y = [i for i in range(k)]
x + [1]
x.append(1)
x.extend(y)
'\nTuples\n'
x = tuple((i for i in range(n)))
x + (1,)
'\nStrings\n'
x = ''.join((str(i) for i in range(n)))
x[k:]
x + '1'
'\nLoops\n'
for i in range(n):
print(i)
def recursion(x):
if x == 0:
return x
else:
return x + recursion(x - 1)
i = 1
while i < n:
print(i)
i *= 2
'\nTrees\n'
def tree(x):
if x == 0:
return x
else:
return tree(x - 1) + tree(x - 1)
'\nTips\n\n1) Identify if its recursion/iteration\n2) Determine number of for loops, or number of levels of recursion.\n3) Determine if there is any nested complexity.\ni.e.\nfor i in range(n):\n #Some O(n) function, called at each iteration of the for loop.\n\nThe complexity of the above algorithm is O(n^2)\n\n\nIf all else fails,\ncount the number of times certain pieces of code will run and see if it scales linearly, above linear or sublinear.\n\n\n'
"\nHelpful sites (If you don't believe me):\nhttps://stackoverflow.com/questions/35180377/time-complexity-of-string-slice\nhttps://stackoverflow.com/questions/33191470/difference-in-complexity-of-append-and-concatenate-for-this-list-code\n" |
# A list contains authorized users' discord IDs.
OWNER = 184335517947658240 # foxfair
# Staff
AUTHORIZED = [
OWNER,
129405976020385792, # Auri
423991805156261889, # Kim
699053180079702098, # Gelica
266289895415218177, # Yang
294058604854509589, # Giana
107209352816914432, # Tooch
97145923691347968, # baosao
137798184721186817, # Vince
]
# Channels that the bot is authorized to send messages.
SEND_MSG_CHANNELS = [
725807955559055451, # bot-logs in my dev server.
# 725798620531785749, # villager-adoption-team in beyond stalks
]
| owner = 184335517947658240
authorized = [OWNER, 129405976020385792, 423991805156261889, 699053180079702098, 266289895415218177, 294058604854509589, 107209352816914432, 97145923691347968, 137798184721186817]
send_msg_channels = [725807955559055451] |
number = [i for i in range(1, 3001330+1)]
# number = [i for i in range(1, 10)]
number2 = number[:]
last = len(number) % 2 != 0
while len(number) > 1:
next_last = len(number) % 2 != last
number = [j for i, j in enumerate(number) if i % 2 != last]
last = next_last
print('#1', number[0])
number = number2
while len(number) > 1:
pop = set()
last = 0
for i in range(len(number) // 2):
last = number[i]
pop.add(number[(2 * i + (len(number) - i) // 2) % len(number)])
number = [i for i in number if i not in pop]
if len(number) == 1: break
pop = set()
start = number.index(last) + 1
for i in range(start, len(number)):
pop.add(number[(i + (len(number) + i - start) // 2) % len(number)])
number = [i for i in number if i not in pop]
print('#2', number[0])
| number = [i for i in range(1, 3001330 + 1)]
number2 = number[:]
last = len(number) % 2 != 0
while len(number) > 1:
next_last = len(number) % 2 != last
number = [j for (i, j) in enumerate(number) if i % 2 != last]
last = next_last
print('#1', number[0])
number = number2
while len(number) > 1:
pop = set()
last = 0
for i in range(len(number) // 2):
last = number[i]
pop.add(number[(2 * i + (len(number) - i) // 2) % len(number)])
number = [i for i in number if i not in pop]
if len(number) == 1:
break
pop = set()
start = number.index(last) + 1
for i in range(start, len(number)):
pop.add(number[(i + (len(number) + i - start) // 2) % len(number)])
number = [i for i in number if i not in pop]
print('#2', number[0]) |
class PagingModifier:
def __init__(self,
Id: int = None,
End: int = None,
Start: int = None,
Limit: int = None,
):
self.Id = Id
self.Start = Start
self.End = End
self.Limit = Limit
| class Pagingmodifier:
def __init__(self, Id: int=None, End: int=None, Start: int=None, Limit: int=None):
self.Id = Id
self.Start = Start
self.End = End
self.Limit = Limit |
employee_file=open("employee.txt","w") #write mode- erases previous content
employee_file.write("David - Software Developer")
print(employee_file.readable())
#print(employee_file.read())
employee_file.close()
#David - Software Developer
# previous data and content is vanished | employee_file = open('employee.txt', 'w')
employee_file.write('David - Software Developer')
print(employee_file.readable())
employee_file.close() |
# Copyright 2020 The Cross-Media Measurement Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository rules/macros for Google Cloud Spanner Emulator."""
def _cloud_spanner_emulator_impl(rctx):
version = rctx.attr.version
sha256 = rctx.attr.sha256
url = "https://storage.googleapis.com/cloud-spanner-emulator/releases/{version}/cloud-spanner-emulator_linux_amd64-{version}.tar.gz".format(version = version)
rctx.download_and_extract(
url = url,
sha256 = sha256,
)
rctx.template(
"BUILD.bazel",
Label("@wfa_measurement_system//build/cloud_spanner_emulator:BUILD.external"),
executable = False,
)
cloud_spanner_emulator_binaries = repository_rule(
implementation = _cloud_spanner_emulator_impl,
attrs = {
"version": attr.string(mandatory = True),
"sha256": attr.string(),
},
)
| """Repository rules/macros for Google Cloud Spanner Emulator."""
def _cloud_spanner_emulator_impl(rctx):
version = rctx.attr.version
sha256 = rctx.attr.sha256
url = 'https://storage.googleapis.com/cloud-spanner-emulator/releases/{version}/cloud-spanner-emulator_linux_amd64-{version}.tar.gz'.format(version=version)
rctx.download_and_extract(url=url, sha256=sha256)
rctx.template('BUILD.bazel', label('@wfa_measurement_system//build/cloud_spanner_emulator:BUILD.external'), executable=False)
cloud_spanner_emulator_binaries = repository_rule(implementation=_cloud_spanner_emulator_impl, attrs={'version': attr.string(mandatory=True), 'sha256': attr.string()}) |
# Copyright 2021 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides methods to perform the input validations.
"""
class Validators:
"""Validator class"""
@classmethod
def validate_string(cls, value: str) -> bool:
"""Validate the string
Args:
value: value to be checked
"""
return bool(value and value.strip())
| """
This module provides methods to perform the input validations.
"""
class Validators:
"""Validator class"""
@classmethod
def validate_string(cls, value: str) -> bool:
"""Validate the string
Args:
value: value to be checked
"""
return bool(value and value.strip()) |
__author__ = 'Lena'
class Group:
def __init__(self, name, header, footer):
self.name=name
self.header=header
self.footer=footer
| __author__ = 'Lena'
class Group:
def __init__(self, name, header, footer):
self.name = name
self.header = header
self.footer = footer |
TICKET_PRODUCTS = '''
query getTicketProducts {
tutorialProducts {
id
type
name
nameKo
nameEn
desc
descKo
descEn
warning
warningKo
warningEn
startAt
finishAt
total
remainingCount
isSoldOut
owner {
profile {
name
nameKo
nameEn
email
image
avatarUrl
}
}
price
isEditablePrice
isUniqueInType
active
cancelableDate
ticketOpenAt
ticketCloseAt
createdAt
updatedAt
purchaseCount
isPurchased
}
conferenceProducts {
id
type
name
nameKo
nameEn
desc
descKo
descEn
warning
warningKo
warningEn
startAt
finishAt
total
remainingCount
isSoldOut
owner {
profile {
name
nameKo
nameEn
email
image
avatarUrl
}
}
price
isEditablePrice
isUniqueInType
active
cancelableDate
ticketOpenAt
ticketCloseAt
createdAt
updatedAt
purchaseCount
isPurchased
}
}
'''
BUY_TICKET = '''
mutation BuyTicket($productId: ID!, $payment: PaymentInput!, $options: JSONString) {
buyTicket(productId:$productId, payment: $payment, options:$options) {
ticket{
id
amount
merchantUid
impUid
pgTid
receiptUrl
paidAt
status
}
}
}
'''
MY_TICKETS = '''
query getMyTickets {
myTickets {
isDomesticCard
amount
merchantUid
receiptUrl
paidAt
cancelReceiptUrl
cancelledAt
status
product{
id
type
name
nameKo
nameEn
desc
descKo
descEn
startAt
finishAt
total
owner {
profile {
name
nameKo
nameEn
email
image
avatarUrl
}
}
price
isEditablePrice
isUniqueInType
active
cancelableDate
ticketOpenAt
ticketCloseAt
createdAt
updatedAt
purchaseCount
}
options
}
}
'''
TICKET = '''
query getTicket($globalId: ID, $id: Int) {
ticket(globalId: $globalId, id: $id) {
isDomesticCard
amount
merchantUid
receiptUrl
paidAt
cancelReceiptUrl
cancelledAt
status
product{
id
type
name
nameKo
nameEn
desc
descKo
descEn
startAt
finishAt
total
owner {
profile {
name
nameKo
nameEn
email
image
avatarUrl
}
}
price
isEditablePrice
isUniqueInType
active
cancelableDate
ticketOpenAt
ticketCloseAt
createdAt
updatedAt
purchaseCount
}
options
}
}
'''
CANCEL_TICKET = '''
mutation cancelTicket($ticketId: ID!) {
cancelTicket(ticketId:$ticketId) {
ticket{
id
status
impUid
pgTid
receiptUrl
paidAt
cancelReceiptUrl
cancelledAt
}
}
}
'''
| ticket_products = '\nquery getTicketProducts {\n tutorialProducts {\n id\n type\n name\n nameKo\n nameEn\n desc\n descKo\n descEn\n warning\n warningKo\n warningEn\n startAt\n finishAt\n total\n remainingCount\n isSoldOut\n owner {\n profile {\n name\n nameKo\n nameEn\n email\n image\n avatarUrl\n }\n }\n price\n isEditablePrice\n isUniqueInType\n active\n cancelableDate\n ticketOpenAt\n ticketCloseAt\n createdAt\n updatedAt\n purchaseCount\n isPurchased\n }\n conferenceProducts {\n id\n type\n name\n nameKo\n nameEn\n desc\n descKo\n descEn\n warning\n warningKo\n warningEn\n startAt\n finishAt\n total\n remainingCount\n isSoldOut\n owner {\n profile {\n name\n nameKo\n nameEn\n email\n image\n avatarUrl\n }\n }\n price\n isEditablePrice\n isUniqueInType\n active\n cancelableDate\n ticketOpenAt\n ticketCloseAt\n createdAt\n updatedAt\n purchaseCount\n isPurchased\n }\n}\n'
buy_ticket = '\nmutation BuyTicket($productId: ID!, $payment: PaymentInput!, $options: JSONString) {\n buyTicket(productId:$productId, payment: $payment, options:$options) {\n ticket{\n id\n amount\n merchantUid\n impUid\n pgTid\n receiptUrl\n paidAt\n status\n }\n }\n}\n'
my_tickets = '\nquery getMyTickets {\n myTickets {\n isDomesticCard\n amount\n merchantUid\n receiptUrl\n paidAt\n cancelReceiptUrl\n cancelledAt\n status\n \n product{\n id\n type\n name\n nameKo\n nameEn\n desc\n descKo\n descEn\n startAt\n finishAt\n total\n owner {\n profile {\n name\n nameKo\n nameEn\n email\n image\n avatarUrl\n }\n }\n price\n isEditablePrice\n isUniqueInType\n active\n cancelableDate\n ticketOpenAt\n ticketCloseAt\n createdAt\n updatedAt\n purchaseCount\n }\n options\n }\n}\n'
ticket = '\nquery getTicket($globalId: ID, $id: Int) {\n ticket(globalId: $globalId, id: $id) {\n isDomesticCard\n amount\n merchantUid\n receiptUrl\n paidAt\n cancelReceiptUrl\n cancelledAt\n status\n \n product{\n id\n type\n name\n nameKo\n nameEn\n desc\n descKo\n descEn\n startAt\n finishAt\n total\n owner {\n profile {\n name\n nameKo\n nameEn\n email\n image\n avatarUrl\n }\n }\n price\n isEditablePrice\n isUniqueInType\n active\n cancelableDate\n ticketOpenAt\n ticketCloseAt\n createdAt\n updatedAt\n purchaseCount\n }\n options\n }\n}\n'
cancel_ticket = '\nmutation cancelTicket($ticketId: ID!) {\n cancelTicket(ticketId:$ticketId) {\n ticket{\n id\n status\n impUid\n pgTid\n receiptUrl\n paidAt\n cancelReceiptUrl\n cancelledAt\n }\n }\n}\n' |
"""
International Morse Code defines a standard encoding where each letter is mapped to a series of dots and dashes,
as follows: "a" maps to ".-", "b" maps to "-...", "c" maps to "-.-.", and so on.
For convenience, the full table for the 26 letters of the English alphabet is given below:
[".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
Now, given a list of words, each word can be written as a concatenation of the Morse code of each letter. For example, "cba" can be written as "-.-..--...", (which is the concatenation "-.-." + "-..." + ".-"). We'll call such a concatenation, the transformation of a word.
Return the number of different transformations among all words we have.
Example:
Input: words = ["gin", "zen", "gig", "msg"]
Output: 2
Explanation:
The transformation of each word is:
"gin" -> "--...-."
"zen" -> "--...-."
"gig" -> "--...--."
"msg" -> "--...--."
There are 2 different transformations, "--...-." and "--...--.".
Note:
The length of words will be at most 100.
Each words[i] will have length in range [1, 12].
words[i] will only consist of lowercase letters.
"""
MORSE = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...",
"-","..-","...-",".--","-..-","-.--","--.."]
class Solution:
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
morse_words = [
''.join([
MORSE[ord(letter) - 97] for letter in word
])
for word in words
]
return len(set(morse_words))
result = Solution().uniqueMorseRepresentations(["gin", "zen", "gig", "msg"])
print(result)
| """
International Morse Code defines a standard encoding where each letter is mapped to a series of dots and dashes,
as follows: "a" maps to ".-", "b" maps to "-...", "c" maps to "-.-.", and so on.
For convenience, the full table for the 26 letters of the English alphabet is given below:
[".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
Now, given a list of words, each word can be written as a concatenation of the Morse code of each letter. For example, "cba" can be written as "-.-..--...", (which is the concatenation "-.-." + "-..." + ".-"). We'll call such a concatenation, the transformation of a word.
Return the number of different transformations among all words we have.
Example:
Input: words = ["gin", "zen", "gig", "msg"]
Output: 2
Explanation:
The transformation of each word is:
"gin" -> "--...-."
"zen" -> "--...-."
"gig" -> "--...--."
"msg" -> "--...--."
There are 2 different transformations, "--...-." and "--...--.".
Note:
The length of words will be at most 100.
Each words[i] will have length in range [1, 12].
words[i] will only consist of lowercase letters.
"""
morse = ['.-', '-...', '-.-.', '-..', '.', '..-.', '--.', '....', '..', '.---', '-.-', '.-..', '--', '-.', '---', '.--.', '--.-', '.-.', '...', '-', '..-', '...-', '.--', '-..-', '-.--', '--..']
class Solution:
def unique_morse_representations(self, words):
"""
:type words: List[str]
:rtype: int
"""
morse_words = [''.join([MORSE[ord(letter) - 97] for letter in word]) for word in words]
return len(set(morse_words))
result = solution().uniqueMorseRepresentations(['gin', 'zen', 'gig', 'msg'])
print(result) |
# This resets variables every time the Supreme While Loop is reset
# If you tried to mess around with this code, you'll find results/errors on the second time the While Loop is executed
# Messing with this code will generally not break anything on the first topic that Indra talks About
yesnorep = True
ignoreinteract = False
repeating = 0
cussed = False
sensitive = False
topicdone = False
nevermind = False
if ignoreinteract == False:
interactions = interactions + 1
if loveBonus == 3 and interest < 75:
print("Hey, " + name + ".")
sleep(1.5)
print("Recently I can't help but feel that all my love and affection for\nyou is being ignored")
sleep(2)
print("As if you didn't care about me.")
sleep(1.5)
loveBonus = 0
interest = interest - 5
ChangeMind()
if negloveBonus == 3 and interest > 150 and interactions >= 30:
print("Hey, " + name + ".")
sleep(1.5)
print("Remember that one time you said you loved me, and I rejected you?")
sleep(2.5)
print("Well...")
sleep(1.5)
print("I've decided to forget that and move on.")
sleep(1.75)
print("What's in the past is in the past, right?")
sleep(1.5)
print("We should forgive those who have wronged us in the past.")
sleep(2.5)
print("Now that I've gotten to know you more, I think you aren't actually that bad!")
sleep(2.5)
print("Maybe I should've given you a better chance...")
sleep(2.5)
print("Oh well...")
sleep(1.75)
print("Just wanted to clear that up!")
CustomRecord("Changed Mind", "Neglove --> Neutral", +3)
if os.path.exists(intlog) == False:
StartEdit()
exactcurrentdatelist = list((datetime.now()).timetuple())
currentdatelist = [exactcurrentdatelist[0],
exactcurrentdatelist[1], exactcurrentdatelist[2]]
lastlogdate = currentdatelist
if isinstance(birthdate, date):
if currentdate.year == nextyear and birthdate != date(1000, 1, 1):
if currentdate.month == birthdate.month and currentdate.day == birthdate.day:
age = age + 1
CustomRecord("Birthday", str(age), +10)
print("Wait a minute...")
sleep(2)
slowprint(lead_dots = True)
sleep(2)
print("Congratulations!")
sleep(1.5)
print("It seems that today is your birthday!")
sleep(1.55)
print(f"Looks like you're turning {age} today!")
sleep(2)
print("Good for you!")
sleep(1.5)
if age == 16:
print(f"Turning {age} is a very important milestone!")
sleep(1.75)
elif age == 18:
print(
f"Turning {age} is probably one of the most important milestones in one's life!")
sleep(2.5)
print("I've been working on something recently...")
sleep(2.75)
print("I would love to show it to you!")
sleep(1.75)
print("Let me see...")
sleep(1.75)
print("Oh! There it is!")
sleep(1.75)
notify("Happy Birthday!", f"Congratulations on turning {age}!")
sleep(2)
print("Do you like it?")
sleep(1.5)
print("I worked pretty hard trying to figure that out.")
sleep(2.5)
print("I Wonder what you'll do on this special occasion?")
sleep(2.25)
print ("Well, whatever it is, I hope you have fun!")
sleep(2.5)
Save()
| yesnorep = True
ignoreinteract = False
repeating = 0
cussed = False
sensitive = False
topicdone = False
nevermind = False
if ignoreinteract == False:
interactions = interactions + 1
if loveBonus == 3 and interest < 75:
print('Hey, ' + name + '.')
sleep(1.5)
print("Recently I can't help but feel that all my love and affection for\nyou is being ignored")
sleep(2)
print("As if you didn't care about me.")
sleep(1.5)
love_bonus = 0
interest = interest - 5
change_mind()
if negloveBonus == 3 and interest > 150 and (interactions >= 30):
print('Hey, ' + name + '.')
sleep(1.5)
print('Remember that one time you said you loved me, and I rejected you?')
sleep(2.5)
print('Well...')
sleep(1.5)
print("I've decided to forget that and move on.")
sleep(1.75)
print("What's in the past is in the past, right?")
sleep(1.5)
print('We should forgive those who have wronged us in the past.')
sleep(2.5)
print("Now that I've gotten to know you more, I think you aren't actually that bad!")
sleep(2.5)
print("Maybe I should've given you a better chance...")
sleep(2.5)
print('Oh well...')
sleep(1.75)
print('Just wanted to clear that up!')
custom_record('Changed Mind', 'Neglove --> Neutral', +3)
if os.path.exists(intlog) == False:
start_edit()
exactcurrentdatelist = list(datetime.now().timetuple())
currentdatelist = [exactcurrentdatelist[0], exactcurrentdatelist[1], exactcurrentdatelist[2]]
lastlogdate = currentdatelist
if isinstance(birthdate, date):
if currentdate.year == nextyear and birthdate != date(1000, 1, 1):
if currentdate.month == birthdate.month and currentdate.day == birthdate.day:
age = age + 1
custom_record('Birthday', str(age), +10)
print('Wait a minute...')
sleep(2)
slowprint(lead_dots=True)
sleep(2)
print('Congratulations!')
sleep(1.5)
print('It seems that today is your birthday!')
sleep(1.55)
print(f"Looks like you're turning {age} today!")
sleep(2)
print('Good for you!')
sleep(1.5)
if age == 16:
print(f'Turning {age} is a very important milestone!')
sleep(1.75)
elif age == 18:
print(f"Turning {age} is probably one of the most important milestones in one's life!")
sleep(2.5)
print("I've been working on something recently...")
sleep(2.75)
print('I would love to show it to you!')
sleep(1.75)
print('Let me see...')
sleep(1.75)
print('Oh! There it is!')
sleep(1.75)
notify('Happy Birthday!', f'Congratulations on turning {age}!')
sleep(2)
print('Do you like it?')
sleep(1.5)
print('I worked pretty hard trying to figure that out.')
sleep(2.5)
print("I Wonder what you'll do on this special occasion?")
sleep(2.25)
print('Well, whatever it is, I hope you have fun!')
sleep(2.5)
save() |
class AbstractRemoteDatabase(object):
"""
Abstract base class which all remote database connections must
inherit from
Defines the public interface of any implementation
"""
def connect(self, **kwargs):
"""Connect to the course info database
"""
raise NotImplementedError()
def disconnect(self, **kwargs):
"""Disconnect from the course info database
"""
raise NotImplementedError()
def save_search(self, name, **kwargs):
"""Save a search by name
:param kwargs: parameters for the search.
eg for LDAP servers, kwargs holds:
* search_flt
* attrs
* limit
* path
Typical usage: parameters are defined in a config file,
and ``save_search`` is used to save them for later usage
"""
raise NotImplementedError()
def search(self, name, limit=None, **kwargs):
"""Search the database using a saved search
Is a wrapper for _search()
:param str name: name of the saved search
"""
raise NotImplementedError()
def known_searches(self):
"""Return names of known searches
:returns: list of names of searches currently saved
:rtype: list of strings
"""
raise NotImplementedError()
def _search(self, limit=None, *args, **kwargs):
"""Lowest level search method
:py:func:`search` should wrap this method in order
to pass in the proper arguments
"""
raise NotImplementedError()
| class Abstractremotedatabase(object):
"""
Abstract base class which all remote database connections must
inherit from
Defines the public interface of any implementation
"""
def connect(self, **kwargs):
"""Connect to the course info database
"""
raise not_implemented_error()
def disconnect(self, **kwargs):
"""Disconnect from the course info database
"""
raise not_implemented_error()
def save_search(self, name, **kwargs):
"""Save a search by name
:param kwargs: parameters for the search.
eg for LDAP servers, kwargs holds:
* search_flt
* attrs
* limit
* path
Typical usage: parameters are defined in a config file,
and ``save_search`` is used to save them for later usage
"""
raise not_implemented_error()
def search(self, name, limit=None, **kwargs):
"""Search the database using a saved search
Is a wrapper for _search()
:param str name: name of the saved search
"""
raise not_implemented_error()
def known_searches(self):
"""Return names of known searches
:returns: list of names of searches currently saved
:rtype: list of strings
"""
raise not_implemented_error()
def _search(self, limit=None, *args, **kwargs):
"""Lowest level search method
:py:func:`search` should wrap this method in order
to pass in the proper arguments
"""
raise not_implemented_error() |
__author__ = "Lucas Grulich (grulich@uni-mainz.de)"
__version__ = "0.0.14"
# --- Globals ----------------------------------------------------------------------------------------------------------
MONOSCALE_SHADOWLEVEL = 1
OAP_FILE_EXTENSION = ".oap"
DEFAULT_TYPE = "ARRAY2D"
SLICE_SIZE = 64
# --- Markers ------------------------------------------------
MARKER = {
'poisson': 7, # Value of the poisson spot
'flood_fill': 8, # Value of the flood fill
}
# --- Colors -------------------------------------------------
COLOR = {
0: 0, # Shadow level 0 -> background color of images.
1: 100, # Shadow level 1 -> usually between 25% and 33%
2: 200, # Shadow level 2 -> light intensity of 50 %
3: 255, # Shadow level 3 -> usually between 66% and 75%
MARKER['poisson']: 50, # Poisson spot color
}
# --- Particle Types -----------------------------------------
UNDEFINED = b'u' # Not yet classified
INDEFINABLE = b'i' # Not possible to classify
ERRONEOUS = b'e' # Artefacts or erroneous images
SPHERE = b's' # Spherical particles
COLUMN = b'c' # Column-like particles
ROSETTE = b'r' # Rosettes
DENDRITE = b'd' # Dendrites
PLATE = b'p' # Plates
| __author__ = 'Lucas Grulich (grulich@uni-mainz.de)'
__version__ = '0.0.14'
monoscale_shadowlevel = 1
oap_file_extension = '.oap'
default_type = 'ARRAY2D'
slice_size = 64
marker = {'poisson': 7, 'flood_fill': 8}
color = {0: 0, 1: 100, 2: 200, 3: 255, MARKER['poisson']: 50}
undefined = b'u'
indefinable = b'i'
erroneous = b'e'
sphere = b's'
column = b'c'
rosette = b'r'
dendrite = b'd'
plate = b'p' |
# -*- coding: utf-8 -*-
class Java(object):
KEY = 'java'
LABEL = 'Java'
DEPENDENCIES = ['java', 'javac']
TEMP_DIR = 'java'
SUFFIX = '.java'
# javac {class_path} tmp/Estimator.java
# class_path = '-cp ./gson.jar'
CMD_COMPILE = 'javac {class_path} {src_dir}/{src_file}'
# java {class_path} Estimator <args>
# class_path = '-cp ./gson.jar:./tmp'
CMD_EXECUTE = 'java {class_path} {dest_dir}/{dest_file}'
| class Java(object):
key = 'java'
label = 'Java'
dependencies = ['java', 'javac']
temp_dir = 'java'
suffix = '.java'
cmd_compile = 'javac {class_path} {src_dir}/{src_file}'
cmd_execute = 'java {class_path} {dest_dir}/{dest_file}' |
#Dictionaries Challenge 22: Database Admin Program
print("Welcome to the Database Admin Program")
#Create a dictionary to hold all username:password key-value pairs
log_on_information = {
'mooman74':'alskes145',
'meramo1986':'kehns010101',
'nickyD':'world1star',
'george2':'booo3oha',
'admin00':'admin1234',
}
#Get user input
username = input("Enter your username: ")
#Simulate logging on...
#Get user password
if username in log_on_information.keys():
password = input("Enter your password: ")
if password == log_on_information[username]:
print("\nHello " + username + "! You are logged in!")
if username == 'admin00':
#Show the whole database to the admin account
print("\nHere is the current user database:")
for key, value in log_on_information.items():
print("Username: " + key + "\t\tPassword: " + value)
else:
#Allow standard user to change their password
password_change = input("Would you like to change your password (yes/no): ").lower().strip()
if password_change == 'yes':
new_password = input("What would you like your new password to be (min 8 chars): ")
if len(new_password) >= 8:
log_on_information[username] = new_password
else:
print(new_password + " is not the minimum eight characters.")
print("\n" + username + " your password is " + log_on_information[username] + ".")
else:
print("\nThank you, goodbye.")
#User did not enter their password correctly
else:
print("Password incorrect!")
#User not in database
else:
print("Username not in database. Goodbye.")
| print('Welcome to the Database Admin Program')
log_on_information = {'mooman74': 'alskes145', 'meramo1986': 'kehns010101', 'nickyD': 'world1star', 'george2': 'booo3oha', 'admin00': 'admin1234'}
username = input('Enter your username: ')
if username in log_on_information.keys():
password = input('Enter your password: ')
if password == log_on_information[username]:
print('\nHello ' + username + '! You are logged in!')
if username == 'admin00':
print('\nHere is the current user database:')
for (key, value) in log_on_information.items():
print('Username: ' + key + '\t\tPassword: ' + value)
else:
password_change = input('Would you like to change your password (yes/no): ').lower().strip()
if password_change == 'yes':
new_password = input('What would you like your new password to be (min 8 chars): ')
if len(new_password) >= 8:
log_on_information[username] = new_password
else:
print(new_password + ' is not the minimum eight characters.')
print('\n' + username + ' your password is ' + log_on_information[username] + '.')
else:
print('\nThank you, goodbye.')
else:
print('Password incorrect!')
else:
print('Username not in database. Goodbye.') |
class Inventory:
def __init__(self):
self._prisonKeys = False
self._sunflowerSeeds = False
self._guardiansMoney = False
self._guardiansSword = False
self._dragonsKey = False
@property
def prison_keys(self):
return self._prisonKeys
@prison_keys.setter
def prison_keys(self, value):
self._prisonKeys = value
@property
def sunflower_seeds(self):
return self._sunflowerSeeds
@sunflower_seeds.setter
def sunflower_seeds(self, value):
self._sunflowerSeeds = value
@property
def guardians_money(self):
return self._guardiansMoney
@guardians_money.setter
def guardians_money(self, value):
self._guardiansMoney
@property
def guardians_sword(self):
return self._guardiansSword
@guardians_sword.setter
def guardians_sword(self, value):
self._guardiansSword = value
@property
def dragons_key(self):
return self._dragonsKey
@dragons_key.setter
def dragons_key(self, value):
self._dragonsKey = value
| class Inventory:
def __init__(self):
self._prisonKeys = False
self._sunflowerSeeds = False
self._guardiansMoney = False
self._guardiansSword = False
self._dragonsKey = False
@property
def prison_keys(self):
return self._prisonKeys
@prison_keys.setter
def prison_keys(self, value):
self._prisonKeys = value
@property
def sunflower_seeds(self):
return self._sunflowerSeeds
@sunflower_seeds.setter
def sunflower_seeds(self, value):
self._sunflowerSeeds = value
@property
def guardians_money(self):
return self._guardiansMoney
@guardians_money.setter
def guardians_money(self, value):
self._guardiansMoney
@property
def guardians_sword(self):
return self._guardiansSword
@guardians_sword.setter
def guardians_sword(self, value):
self._guardiansSword = value
@property
def dragons_key(self):
return self._dragonsKey
@dragons_key.setter
def dragons_key(self, value):
self._dragonsKey = value |
class Vulnerability:
"""
Class for representing necessary information about vulnerability.
Dictionary is used for storing cvssv2 and cvssv3 because of constant access.
cpe_type contains values:
'h' for hardware,
'a' for application,
'o' for operating system.
"""
def __init__(self):
self.CVE_description_value = ""
self.CVE_problem_type_value = ""
self.cvssv2 = {}
self.cvssv3 = {}
self.cpe_type = set()
def read_lines_with_no_information(name_of_file):
"""
Reads blocks with unnecessary information.
:param name_of_file: The name of file from which function will read.
Block ends with line containing only "},".
"""
line = name_of_file.readline()
while ('},' not in line)or('} , {' in line):
line = name_of_file.readline()
def remove_quotation_marks(source_string):
"""
:param source_string: String from which quotation marks will be removed (but only the outermost).
:return: String without the outermost quotation marks and the outermost white characters.
"""
first = source_string.find('"')
second = source_string[first+1:].rfind('"')
return source_string[first+1: second+first+1][:].strip()
def get_description_value(name_of_file):
"""
:param name_of_file: Source file for function.
:return: Description value for particular CVE.
"""
line = name_of_file.readline()
while 'value" :' not in line:
line = name_of_file.readline()
tmp_list = line.split(':')
if len(tmp_list) == 2:
value = tmp_list[1][:]
return value
else:
# When description value contains ":" too.
concatenation = ""
for i in range(1, len(tmp_list)-1):
concatenation = concatenation + tmp_list[i] + ":"
concatenation = concatenation + tmp_list[-1]
return concatenation
def get_problem_type_value(name_of_file):
"""
:param name_of_file: Source file.
:return: Problem type value describing weakness for particular CVE.
"""
line = name_of_file.readline()
while ('value" : ' not in line)and('},' not in line):
line = name_of_file.readline()
if '},' not in line:
tmp_list = line.split(":")
value = tmp_list[1][:]
return value
else:
# Not every Problem type block contains value. Return value chosen for better further processing.
return ""
def get_impact_vector_cvssv2(name_of_file):
"""
:param name_of_file: Source file.
:return: Dictionary containing cvssv2 information.
"""
cvssv2_dict = {}
line = name_of_file.readline()
# Counting name of braces to know when we leave cvssv2 block.
number_of_braces = 2
while ('"av" :' not in line)and('"accessVector" :' not in line):
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv2_dict["av"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv2_dict["ac"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv2_dict["au"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv2_dict["c"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv2_dict["i"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv2_dict["a"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv2_dict["score"] = tmp_list[1].strip()
while number_of_braces != 0:
line = name_of_file.readline()
if '}' in line:
number_of_braces -= 1
else:
# We want to store also the other helpful information
tmp_list = line.split(":")
if len(tmp_list) == 2:
cvssv2_dict[remove_quotation_marks(tmp_list[0])] = remove_quotation_marks(tmp_list[1]).rstrip(',')
return cvssv2_dict
def get_impact_vector_cvssv3(name_of_file):
"""
:param name_of_file: Source file.
:return: Dictionary containing cvssv3 information.
"""
cvssv3_dict = {}
line = name_of_file.readline()
# Counting name of braces to know when we leave cvssv2 block.
number_of_braces = 2
while ('"av" :' not in line)and('"attackVector" :' not in line):
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["av"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["ac"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["pr"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["ui"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["s"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["c"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["i"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["a"] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(":")
cvssv3_dict["score"] = tmp_list[1].strip().rstrip(',')
while number_of_braces != 0:
line = name_of_file.readline()
if '}' in line:
number_of_braces -= 1
else:
tmp_list = line.split(":")
if len(tmp_list) == 2:
cvssv3_dict[remove_quotation_marks(tmp_list[0])] = remove_quotation_marks(tmp_list[1]).rstrip(',')
return cvssv3_dict
def get_cpe_type(name_of_file):
cpe_type = set()
line = name_of_file.readline()
while ('},' not in line)or('}, {' in line):
if '"cpeMatchString" : "cpe:/' in line:
cpe_type.add(line[line.find('"cpeMatchString" : "cpe:/') + len('"cpeMatchString" : "cpe:/')])
line = name_of_file.readline()
return cpe_type
def parse(name_of_file):
"""
:param name_of_file: Source file.
:return: Dictionary containing
"""
dict_of_vulnerabilities = {}
with open(name_of_file, "r") as cve_file:
line = cve_file.readline()
while line != "":
if '"CVE_data_meta" : {' in line:
# Store CVE_ID as key.
line = cve_file.readline()
tmp_list = line.split(':')
cve_id = remove_quotation_marks(tmp_list[1])
dict_of_vulnerabilities[cve_id] = Vulnerability()
# The following line is "},"
line = cve_file.readline()
# The following block is block "affects".
read_lines_with_no_information(cve_file)
line = cve_file.readline()
while line != "":
if '"description" : {' in line:
description_value = remove_quotation_marks(get_description_value(cve_file))
dict_of_vulnerabilities[cve_id].CVE_description_value = description_value
read_lines_with_no_information(cve_file)
elif '"impact" : {' in line:
if '"impact" : { },' in line:
# Impact block can be empty
break
line = cve_file.readline()
while "cvss" not in line:
line = cve_file.readline()
cvssv2_was_read = False
# cvssv2 and cvssv3 can be in both orders
if '2' in line:
cvssv2_vector = get_impact_vector_cvssv2(cve_file)
cvssv2_was_read = True
dict_of_vulnerabilities[cve_id].cvssv2 = cvssv2_vector
line = cve_file.readline()
if '3' in line:
cvssv3_vector = get_impact_vector_cvssv3(cve_file)
dict_of_vulnerabilities[cve_id].cvssv3 = cvssv3_vector
line = cve_file.readline()
line = cve_file.readline()
if not cvssv2_was_read:
cvssv2_vector = get_impact_vector_cvssv2(cve_file)
dict_of_vulnerabilities[cve_id].cvssv2 = cvssv2_vector
# Impact is the last block
break
elif '"problemtype" : {' in line:
problem_type_value = remove_quotation_marks(get_problem_type_value(cve_file))
dict_of_vulnerabilities[cve_id].CVE_problem_type_value = problem_type_value
if '},' not in line:
read_lines_with_no_information(cve_file)
elif '"configurations" : {' in line:
cpe_type = get_cpe_type(cve_file)
dict_of_vulnerabilities[cve_id].cpe_type = cpe_type
else:
read_lines_with_no_information(cve_file)
line = cve_file.readline()
line = cve_file.readline()
return dict_of_vulnerabilities
| class Vulnerability:
"""
Class for representing necessary information about vulnerability.
Dictionary is used for storing cvssv2 and cvssv3 because of constant access.
cpe_type contains values:
'h' for hardware,
'a' for application,
'o' for operating system.
"""
def __init__(self):
self.CVE_description_value = ''
self.CVE_problem_type_value = ''
self.cvssv2 = {}
self.cvssv3 = {}
self.cpe_type = set()
def read_lines_with_no_information(name_of_file):
"""
Reads blocks with unnecessary information.
:param name_of_file: The name of file from which function will read.
Block ends with line containing only "},".
"""
line = name_of_file.readline()
while '},' not in line or '} , {' in line:
line = name_of_file.readline()
def remove_quotation_marks(source_string):
"""
:param source_string: String from which quotation marks will be removed (but only the outermost).
:return: String without the outermost quotation marks and the outermost white characters.
"""
first = source_string.find('"')
second = source_string[first + 1:].rfind('"')
return source_string[first + 1:second + first + 1][:].strip()
def get_description_value(name_of_file):
"""
:param name_of_file: Source file for function.
:return: Description value for particular CVE.
"""
line = name_of_file.readline()
while 'value" :' not in line:
line = name_of_file.readline()
tmp_list = line.split(':')
if len(tmp_list) == 2:
value = tmp_list[1][:]
return value
else:
concatenation = ''
for i in range(1, len(tmp_list) - 1):
concatenation = concatenation + tmp_list[i] + ':'
concatenation = concatenation + tmp_list[-1]
return concatenation
def get_problem_type_value(name_of_file):
"""
:param name_of_file: Source file.
:return: Problem type value describing weakness for particular CVE.
"""
line = name_of_file.readline()
while 'value" : ' not in line and '},' not in line:
line = name_of_file.readline()
if '},' not in line:
tmp_list = line.split(':')
value = tmp_list[1][:]
return value
else:
return ''
def get_impact_vector_cvssv2(name_of_file):
"""
:param name_of_file: Source file.
:return: Dictionary containing cvssv2 information.
"""
cvssv2_dict = {}
line = name_of_file.readline()
number_of_braces = 2
while '"av" :' not in line and '"accessVector" :' not in line:
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv2_dict['av'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv2_dict['ac'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv2_dict['au'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv2_dict['c'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv2_dict['i'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv2_dict['a'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv2_dict['score'] = tmp_list[1].strip()
while number_of_braces != 0:
line = name_of_file.readline()
if '}' in line:
number_of_braces -= 1
else:
tmp_list = line.split(':')
if len(tmp_list) == 2:
cvssv2_dict[remove_quotation_marks(tmp_list[0])] = remove_quotation_marks(tmp_list[1]).rstrip(',')
return cvssv2_dict
def get_impact_vector_cvssv3(name_of_file):
"""
:param name_of_file: Source file.
:return: Dictionary containing cvssv3 information.
"""
cvssv3_dict = {}
line = name_of_file.readline()
number_of_braces = 2
while '"av" :' not in line and '"attackVector" :' not in line:
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['av'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['ac'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['pr'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['ui'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['s'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['c'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['i'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['a'] = remove_quotation_marks(tmp_list[1])
line = name_of_file.readline()
tmp_list = line.split(':')
cvssv3_dict['score'] = tmp_list[1].strip().rstrip(',')
while number_of_braces != 0:
line = name_of_file.readline()
if '}' in line:
number_of_braces -= 1
else:
tmp_list = line.split(':')
if len(tmp_list) == 2:
cvssv3_dict[remove_quotation_marks(tmp_list[0])] = remove_quotation_marks(tmp_list[1]).rstrip(',')
return cvssv3_dict
def get_cpe_type(name_of_file):
cpe_type = set()
line = name_of_file.readline()
while '},' not in line or '}, {' in line:
if '"cpeMatchString" : "cpe:/' in line:
cpe_type.add(line[line.find('"cpeMatchString" : "cpe:/') + len('"cpeMatchString" : "cpe:/')])
line = name_of_file.readline()
return cpe_type
def parse(name_of_file):
"""
:param name_of_file: Source file.
:return: Dictionary containing
"""
dict_of_vulnerabilities = {}
with open(name_of_file, 'r') as cve_file:
line = cve_file.readline()
while line != '':
if '"CVE_data_meta" : {' in line:
line = cve_file.readline()
tmp_list = line.split(':')
cve_id = remove_quotation_marks(tmp_list[1])
dict_of_vulnerabilities[cve_id] = vulnerability()
line = cve_file.readline()
read_lines_with_no_information(cve_file)
line = cve_file.readline()
while line != '':
if '"description" : {' in line:
description_value = remove_quotation_marks(get_description_value(cve_file))
dict_of_vulnerabilities[cve_id].CVE_description_value = description_value
read_lines_with_no_information(cve_file)
elif '"impact" : {' in line:
if '"impact" : { },' in line:
break
line = cve_file.readline()
while 'cvss' not in line:
line = cve_file.readline()
cvssv2_was_read = False
if '2' in line:
cvssv2_vector = get_impact_vector_cvssv2(cve_file)
cvssv2_was_read = True
dict_of_vulnerabilities[cve_id].cvssv2 = cvssv2_vector
line = cve_file.readline()
if '3' in line:
cvssv3_vector = get_impact_vector_cvssv3(cve_file)
dict_of_vulnerabilities[cve_id].cvssv3 = cvssv3_vector
line = cve_file.readline()
line = cve_file.readline()
if not cvssv2_was_read:
cvssv2_vector = get_impact_vector_cvssv2(cve_file)
dict_of_vulnerabilities[cve_id].cvssv2 = cvssv2_vector
break
elif '"problemtype" : {' in line:
problem_type_value = remove_quotation_marks(get_problem_type_value(cve_file))
dict_of_vulnerabilities[cve_id].CVE_problem_type_value = problem_type_value
if '},' not in line:
read_lines_with_no_information(cve_file)
elif '"configurations" : {' in line:
cpe_type = get_cpe_type(cve_file)
dict_of_vulnerabilities[cve_id].cpe_type = cpe_type
else:
read_lines_with_no_information(cve_file)
line = cve_file.readline()
line = cve_file.readline()
return dict_of_vulnerabilities |
_close_methods = []
def session_close_method(close_method: callable) -> callable:
"""
A method that will be automatically
called when the session closes.
:param close_method: Method to call on session close.
:return: The decorated method that will be called on session close.
"""
_close_methods.append(close_method)
return close_method
def call_session_close_methods() -> None:
"""
Call all of the methods decorated with @session_close_method.
:return: None
"""
for m in _close_methods:
m()
| _close_methods = []
def session_close_method(close_method: callable) -> callable:
"""
A method that will be automatically
called when the session closes.
:param close_method: Method to call on session close.
:return: The decorated method that will be called on session close.
"""
_close_methods.append(close_method)
return close_method
def call_session_close_methods() -> None:
"""
Call all of the methods decorated with @session_close_method.
:return: None
"""
for m in _close_methods:
m() |
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: caronni@google.com (Germano Caronni)
"""Deal with Microsoft-specific Authenticode data."""
# Comments and constant names as extracted from pecoff_v8 specs.
# Variable names are also used as defined in the PECOFF specification.
# pylint: disable-msg=C6409
# Version 1, legacy version of Win_Certificate structure. It is supported
# only for purposes of verifying legacy Authenticode signatures.
WIN_CERT_REVISION_1_0 = 0x100
# Version 2 is the current version of the Win_Certificate structure.
WIN_CERT_REVISION_2_0 = 0x200
# Only type PKCS is supported by the pecoff specification.
WIN_CERT_TYPE_X509 = 1
WIN_CERT_TYPE_PKCS_SIGNED_DATA = 2
WIN_CERT_TYPE_RESERVED_1 = 3
WIN_CERT_TYPE_TS_STACK_SIGNED = 4
class PecoffBlob(object):
"""Encapsulating class for Microsoft-specific Authenticode data.
As defined in the PECOFF (v8) and Authenticode specifications.
This is data as it is extracted from the signature_data field by
the fingerprinter.
"""
def __init__(self, signed_data_tuple):
self._wRevision = signed_data_tuple[0]
self._wCertificateType = signed_data_tuple[1]
self._bCertificate = signed_data_tuple[2]
if self._wRevision != WIN_CERT_REVISION_2_0:
raise RuntimeError("Unknown revision %#x." % self._wRevision)
if self._wCertificateType != WIN_CERT_TYPE_PKCS_SIGNED_DATA:
raise RuntimeError("Unknown cert type %#x." % self._wCertificateType)
def getcertificateblob(self):
return self._bCertificate
| """Deal with Microsoft-specific Authenticode data."""
win_cert_revision_1_0 = 256
win_cert_revision_2_0 = 512
win_cert_type_x509 = 1
win_cert_type_pkcs_signed_data = 2
win_cert_type_reserved_1 = 3
win_cert_type_ts_stack_signed = 4
class Pecoffblob(object):
"""Encapsulating class for Microsoft-specific Authenticode data.
As defined in the PECOFF (v8) and Authenticode specifications.
This is data as it is extracted from the signature_data field by
the fingerprinter.
"""
def __init__(self, signed_data_tuple):
self._wRevision = signed_data_tuple[0]
self._wCertificateType = signed_data_tuple[1]
self._bCertificate = signed_data_tuple[2]
if self._wRevision != WIN_CERT_REVISION_2_0:
raise runtime_error('Unknown revision %#x.' % self._wRevision)
if self._wCertificateType != WIN_CERT_TYPE_PKCS_SIGNED_DATA:
raise runtime_error('Unknown cert type %#x.' % self._wCertificateType)
def getcertificateblob(self):
return self._bCertificate |
def snail(array):
temp_list = []
if array and len(array) > 1:
if isinstance(array[0], list):
temp_list.extend(array[0])
else:
temp_list.append(array[0])
array.pop(0)
for lis_index in range(len(array)):
temp_list.append(array[lis_index][-1])
array[lis_index].pop(-1)
if isinstance(array[-1], list):
temp_list.extend(array[-1][::-1])
else:
temp_list.append(array[-1])
array.pop(-1)
for lis_index in range(len(array)):
temp_list.append(array[::-1][lis_index][0])
array[::-1][lis_index].pop(0)
temp_list.extend(snail(array))
return temp_list
elif array:
return array[0]
else:
return []
| def snail(array):
temp_list = []
if array and len(array) > 1:
if isinstance(array[0], list):
temp_list.extend(array[0])
else:
temp_list.append(array[0])
array.pop(0)
for lis_index in range(len(array)):
temp_list.append(array[lis_index][-1])
array[lis_index].pop(-1)
if isinstance(array[-1], list):
temp_list.extend(array[-1][::-1])
else:
temp_list.append(array[-1])
array.pop(-1)
for lis_index in range(len(array)):
temp_list.append(array[::-1][lis_index][0])
array[::-1][lis_index].pop(0)
temp_list.extend(snail(array))
return temp_list
elif array:
return array[0]
else:
return [] |
def get_scale_factor(input_units, sampling_rate=None):
if input_units=='ms':
return 1e3
elif input_units=='s':
return 1
elif input_units=='samples':
if sampling_rate is None:
raise ValueError('Must provide sampling_rate if input_units=="samples"')
return sampling_rate | def get_scale_factor(input_units, sampling_rate=None):
if input_units == 'ms':
return 1000.0
elif input_units == 's':
return 1
elif input_units == 'samples':
if sampling_rate is None:
raise value_error('Must provide sampling_rate if input_units=="samples"')
return sampling_rate |
config = {'luna_raw':'/root/ssd_data/LUNA/',
'luna_data':'/research/dept8/jzwang/dataset/LUNA16/combined/',
'preprocess_result_path':'/research/dept8/jzwang/dataset/HKU/preprocessed/numpy/',
'luna_abbr':'./labels/shorter.csv',
'luna_label':'./labels/lunaqualified_all.csv',
'luna_candidate_label':'./labels/luna_candidate_all.csv',
'lidc_xml':'./lidc_xml',
'preprocessing_backend':'python'
}
| config = {'luna_raw': '/root/ssd_data/LUNA/', 'luna_data': '/research/dept8/jzwang/dataset/LUNA16/combined/', 'preprocess_result_path': '/research/dept8/jzwang/dataset/HKU/preprocessed/numpy/', 'luna_abbr': './labels/shorter.csv', 'luna_label': './labels/lunaqualified_all.csv', 'luna_candidate_label': './labels/luna_candidate_all.csv', 'lidc_xml': './lidc_xml', 'preprocessing_backend': 'python'} |
#! /usr/bin/python3
# -*- coding:utf-8 -*-
"""
Define functions relative to bcv.
"""
def checkBcv(bcv):
"""Check that a bcv is valid.
Valid syntax:
BBCCVV
BBCCVV-vv
BBCCVV-ccvv
BBCCVV-BBccvv
"""
if len(bcv) == 6:
return True
elif len(bcv) == 9:
return bcv[4:6] < bcv[7:9]
elif len(bcv) == 11:
return bcv[2:4] < bcv[7:9] or (bcv[2:4] == bcv[7:9] and bcv[4:6] < bcv[9:11])
elif len(bcv) == 13:
return bcv[0:2] == bcv[7:9] and (bcv[2:4] < bcv[9:11] or (bcv[2:4] == bcv[9:11] and bcv[4:6] < bcv[11:13]))
else:
return False
def expandBcv(bcv):
"""If the bcv is an interval, expand if.
"""
if len(bcv) == 6:
return bcv
else:
return "-".join(splitBcv(bcv))
def splitBcv(bcv):
"""Split a valid bcv interval.
"""
if len(bcv) == 6:
return bcv, bcv
elif len(bcv) == 9:
return bcv[0:6], bcv[0:4]+bcv[7:9]
elif len(bcv) == 11:
return bcv[0:6], bcv[0:2]+bcv[7:11]
elif len(bcv) == 13:
return bcv[0:6], bcv[7:13]
else:
None, None
| """
Define functions relative to bcv.
"""
def check_bcv(bcv):
"""Check that a bcv is valid.
Valid syntax:
BBCCVV
BBCCVV-vv
BBCCVV-ccvv
BBCCVV-BBccvv
"""
if len(bcv) == 6:
return True
elif len(bcv) == 9:
return bcv[4:6] < bcv[7:9]
elif len(bcv) == 11:
return bcv[2:4] < bcv[7:9] or (bcv[2:4] == bcv[7:9] and bcv[4:6] < bcv[9:11])
elif len(bcv) == 13:
return bcv[0:2] == bcv[7:9] and (bcv[2:4] < bcv[9:11] or (bcv[2:4] == bcv[9:11] and bcv[4:6] < bcv[11:13]))
else:
return False
def expand_bcv(bcv):
"""If the bcv is an interval, expand if.
"""
if len(bcv) == 6:
return bcv
else:
return '-'.join(split_bcv(bcv))
def split_bcv(bcv):
"""Split a valid bcv interval.
"""
if len(bcv) == 6:
return (bcv, bcv)
elif len(bcv) == 9:
return (bcv[0:6], bcv[0:4] + bcv[7:9])
elif len(bcv) == 11:
return (bcv[0:6], bcv[0:2] + bcv[7:11])
elif len(bcv) == 13:
return (bcv[0:6], bcv[7:13])
else:
(None, None) |
# https://www.codechef.com/SEPT20B/problems/TREE2
T = int(input())
for _ in range(T):
x = input()
l = list(map(int, input().split()))
s = set(l)
m = min(s)
ops = len(s)
if m: print(ops)
else: print(ops-1)
| t = int(input())
for _ in range(T):
x = input()
l = list(map(int, input().split()))
s = set(l)
m = min(s)
ops = len(s)
if m:
print(ops)
else:
print(ops - 1) |
class SessionGenerator(object):
session: "TrainingSession"
def __init__(self,
training_cycle,
fatigue_rating,
current_training_max):
load_size = self.determine_load_size(
fatigue_rating,
training_cycle.previous_large_load_training_max,
current_training_max
)
self.session = self.generate_session(training_cycle.config,
load_size,
current_training_max)
training_cycle.previous_training_max = current_training_max
if fatigue_rating == "low":
training_cycle.previous_large_load_training_max = current_training_max
training_cycle.save()
def determine_load_size(self,
fatigue_rating,
previous_large_load_training_max,
current_training_max):
if current_training_max > previous_large_load_training_max: # TM improved from last fresh session.
return {
'low': 'large',
'medium': 'medium',
'high': 'medium'
}[fatigue_rating]
else: # TM stagnated or regressed from last fresh session.
return {
'low': 'supramaximal',
'medium': 'medium',
'high': 'small'
}[fatigue_rating]
def generate_session(self, config, load_size, training_max):
def calculate_set_quantity(reps_per_set, intensity, inol):
'''
Calculates the sets required to accomplish the work desired.
`extra_reps` is how many repetitions are left over after the
calculated number of flat (specified repetition quantity) sets. For
instance, if the user needs to complete 18 repetitions in sets of
5, `extra_reps` will be 3.
Returns a tuple comprising `(sets, extra_reps)`.
'''
total_reps = round(inol * 100 * (1 - intensity))
extra_reps = round(total_reps % reps_per_set)
sets = round((total_reps - extra_reps) / reps_per_set)
return sets, extra_reps
if load_size == 'supramaximal':
config.inol_target_large += config.supramaximal_inol_increment
config.save()
load_size = 'large'
load_size_map = {
'reps_per_set': {
'small': config.reps_per_set_small,
'medium': config.reps_per_set_medium,
'large': config.reps_per_set_large
},
'inol_targets': {
'small': config.inol_target_small,
'medium': config.inol_target_medium,
'large': config.inol_target_large
},
'intensity_targets': {
'small': config.intensity_target_small,
'medium': config.intensity_target_medium,
'large': config.intensity_target_large
}
}
sets, extra_reps = calculate_set_quantity(
load_size_map['reps_per_set'][load_size],
load_size_map['intensity_targets'][load_size],
load_size_map['inol_targets'][load_size]
)
session = TrainingSession(
sets=sets,
reps_per_set=load_size_map['reps_per_set'][load_size],
extra_reps=extra_reps,
intensity=load_size_map['intensity_targets'][load_size],
training_max=training_max
)
return session
class TrainingSession(object):
sets: int
reps_per_set: int
extra_reps: int
intensity: float
training_max: float
def __init__(self, **kwargs):
for kw in kwargs:
setattr(self, kw, kwargs[kw])
@property
def e1rm(self):
return self.training_max / 0.9
@property
def load(self):
return self.intensity * self.e1rm
| class Sessiongenerator(object):
session: 'TrainingSession'
def __init__(self, training_cycle, fatigue_rating, current_training_max):
load_size = self.determine_load_size(fatigue_rating, training_cycle.previous_large_load_training_max, current_training_max)
self.session = self.generate_session(training_cycle.config, load_size, current_training_max)
training_cycle.previous_training_max = current_training_max
if fatigue_rating == 'low':
training_cycle.previous_large_load_training_max = current_training_max
training_cycle.save()
def determine_load_size(self, fatigue_rating, previous_large_load_training_max, current_training_max):
if current_training_max > previous_large_load_training_max:
return {'low': 'large', 'medium': 'medium', 'high': 'medium'}[fatigue_rating]
else:
return {'low': 'supramaximal', 'medium': 'medium', 'high': 'small'}[fatigue_rating]
def generate_session(self, config, load_size, training_max):
def calculate_set_quantity(reps_per_set, intensity, inol):
"""
Calculates the sets required to accomplish the work desired.
`extra_reps` is how many repetitions are left over after the
calculated number of flat (specified repetition quantity) sets. For
instance, if the user needs to complete 18 repetitions in sets of
5, `extra_reps` will be 3.
Returns a tuple comprising `(sets, extra_reps)`.
"""
total_reps = round(inol * 100 * (1 - intensity))
extra_reps = round(total_reps % reps_per_set)
sets = round((total_reps - extra_reps) / reps_per_set)
return (sets, extra_reps)
if load_size == 'supramaximal':
config.inol_target_large += config.supramaximal_inol_increment
config.save()
load_size = 'large'
load_size_map = {'reps_per_set': {'small': config.reps_per_set_small, 'medium': config.reps_per_set_medium, 'large': config.reps_per_set_large}, 'inol_targets': {'small': config.inol_target_small, 'medium': config.inol_target_medium, 'large': config.inol_target_large}, 'intensity_targets': {'small': config.intensity_target_small, 'medium': config.intensity_target_medium, 'large': config.intensity_target_large}}
(sets, extra_reps) = calculate_set_quantity(load_size_map['reps_per_set'][load_size], load_size_map['intensity_targets'][load_size], load_size_map['inol_targets'][load_size])
session = training_session(sets=sets, reps_per_set=load_size_map['reps_per_set'][load_size], extra_reps=extra_reps, intensity=load_size_map['intensity_targets'][load_size], training_max=training_max)
return session
class Trainingsession(object):
sets: int
reps_per_set: int
extra_reps: int
intensity: float
training_max: float
def __init__(self, **kwargs):
for kw in kwargs:
setattr(self, kw, kwargs[kw])
@property
def e1rm(self):
return self.training_max / 0.9
@property
def load(self):
return self.intensity * self.e1rm |
# -*- coding: utf-8 -*-
"""
@create: 2019-05-30 22:19:12.
@author: ppolxda
@desc:
"""
class Error(Exception):
pass
class InputError(Error):
pass
class DuplicateError(Error):
pass
class IndexExpiredError(Error):
pass
| """
@create: 2019-05-30 22:19:12.
@author: ppolxda
@desc:
"""
class Error(Exception):
pass
class Inputerror(Error):
pass
class Duplicateerror(Error):
pass
class Indexexpirederror(Error):
pass |
DROPS = ((3, 'Pling'), (5, 'Plang'), (7, 'Plong'))
def convert(number):
"""
Converts a number to a string according to the raindrop sounds.
"""
return "".join(sound for factor, sound
in DROPS if number % factor == 0) or str(number)
| drops = ((3, 'Pling'), (5, 'Plang'), (7, 'Plong'))
def convert(number):
"""
Converts a number to a string according to the raindrop sounds.
"""
return ''.join((sound for (factor, sound) in DROPS if number % factor == 0)) or str(number) |
filename = "CCaseScene.unity"
subA = ("<<<<<<< Updated upstream\n")
subB = ("=======\n")
subC = (">>>>>>> Stashed changes\n")
with open(filename, 'r+') as f:
data = f.read()
count = 0
# Iterate starts
while True:
# Find indexes
start, end = data.index(subA), data.index(subB)
# Slice string
data = data[:start] + data[end:]
data = data.replace(subB,"", 1).replace(subC,"", 1)
# Iterate ends
count += 1
if(data.find(subA) < 0):
# if(count > 4):
break
# return pointer to top of file so we can re-write the content with replaced string
f.seek(0)
# Deletes
f.truncate()
# re-write the content with the updated content
f.write(data)
# close file
f.close()
| filename = 'CCaseScene.unity'
sub_a = '<<<<<<< Updated upstream\n'
sub_b = '=======\n'
sub_c = '>>>>>>> Stashed changes\n'
with open(filename, 'r+') as f:
data = f.read()
count = 0
while True:
(start, end) = (data.index(subA), data.index(subB))
data = data[:start] + data[end:]
data = data.replace(subB, '', 1).replace(subC, '', 1)
count += 1
if data.find(subA) < 0:
break
f.seek(0)
f.truncate()
f.write(data)
f.close() |
# New feature in Python 3.8, assignment expressions (known as the walrus operator)
# Assignment expression are written with a new notation (:=). This operator is often
# called the walrus operator as it resembles the eyes and tusks of a walrus on its side.
#
# Video explanation: https://realpython.com/lessons/assignment-expressions/
# PEP 572 https://www.python.org/dev/peps/pep-0572/
# Assignment expressions allow you to assign and return a value in the same expression.
walrus = True
print(walrus) # True
# In Python 3.8, we can combine these two expressions. It will assign walrus to True
# and return True
print(walrus := True) # True
# Another example with a while loop. This program allows you to input a text until you
# input the word quit.
inputs = list()
while True:
current = input("Write something: ")
if current == "quit":
break
inputs.append(current)
# With assignment expressions this code can be simplified
inputs = list()
while (current := input("Write something: ")) != "quit":
inputs.append(current)
| walrus = True
print(walrus)
print((walrus := True))
inputs = list()
while True:
current = input('Write something: ')
if current == 'quit':
break
inputs.append(current)
inputs = list()
while (current := input('Write something: ')) != 'quit':
inputs.append(current) |
def solve():
A = int(input())
row = (A + 2) // 3 # from (100 100) to (100+row-1, 100)
board = []
for _ in range(1000):
board.append([0]*1000)
I = J = 100 # [2, 999]
for _ in range(1000):
print('{} {}'.format(I, J))
I_, J_ = map(int, input().split())
if I_ == 0 and J_ == 0:
return
board[I_][J_] = 1
while board[I-1][J-1] and board[I-1][J] and board[I-1][J+1] and I < 100+row-3:
I += 1
input()
if __name__ == '__main__':
T = int(input())
for t in range(T):
solve() | def solve():
a = int(input())
row = (A + 2) // 3
board = []
for _ in range(1000):
board.append([0] * 1000)
i = j = 100
for _ in range(1000):
print('{} {}'.format(I, J))
(i_, j_) = map(int, input().split())
if I_ == 0 and J_ == 0:
return
board[I_][J_] = 1
while board[I - 1][J - 1] and board[I - 1][J] and board[I - 1][J + 1] and (I < 100 + row - 3):
i += 1
input()
if __name__ == '__main__':
t = int(input())
for t in range(T):
solve() |
deleteObject = True
editObject = True
getObject = {'id': 1234,
'fingerprint': 'aa:bb:cc:dd',
'label': 'label',
'notes': 'notes',
'key': 'ssh-rsa AAAAB3N...pa67 user@example.com'}
createObject = getObject
getAllObjects = [getObject]
| delete_object = True
edit_object = True
get_object = {'id': 1234, 'fingerprint': 'aa:bb:cc:dd', 'label': 'label', 'notes': 'notes', 'key': 'ssh-rsa AAAAB3N...pa67 user@example.com'}
create_object = getObject
get_all_objects = [getObject] |
'''
Created on Apr 27, 2015
@author: DHawkins
'''
POSITION = [
{'abbrev': 'al', 'column': 7, 'row': 7, 'state': 'alabama'},
{'abbrev': 'ak', 'column': 1, 'row': 8, 'state': 'alaska'},
{'abbrev': 'az', 'column': 2, 'row': 6, 'state': 'arizona'},
{'abbrev': 'ar', 'column': 5, 'row': 6, 'state': 'arkansas'},
{'abbrev': 'ca', 'column': 1, 'row': 5, 'state': 'california'},
{'abbrev': 'co', 'column': 3, 'row': 5, 'state': 'colorado'},
{'abbrev': 'ct', 'column': 10, 'row': 3, 'state': 'connecticut'},
{'abbrev': 'dc', 'column': 9, 'row': 6, 'state': 'district of columbia'},
{'abbrev': 'de', 'column': 10, 'row': 5, 'state': 'delaware'},
{'abbrev': 'fl', 'column': 8, 'row': 8, 'state': 'florida'},
{'abbrev': 'ga', 'column': 8, 'row': 7, 'state': 'georgia'},
{'abbrev': 'hi', 'column': 2, 'row': 8, 'state': 'hawaii'},
{'abbrev': 'id', 'column': 2, 'row': 3, 'state': 'idaho'},
{'abbrev': 'il', 'column': 6, 'row': 4, 'state': 'illinois'},
{'abbrev': 'in', 'column': 7, 'row': 4, 'state': 'indiana'},
{'abbrev': 'ia', 'column': 5, 'row': 4, 'state': 'iowa'},
{'abbrev': 'ks', 'column': 4, 'row': 6, 'state': 'kansas'},
{'abbrev': 'ky', 'column': 6, 'row': 5, 'state': 'kentucky'},
{'abbrev': 'la', 'column': 5, 'row': 7, 'state': 'louisiana'},
{'abbrev': 'me', 'column': 11, 'row': 1, 'state': 'maine'},
{'abbrev': 'md', 'column': 9, 'row': 5, 'state': 'maryland'},
{'abbrev': 'ma', 'column': 11, 'row': 2, 'state': 'massachusetts'},
{'abbrev': 'mi', 'column': 7, 'row': 3, 'state': 'michigan'},
{'abbrev': 'mn', 'column': 5, 'row': 3, 'state': 'minnesota'},
{'abbrev': 'ms', 'column': 6, 'row': 7, 'state': 'mississippi'},
{'abbrev': 'mo', 'column': 5, 'row': 5, 'state': 'missouri'},
{'abbrev': 'mt', 'column': 3, 'row': 3, 'state': 'montana'},
{'abbrev': 'ne', 'column': 4, 'row': 5, 'state': 'nebraska'},
{'abbrev': 'nv', 'column': 2, 'row': 4, 'state': 'nevada'},
{'abbrev': 'nh', 'column': 10, 'row': 2, 'state': 'new hampshire'},
{'abbrev': 'nj', 'column': 10, 'row': 4, 'state': 'new jersey'},
{'abbrev': 'nm', 'column': 3, 'row': 6, 'state': 'new mexico'},
{'abbrev': 'ny', 'column': 9, 'row': 3, 'state': 'new york'},
{'abbrev': 'nc', 'column': 7, 'row': 6, 'state': 'north carolina'},
{'abbrev': 'nd', 'column': 4, 'row': 3, 'state': 'north dakota'},
{'abbrev': 'oh', 'column': 8, 'row': 4, 'state': 'ohio'},
{'abbrev': 'ok', 'column': 4, 'row': 7, 'state': 'oklahoma'},
{'abbrev': 'or', 'column': 1, 'row': 4, 'state': 'oregon'},
{'abbrev': 'pa', 'column': 9, 'row': 4, 'state': 'pennsylvania'},
{'abbrev': 'ri', 'column': 11, 'row': 3, 'state': 'rhode island'},
{'abbrev': 'sc', 'column': 8, 'row': 6, 'state': 'south carolina'},
{'abbrev': 'sd', 'column': 4, 'row': 4, 'state': 'south dakota'},
{'abbrev': 'tn', 'column': 6, 'row': 6, 'state': 'tennessee'},
{'abbrev': 'tx', 'column': 4, 'row': 8, 'state': 'texas'},
{'abbrev': 'ut', 'column': 2, 'row': 5, 'state': 'utah'},
{'abbrev': 'vt', 'column': 9, 'row': 2, 'state': 'vermont'},
{'abbrev': 'va', 'column': 8, 'row': 5, 'state': 'virginia'},
{'abbrev': 'wa', 'column': 1, 'row': 3, 'state': 'washington'},
{'abbrev': 'wv', 'column': 7, 'row': 5, 'state': 'west virginia'},
{'abbrev': 'wi', 'column': 6, 'row': 3, 'state': 'wisconsin'},
{'abbrev': 'wy', 'column': 3, 'row': 4, 'state': 'wyoming'}
]
| """
Created on Apr 27, 2015
@author: DHawkins
"""
position = [{'abbrev': 'al', 'column': 7, 'row': 7, 'state': 'alabama'}, {'abbrev': 'ak', 'column': 1, 'row': 8, 'state': 'alaska'}, {'abbrev': 'az', 'column': 2, 'row': 6, 'state': 'arizona'}, {'abbrev': 'ar', 'column': 5, 'row': 6, 'state': 'arkansas'}, {'abbrev': 'ca', 'column': 1, 'row': 5, 'state': 'california'}, {'abbrev': 'co', 'column': 3, 'row': 5, 'state': 'colorado'}, {'abbrev': 'ct', 'column': 10, 'row': 3, 'state': 'connecticut'}, {'abbrev': 'dc', 'column': 9, 'row': 6, 'state': 'district of columbia'}, {'abbrev': 'de', 'column': 10, 'row': 5, 'state': 'delaware'}, {'abbrev': 'fl', 'column': 8, 'row': 8, 'state': 'florida'}, {'abbrev': 'ga', 'column': 8, 'row': 7, 'state': 'georgia'}, {'abbrev': 'hi', 'column': 2, 'row': 8, 'state': 'hawaii'}, {'abbrev': 'id', 'column': 2, 'row': 3, 'state': 'idaho'}, {'abbrev': 'il', 'column': 6, 'row': 4, 'state': 'illinois'}, {'abbrev': 'in', 'column': 7, 'row': 4, 'state': 'indiana'}, {'abbrev': 'ia', 'column': 5, 'row': 4, 'state': 'iowa'}, {'abbrev': 'ks', 'column': 4, 'row': 6, 'state': 'kansas'}, {'abbrev': 'ky', 'column': 6, 'row': 5, 'state': 'kentucky'}, {'abbrev': 'la', 'column': 5, 'row': 7, 'state': 'louisiana'}, {'abbrev': 'me', 'column': 11, 'row': 1, 'state': 'maine'}, {'abbrev': 'md', 'column': 9, 'row': 5, 'state': 'maryland'}, {'abbrev': 'ma', 'column': 11, 'row': 2, 'state': 'massachusetts'}, {'abbrev': 'mi', 'column': 7, 'row': 3, 'state': 'michigan'}, {'abbrev': 'mn', 'column': 5, 'row': 3, 'state': 'minnesota'}, {'abbrev': 'ms', 'column': 6, 'row': 7, 'state': 'mississippi'}, {'abbrev': 'mo', 'column': 5, 'row': 5, 'state': 'missouri'}, {'abbrev': 'mt', 'column': 3, 'row': 3, 'state': 'montana'}, {'abbrev': 'ne', 'column': 4, 'row': 5, 'state': 'nebraska'}, {'abbrev': 'nv', 'column': 2, 'row': 4, 'state': 'nevada'}, {'abbrev': 'nh', 'column': 10, 'row': 2, 'state': 'new hampshire'}, {'abbrev': 'nj', 'column': 10, 'row': 4, 'state': 'new jersey'}, {'abbrev': 'nm', 'column': 3, 'row': 6, 'state': 'new mexico'}, {'abbrev': 'ny', 'column': 9, 'row': 3, 'state': 'new york'}, {'abbrev': 'nc', 'column': 7, 'row': 6, 'state': 'north carolina'}, {'abbrev': 'nd', 'column': 4, 'row': 3, 'state': 'north dakota'}, {'abbrev': 'oh', 'column': 8, 'row': 4, 'state': 'ohio'}, {'abbrev': 'ok', 'column': 4, 'row': 7, 'state': 'oklahoma'}, {'abbrev': 'or', 'column': 1, 'row': 4, 'state': 'oregon'}, {'abbrev': 'pa', 'column': 9, 'row': 4, 'state': 'pennsylvania'}, {'abbrev': 'ri', 'column': 11, 'row': 3, 'state': 'rhode island'}, {'abbrev': 'sc', 'column': 8, 'row': 6, 'state': 'south carolina'}, {'abbrev': 'sd', 'column': 4, 'row': 4, 'state': 'south dakota'}, {'abbrev': 'tn', 'column': 6, 'row': 6, 'state': 'tennessee'}, {'abbrev': 'tx', 'column': 4, 'row': 8, 'state': 'texas'}, {'abbrev': 'ut', 'column': 2, 'row': 5, 'state': 'utah'}, {'abbrev': 'vt', 'column': 9, 'row': 2, 'state': 'vermont'}, {'abbrev': 'va', 'column': 8, 'row': 5, 'state': 'virginia'}, {'abbrev': 'wa', 'column': 1, 'row': 3, 'state': 'washington'}, {'abbrev': 'wv', 'column': 7, 'row': 5, 'state': 'west virginia'}, {'abbrev': 'wi', 'column': 6, 'row': 3, 'state': 'wisconsin'}, {'abbrev': 'wy', 'column': 3, 'row': 4, 'state': 'wyoming'}] |
# A ring buffer is a non-growable buffer with a fixed size. When the ring buffer is full
# and a new element is inserted, the oldest element in the ring buffer is overwritten with
# the newest element. This kind of data structure is very useful for use cases such as
# storing logs and history information, where you typically want to store information up
# until it reaches a certain age, after which you don't care about it anymore and don't
# mind seeing it overwritten by newer data.
# Implement this behavior in the RingBuffer class. RingBuffer has two methods, append and
# get. The append method adds elements to the buffer. The get method returns all of the
# elements in the buffer in a list in their given order. It should not return any None
# values in the list even if they are present in the ring buffer.
class RingBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.current = 0
self.storage = [None] * capacity
def append(self, item):
if self.current == self.capacity:
self.current = 0
self.storage[self.current] = item
self.current += 1
def get(self):
temp_list = []
for item in self.storage:
if item:
temp_list.append(item)
return temp_list
buffer = RingBuffer(3)
buffer.get() # should return []
buffer.append("a")
buffer.append("b")
buffer.append("c")
print(
f"Should return \"['a', 'b', 'c']\" \t --> \t {buffer.get()}"
) # should return ['a', 'b', 'c']
# 'd' overwrites the oldest value in the ring buffer, which is 'a'
buffer.append("d")
print(
f"Should return \"['d', 'b', 'c']\" \t --> \t {buffer.get()}"
) # should return ['d', 'b', 'c']
buffer.append("e")
buffer.append("f")
print(
f"Should return \"['d', 'e', 'f']\" \t --> \t {buffer.get()}"
) # should return ['d', 'e', 'f']
# Testing
# http://pythontutor.com/visualize.html#code=class%20RingBuffer%3A%0A%20%20%20%20def%20__init__%28self,%20capacity%29%3A%0A%20%20%20%20%20%20%20%20self.capacity%20%3D%20capacity%0A%20%20%20%20%20%20%20%20self.current%20%3D%200%0A%20%20%20%20%20%20%20%20self.storage%20%3D%20%5BNone%5D%20*%20capacity%0A%0A%20%20%20%20def%20append%28self,%20item%29%3A%0A%20%20%20%20%20%20%20%20if%20self.current%20%3D%3D%20self.capacity%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20self.current%20%3D%200%0A%0A%20%20%20%20%20%20%20%20self.storage%5Bself.current%5D%20%3D%20item%0A%20%20%20%20%20%20%20%20self.current%20%2B%3D%201%0A%0A%20%20%20%20def%20get%28self%29%3A%0A%20%20%20%20%20%20%20%20temp_list%20%3D%20%5B%5D%0A%20%20%20%20%20%20%20%20for%20item%20in%20self.storage%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20item%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20temp_list.append%28item%29%0A%20%20%20%20%20%20%20%20return%20temp_list%0A%0A%0Abuffer%20%3D%20RingBuffer%283%29%0A%0Abuffer.get%28%29%20%20%20%23%20should%20return%20%5B%5D%0A%0Abuffer.append%28'a'%29%0Abuffer.append%28'b'%29%0Abuffer.append%28'c'%29%0A%0Aprint%28f%22Should%20return%20%5C%22%5B'a',%20'b',%20'c'%5D%5C%22%20%5Ct%20--%3E%20%5Ct%20%7Bbuffer.get%28%29%7D%22%29%20%20%20%23%20should%20return%20%5B'a',%20'b',%20'c'%5D%0A%0A%23%20'd'%20overwrites%20the%20oldest%20value%20in%20the%20ring%20buffer,%20which%20is%20'a'%0Abuffer.append%28'd'%29%0A%0Aprint%28f%22Should%20return%20%5C%22%5B'd',%20'b',%20'c'%5D%5C%22%20%5Ct%20--%3E%20%5Ct%20%7Bbuffer.get%28%29%7D%22%29%20%20%20%23%20should%20return%20%5B'd',%20'b',%20'c'%5D%0A%0Abuffer.append%28'e'%29%0Abuffer.append%28'f'%29%0A%0Aprint%28f%22Should%20return%20%5C%22%5B'd',%20'e',%20'f'%5D%5C%22%20%5Ct%20--%3E%20%5Ct%20%7Bbuffer.get%28%29%7D%22%29%20%20%20%23%20should%20return%20%5B'd',%20'e',%20'f'%5D%0A&cumulative=false&curInstr=52&heapPrimitives=nevernest&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false
| class Ringbuffer:
def __init__(self, capacity):
self.capacity = capacity
self.current = 0
self.storage = [None] * capacity
def append(self, item):
if self.current == self.capacity:
self.current = 0
self.storage[self.current] = item
self.current += 1
def get(self):
temp_list = []
for item in self.storage:
if item:
temp_list.append(item)
return temp_list
buffer = ring_buffer(3)
buffer.get()
buffer.append('a')
buffer.append('b')
buffer.append('c')
print(f"""Should return "['a', 'b', 'c']" \t --> \t {buffer.get()}""")
buffer.append('d')
print(f"""Should return "['d', 'b', 'c']" \t --> \t {buffer.get()}""")
buffer.append('e')
buffer.append('f')
print(f"""Should return "['d', 'e', 'f']" \t --> \t {buffer.get()}""") |
HEADER_JSON_CONTENT = {
'Content-type': 'application/json', 'Accept': 'text/plain'
}
SUCCESS_MESSAGES = {
201: "Created Propertie: {title} in {provinces} province(s)",
}
ERROR_MESSAGES = {
422: {
'message': "Please, verify data for register this propertie"
},
404: {
'message': "Provide an valid ID to retrive the queried propertie."
},
}
MIN_BEDS = 1
MAX_BEDS = 5
MIN_BATHS = 1
MAX_BATHS = 5
MIN_SQUARE_METERS = 20
MAX_SQUARE_METERS = MIN_SQUARE_METERS * 12
MIN_LONGITUDE = 0
MAX_LONGITUDE = 1400
MIN_LATITUDE = 0
MAX_LATITUDE = 1000
PROVINCES = {
'Gode': {
'boundaries': {
'upperLeft': {
'x': 0,
'y': 1000,
},
'bottomRight': {
'x': 600,
'y': 500,
},
}
},
'Ruja': {
'boundaries': {
'upperLeft': {
'x': 400,
'y': 1000,
},
'bottomRight': {
'x': 1100,
'y': 500,
},
}
},
'Jaby': {
'boundaries': {
'upperLeft': {
'x': 1100,
'y': 1000,
},
'bottomRight': {
'x': 1400,
'y': 500,
},
}
},
'Scavy': {
'boundaries': {
'upperLeft': {
'x': 0,
'y': 500,
},
'bottomRight': {
'x': 600,
'y': 0,
},
}
},
'Groola': {
'boundaries': {
'upperLeft': {
'x': 600,
'y': 500,
},
'bottomRight': {
'x': 800,
'y': 0,
},
}
},
'Nova': {
'boundaries': {
'upperLeft': {
'x': 800,
'y': 500,
},
'bottomRight': {
'x': 1400,
'y': 0,
},
}
}
}
| header_json_content = {'Content-type': 'application/json', 'Accept': 'text/plain'}
success_messages = {201: 'Created Propertie: {title} in {provinces} province(s)'}
error_messages = {422: {'message': 'Please, verify data for register this propertie'}, 404: {'message': 'Provide an valid ID to retrive the queried propertie.'}}
min_beds = 1
max_beds = 5
min_baths = 1
max_baths = 5
min_square_meters = 20
max_square_meters = MIN_SQUARE_METERS * 12
min_longitude = 0
max_longitude = 1400
min_latitude = 0
max_latitude = 1000
provinces = {'Gode': {'boundaries': {'upperLeft': {'x': 0, 'y': 1000}, 'bottomRight': {'x': 600, 'y': 500}}}, 'Ruja': {'boundaries': {'upperLeft': {'x': 400, 'y': 1000}, 'bottomRight': {'x': 1100, 'y': 500}}}, 'Jaby': {'boundaries': {'upperLeft': {'x': 1100, 'y': 1000}, 'bottomRight': {'x': 1400, 'y': 500}}}, 'Scavy': {'boundaries': {'upperLeft': {'x': 0, 'y': 500}, 'bottomRight': {'x': 600, 'y': 0}}}, 'Groola': {'boundaries': {'upperLeft': {'x': 600, 'y': 500}, 'bottomRight': {'x': 800, 'y': 0}}}, 'Nova': {'boundaries': {'upperLeft': {'x': 800, 'y': 500}, 'bottomRight': {'x': 1400, 'y': 0}}}} |
#!/usr/bin/env python
def reverse(string):
"""Reverse a given string."""
return string[::-1]
def main():
print(reverse('a'))
print(reverse('abcd'))
print(reverse('hello world'))
if __name__ == '__main__':
main()
| def reverse(string):
"""Reverse a given string."""
return string[::-1]
def main():
print(reverse('a'))
print(reverse('abcd'))
print(reverse('hello world'))
if __name__ == '__main__':
main() |
jogador = {}
soma = 0
gols = []
jogador['nome'] = str(input('Nome do Jogador: '))
jogador['njogos'] = int(input(f'Quanta partidas {jogador["nome"]} Jogou?'))
for v in range(0, jogador['njogos']):
temp = int(input(f'Quantos gols na partida {v}? '))
soma += temp
gols.append(temp)
jogador['gols'] = gols[:]
print('-=' * 30)
print(jogador)
print('-=' * 30)
print(f'O campo nome tem o valor {jogador["nome"]}')
print(f'O campo gols tem o valor {gols}')
print(f'O campo total tem o valor {soma}')
print('-=' * 30)
print(f'O jogador {jogador["nome"]} jogou {jogador["njogos"]} partidas.')
for i, v in enumerate(gols):
print(f' => Na partida {i}, fez {v} gols')
print(f'Foi um total de {soma}')
| jogador = {}
soma = 0
gols = []
jogador['nome'] = str(input('Nome do Jogador: '))
jogador['njogos'] = int(input(f"Quanta partidas {jogador['nome']} Jogou?"))
for v in range(0, jogador['njogos']):
temp = int(input(f'Quantos gols na partida {v}? '))
soma += temp
gols.append(temp)
jogador['gols'] = gols[:]
print('-=' * 30)
print(jogador)
print('-=' * 30)
print(f"O campo nome tem o valor {jogador['nome']}")
print(f'O campo gols tem o valor {gols}')
print(f'O campo total tem o valor {soma}')
print('-=' * 30)
print(f"O jogador {jogador['nome']} jogou {jogador['njogos']} partidas.")
for (i, v) in enumerate(gols):
print(f' => Na partida {i}, fez {v} gols')
print(f'Foi um total de {soma}') |
"""
@file
@brief Exceptions.
"""
class RenderException(Exception):
"""
Custom exception for all class and functions below.
"""
pass
| """
@file
@brief Exceptions.
"""
class Renderexception(Exception):
"""
Custom exception for all class and functions below.
"""
pass |
# -*- coding: utf-8 -*-
# This file is generated from NI-TClk API metadata version 255.0.0d0
functions = {
'ConfigureForHomogeneousTriggers': {
'documentation': {
'description': '\nConfigures the attributes commonly required for the TClk synchronization\nof device sessions with homogeneous triggers in a single PXI chassis or\na single PC. Use niTClk_ConfigureForHomogeneousTriggers to configure\nthe attributes for the reference clocks, start triggers, reference\ntriggers, script triggers, and pause triggers. If\nniTClk_ConfigureForHomogeneousTriggers cannot perform all the steps\nappropriate for the given sessions, it returns an error. If an error is\nreturned, use the instrument driver functions and attributes for signal\nrouting, along with the following NI-TClk attributes:\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION\nniTClk_ConfigureForHomogeneousTriggers affects the following clocks and\ntriggers: - Reference clocks - Start triggers - Reference triggers -\nScript triggers - Pause triggers Reference Clocks\nniTClk_ConfigureForHomogeneousTriggers configures the reference clocks\nif they are needed. Specifically, if the internal sample clocks or\ninternal sample clock timebases are used, and the reference clock source\nis not configured--or is set to None (no trigger\nconfigured)--niTClk_ConfigureForHomogeneousTriggers configures the\nfollowing: PXI--The reference clock source on all devices is set to be\nthe 10 MHz PXI backplane clock (PXI_CLK10). PCI--One of the devices\nexports its 10 MHz onboard reference clock to RTSI 7. The reference\nclock source on all devices is set to be RTSI 7. Note: If the reference\nclock source is set to a value other than None,\nniTClk_ConfigureForHomogeneousTriggers cannot configure the reference\nclock source. Start Triggers If the start trigger is set to None (no\ntrigger configured) for all sessions, the sessions are configured to\nshare the start trigger. The start trigger is shared by: - Implicitly\nexporting the start trigger from one session - Configuring the other\nsessions for digital edge start triggers with sources corresponding to\nthe exported start trigger - Setting\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions If the start triggers are None\nfor all except one session, niTClk_ConfigureForHomogeneousTriggers\nconfigures the sessions to share the start trigger from the one excepted\nsession. The start trigger is shared by: - Implicitly exporting start\ntrigger from the session with the start trigger that is not None -\nConfiguring the other sessions for digital-edge start triggers with\nsources corresponding to the exported start trigger - Setting\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions If start triggers are configured\nfor all sessions, niTClk_ConfigureForHomogeneousTriggers does not\naffect the start triggers. Start triggers are considered to be\nconfigured for all sessions if either of the following conditions is\ntrue: - No session has a start trigger that is None - One session has a\nstart trigger that is None, and all other sessions have start triggers\nother than None. The one session with the None trigger must have\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION set to itself, indicating\nthat the session itself is the start trigger master Reference Triggers\nniTClk_ConfigureForHomogeneousTriggers configures sessions that support\nreference triggers to share the reference triggers if the reference\ntriggers are None (no trigger configured) for all except one session.\nThe reference triggers are shared by: - Implicitly exporting the\nreference trigger from the session whose reference trigger is not None -\nConfiguring the other sessions that support the reference trigger for\ndigital-edge reference triggers with sources corresponding to the\nexported reference trigger - Setting\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions that support reference trigger If\nthe reference triggers are configured for all sessions that support\nreference triggers, niTClk_ConfigureForHomogeneousTriggers does not\naffect the reference triggers. Reference triggers are considered to be\nconfigured for all sessions if either one or the other of the following\nconditions is true: - No session has a reference trigger that is None -\nOne session has a reference trigger that is None, and all other sessions\nhave reference triggers other than None. The one session with the None\ntrigger must have NITCLK_ATTR_REF_TRIGGER_MASTER_SESSION set to\nitself, indicating that the session itself is the reference trigger\nmaster Reference Trigger Holdoffs Acquisition sessions may be configured\nwith the reference trigger. For acquisition sessions, when the reference\ntrigger is shared, niTClk_ConfigureForHomogeneousTriggers configures\nthe holdoff attributes (which are instrument driver specific) on the\nreference trigger master session so that the session does not recognize\nthe reference trigger before the other sessions are ready. This\ncondition is only relevant when the sample clock rates, sample clock\ntimebase rates, sample counts, holdoffs, and/or any delays for the\nacquisitions are different. When the sample clock rates, sample clock\ntimebase rates, and/or the sample counts are different in acquisition\nsessions sharing the reference trigger, you should also set the holdoff\nattributes for the reference trigger master using the instrument driver.\nPause Triggers\nniTClk_ConfigureForHomogeneousTriggers configures generation sessions\nthat support pause triggers to share them, if the pause triggers are\nNone (no trigger configured) for all except one session. The pause\ntriggers are shared by: - Implicitly exporting the pause trigger from\nthe session whose script trigger is not None - Configuring the other\nsessions that support the pause trigger for digital-edge pause triggers\nwith sources corresponding to the exported pause trigger - Setting\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions that support script triggers If\nthe pause triggers are configured for all generation sessions that\nsupport pause triggers, niTClk_ConfigureForHomogeneousTriggers does not\naffect pause triggers. Pause triggers are considered to be configured\nfor all sessions if either one or the other of the following conditions\nis true: - No session has a pause trigger that is None - One session has\na pause trigger that is None and all other sessions have pause triggers\nother than None. The one session with the None trigger must have\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION set to itself, indicating\nthat the session itself is the pause trigger master Note: TClk\nsynchronization is not supported for pause triggers on acquisition\nsessions.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
}
],
'returns': 'ViStatus'
},
'FinishSyncPulseSenderSynchronize': {
'documentation': {
'description': 'Finishes synchronizing the Sync Pulse Sender.'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
},
{
'default_value': 'hightime.timedelta(seconds=0.0)',
'direction': 'in',
'documentation': {
'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'
},
'name': 'minTime',
'python_api_converter_name': 'convert_timedelta_to_seconds_real64',
'type': 'ViReal64',
'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'
}
],
'returns': 'ViStatus'
},
'GetAttributeViReal64': {
'codegen_method': 'private',
'documentation': {
'description': 'Gets the value of an NI-TClk ViReal64 attribute.'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'session references the sessions being synchronized.'
},
'name': 'session',
'type': 'ViSession'
},
{
'direction': 'in',
'documentation': {
'description': 'Pass VI_NULL or an empty string'
},
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'documentation': {
'description': '\nThe ID of the attribute that you want to get Supported Attribute\nNITCLK_ATTR_SAMPLE_CLOCK_DELAY\n'
},
'name': 'attributeId',
'type': 'ViAttr'
},
{
'direction': 'out',
'documentation': {
'description': 'The value that you are getting'
},
'name': 'value',
'type': 'ViReal64'
}
],
'returns': 'ViStatus'
},
'GetAttributeViSession': {
'codegen_method': 'private',
'documentation': {
'description': 'Gets the value of an NI-TClk ViSession attribute.'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'session references the sessions being synchronized.'
},
'name': 'session',
'type': 'ViSession'
},
{
'direction': 'in',
'documentation': {
'description': 'Pass VI_NULL or an empty string'
},
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'documentation': {
'description': '\nThe ID of the attribute that you want to set Supported Attributes\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION\n'
},
'name': 'attributeId',
'type': 'ViAttr'
},
{
'direction': 'out',
'documentation': {
'description': 'The value that you are getting'
},
'is_session_handle': False,
'name': 'value',
'type': 'ViSession'
}
],
'returns': 'ViStatus'
},
'GetAttributeViString': {
'codegen_method': 'private',
'documentation': {
'description': '\nThis function queries the value of an NI-TClk ViString attribute. You\nmust provide a ViChar array to serve as a buffer for the value. You pass\nthe number of bytes in the buffer as bufSize. If the current value of\nthe attribute, including the terminating NULL byte, is larger than the\nsize you indicate in bufSize, the function copies bufSize minus 1 bytes\ninto the buffer, places an ASCII NULL byte at the end of the buffer, and\nreturns the array size that you must pass to get the entire value. For\nexample, if the value is "123456" and bufSize is 4, the function places\n"123" into the buffer and returns 7. If you want to call\nniTClk_GetAttributeViString just to get the required array size, pass 0\nfor bufSize and VI_NULL for the value.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'session references the sessions being synchronized.'
},
'name': 'session',
'type': 'ViSession'
},
{
'direction': 'in',
'documentation': {
'description': 'Pass VI_NULL or an empty string'
},
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'documentation': {
'description': '\nThe ID of the attribute that you want to get Supported Attributes\nNITCLK_ATTR_SYNC_PULSE_SOURCE\nNITCLK_ATTR_SYNC_PULSE_CLOCK_SOURCE\nNITCLK_ATTR_EXPORTED_SYNC_PULSE_OUTPUT_TERMINAL\n'
},
'name': 'attributeId',
'type': 'ViAttr'
},
{
'direction': 'in',
'documentation': {
'description': '\nThe number of bytes in the ViChar array that you specify for the value\nparameter\n'
},
'name': 'bufSize',
'type': 'ViInt32'
},
{
'direction': 'out',
'documentation': {
'description': 'The value that you are getting'
},
'name': 'value',
'size': {
'mechanism': 'ivi-dance',
'value': 'bufSize'
},
'type': 'ViChar[]'
}
],
'returns': 'ViStatus'
},
'GetExtendedErrorInfo': {
'codegen_method': 'private',
'documentation': {
'description': '\nReports extended error information for the most recent NI-TClk function\nthat returned an error. To establish the function that returned an\nerror, use the return values of the individual functions because once\nniTClk_GetExtendedErrorInfo reports an errorString, it does not report\nan empty string again.\n'
},
'is_error_handling': True,
'parameters': [
{
'direction': 'out',
'documentation': {
'description': '\nExtended error description. If errorString is NULL, then it is not large\nenough to hold the entire error description. In this case, the return\nvalue of niTClk_GetExtendedErrorInfo is the size that you should use\nfor niTClk_GetExtendedErrorInfo to return the full error string.\n'
},
'name': 'errorString',
'size': {
'mechanism': 'ivi-dance',
'value': 'errorStringSize'
},
'type': 'ViChar[]'
},
{
'direction': 'in',
'documentation': {
'description': '\nSize of the errorString. If errorStringSize is 0, then it is not large\nenough to hold the entire error description. In this case, the return\nvalue of niTClk_GetExtendedErrorInfo is the size that you should use\nfor niTClk_GetExtendedErrorInfo to return the full error string.\n'
},
'name': 'errorStringSize',
'type': 'ViUInt32'
}
],
'returns': 'ViStatus'
},
'Initiate': {
'documentation': {
'description': '\nInitiates the acquisition or generation sessions specified, taking into\nconsideration any special requirements needed for synchronization. For\nexample, the session exporting the TClk-synchronized start trigger is\nnot initiated until after niTClk_Initiate initiates all the sessions\nthat import the TClk-synchronized start trigger.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
}
],
'returns': 'ViStatus'
},
'IsDone': {
'documentation': {
'description': '\nMonitors the progress of the acquisitions and/or generations\ncorresponding to sessions.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
},
{
'direction': 'out',
'documentation': {
'description': '\nIndicates that the operation is done. The operation is done when each\nsession has completed without any errors or when any one of the sessions\nreports an error.\n'
},
'name': 'done',
'type': 'ViBoolean'
}
],
'returns': 'ViStatus'
},
'SetAttributeViReal64': {
'codegen_method': 'private',
'documentation': {
'description': '\nSets the value of an NI-TClk VIReal64 attribute.\nniTClk_SetAttributeViReal64 is a low-level function that you can use to\nset the values NI-TClk attributes. NI-TClk contains high-level functions\nthat set most of the attributes. It is best to use the high-level\nfunctions as much as possible.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'session references the sessions being synchronized.'
},
'name': 'session',
'type': 'ViSession'
},
{
'direction': 'in',
'documentation': {
'description': 'Pass VI_NULL or an empty string'
},
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'documentation': {
'description': '\nThe ID of the attribute that you want to set Supported Attribute\nNITCLK_ATTR_SAMPLE_CLOCK_DELAY\n'
},
'name': 'attributeId',
'type': 'ViAttr'
},
{
'direction': 'in',
'documentation': {
'description': 'The value for the attribute'
},
'name': 'value',
'type': 'ViReal64'
}
],
'returns': 'ViStatus'
},
'SetAttributeViSession': {
'codegen_method': 'private',
'documentation': {
'description': '\nSets the value of an NI-TClk ViSession attribute.\nniTClk_SetAttributeViSession is a low-level function that you can use\nto set the values NI-TClk attributes. NI-TClk contains high-level\nfunctions that set most of the attributes. It is best to use the\nhigh-level functions as much as possible.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'session references the sessions being synchronized.'
},
'name': 'session',
'type': 'ViSession'
},
{
'direction': 'in',
'documentation': {
'description': '\nPass VI_NULL or an empty string'
},
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'documentation': {
'description': '\nThe ID of the attribute that you want to set Supported Attributes\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION\n'
},
'name': 'attributeId',
'type': 'ViAttr'
},
{
'direction': 'in',
'documentation': {
'description': 'The value for the attribute'
},
'is_session_handle': False,
'name': 'value',
'type': 'ViSession'
}
],
'returns': 'ViStatus'
},
'SetAttributeViString': {
'codegen_method': 'private',
'documentation': {
'description': '\nSets the value of an NI-TClk VIString attribute.\nniTClk_SetAttributeViString is a low-level function that you can use to\nset the values of NI-TClk attributes. NI-TClk contain high-level\nfunctions that set most of the attributes. It is best to use the\nhigh-level functions as much as possible.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'session references the sessions being synchronized.'
},
'name': 'session',
'type': 'ViSession'
},
{
'direction': 'in',
'documentation': {
'description': 'Pass VI_NULL or an empty string'
},
'name': 'channelName',
'type': 'ViConstString'
},
{
'direction': 'in',
'documentation': {
'description': '\nPass the ID of the attribute that you want to set Supported Attributes\nNITCLK_ATTR_SYNC_PULSE_SOURCE\nNITCLK_ATTR_SYNC_PULSE_CLOCK_SOURCE\nNITCLK_ATTR_EXPORTED_SYNC_PULSE_OUTPUT_TERMINAL\n'
},
'name': 'attributeId',
'type': 'ViAttr'
},
{
'direction': 'in',
'documentation': {
'description': 'Pass the value for the attribute'
},
'name': 'value',
'type': 'ViConstString'
}
],
'returns': 'ViStatus'
},
'SetupForSyncPulseSenderSynchronize': {
'documentation': {
'description': 'Configures the TClks on all the devices and prepares the Sync Pulse Sender for synchronization'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
},
{
'default_value': 'hightime.timedelta(seconds=0.0)',
'direction': 'in',
'documentation': {
'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'
},
'name': 'minTime',
'python_api_converter_name': 'convert_timedelta_to_seconds_real64',
'type': 'ViReal64',
'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'
}
],
'returns': 'ViStatus'
},
'Synchronize': {
'documentation': {
'description': '\nSynchronizes the TClk signals on the given sessions. After\nniTClk_Synchronize executes, TClk signals from all sessions are\nsynchronized. Note: Before using this NI-TClk function, verify that your\nsystem is configured as specified in the PXI Trigger Lines and RTSI\nLines topic of the NI-TClk Synchronization Help. You can locate this\nhelp file at Start>>Programs>>National Instruments>>NI-TClk.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
},
{
'default_value': 'hightime.timedelta(seconds=0.0)',
'direction': 'in',
'documentation': {
'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'
},
'name': 'minTclkPeriod',
'python_api_converter_name': 'convert_timedelta_to_seconds_real64',
'type': 'ViReal64',
'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'
}
],
'returns': 'ViStatus'
},
'SynchronizeToSyncPulseSender': {
'documentation': {
'description': 'Synchronizes the other devices to the Sync Pulse Sender.'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
},
{
'default_value': 'hightime.timedelta(seconds=0.0)',
'direction': 'in',
'documentation': {
'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'
},
'name': 'minTime',
'python_api_converter_name': 'convert_timedelta_to_seconds_real64',
'type': 'ViReal64',
'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'
}
],
'returns': 'ViStatus'
},
'WaitUntilDone': {
'documentation': {
'description': '\nCall this function to pause execution of your program until the\nacquisitions and/or generations corresponding to sessions are done or\nuntil the function returns a timeout error. niTClk_WaitUntilDone is a\nblocking function that periodically checks the operation status. It\nreturns control to the calling program if the operation completes\nsuccessfully or an error occurs (including a timeout error). This\nfunction is most useful for finite data operations that you expect to\ncomplete within a certain time.\n'
},
'parameters': [
{
'direction': 'in',
'documentation': {
'description': 'Number of elements in the sessions array'
},
'name': 'sessionCount',
'type': 'ViUInt32'
},
{
'direction': 'in',
'documentation': {
'description': 'sessions is an array of sessions that are being synchronized.'
},
'is_session_handle': False,
'name': 'sessions',
'python_api_converter_name': 'convert_to_nitclk_session_number_list',
'size': {
'mechanism': 'len',
'value': 'sessionCount'
},
'type': 'ViSession[]',
'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'
},
{
'default_value': 'hightime.timedelta(seconds=0.0)',
'direction': 'in',
'documentation': {
'description': '\nThe amount of time in seconds that niTClk_WaitUntilDone waits for the\nsessions to complete. If timeout is exceeded, niTClk_WaitUntilDone\nreturns an error.\n'
},
'name': 'timeout',
'python_api_converter_name': 'convert_timedelta_to_seconds_real64',
'type': 'ViReal64',
'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'
}
],
'returns': 'ViStatus'
}
}
| functions = {'ConfigureForHomogeneousTriggers': {'documentation': {'description': '\nConfigures the attributes commonly required for the TClk synchronization\nof device sessions with homogeneous triggers in a single PXI chassis or\na single PC. Use niTClk_ConfigureForHomogeneousTriggers to configure\nthe attributes for the reference clocks, start triggers, reference\ntriggers, script triggers, and pause triggers. If\nniTClk_ConfigureForHomogeneousTriggers cannot perform all the steps\nappropriate for the given sessions, it returns an error. If an error is\nreturned, use the instrument driver functions and attributes for signal\nrouting, along with the following NI-TClk attributes:\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION\nniTClk_ConfigureForHomogeneousTriggers affects the following clocks and\ntriggers: - Reference clocks - Start triggers - Reference triggers -\nScript triggers - Pause triggers Reference Clocks\nniTClk_ConfigureForHomogeneousTriggers configures the reference clocks\nif they are needed. Specifically, if the internal sample clocks or\ninternal sample clock timebases are used, and the reference clock source\nis not configured--or is set to None (no trigger\nconfigured)--niTClk_ConfigureForHomogeneousTriggers configures the\nfollowing: PXI--The reference clock source on all devices is set to be\nthe 10 MHz PXI backplane clock (PXI_CLK10). PCI--One of the devices\nexports its 10 MHz onboard reference clock to RTSI 7. The reference\nclock source on all devices is set to be RTSI 7. Note: If the reference\nclock source is set to a value other than None,\nniTClk_ConfigureForHomogeneousTriggers cannot configure the reference\nclock source. Start Triggers If the start trigger is set to None (no\ntrigger configured) for all sessions, the sessions are configured to\nshare the start trigger. The start trigger is shared by: - Implicitly\nexporting the start trigger from one session - Configuring the other\nsessions for digital edge start triggers with sources corresponding to\nthe exported start trigger - Setting\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions If the start triggers are None\nfor all except one session, niTClk_ConfigureForHomogeneousTriggers\nconfigures the sessions to share the start trigger from the one excepted\nsession. The start trigger is shared by: - Implicitly exporting start\ntrigger from the session with the start trigger that is not None -\nConfiguring the other sessions for digital-edge start triggers with\nsources corresponding to the exported start trigger - Setting\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions If start triggers are configured\nfor all sessions, niTClk_ConfigureForHomogeneousTriggers does not\naffect the start triggers. Start triggers are considered to be\nconfigured for all sessions if either of the following conditions is\ntrue: - No session has a start trigger that is None - One session has a\nstart trigger that is None, and all other sessions have start triggers\nother than None. The one session with the None trigger must have\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION set to itself, indicating\nthat the session itself is the start trigger master Reference Triggers\nniTClk_ConfigureForHomogeneousTriggers configures sessions that support\nreference triggers to share the reference triggers if the reference\ntriggers are None (no trigger configured) for all except one session.\nThe reference triggers are shared by: - Implicitly exporting the\nreference trigger from the session whose reference trigger is not None -\nConfiguring the other sessions that support the reference trigger for\ndigital-edge reference triggers with sources corresponding to the\nexported reference trigger - Setting\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions that support reference trigger If\nthe reference triggers are configured for all sessions that support\nreference triggers, niTClk_ConfigureForHomogeneousTriggers does not\naffect the reference triggers. Reference triggers are considered to be\nconfigured for all sessions if either one or the other of the following\nconditions is true: - No session has a reference trigger that is None -\nOne session has a reference trigger that is None, and all other sessions\nhave reference triggers other than None. The one session with the None\ntrigger must have NITCLK_ATTR_REF_TRIGGER_MASTER_SESSION set to\nitself, indicating that the session itself is the reference trigger\nmaster Reference Trigger Holdoffs Acquisition sessions may be configured\nwith the reference trigger. For acquisition sessions, when the reference\ntrigger is shared, niTClk_ConfigureForHomogeneousTriggers configures\nthe holdoff attributes (which are instrument driver specific) on the\nreference trigger master session so that the session does not recognize\nthe reference trigger before the other sessions are ready. This\ncondition is only relevant when the sample clock rates, sample clock\ntimebase rates, sample counts, holdoffs, and/or any delays for the\nacquisitions are different. When the sample clock rates, sample clock\ntimebase rates, and/or the sample counts are different in acquisition\nsessions sharing the reference trigger, you should also set the holdoff\nattributes for the reference trigger master using the instrument driver.\nPause Triggers\nniTClk_ConfigureForHomogeneousTriggers configures generation sessions\nthat support pause triggers to share them, if the pause triggers are\nNone (no trigger configured) for all except one session. The pause\ntriggers are shared by: - Implicitly exporting the pause trigger from\nthe session whose script trigger is not None - Configuring the other\nsessions that support the pause trigger for digital-edge pause triggers\nwith sources corresponding to the exported pause trigger - Setting\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION to the session that is\nexporting the trigger for all sessions that support script triggers If\nthe pause triggers are configured for all generation sessions that\nsupport pause triggers, niTClk_ConfigureForHomogeneousTriggers does not\naffect pause triggers. Pause triggers are considered to be configured\nfor all sessions if either one or the other of the following conditions\nis true: - No session has a pause trigger that is None - One session has\na pause trigger that is None and all other sessions have pause triggers\nother than None. The one session with the None trigger must have\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION set to itself, indicating\nthat the session itself is the pause trigger master Note: TClk\nsynchronization is not supported for pause triggers on acquisition\nsessions.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}], 'returns': 'ViStatus'}, 'FinishSyncPulseSenderSynchronize': {'documentation': {'description': 'Finishes synchronizing the Sync Pulse Sender.'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}, {'default_value': 'hightime.timedelta(seconds=0.0)', 'direction': 'in', 'documentation': {'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'}, 'name': 'minTime', 'python_api_converter_name': 'convert_timedelta_to_seconds_real64', 'type': 'ViReal64', 'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'}], 'returns': 'ViStatus'}, 'GetAttributeViReal64': {'codegen_method': 'private', 'documentation': {'description': 'Gets the value of an NI-TClk ViReal64 attribute.'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'session references the sessions being synchronized.'}, 'name': 'session', 'type': 'ViSession'}, {'direction': 'in', 'documentation': {'description': 'Pass VI_NULL or an empty string'}, 'name': 'channelName', 'type': 'ViConstString'}, {'direction': 'in', 'documentation': {'description': '\nThe ID of the attribute that you want to get Supported Attribute\nNITCLK_ATTR_SAMPLE_CLOCK_DELAY\n'}, 'name': 'attributeId', 'type': 'ViAttr'}, {'direction': 'out', 'documentation': {'description': 'The value that you are getting'}, 'name': 'value', 'type': 'ViReal64'}], 'returns': 'ViStatus'}, 'GetAttributeViSession': {'codegen_method': 'private', 'documentation': {'description': 'Gets the value of an NI-TClk ViSession attribute.'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'session references the sessions being synchronized.'}, 'name': 'session', 'type': 'ViSession'}, {'direction': 'in', 'documentation': {'description': 'Pass VI_NULL or an empty string'}, 'name': 'channelName', 'type': 'ViConstString'}, {'direction': 'in', 'documentation': {'description': '\nThe ID of the attribute that you want to set Supported Attributes\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION\n'}, 'name': 'attributeId', 'type': 'ViAttr'}, {'direction': 'out', 'documentation': {'description': 'The value that you are getting'}, 'is_session_handle': False, 'name': 'value', 'type': 'ViSession'}], 'returns': 'ViStatus'}, 'GetAttributeViString': {'codegen_method': 'private', 'documentation': {'description': '\nThis function queries the value of an NI-TClk ViString attribute. You\nmust provide a ViChar array to serve as a buffer for the value. You pass\nthe number of bytes in the buffer as bufSize. If the current value of\nthe attribute, including the terminating NULL byte, is larger than the\nsize you indicate in bufSize, the function copies bufSize minus 1 bytes\ninto the buffer, places an ASCII NULL byte at the end of the buffer, and\nreturns the array size that you must pass to get the entire value. For\nexample, if the value is "123456" and bufSize is 4, the function places\n"123" into the buffer and returns 7. If you want to call\nniTClk_GetAttributeViString just to get the required array size, pass 0\nfor bufSize and VI_NULL for the value.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'session references the sessions being synchronized.'}, 'name': 'session', 'type': 'ViSession'}, {'direction': 'in', 'documentation': {'description': 'Pass VI_NULL or an empty string'}, 'name': 'channelName', 'type': 'ViConstString'}, {'direction': 'in', 'documentation': {'description': '\nThe ID of the attribute that you want to get Supported Attributes\nNITCLK_ATTR_SYNC_PULSE_SOURCE\nNITCLK_ATTR_SYNC_PULSE_CLOCK_SOURCE\nNITCLK_ATTR_EXPORTED_SYNC_PULSE_OUTPUT_TERMINAL\n'}, 'name': 'attributeId', 'type': 'ViAttr'}, {'direction': 'in', 'documentation': {'description': '\nThe number of bytes in the ViChar array that you specify for the value\nparameter\n'}, 'name': 'bufSize', 'type': 'ViInt32'}, {'direction': 'out', 'documentation': {'description': 'The value that you are getting'}, 'name': 'value', 'size': {'mechanism': 'ivi-dance', 'value': 'bufSize'}, 'type': 'ViChar[]'}], 'returns': 'ViStatus'}, 'GetExtendedErrorInfo': {'codegen_method': 'private', 'documentation': {'description': '\nReports extended error information for the most recent NI-TClk function\nthat returned an error. To establish the function that returned an\nerror, use the return values of the individual functions because once\nniTClk_GetExtendedErrorInfo reports an errorString, it does not report\nan empty string again.\n'}, 'is_error_handling': True, 'parameters': [{'direction': 'out', 'documentation': {'description': '\nExtended error description. If errorString is NULL, then it is not large\nenough to hold the entire error description. In this case, the return\nvalue of niTClk_GetExtendedErrorInfo is the size that you should use\nfor niTClk_GetExtendedErrorInfo to return the full error string.\n'}, 'name': 'errorString', 'size': {'mechanism': 'ivi-dance', 'value': 'errorStringSize'}, 'type': 'ViChar[]'}, {'direction': 'in', 'documentation': {'description': '\nSize of the errorString. If errorStringSize is 0, then it is not large\nenough to hold the entire error description. In this case, the return\nvalue of niTClk_GetExtendedErrorInfo is the size that you should use\nfor niTClk_GetExtendedErrorInfo to return the full error string.\n'}, 'name': 'errorStringSize', 'type': 'ViUInt32'}], 'returns': 'ViStatus'}, 'Initiate': {'documentation': {'description': '\nInitiates the acquisition or generation sessions specified, taking into\nconsideration any special requirements needed for synchronization. For\nexample, the session exporting the TClk-synchronized start trigger is\nnot initiated until after niTClk_Initiate initiates all the sessions\nthat import the TClk-synchronized start trigger.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}], 'returns': 'ViStatus'}, 'IsDone': {'documentation': {'description': '\nMonitors the progress of the acquisitions and/or generations\ncorresponding to sessions.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}, {'direction': 'out', 'documentation': {'description': '\nIndicates that the operation is done. The operation is done when each\nsession has completed without any errors or when any one of the sessions\nreports an error.\n'}, 'name': 'done', 'type': 'ViBoolean'}], 'returns': 'ViStatus'}, 'SetAttributeViReal64': {'codegen_method': 'private', 'documentation': {'description': '\nSets the value of an NI-TClk VIReal64 attribute.\nniTClk_SetAttributeViReal64 is a low-level function that you can use to\nset the values NI-TClk attributes. NI-TClk contains high-level functions\nthat set most of the attributes. It is best to use the high-level\nfunctions as much as possible.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'session references the sessions being synchronized.'}, 'name': 'session', 'type': 'ViSession'}, {'direction': 'in', 'documentation': {'description': 'Pass VI_NULL or an empty string'}, 'name': 'channelName', 'type': 'ViConstString'}, {'direction': 'in', 'documentation': {'description': '\nThe ID of the attribute that you want to set Supported Attribute\nNITCLK_ATTR_SAMPLE_CLOCK_DELAY\n'}, 'name': 'attributeId', 'type': 'ViAttr'}, {'direction': 'in', 'documentation': {'description': 'The value for the attribute'}, 'name': 'value', 'type': 'ViReal64'}], 'returns': 'ViStatus'}, 'SetAttributeViSession': {'codegen_method': 'private', 'documentation': {'description': '\nSets the value of an NI-TClk ViSession attribute.\nniTClk_SetAttributeViSession is a low-level function that you can use\nto set the values NI-TClk attributes. NI-TClk contains high-level\nfunctions that set most of the attributes. It is best to use the\nhigh-level functions as much as possible.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'session references the sessions being synchronized.'}, 'name': 'session', 'type': 'ViSession'}, {'direction': 'in', 'documentation': {'description': '\nPass VI_NULL or an empty string'}, 'name': 'channelName', 'type': 'ViConstString'}, {'direction': 'in', 'documentation': {'description': '\nThe ID of the attribute that you want to set Supported Attributes\nNITCLK_ATTR_START_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_REF_TRIGGER_MASTER_SESSION\nNITCLK_ATTR_PAUSE_TRIGGER_MASTER_SESSION\n'}, 'name': 'attributeId', 'type': 'ViAttr'}, {'direction': 'in', 'documentation': {'description': 'The value for the attribute'}, 'is_session_handle': False, 'name': 'value', 'type': 'ViSession'}], 'returns': 'ViStatus'}, 'SetAttributeViString': {'codegen_method': 'private', 'documentation': {'description': '\nSets the value of an NI-TClk VIString attribute.\nniTClk_SetAttributeViString is a low-level function that you can use to\nset the values of NI-TClk attributes. NI-TClk contain high-level\nfunctions that set most of the attributes. It is best to use the\nhigh-level functions as much as possible.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'session references the sessions being synchronized.'}, 'name': 'session', 'type': 'ViSession'}, {'direction': 'in', 'documentation': {'description': 'Pass VI_NULL or an empty string'}, 'name': 'channelName', 'type': 'ViConstString'}, {'direction': 'in', 'documentation': {'description': '\nPass the ID of the attribute that you want to set Supported Attributes\nNITCLK_ATTR_SYNC_PULSE_SOURCE\nNITCLK_ATTR_SYNC_PULSE_CLOCK_SOURCE\nNITCLK_ATTR_EXPORTED_SYNC_PULSE_OUTPUT_TERMINAL\n'}, 'name': 'attributeId', 'type': 'ViAttr'}, {'direction': 'in', 'documentation': {'description': 'Pass the value for the attribute'}, 'name': 'value', 'type': 'ViConstString'}], 'returns': 'ViStatus'}, 'SetupForSyncPulseSenderSynchronize': {'documentation': {'description': 'Configures the TClks on all the devices and prepares the Sync Pulse Sender for synchronization'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}, {'default_value': 'hightime.timedelta(seconds=0.0)', 'direction': 'in', 'documentation': {'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'}, 'name': 'minTime', 'python_api_converter_name': 'convert_timedelta_to_seconds_real64', 'type': 'ViReal64', 'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'}], 'returns': 'ViStatus'}, 'Synchronize': {'documentation': {'description': '\nSynchronizes the TClk signals on the given sessions. After\nniTClk_Synchronize executes, TClk signals from all sessions are\nsynchronized. Note: Before using this NI-TClk function, verify that your\nsystem is configured as specified in the PXI Trigger Lines and RTSI\nLines topic of the NI-TClk Synchronization Help. You can locate this\nhelp file at Start>>Programs>>National Instruments>>NI-TClk.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}, {'default_value': 'hightime.timedelta(seconds=0.0)', 'direction': 'in', 'documentation': {'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'}, 'name': 'minTclkPeriod', 'python_api_converter_name': 'convert_timedelta_to_seconds_real64', 'type': 'ViReal64', 'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'}], 'returns': 'ViStatus'}, 'SynchronizeToSyncPulseSender': {'documentation': {'description': 'Synchronizes the other devices to the Sync Pulse Sender.'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}, {'default_value': 'hightime.timedelta(seconds=0.0)', 'direction': 'in', 'documentation': {'description': '\nMinimal period of TClk, expressed in seconds. Supported values are\nbetween 0.0 s and 0.050 s (50 ms). Minimal period for a single\nchassis/PC is 200 ns. If the specified value is less than 200 ns,\nNI-TClk automatically coerces minTime to 200 ns. For multichassis\nsynchronization, adjust this value to account for propagation delays\nthrough the various devices and cables.\n'}, 'name': 'minTime', 'python_api_converter_name': 'convert_timedelta_to_seconds_real64', 'type': 'ViReal64', 'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'}], 'returns': 'ViStatus'}, 'WaitUntilDone': {'documentation': {'description': '\nCall this function to pause execution of your program until the\nacquisitions and/or generations corresponding to sessions are done or\nuntil the function returns a timeout error. niTClk_WaitUntilDone is a\nblocking function that periodically checks the operation status. It\nreturns control to the calling program if the operation completes\nsuccessfully or an error occurs (including a timeout error). This\nfunction is most useful for finite data operations that you expect to\ncomplete within a certain time.\n'}, 'parameters': [{'direction': 'in', 'documentation': {'description': 'Number of elements in the sessions array'}, 'name': 'sessionCount', 'type': 'ViUInt32'}, {'direction': 'in', 'documentation': {'description': 'sessions is an array of sessions that are being synchronized.'}, 'is_session_handle': False, 'name': 'sessions', 'python_api_converter_name': 'convert_to_nitclk_session_number_list', 'size': {'mechanism': 'len', 'value': 'sessionCount'}, 'type': 'ViSession[]', 'type_in_documentation': 'list of instrument-specific sessions or nitclk.SessionReference instances'}, {'default_value': 'hightime.timedelta(seconds=0.0)', 'direction': 'in', 'documentation': {'description': '\nThe amount of time in seconds that niTClk_WaitUntilDone waits for the\nsessions to complete. If timeout is exceeded, niTClk_WaitUntilDone\nreturns an error.\n'}, 'name': 'timeout', 'python_api_converter_name': 'convert_timedelta_to_seconds_real64', 'type': 'ViReal64', 'type_in_documentation': 'hightime.timedelta, datetime.timedelta, or float in seconds'}], 'returns': 'ViStatus'}} |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Given an integer, write a function to determine if it is a power of two.
Tags: Math, Bit Manipulation
O(1) runtime; O(1) space
'''
class Solution(object):
def isPowerOfTwo(self, n):
"""
:type n: int
:rtype: bool
"""
return n > 0 and (n & (n - 1)) == 0
| """
Description:
Given an integer, write a function to determine if it is a power of two.
Tags: Math, Bit Manipulation
O(1) runtime; O(1) space
"""
class Solution(object):
def is_power_of_two(self, n):
"""
:type n: int
:rtype: bool
"""
return n > 0 and n & n - 1 == 0 |
class Lease(object):
"""
A lease.
:ivar id: ID of the lease
:ivar ttl: time to live for this lease
"""
def __init__(self, lease_id, ttl, etcd_client=None):
self.id = lease_id
self.ttl = ttl
self.etcd_client = etcd_client
async def _get_lease_info(self, *, keys=True):
return await self.etcd_client.get_lease_info(self.id, keys=keys)
async def revoke(self):
"""Revoke this lease."""
await self.etcd_client.revoke_lease(self.id)
async def refresh(self):
"""Refresh the time to live for this lease."""
return await self.etcd_client.refresh_lease(self.id)
# @property
async def remaining_ttl(self):
return (await self._get_lease_info(keys=False)).TTL
# @property
async def granted_ttl(self):
return (await self._get_lease_info(keys=False)).grantedTTL
# @property
async def keys(self):
return (await self._get_lease_info()).keys
| class Lease(object):
"""
A lease.
:ivar id: ID of the lease
:ivar ttl: time to live for this lease
"""
def __init__(self, lease_id, ttl, etcd_client=None):
self.id = lease_id
self.ttl = ttl
self.etcd_client = etcd_client
async def _get_lease_info(self, *, keys=True):
return await self.etcd_client.get_lease_info(self.id, keys=keys)
async def revoke(self):
"""Revoke this lease."""
await self.etcd_client.revoke_lease(self.id)
async def refresh(self):
"""Refresh the time to live for this lease."""
return await self.etcd_client.refresh_lease(self.id)
async def remaining_ttl(self):
return (await self._get_lease_info(keys=False)).TTL
async def granted_ttl(self):
return (await self._get_lease_info(keys=False)).grantedTTL
async def keys(self):
return (await self._get_lease_info()).keys |
"""
Given a set of complex values, find their product.
Example
For real = [1, 2] and imag = [1, 3], the output should be
arrayComplexElementsProduct(real, imag) = [-1, 5].
The task is to calculate product of 1 + 1 * i and 2 + 3 * i, so the answer is (1 + 1i) * (2 + 3i) = -1 + 5i.
"""
def arrayComplexElementsProduct(real, imag):
answer = [real[0], imag[0]]
for i in range(1, len(real)):
tmp = answer[0] * real[i] - answer[1] * imag[i]
answer[1] = answer[1] * real[i] + answer[0] * imag[i]
answer[0] = tmp
return answer
| """
Given a set of complex values, find their product.
Example
For real = [1, 2] and imag = [1, 3], the output should be
arrayComplexElementsProduct(real, imag) = [-1, 5].
The task is to calculate product of 1 + 1 * i and 2 + 3 * i, so the answer is (1 + 1i) * (2 + 3i) = -1 + 5i.
"""
def array_complex_elements_product(real, imag):
answer = [real[0], imag[0]]
for i in range(1, len(real)):
tmp = answer[0] * real[i] - answer[1] * imag[i]
answer[1] = answer[1] * real[i] + answer[0] * imag[i]
answer[0] = tmp
return answer |
print(any([True, 1, ""]))
print(all([True, 1, ""]))
print(dict(zip([1, 2, 3], "abc")))
| print(any([True, 1, '']))
print(all([True, 1, '']))
print(dict(zip([1, 2, 3], 'abc'))) |
top = [2,3,4,5,6]
# lst = [1,0,0,4,5]
lst = [1,2,3,4,5]
k = [9,9,0,0,0]
lst1 = [8,3,9,6,4,7,5,2,1]
lst2 = [10,11,12,8,3,9,6,4,7,5,2,1]
lst3 = [8,9,3,6,7,4,5,2,1]
lst4 = [8,3,9,6,4,7,5,2,1]
k = [9,0,0,0,0,0,0,0]
k = k[::-1]
def main():
subtract_1(lst1, k)
def helper1(lst, start):
new_lst = lst[start:]
index_of_zeros = []
index_of_carry = []
for i in range(len(new_lst)):
if new_lst[i] == 0:
index_of_zeros.append(i+ start)
elif new_lst[i] > 0:
index_of_carry.append(i)
index_of_carry = index_of_carry[1] + start
a = []
a.append([index_of_carry])
a.append(index_of_zeros)
return a
def subtract_1(lst, k):
print("top: ", top)
print("lst: ", lst)
print("k: ", k)
subtract_list = []
list_len = len(lst)
next_borrow = 1
for i in range(list_len):
#print("the list of i is: ", i)
#print("36 ",lst[i] - k[i], lst, k[i])
if (lst[i] - k[i]) >= 0:
#print("37 ",lst[i] - k[i], lst, k[i])
subtract_list.append(lst[i] - k[i])
while (lst[i] - k[i]) < 0:
if lst[i+next_borrow] > 0:
lst[i+next_borrow] -= 1
lst[i] = lst[i] + top[i]
if (lst[i] - k[i]) >= 0:
subtract_list.append(lst[i] - k[i])
#print("44 ",lst[i] - k[i], lst, k[i])
#print("43", lst[i] - k[i], lst[i], k[i])
#print("44",lst[i] - k[i], lst[i], k[i])
#problem!!!!!
elif lst[i+next_borrow] == 0:
a = helper1(lst, i)
index_of_carry = a[0][0]
index_of_zeros = a[1][0]
#print(lst[index_of_carry])
#print(lst[index_of_zeros])
temp = lst[:index_of_carry]
print(temp)
#print("value of i at 52: ",i)
#print("57", lst[i] - k[i], lst[i], k[i])
lst[index_of_carry] -= 1
for j in range(len(temp)):
#print("value of i at 54: ", i)
#print("61", lst[j] + top[j], lst, lst[3], j)
lst[j] += top[j]
if (lst[i] - k[i]) > 0:
subtract_list.append(lst[i] - k[i])
#print("value of i at 60: ",i)
#print("61", lst[i] - k[i], lst[i], k[i])
#problem!!!!!
print("subtract_list:", subtract_list)
if __name__ == "__main__":
main()
| top = [2, 3, 4, 5, 6]
lst = [1, 2, 3, 4, 5]
k = [9, 9, 0, 0, 0]
lst1 = [8, 3, 9, 6, 4, 7, 5, 2, 1]
lst2 = [10, 11, 12, 8, 3, 9, 6, 4, 7, 5, 2, 1]
lst3 = [8, 9, 3, 6, 7, 4, 5, 2, 1]
lst4 = [8, 3, 9, 6, 4, 7, 5, 2, 1]
k = [9, 0, 0, 0, 0, 0, 0, 0]
k = k[::-1]
def main():
subtract_1(lst1, k)
def helper1(lst, start):
new_lst = lst[start:]
index_of_zeros = []
index_of_carry = []
for i in range(len(new_lst)):
if new_lst[i] == 0:
index_of_zeros.append(i + start)
elif new_lst[i] > 0:
index_of_carry.append(i)
index_of_carry = index_of_carry[1] + start
a = []
a.append([index_of_carry])
a.append(index_of_zeros)
return a
def subtract_1(lst, k):
print('top: ', top)
print('lst: ', lst)
print('k: ', k)
subtract_list = []
list_len = len(lst)
next_borrow = 1
for i in range(list_len):
if lst[i] - k[i] >= 0:
subtract_list.append(lst[i] - k[i])
while lst[i] - k[i] < 0:
if lst[i + next_borrow] > 0:
lst[i + next_borrow] -= 1
lst[i] = lst[i] + top[i]
if lst[i] - k[i] >= 0:
subtract_list.append(lst[i] - k[i])
elif lst[i + next_borrow] == 0:
a = helper1(lst, i)
index_of_carry = a[0][0]
index_of_zeros = a[1][0]
temp = lst[:index_of_carry]
print(temp)
lst[index_of_carry] -= 1
for j in range(len(temp)):
lst[j] += top[j]
if lst[i] - k[i] > 0:
subtract_list.append(lst[i] - k[i])
print('subtract_list:', subtract_list)
if __name__ == '__main__':
main() |
day = '2'
with open(f'2015/data/day_{day}.in', 'r', encoding='utf-8') as f:
content = f.read().strip().split('\n')
def make_tup(row):
i = row.index('x')
a = row[:i]
row = row[i + 1:]
i = row.index('x')
b = row[:i]
c = row[i + 1:]
return int(a), int(b), int(c)
def area(a, b, c):
return 2 * (a*b + a*c + b*c)
##### Prva naloga #####
paper1 = 0
for row in content:
a, b, c = make_tup(row)
paper1 += area(a, b, c)
paper1 += min(a*b, b*c, a*c)
##### Druga naloga #####
ribbon2 = 0
for row in content:
a, b, c = make_tup(row)
ribbon2 += min(2*(a+b), 2*(a+c), 2*(b+c))
ribbon2 += a*b*c
def main():
s1 = str(paper1)
print(f'day {day}, puzzle 1: {s1}')
s2 = str(ribbon2)
print(f'day {day}, puzzle 2: {s2}')
with open(f'2015/out/day_{day}_1.out', 'w', encoding='utf-8') as f:
f.write(s1)
with open(f'2015/out/day_{day}_2.out', 'w', encoding='utf-8') as f:
f.write(s2)
main() | day = '2'
with open(f'2015/data/day_{day}.in', 'r', encoding='utf-8') as f:
content = f.read().strip().split('\n')
def make_tup(row):
i = row.index('x')
a = row[:i]
row = row[i + 1:]
i = row.index('x')
b = row[:i]
c = row[i + 1:]
return (int(a), int(b), int(c))
def area(a, b, c):
return 2 * (a * b + a * c + b * c)
paper1 = 0
for row in content:
(a, b, c) = make_tup(row)
paper1 += area(a, b, c)
paper1 += min(a * b, b * c, a * c)
ribbon2 = 0
for row in content:
(a, b, c) = make_tup(row)
ribbon2 += min(2 * (a + b), 2 * (a + c), 2 * (b + c))
ribbon2 += a * b * c
def main():
s1 = str(paper1)
print(f'day {day}, puzzle 1: {s1}')
s2 = str(ribbon2)
print(f'day {day}, puzzle 2: {s2}')
with open(f'2015/out/day_{day}_1.out', 'w', encoding='utf-8') as f:
f.write(s1)
with open(f'2015/out/day_{day}_2.out', 'w', encoding='utf-8') as f:
f.write(s2)
main() |
def translate():
return "jQuery(document).ready(function(){jQuery('body').translate('%s');});" % request.args(0).split('.')[0]
def changeLanguage():
session._language = request.args[0]
#T.force(request.args[0])
#T.set_current_languages(str(request.args[0]),str(request.args[0]) + '-' + str(request.args[0]))
if(len(request.args) == 5):
redirect(URL(request.args[1],request.args[2], request.args[3],args=(request.args[4])))
else:
redirect(URL(request.args[1],request.args[2], request.args[3]))
return
| def translate():
return "jQuery(document).ready(function(){jQuery('body').translate('%s');});" % request.args(0).split('.')[0]
def change_language():
session._language = request.args[0]
if len(request.args) == 5:
redirect(url(request.args[1], request.args[2], request.args[3], args=request.args[4]))
else:
redirect(url(request.args[1], request.args[2], request.args[3]))
return |
def main():
square = int(input("Calculate square root of: "))
print("square root of " + str(square) + " is " +
str(binsquareroot(square)))
def binsquareroot(square):
if square < 1:
return "an imaginair number"
if square == 1:
return 1
left = 1
right = square
mid = right
while (left + 1 < right):
mid = int(left + (right - left) / 2)
root = mid * mid
if root == square:
return mid
elif root > square:
right = mid
else: # root < square
left = mid
return "not a flat root"
if __name__ == '__main__':
main()
| def main():
square = int(input('Calculate square root of: '))
print('square root of ' + str(square) + ' is ' + str(binsquareroot(square)))
def binsquareroot(square):
if square < 1:
return 'an imaginair number'
if square == 1:
return 1
left = 1
right = square
mid = right
while left + 1 < right:
mid = int(left + (right - left) / 2)
root = mid * mid
if root == square:
return mid
elif root > square:
right = mid
else:
left = mid
return 'not a flat root'
if __name__ == '__main__':
main() |
def update_game(api, game, step_fn):
"""
Takes care of doing one update tick:
- call the player's AI
- send actions chosen by AI
:param api: API object to communicate with the server
:param game: The current game's state.
:param step_fn: function to call to execute the player's AI
"""
step_fn(game)
api.send_actions(game.id, list(filter(None.__ne__, [cell.actions() for cell in game.me.cells])))
| def update_game(api, game, step_fn):
"""
Takes care of doing one update tick:
- call the player's AI
- send actions chosen by AI
:param api: API object to communicate with the server
:param game: The current game's state.
:param step_fn: function to call to execute the player's AI
"""
step_fn(game)
api.send_actions(game.id, list(filter(None.__ne__, [cell.actions() for cell in game.me.cells]))) |
# helpers.py
def url_join(*args, end_slash = True):
strip_args = [str(a).rstrip("/") for a in args]
url = "/".join(strip_args)
if end_slash and not url.endswith("/"):
url = url + "/"
return url
| def url_join(*args, end_slash=True):
strip_args = [str(a).rstrip('/') for a in args]
url = '/'.join(strip_args)
if end_slash and (not url.endswith('/')):
url = url + '/'
return url |
#
# Copyright (c) 2010-2016, Fabric Software Inc. All rights reserved.
#
class DirQualTypeInfo:
def __init__(self, dir_qual, type_info):
self.dir_qual = dir_qual
self.type_info = type_info
@property
def dq(self):
return self.dir_qual
@property
def ti(self):
return self.type_info
def get_desc(self):
return "%s:%s" % (self.dir_qual.get_desc(), self.type_info.get_desc())
def __str__(self):
return self.get_desc()
| class Dirqualtypeinfo:
def __init__(self, dir_qual, type_info):
self.dir_qual = dir_qual
self.type_info = type_info
@property
def dq(self):
return self.dir_qual
@property
def ti(self):
return self.type_info
def get_desc(self):
return '%s:%s' % (self.dir_qual.get_desc(), self.type_info.get_desc())
def __str__(self):
return self.get_desc() |
grade = 95
if grade >= 90:
print("A")
elif grade >= 80:
print("B")
elif grade >= 70:
print("C")
elif grade >= 60:
print("D")
else:
print("F") | grade = 95
if grade >= 90:
print('A')
elif grade >= 80:
print('B')
elif grade >= 70:
print('C')
elif grade >= 60:
print('D')
else:
print('F') |
def gen_src(count):
for i in range(1, count):
data = "".join(["%d" % x for x in range(1, 10000)])
native.genrule(
name = "generated_class_%d" % i,
out = "Class%d.java" % i,
bash = "echo -e 'package gen;\npublic class Class%d { static String data = \"%s\"; }' > $OUT" % (i, data),
)
native.android_library(
name = "generated_lib_%d" % i,
srcs = [":generated_class_%d" % i],
)
return [":generated_lib_%d" % x for x in range(1, count)]
def gen_src_with_refs(index, ref_count, type):
if type == "method":
refs = " ".join(["void fun%d() {};\n" % i for i in range(1, ref_count + 1)])
elif type == "field":
refs = " ".join(["int field%d = 1;\n" % i for i in range(1, ref_count + 1)])
else:
fail("unknown type")
name = "generated_class_%d_%d_%s_refs" % (index, ref_count, type)
native.genrule(
name = name,
out = "Class%d.java" % index,
bash = "echo -e 'package gen;\npublic class Class%d {\n%s}' > $OUT" % (index, refs),
)
return ":%s" % name
def gen_overflow_lib(type):
for i in range(1, 15):
native.android_library(
name = "generated_lib_%s_overflow_%d" % (type, i),
srcs = [gen_src_with_refs(i, 5000, type)],
visibility = ["PUBLIC"],
)
return [":" + "generated_lib_%s_overflow_%d" % (type, i) for i in range(1, 15)]
def gen_primary_dex_overflow(type, gen_deps):
native.android_binary(
name = "primary_dex_%s_overflow" % type,
dex_group_lib_limit = 1,
keystore = "//keystores:debug",
manifest = "SimpleManifest.xml",
primary_dex_patterns = [
"^gen/Class",
],
use_split_dex = True,
deps = [
"//java/com/sample/app:app",
"//java/com/sample/lib:lib",
] + gen_deps,
)
def gen_secondary_dex_overflow(type, gen_deps):
native.android_binary(
name = "secondary_dex_%s_overflow" % type,
dex_group_lib_limit = 1,
secondary_dex_weight_limit = 1024 * 1024 * 64,
keystore = "//keystores:debug",
manifest = "SimpleManifest.xml",
primary_dex_patterns = [
"/MyApplication^",
],
use_split_dex = True,
deps = [
"//java/com/sample/app:app",
"//java/com/sample/lib:lib",
] + gen_deps,
)
| def gen_src(count):
for i in range(1, count):
data = ''.join(['%d' % x for x in range(1, 10000)])
native.genrule(name='generated_class_%d' % i, out='Class%d.java' % i, bash='echo -e \'package gen;\npublic class Class%d { static String data = "%s"; }\' > $OUT' % (i, data))
native.android_library(name='generated_lib_%d' % i, srcs=[':generated_class_%d' % i])
return [':generated_lib_%d' % x for x in range(1, count)]
def gen_src_with_refs(index, ref_count, type):
if type == 'method':
refs = ' '.join(['void fun%d() {};\n' % i for i in range(1, ref_count + 1)])
elif type == 'field':
refs = ' '.join(['int field%d = 1;\n' % i for i in range(1, ref_count + 1)])
else:
fail('unknown type')
name = 'generated_class_%d_%d_%s_refs' % (index, ref_count, type)
native.genrule(name=name, out='Class%d.java' % index, bash="echo -e 'package gen;\npublic class Class%d {\n%s}' > $OUT" % (index, refs))
return ':%s' % name
def gen_overflow_lib(type):
for i in range(1, 15):
native.android_library(name='generated_lib_%s_overflow_%d' % (type, i), srcs=[gen_src_with_refs(i, 5000, type)], visibility=['PUBLIC'])
return [':' + 'generated_lib_%s_overflow_%d' % (type, i) for i in range(1, 15)]
def gen_primary_dex_overflow(type, gen_deps):
native.android_binary(name='primary_dex_%s_overflow' % type, dex_group_lib_limit=1, keystore='//keystores:debug', manifest='SimpleManifest.xml', primary_dex_patterns=['^gen/Class'], use_split_dex=True, deps=['//java/com/sample/app:app', '//java/com/sample/lib:lib'] + gen_deps)
def gen_secondary_dex_overflow(type, gen_deps):
native.android_binary(name='secondary_dex_%s_overflow' % type, dex_group_lib_limit=1, secondary_dex_weight_limit=1024 * 1024 * 64, keystore='//keystores:debug', manifest='SimpleManifest.xml', primary_dex_patterns=['/MyApplication^'], use_split_dex=True, deps=['//java/com/sample/app:app', '//java/com/sample/lib:lib'] + gen_deps) |
##Exemplo retirado do site http://code.tutsplus.com/tutorials/beginning-test-driven-development-in-python--net-30137
##//lhekheklqhlekhqkehqkehqkhelqw
##//ljkfhjdhfjkdhfkjlsdhlfkhslkjkljdflksgflsgdf
##//lkhdsklfskfgshgfsjhgfs
class Calculator(object):
def add(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError
def sub(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError
def mult(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError
def div(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError
| class Calculator(object):
def add(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError
def sub(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError
def mult(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError
def div(self, x, y):
number_types = (int, float, complex)
if instance(x, number_types) and instance(y, number_types):
return x + y
else:
raise ValueError |
'''
Prompt:
Write a function bestSum(targetSum, numbers) that takes
in a targetSum and an array of numbers as arguments.
The function should return an array containing the
shortest combination of numbers that add up to exactly the targetSum.
If there is a tie for the shotest combination, you may return any of the shortest.
'''
# takes in a numbers array in decreasing order
def recurse(targetSum, numbers, combination = []):
if targetSum == 0:
return combination
if targetSum < 0:
return None
for n in numbers:
result = recurse(targetSum-n, numbers, [*combination, n])
if result is not None:
return result
return None
# Greedy approach. This only works on some sets of coins.
# Why? Consider this input: bestSum(8, [1, 4, 5])
# Should return [4, 4], but we are returning [5, 1, 1, 1]
def bestSum(targetSum, numbers):
numbers.sort(reverse=True)
return recurse(targetSum, numbers)
# Correct optimal solution with dynamic programming
def bestSum_V2(targetSum, numbers, cache = {}):
if targetSum in cache:
return cache[targetSum]
if targetSum == 0:
return []
if targetSum < 0:
return None
best = None
for n in numbers:
result = bestSum_V2(targetSum-n, numbers, cache)
if result is not None:
current = [*result, n]
if best is None or len(best) > len(current):
best = current
cache[targetSum] = best
return best
# print(bestSum(28, [7, 2, 1, 10, 5]))
# print(bestSum(7, [5, 3, 4, 7]))
# print(bestSum(8, [1, 4, 5]))
# print(bestSum_V2(28, [7, 2, 1, 10, 5]))
# print(bestSum_V2(7, [5, 3, 4, 7]))
# print(bestSum_V2(300, [100, 150, 7, 14]))
print(bestSum_V2(8, [1, 4, 5]))
| """
Prompt:
Write a function bestSum(targetSum, numbers) that takes
in a targetSum and an array of numbers as arguments.
The function should return an array containing the
shortest combination of numbers that add up to exactly the targetSum.
If there is a tie for the shotest combination, you may return any of the shortest.
"""
def recurse(targetSum, numbers, combination=[]):
if targetSum == 0:
return combination
if targetSum < 0:
return None
for n in numbers:
result = recurse(targetSum - n, numbers, [*combination, n])
if result is not None:
return result
return None
def best_sum(targetSum, numbers):
numbers.sort(reverse=True)
return recurse(targetSum, numbers)
def best_sum_v2(targetSum, numbers, cache={}):
if targetSum in cache:
return cache[targetSum]
if targetSum == 0:
return []
if targetSum < 0:
return None
best = None
for n in numbers:
result = best_sum_v2(targetSum - n, numbers, cache)
if result is not None:
current = [*result, n]
if best is None or len(best) > len(current):
best = current
cache[targetSum] = best
return best
print(best_sum_v2(8, [1, 4, 5])) |
apps_details = [
{
"app": "Learning xc functional from experimental data",
"repo": "https://github.com/mfkasim1/xcnn", # leave blank if no repo available
# leave blank if no paper available, strongly suggested to link to open-access paper
"paper": "https://arxiv.org/abs/2102.04229",
},
{
"app": "Basis optimization",
"repo": "https://github.com/diffqc/dqc-apps/tree/main/01-basis-opt",
"paper": "",
},
{
"app": "Alchemical perturbation",
"repo": "https://github.com/diffqc/dqc-apps/tree/main/04-alchemical-perturbation",
"paper": "",
},
]
repo_icons = {
"github": "docs/data/readme_icons/github.svg",
}
paper_icon = "docs/data/readme_icons/paper.svg"
def get_repo_name(repo_link):
# get the repository name
for repo_name in repo_icons.keys():
if repo_name in repo_link:
return repo_name
raise RuntimeError("Unlisted repository, please contact admin to add the repository.")
def add_row(app_detail):
# get the string for repository column
if app_detail['repo'].strip() != "":
repo_name = get_repo_name(app_detail['repo'])
repo_detail = f"[]({app_detail['repo']})"
else:
repo_detail = ""
# get the string for the paper column
if app_detail['paper'].strip() != "":
paper_detail = f"[]({app_detail['paper']})"
else:
paper_detail = ""
s = f"| {app_detail['app']} | {repo_detail} | {paper_detail} |\n"
return s
def main():
# construct the strings
s = "| Applications | Repo | Paper |\n"
s += "|-----------------------------------|------|-------|\n"
for app_detail in apps_details:
s += add_row(app_detail)
# open the readme file
fname = "README.md"
with open(fname, "r") as f:
content = f.read()
# find the signature in README
sig_start = "<!-- start of readme_appgen.py -->"
sig_end = "<!-- end of readme_appgen.py -->"
note = "<!-- Please do not edit this part directly, instead add your " + \
"application in the readme_appgen.py file -->\n"
idx_start = content.find(sig_start)
idx_end = content.find(sig_end)
# write the string to the README
content = content[:idx_start] + sig_start + "\n" + note + s + content[idx_end:]
with open(fname, "w") as f:
f.write(content)
if __name__ == "__main__":
main()
| apps_details = [{'app': 'Learning xc functional from experimental data', 'repo': 'https://github.com/mfkasim1/xcnn', 'paper': 'https://arxiv.org/abs/2102.04229'}, {'app': 'Basis optimization', 'repo': 'https://github.com/diffqc/dqc-apps/tree/main/01-basis-opt', 'paper': ''}, {'app': 'Alchemical perturbation', 'repo': 'https://github.com/diffqc/dqc-apps/tree/main/04-alchemical-perturbation', 'paper': ''}]
repo_icons = {'github': 'docs/data/readme_icons/github.svg'}
paper_icon = 'docs/data/readme_icons/paper.svg'
def get_repo_name(repo_link):
for repo_name in repo_icons.keys():
if repo_name in repo_link:
return repo_name
raise runtime_error('Unlisted repository, please contact admin to add the repository.')
def add_row(app_detail):
if app_detail['repo'].strip() != '':
repo_name = get_repo_name(app_detail['repo'])
repo_detail = f"[]({app_detail['repo']})"
else:
repo_detail = ''
if app_detail['paper'].strip() != '':
paper_detail = f"[]({app_detail['paper']})"
else:
paper_detail = ''
s = f"| {app_detail['app']} | {repo_detail} | {paper_detail} |\n"
return s
def main():
s = '| Applications | Repo | Paper |\n'
s += '|-----------------------------------|------|-------|\n'
for app_detail in apps_details:
s += add_row(app_detail)
fname = 'README.md'
with open(fname, 'r') as f:
content = f.read()
sig_start = '<!-- start of readme_appgen.py -->'
sig_end = '<!-- end of readme_appgen.py -->'
note = '<!-- Please do not edit this part directly, instead add your ' + 'application in the readme_appgen.py file -->\n'
idx_start = content.find(sig_start)
idx_end = content.find(sig_end)
content = content[:idx_start] + sig_start + '\n' + note + s + content[idx_end:]
with open(fname, 'w') as f:
f.write(content)
if __name__ == '__main__':
main() |
class TempChDir:
"""
Context manager to step into a directory temporarily. Use in a `with` block:
with TempChDir(path):
...do stuff...
This automatically changes cwd if necessary, upon entry of the block, and
changes it back on exit. Note that if `path` is ".", this is
a null operation, which can be handy in some situations.
"""
info = None
def __init__(self, path: Path):
self.old_dir = None
self.new_dir = Path(path).expanduser().absolute()
cwd = Path.cwd().absolute()
if cwd != self.new_dir:
self.old_dir = cwd
def __enter__(self):
if self.old_dir:
if self.info:
self.info(f'Changing CWD to {self.new_dir} (temporarily)')
os.chdir(self.new_dir)
def __exit__(self, *args):
if self.old_dir:
if self.info:
self.info(f'Changing CWD back to {self.old_dir}')
os.chdir(self.old_dir)
| class Tempchdir:
"""
Context manager to step into a directory temporarily. Use in a `with` block:
with TempChDir(path):
...do stuff...
This automatically changes cwd if necessary, upon entry of the block, and
changes it back on exit. Note that if `path` is ".", this is
a null operation, which can be handy in some situations.
"""
info = None
def __init__(self, path: Path):
self.old_dir = None
self.new_dir = path(path).expanduser().absolute()
cwd = Path.cwd().absolute()
if cwd != self.new_dir:
self.old_dir = cwd
def __enter__(self):
if self.old_dir:
if self.info:
self.info(f'Changing CWD to {self.new_dir} (temporarily)')
os.chdir(self.new_dir)
def __exit__(self, *args):
if self.old_dir:
if self.info:
self.info(f'Changing CWD back to {self.old_dir}')
os.chdir(self.old_dir) |
class ChainMap:
def __init__(self, *maps):
if maps:
self.maps = list(maps)
else:
self.maps = [{}]
def __getitem__(self, k):
for m in self.maps:
if k in m:
return m[k]
raise KeyError(k)
def __setitem__(self, k, v):
self.maps[0][k] = v
def __delitem__(self, k):
del self.maps[0][k]
| class Chainmap:
def __init__(self, *maps):
if maps:
self.maps = list(maps)
else:
self.maps = [{}]
def __getitem__(self, k):
for m in self.maps:
if k in m:
return m[k]
raise key_error(k)
def __setitem__(self, k, v):
self.maps[0][k] = v
def __delitem__(self, k):
del self.maps[0][k] |
#
# PySNMP MIB module BDCOM-FLASH (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BDCOM-FLASH
# Produced by pysmi-0.3.4 at Wed May 1 11:36:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
bdlocal, = mibBuilder.importSymbols("BDCOM-SMI", "bdlocal")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Counter32, Unsigned32, Gauge32, TimeTicks, IpAddress, ModuleIdentity, MibIdentifier, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, NotificationType, Integer32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter32", "Unsigned32", "Gauge32", "TimeTicks", "IpAddress", "ModuleIdentity", "MibIdentifier", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "NotificationType", "Integer32", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
bdlflash = MibIdentifier((1, 3, 6, 1, 4, 1, 3320, 2, 10))
bdflashSize = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashSize.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashSize.setDescription('Total Size in Octets of Flash memory')
bdflashFree = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashFree.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashFree.setDescription('Unused Size in Octets of Flash memory')
bdflashController = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashController.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashController.setDescription('Provides the type of Flash controller (either CCTL or CCTL2) installed in the router.')
bdflashCard = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashCard.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashCard.setDescription('Provides the type of Flash Card installed in the router. For example, the type of Flash Card could be either CSC-MS or CSC-MC+.')
bdflashVPP = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("installed", 1), ("missing", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashVPP.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashVPP.setDescription('State of the VPP DIP jumper on the Flash memory card. Files can be written to the Flash memory card only if the VPP DIP jumper is turned on.')
bdflashErase = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 6), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: bdflashErase.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashErase.setDescription('Request to erase flash memory')
bdflashEraseTime = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashEraseTime.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashEraseTime.setDescription('Indicates the value of sysUptime the last time Flash memory was erased. If the flash had not been erased after powerup it has a value of 0 days 00:00:00.')
bdflashEraseStatus = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("flashOpInProgress", 1), ("flashOpSuccess", 2), ("flashOpFailure", 3), ("flashReadOnly", 4), ("flashOpenFailure", 5), ("bufferAllocationFailure", 6), ("noOpAfterPowerOn", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashEraseStatus.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashEraseStatus.setDescription('Status of current or last flash erasing')
bdflashToNet = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 9), DisplayString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: bdflashToNet.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashToNet.setDescription('Write flash entry to tftp server. Value should be the name of the flash entry to send. Instance is the IP address of the tftp host.')
bdflashToNetTime = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 10), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashToNetTime.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashToNetTime.setDescription('Indicates the value of sysUpTime the last time a file was transfered from Flash memory on the router to a TFTP host. Returns 0 days 00:00:00 if there had been no transfer since powerup.')
bdflashToNetStatus = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("flashOpInProgress", 1), ("flashOpSuccess", 2), ("flashOpFailure", 3), ("flashReadOnly", 4), ("flashOpenFailure", 5), ("bufferAllocationFailure", 6), ("noOpAfterPowerOn", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashToNetStatus.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashToNetStatus.setDescription('Status of current or last flash to net transfer')
bdnetToFlash = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 12), DisplayString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: bdnetToFlash.setStatus('mandatory')
if mibBuilder.loadTexts: bdnetToFlash.setDescription('Write flash entry from tftp server. Value should be the name of the flash entry to write. Instance is the IP address of the tftp host.')
bdnetToFlashTime = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 13), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdnetToFlashTime.setStatus('mandatory')
if mibBuilder.loadTexts: bdnetToFlashTime.setDescription('Indicates the value of sysUpTime the last time file was copied from a Trivial File Transfer Protocol(TFTP) server to the Flash memory on the router. Returns 0 days 00:00:00 if there had been no transfers since powerup.')
bdnetToFlashStatus = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("flashOpInProgress", 1), ("flashOpSuccess", 2), ("flashOpFailure", 3), ("flashReadOnly", 4), ("flashOpenFailure", 5), ("bufferAllocationFailure", 6), ("noOpAfterPowerOn", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdnetToFlashStatus.setStatus('mandatory')
if mibBuilder.loadTexts: bdnetToFlashStatus.setDescription('Status of current or last net to flash transfer')
bdflashStatus = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("busy", 1), ("available", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashStatus.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashStatus.setDescription('Status of the availability of flash')
bdflashEntries = MibScalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashEntries.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashEntries.setDescription('Number of entries in the flash directory')
bdlflashFileDirTable = MibTable((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17), )
if mibBuilder.loadTexts: bdlflashFileDirTable.setStatus('mandatory')
if mibBuilder.loadTexts: bdlflashFileDirTable.setDescription(' A list of flash file entries.')
bdlflashFileDirEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1), ).setIndexNames((0, "BDCOM-FLASH", "flashEntries"))
if mibBuilder.loadTexts: bdlflashFileDirEntry.setStatus('mandatory')
if mibBuilder.loadTexts: bdlflashFileDirEntry.setDescription('A collection of flash eprom objects')
bdflashDirName = MibTableColumn((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashDirName.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashDirName.setDescription('Name associated with the flash entry')
bdflashDirSize = MibTableColumn((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashDirSize.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashDirSize.setDescription('Size in Octets of a flash entry')
bdflashDirStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("valid", 1), ("deleted", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bdflashDirStatus.setStatus('mandatory')
if mibBuilder.loadTexts: bdflashDirStatus.setDescription('Indicates the status of the entry')
mibBuilder.exportSymbols("BDCOM-FLASH", bdflashVPP=bdflashVPP, bdflashEraseTime=bdflashEraseTime, bdflashSize=bdflashSize, bdflashDirStatus=bdflashDirStatus, bdlflashFileDirEntry=bdlflashFileDirEntry, bdflashEntries=bdflashEntries, bdflashToNet=bdflashToNet, bdflashEraseStatus=bdflashEraseStatus, bdflashFree=bdflashFree, bdlflash=bdlflash, bdflashCard=bdflashCard, bdflashController=bdflashController, bdnetToFlashStatus=bdnetToFlashStatus, bdnetToFlashTime=bdnetToFlashTime, bdflashDirName=bdflashDirName, bdlflashFileDirTable=bdlflashFileDirTable, bdflashStatus=bdflashStatus, bdflashToNetStatus=bdflashToNetStatus, bdflashDirSize=bdflashDirSize, bdflashErase=bdflashErase, bdflashToNetTime=bdflashToNetTime, bdnetToFlash=bdnetToFlash)
| (octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_size_constraint, constraints_intersection, constraints_union, value_range_constraint, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint', 'ConstraintsIntersection', 'ConstraintsUnion', 'ValueRangeConstraint', 'SingleValueConstraint')
(bdlocal,) = mibBuilder.importSymbols('BDCOM-SMI', 'bdlocal')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(object_identity, counter32, unsigned32, gauge32, time_ticks, ip_address, module_identity, mib_identifier, counter64, mib_scalar, mib_table, mib_table_row, mib_table_column, iso, notification_type, integer32, bits) = mibBuilder.importSymbols('SNMPv2-SMI', 'ObjectIdentity', 'Counter32', 'Unsigned32', 'Gauge32', 'TimeTicks', 'IpAddress', 'ModuleIdentity', 'MibIdentifier', 'Counter64', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'iso', 'NotificationType', 'Integer32', 'Bits')
(display_string, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention')
bdlflash = mib_identifier((1, 3, 6, 1, 4, 1, 3320, 2, 10))
bdflash_size = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 1), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashSize.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashSize.setDescription('Total Size in Octets of Flash memory')
bdflash_free = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 2), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashFree.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashFree.setDescription('Unused Size in Octets of Flash memory')
bdflash_controller = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 3), display_string()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashController.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashController.setDescription('Provides the type of Flash controller (either CCTL or CCTL2) installed in the router.')
bdflash_card = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 4), display_string()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashCard.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashCard.setDescription('Provides the type of Flash Card installed in the router. For example, the type of Flash Card could be either CSC-MS or CSC-MC+.')
bdflash_vpp = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 5), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('installed', 1), ('missing', 2)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashVPP.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashVPP.setDescription('State of the VPP DIP jumper on the Flash memory card. Files can be written to the Flash memory card only if the VPP DIP jumper is turned on.')
bdflash_erase = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 6), integer32()).setMaxAccess('writeonly')
if mibBuilder.loadTexts:
bdflashErase.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashErase.setDescription('Request to erase flash memory')
bdflash_erase_time = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 7), time_ticks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashEraseTime.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashEraseTime.setDescription('Indicates the value of sysUptime the last time Flash memory was erased. If the flash had not been erased after powerup it has a value of 0 days 00:00:00.')
bdflash_erase_status = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 8), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=named_values(('flashOpInProgress', 1), ('flashOpSuccess', 2), ('flashOpFailure', 3), ('flashReadOnly', 4), ('flashOpenFailure', 5), ('bufferAllocationFailure', 6), ('noOpAfterPowerOn', 7)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashEraseStatus.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashEraseStatus.setDescription('Status of current or last flash erasing')
bdflash_to_net = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 9), display_string()).setMaxAccess('writeonly')
if mibBuilder.loadTexts:
bdflashToNet.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashToNet.setDescription('Write flash entry to tftp server. Value should be the name of the flash entry to send. Instance is the IP address of the tftp host.')
bdflash_to_net_time = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 10), time_ticks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashToNetTime.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashToNetTime.setDescription('Indicates the value of sysUpTime the last time a file was transfered from Flash memory on the router to a TFTP host. Returns 0 days 00:00:00 if there had been no transfer since powerup.')
bdflash_to_net_status = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 11), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=named_values(('flashOpInProgress', 1), ('flashOpSuccess', 2), ('flashOpFailure', 3), ('flashReadOnly', 4), ('flashOpenFailure', 5), ('bufferAllocationFailure', 6), ('noOpAfterPowerOn', 7)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashToNetStatus.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashToNetStatus.setDescription('Status of current or last flash to net transfer')
bdnet_to_flash = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 12), display_string()).setMaxAccess('writeonly')
if mibBuilder.loadTexts:
bdnetToFlash.setStatus('mandatory')
if mibBuilder.loadTexts:
bdnetToFlash.setDescription('Write flash entry from tftp server. Value should be the name of the flash entry to write. Instance is the IP address of the tftp host.')
bdnet_to_flash_time = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 13), time_ticks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdnetToFlashTime.setStatus('mandatory')
if mibBuilder.loadTexts:
bdnetToFlashTime.setDescription('Indicates the value of sysUpTime the last time file was copied from a Trivial File Transfer Protocol(TFTP) server to the Flash memory on the router. Returns 0 days 00:00:00 if there had been no transfers since powerup.')
bdnet_to_flash_status = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 14), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=named_values(('flashOpInProgress', 1), ('flashOpSuccess', 2), ('flashOpFailure', 3), ('flashReadOnly', 4), ('flashOpenFailure', 5), ('bufferAllocationFailure', 6), ('noOpAfterPowerOn', 7)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdnetToFlashStatus.setStatus('mandatory')
if mibBuilder.loadTexts:
bdnetToFlashStatus.setDescription('Status of current or last net to flash transfer')
bdflash_status = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 15), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('busy', 1), ('available', 2)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashStatus.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashStatus.setDescription('Status of the availability of flash')
bdflash_entries = mib_scalar((1, 3, 6, 1, 4, 1, 3320, 2, 10, 16), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashEntries.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashEntries.setDescription('Number of entries in the flash directory')
bdlflash_file_dir_table = mib_table((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17))
if mibBuilder.loadTexts:
bdlflashFileDirTable.setStatus('mandatory')
if mibBuilder.loadTexts:
bdlflashFileDirTable.setDescription(' A list of flash file entries.')
bdlflash_file_dir_entry = mib_table_row((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1)).setIndexNames((0, 'BDCOM-FLASH', 'flashEntries'))
if mibBuilder.loadTexts:
bdlflashFileDirEntry.setStatus('mandatory')
if mibBuilder.loadTexts:
bdlflashFileDirEntry.setDescription('A collection of flash eprom objects')
bdflash_dir_name = mib_table_column((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1, 1), display_string()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashDirName.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashDirName.setDescription('Name associated with the flash entry')
bdflash_dir_size = mib_table_column((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1, 2), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashDirSize.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashDirSize.setDescription('Size in Octets of a flash entry')
bdflash_dir_status = mib_table_column((1, 3, 6, 1, 4, 1, 3320, 2, 10, 17, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('valid', 1), ('deleted', 2)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
bdflashDirStatus.setStatus('mandatory')
if mibBuilder.loadTexts:
bdflashDirStatus.setDescription('Indicates the status of the entry')
mibBuilder.exportSymbols('BDCOM-FLASH', bdflashVPP=bdflashVPP, bdflashEraseTime=bdflashEraseTime, bdflashSize=bdflashSize, bdflashDirStatus=bdflashDirStatus, bdlflashFileDirEntry=bdlflashFileDirEntry, bdflashEntries=bdflashEntries, bdflashToNet=bdflashToNet, bdflashEraseStatus=bdflashEraseStatus, bdflashFree=bdflashFree, bdlflash=bdlflash, bdflashCard=bdflashCard, bdflashController=bdflashController, bdnetToFlashStatus=bdnetToFlashStatus, bdnetToFlashTime=bdnetToFlashTime, bdflashDirName=bdflashDirName, bdlflashFileDirTable=bdlflashFileDirTable, bdflashStatus=bdflashStatus, bdflashToNetStatus=bdflashToNetStatus, bdflashDirSize=bdflashDirSize, bdflashErase=bdflashErase, bdflashToNetTime=bdflashToNetTime, bdnetToFlash=bdnetToFlash) |
# https://www.codewars.com/kata/human-readable-duration-format
def format_duration(seconds):
if not seconds:
return "now"
units = [
("year", 365 * 24 * 60 * 60),
("day", 24 * 60 * 60),
("hour", 60 * 60),
("minute", 60),
("second", 1)
]
parts = []
for unit, divisor in units:
quantity, seconds = divmod(seconds, divisor)
if quantity:
parts.append("{} {}{}".format(quantity, unit, "s" if quantity > 1 else ""))
return parts[0] if len(parts) == 1 else ", ".join(parts[:-1]) + " and " + parts[-1] | def format_duration(seconds):
if not seconds:
return 'now'
units = [('year', 365 * 24 * 60 * 60), ('day', 24 * 60 * 60), ('hour', 60 * 60), ('minute', 60), ('second', 1)]
parts = []
for (unit, divisor) in units:
(quantity, seconds) = divmod(seconds, divisor)
if quantity:
parts.append('{} {}{}'.format(quantity, unit, 's' if quantity > 1 else ''))
return parts[0] if len(parts) == 1 else ', '.join(parts[:-1]) + ' and ' + parts[-1] |
class RotationMatrix(object):
def __init__(self, M, N, entries):
self.M, self.N = M, N
self.entries = dict(entries)
self.tier_index = self._create_tier_index()
def __str__(self):
string = ""
for i in range(self.M):
for j in range(self.N):
sep = " " if j < self.N-1 else "\n"
string += str(self.entries[(i,j)]) + sep
return string
def rotate_matrix(self, R):
for index in self.tier_index:
tier_copy = {key:self.entries[key] for key in index}
for list_index, key in enumerate(index):
rkey = index[(list_index + R + 1) % len(index) - 1]
self.entries[rkey] = tier_copy[key]
def _create_tier_index(self):
row, col, tier_index = 0, 0, []
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
while self.M - 2 * row > 0 and self.N - 2 * col > 0:
i, j = row, col
tier_list = []
for move in directions:
while True:
if i + move[0] > self.M - row - 1 or i + move[0] < row or \
j + move[1] > self.N - col - 1 or j + move[1] < col:
break
else:
i, j = i + move[0], j + move[1]
tier_list.append((i, j))
tier_index.append(tier_list)
row, col = row + 1, col + 1
return tier_index
M, N, R = [int(value) for value in input().split()]
mat = {}
for i in range(M):
values = input().split()
for j in range(N):
mat[(i,j)] = int(values[j])
A = RotationMatrix(M, N, mat)
A.rotate_matrix(R)
print(A)
| class Rotationmatrix(object):
def __init__(self, M, N, entries):
(self.M, self.N) = (M, N)
self.entries = dict(entries)
self.tier_index = self._create_tier_index()
def __str__(self):
string = ''
for i in range(self.M):
for j in range(self.N):
sep = ' ' if j < self.N - 1 else '\n'
string += str(self.entries[i, j]) + sep
return string
def rotate_matrix(self, R):
for index in self.tier_index:
tier_copy = {key: self.entries[key] for key in index}
for (list_index, key) in enumerate(index):
rkey = index[(list_index + R + 1) % len(index) - 1]
self.entries[rkey] = tier_copy[key]
def _create_tier_index(self):
(row, col, tier_index) = (0, 0, [])
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
while self.M - 2 * row > 0 and self.N - 2 * col > 0:
(i, j) = (row, col)
tier_list = []
for move in directions:
while True:
if i + move[0] > self.M - row - 1 or i + move[0] < row or j + move[1] > self.N - col - 1 or (j + move[1] < col):
break
else:
(i, j) = (i + move[0], j + move[1])
tier_list.append((i, j))
tier_index.append(tier_list)
(row, col) = (row + 1, col + 1)
return tier_index
(m, n, r) = [int(value) for value in input().split()]
mat = {}
for i in range(M):
values = input().split()
for j in range(N):
mat[i, j] = int(values[j])
a = rotation_matrix(M, N, mat)
A.rotate_matrix(R)
print(A) |
__version__ = "0.0.10"
__description__ = "Python client for interfacing with NTCore"
__license__ = "Apache 2.0"
__maintainer__ = "NTCore"
__maintainer_email__ = "info@nantutech.com"
__title__ = "ntcore"
__url__ = "https://www.nantu.io/" | __version__ = '0.0.10'
__description__ = 'Python client for interfacing with NTCore'
__license__ = 'Apache 2.0'
__maintainer__ = 'NTCore'
__maintainer_email__ = 'info@nantutech.com'
__title__ = 'ntcore'
__url__ = 'https://www.nantu.io/' |
def get_the_2nd_lower( stu_list ):
min_grade = min( stu_list, key = lambda x: x[1])[1]
#print( min_grade )
stu_list_without_lowest = [ s for s in stu_list if s[1] != min_grade]
# sort with student's name of ascending order
stu_list_without_lowest.sort( key = lambda x:x[0])
# get second lower grade
second_lower = min( stu_list_without_lowest, key = lambda x: x[1])[1]
for s in stu_list_without_lowest:
#print( s[0] )
#print( s[1] )
if s[1] == second_lower:
print(s[0])
return
if __name__ == '__main__':
student_list = []
for _ in range(int(input())):
name = input()
score = float(input())
student_list.append( list([name, score]) )
get_the_2nd_lower( student_list )
| def get_the_2nd_lower(stu_list):
min_grade = min(stu_list, key=lambda x: x[1])[1]
stu_list_without_lowest = [s for s in stu_list if s[1] != min_grade]
stu_list_without_lowest.sort(key=lambda x: x[0])
second_lower = min(stu_list_without_lowest, key=lambda x: x[1])[1]
for s in stu_list_without_lowest:
if s[1] == second_lower:
print(s[0])
return
if __name__ == '__main__':
student_list = []
for _ in range(int(input())):
name = input()
score = float(input())
student_list.append(list([name, score]))
get_the_2nd_lower(student_list) |
'''
Given a non-empty string s and an integer k, rearrange the string such that the same characters are at least distance k from each other.
All input strings are given in lowercase letters. If it is not possible to rearrange the string, return an empty string "".
Example 1:
Input: s = "aabbcc", k = 3
Output: "abcabc"
Explanation: The same letters are at least distance 3 from each other.
Example 2:
Input: s = "aaabc", k = 3
Output: ""
Explanation: It is not possible to rearrange the string.
Example 3:
Input: s = "aaadbbcc", k = 2
Output: "abacabcd"
Explanation: The same letters are at least distance 2 from each other.
'''
class Solution(object):
def rearrangeString(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
dic = {}
for c in s:
if c in dic:
dic[c] += 1
else:
dic[c] = 1
h = []
for key, value in dic.iteritems():
heapq.heappush(h, (-value, key))
res = []
last_idx = {}
for _ in xrange(len(s)):
modified = False
vec = []
while h:
neg_value, key = heapq.heappop(h)
if key not in last_idx or len(res) - last_idx[key] >= k:
last_idx[key] = len(res)
res.append(key)
if neg_value + 1 < 0:
vec.append((neg_value + 1, key))
modified = True
break
else:
vec.append((neg_value, key))
if not modified:
return ''
for item in vec:
heapq.heappush(h, item)
return ''.join(res)
| """
Given a non-empty string s and an integer k, rearrange the string such that the same characters are at least distance k from each other.
All input strings are given in lowercase letters. If it is not possible to rearrange the string, return an empty string "".
Example 1:
Input: s = "aabbcc", k = 3
Output: "abcabc"
Explanation: The same letters are at least distance 3 from each other.
Example 2:
Input: s = "aaabc", k = 3
Output: ""
Explanation: It is not possible to rearrange the string.
Example 3:
Input: s = "aaadbbcc", k = 2
Output: "abacabcd"
Explanation: The same letters are at least distance 2 from each other.
"""
class Solution(object):
def rearrange_string(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
dic = {}
for c in s:
if c in dic:
dic[c] += 1
else:
dic[c] = 1
h = []
for (key, value) in dic.iteritems():
heapq.heappush(h, (-value, key))
res = []
last_idx = {}
for _ in xrange(len(s)):
modified = False
vec = []
while h:
(neg_value, key) = heapq.heappop(h)
if key not in last_idx or len(res) - last_idx[key] >= k:
last_idx[key] = len(res)
res.append(key)
if neg_value + 1 < 0:
vec.append((neg_value + 1, key))
modified = True
break
else:
vec.append((neg_value, key))
if not modified:
return ''
for item in vec:
heapq.heappush(h, item)
return ''.join(res) |
class HttpListenerPrefixCollection(object,ICollection[str],IEnumerable[str],IEnumerable):
""" Represents the collection used to store Uniform Resource Identifier (URI) prefixes for System.Net.HttpListener objects. """
def Add(self,uriPrefix):
"""
Add(self: HttpListenerPrefixCollection,uriPrefix: str)
Adds a Uniform Resource Identifier (URI) prefix to the collection.
uriPrefix: A System.String that identifies the URI information that is compared in
incoming requests. The prefix must be terminated with a forward slash ("/").
"""
pass
def Clear(self):
"""
Clear(self: HttpListenerPrefixCollection)
Removes all the Uniform Resource Identifier (URI) prefixes from the collection.
"""
pass
def Contains(self,uriPrefix):
"""
Contains(self: HttpListenerPrefixCollection,uriPrefix: str) -> bool
Returns a System.Boolean value that indicates whether the specified prefix is
contained in the collection.
uriPrefix: A System.String that contains the Uniform Resource Identifier (URI) prefix to
test.
Returns: true if this collection contains the prefix specified by uriPrefix; otherwise,
false.
"""
pass
def CopyTo(self,array,offset):
"""
CopyTo(self: HttpListenerPrefixCollection,array: Array[str],offset: int)
Copies the contents of an System.Net.HttpListenerPrefixCollection to the
specified string array.
array: The one dimensional string array that receives the Uniform Resource Identifier
(URI) prefix strings in this collection.
offset: The zero-based index in array at which copying begins.
CopyTo(self: HttpListenerPrefixCollection,array: Array,offset: int)
Copies the contents of an System.Net.HttpListenerPrefixCollection to the
specified array.
array: The one dimensional System.Array that receives the Uniform Resource Identifier
(URI) prefix strings in this collection.
offset: The zero-based index in array at which copying begins.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: HttpListenerPrefixCollection) -> IEnumerator[str]
Returns an object that can be used to iterate through the collection.
Returns: An object that implements the System.Collections.IEnumerator interface and
provides access to the strings in this collection.
"""
pass
def Remove(self,uriPrefix):
"""
Remove(self: HttpListenerPrefixCollection,uriPrefix: str) -> bool
Removes the specified Uniform Resource Identifier (URI) from the list of
prefixes handled by the System.Net.HttpListener object.
uriPrefix: A System.String that contains the URI prefix to remove.
Returns: true if the uriPrefix was found in the System.Net.HttpListenerPrefixCollection
and removed; otherwise false.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
""" __contains__(self: ICollection[str],item: str) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of prefixes contained in the collection.
Get: Count(self: HttpListenerPrefixCollection) -> int
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether access to the collection is read-only.
Get: IsReadOnly(self: HttpListenerPrefixCollection) -> bool
"""
IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether access to the collection is synchronized (thread-safe).
Get: IsSynchronized(self: HttpListenerPrefixCollection) -> bool
"""
| class Httplistenerprefixcollection(object, ICollection[str], IEnumerable[str], IEnumerable):
""" Represents the collection used to store Uniform Resource Identifier (URI) prefixes for System.Net.HttpListener objects. """
def add(self, uriPrefix):
"""
Add(self: HttpListenerPrefixCollection,uriPrefix: str)
Adds a Uniform Resource Identifier (URI) prefix to the collection.
uriPrefix: A System.String that identifies the URI information that is compared in
incoming requests. The prefix must be terminated with a forward slash ("/").
"""
pass
def clear(self):
"""
Clear(self: HttpListenerPrefixCollection)
Removes all the Uniform Resource Identifier (URI) prefixes from the collection.
"""
pass
def contains(self, uriPrefix):
"""
Contains(self: HttpListenerPrefixCollection,uriPrefix: str) -> bool
Returns a System.Boolean value that indicates whether the specified prefix is
contained in the collection.
uriPrefix: A System.String that contains the Uniform Resource Identifier (URI) prefix to
test.
Returns: true if this collection contains the prefix specified by uriPrefix; otherwise,
false.
"""
pass
def copy_to(self, array, offset):
"""
CopyTo(self: HttpListenerPrefixCollection,array: Array[str],offset: int)
Copies the contents of an System.Net.HttpListenerPrefixCollection to the
specified string array.
array: The one dimensional string array that receives the Uniform Resource Identifier
(URI) prefix strings in this collection.
offset: The zero-based index in array at which copying begins.
CopyTo(self: HttpListenerPrefixCollection,array: Array,offset: int)
Copies the contents of an System.Net.HttpListenerPrefixCollection to the
specified array.
array: The one dimensional System.Array that receives the Uniform Resource Identifier
(URI) prefix strings in this collection.
offset: The zero-based index in array at which copying begins.
"""
pass
def get_enumerator(self):
"""
GetEnumerator(self: HttpListenerPrefixCollection) -> IEnumerator[str]
Returns an object that can be used to iterate through the collection.
Returns: An object that implements the System.Collections.IEnumerator interface and
provides access to the strings in this collection.
"""
pass
def remove(self, uriPrefix):
"""
Remove(self: HttpListenerPrefixCollection,uriPrefix: str) -> bool
Removes the specified Uniform Resource Identifier (URI) from the list of
prefixes handled by the System.Net.HttpListener object.
uriPrefix: A System.String that contains the URI prefix to remove.
Returns: true if the uriPrefix was found in the System.Net.HttpListenerPrefixCollection
and removed; otherwise false.
"""
pass
def __add__(self, *args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, *args):
""" __contains__(self: ICollection[str],item: str) -> bool """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self, *args):
""" x.__len__() <==> len(x) """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
count = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets the number of prefixes contained in the collection.\n\n\n\nGet: Count(self: HttpListenerPrefixCollection) -> int\n\n\n\n'
is_read_only = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets a value that indicates whether access to the collection is read-only.\n\n\n\nGet: IsReadOnly(self: HttpListenerPrefixCollection) -> bool\n\n\n\n'
is_synchronized = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets a value that indicates whether access to the collection is synchronized (thread-safe).\n\n\n\nGet: IsSynchronized(self: HttpListenerPrefixCollection) -> bool\n\n\n\n' |
"""
Events app for the CDH webapp.
Provides event types, locations, and events
"""
default_app_config = "cdhweb.events.apps.EventsConfig"
| """
Events app for the CDH webapp.
Provides event types, locations, and events
"""
default_app_config = 'cdhweb.events.apps.EventsConfig' |
def padovan(n):
res=[1,1,1]
for i in range(n-2):
res.append(res[-2]+res[-3])
return res[n] | def padovan(n):
res = [1, 1, 1]
for i in range(n - 2):
res.append(res[-2] + res[-3])
return res[n] |
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Recursive DFS
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if root == None:
return 0
else:
left_height = self.maxDepth(root.left)
right_height = self.maxDepth(root.right)
return max(left_height, right_height) + 1
# BFS
class Solution2:
def maxDepth(self, root: TreeNode) -> int:
if root == None:
return 0
queue = [root]
dep = 0
while len(queue) != 0:
l = len(queue)
while l :
current = queue.pop(0)
if current.left:
queue.append(current.left)
if current.right:
queue.append(current.right)
l -= 1
if l == 0:
dep += 1
return dep | class Treenode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def max_depth(self, root: TreeNode) -> int:
if root == None:
return 0
else:
left_height = self.maxDepth(root.left)
right_height = self.maxDepth(root.right)
return max(left_height, right_height) + 1
class Solution2:
def max_depth(self, root: TreeNode) -> int:
if root == None:
return 0
queue = [root]
dep = 0
while len(queue) != 0:
l = len(queue)
while l:
current = queue.pop(0)
if current.left:
queue.append(current.left)
if current.right:
queue.append(current.right)
l -= 1
if l == 0:
dep += 1
return dep |
# Copyright (c) 2020, Vercer Ltd. Rights set out in LICENCE.txt
class StatementAlreadyPreparedException(Exception):
pass
class StatementNotPreparedException(Exception):
pass
class PreparedQueryNotSupported(Exception):
pass
class CannotAlterPreparedStatementQuerySet(Exception):
pass
class PreparedStatementNotYetExecuted(Exception):
pass
class StatementNotRegistered(Exception):
pass
| class Statementalreadypreparedexception(Exception):
pass
class Statementnotpreparedexception(Exception):
pass
class Preparedquerynotsupported(Exception):
pass
class Cannotalterpreparedstatementqueryset(Exception):
pass
class Preparedstatementnotyetexecuted(Exception):
pass
class Statementnotregistered(Exception):
pass |
__version__ = (
'1.2'
".3"
)
__custom__ = 42
| __version__ = '1.2.3'
__custom__ = 42 |
TITLE = "Jumpy Boi"
# screen dims
WIDTH = 1280
HEIGHT = 760
# frames per second
FPS = 60
# colors
WHITE = (255, 255, 255)
BLACK = (0,0,0)
REDDISH = (240,55,66)
SKY_BLUE = (143, 185, 252)
BROWN = (153, 140, 113)
GRAY = (110, 160, 149)
DARK_BLUE = (0, 23, 176)
FONT_NAME = 'arial'
SPRITESHEET = "spritesheet_jumper.png"
# data files
HS_FILE = "highscore.txt"
# player settings
PLAYER_ACC = 0.75
PLAYER_FRICTION = -0.12
PLAYER_GRAV = 0.8
PLAYER_JUMP = 25
# game settings
BOOST_POWER = 60
POW_SPAWN_PCT = 8
MOB_FREQ = 500
# layers - uses numerical value in layered sprites
PLAYER_LAYER = 3
PLATFORM_LAYER = 2
POW_LAYER = 4
MOB_LAYER = 3
CLOUD_LAYER = 1
BACKGROUND_LAYER = 0
# platform settings
PLATFORM_LIST = [(25, HEIGHT - 40),
(WIDTH/2, HEIGHT - 200),
(20, HEIGHT - 350),
(500, HEIGHT - 150),
(800, HEIGHT - 450),
(-20, HEIGHT - 350),
(-500, HEIGHT - 150),
(-10, HEIGHT - 550),
(500, HEIGHT - 150),
(60, HEIGHT - 300),
(530, HEIGHT - 250),
] | title = 'Jumpy Boi'
width = 1280
height = 760
fps = 60
white = (255, 255, 255)
black = (0, 0, 0)
reddish = (240, 55, 66)
sky_blue = (143, 185, 252)
brown = (153, 140, 113)
gray = (110, 160, 149)
dark_blue = (0, 23, 176)
font_name = 'arial'
spritesheet = 'spritesheet_jumper.png'
hs_file = 'highscore.txt'
player_acc = 0.75
player_friction = -0.12
player_grav = 0.8
player_jump = 25
boost_power = 60
pow_spawn_pct = 8
mob_freq = 500
player_layer = 3
platform_layer = 2
pow_layer = 4
mob_layer = 3
cloud_layer = 1
background_layer = 0
platform_list = [(25, HEIGHT - 40), (WIDTH / 2, HEIGHT - 200), (20, HEIGHT - 350), (500, HEIGHT - 150), (800, HEIGHT - 450), (-20, HEIGHT - 350), (-500, HEIGHT - 150), (-10, HEIGHT - 550), (500, HEIGHT - 150), (60, HEIGHT - 300), (530, HEIGHT - 250)] |
"""
Stride.py
Author: Matthew Yu, Array Lead (2020).
Contact: matthewjkyu@gmail.com
Created: 11/19/20
Last Modified: 02/27/21
Description: Implementation of the Stride class.
"""
# Library Imports.
# Custom Imports.
class Stride:
"""
The Stride class provides the base API for derived classes to
calculate the stride (change of VREF) for various MPPT algorithms.
By default, the stride function implemented by the concrete base class is a
fixed stride.
"""
def __init__(self, strideType="Fixed", minStride=0.01, VMPP=0.621, error=0.05):
"""
Sets up the initial source parameters.
Parameters
----------
strideType: String
The name of the stride type.
minStride: float
The minimum value of the stride, if applicable.
VMPP: float
Our estimation of the PVSource voltage at the maximum power point.
Note that the default value is for a single cell and is an
experimental estimate; according to Sunniva the cell VMPP is 0.621.
error: float
The minimum error percentage of V_best to serve as our minimum
stride.
"""
# Name of the explicit stride function used.
self._strideType = strideType
# The minimum stride attempted in any iteration.
self._minStride = minStride
# The previous iteration characteristics.
self.vOld = 0.0
self.iOld = 0.0
self.pOld = 0.0
self.irrOld = 0.0
self.tOld = 0.0
# The anticipated VMPP to aim for.
self.VMPP = VMPP
# User defined error for determining variable minimum stride distance.
self.error = error
def setup(self, VMPP=0.621, error=0.05):
"""
Reinitializes the predicted parameters for the local MPPT algorithms context.
Parameters
----------
VMPP: float
Our estimation of the PVSource voltage at the maximum power point.
Note that the default value is for a single cell and is an
experimental estimate; according to Sunniva the cell VMPP is 0.621.
error: float
The minimum error percentage of V_best to serve as our minimum stride.
"""
self.VMPP = VMPP
self.error = error
def getStride(self, arrVoltage, arrCurrent, irradiance, temperature):
"""
Calculates the voltage stride for the given PVSource output.
May use prior history.
By default, we output a fixed stride.
Parameters
----------
arrVoltage: float
Array voltage in V.
arrCurrent: float
Array current in A.
irradiance: float
Irradiance in W/M^2 (G)
temperature: float
Cell Temperature in C.
Return
------
float The change in voltage that should be applied to the array in the
next cycle.
Assumptions
-----------
This method is called sequentially in increasing cycle order. The
arrVoltage and arrCurrent are expected to have stabilized to the
reference voltage applied in the last cycle, if any.
Note that the second assumption doesn't hold true in reality, as large
changes in reference voltage may mean the array does not converge to
steady state behavior by the next MPPT cycle. This should always be
considered in the algorithms.
"""
return self._minStride
def reset(self):
"""
Resets any internal variables set by the MPPT algorithm during operation.
"""
self.vOld = 0.0
self.iOld = 0.0
self.pOld = 0.0
self.irrOld = 0.0
self.tOld = 0.0
def getStrideType(self):
"""
Returns the Stride model type used for the simulation.
Return
------
String: Stride type name.
"""
return self._strideType
| """
Stride.py
Author: Matthew Yu, Array Lead (2020).
Contact: matthewjkyu@gmail.com
Created: 11/19/20
Last Modified: 02/27/21
Description: Implementation of the Stride class.
"""
class Stride:
"""
The Stride class provides the base API for derived classes to
calculate the stride (change of VREF) for various MPPT algorithms.
By default, the stride function implemented by the concrete base class is a
fixed stride.
"""
def __init__(self, strideType='Fixed', minStride=0.01, VMPP=0.621, error=0.05):
"""
Sets up the initial source parameters.
Parameters
----------
strideType: String
The name of the stride type.
minStride: float
The minimum value of the stride, if applicable.
VMPP: float
Our estimation of the PVSource voltage at the maximum power point.
Note that the default value is for a single cell and is an
experimental estimate; according to Sunniva the cell VMPP is 0.621.
error: float
The minimum error percentage of V_best to serve as our minimum
stride.
"""
self._strideType = strideType
self._minStride = minStride
self.vOld = 0.0
self.iOld = 0.0
self.pOld = 0.0
self.irrOld = 0.0
self.tOld = 0.0
self.VMPP = VMPP
self.error = error
def setup(self, VMPP=0.621, error=0.05):
"""
Reinitializes the predicted parameters for the local MPPT algorithms context.
Parameters
----------
VMPP: float
Our estimation of the PVSource voltage at the maximum power point.
Note that the default value is for a single cell and is an
experimental estimate; according to Sunniva the cell VMPP is 0.621.
error: float
The minimum error percentage of V_best to serve as our minimum stride.
"""
self.VMPP = VMPP
self.error = error
def get_stride(self, arrVoltage, arrCurrent, irradiance, temperature):
"""
Calculates the voltage stride for the given PVSource output.
May use prior history.
By default, we output a fixed stride.
Parameters
----------
arrVoltage: float
Array voltage in V.
arrCurrent: float
Array current in A.
irradiance: float
Irradiance in W/M^2 (G)
temperature: float
Cell Temperature in C.
Return
------
float The change in voltage that should be applied to the array in the
next cycle.
Assumptions
-----------
This method is called sequentially in increasing cycle order. The
arrVoltage and arrCurrent are expected to have stabilized to the
reference voltage applied in the last cycle, if any.
Note that the second assumption doesn't hold true in reality, as large
changes in reference voltage may mean the array does not converge to
steady state behavior by the next MPPT cycle. This should always be
considered in the algorithms.
"""
return self._minStride
def reset(self):
"""
Resets any internal variables set by the MPPT algorithm during operation.
"""
self.vOld = 0.0
self.iOld = 0.0
self.pOld = 0.0
self.irrOld = 0.0
self.tOld = 0.0
def get_stride_type(self):
"""
Returns the Stride model type used for the simulation.
Return
------
String: Stride type name.
"""
return self._strideType |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021-2022 F4PGA Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
# -- General configuration ------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'fpga-interchange-tests'
copyright = '2020, Various'
author = 'Various'
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
language = None
exclude_patterns = []
pygments_style = 'default'
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_show_sourcelink = True
html_theme = 'sphinx_f4pga_theme'
html_theme_options = {
'repo_name': 'chipsalliance/fpga-interchange-tests',
'github_url' : 'https://github.com/chipsalliance/fpga-interchange-tests',
'globaltoc_collapse': True,
'color_primary': 'indigo',
'color_accent': 'blue',
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'fpga-interchange-tests'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [
(
master_doc, 'fpga-interchange-tests.tex',
'fpga-interchange-tests Design support status', 'Various', 'manual'
),
]
# -- Options for manual page output ---------------------------------------
man_pages = [
(
master_doc, 'fpga-interchange-tests',
'fpga-interchange-tests Design support status',
[author], 1
)
]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
(
master_doc, 'fpga-interchange-tests',
'fpga-interchange-tests Design status',
author, 'fpga-interchange-tests',
'FPGA interchange design support status',
'Miscellaneous'
),
]
| extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'fpga-interchange-tests'
copyright = '2020, Various'
author = 'Various'
version = '0.1'
release = '0.1'
language = None
exclude_patterns = []
pygments_style = 'default'
todo_include_todos = False
html_show_sourcelink = True
html_theme = 'sphinx_f4pga_theme'
html_theme_options = {'repo_name': 'chipsalliance/fpga-interchange-tests', 'github_url': 'https://github.com/chipsalliance/fpga-interchange-tests', 'globaltoc_collapse': True, 'color_primary': 'indigo', 'color_accent': 'blue'}
htmlhelp_basename = 'fpga-interchange-tests'
latex_elements = {}
latex_documents = [(master_doc, 'fpga-interchange-tests.tex', 'fpga-interchange-tests Design support status', 'Various', 'manual')]
man_pages = [(master_doc, 'fpga-interchange-tests', 'fpga-interchange-tests Design support status', [author], 1)]
texinfo_documents = [(master_doc, 'fpga-interchange-tests', 'fpga-interchange-tests Design status', author, 'fpga-interchange-tests', 'FPGA interchange design support status', 'Miscellaneous')] |
mail = "To Tax authortiy, Last year earnings of our consultants are given as below (in GBP)\n \
Sirish Dhulipala: 4000 for Jan, 4100 for Feb, 4200 For March, 3900 for April, 4000 May, 4100 June, 4000 July, 4000 August, 4000 September, 4000 October, 4000 November, 4000 December \n \
Anand Reddy: 18000 for first half and 20000 for second half of year \n \
Vinay Kallu: 9400 for Q1, 9800 for Q2, 9700 for Q3, 9500 for Q4"
| mail = 'To Tax authortiy, Last year earnings of our consultants are given as below (in GBP)\n Sirish Dhulipala: 4000 for Jan, 4100 for Feb, 4200 For March, 3900 for April, 4000 May, 4100 June, 4000 July, 4000 August, 4000 September, 4000 October, 4000 November, 4000 December \n Anand Reddy: 18000 for first half and 20000 for second half of year \n Vinay Kallu: 9400 for Q1, 9800 for Q2, 9700 for Q3, 9500 for Q4' |
'''
2^(15) = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^(1000)?
'''
n = 0
for _ in list(map(int, list(str(2**1000)))):
n += _
print(n)
| """
2^(15) = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^(1000)?
"""
n = 0
for _ in list(map(int, list(str(2 ** 1000)))):
n += _
print(n) |
SAMTRYGG_JSON_API_URL = 'https://www.samtrygg.se/RentalObject/SearchResult'
SAMTRYGG_DATASTORE_FILEPATH = '/datastore/samtrygg_data.json'
SAMTRYGG_PROCESSED_DATASTORE_FILEPATH = '/datastore/processed_samtrygg_data.json'
SAMTRYGG_PROCESSED_UNSEEN_DATASTORE_FILEPATH = '/datastore/processed_unseen_samtrygg_data.json'
| samtrygg_json_api_url = 'https://www.samtrygg.se/RentalObject/SearchResult'
samtrygg_datastore_filepath = '/datastore/samtrygg_data.json'
samtrygg_processed_datastore_filepath = '/datastore/processed_samtrygg_data.json'
samtrygg_processed_unseen_datastore_filepath = '/datastore/processed_unseen_samtrygg_data.json' |
colors = {
"Light": {
"BackgroundColor": [1, 0, 1, .2],
"TabColor": [1, 0, 1, .3],
"ThemeColor": [1, 0, 1, 1],
"BottomStatusColor": [.9, 0.9, 1, .8],
"PrimaryTextColor": [0, 0, 0, 1],
"SecondaryTextColor": [0, 0, 0, .5],
"SelectorHoverColor": [.7, .7, .7, .5],
"SelectorActiveColor": [1, 0, 1, 1],
"SelectorNormalColor": [1, 1, 1, .7]
},
"Dark": {
"BackgroundColor": [0.05, 0.05, .05, 1],
"TabColor": [0.05, 0.05, .05, .5],
"ThemeColor": [1, 0, 1, 1],
"BottomStatusColor": [1, 0, 1, .5],
"PrimaryTextColor": [1, 1, 1, 1],
"SecondaryTextColor": [1, 1, 1, .6],
"SelectorHoverColor": [.3, .3, .3, .5],
"SelectorActiveColor": [1, 0, 1, .5],
"SelectorNormalColor": [.1, .1, .1, .3]
}
}
def get_color(style, color_type):
return colors[style][color_type]
| colors = {'Light': {'BackgroundColor': [1, 0, 1, 0.2], 'TabColor': [1, 0, 1, 0.3], 'ThemeColor': [1, 0, 1, 1], 'BottomStatusColor': [0.9, 0.9, 1, 0.8], 'PrimaryTextColor': [0, 0, 0, 1], 'SecondaryTextColor': [0, 0, 0, 0.5], 'SelectorHoverColor': [0.7, 0.7, 0.7, 0.5], 'SelectorActiveColor': [1, 0, 1, 1], 'SelectorNormalColor': [1, 1, 1, 0.7]}, 'Dark': {'BackgroundColor': [0.05, 0.05, 0.05, 1], 'TabColor': [0.05, 0.05, 0.05, 0.5], 'ThemeColor': [1, 0, 1, 1], 'BottomStatusColor': [1, 0, 1, 0.5], 'PrimaryTextColor': [1, 1, 1, 1], 'SecondaryTextColor': [1, 1, 1, 0.6], 'SelectorHoverColor': [0.3, 0.3, 0.3, 0.5], 'SelectorActiveColor': [1, 0, 1, 0.5], 'SelectorNormalColor': [0.1, 0.1, 0.1, 0.3]}}
def get_color(style, color_type):
return colors[style][color_type] |
# https://leetcode.com/problems/sort-colors/
# Related Topics: Array, Two Pointers, Sort
# Difficulty: Medium
# Initial thoughts:
# Since we are dealing with a small and predefined set of possiblities (3 colors in this case)
# we can loop once over the array, creating a frequency table for the 3 colors and then fill
# the array according to the freq table.
# Time Complexity: O(n) where n == the number of elements in the nums array
# Space Complexity: O(1) (the extra space required for the freq table is constant)
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
count = [0]*3
for el in nums:
count[el] += 1
c = 0
for i in range(len(count)):
for j in range(count[i]):
nums[c] = i
c += 1
# Optimization:
# We can solve this problem in one pass instead of two. The idea is based on the fact that
# the sort space in limited two only three distinct individuals. This allows us to have a pointer
# at the front, one at the back and swap the colors that belong to the front or the back with them
# while moving forward. This way, the color that belongs to the middle will end up at the middle.
# There is just one catch: when swapping the current element with the one at the back (bc it belongs
# to the back), we can't move our current pointer forward, because the element that we swapped with
# could belong to the back (e.g. [2,0,2])
# Time Complexity: O(n) where n == the number of elements in the nums array
# Space Complexity: O(1) (the extra space required for the freq table is constant)
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
left = curr = 0
right = len(nums)-1
while curr <= right:
if nums[curr] == 0:
nums[curr], nums[left] = nums[left], nums[curr]
left += 1
curr += 1
elif nums[curr] == 2:
nums[curr], nums[right] = nums[right], nums[curr]
right -= 1
else:
curr += 1
| class Solution:
def sort_colors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
count = [0] * 3
for el in nums:
count[el] += 1
c = 0
for i in range(len(count)):
for j in range(count[i]):
nums[c] = i
c += 1
class Solution:
def sort_colors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
left = curr = 0
right = len(nums) - 1
while curr <= right:
if nums[curr] == 0:
(nums[curr], nums[left]) = (nums[left], nums[curr])
left += 1
curr += 1
elif nums[curr] == 2:
(nums[curr], nums[right]) = (nums[right], nums[curr])
right -= 1
else:
curr += 1 |
while True:
line = input().split(' ')
n = int(line[0])
n2 = int(line[1])
if n == n2 and n == 0:
break
num = line[1].replace(line[0], '')
print(int(num) if num != '' else 0)
| while True:
line = input().split(' ')
n = int(line[0])
n2 = int(line[1])
if n == n2 and n == 0:
break
num = line[1].replace(line[0], '')
print(int(num) if num != '' else 0) |
"""initialize paths, checkpoints etc"""
# initialize screenshot path (default: "sh.png", for testing e.g. "assets/test/t1.jpeg")
screenshot_path = "sh.png"
# initialize checkpoint template file, located in "./assets/scene/" (default: "checkpoint")
checkpoint = "checkpoint3"
# initialize battle mode ("default_mode", "quick_mode" or "arts_mode")
script_mode = "arts_mode"
# initialize how many "Golden Apples" would be used (default: 0)
recovery_times = 0
# initialize how many times will be played ("inf" for infinity)
run_time = 1
| """initialize paths, checkpoints etc"""
screenshot_path = 'sh.png'
checkpoint = 'checkpoint3'
script_mode = 'arts_mode'
recovery_times = 0
run_time = 1 |
def sort_twisted37(arr):
return sorted(arr, key=lambda x: convert(x))
def convert(n):
if "3" not in str(n) and "7" not in str(n):
return n
neg_flag=True if n<0 else False
n=abs(n)
total=0
for i in str(n):
if i=="3":
total=total*10+7
elif i=="7":
total=total*10+3
else:
total=total*10+int(i)
return -total if neg_flag else total | def sort_twisted37(arr):
return sorted(arr, key=lambda x: convert(x))
def convert(n):
if '3' not in str(n) and '7' not in str(n):
return n
neg_flag = True if n < 0 else False
n = abs(n)
total = 0
for i in str(n):
if i == '3':
total = total * 10 + 7
elif i == '7':
total = total * 10 + 3
else:
total = total * 10 + int(i)
return -total if neg_flag else total |
##
# DATA_ENCODING:
#
# In order to transmit data in a serialized format, string objects need to be encoded. Otherwise,
# it is unclear how the characters are translated into raw bytes on the wire. This value should be
# consistent with the encoding used on the flight software that is being communicated with.
#
# Traditional C/C++ strings typically use "ascii" encoding. Hence being used here. However, should
# F prime be updated to use some other encoding, this value may be changed.
#
DATA_ENCODING = "ascii"
| data_encoding = 'ascii' |
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
# Find the minimum element.
# You may assume no duplicate exists in the array.
class Solution(object):
def findMin(self, nums):
"""
O(logn)
O(1)
:type nums: List[int]
:rtype: int
"""
left = 0
right = len(nums)-1
while left < right and nums[left]> nums[right]:
middle = left + (right-left)/2
if nums[middle] > nums[right]:
# left-middle is increasing
left = middle + 1
else:
right = middle
return nums[left]
| class Solution(object):
def find_min(self, nums):
"""
O(logn)
O(1)
:type nums: List[int]
:rtype: int
"""
left = 0
right = len(nums) - 1
while left < right and nums[left] > nums[right]:
middle = left + (right - left) / 2
if nums[middle] > nums[right]:
left = middle + 1
else:
right = middle
return nums[left] |
"""
@no 35
@name Search Insert Position
"""
class Solution:
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) == 0:
return 0
else:
for i in range(len(nums)):
if target <= nums[i]:
return i
return len(nums) | """
@no 35
@name Search Insert Position
"""
class Solution:
def search_insert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) == 0:
return 0
else:
for i in range(len(nums)):
if target <= nums[i]:
return i
return len(nums) |
#bear in mind, this is log(n)
def binary_search(arr, target):
return binary_search_func(arr, 0, len(arr) - 1, target)
def binary_search_func(arr, start_index, end_index, target):
if start_index > end_index:
return -1
mid_index = (start_index + end_index) // 2
if arr[mid_index] == target:
return mid_index
elif arr[mid_index] > target:
return binary_search_func(arr, start_index, mid_index - 1, target)
else:
return binary_search_func(arr, mid_index + 1, end_index, target) | def binary_search(arr, target):
return binary_search_func(arr, 0, len(arr) - 1, target)
def binary_search_func(arr, start_index, end_index, target):
if start_index > end_index:
return -1
mid_index = (start_index + end_index) // 2
if arr[mid_index] == target:
return mid_index
elif arr[mid_index] > target:
return binary_search_func(arr, start_index, mid_index - 1, target)
else:
return binary_search_func(arr, mid_index + 1, end_index, target) |
try:
age = int(input("Enter age:"))
if age >= 18:
print("You can vote")
elif age > 0 and age <= 17:
print("Too young to vote")
else:
print("You are a time traveller")
except:
print("Please enter age as integer") | try:
age = int(input('Enter age:'))
if age >= 18:
print('You can vote')
elif age > 0 and age <= 17:
print('Too young to vote')
else:
print('You are a time traveller')
except:
print('Please enter age as integer') |
class Solution:
def equalPartition(self, N, arr):
# Find the sum of the array
summ = sum(arr)
# If the sum is odd, then return false
if summ % 2 != 0 :
return 0
# This is the number we need to obtain during the computation
required = summ // 2
# Form the DP table
table = [[False for _ in range(N+1)] for _ in range(required+1)]
for i in range(N+1) :
table[0][i] = True
for i in range(1, required+1) :
for j in range(1, N+1) :
# We keep the sum same and not remove the jth element in arr
table[i][j] = table[i][j-1]
if arr[j-1] <= i :
# We reduce the sum and find the remaining in the interval till one less than j
table[i][j] = table[i][j] or table[i - arr[j-1]][j-1]
if table[required][N] :
return 1
else :
return 0
if __name__ == '__main__':
t = int(input())
for _ in range(t):
N = int(input())
arr = input().split()
for it in range(N):
arr[it] = int(arr[it])
ob = Solution()
if (ob.equalPartition(N, arr) == 1):
print("YES")
else:
print("NO")
| class Solution:
def equal_partition(self, N, arr):
summ = sum(arr)
if summ % 2 != 0:
return 0
required = summ // 2
table = [[False for _ in range(N + 1)] for _ in range(required + 1)]
for i in range(N + 1):
table[0][i] = True
for i in range(1, required + 1):
for j in range(1, N + 1):
table[i][j] = table[i][j - 1]
if arr[j - 1] <= i:
table[i][j] = table[i][j] or table[i - arr[j - 1]][j - 1]
if table[required][N]:
return 1
else:
return 0
if __name__ == '__main__':
t = int(input())
for _ in range(t):
n = int(input())
arr = input().split()
for it in range(N):
arr[it] = int(arr[it])
ob = solution()
if ob.equalPartition(N, arr) == 1:
print('YES')
else:
print('NO') |
class SignatureNoeud:
def __init__(self,attributs, degre, degres_noeuds_adjacents,attributs_arretes):
self.attributs = attributs
self.degre = degre
self.degres_noeuds_adjacents = degres_noeuds_adjacents
self.attributs_arretes = attributs_arretes
| class Signaturenoeud:
def __init__(self, attributs, degre, degres_noeuds_adjacents, attributs_arretes):
self.attributs = attributs
self.degre = degre
self.degres_noeuds_adjacents = degres_noeuds_adjacents
self.attributs_arretes = attributs_arretes |
class QueueException(Exception):
pass
class Empty(QueueException):
pass
class Full(QueueException):
pass | class Queueexception(Exception):
pass
class Empty(QueueException):
pass
class Full(QueueException):
pass |
GITHUB_IPS_ONLY = False
ENFORCE_SECRET = ""
RETURN_SCRIPTS_INFO = True
PORT = 8000
| github_ips_only = False
enforce_secret = ''
return_scripts_info = True
port = 8000 |
def number():
while True:
try:
num1 = int(input("Enter a number: "))
num2 = int(input("Enter a number: "))
break
except ValueError:
print("One of the values entered is not an integer, try again")
while num1 < 1 or num2 < 1:
print("One of the numbers isnt a positive integer try again")
try:
num1 = int(input("Enter a number: "))
num2 = int(input("Enter a number: "))
break
except ValueError:
print("One of the values entered is not an integer, try again")
print(f"The larger number is: {max(num1,num2)}")
again = input("Would you like to go again? ")
if again.lower == "yes":
number()
elif again.lower() == "no":
pass
else:
print("Invalid")
number()
| def number():
while True:
try:
num1 = int(input('Enter a number: '))
num2 = int(input('Enter a number: '))
break
except ValueError:
print('One of the values entered is not an integer, try again')
while num1 < 1 or num2 < 1:
print('One of the numbers isnt a positive integer try again')
try:
num1 = int(input('Enter a number: '))
num2 = int(input('Enter a number: '))
break
except ValueError:
print('One of the values entered is not an integer, try again')
print(f'The larger number is: {max(num1, num2)}')
again = input('Would you like to go again? ')
if again.lower == 'yes':
number()
elif again.lower() == 'no':
pass
else:
print('Invalid')
number() |
class GaApiError(Exception):
"""Base exception for API errors."""
class GaInvalidArgumentError(GaApiError):
"""Exception for errors on the report definition."""
class GaAuthenticationError(GaApiError):
"""Exception for UNAUTHENTICATED && PERMISSION_DENIED errors."""
class GaRateLimitError(GaApiError):
"""Exception for Rate Limit errors."""
class GaQuotaExceededError(GaApiError):
"""Exception for Quota Exceeded errors."""
class GaBackendServerError(GaApiError):
"""Exception for 500 and 503 backend errors that are Google's fault"""
class GaUnknownError(GaApiError):
"""Exception for unknown errors."""
| class Gaapierror(Exception):
"""Base exception for API errors."""
class Gainvalidargumenterror(GaApiError):
"""Exception for errors on the report definition."""
class Gaauthenticationerror(GaApiError):
"""Exception for UNAUTHENTICATED && PERMISSION_DENIED errors."""
class Garatelimiterror(GaApiError):
"""Exception for Rate Limit errors."""
class Gaquotaexceedederror(GaApiError):
"""Exception for Quota Exceeded errors."""
class Gabackendservererror(GaApiError):
"""Exception for 500 and 503 backend errors that are Google's fault"""
class Gaunknownerror(GaApiError):
"""Exception for unknown errors.""" |
class IDError(Exception):
pass
class Service:
def __init__(self, google_service_object, id = None):
self.service = google_service_object
self.id = id
@property
def id(self):
if self.__id is None:
raise IDError("Service id is uninitialized, use .initialize_env(...)")
return self.__id
@id.setter
def id(self,id):
self.__id = id
def __repr__(self):
return "<Base Service Object>"
| class Iderror(Exception):
pass
class Service:
def __init__(self, google_service_object, id=None):
self.service = google_service_object
self.id = id
@property
def id(self):
if self.__id is None:
raise id_error('Service id is uninitialized, use .initialize_env(...)')
return self.__id
@id.setter
def id(self, id):
self.__id = id
def __repr__(self):
return '<Base Service Object>' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.