content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
enum_ = '''
[COMMENT_LINE_IMPORTS]
# System
from enum import Enum
[COMMENT_LINE]
[COMMENT_LINE_CLASS_NAME]
class [CLASS_NAME](Enum):
[TAB]Template = 0
[COMMENT_LINE]
'''.strip() | enum_ = '\n[COMMENT_LINE_IMPORTS]\n\n# System\nfrom enum import Enum\n\n[COMMENT_LINE]\n\n\n\n[COMMENT_LINE_CLASS_NAME]\n\nclass [CLASS_NAME](Enum):\n[TAB]Template = 0\n\n\n[COMMENT_LINE]\n'.strip() |
phones = [
{"id": 6, "latitude": 40.1011685523957, "longitude": -88.2200390954902},
{"id": 7, "latitude": 40.0963186128832, "longitude": -88.2255865263258},
{"id": 8, "latitude": 40.1153308841568, "longitude": -88.223901994968},
]
def get_phones():
return phones
bad_data = [
{"id": 6, "longitude": -88.2200390954902},
{"id": 7, "latitude": 40.0963186128832, "longitude": -88.2255865263258},
{"id": 8, "latitude": 40.1153308841568, "longitude": -88.223901994968},
]
def get_bad_data():
return bad_data
| phones = [{'id': 6, 'latitude': 40.1011685523957, 'longitude': -88.2200390954902}, {'id': 7, 'latitude': 40.0963186128832, 'longitude': -88.2255865263258}, {'id': 8, 'latitude': 40.1153308841568, 'longitude': -88.223901994968}]
def get_phones():
return phones
bad_data = [{'id': 6, 'longitude': -88.2200390954902}, {'id': 7, 'latitude': 40.0963186128832, 'longitude': -88.2255865263258}, {'id': 8, 'latitude': 40.1153308841568, 'longitude': -88.223901994968}]
def get_bad_data():
return bad_data |
ies = []
ies.append({ "iei" : "2C", "value" : "IMEISV", "type" : "5G mobile identity", "reference" : "9.10.3.4", "presence" : "O", "format" : "TLV", "length" : "11"})
ies.append({ "iei" : "7D", "value" : "NAS message container", "type" : "message container", "reference" : "9.10.3.31", "presence" : "O", "format" : "TLV-E", "length" : "3-n"})
msg_list[key]["ies"] = ies
| ies = []
ies.append({'iei': '2C', 'value': 'IMEISV', 'type': '5G mobile identity', 'reference': '9.10.3.4', 'presence': 'O', 'format': 'TLV', 'length': '11'})
ies.append({'iei': '7D', 'value': 'NAS message container', 'type': 'message container', 'reference': '9.10.3.31', 'presence': 'O', 'format': 'TLV-E', 'length': '3-n'})
msg_list[key]['ies'] = ies |
# Write a program that reads an integer and displays, using asterisks, a filled and hol-
# low square, placed next to each other. For example if the side length is 5, the pro gram
# should display
# ***** *****
# ***** * *
# ***** * *
# ***** * *
# ***** *****
side = int(input("Enter side length: "))
lineNum = 1
dotNum = 1
for i in range(side):
dotNum = 1
# filled square
for i in range(side):
print("*", end = "")
print(end=" ")
# hollow square
for i in range(side):
if lineNum == 1 or lineNum == side:
print("*", end = "")
else:
if dotNum == 1 or dotNum == side:
print("*", end = "")
else:
print(" ", end = "")
dotNum += 1
lineNum += 1
print() | side = int(input('Enter side length: '))
line_num = 1
dot_num = 1
for i in range(side):
dot_num = 1
for i in range(side):
print('*', end='')
print(end=' ')
for i in range(side):
if lineNum == 1 or lineNum == side:
print('*', end='')
elif dotNum == 1 or dotNum == side:
print('*', end='')
else:
print(' ', end='')
dot_num += 1
line_num += 1
print() |
class Solution:
def countBits(self, num: int) -> List[int]:
result = [0] * (1 + num)
for i in range(1, 1 + num):
result[i] = result[i&(i - 1)] + 1
return result
| class Solution:
def count_bits(self, num: int) -> List[int]:
result = [0] * (1 + num)
for i in range(1, 1 + num):
result[i] = result[i & i - 1] + 1
return result |
# function to check if two strings are
# anagram or not
def anagram(s1, s2):
# the sorted strings are checked
if(sorted(s1)== sorted(s2)):
print("The strings are anagrams.")
else:
print("The strings aren't anagrams.")
#commonCharacterCount
def commonCharacterCount(s1, s2):
#find intersection(common elements) and put them into a list
s = list(set(s1))
sum1 = 0
for i in s:
#count the no.of occurances of each char in s1 and s2 and take the min to avoid duplication
sum1+=min(s1.count(i),s2.count(i))
return sum1
def isBananagram(s1, s2):
# if((commonCharacterCount(s1, s2)) and ( sorted(s1)== sorted(s2))):
if((commonCharacterCount(s1, s2)) and anagram(s1, s2)):
print("The string are Bannagrams")
else:
print("The string are not bananagrams") | def anagram(s1, s2):
if sorted(s1) == sorted(s2):
print('The strings are anagrams.')
else:
print("The strings aren't anagrams.")
def common_character_count(s1, s2):
s = list(set(s1))
sum1 = 0
for i in s:
sum1 += min(s1.count(i), s2.count(i))
return sum1
def is_bananagram(s1, s2):
if common_character_count(s1, s2) and anagram(s1, s2):
print('The string are Bannagrams')
else:
print('The string are not bananagrams') |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 16 12:03:24 2020
@author: tsdj
"""
| """
Created on Sun Feb 16 12:03:24 2020
@author: tsdj
""" |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Back of envelope calculations for a precious metals mining company
try:
oz_per_yr = int(input("How many ounces will be produced annually?: "))
price_per_oz = int(input("What is the estimated USD price per ounce for the year?: "))
aisc = int(input("What is the USD AISC?: "))
exchange_rate = float(input("What is the USD/CAD exchange rate?: "))
shrs_out = int(input("How many shares are outstanding?: "))
pe = int(input("What is the estimated PE ratio?: "))
values = {
'oz_per_yr': oz_per_yr,
'price_per_oz': price_per_oz,
'aisc': aisc,
'exchange_rate': exchange_rate,
'shrs_out': shrs_out,
'pe': pe
}
def share_price_calc(vals):
oz_per_yr = vals['oz_per_yr']
price_per_oz = vals['price_per_oz']
aisc = vals['aisc']
exchange_rate = vals['exchange_rate']
shrs_out = vals['shrs_out']
pe = vals['pe']
share_price = (((oz_per_yr * (price_per_oz - aisc)) * exchange_rate) /shrs_out) * pe
return share_price
except ValueError:
print("ValueError: Please enter a number (not a string).")
finally:
try:
print(share_price_calc(values))
except Exception:
print("Exception resulting from ValueError: Please enter a number (not a string).")
| try:
oz_per_yr = int(input('How many ounces will be produced annually?: '))
price_per_oz = int(input('What is the estimated USD price per ounce for the year?: '))
aisc = int(input('What is the USD AISC?: '))
exchange_rate = float(input('What is the USD/CAD exchange rate?: '))
shrs_out = int(input('How many shares are outstanding?: '))
pe = int(input('What is the estimated PE ratio?: '))
values = {'oz_per_yr': oz_per_yr, 'price_per_oz': price_per_oz, 'aisc': aisc, 'exchange_rate': exchange_rate, 'shrs_out': shrs_out, 'pe': pe}
def share_price_calc(vals):
oz_per_yr = vals['oz_per_yr']
price_per_oz = vals['price_per_oz']
aisc = vals['aisc']
exchange_rate = vals['exchange_rate']
shrs_out = vals['shrs_out']
pe = vals['pe']
share_price = oz_per_yr * (price_per_oz - aisc) * exchange_rate / shrs_out * pe
return share_price
except ValueError:
print('ValueError: Please enter a number (not a string).')
finally:
try:
print(share_price_calc(values))
except Exception:
print('Exception resulting from ValueError: Please enter a number (not a string).') |
class MergeSort:
def __init__(self,array):
self.array = array
def result(self):
self.sort(0,len(self.array)-1)
return self.array
def sort(self,front,rear):
if front>rear or front == rear:
return
mid = int((front+rear)/2)
self.sort(front,mid)
self.sort(mid+1,rear)
self.merge(front,mid,rear)
def merge(self,front,mid,rear):
n1 = mid-front+1
n2 = rear-mid
L = []
R = []
for i in range(n1):
L.append(self.array[front+i])
for j in range(n2):
R.append(self.array[mid+j+1])
L.append(float('inf'))
R.append(float('inf'))
i = 0
j = 0
for k in range(front,rear+1):
if L[i]<=R[j]:
self.array[k]=L[i]
i=i+1
else:
self.array[k]=R[j]
j=j+1
test = list(map(int,input().split(' ')))
t = MergeSort(test)
print(t.result())
| class Mergesort:
def __init__(self, array):
self.array = array
def result(self):
self.sort(0, len(self.array) - 1)
return self.array
def sort(self, front, rear):
if front > rear or front == rear:
return
mid = int((front + rear) / 2)
self.sort(front, mid)
self.sort(mid + 1, rear)
self.merge(front, mid, rear)
def merge(self, front, mid, rear):
n1 = mid - front + 1
n2 = rear - mid
l = []
r = []
for i in range(n1):
L.append(self.array[front + i])
for j in range(n2):
R.append(self.array[mid + j + 1])
L.append(float('inf'))
R.append(float('inf'))
i = 0
j = 0
for k in range(front, rear + 1):
if L[i] <= R[j]:
self.array[k] = L[i]
i = i + 1
else:
self.array[k] = R[j]
j = j + 1
test = list(map(int, input().split(' ')))
t = merge_sort(test)
print(t.result()) |
#!/usr/bin/python
def find_max_palindrome(min=100,max=999):
max_palindrome = 0
a = 999
while a > 99:
b = 999
while b >= a:
prod = a*b
if prod > max_palindrome and str(prod)==(str(prod)[::-1]):
max_palindrome = prod
b -= 1
a -= 1
return max_palindrome
print (find_max_palindrome())
| def find_max_palindrome(min=100, max=999):
max_palindrome = 0
a = 999
while a > 99:
b = 999
while b >= a:
prod = a * b
if prod > max_palindrome and str(prod) == str(prod)[::-1]:
max_palindrome = prod
b -= 1
a -= 1
return max_palindrome
print(find_max_palindrome()) |
def write():
systemInfo_file = open("writeTest.txt", "a")
systemInfo_file.write("\nTEST DATA2")
systemInfo_file.close()
| def write():
system_info_file = open('writeTest.txt', 'a')
systemInfo_file.write('\nTEST DATA2')
systemInfo_file.close() |
# Problem Statement: https://www.hackerrank.com/challenges/exceptions/problem
for _ in range(int(input())):
try:
a, b = map(int, input().split())
print(a // b)
except (ZeroDivisionError, ValueError) as e:
print(f'Error Code: {e}') | for _ in range(int(input())):
try:
(a, b) = map(int, input().split())
print(a // b)
except (ZeroDivisionError, ValueError) as e:
print(f'Error Code: {e}') |
"""
Support for local Luftdaten sensors.
Copyright (c) 2020 Mario Villavecchia
Licensed under MIT. All rights reserved.
https://github.com/lichtteil/local_luftdaten/
Support for Ecocity Airhome sensors. AQI sensor
Mykhailo G
"""
| """
Support for local Luftdaten sensors.
Copyright (c) 2020 Mario Villavecchia
Licensed under MIT. All rights reserved.
https://github.com/lichtteil/local_luftdaten/
Support for Ecocity Airhome sensors. AQI sensor
Mykhailo G
""" |
class ApplicationException(Exception):
"""Base Exception class for all exceptions across the application"""
def __init__(self, err_code: int, err_msg: str) -> None:
super(ApplicationException, self).__init__(err_code, err_msg)
self.err_code = err_code
self.err_msg = err_msg
def __str__(self): # pragma: no cover
return f"Error Code: {self.err_code}. Error Message: {self.err_msg}"
def __repr__(self): # pragma: no cover
return self.__str__()
class APIException(ApplicationException):
"""Raised when and External API Call returns non 200 code"""
pass
class DatabaseException(ApplicationException):
"""Raised when issue with database connection"""
pass
| class Applicationexception(Exception):
"""Base Exception class for all exceptions across the application"""
def __init__(self, err_code: int, err_msg: str) -> None:
super(ApplicationException, self).__init__(err_code, err_msg)
self.err_code = err_code
self.err_msg = err_msg
def __str__(self):
return f'Error Code: {self.err_code}. Error Message: {self.err_msg}'
def __repr__(self):
return self.__str__()
class Apiexception(ApplicationException):
"""Raised when and External API Call returns non 200 code"""
pass
class Databaseexception(ApplicationException):
"""Raised when issue with database connection"""
pass |
class BaseContext(dict):
"""
warning: This is a singletone object. Don't create a instance of this object directly
"""
context_obj: 'BaseContext' = None
initial_data: dict = {
}
def initialize(self):
""" Initializes the context with default data """
self.update(self.initial_data)
def clean(self):
""" Clears all context data """
self.clear()
def reset(self):
""" Clears all context data and re-initializes the dict with default data """
self.clean()
self.reset()
@classmethod
def get_instance(cls):
if BaseContext.context_obj is None:
BaseContext.context_obj = cls()
BaseContext.context_obj.initialize()
return BaseContext.context_obj
| class Basecontext(dict):
"""
warning: This is a singletone object. Don't create a instance of this object directly
"""
context_obj: 'BaseContext' = None
initial_data: dict = {}
def initialize(self):
""" Initializes the context with default data """
self.update(self.initial_data)
def clean(self):
""" Clears all context data """
self.clear()
def reset(self):
""" Clears all context data and re-initializes the dict with default data """
self.clean()
self.reset()
@classmethod
def get_instance(cls):
if BaseContext.context_obj is None:
BaseContext.context_obj = cls()
BaseContext.context_obj.initialize()
return BaseContext.context_obj |
class Location:
__slots__ = 'name', '_longitude', '_latitude'
def __init__(self, name, longitude, latitude):
self._longitude = longitude
self._latitude = latitude
self.name = name
@property
def longitude(self):
return self._longitude
@property
def latitude(self):
return self._latitude
print(Location.__dict__)
# {'__module__': '__main__', '__slots__': ('name', '_longitude', '_latitude'), '__init__': <function Location.__init__ at 0x00000237D7A39B80>, 'longitude': <property object at 0x00000237D7A36DB0>, 'latitude': <property object at 0x00000237D7A3F590>,
# '_latitude': <member '_latitude' of 'Location' objects>, '_longitude': <member '_longitude' of 'Location' objects>, 'name':
# <member 'name' of 'Location' objects>, '__doc__': None}
Location.map_service = 'Google Maps'
l = Location('Mumbai',longitude= 19.0760, latitude=72.8777)
print(l.name, l.longitude, l.latitude)
# ('Mumbai', 19.076, 72.8777)
try:
l.__dict__
except AttributeError as ex:
print(ex)
# 'Location' object has no attribute '__dict__'
try:
l.map_link = 'http://maps.google.com/...'
except AttributeError as ex:
print(ex)
# 'Location' object has no attribute 'map_link'
del l.name
try:
print(l.name)
except AttributeError as ex:
print(f'Attribute Error: {ex}')
# Attribute Error: name
l.name = 'Mumbai'
print(l.name)
# 'Mumbai'
class Person:
def __init__(self, name):
self.name = name
class Student(Person):
__slots__ = 'age',
def __init__(self, name, age):
super().__init__(name)
self.age = age
s = Student('Python', 30)
print(s.name, s.age, s.__dict__)
# ('Python', 30, {'name': 'Python'})
class Person:
__slots__ = '_name', 'age'
def __init__(self, name, age):
self.name = name
self.age = age
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
p = Person('Eric', 78)
print(p.name, p.age)
#('Eric', 78)
try:
print(p.__dict__)
except AttributeError as ex:
print(ex)
# 'Person' object has no attribute '__dict__'
hasattr(Person.name, '__get__'), hasattr(Person.name, '__set__')
#(True, True)
hasattr(Person.age, '__get__'), hasattr(Person.age, '__set__')
#(True, True)
class Person:
__slots__ = 'name', '__dict__'
def __init__(self, name, age):
self.name = name
self.age = age
p = Person('Alex', 19)
print(p.name, p.age, p.__dict__) | class Location:
__slots__ = ('name', '_longitude', '_latitude')
def __init__(self, name, longitude, latitude):
self._longitude = longitude
self._latitude = latitude
self.name = name
@property
def longitude(self):
return self._longitude
@property
def latitude(self):
return self._latitude
print(Location.__dict__)
Location.map_service = 'Google Maps'
l = location('Mumbai', longitude=19.076, latitude=72.8777)
print(l.name, l.longitude, l.latitude)
try:
l.__dict__
except AttributeError as ex:
print(ex)
try:
l.map_link = 'http://maps.google.com/...'
except AttributeError as ex:
print(ex)
del l.name
try:
print(l.name)
except AttributeError as ex:
print(f'Attribute Error: {ex}')
l.name = 'Mumbai'
print(l.name)
class Person:
def __init__(self, name):
self.name = name
class Student(Person):
__slots__ = ('age',)
def __init__(self, name, age):
super().__init__(name)
self.age = age
s = student('Python', 30)
print(s.name, s.age, s.__dict__)
class Person:
__slots__ = ('_name', 'age')
def __init__(self, name, age):
self.name = name
self.age = age
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
p = person('Eric', 78)
print(p.name, p.age)
try:
print(p.__dict__)
except AttributeError as ex:
print(ex)
(hasattr(Person.name, '__get__'), hasattr(Person.name, '__set__'))
(hasattr(Person.age, '__get__'), hasattr(Person.age, '__set__'))
class Person:
__slots__ = ('name', '__dict__')
def __init__(self, name, age):
self.name = name
self.age = age
p = person('Alex', 19)
print(p.name, p.age, p.__dict__) |
class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
if not nums:
return 0
nums.sort()
count = 1
longest_streak = []
for index, value in enumerate(nums):
if index + 1 >= len(nums):
break
if nums[index + 1] == value + 1:
count += 1
longest_streak.append(count)
else:
count = 1
if not longest_streak:
return count
return max(longest_streak)
A = Solution()
print(A.longestConsecutive([9,1,4,7,3,-1,0,5,8,-1,6]))
print(A.longestConsecutive([1]))
print(A.longestConsecutive([]))
print(A.longestConsecutive([-3,-2,-1,0]))
print(A.longestConsecutive([1,1,1,1])) | class Solution:
def longest_consecutive(self, nums: List[int]) -> int:
if not nums:
return 0
nums.sort()
count = 1
longest_streak = []
for (index, value) in enumerate(nums):
if index + 1 >= len(nums):
break
if nums[index + 1] == value + 1:
count += 1
longest_streak.append(count)
else:
count = 1
if not longest_streak:
return count
return max(longest_streak)
a = solution()
print(A.longestConsecutive([9, 1, 4, 7, 3, -1, 0, 5, 8, -1, 6]))
print(A.longestConsecutive([1]))
print(A.longestConsecutive([]))
print(A.longestConsecutive([-3, -2, -1, 0]))
print(A.longestConsecutive([1, 1, 1, 1])) |
jobs = {}
class JobInfo(object):
def __init__(self, jobId):
self.jobId = jobId
self.data = None
self.handlers = []
| jobs = {}
class Jobinfo(object):
def __init__(self, jobId):
self.jobId = jobId
self.data = None
self.handlers = [] |
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright (c) 2020--, Qiyun Zhu.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
"""Functions for parsing alignment / mapping files.
Notes
-----
A parser function operates on a single line in the input file.
A parser function returns a tuple of:
str : query Id
str : subject Id
float : alignment score, optional
int : alignment length, optional
start : alignment start (5') coordinate, optional
start : alignment end (3') coordinate, optional
"""
def parse_align_file(fh, mapper, fmt=None, n=None):
"""Read an alignment file in chunks and yield query-to-subject(s) maps.
Parameters
----------
fh : file handle
Alignment file to parse.
mapper : object
Module for handling alignments.
fmt : str, optional
Format of mapping file.
n : int, optional
Number of lines per chunck.
Yields
------
dict of set
Query-to-subject(s) map.
Notes
-----
The design of this function aims to couple with the extremely large size of
typical alignment files. It reads the entire file sequentially, pauses and
processes current cache for every _n_ lines, yields and clears cache, then
proceeds.
"""
n = n or 1000000 # default chunk size: 1 million
i = 0 # current line number
j = n # target line number at end of current chunk
last = None # last query Id
# determine file format based on first line
if fmt is None:
try:
line = next(fh)
except StopIteration:
return
fmt = infer_align_format(line)
parser = assign_parser(fmt)
try:
last = mapper.parse(line, parser)
i = 1
except TypeError:
pass
mapper.append()
else:
parser = assign_parser(fmt)
# parse remaining content
for line in fh:
try:
query = mapper.parse(line, parser)
i += 1
except TypeError:
continue
# flush when query Id changes and chunk size was already reached
if query != last:
if i >= j:
yield mapper.flush()
j = i + n
last = query
mapper.append()
# finish last chunk
yield mapper.flush()
class Plain(object):
"""Mapping module for plain sequence alignments.
Attributes
----------
map : dict
Cache of query-to-subject(s) map.
buf : (str, str)
Buffer of last (query, subject) pair.
See Also
--------
parse_align_file
ordinal.Ordinal
Notes
-----
The design of this class provides an interface for parsing extremely large
alignment files. The other class of the same type is `Ordinal`. They both
provide three instance methods: `parse`, `append` and `flush` which can be
called from parent process.
"""
def __init__(self):
"""Initiate mapper.
"""
self.map = {}
def parse(self, line, parser):
"""Parse one line in alignment file.
Parameters
----------
line : str
Line in alignment file.
parser : callable
Function to parse the line.
Returns
-------
str
Query identifier.
Raises
------
TypeError
Query and subject cannot be extracted from line.
"""
self.buf = parser(line)[:2]
return self.buf[0]
def append(self):
"""Append buffered last line to cached read map.
"""
try:
query, subject = self.buf
except (AttributeError, TypeError):
return
self.map.setdefault(query, []).append(subject)
def flush(self):
"""Process, return and clear read map.
Returns
-------
dict of set
Processed read map.
"""
for query, subjects in self.map.items():
self.map[query] = set(subjects)
res, self.map = self.map, {}
return res
def infer_align_format(line):
"""Guess the format of an alignment file based on first line.
Parameters
----------
line : str
First line of alignment file.
Returns
-------
str or None
Alignment file format (map, b6o or sam).
Raises
------
ValueError
Alignment file format cannot be determined.
See Also
--------
parse_b6o_line
parse_sam_line
"""
if line.split()[0] == '@HD':
return 'sam'
row = line.rstrip().split('\t')
if len(row) == 2:
return 'map'
if len(row) >= 12:
if all(row[i].isdigit() for i in range(3, 10)):
return 'b6o'
if len(row) >= 11:
if all(row[i].isdigit() for i in (1, 3, 4)):
return 'sam'
raise ValueError('Cannot determine alignment file format.')
def assign_parser(fmt):
"""Assign parser function based on format code.
Parameters
----------
fmt : str
Alignment file format code.
Returns
-------
function
Alignment parser function.
"""
if fmt == 'map': # simple map of query <tab> subject
return parse_map_line
if fmt == 'b6o': # BLAST format
return parse_b6o_line
elif fmt == 'sam': # SAM format
return parse_sam_line
else:
raise ValueError(f'Invalid format code: "{fmt}".')
def parse_map_line(line, *args):
"""Parse a line in a simple mapping file.
Parameters
----------
line : str
Line to parse.
args : list, optional
Placeholder for caller compatibility.
Returns
-------
tuple of (str, str)
Query and subject.
Notes
-----
Only first two columns are considered.
"""
try:
query, subject = line.rstrip().split('\t', 2)[:2]
return query, subject
except ValueError:
pass
def parse_b6o_line(line):
"""Parse a line in a BLAST tabular file (b6o).
Parameters
----------
line : str
Line to parse.
Returns
-------
tuple of (str, str, float, int, int, int)
Query, subject, score, length, start, end.
Notes
-----
BLAST tabular format:
qseqid sseqid pident length mismatch gapopen qstart qend sstart send
evalue bitscore
.. _BLAST manual:
https://www.ncbi.nlm.nih.gov/books/NBK279684/
"""
x = line.rstrip().split('\t')
qseqid, sseqid, length, score = x[0], x[1], int(x[3]), float(x[11])
sstart, send = sorted([int(x[8]), int(x[9])])
return qseqid, sseqid, score, length, sstart, send
def parse_sam_line(line):
"""Parse a line in a SAM format (sam).
Parameters
----------
line : str
Line to parse.
Returns
-------
tuple of (str, str, None, int, int, int)
Query, subject, None, length, start, end.
Notes
-----
SAM format:
QNAME, FLAG, RNAME, POS, MAPQ, CIGAR, RNEXT, PNEXT, TLEN, SEQ, QUAL,
TAGS
.. _Wikipedia:
https://en.wikipedia.org/wiki/SAM_(file_format)
.. _SAM format specification:
https://samtools.github.io/hts-specs/SAMv1.pdf
.. _Bowtie2 manual:
http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml#sam-output
"""
# skip header
if line.startswith('@'):
return
x = line.rstrip().split('\t')
qname, rname = x[0], x[2] # query and subject identifiers
# skip unmapped
if rname == '*':
return
pos = int(x[3]) # leftmost mapping position
# parse CIGAR string
length, offset = cigar_to_lens(x[5])
# append strand to read Id if not already
if not qname.endswith(('/1', '/2')):
flag = int(x[1])
if flag & (1 << 6): # forward strand: bit 64
qname += '/1'
elif flag & (1 << 7): # reverse strand: bit 128
qname += '/2'
return qname, rname, None, length, pos, pos + offset - 1
def cigar_to_lens(cigar):
"""Extract lengths from a CIGAR string.
Parameters
----------
cigar : str
CIGAR string.
Returns
-------
int
Alignment length.
int
Offset in subject sequence.
"""
align, offset = 0, 0
n = '' # current step size
for c in cigar:
if c in 'MDIHNPSX=':
if c in 'M=X':
align += int(n)
elif c in 'DN':
offset += int(n)
n = ''
else:
n += c
return align, align + offset
def parse_kraken(line):
"""Parse a line in a Kraken mapping file.
Parameters
----------
line : str
Line to parse.
Returns
-------
tuple of (str, str)
Query and subject.
Notes
-----
Kraken2 output format:
C/U, sequence ID, taxonomy ID, length, LCA mapping
.. _Kraken2 manual:
https://ccb.jhu.edu/software/kraken2/index.shtml?t=manual
"""
x = line.rstrip().split('\t')
return (x[1], x[2]) if x[0] == 'C' else (None, None)
def parse_centrifuge(line):
"""Parse a line in a Centrifuge mapping file.
Parameters
----------
line : str
Line to parse.
Returns
-------
tuple of (str, str, int, int)
Query, subject, score, length.
Notes
-----
Centrifuge output format:
readID, seqID, taxID, score, 2ndBestScore, hitLength, queryLength,
numMatches
.. _Centrifuge manual:
https://ccb.jhu.edu/software/centrifuge/manual.shtml
"""
if line.startswith('readID'):
return None
x = line.rstrip().split('\t')
return x[0], x[1], int(x[3]), int(x[5])
| """Functions for parsing alignment / mapping files.
Notes
-----
A parser function operates on a single line in the input file.
A parser function returns a tuple of:
str : query Id
str : subject Id
float : alignment score, optional
int : alignment length, optional
start : alignment start (5') coordinate, optional
start : alignment end (3') coordinate, optional
"""
def parse_align_file(fh, mapper, fmt=None, n=None):
"""Read an alignment file in chunks and yield query-to-subject(s) maps.
Parameters
----------
fh : file handle
Alignment file to parse.
mapper : object
Module for handling alignments.
fmt : str, optional
Format of mapping file.
n : int, optional
Number of lines per chunck.
Yields
------
dict of set
Query-to-subject(s) map.
Notes
-----
The design of this function aims to couple with the extremely large size of
typical alignment files. It reads the entire file sequentially, pauses and
processes current cache for every _n_ lines, yields and clears cache, then
proceeds.
"""
n = n or 1000000
i = 0
j = n
last = None
if fmt is None:
try:
line = next(fh)
except StopIteration:
return
fmt = infer_align_format(line)
parser = assign_parser(fmt)
try:
last = mapper.parse(line, parser)
i = 1
except TypeError:
pass
mapper.append()
else:
parser = assign_parser(fmt)
for line in fh:
try:
query = mapper.parse(line, parser)
i += 1
except TypeError:
continue
if query != last:
if i >= j:
yield mapper.flush()
j = i + n
last = query
mapper.append()
yield mapper.flush()
class Plain(object):
"""Mapping module for plain sequence alignments.
Attributes
----------
map : dict
Cache of query-to-subject(s) map.
buf : (str, str)
Buffer of last (query, subject) pair.
See Also
--------
parse_align_file
ordinal.Ordinal
Notes
-----
The design of this class provides an interface for parsing extremely large
alignment files. The other class of the same type is `Ordinal`. They both
provide three instance methods: `parse`, `append` and `flush` which can be
called from parent process.
"""
def __init__(self):
"""Initiate mapper.
"""
self.map = {}
def parse(self, line, parser):
"""Parse one line in alignment file.
Parameters
----------
line : str
Line in alignment file.
parser : callable
Function to parse the line.
Returns
-------
str
Query identifier.
Raises
------
TypeError
Query and subject cannot be extracted from line.
"""
self.buf = parser(line)[:2]
return self.buf[0]
def append(self):
"""Append buffered last line to cached read map.
"""
try:
(query, subject) = self.buf
except (AttributeError, TypeError):
return
self.map.setdefault(query, []).append(subject)
def flush(self):
"""Process, return and clear read map.
Returns
-------
dict of set
Processed read map.
"""
for (query, subjects) in self.map.items():
self.map[query] = set(subjects)
(res, self.map) = (self.map, {})
return res
def infer_align_format(line):
"""Guess the format of an alignment file based on first line.
Parameters
----------
line : str
First line of alignment file.
Returns
-------
str or None
Alignment file format (map, b6o or sam).
Raises
------
ValueError
Alignment file format cannot be determined.
See Also
--------
parse_b6o_line
parse_sam_line
"""
if line.split()[0] == '@HD':
return 'sam'
row = line.rstrip().split('\t')
if len(row) == 2:
return 'map'
if len(row) >= 12:
if all((row[i].isdigit() for i in range(3, 10))):
return 'b6o'
if len(row) >= 11:
if all((row[i].isdigit() for i in (1, 3, 4))):
return 'sam'
raise value_error('Cannot determine alignment file format.')
def assign_parser(fmt):
"""Assign parser function based on format code.
Parameters
----------
fmt : str
Alignment file format code.
Returns
-------
function
Alignment parser function.
"""
if fmt == 'map':
return parse_map_line
if fmt == 'b6o':
return parse_b6o_line
elif fmt == 'sam':
return parse_sam_line
else:
raise value_error(f'Invalid format code: "{fmt}".')
def parse_map_line(line, *args):
"""Parse a line in a simple mapping file.
Parameters
----------
line : str
Line to parse.
args : list, optional
Placeholder for caller compatibility.
Returns
-------
tuple of (str, str)
Query and subject.
Notes
-----
Only first two columns are considered.
"""
try:
(query, subject) = line.rstrip().split('\t', 2)[:2]
return (query, subject)
except ValueError:
pass
def parse_b6o_line(line):
"""Parse a line in a BLAST tabular file (b6o).
Parameters
----------
line : str
Line to parse.
Returns
-------
tuple of (str, str, float, int, int, int)
Query, subject, score, length, start, end.
Notes
-----
BLAST tabular format:
qseqid sseqid pident length mismatch gapopen qstart qend sstart send
evalue bitscore
.. _BLAST manual:
https://www.ncbi.nlm.nih.gov/books/NBK279684/
"""
x = line.rstrip().split('\t')
(qseqid, sseqid, length, score) = (x[0], x[1], int(x[3]), float(x[11]))
(sstart, send) = sorted([int(x[8]), int(x[9])])
return (qseqid, sseqid, score, length, sstart, send)
def parse_sam_line(line):
"""Parse a line in a SAM format (sam).
Parameters
----------
line : str
Line to parse.
Returns
-------
tuple of (str, str, None, int, int, int)
Query, subject, None, length, start, end.
Notes
-----
SAM format:
QNAME, FLAG, RNAME, POS, MAPQ, CIGAR, RNEXT, PNEXT, TLEN, SEQ, QUAL,
TAGS
.. _Wikipedia:
https://en.wikipedia.org/wiki/SAM_(file_format)
.. _SAM format specification:
https://samtools.github.io/hts-specs/SAMv1.pdf
.. _Bowtie2 manual:
http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml#sam-output
"""
if line.startswith('@'):
return
x = line.rstrip().split('\t')
(qname, rname) = (x[0], x[2])
if rname == '*':
return
pos = int(x[3])
(length, offset) = cigar_to_lens(x[5])
if not qname.endswith(('/1', '/2')):
flag = int(x[1])
if flag & 1 << 6:
qname += '/1'
elif flag & 1 << 7:
qname += '/2'
return (qname, rname, None, length, pos, pos + offset - 1)
def cigar_to_lens(cigar):
"""Extract lengths from a CIGAR string.
Parameters
----------
cigar : str
CIGAR string.
Returns
-------
int
Alignment length.
int
Offset in subject sequence.
"""
(align, offset) = (0, 0)
n = ''
for c in cigar:
if c in 'MDIHNPSX=':
if c in 'M=X':
align += int(n)
elif c in 'DN':
offset += int(n)
n = ''
else:
n += c
return (align, align + offset)
def parse_kraken(line):
    """Parse one line of a Kraken(2) mapping file.

    Parameters
    ----------
    line : str
        Line to parse.

    Returns
    -------
    tuple of (str, str)
        Query and subject; ``(None, None)`` for unclassified reads.

    Notes
    -----
    Kraken2 columns: C/U, sequence ID, taxonomy ID, length, LCA mapping.
    .. _Kraken2 manual:
        https://ccb.jhu.edu/software/kraken2/index.shtml?t=manual
    """
    fields = line.rstrip().split('\t')
    # only classified ("C") reads carry a taxonomy assignment
    if fields[0] == 'C':
        return (fields[1], fields[2])
    return (None, None)
def parse_centrifuge(line):
    """Parse one line of a Centrifuge mapping file.

    Parameters
    ----------
    line : str
        Line to parse.

    Returns
    -------
    tuple of (str, str, int, int)
        Query, subject, score, hit length; ``None`` for the header line.

    Notes
    -----
    Centrifuge columns: readID, seqID, taxID, score, 2ndBestScore,
    hitLength, queryLength, numMatches.
    .. _Centrifuge manual:
        https://ccb.jhu.edu/software/centrifuge/manual.shtml
    """
    # skip the column-header line
    if line.startswith('readID'):
        return None
    fields = line.rstrip().split('\t')
    read_id, seq_id = fields[0], fields[1]
    return (read_id, seq_id, int(fields[3]), int(fields[5]))
print(list(enumerate([])))
print(list(enumerate([1, 2, 3])))
print(list(enumerate([1, 2, 3], 5)))
print(list(enumerate([1, 2, 3], -5)))
print(list(enumerate(range(10000))))
| print(list(enumerate([])))
print(list(enumerate([1, 2, 3])))
print(list(enumerate([1, 2, 3], 5)))
print(list(enumerate([1, 2, 3], -5)))
print(list(enumerate(range(10000)))) |
class Latex:
    """Compile LaTeX sources on a remote host through an SSH shell session.

    Uses ``latexmk`` so the whole pdflatex -> bibtex -> pdflatex x2 recipe
    runs in a single invocation.
    """

    def __init__(self, ssh):
        # reuse the remote shell session owned by the ssh wrapper
        self.bash = ssh.bash

    def compile(self, path, src, params):
        """Run latexmk on ``<path>/<src>.tex`` and return the command result.

        Parameters
        ----------
        path : str
            Remote directory containing the .tex source.
        src : str
            Source file name without the ``.tex`` extension.
        params : str
            Extra latexmk flags, e.g. ``-pdf`` / ``-xelatex`` / ``""`` to let
            latexmk pick the engine automatically.

        Raises
        ------
        Exception
            Re-raises whatever the remote shell raised, after logging.
        """
        # NOTE(review): path/src/params are interpolated into a shell command
        # unescaped -- callers must not pass untrusted input here.
        try:
            return self.bash.run('cd {} && latexmk {} -synctex=1 -interaction=nonstopmode -file-line-error {}.tex -outdir="."'.format(path, params, src))
        except Exception:
            print("PDFLATEX ERROR")
            raise
| class Latex:
def __init__(self, ssh):
"""
latex
"""
self.bash = ssh.bash
def compile(self, path, src, params):
try:
r = self.bash.run('cd {} && latexmk {} -synctex=1 -interaction=nonstopmode -file-line-error {}.tex -outdir="."'.format(path, params, src))
return r
except:
print('PDFLATEX ERROR')
raise
return True |
# Dictionary literal: maps each term (key) to its definition (value).
programming_dictonary = {
"Bug":"An error in program that prevents the program running as expected",
"Function":"A piece of code that you can call over and over again",
}
# Retrieve a value by its key
print(programming_dictonary["Bug"])
# Adding a new key/value pair
programming_dictonary["Loop"] = "The action of doing something again and again"
empty_list = []
empty_dictonary = {}
# Reassigning {} wipes an existing dictionary - e.g. to reset game data
# programming_dictonary = {}
print(programming_dictonary.keys())
print(programming_dictonary.values())
# Assigning to an existing key overwrites its value
programming_dictonary["Bug"] = "An moth in your computer"
print(programming_dictonary)
# Iterating a dictionary yields its keys
for key in programming_dictonary:
print(key,end=" : ")
print(programming_dictonary[key]) | programming_dictonary = {'Bug': 'An error in program that prevents the program running as expected', 'Function': 'A piece of code that you can call over and over again'}
print(programming_dictonary['Bug'])
programming_dictonary['Loop'] = 'The action of doing something again and again'
empty_list = []
empty_dictonary = {}
print(programming_dictonary.keys())
print(programming_dictonary.values())
programming_dictonary['Bug'] = 'An moth in your computer'
print(programming_dictonary)
for key in programming_dictonary:
print(key, end=' : ')
print(programming_dictonary[key]) |
# ========== base graph node class / generic knowledge representation (AST/ASG)
class Object():
    """Generic graph node: a typed scalar value plus named slot{}s
    (attributes) and ordered nest[]ed elements (children)."""

    def __init__(self, V):
        # node class/type tag (lower-cased class name)
        self.type = self.__class__.__name__.lower()
        # node name / scalar value
        self.value = V
        # slot{}s / attributes / associative array
        self.slot = {}
        # nest[]ed elements / ordered container
        self.nest = []

    ## ============================================================== text dump

    def __repr__(self):
        # `print` callback method
        return self.dump()

    def test(self):
        # pytest: dump without id, hashes, ..
        return self.dump(test=True)

    def dump(self, cycle=None, depth=0, prefix='', test=False):
        """Full text tree dump; `cycle` tracks visited nodes to cut loops."""
        # fix: avoid the shared-mutable-default-argument pitfall
        if cycle is None:
            cycle = []
        # head
        ret = self.pad(depth) + self.head(prefix, test)
        # cycle marker: node already on the current path
        if self in cycle:
            return ret + ' _/'
        # slot{}s
        for i in self.keys():
            ret += self[i].dump(cycle + [self], depth + 1, f'{i} = ', test)
        # nest[]ed
        for j, k in enumerate(self.nest):
            ret += k.dump(cycle + [self], depth + 1, f'{j}: ', test)
        # subtree
        return ret

    def pad(self, depth):
        # tree padding: newline plus one tab per depth level
        return '\n' + '\t' * depth

    def head(self, prefix='', test=False):
        # short `<T:V>` header-only dump; object id suppressed in test mode
        ret = f'{prefix}<{self.type}:{self.value}>'
        if not test:
            ret += f' @{id(self):x}'
        return ret

    ## ============================================================== operators

    def keys(self):
        # ` S.keys() ` sorted slot keys
        return sorted(self.slot.keys())

    def __getitem__(self, key):
        # ` S[key] ` slot read
        assert isinstance(key, str)
        return self.slot[key]

    def __setitem__(self, key, that):
        # ` S[key] = that ` slot write (fluent: returns self)
        assert isinstance(key, str)
        assert isinstance(that, Object)
        self.slot[key] = that
        return self

    def __lshift__(self, that):
        # ` A << B ` store B under its type tag
        return self.__setitem__(that.type, that)

    def __rshift__(self, that):
        # ` A >> B ` store B under its value
        return self.__setitem__(that.value, that)

    def __floordiv__(self, that):
        # ` A // B ` push subgraph
        assert isinstance(that, Object)
        self.nest += [that]
        return self

    ## ========================================================== serialization

    def json(self, cycle=None, depth=0): raise NotImplementedError(self.json)

    ## ======================================================= graph evaluation

    def eval(self, env): raise NotImplementedError(self.eval)
| class Object:
def __init__(self, V):
self.type = self.__class__.__name__.lower()
self.value = V
self.slot = {}
self.nest = []
def __repr__(self):
return self.dump()
def test(self):
return self.dump(test=True)
def dump(self, cycle=[], depth=0, prefix='', test=False):
ret = self.pad(depth) + self.head(prefix, test)
if self in cycle:
return ret + ' _/'
for i in self.keys():
ret += self[i].dump(cycle + [self], depth + 1, f'{i} = ', test)
for (j, k) in enumerate(self.nest):
ret += k.dump(cycle + [self], depth + 1, f'{j}: ', test)
return ret
def pad(self, depth):
return '\n' + '\t' * depth
def head(self, prefix='', test=False):
ret = f'{prefix}<{self.type}:{self.value}>'
if not test:
ret += f' @{id(self):x}'
return ret
def keys(self):
return sorted(self.slot.keys())
def __getitem__(self, key):
assert isinstance(key, str)
return self.slot[key]
def __setitem__(self, key, that):
assert isinstance(key, str)
assert isinstance(that, Object)
self.slot[key] = that
return self
def __lshift__(self, that):
return self.__setitem__(that.type, that)
def __rshift__(self, that):
return self.__setitem__(that.value, that)
def __floordiv__(self, that):
assert isinstance(that, Object)
self.nest += [that]
return self
def json(self, cycle=[], depth=0):
raise not_implemented_error(self.json)
def eval(self, env):
raise not_implemented_error(self.eval) |
"""
Samples below are intended to get us started with Python.
Did you notice this is a multi-line comment :-) and yes being the first
one and before code, it qualifies to be documentation as well. How Cool!!!
In order to be a docstring it had to be multi-line
"""
# print hello world :-), Hey this is a single line comment
print("Hello, World")
'''
We can define strings using ' (single quotes) or using " (double quotes)
Same goes for comments, did you notice this one.
Assignment
----------
print messages below:
1. You won't be disappointed with Python
2. One day you will say "I love Python"
3. You won't be disappointed with Python. One day you will say "I love Python"
'''
# Getting rid of new line
print("Hello", end='')
print(", World!!!")
# Working with variables is damn easy
an_int = 1
a_string = "We won't work with other types today. Yes, there are many more."
'''
There is no verbosity like - int anInt = 1; or String aString = "Something";
'''
# Programming is all about decision making, is not it?
if an_int == 1:
print(a_string)
# A decision without a negative case is not so useful
if an_int == 2:
print(a_string)
else:
print("Damn it was not true!!!")
# Ah! that was nice but how can I take more than one decisions
if an_int == 2:
print("It is 2 indeed")
elif an_int == 1:
print("It is 1 indeed")
else:
print("I seriously have not idea, what it is")
'''
Do we just keep scripting in Python or can we package snippets and reuse
Did not you realize, what print is? Yes, it is a function.
A callable, reusable and self contained unit of code. Provides a logical grouping and
helps in organizing snippets to perform unit of work.
Disclaimer: I am NOT good at definitions and this one is purely self cooked :-)
'''
def greet_awesome_people():
    """Print a fixed greeting; a zero-argument, no-return function."""
    print("Hello Awesome People. I am a dumb function but teaches a very powerful thing. \nGuess what?")
# Calling the function by name runs its body
greet_awesome_people()
# Same goes for me, guess guess :-)
def i_am_bit_smarter(message):
    """Print the caller-supplied message; demonstrates a parameter."""
    print(message)
# And same goes for me
def i_am_bit_more_smarter(a, b):
    """Return the sum of a and b; demonstrates a return value."""
    return a + b
i_am_bit_smarter("Custom Message>> Sum of 10 and 2 is : " + str(i_am_bit_more_smarter(10, 2)))
'''
Assignment
----------
Write the smartest calculator which:
- Works only with integer
- Handles add, subtract, mul and divide
client should be able to use your calculator like:
add(10,2), subtract(11, 3) etc.
'''
# Time to evaluate our guess and together try to get a bit of programming Moksha :-). | """
Samples below are intended to get us started with Python.
Did you notice this is a multi-line comment :-) and yes being the first
one and before code, it qualifies to be documentation as well. How Cool!!!
In order to be a docstring it had to be multi-line
"""
print('Hello, World')
'\nWe can define strings using \' (single quotes) or using " (double quotes)\nSame goes for comments, did you notice this one.\n\nAssignment\n----------\n\nprint messages below:\n\n1. You won\'t be disappointed with Python\n2. One day you will say "I love Python"\n3. You won\'t be disappointed with Python. One day you will say "I love Python"\n'
print('Hello', end='')
print(', World!!!')
an_int = 1
a_string = "We won't work with other types today. Yes, there are many more."
'\nThere is no verbosity like - int anInt = 1; or String aString = "Something";\n'
if an_int == 1:
print(a_string)
if an_int == 2:
print(a_string)
else:
print('Damn it was not true!!!')
if an_int == 2:
print('It is 2 indeed')
elif an_int == 1:
print('It is 1 indeed')
else:
print('I seriously have not idea, what it is')
'\nDo we just keep scripting in Python or can we package snippets and reuse\nDid not you realize, what print is? Yes, it is a function.\n\nA callable, reusable and self contained unit of code. Provides a logical grouping and\nhelps in organizing snippets to perform unit of work.\n\nDisclaimer: I am NOT good at definitions and this one is purely self cooked :-)\n'
def greet_awesome_people():
print('Hello Awesome People. I am a dumb function but teaches a very powerful thing. \nGuess what?')
greet_awesome_people()
def i_am_bit_smarter(message):
print(message)
def i_am_bit_more_smarter(a, b):
return a + b
i_am_bit_smarter('Custom Message>> Sum of 10 and 2 is : ' + str(i_am_bit_more_smarter(10, 2)))
'\nAssignment\n----------\n\nWrite the smartest calculator which:\n- Works only with integer\n- Handles add, subtract, mul and divide\n\nclient should be able to use your calculator like:\nadd(10,2), subtract(11, 3) etc.\n' |
class PiecewiseError(Exception):
    """Base error class for framework.

    All framework exceptions derive from this class so callers can catch
    them with a single ``except PiecewiseError``.
    """

    def __init__(self, message=""):
        # fix: cooperate with Exception so args, repr() and pickling work
        super().__init__(message)
        self._message = message

    def __str__(self):
        return self._message


class InternalError(PiecewiseError):
    """Error indicating internal bug or logical flaw, not meant to be raised
    under normal operation."""
    pass
| class Piecewiseerror(Exception):
"""Base error class for framework."""
def __init__(self, message=''):
self._message = message
def __str__(self):
return self._message
class Internalerror(PiecewiseError):
"""Error indicating internal bug or logical flaw, not meant to be raised
under normal operation."""
pass |
# Python - 3.6.0
def tickets(people):
    """Return 'YES' if every person in line can be given exact change.

    A ticket costs $25; each person pays with a single 25, 50 or 100 bill
    and the till starts empty, so change comes only from earlier bills.
    """
    n25 = n50 = 0
    for bill in people:
        if bill == 25:
            n25 += 1
        elif bill == 50:
            # change: exactly one 25
            if n25 == 0:
                return 'NO'
            n25 -= 1
            n50 += 1
        else:
            # change for 100: a 50 is used whenever one is available,
            # otherwise three 25s
            if n50 > 0 and n25 > 0:
                n50 -= 1
                n25 -= 1
            elif n50 == 0 and n25 >= 3:
                n25 -= 3
            else:
                return 'NO'
    return 'YES'
| def tickets(people):
moneys = {k: 0 for k in [25, 50, 100]}
for cost in people:
if cost == 50:
if moneys[25] < 1:
return 'NO'
moneys[25] -= 1
elif cost == 100:
if moneys[50] >= 1:
if moneys[25] < 1:
return 'NO'
moneys[50] -= 1
moneys[25] -= 1
else:
if moneys[25] < 3:
return 'NO'
moneys[25] -= 3
moneys[cost] += 1
return 'YES' |
class Error(Exception):
    """Generic error class for invalid wordnet operations."""
    # reset the module so the user sees the public name ('wn.Error', not the
    # internal defining module) in tracebacks and repr()
    __module__ = 'wn'
class Warning(Warning):
    """Generic warning class for dubious wordnet operations."""
    # reset the module so the user sees the public name
    __module__ = 'wn'
| class Error(Exception):
"""Generic error class for invalid wordnet operations."""
__module__ = 'wn'
class Warning(Warning):
"""Generic warning class for dubious worndet operations."""
__module__ = 'wn' |
def calculate_age(year_of_birth: int, current_year: int) -> str:
    """Year report depending on year of birth and current year."""

    def pluralize(years: int) -> str:
        # "1 year" vs "N years"
        if years == 1:
            return "1 year"
        return f"{years} years"

    age = current_year - year_of_birth
    if age > 0:
        return "You are " + pluralize(age) + " old."
    if age < 0:
        return "You will be born in " + pluralize(-age) + "."
    return "You were born this very year!"
"""Year report depending on year of birth and current year."""
def year_str(y: int) -> str:
return '1 year' if y == 1 else str(y) + ' years'
delta = current_year - year_of_birth
if delta == 0:
return 'You were born this very year!'
elif delta > 0:
return 'You are ' + year_str(delta) + ' old.'
else:
return 'You will be born in ' + year_str(-delta) + '.' |
# Each entry is a comma-separated group of exactly 10 wiki language codes.
LANGS = ["hi,my,fa,tk,vi,zh,ka,zh_yue,hy,ru", "hi,my,fa,vi,zh,tk,zh_yue,ru,bxr,cdo", "hi,my,fa,vi,tk,jv,zh_yue,id,zh,am", "is,en,se,et,fi,fr,kv,cs,de,eu", "et,lv,fi,en,se,cs,de,fr,is,hu", "cs,en,de,fr,lv,et,hu,fi,la,eu", "lv,et,cs,hu,de,el,fi,myv,mhr,tr", "hu,el,cs,de,lv,tr,la,et,xmf,myv", "el,hu,tr,la,cs,de,xmf,lv,hy,ka"]
my_str = ''
counter = 0
for langs in LANGS:
    # progress indicator: index of the group being processed
    print(counter)
    counter += 1
    assert len(langs.split(',')) == 10
    # map each code to its "<lang>/wiki@ukp" adapter identifier
    adapter_names = [f'{lang}/wiki@ukp' for lang in langs.split(',')]
    adapter_names = ','.join(adapter_names)
    # accumulate groups as space-separated, double-quoted CLI arguments
    if len(my_str) == 0:
        my_str = f'"{adapter_names}"'
    else:
        my_str = my_str + f' "{adapter_names}"'
print(my_str) | langs = ['hi,my,fa,tk,vi,zh,ka,zh_yue,hy,ru', 'hi,my,fa,vi,zh,tk,zh_yue,ru,bxr,cdo', 'hi,my,fa,vi,tk,jv,zh_yue,id,zh,am', 'is,en,se,et,fi,fr,kv,cs,de,eu', 'et,lv,fi,en,se,cs,de,fr,is,hu', 'cs,en,de,fr,lv,et,hu,fi,la,eu', 'lv,et,cs,hu,de,el,fi,myv,mhr,tr', 'hu,el,cs,de,lv,tr,la,et,xmf,myv', 'el,hu,tr,la,cs,de,xmf,lv,hy,ka']
my_str = ''
counter = 0
for langs in LANGS:
print(counter)
counter += 1
assert len(langs.split(',')) == 10
adapter_names = [f'{lang}/wiki@ukp' for lang in langs.split(',')]
adapter_names = ','.join(adapter_names)
if len(my_str) == 0:
my_str = f'"{adapter_names}"'
else:
my_str = my_str + f' "{adapter_names}"'
print(my_str) |
class Solution(object):
    def mySqrt(self, x):
        """Return the integer square root of a non-negative integer.

        Binary-searches for the largest ``m`` with ``m * m <= x``.

        :type x: int
        :rtype: int
        """
        # Bug fix: the original used true division ("/"), which yields
        # floats in Python 3 and breaks the integer binary search
        # (e.g. mySqrt(8) returned 3.375 instead of 2).
        low, high = 0, x
        while low <= high:
            mid = (low + high) // 2
            squared = mid * mid
            if squared == x:
                return mid
            elif squared < x:
                low = mid + 1
            else:
                high = mid - 1
        # loop exits with low == high + 1, so high is floor(sqrt(x))
        return high
| class Solution(object):
def my_sqrt(self, x):
"""
:type x: int
:rtype: int
"""
left = 0
right = x
while left <= right:
middle = (left + right) / 2
sqrt = middle * middle
if x == sqrt:
return middle
elif x < sqrt:
right = middle - 1
else:
left = middle + 1
return (left + right) / 2 |
# Fixed data set of 100 small integers in [-20, 20].
a = [-11, -2, -4, -10, -9, 4, -5, -18, -16, 5, -6, -19, 0, -7, 11, -17, 11, 9, -7, -11, 19, -16, -11, -4, 19,
-6, -6, -15, 1, 11, -1, -16, 12, 0, -15, -13, 11, -19, -15, -18, -20, 7, -8, 17, -4, 16, 6, -5, -3, -8, 17, -2, 2, 1, -2, 19, -6, 13, -7, -8, 9, -13, 4, -19, -4, 20, 20, -10, 9, 13, 18, 15, -15, -16, 20, 15, 19,
-13, 6, 17, -9, -6, -3, -1, 19, 5, 2, -14, -20, 15, 20, -7, -13, 16, -16, 16, -4, 1, -20, -11]
k = []
# collect every ordered pair (a[i], a[j]) with i < j whose sum is 7
for i in range(len(a) - 1):
    for j in range(i + 1, len(a)):
        s = a[i] + a[j]
        if s == 7:
            k.append((a[i], a[j]))
# deduplicate the pairs
# NOTE(review): set() makes the printed order non-deterministic across runs
k1 = list(set(k))
print(len(k1))
print(k1)
q = 0
# count tuple pairs where one's first element equals the other's second
for i in range(len(k1) - 1):
    for j in range(i + 1, len(k1)):
        if k1[i][0] == k1[j][1]:
            q += 1
            print(k1[i], k1[j])
print(q)
| a = [-11, -2, -4, -10, -9, 4, -5, -18, -16, 5, -6, -19, 0, -7, 11, -17, 11, 9, -7, -11, 19, -16, -11, -4, 19, -6, -6, -15, 1, 11, -1, -16, 12, 0, -15, -13, 11, -19, -15, -18, -20, 7, -8, 17, -4, 16, 6, -5, -3, -8, 17, -2, 2, 1, -2, 19, -6, 13, -7, -8, 9, -13, 4, -19, -4, 20, 20, -10, 9, 13, 18, 15, -15, -16, 20, 15, 19, -13, 6, 17, -9, -6, -3, -1, 19, 5, 2, -14, -20, 15, 20, -7, -13, 16, -16, 16, -4, 1, -20, -11]
k = []
for i in range(len(a) - 1):
for j in range(i + 1, len(a)):
s = a[i] + a[j]
if s == 7:
k.append((a[i], a[j]))
k1 = list(set(k))
print(len(k1))
print(k1)
q = 0
for i in range(len(k1) - 1):
for j in range(i + 1, len(k1)):
if k1[i][0] == k1[j][1]:
q += 1
print(k1[i], k1[j])
print(q) |
# Baseline combat stats keyed by level bracket (keys mirror LEVELS below).
# Each bracket holds "Monster" and "Player" stat dicts; the Monster
# "Scaling" entry is a growth factor applied within the bracket.
BASE_STATS = {
    1: {
        "Monster": {"HP": 7, "Attack": 5, "Defence": 5, "Scaling": 0.05},
        "Player": {"HP": 5, "Attack": 2, "Defence": 2},
    },
    30: {
        "Monster": {"HP": 15, "Attack": 10, "Defence": 10, "Scaling": 0.1},
        "Player": {"HP": 9, "Attack": 6, "Defence": 5},
    },
    60: {
        "Monster": {"HP": 37, "Attack": 20, "Defence": 20, "Scaling": 0.2},
        "Player": {"HP": 13, "Attack": 10, "Defence": 8},
    },
    90: {
        "Monster": {"HP": 103, "Attack": 40, "Defence": 40, "Scaling": 0.4},
        "Player": {"HP": 17, "Attack": 14, "Defence": 11},
    },
}
# Level brackets in ascending order; must stay in sync with BASE_STATS keys.
LEVELS = [1, 30, 60, 90]
| base_stats = {1: {'Monster': {'HP': 7, 'Attack': 5, 'Defence': 5, 'Scaling': 0.05}, 'Player': {'HP': 5, 'Attack': 2, 'Defence': 2}}, 30: {'Monster': {'HP': 15, 'Attack': 10, 'Defence': 10, 'Scaling': 0.1}, 'Player': {'HP': 9, 'Attack': 6, 'Defence': 5}}, 60: {'Monster': {'HP': 37, 'Attack': 20, 'Defence': 20, 'Scaling': 0.2}, 'Player': {'HP': 13, 'Attack': 10, 'Defence': 8}}, 90: {'Monster': {'HP': 103, 'Attack': 40, 'Defence': 40, 'Scaling': 0.4}, 'Player': {'HP': 17, 'Attack': 14, 'Defence': 11}}}
levels = [1, 30, 60, 90] |
class Solution:
    def rotate(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        # Rotate the n x n matrix 90 degrees clockwise with two in-place
        # passes: vertical flip, then transpose.  T: O(n^2), S: O(1).
        if not matrix:
            return
        size = len(matrix)
        # pass 1: reverse the row order (vertical flip)
        matrix.reverse()
        # pass 2: transpose across the main diagonal
        for r in range(size):
            for c in range(r + 1, size):
                matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]
        return
| class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
'\n T: O(m * n) and S: O(1)\n '
if not matrix:
return
n = len(matrix)
(i, j) = (0, n - 1)
while i < j:
(matrix[i], matrix[j]) = (matrix[j], matrix[i])
(i, j) = (i + 1, j - 1)
for i in range(n):
for j in range(i + 1, n):
(matrix[i][j], matrix[j][i]) = (matrix[j][i], matrix[i][j])
return |
# Atharv Kolhar
# Python Bytes
"""
Questions:
1. Create a Set of even numbers up to 20.
2. Create another list of number multiple of 3 up to 30
3. Find Intersection, Union, and Difference between the above two sets
4. Declare an Empty set and add the first 4 prime numbers to it.
""" | """
Questions:
1. Create a Set of even numbers up to 20.
2. Create another list of number multiple of 3 up to 30
3. Find Intersection, Union, and Difference between the above two sets
4. Declare an Empty set and add the first 4 prime numbers to it.
""" |
# Sum vs XOR
# https://www.hackerrank.com/challenges/sum-vs-xor/problem
def sumXor(n):
    """Count non-negative x with n + x == n ^ x (HackerRank "Sum vs XOR").

    The sum equals the xor exactly when x has no bit in common with n, so
    each zero bit of n doubles the count: the answer is 2**z where z is the
    number of zero bits in n's binary form.  n == 0 is the special case.
    """
    if n == 0:
        return 1
    zero_bits = format(n, 'b').count('0')
    return 2 ** zero_bits
n = int(input().strip())
result = sumXor(n)
print (result) | def sum_xor(n):
if n == 0:
return 1
return 1 << f'{n:b}'.count('0')
n = int(input().strip())
result = sum_xor(n)
print(result) |
##################################################################
# Mocked returns for patched functions that access web resources #
##################################################################
# Mocked ArcGIS feature-query response: one police-station record
# (attributes + point geometry) plus field-alias metadata.
GET_POLICE_STATION_API_MOCK = {
"displayFieldName": "NAME",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"FACILITYID": "Facility Identifier",
"NAME": "Name of Facility",
"OWNER": "Owner Name",
"OWNTYPE": "Owner Type",
"SUBTYPEFIELD": "Subtype Field",
"FEATURECODE": "Feature Code",
"FULLADDR": "Full Address",
"AGENCYURL": "Website",
"OPERDAYS": "Operational Days",
"OPERHOURS": "Operational Hours",
"CONTACT": "Contact Name",
"PHONE": "Phone",
"EMAIL": "Email"
},
"features": [
{
"attributes": {
"OBJECTID": 1,
"FACILITYID": None,
"NAME": "Brookline Public Safety HQ",
"OWNER": "Town of Brookline",
"OWNTYPE": None,
"SUBTYPEFIELD": None,
"FEATURECODE": None,
"FULLADDR": "350 Washington St, Brookline, MA 02445",
"AGENCYURL": "http://www.brooklinepolice.com/",
"OPERDAYS": "Other",
"OPERHOURS": "Other",
"CONTACT": None,
"PHONE": "617-730-2222",
"EMAIL": None
},
"geometry": {
"x": -71.121409303637222,
"y": 42.333789044263746
}
}
]
}
# Same response shape as GET_POLICE_STATION_API_MOCK but with an empty
# "features" list, for exercising the no-results path.
NO_RESULTS_GET_POLICE_STATION_API_MOCK = {
"displayFieldName": "NAME",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"FACILITYID": "Facility Identifier",
"NAME": "Name of Facility",
"OWNER": "Owner Name",
"OWNTYPE": "Owner Type",
"SUBTYPEFIELD": "Subtype Field",
"FEATURECODE": "Feature Code",
"FULLADDR": "Full Address",
"AGENCYURL": "Website",
"OPERDAYS": "Operational Days",
"OPERHOURS": "Operational Hours",
"CONTACT": "Contact Name",
"PHONE": "Phone",
"EMAIL": "Email"
},
"features": []
}
# Mocked geocoding result: WGS84 (wkid 4326) point with x/y/z coordinates.
GEOCODE_MOCK = {
"spatialReference": {
"wkid": 4326,
"latestWkid": 4326
},
"x": -71.120614336337198,
"y": 42.334020535512529,
"z": 0
}
# Bare point location (x/y/z only, no spatial reference).
LOCATION_MOCK = {
"x": -71.120614336337198,
"y": 42.334020535512529,
"z": 0
}
# Spatial-reference fragment; "SPACIAL" spelling kept -- renaming would
# break existing imports of this constant.
SPACIAL_REFERENCE_MOCK = {
"wkid": 4326,
"latestWkid": 4326
}
# Single geocoder address candidate: address, location, score, attributes.
ADDRESS_CANDIDATE_MOCK = {
"address": "333 WASHINGTON ST",
"location": {
"x": -71.120614336337198,
"y": 42.334020535512529,
"z": 0
},
"score": 100,
"attributes": {
"Loc_name": "Building",
"Score": 100,
"Match_addr": "333 WASHINGTON ST"
}
}
# Mocked findAddressCandidates response: one perfect-score (100) candidate.
GET_ADDRESS_CANDIDATES_API_MOCK = {
"spatialReference": {
"wkid": 4326,
"latestWkid": 4326
},
"candidates": [
{
"address": "333 WASHINGTON ST",
"location": {
"x": -71.120614336337198,
"y": 42.334020535512529,
"z": 0
},
"score": 100,
"attributes": {
"Loc_name": "Building",
"Score": 100,
"Match_addr": "333 WASHINGTON ST"
}
}
]
}
# Mocked geocode result as projected (Web Mercator-style) x/y geometry.
GEOCODE_ADDRESS_MOCK = {
'geometry':{'x':-7917110.640673582,'y':5211147.471469648}}
# Mocked trash-pickup district query: one weekly (Wednesday-only) district.
GET_TRASH_PICKUP_API_MOCK = {
"displayFieldName": "DISTRICTID",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"DISTRICTID": "District ID",
"NAME": "District Name",
"AGENCY": "Agency",
"AGENCYURL": "Website",
"CONTACT": "Contact Name",
"PHONE": "Phone",
"EMAIL": "Email",
"SUBTYPEFIELD": "Subtype Field",
"SCHEDULE": "Schedule",
"DESCRIPT": "Additional Information",
"MONDAY": "Monday",
"TUESDAY": "Tuesday",
"WEDNESDAY": "Wednesday",
"THURSDAY": "Thursday",
"FRIDAY": "Friday",
"SATURDAY": "Saturday",
"SUNDAY": "Sunday",
"GlobalID": "GlobalID"
},
"features": [
{
"attributes": {
"OBJECTID": 2,
"DISTRICTID": "1",
"NAME": "Wednesday Trash",
"AGENCY": "Town of Brookline",
"AGENCYURL": "http://ma-brookline.civicplus.com/820/Trash-Recycling-Information",
"CONTACT": "Brookline Public Works",
"PHONE": "null",
"EMAIL": "null",
"SUBTYPEFIELD": 0,
"SCHEDULE": "Weekly",
"DESCRIPT": "http://ma-brookline.civicplus.com/834/Holiday-Schedule-Christmas-Tree-Pick-Up",
"MONDAY": "No",
"TUESDAY": "No",
"WEDNESDAY": "Yes",
"THURSDAY": "No",
"FRIDAY": "No",
"SATURDAY": "No",
"SUNDAY": "No",
"GlobalID": "{20538BBF-EB83-4CCD-B1E8-93EF36554413}"
},
"geometry": {
"x": -71.121409303637222,
"y": 42.333789044263746
}
}
]
}
# Trash-pickup response with an empty "features" list (no-results path).
NO_RESPONSE_TRASH_PICKUP_API_MOCK = {
"displayFieldName": "DISTRICTID",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"DISTRICTID": "District ID",
"NAME": "District Name",
"AGENCY": "Agency",
"AGENCYURL": "Website",
"CONTACT": "Contact Name",
"PHONE": "Phone",
"EMAIL": "Email",
"SUBTYPEFIELD": "Subtype Field",
"SCHEDULE": "Schedule",
"DESCRIPT": "Additional Information",
"MONDAY": "Monday",
"TUESDAY": "Tuesday",
"WEDNESDAY": "Wednesday",
"THURSDAY": "Thursday",
"FRIDAY": "Friday",
"SATURDAY": "Saturday",
"SUNDAY": "Sunday",
"GlobalID": "GlobalID"
},
"features": []
}
# Voting-precinct response with full field metadata but zero features.
NO_RESPONSE_VOTING_PRECINCT_API_MOCK = {
"displayFieldName": "NAME",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"PRECINCTID": "Precinct ID",
"NAME": "Precinct Name",
"COUNTY": "County",
"LASTUPDATE": "Last Update Date",
"LASTEDITOR": "Last Editor",
"SHAPE_Length": "SHAPE_Length",
"SHAPE_Area": "SHAPE_Area",
"STR_NUM": "STR_NUM",
"STREET": "STREET",
"ADDRESS": "ADDRESS",
"LOC_DESC": "LOC_DESC"
},
"geometryType": "esriGeometryPolygon",
"spatialReference": {
"wkid": 102100,
"latestWkid": 3857
},
"fields": [
{
"name": "OBJECTID",
"type": "esriFieldTypeOID",
"alias": "OBJECTID"
},
{
"name": "PRECINCTID",
"type": "esriFieldTypeString",
"alias": "Precinct ID",
"length": 10
},
{
"name": "NAME",
"type": "esriFieldTypeString",
"alias": "Precinct Name",
"length": 50
},
{
"name": "COUNTY",
"type": "esriFieldTypeString",
"alias": "County",
"length": 50
},
{
"name": "LASTUPDATE",
"type": "esriFieldTypeDate",
"alias": "Last Update Date",
"length": 8
},
{
"name": "LASTEDITOR",
"type": "esriFieldTypeString",
"alias": "Last Editor",
"length": 50
},
{
"name": "SHAPE_Length",
"type": "esriFieldTypeDouble",
"alias": "SHAPE_Length"
},
{
"name": "SHAPE_Area",
"type": "esriFieldTypeDouble",
"alias": "SHAPE_Area"
},
{
"name": "STR_NUM",
"type": "esriFieldTypeString",
"alias": "STR_NUM",
"length": 8
},
{
"name": "STREET",
"type": "esriFieldTypeString",
"alias": "STREET",
"length": 32
},
{
"name": "ADDRESS",
"type": "esriFieldTypeString",
"alias": "ADDRESS",
"length": 64
},
{
"name": "LOC_DESC",
"type": "esriFieldTypeString",
"alias": "LOC_DESC",
"length": 128
}
],
"features": [
]
}
# Mocked voting-precinct query: one precinct feature with polling location.
GET_VOTING_PRECINCT_API_MOCK = {
"displayFieldName": "NAME",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"PRECINCTID": "Precinct ID",
"NAME": "Precinct Name",
"COUNTY": "County",
"LASTUPDATE": "Last Update Date",
"LASTEDITOR": "Last Editor",
"SHAPE_Length": "SHAPE_Length",
"SHAPE_Area": "SHAPE_Area",
"STR_NUM": "STR_NUM",
"STREET": "STREET",
"ADDRESS": "ADDRESS",
"LOC_DESC": "LOC_DESC"
},
"geometryType": "esriGeometryPolygon",
"spatialReference": {
"wkid": 102100,
"latestWkid": 3857
},
"fields": [
{
"name": "OBJECTID",
"type": "esriFieldTypeOID",
"alias": "OBJECTID"
},
{
"name": "PRECINCTID",
"type": "esriFieldTypeString",
"alias": "Precinct ID",
"length": 10
},
{
"name": "NAME",
"type": "esriFieldTypeString",
"alias": "Precinct Name",
"length": 50
},
{
"name": "COUNTY",
"type": "esriFieldTypeString",
"alias": "County",
"length": 50
},
{
"name": "LASTUPDATE",
"type": "esriFieldTypeDate",
"alias": "Last Update Date",
"length": 8
},
{
"name": "LASTEDITOR",
"type": "esriFieldTypeString",
"alias": "Last Editor",
"length": 50
},
{
"name": "SHAPE_Length",
"type": "esriFieldTypeDouble",
"alias": "SHAPE_Length"
},
{
"name": "SHAPE_Area",
"type": "esriFieldTypeDouble",
"alias": "SHAPE_Area"
},
{
"name": "STR_NUM",
"type": "esriFieldTypeString",
"alias": "STR_NUM",
"length": 8
},
{
"name": "STREET",
"type": "esriFieldTypeString",
"alias": "STREET",
"length": 32
},
{
"name": "ADDRESS",
"type": "esriFieldTypeString",
"alias": "ADDRESS",
"length": 64
},
{
"name": "LOC_DESC",
"type": "esriFieldTypeString",
"alias": "LOC_DESC",
"length": 128
}
],
"features": [
{
"attributes": {
"OBJECTID": 19,
"PRECINCTID": "6",
"NAME": "Precinct 6 - Vote at 115 Greenough St",
"COUNTY": "Norfolk",
"LASTUPDATE": 'null',
"LASTEDITOR": 'null',
"SHAPE_Length": 15810.092868688193,
"SHAPE_Area": 7070142.9764727177,
"STR_NUM": "115",
"STREET": "GREENOUGH ST",
"ADDRESS": "115 GREENOUGH ST",
"LOC_DESC": "BHS Schluntz Gymnasium, 115 Greenough Street"
},
}
]
}
# Mocked library query: three branch features with projected x/y geometry.
GET_LIBRARY_API_MOCK = {
"features": [
{
"attributes": {
"OBJECTID": 1,
"FACILITYID": "None",
"NAME": "Coolidge Corner Library",
"OWNER": "Town of Brookline",
"OWNTYPE": "None",
"SUBTYPEFIELD": "None",
"FEATURECODE": "None",
"FULLADDR": "31 Pleasant St, Brookline, MA 02446",
"AGENCYURL": "http://www.brooklinelibrary.org/",
"OPERDAYS": "Other",
"OPERHOURS": "Other",
"CONTACT": "None",
"PHONE": "617-730-2380",
"EMAIL": "http://www.brooklinelibrary.org/about/email"
},
"geometry": {
"x": -7916949.550832789,
"y": 5212579.537906414
}
},
{
"attributes": {
"OBJECTID": 2,
"FACILITYID": "None",
"NAME": "Main Library",
"OWNER": "Town of Brookline",
"OWNTYPE": "None",
"SUBTYPEFIELD": "None",
"FEATURECODE": "None",
"FULLADDR": "361 Washington St, Brookline, MA 02445",
"AGENCYURL": "http://www.brooklinelibrary.org/",
"OPERDAYS": "Other",
"OPERHOURS": "Other",
"CONTACT": "None",
"PHONE": "617-730-2370",
"EMAIL": "http://www.brooklinelibrary.org/about/email"
},
"geometry": {
"x": -7917194.260973867,
"y": 5211229.5272506215
}
},
{
"attributes": {
"OBJECTID": 3,
"FACILITYID": "None",
"NAME": "Putterham Branch Library",
"OWNER": "Town of Brookline",
"OWNTYPE": "None",
"SUBTYPEFIELD": "None",
"FEATURECODE": "None",
"FULLADDR": "959 W Roxbury Pky, Brookline, MA 02467",
"AGENCYURL": "http://www.brooklinelibrary.org/",
"OPERDAYS": "Other",
"OPERHOURS": "Other",
"CONTACT": "None",
"PHONE": "617-730-2385",
"EMAIL": "http://www.brooklinelibrary.org/about/email"
},
"geometry": {
"x": -7920391.679580264,
"y": 5206399.414108847
}
}
]
}
# Mocked school-district query: one district feature with polygon rings.
GET_SCHOOL_DISTRICT_API_MOCK = {
"displayFieldName": "NAME",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"NAME": "School Name",
"DISTRCTNAME": "School District Name",
"SCHOOLAREA": "Area in Square Miles",
"LASTUPDATE": "Last Update Date",
"LASTEDITOR": "Last Editor"
},
"features": [
{
"attributes": {
"OBJECTID": 1,
"NAME": "Brookline School",
"DISTRCTNAME": "Brookline",
"SCHOOLAREA": "1",
"LASTUPDATE": None,
"LASTEDITOR": None
},
"geometry": {
"rings": [[[-7920615.96685251,5205180.75934551]]]
}
}
]
}
# School-district response with an empty "features" list (no-results path).
NO_RESULTS_GET_SCHOOL_DISTRICT_API_MOCK = {
"displayFieldName": "NAME",
"fieldAliases": {
"OBJECTID": "OBJECTID",
"NAME": "School Name",
"DISTRCTNAME": "School District Name",
"SCHOOLAREA": "Area in Square Miles",
"LASTUPDATE": "Last Update Date",
"LASTEDITOR": "Last Editor"
},
"features": []
}
| get_police_station_api_mock = {'displayFieldName': 'NAME', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'FACILITYID': 'Facility Identifier', 'NAME': 'Name of Facility', 'OWNER': 'Owner Name', 'OWNTYPE': 'Owner Type', 'SUBTYPEFIELD': 'Subtype Field', 'FEATURECODE': 'Feature Code', 'FULLADDR': 'Full Address', 'AGENCYURL': 'Website', 'OPERDAYS': 'Operational Days', 'OPERHOURS': 'Operational Hours', 'CONTACT': 'Contact Name', 'PHONE': 'Phone', 'EMAIL': 'Email'}, 'features': [{'attributes': {'OBJECTID': 1, 'FACILITYID': None, 'NAME': 'Brookline Public Safety HQ', 'OWNER': 'Town of Brookline', 'OWNTYPE': None, 'SUBTYPEFIELD': None, 'FEATURECODE': None, 'FULLADDR': '350 Washington St, Brookline, MA 02445', 'AGENCYURL': 'http://www.brooklinepolice.com/', 'OPERDAYS': 'Other', 'OPERHOURS': 'Other', 'CONTACT': None, 'PHONE': '617-730-2222', 'EMAIL': None}, 'geometry': {'x': -71.12140930363722, 'y': 42.333789044263746}}]}
no_results_get_police_station_api_mock = {'displayFieldName': 'NAME', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'FACILITYID': 'Facility Identifier', 'NAME': 'Name of Facility', 'OWNER': 'Owner Name', 'OWNTYPE': 'Owner Type', 'SUBTYPEFIELD': 'Subtype Field', 'FEATURECODE': 'Feature Code', 'FULLADDR': 'Full Address', 'AGENCYURL': 'Website', 'OPERDAYS': 'Operational Days', 'OPERHOURS': 'Operational Hours', 'CONTACT': 'Contact Name', 'PHONE': 'Phone', 'EMAIL': 'Email'}, 'features': []}
geocode_mock = {'spatialReference': {'wkid': 4326, 'latestWkid': 4326}, 'x': -71.1206143363372, 'y': 42.33402053551253, 'z': 0}
location_mock = {'x': -71.1206143363372, 'y': 42.33402053551253, 'z': 0}
spacial_reference_mock = {'wkid': 4326, 'latestWkid': 4326}
address_candidate_mock = {'address': '333 WASHINGTON ST', 'location': {'x': -71.1206143363372, 'y': 42.33402053551253, 'z': 0}, 'score': 100, 'attributes': {'Loc_name': 'Building', 'Score': 100, 'Match_addr': '333 WASHINGTON ST'}}
get_address_candidates_api_mock = {'spatialReference': {'wkid': 4326, 'latestWkid': 4326}, 'candidates': [{'address': '333 WASHINGTON ST', 'location': {'x': -71.1206143363372, 'y': 42.33402053551253, 'z': 0}, 'score': 100, 'attributes': {'Loc_name': 'Building', 'Score': 100, 'Match_addr': '333 WASHINGTON ST'}}]}
geocode_address_mock = {'geometry': {'x': -7917110.640673582, 'y': 5211147.471469648}}
get_trash_pickup_api_mock = {'displayFieldName': 'DISTRICTID', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'DISTRICTID': 'District ID', 'NAME': 'District Name', 'AGENCY': 'Agency', 'AGENCYURL': 'Website', 'CONTACT': 'Contact Name', 'PHONE': 'Phone', 'EMAIL': 'Email', 'SUBTYPEFIELD': 'Subtype Field', 'SCHEDULE': 'Schedule', 'DESCRIPT': 'Additional Information', 'MONDAY': 'Monday', 'TUESDAY': 'Tuesday', 'WEDNESDAY': 'Wednesday', 'THURSDAY': 'Thursday', 'FRIDAY': 'Friday', 'SATURDAY': 'Saturday', 'SUNDAY': 'Sunday', 'GlobalID': 'GlobalID'}, 'features': [{'attributes': {'OBJECTID': 2, 'DISTRICTID': '1', 'NAME': 'Wednesday Trash', 'AGENCY': 'Town of Brookline', 'AGENCYURL': 'http://ma-brookline.civicplus.com/820/Trash-Recycling-Information', 'CONTACT': 'Brookline Public Works', 'PHONE': 'null', 'EMAIL': 'null', 'SUBTYPEFIELD': 0, 'SCHEDULE': 'Weekly', 'DESCRIPT': 'http://ma-brookline.civicplus.com/834/Holiday-Schedule-Christmas-Tree-Pick-Up', 'MONDAY': 'No', 'TUESDAY': 'No', 'WEDNESDAY': 'Yes', 'THURSDAY': 'No', 'FRIDAY': 'No', 'SATURDAY': 'No', 'SUNDAY': 'No', 'GlobalID': '{20538BBF-EB83-4CCD-B1E8-93EF36554413}'}, 'geometry': {'x': -71.12140930363722, 'y': 42.333789044263746}}]}
no_response_trash_pickup_api_mock = {'displayFieldName': 'DISTRICTID', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'DISTRICTID': 'District ID', 'NAME': 'District Name', 'AGENCY': 'Agency', 'AGENCYURL': 'Website', 'CONTACT': 'Contact Name', 'PHONE': 'Phone', 'EMAIL': 'Email', 'SUBTYPEFIELD': 'Subtype Field', 'SCHEDULE': 'Schedule', 'DESCRIPT': 'Additional Information', 'MONDAY': 'Monday', 'TUESDAY': 'Tuesday', 'WEDNESDAY': 'Wednesday', 'THURSDAY': 'Thursday', 'FRIDAY': 'Friday', 'SATURDAY': 'Saturday', 'SUNDAY': 'Sunday', 'GlobalID': 'GlobalID'}, 'features': []}
no_response_voting_precinct_api_mock = {'displayFieldName': 'NAME', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'PRECINCTID': 'Precinct ID', 'NAME': 'Precinct Name', 'COUNTY': 'County', 'LASTUPDATE': 'Last Update Date', 'LASTEDITOR': 'Last Editor', 'SHAPE_Length': 'SHAPE_Length', 'SHAPE_Area': 'SHAPE_Area', 'STR_NUM': 'STR_NUM', 'STREET': 'STREET', 'ADDRESS': 'ADDRESS', 'LOC_DESC': 'LOC_DESC'}, 'geometryType': 'esriGeometryPolygon', 'spatialReference': {'wkid': 102100, 'latestWkid': 3857}, 'fields': [{'name': 'OBJECTID', 'type': 'esriFieldTypeOID', 'alias': 'OBJECTID'}, {'name': 'PRECINCTID', 'type': 'esriFieldTypeString', 'alias': 'Precinct ID', 'length': 10}, {'name': 'NAME', 'type': 'esriFieldTypeString', 'alias': 'Precinct Name', 'length': 50}, {'name': 'COUNTY', 'type': 'esriFieldTypeString', 'alias': 'County', 'length': 50}, {'name': 'LASTUPDATE', 'type': 'esriFieldTypeDate', 'alias': 'Last Update Date', 'length': 8}, {'name': 'LASTEDITOR', 'type': 'esriFieldTypeString', 'alias': 'Last Editor', 'length': 50}, {'name': 'SHAPE_Length', 'type': 'esriFieldTypeDouble', 'alias': 'SHAPE_Length'}, {'name': 'SHAPE_Area', 'type': 'esriFieldTypeDouble', 'alias': 'SHAPE_Area'}, {'name': 'STR_NUM', 'type': 'esriFieldTypeString', 'alias': 'STR_NUM', 'length': 8}, {'name': 'STREET', 'type': 'esriFieldTypeString', 'alias': 'STREET', 'length': 32}, {'name': 'ADDRESS', 'type': 'esriFieldTypeString', 'alias': 'ADDRESS', 'length': 64}, {'name': 'LOC_DESC', 'type': 'esriFieldTypeString', 'alias': 'LOC_DESC', 'length': 128}], 'features': []}
get_voting_precinct_api_mock = {'displayFieldName': 'NAME', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'PRECINCTID': 'Precinct ID', 'NAME': 'Precinct Name', 'COUNTY': 'County', 'LASTUPDATE': 'Last Update Date', 'LASTEDITOR': 'Last Editor', 'SHAPE_Length': 'SHAPE_Length', 'SHAPE_Area': 'SHAPE_Area', 'STR_NUM': 'STR_NUM', 'STREET': 'STREET', 'ADDRESS': 'ADDRESS', 'LOC_DESC': 'LOC_DESC'}, 'geometryType': 'esriGeometryPolygon', 'spatialReference': {'wkid': 102100, 'latestWkid': 3857}, 'fields': [{'name': 'OBJECTID', 'type': 'esriFieldTypeOID', 'alias': 'OBJECTID'}, {'name': 'PRECINCTID', 'type': 'esriFieldTypeString', 'alias': 'Precinct ID', 'length': 10}, {'name': 'NAME', 'type': 'esriFieldTypeString', 'alias': 'Precinct Name', 'length': 50}, {'name': 'COUNTY', 'type': 'esriFieldTypeString', 'alias': 'County', 'length': 50}, {'name': 'LASTUPDATE', 'type': 'esriFieldTypeDate', 'alias': 'Last Update Date', 'length': 8}, {'name': 'LASTEDITOR', 'type': 'esriFieldTypeString', 'alias': 'Last Editor', 'length': 50}, {'name': 'SHAPE_Length', 'type': 'esriFieldTypeDouble', 'alias': 'SHAPE_Length'}, {'name': 'SHAPE_Area', 'type': 'esriFieldTypeDouble', 'alias': 'SHAPE_Area'}, {'name': 'STR_NUM', 'type': 'esriFieldTypeString', 'alias': 'STR_NUM', 'length': 8}, {'name': 'STREET', 'type': 'esriFieldTypeString', 'alias': 'STREET', 'length': 32}, {'name': 'ADDRESS', 'type': 'esriFieldTypeString', 'alias': 'ADDRESS', 'length': 64}, {'name': 'LOC_DESC', 'type': 'esriFieldTypeString', 'alias': 'LOC_DESC', 'length': 128}], 'features': [{'attributes': {'OBJECTID': 19, 'PRECINCTID': '6', 'NAME': 'Precinct 6 - Vote at 115 Greenough St', 'COUNTY': 'Norfolk', 'LASTUPDATE': 'null', 'LASTEDITOR': 'null', 'SHAPE_Length': 15810.092868688193, 'SHAPE_Area': 7070142.976472718, 'STR_NUM': '115', 'STREET': 'GREENOUGH ST', 'ADDRESS': '115 GREENOUGH ST', 'LOC_DESC': 'BHS Schluntz Gymnasium, 115 Greenough Street'}}]}
get_library_api_mock = {'features': [{'attributes': {'OBJECTID': 1, 'FACILITYID': 'None', 'NAME': 'Coolidge Corner Library', 'OWNER': 'Town of Brookline', 'OWNTYPE': 'None', 'SUBTYPEFIELD': 'None', 'FEATURECODE': 'None', 'FULLADDR': '31 Pleasant St, Brookline, MA 02446', 'AGENCYURL': 'http://www.brooklinelibrary.org/', 'OPERDAYS': 'Other', 'OPERHOURS': 'Other', 'CONTACT': 'None', 'PHONE': '617-730-2380', 'EMAIL': 'http://www.brooklinelibrary.org/about/email'}, 'geometry': {'x': -7916949.550832789, 'y': 5212579.537906414}}, {'attributes': {'OBJECTID': 2, 'FACILITYID': 'None', 'NAME': 'Main Library', 'OWNER': 'Town of Brookline', 'OWNTYPE': 'None', 'SUBTYPEFIELD': 'None', 'FEATURECODE': 'None', 'FULLADDR': '361 Washington St, Brookline, MA 02445', 'AGENCYURL': 'http://www.brooklinelibrary.org/', 'OPERDAYS': 'Other', 'OPERHOURS': 'Other', 'CONTACT': 'None', 'PHONE': '617-730-2370', 'EMAIL': 'http://www.brooklinelibrary.org/about/email'}, 'geometry': {'x': -7917194.260973867, 'y': 5211229.5272506215}}, {'attributes': {'OBJECTID': 3, 'FACILITYID': 'None', 'NAME': 'Putterham Branch Library', 'OWNER': 'Town of Brookline', 'OWNTYPE': 'None', 'SUBTYPEFIELD': 'None', 'FEATURECODE': 'None', 'FULLADDR': '959 W Roxbury Pky, Brookline, MA 02467', 'AGENCYURL': 'http://www.brooklinelibrary.org/', 'OPERDAYS': 'Other', 'OPERHOURS': 'Other', 'CONTACT': 'None', 'PHONE': '617-730-2385', 'EMAIL': 'http://www.brooklinelibrary.org/about/email'}, 'geometry': {'x': -7920391.679580264, 'y': 5206399.414108847}}]}
get_school_district_api_mock = {'displayFieldName': 'NAME', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'NAME': 'School Name', 'DISTRCTNAME': 'School District Name', 'SCHOOLAREA': 'Area in Square Miles', 'LASTUPDATE': 'Last Update Date', 'LASTEDITOR': 'Last Editor'}, 'features': [{'attributes': {'OBJECTID': 1, 'NAME': 'Brookline School', 'DISTRCTNAME': 'Brookline', 'SCHOOLAREA': '1', 'LASTUPDATE': None, 'LASTEDITOR': None}, 'geometry': {'rings': [[[-7920615.96685251, 5205180.75934551]]]}}]}
no_results_get_school_district_api_mock = {'displayFieldName': 'NAME', 'fieldAliases': {'OBJECTID': 'OBJECTID', 'NAME': 'School Name', 'DISTRCTNAME': 'School District Name', 'SCHOOLAREA': 'Area in Square Miles', 'LASTUPDATE': 'Last Update Date', 'LASTEDITOR': 'Last Editor'}, 'features': []} |
#https://www.hackerrank.com/contests/w35/challenges/lucky-purchase/problem
books= {} ;
n= int(input()) ;
for a0 in range(n):
string = input().split() ;
books[string[0]] = string[1] ;
books = list(books.items()) ;
mini = books[0] ;
for pair in books:
if pair[1].count('7')+pair[1].count('4')!=len(pair[1]):
continue;
if(pair[1].count('7')==pair[1].count('4')):
if(int(pair[1])<int(mini[1])):
mini = pair ;
print(mini[0] if mini[1].count('7')==mini[1].count('4') and mini[1].count('7')+mini[1].count('4')==len(mini[1]) else "-1") ; | books = {}
n = int(input())
for a0 in range(n):
string = input().split()
books[string[0]] = string[1]
books = list(books.items())
mini = books[0]
for pair in books:
if pair[1].count('7') + pair[1].count('4') != len(pair[1]):
continue
if pair[1].count('7') == pair[1].count('4'):
if int(pair[1]) < int(mini[1]):
mini = pair
print(mini[0] if mini[1].count('7') == mini[1].count('4') and mini[1].count('7') + mini[1].count('4') == len(mini[1]) else '-1') |
n = int(input("_gram? - "))
s = input("words: ")
grams = []
for i in range(len(s)):
op = ""
for x in range(i, i + n):
if x >= len(s):
op = ""
break
op += s[x]
if op != "":
grams.append(op)
print(op)
grams.sort()
print(grams)
| n = int(input('_gram? - '))
s = input('words: ')
grams = []
for i in range(len(s)):
op = ''
for x in range(i, i + n):
if x >= len(s):
op = ''
break
op += s[x]
if op != '':
grams.append(op)
print(op)
grams.sort()
print(grams) |
VERSION = '0.1.6b0'
ENV_LIST = ['clustering-v0', 'clustering-v1',
'clustering-v2', 'clustering-v3', 'classification-v0']
| version = '0.1.6b0'
env_list = ['clustering-v0', 'clustering-v1', 'clustering-v2', 'clustering-v3', 'classification-v0'] |
# Solution A
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
prev = None
cur = head
while cur:
tmp = cur.next
cur.next = prev
prev = cur
cur = tmp
return prev
# Solution B
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
newhead = self.reverseList(head.next)
head.next.next = head
head.next = None
return newhead
| class Solution:
def reverse_list(self, head: ListNode) -> ListNode:
prev = None
cur = head
while cur:
tmp = cur.next
cur.next = prev
prev = cur
cur = tmp
return prev
class Solution:
def reverse_list(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
newhead = self.reverseList(head.next)
head.next.next = head
head.next = None
return newhead |
a=[2, 3, 5, 7]
print(a)
a.reverse()
print(a)
| a = [2, 3, 5, 7]
print(a)
a.reverse()
print(a) |
# Copyright (C) 2018 O.S. Systems Software LTDA.
"""
This is the updatehub agent package SDK for the Python language. With this
package it's possible to access information about a running updatehub agent,
such as current states, log information and also force a new version probe.
For more information please access http://docs.updatehub.io/
"""
__version__ = '2.0.0'
def get_version():
"""
Returns the current version of the package.
"""
return __version__
| """
This is the updatehub agent package SDK for the Python language. With this
package it's possible to access information about a running updatehub agent,
such as current states, log information and also force a new version probe.
For more information please access http://docs.updatehub.io/
"""
__version__ = '2.0.0'
def get_version():
"""
Returns the current version of the package.
"""
return __version__ |
def binarySearch(arr, x, left, right):
if right <= left:
if right + 1 <= len(arr):
return right + 1
return -1
mid = (left + right) // 2
if x <= arr[mid]:
return binarySearch(arr, x, left, mid)
else:
return binarySearch(arr, x, mid + 1, right)
def read_input():
n = input()
arr = list(map(int, input().split()))
x = int(input())
return arr, x
if __name__ == '__main__':
arr, x = read_input()
index = binarySearch(arr, x, left=0, right=len(arr))
index2 = binarySearch(arr, 2*x, left=0, right=len(arr))
print(index, index2)
| def binary_search(arr, x, left, right):
if right <= left:
if right + 1 <= len(arr):
return right + 1
return -1
mid = (left + right) // 2
if x <= arr[mid]:
return binary_search(arr, x, left, mid)
else:
return binary_search(arr, x, mid + 1, right)
def read_input():
n = input()
arr = list(map(int, input().split()))
x = int(input())
return (arr, x)
if __name__ == '__main__':
(arr, x) = read_input()
index = binary_search(arr, x, left=0, right=len(arr))
index2 = binary_search(arr, 2 * x, left=0, right=len(arr))
print(index, index2) |
river = {
'nile': 'egypt',
'huang': 'china',
'mississippi':'USA'
}
for key, value in river.items():
print("The " + key + "runs through " + value +".")
for name in (river.keys()):
print(name)
for country in (river.values()):
print(country) | river = {'nile': 'egypt', 'huang': 'china', 'mississippi': 'USA'}
for (key, value) in river.items():
print('The ' + key + 'runs through ' + value + '.')
for name in river.keys():
print(name)
for country in river.values():
print(country) |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package provides the packages and modules to perform IO from various
input file formats and pymatgen objects.
"""
| """
This package provides the packages and modules to perform IO from various
input file formats and pymatgen objects.
""" |
# (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '1.1.0'
| __version__ = '1.1.0' |
def printList():
li=list()
for i in range(1,21):
li.append(i**2)
print(li)
printList() | def print_list():
li = list()
for i in range(1, 21):
li.append(i ** 2)
print(li)
print_list() |
def count_substring(string, sub_string):
res = 0
len_sub = len(sub_string)
for i in range(len(string) - len_sub + 1):
if string[i:i + len_sub] == sub_string:
res += 1
i += 1
return res
| def count_substring(string, sub_string):
res = 0
len_sub = len(sub_string)
for i in range(len(string) - len_sub + 1):
if string[i:i + len_sub] == sub_string:
res += 1
i += 1
return res |
class shell_colors(object):
"""
Short hand for shell colors
"""
magenta = "\033[95m"
cyan = "\033[94m"
green = "\033[92m"
warn = "\033[93m"
err = "\033[91m"
end = "\033[0m"
bold = "\033[1m"
undeline = "\033[4m" | class Shell_Colors(object):
"""
Short hand for shell colors
"""
magenta = '\x1b[95m'
cyan = '\x1b[94m'
green = '\x1b[92m'
warn = '\x1b[93m'
err = '\x1b[91m'
end = '\x1b[0m'
bold = '\x1b[1m'
undeline = '\x1b[4m' |
name = "hyperparameter"
"""
The hyperparameter module
"""
| name = 'hyperparameter'
'\nThe hyperparameter module\n' |
"""19_sum_of_squares_of_first_n_natural_numbers.py."""
print("---- Sum of Squares of first n Natural numbers ----")
print("TODO")
| """19_sum_of_squares_of_first_n_natural_numbers.py."""
print('---- Sum of Squares of first n Natural numbers ----')
print('TODO') |
_base_="../base-ucmerced-config-simpleaug.py"
# this will merge with the parent
model=dict(pretrained='data/basetrain_chkpts/moco_v2_800ep.pth')
# epoch related
total_iters=5000
checkpoint_config = dict(interval=total_iters)
model = dict(
pretrained='data/basetrain_chkpts/moco_v2_800ep.pth',
backbone=dict(
norm_train=True,
frozen_stages=4,
)
)
optimizer = dict(type='SGD', lr=0.1, weight_decay=0.0001, momentum=0.9)
| _base_ = '../base-ucmerced-config-simpleaug.py'
model = dict(pretrained='data/basetrain_chkpts/moco_v2_800ep.pth')
total_iters = 5000
checkpoint_config = dict(interval=total_iters)
model = dict(pretrained='data/basetrain_chkpts/moco_v2_800ep.pth', backbone=dict(norm_train=True, frozen_stages=4))
optimizer = dict(type='SGD', lr=0.1, weight_decay=0.0001, momentum=0.9) |
def finferensi(berat, pinjam, harga):
linguistik = []
derajat = []
for bk, bv in berat.items():
for pk, pv in pinjam.items():
for hk, hv in harga.items():
# rule 1
if bk == 'ringan' and pk == 'sebentar' and hk == 'rendah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 2
if bk == 'ringan' and pk == 'sebentar' and hk == 'menengah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 3
if bk == 'ringan' and pk == 'sebentar' and hk == 'tinggi':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 4
if bk == 'ringan' and pk == 'menengah' and hk == 'rendah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 5
if bk == 'ringan' and pk == 'menengah' and hk == 'menengah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 6
if bk == 'ringan' and pk == 'menengah' and hk == 'tinggi':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 7
if bk == 'ringan' and pk == 'lama' and hk == 'rendah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 8
if bk == 'ringan' and pk == 'lama' and hk == 'menengah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 9
if bk == 'ringan' and pk == 'lama' and hk == 'tinggi':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 10
if bk == 'sedang' and pk == 'sebentar' and hk == 'rendah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 11
if bk == 'sedang' and pk == 'sebentar' and hk == 'menengah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 12
if bk == 'sedang' and pk == 'sebentar' and hk == 'tinggi':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 13
if bk == 'sedang' and pk == 'menengah' and hk == 'rendah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 14
if bk == 'sedang' and pk == 'menengah' and hk == 'menengah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 15
if bk == 'sedang' and pk == 'menengah' and hk == 'tinggi':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 16
if bk == 'sedang' and pk == 'lama' and hk == 'rendah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 17
if bk == 'sedang' and pk == 'lama' and hk == 'menengah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 18
if bk == 'sedang' and pk == 'lama' and hk == 'tinggi':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 19
if bk == 'berat' and pk == 'sebentar' and hk == 'rendah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 20
if bk == 'berat' and pk == 'sebentar' and hk == 'menengah':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 21
if bk == 'berat' and pk == 'sebentar' and hk == 'tinggi':
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
# rule 22
if bk == 'berat' and pk == 'menengah' and hk == 'rendah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 23
if bk == 'berat' and pk == 'menengah' and hk == 'menengah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 24
if bk == 'berat' and pk == 'menengah' and hk == 'tinggi':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 25
if bk == 'berat' and pk == 'lama' and hk == 'rendah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 26
if bk == 'berat' and pk == 'lama' and hk == 'menengah':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
# rule 27
if bk == 'berat' and pk == 'lama' and hk == 'tinggi':
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
i = 0
templinguistik = []
tempderajat = []
for index,nama in enumerate(linguistik):
if derajat[i] != 0:
templinguistik.append(nama)
tempderajat.append(derajat[i])
print(f'{index+1}. {nama}: {derajat[i]}')
i += 1
tkecil = []
tbesar = []
for li,de in zip (templinguistik,tempderajat):
if li == 'kecil':
tkecil.append(de)
else:
tbesar.append(de)
if tkecil == []:
tkecil = [0,0]
elif tbesar == []:
tbesar = [0,0]
kecil = max(tkecil)
besar = max(tbesar)
output = dict(kecil=kecil,besar=besar)
return output
| def finferensi(berat, pinjam, harga):
linguistik = []
derajat = []
for (bk, bv) in berat.items():
for (pk, pv) in pinjam.items():
for (hk, hv) in harga.items():
if bk == 'ringan' and pk == 'sebentar' and (hk == 'rendah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'sebentar' and (hk == 'menengah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'sebentar' and (hk == 'tinggi'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'menengah' and (hk == 'rendah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'menengah' and (hk == 'menengah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'menengah' and (hk == 'tinggi'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'lama' and (hk == 'rendah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'lama' and (hk == 'menengah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'ringan' and pk == 'lama' and (hk == 'tinggi'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'sebentar' and (hk == 'rendah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'sebentar' and (hk == 'menengah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'sebentar' and (hk == 'tinggi'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'menengah' and (hk == 'rendah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'menengah' and (hk == 'menengah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'menengah' and (hk == 'tinggi'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'lama' and (hk == 'rendah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'lama' and (hk == 'menengah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'sedang' and pk == 'lama' and (hk == 'tinggi'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'sebentar' and (hk == 'rendah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'sebentar' and (hk == 'menengah'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'sebentar' and (hk == 'tinggi'):
linguistik.append('kecil')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'menengah' and (hk == 'rendah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'menengah' and (hk == 'menengah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'menengah' and (hk == 'tinggi'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'lama' and (hk == 'rendah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'lama' and (hk == 'menengah'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
if bk == 'berat' and pk == 'lama' and (hk == 'tinggi'):
linguistik.append('besar')
derajat.append(min(bv, pv, hv))
i = 0
templinguistik = []
tempderajat = []
for (index, nama) in enumerate(linguistik):
if derajat[i] != 0:
templinguistik.append(nama)
tempderajat.append(derajat[i])
print(f'{index + 1}. {nama}: {derajat[i]}')
i += 1
tkecil = []
tbesar = []
for (li, de) in zip(templinguistik, tempderajat):
if li == 'kecil':
tkecil.append(de)
else:
tbesar.append(de)
if tkecil == []:
tkecil = [0, 0]
elif tbesar == []:
tbesar = [0, 0]
kecil = max(tkecil)
besar = max(tbesar)
output = dict(kecil=kecil, besar=besar)
return output |
# Booknames.py
class Booknames:
def usfmBookId(self, bookName):
books = {
'Genesis': 'GEN',
'Exodus': 'EXO',
'Leviticus': 'LEV',
'Numbers': 'NUM',
'Deuteronomy': 'DEU',
'Joshua': 'JOS',
'Judges': 'JDG',
'Ruth': 'RUT',
'1Samuel': '1SA',
'2Samuel': '2SA',
'1Kings': '1KI',
'2Kings': '2KI',
'1Chronicles': '1CH',
'2Chronicles': '2CH',
'Ezra': 'EZR',
'1Ezra': 'EZR',
'Nehemiah': 'NEH',
'2Ezra': 'NEH',
'Esther': 'EST',
'Job': 'JOB',
'Psalms': 'PSA',
'PSALM': 'PSA',
'Proverbs': 'PRO',
'PROVERBS': 'PRO',
'Ecclesiastes': 'ECC',
'SongofSongs': 'SNG',
'Song': 'SNG',
'Isaiah': 'ISA',
'Jeremiah': 'JER',
'Lamentations': 'LAM',
'Ezekiel': 'EZK',
'Daniel': 'DAN',
'Hosea': 'HOS',
'Joel': 'JOL',
'Amos': 'AMO',
'Obadiah': 'OBA',
'Jonah': 'JON',
'Micah': 'MIC',
'Nahum': 'NAM',
'Habakkuk': 'HAB',
'Zephaniah': 'ZEP',
'Haggai': 'HAG',
'Zechariah': 'ZEC',
'Zachariah': 'ZEC',
'Malachi': 'MAL',
'Matthew': 'MAT',
'Mark': 'MRK',
'Luke': 'LUK',
'John': 'JHN',
'Acts': 'ACT',
'Romans': 'ROM',
'1Corinthians': '1CO',
'2Corinthians': '2CO',
'Galatians': 'GAL',
'Ephesians': 'EPH',
'Philippians': 'PHP',
'Phil': 'PHP',
'Colossians': 'COL',
'1Thess': '1TH',
'1Thessalonians':'1TH',
'2Thess': '2TH',
'2Thessalonians':'2TH',
'1Timothy': '1TI',
'2Timothy': '2TI',
'Titus': 'TIT',
'Philemon': 'PHM',
'Hebrews': 'HEB',
'James': 'JAS',
'1Peter': '1PE',
'2Peter': '2PE',
'1John': '1JN',
'2John': '2JN',
'3John': '3JN',
'Jude': 'JUD',
'Revelation': 'REV',
'Tobit': 'TOB',
'Judith': 'JDT',
'Wisdom': 'WIS',
'Sirach': 'SIR',
'Baruch': 'BAR',
'EpistJeremia': 'LJE',
'EpistJeremiah':'LJE',
'1Maccabees': '1MA',
'2Maccabees': '2MA',
'3Maccabees': '3MA',
'4Maccabees': '4MA',
# Spanish
'Exodo': 'EXO',
'Levitico': 'LEV',
'Levetico': 'LEV',
'Numeros': 'NUM',
'Deuteronomio': 'DEU',
'Josue': 'JOS',
'Jueces': 'JDG',
'Rut': 'RUT',
'1Reyes': '1KI',
'2Reyes': '2KI',
'1Cronicas': '1CH',
'2Cronicas': '2CH',
'Esdras': 'EZR',
'Nehemias': 'NEH',
'Ester': 'EST',
'Salmos': 'PSA',
'Salmo': 'PSA',
'salmo': 'PSA',
'Proverbios': 'PRO',
'Eclesiastes': 'ECC',
'Cantaras': 'SNG',
'Isaias': 'ISA',
'Jeremias': 'JER',
'Lamentacione': 'LAM',
'Ezequiel': 'EZK',
'Oseas': 'HOS',
'Abdias': 'OBA',
'Jonas': 'JON',
'Miqueas': 'MIC',
'Habacuc': 'HAB',
'Sofonias': 'ZEP',
'Hageo': 'HAG',
'Zacarias': 'ZEC',
'Malaquias': 'MAL',
'San_Mateo': 'MAT',
'San_Marcos': 'MRK',
'San_Lucas': 'LUK',
'San_Juan': 'JHN',
'San_1uan': 'JHN',
'Hechos': 'ACT',
'Romanos': 'ROM',
'1Corintios': '1CO',
'2Corintios': '2CO',
'Galatas': 'GAL',
'Efesios': 'EPH',
'Filipenses': 'PHP',
'Colosenses': 'COL',
'1Tes': '1TH',
'2Tes': '2TH',
'1Timoteo': '1TI',
'2Timoteo': '2TI',
'Tito': 'TIT',
'Filemon': 'PHM',
'Hebreos': 'HEB',
'Santiago': 'JAS',
'1San_Pedro': '1PE',
'2San_Pedro': '2PE',
'1San_Juan': '1JN',
'1San_1uan': '1JN',
'2San_Juan': '2JN',
'2San_1uan': '2JN',
'3San_Juan': '3JN',
'3San_1uan': '3JN',
'Judas': 'JUD',
'1udas': 'JUD',
'Apocalipsis': 'REV',
# Spanish Capitulo
'Genesis_Capitulo': 'GEN',
'Exodo_Capitulo': 'EXO',
'Levetico_Capitulo': 'LEV',
'Numeros_Capitulo': 'NUM',
'Deuteronomio_Capitulo':'DEU',
'Josue_Capitulo': 'JOS',
'Jueces_Capitulo': 'JDG',
'Ruth_Capitulo': 'RUT',
'1Samuel_Capitulo': '1SA',
'2Samuel_Capitulo': '2SA',
'1Reyes_Capitulo': '1KI',
'2Reyes_Capitulo': '2KI',
'Esdras_Capitulo': 'EZR',
'Esther_Capitulo': 'EST',
'Job_Capitulo': 'JOB',
'Salmos_Capitulo': 'PSA',
'Proverbios_Capitulo': 'PRO',
'Isaias_Capitulo': 'ISA',
'Ezequiel_Capitulo': 'EZK',
'Daniel_Capitulo': 'DAN',
'Oseas_Capitulo': 'HOS',
'Joel_Capitulo': 'JOL',
'Amos_Capitulo': 'AMO',
'Abdias_Capitulo': 'OBA',
'Jonas_Capitulo': 'JON',
'Miqueas_Capitulo': 'MIC',
'Nahum_Capitulo': 'NAM',
'Habacuc_Capitulo': 'HAB',
'Sofonias_Capitulo': 'ZEP',
# Spanish Book Codes, included as names, because not standard
'GEN': 'GEN',
'EXO': 'EXO',
'LEV': 'LEV',
'NUM': 'NUM',
'DEU': 'DEU',
'JOS': 'JOS',
'JUE': 'JDG',
'1SA': '1SA',
'2SA': '2SA',
'1RE': '1KI',
'2RE': '2KI',
'1CR': '1CH',
'2CR': '2CH',
'ESD': 'EZR',
'NEH': 'NEH',
'JOB': 'JOB',
'SAL': 'PSA',
'PRO': 'PRO',
'ISA': 'ISA',
'EZE': 'EZK',
'DAN': 'DAN',
'JOE': 'JOL',
'AMO': 'AMO',
'MIQ': 'MIC',
'HAB': 'HAB',
'SOF': 'ZEP',
'ZAC': 'ZEC',
'MAL': 'MAL',
# Portuguese
'Juizes': 'JDG',
'1Reis': '1KI',
'2Reis': '2KI',
'Neemias': 'NEH',
'Cantares': 'SNG',
'Lamentacoes': 'LAM',
'Obadias': 'OBA',
'Naum': 'NAM',
'Ageu': 'HAG',
'S_Mateus': 'MAT',
'S_Marcos': 'MRK',
'S_Lucas': 'LUK',
'S_Joao': 'JHN',
'Atos': 'ACT',
'Colossenses': 'COL',
'1Tess': '1TH',
'2Tess': '2TH',
'Hebreus': 'HEB',
'S_Tiago': 'JAS',
'1Pedro': '1PE',
'2Pedro': '2PE',
'1S_Joao': '1JN',
'2S_Joao': '2JN',
'3S_Joao': '3JN',
'S_Judas': 'JUD',
'Apocalipse': 'REV',
# French
'Genese': 'GEN',
'Exode': 'EXO',
'Levitique': 'LEV',
'Nombres': 'NUM',
'Deuteronome': 'DEU',
'Juges': 'JDG',
'1Rois': '1KI',
'2Rois': '2KI',
'1Chroniques': '1CH',
'2Chroniques': '2CH',
'Nehemie': 'NEH',
'Psaumes': 'PSA',
'Proverbes': 'PRO',
'Ecclesiaste': 'ECC',
'Cantiques': 'SNG',
'Esaie': 'ISA',
'Jeremie': 'JER',
'Lamentation': 'LAM',
'Osee': 'HOS',
'Michee': 'MIC',
'Sophonie': 'ZEP',
'Aggee': 'HAG',
'Zacharie': 'ZEC',
'Malachie': 'MAL',
# Indonesian
'Matius': 'MAT',
'Markus': 'MRK',
'Lukas': 'LUK',
'Yohanes': 'JHN',
'Kisah_Rasul': 'ACT',
'Roma': 'ROM',
'1Korintus': '1CO',
'2Korintus': '2CO',
'Galatia': 'GAL',
'Efesus': 'EPH',
'Filipi': 'PHP',
'Kolose': 'COL',
'1Tesalonika': '1TH',
'2Tesalonika': '2TH',
'1Timotius': '1TI',
'2Timotius': '2TI',
'Ibrani': 'HEB',
'Yakobus': 'JAS',
'1Petrus': '1PE',
'2Petrus': '2PE',
'1Yohanes': '1JN',
'2Yohanes': '2JN',
'3Yohanes': '3JN',
'Yudas': 'JUD',
'Wahyu': 'REV',
# Maasina Fulfulde
'Matthieu': 'MAT',
'Marc': 'MRK',
'Luc': 'LUK',
'Jean': 'JHN',
'Actes': 'ACT',
'Romains': 'ROM',
'1Corinthiens': '1CO',
'2Corinthiens': '2CO',
'Galates': 'GAL',
'Ephesiens': 'EPH',
'Philippiens': 'PHP',
'Colossiens': 'COL',
'1Thess': '1TH',
'2Thess': '2TH',
'1Timothee': '1TI',
'2Timothee': '2TI',
'Tite': 'TIT',
'Philemon': 'PHM',
'Hebreux': 'HEB',
'Jacques': 'JAS',
'1Pierre': '1PE',
'2Pierre': '2PE',
'1Jean': '1JN',
'2Jean': '2JN',
'3Jean': '3JN',
'Jude': 'JUD',
'Apocalypse': 'REV',
# Kolibugan Subanonm Southern Phillipines skn
'PONOGNAAN': 'GEN',
'YUNUS': 'JON',
'MARKUS': 'MRK',
'LUKAS': 'LUK',
'YAHIYA': 'JHN',
'MGA_GINANG': 'ACT',
'GALATIYA': 'GAL',
'EPESUS': 'EPH',
'PILIPI': 'PHP',
'KOLOSAS': 'COL',
'1TESALONIKA': '1TH',
'2TESALONIKA': '2TH',
'1TIMUTI': '1TI',
'2TIMUTI': '2TI',
'TITUS': 'TIT',
'YAKUB': 'JAS',
# Malay
'Kejadian': 'GEN',
'Keluaran': 'EXO',
'Imamat': 'LEV',
'Bilangan': 'NUM',
'Ulangan': 'DEU',
'Yosua': 'JOS',
'Hakim-hakim': 'JDG',
'1Raja-raja': '1KI',
'2Raja-raja': '2KI',
'1Tawarikh': '1CH',
'2Tawarikh': '2CH',
'Nehemia': 'NEH',
'Ayub': 'JOB',
'Mazmur': 'PSA',
'Amsal': 'PRO',
'Pengkhotbah': 'ECC',
'Kidung': 'SNG',
'Yesaya': 'ISA',
'Yeremia': 'JER',
'Ratapan': 'LAM',
'Yehezkiel': 'EZK',
'Yoel': 'JOL',
'Obaja': 'OBA',
'Yunus': 'JON',
'Mikha': 'MIC',
'Habakuk': 'HAB',
'Zefanya': 'ZEP',
'Hagai': 'HAG',
'Zakharia': 'ZEC',
'Maleakhi': 'MAL'
}
result = books.get(bookName, None)
return result
#NT Order:
#Traditional
#Russian
#Plautdietsch
#Finnish
#OT Order:
#Masoretic-Christian
#Masoretic-Tanakh
#Septuagint
#Vulgate
##
## Old Testament Book orders
##
def TraditionalOT(self, sequence):
traditional = {
'A01': 'GEN',
'A02': 'EXO',
'A03': 'LEV',
'A04': 'NUM',
'A05': 'DEU',
'A06': 'JOS',
'A07': 'JDG',
'A08': 'RUT',
'A09': '1SA',
'A10': '2SA',
'A11': '1KI',
'A12': '2KI',
'A13': '1CH',
'A14': '2CH',
'A15': 'EZR',
'A16': 'NEH',
'A17': 'EST',
'A18': 'JOB',
'A19': 'PSA',
'A20': 'PRO',
'A21': 'ECC',
'A22': 'SNG',
'A23': 'ISA',
'A24': 'JER',
'A25': 'LAM',
'A26': 'EZK',
'A27': 'DAN',
'A28': 'HOS',
'A29': 'JOL',
'A30': 'AMO',
'A31': 'OBA',
'A32': 'JON',
'A33': 'MIC',
'A34': 'NAM',
'A35': 'HAB',
'A36': 'ZEP',
'A37': 'HAG',
'A38': 'ZEC',
'A39': 'MAL'
}
return traditional.get(sequence, None)
def MasoreticChristianOT(self, sequence):
    """Masoretic-Christian OT order; identical to the Traditional order."""
    return self.TraditionalOT(sequence)
def HebrewOT(self, sequence):
    """Map a Hebrew (Tanakh) OT sequence code ('A01'-'A39') to its USFM book ID, or None."""
    order = [
        # Torah and Former Prophets
        'GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG',
        '1SA', '2SA', '1KI', '2KI',
        # Latter Prophets
        'ISA', 'JER', 'EZK', 'HOS', 'JOL', 'AMO', 'OBA', 'JON',
        'MIC', 'NAM', 'HAB', 'ZEP', 'HAG', 'ZEC', 'MAL',
        # Writings
        'PSA', 'PRO', 'JOB', 'SNG', 'RUT', 'LAM', 'ECC', 'EST',
        'DAN', 'EZR', 'NEH', '1CH', '2CH',
    ]
    lookup = dict(('A%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
def MasoreticTanakhOT(self, sequence):
    """Masoretic-Tanakh OT order; identical to the Hebrew order."""
    return self.HebrewOT(sequence)
def CatholicOT(self, sequence):  # This is my name for it. It must be corrected
    """Map a Catholic OT sequence code ('A01'-'A46') to its USFM book ID, or None.

    Includes the deuterocanonical books (TOB, JDT, 1MA, 2MA, WIS, SIR, BAR)
    and DAG (Daniel with Greek additions).
    """
    order = [
        'GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT',
        '1SA', '2SA', '1KI', '2KI', '1CH', '2CH', 'EZR', 'NEH',
        'TOB', 'JDT', 'EST', '1MA', '2MA',
        'JOB', 'PSA', 'PRO', 'ECC', 'SNG', 'WIS', 'SIR',
        'ISA', 'JER', 'LAM', 'BAR', 'EZK', 'DAG',
        'HOS', 'JOL', 'AMO', 'OBA', 'JON', 'MIC', 'NAM', 'HAB',
        'ZEP', 'HAG', 'ZEC', 'MAL',
    ]
    lookup = dict(('A%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
def VulgateOT(self, sequence):
    """Vulgate OT order; the sequence is not yet known, so every lookup yields None."""
    ## To be added when sequence is known
    return None
def SeptuagintOT(self, sequence):
    """Map a Septuagint OT sequence code ('A01'-'A50') to its USFM book ID, or None."""
    order = [
        'GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT',
        '1SA', '2SA', '1KI', '2KI', '1CH', '2CH',
        'EZA',  # 1 Esdras -- not sure EZA is the correct code
        'EZR', 'NEH',
        'TOB',  # Tobit
        'JDT',  # Judith
        'ESG',  # EST with additions
        '1MA', '2MA', '3MA',  # 1-3 Maccabees
        'PSA',
        # I think prayer of Manassa goes here
        'JOB', 'PRO', 'ECC', 'SNG',
        'WIS',  # Wisdom of Solomon
        'SIR',  # Sirach
        # Does Psalm of Solomon go here
        'HOS', 'AMO', 'MIC', 'JOL', 'OBA', 'JON',
        'NAM', 'HAB', 'ZEP', 'HAG', 'ZEC', 'MAL',
        'ISA', 'JER',
        'BAR',  # First book of Baruch
        'LAM',
        'LJE',  # Epistle of Jeremiah
        'EZK',
        'DAG',  # DAN with additions
        '4MA',  # 4Maccabees
    ]
    lookup = dict(('A%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
def Septuagint2OT(self, sequence):
    """Map a Septuagint variant OT sequence code ('A01'-'A52') to its USFM book ID, or None.

    Differs from SeptuagintOT by adding PS2 (Psalm 151) after PSA and SUS
    (Susanna) before 4MA.
    """
    order = [
        'GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT',
        '1SA', '2SA', '1KI', '2KI', '1CH', '2CH',
        'EZA',  # 1 Esdras -- not sure EZA is the correct code
        'EZR', 'NEH',
        'TOB',  # Tobit
        'JDT',  # Judith
        'ESG',  # EST with additions
        '1MA', '2MA', '3MA',  # 1-3 Maccabees
        'PSA',
        'PS2',  # special code for PSA 151
        'JOB', 'PRO', 'ECC', 'SNG',
        'WIS',  # Wisdom of Solomon
        'SIR',  # Sirach
        # Does Psalm of Solomon go here
        'HOS', 'AMO', 'MIC', 'JOL', 'OBA', 'JON',
        'NAM', 'HAB', 'ZEP', 'HAG', 'ZEC', 'MAL',
        'ISA', 'JER',
        'BAR',  # First book of Baruch
        'LAM',
        'LJE',  # Epistle of Jeremiah
        'EZK',
        'DAG',  # DAN with additions
        'SUS',  # Susanna
        '4MA',  # 4Maccabees
    ]
    lookup = dict(('A%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
def DutchTraditionalOT(self, sequence):
    """Dutch traditional OT order; the sequence is not yet known, so every lookup yields None."""
    ## To be added when sequence is known
    return None
def TRNNTMOT(self, sequence):
    """Map a TRNNTM (partial) OT sequence code ('A01'-'A20') to its USFM book ID, or None."""
    order = [
        'GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT',
        '1SA', '2SA', '1CH', '2CH', '2KI', 'ISA', 'JER', 'MIC',
        'HOS', 'PSA', 'ZEC', 'JON',
    ]
    lookup = dict(('A%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
##
## New Testament book orders
##
def TraditionalNT(self, sequence):
    """Map a Traditional NT sequence code ('B01'-'B27') to its USFM book ID, or None."""
    order = [
        'MAT', 'MRK', 'LUK', 'JHN', 'ACT', 'ROM', '1CO', '2CO',
        'GAL', 'EPH', 'PHP', 'COL', '1TH', '2TH', '1TI', '2TI',
        'TIT', 'PHM', 'HEB', 'JAS', '1PE', '2PE', '1JN', '2JN',
        '3JN', 'JUD', 'REV',
    ]
    lookup = dict(('B%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
def RussianNT(self, sequence):
    """Map a Russian NT sequence code ('B01'-'B27') to its USFM book ID, or None.

    The general epistles (JAS..JUD) come directly after Acts, before the
    Pauline epistles.
    """
    order = [
        'MAT', 'MRK', 'LUK', 'JHN', 'ACT',
        'JAS', '1PE', '2PE', '1JN', '2JN', '3JN', 'JUD',
        'ROM', '1CO', '2CO', 'GAL', 'EPH', 'PHP', 'COL',
        '1TH', '2TH', '1TI', '2TI', 'TIT', 'PHM', 'HEB', 'REV',
    ]
    lookup = dict(('B%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
def PlautdietschNT(self, sequence):
    """Map a Plautdietsch NT sequence code ('B01'-'B27') to its USFM book ID, or None."""
    order = [
        'MAT', 'MRK', 'LUK', 'JHN', 'ACT', 'ROM', '1CO', '2CO',
        'GAL', 'EPH', 'PHP', 'COL', '1TH', '2TH', '1TI', '2TI',
        'TIT', 'PHM', '1PE', '2PE', '1JN', '2JN', '3JN', 'HEB',
        'JAS', 'JUD', 'REV',
    ]
    lookup = dict(('B%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
def FinnishNT(self, sequence):
    """Map a Finnish NT sequence code ('B01'-'B27') to its USFM book ID, or None."""
    order = [
        'MAT', 'MRK', 'LUK', 'JHN', 'ACT', 'ROM', '1CO', '2CO',
        'GAL', 'EPH', 'PHP', 'COL', '1TH', '2TH', '1TI', '2TI',
        'TIT', 'PHM', 'HEB', '1PE', '2PE', '1JN', '2JN', '3JN',
        'JAS', 'JUD', 'REV',
    ]
    lookup = dict(('B%02d' % (pos + 1), book) for pos, book in enumerate(order))
    return lookup.get(sequence, None)
| class Booknames:
def usfm_book_id(self, bookName):
books = {'Genesis': 'GEN', 'Exodus': 'EXO', 'Leviticus': 'LEV', 'Numbers': 'NUM', 'Deuteronomy': 'DEU', 'Joshua': 'JOS', 'Judges': 'JDG', 'Ruth': 'RUT', '1Samuel': '1SA', '2Samuel': '2SA', '1Kings': '1KI', '2Kings': '2KI', '1Chronicles': '1CH', '2Chronicles': '2CH', 'Ezra': 'EZR', '1Ezra': 'EZR', 'Nehemiah': 'NEH', '2Ezra': 'NEH', 'Esther': 'EST', 'Job': 'JOB', 'Psalms': 'PSA', 'PSALM': 'PSA', 'Proverbs': 'PRO', 'PROVERBS': 'PRO', 'Ecclesiastes': 'ECC', 'SongofSongs': 'SNG', 'Song': 'SNG', 'Isaiah': 'ISA', 'Jeremiah': 'JER', 'Lamentations': 'LAM', 'Ezekiel': 'EZK', 'Daniel': 'DAN', 'Hosea': 'HOS', 'Joel': 'JOL', 'Amos': 'AMO', 'Obadiah': 'OBA', 'Jonah': 'JON', 'Micah': 'MIC', 'Nahum': 'NAM', 'Habakkuk': 'HAB', 'Zephaniah': 'ZEP', 'Haggai': 'HAG', 'Zechariah': 'ZEC', 'Zachariah': 'ZEC', 'Malachi': 'MAL', 'Matthew': 'MAT', 'Mark': 'MRK', 'Luke': 'LUK', 'John': 'JHN', 'Acts': 'ACT', 'Romans': 'ROM', '1Corinthians': '1CO', '2Corinthians': '2CO', 'Galatians': 'GAL', 'Ephesians': 'EPH', 'Philippians': 'PHP', 'Phil': 'PHP', 'Colossians': 'COL', '1Thess': '1TH', '1Thessalonians': '1TH', '2Thess': '2TH', '2Thessalonians': '2TH', '1Timothy': '1TI', '2Timothy': '2TI', 'Titus': 'TIT', 'Philemon': 'PHM', 'Hebrews': 'HEB', 'James': 'JAS', '1Peter': '1PE', '2Peter': '2PE', '1John': '1JN', '2John': '2JN', '3John': '3JN', 'Jude': 'JUD', 'Revelation': 'REV', 'Tobit': 'TOB', 'Judith': 'JDT', 'Wisdom': 'WIS', 'Sirach': 'SIR', 'Baruch': 'BAR', 'EpistJeremia': 'LJE', 'EpistJeremiah': 'LJE', '1Maccabees': '1MA', '2Maccabees': '2MA', '3Maccabees': '3MA', '4Maccabees': '4MA', 'Exodo': 'EXO', 'Levitico': 'LEV', 'Levetico': 'LEV', 'Numeros': 'NUM', 'Deuteronomio': 'DEU', 'Josue': 'JOS', 'Jueces': 'JDG', 'Rut': 'RUT', '1Reyes': '1KI', '2Reyes': '2KI', '1Cronicas': '1CH', '2Cronicas': '2CH', 'Esdras': 'EZR', 'Nehemias': 'NEH', 'Ester': 'EST', 'Salmos': 'PSA', 'Salmo': 'PSA', 'salmo': 'PSA', 'Proverbios': 'PRO', 'Eclesiastes': 'ECC', 'Cantaras': 'SNG', 'Isaias': 'ISA', 'Jeremias': 'JER', 
'Lamentacione': 'LAM', 'Ezequiel': 'EZK', 'Oseas': 'HOS', 'Abdias': 'OBA', 'Jonas': 'JON', 'Miqueas': 'MIC', 'Habacuc': 'HAB', 'Sofonias': 'ZEP', 'Hageo': 'HAG', 'Zacarias': 'ZEC', 'Malaquias': 'MAL', 'San_Mateo': 'MAT', 'San_Marcos': 'MRK', 'San_Lucas': 'LUK', 'San_Juan': 'JHN', 'San_1uan': 'JHN', 'Hechos': 'ACT', 'Romanos': 'ROM', '1Corintios': '1CO', '2Corintios': '2CO', 'Galatas': 'GAL', 'Efesios': 'EPH', 'Filipenses': 'PHP', 'Colosenses': 'COL', '1Tes': '1TH', '2Tes': '2TH', '1Timoteo': '1TI', '2Timoteo': '2TI', 'Tito': 'TIT', 'Filemon': 'PHM', 'Hebreos': 'HEB', 'Santiago': 'JAS', '1San_Pedro': '1PE', '2San_Pedro': '2PE', '1San_Juan': '1JN', '1San_1uan': '1JN', '2San_Juan': '2JN', '2San_1uan': '2JN', '3San_Juan': '3JN', '3San_1uan': '3JN', 'Judas': 'JUD', '1udas': 'JUD', 'Apocalipsis': 'REV', 'Genesis_Capitulo': 'GEN', 'Exodo_Capitulo': 'EXO', 'Levetico_Capitulo': 'LEV', 'Numeros_Capitulo': 'NUM', 'Deuteronomio_Capitulo': 'DEU', 'Josue_Capitulo': 'JOS', 'Jueces_Capitulo': 'JDG', 'Ruth_Capitulo': 'RUT', '1Samuel_Capitulo': '1SA', '2Samuel_Capitulo': '2SA', '1Reyes_Capitulo': '1KI', '2Reyes_Capitulo': '2KI', 'Esdras_Capitulo': 'EZR', 'Esther_Capitulo': 'EST', 'Job_Capitulo': 'JOB', 'Salmos_Capitulo': 'PSA', 'Proverbios_Capitulo': 'PRO', 'Isaias_Capitulo': 'ISA', 'Ezequiel_Capitulo': 'EZK', 'Daniel_Capitulo': 'DAN', 'Oseas_Capitulo': 'HOS', 'Joel_Capitulo': 'JOL', 'Amos_Capitulo': 'AMO', 'Abdias_Capitulo': 'OBA', 'Jonas_Capitulo': 'JON', 'Miqueas_Capitulo': 'MIC', 'Nahum_Capitulo': 'NAM', 'Habacuc_Capitulo': 'HAB', 'Sofonias_Capitulo': 'ZEP', 'GEN': 'GEN', 'EXO': 'EXO', 'LEV': 'LEV', 'NUM': 'NUM', 'DEU': 'DEU', 'JOS': 'JOS', 'JUE': 'JDG', '1SA': '1SA', '2SA': '2SA', '1RE': '1KI', '2RE': '2KI', '1CR': '1CH', '2CR': '2CH', 'ESD': 'EZR', 'NEH': 'NEH', 'JOB': 'JOB', 'SAL': 'PSA', 'PRO': 'PRO', 'ISA': 'ISA', 'EZE': 'EZK', 'DAN': 'DAN', 'JOE': 'JOL', 'AMO': 'AMO', 'MIQ': 'MIC', 'HAB': 'HAB', 'SOF': 'ZEP', 'ZAC': 'ZEC', 'MAL': 'MAL', 'Juizes': 'JDG', '1Reis': '1KI', 
'2Reis': '2KI', 'Neemias': 'NEH', 'Cantares': 'SNG', 'Lamentacoes': 'LAM', 'Obadias': 'OBA', 'Naum': 'NAM', 'Ageu': 'HAG', 'S_Mateus': 'MAT', 'S_Marcos': 'MRK', 'S_Lucas': 'LUK', 'S_Joao': 'JHN', 'Atos': 'ACT', 'Colossenses': 'COL', '1Tess': '1TH', '2Tess': '2TH', 'Hebreus': 'HEB', 'S_Tiago': 'JAS', '1Pedro': '1PE', '2Pedro': '2PE', '1S_Joao': '1JN', '2S_Joao': '2JN', '3S_Joao': '3JN', 'S_Judas': 'JUD', 'Apocalipse': 'REV', 'Genese': 'GEN', 'Exode': 'EXO', 'Levitique': 'LEV', 'Nombres': 'NUM', 'Deuteronome': 'DEU', 'Juges': 'JDG', '1Rois': '1KI', '2Rois': '2KI', '1Chroniques': '1CH', '2Chroniques': '2CH', 'Nehemie': 'NEH', 'Psaumes': 'PSA', 'Proverbes': 'PRO', 'Ecclesiaste': 'ECC', 'Cantiques': 'SNG', 'Esaie': 'ISA', 'Jeremie': 'JER', 'Lamentation': 'LAM', 'Osee': 'HOS', 'Michee': 'MIC', 'Sophonie': 'ZEP', 'Aggee': 'HAG', 'Zacharie': 'ZEC', 'Malachie': 'MAL', 'Matius': 'MAT', 'Markus': 'MRK', 'Lukas': 'LUK', 'Yohanes': 'JHN', 'Kisah_Rasul': 'ACT', 'Roma': 'ROM', '1Korintus': '1CO', '2Korintus': '2CO', 'Galatia': 'GAL', 'Efesus': 'EPH', 'Filipi': 'PHP', 'Kolose': 'COL', '1Tesalonika': '1TH', '2Tesalonika': '2TH', '1Timotius': '1TI', '2Timotius': '2TI', 'Ibrani': 'HEB', 'Yakobus': 'JAS', '1Petrus': '1PE', '2Petrus': '2PE', '1Yohanes': '1JN', '2Yohanes': '2JN', '3Yohanes': '3JN', 'Yudas': 'JUD', 'Wahyu': 'REV', 'Matthieu': 'MAT', 'Marc': 'MRK', 'Luc': 'LUK', 'Jean': 'JHN', 'Actes': 'ACT', 'Romains': 'ROM', '1Corinthiens': '1CO', '2Corinthiens': '2CO', 'Galates': 'GAL', 'Ephesiens': 'EPH', 'Philippiens': 'PHP', 'Colossiens': 'COL', '1Thess': '1TH', '2Thess': '2TH', '1Timothee': '1TI', '2Timothee': '2TI', 'Tite': 'TIT', 'Philemon': 'PHM', 'Hebreux': 'HEB', 'Jacques': 'JAS', '1Pierre': '1PE', '2Pierre': '2PE', '1Jean': '1JN', '2Jean': '2JN', '3Jean': '3JN', 'Jude': 'JUD', 'Apocalypse': 'REV', 'PONOGNAAN': 'GEN', 'YUNUS': 'JON', 'MARKUS': 'MRK', 'LUKAS': 'LUK', 'YAHIYA': 'JHN', 'MGA_GINANG': 'ACT', 'GALATIYA': 'GAL', 'EPESUS': 'EPH', 'PILIPI': 'PHP', 'KOLOSAS': 'COL', 
'1TESALONIKA': '1TH', '2TESALONIKA': '2TH', '1TIMUTI': '1TI', '2TIMUTI': '2TI', 'TITUS': 'TIT', 'YAKUB': 'JAS', 'Kejadian': 'GEN', 'Keluaran': 'EXO', 'Imamat': 'LEV', 'Bilangan': 'NUM', 'Ulangan': 'DEU', 'Yosua': 'JOS', 'Hakim-hakim': 'JDG', '1Raja-raja': '1KI', '2Raja-raja': '2KI', '1Tawarikh': '1CH', '2Tawarikh': '2CH', 'Nehemia': 'NEH', 'Ayub': 'JOB', 'Mazmur': 'PSA', 'Amsal': 'PRO', 'Pengkhotbah': 'ECC', 'Kidung': 'SNG', 'Yesaya': 'ISA', 'Yeremia': 'JER', 'Ratapan': 'LAM', 'Yehezkiel': 'EZK', 'Yoel': 'JOL', 'Obaja': 'OBA', 'Yunus': 'JON', 'Mikha': 'MIC', 'Habakuk': 'HAB', 'Zefanya': 'ZEP', 'Hagai': 'HAG', 'Zakharia': 'ZEC', 'Maleakhi': 'MAL'}
result = books.get(bookName, None)
return result
def traditional_ot(self, sequence):
traditional = {'A01': 'GEN', 'A02': 'EXO', 'A03': 'LEV', 'A04': 'NUM', 'A05': 'DEU', 'A06': 'JOS', 'A07': 'JDG', 'A08': 'RUT', 'A09': '1SA', 'A10': '2SA', 'A11': '1KI', 'A12': '2KI', 'A13': '1CH', 'A14': '2CH', 'A15': 'EZR', 'A16': 'NEH', 'A17': 'EST', 'A18': 'JOB', 'A19': 'PSA', 'A20': 'PRO', 'A21': 'ECC', 'A22': 'SNG', 'A23': 'ISA', 'A24': 'JER', 'A25': 'LAM', 'A26': 'EZK', 'A27': 'DAN', 'A28': 'HOS', 'A29': 'JOL', 'A30': 'AMO', 'A31': 'OBA', 'A32': 'JON', 'A33': 'MIC', 'A34': 'NAM', 'A35': 'HAB', 'A36': 'ZEP', 'A37': 'HAG', 'A38': 'ZEC', 'A39': 'MAL'}
return traditional.get(sequence, None)
def masoretic_christian_ot(self, sequence):
return self.TraditionalOT(sequence)
def hebrew_ot(self, sequence):
hebrew = {'A01': 'GEN', 'A02': 'EXO', 'A03': 'LEV', 'A04': 'NUM', 'A05': 'DEU', 'A06': 'JOS', 'A07': 'JDG', 'A08': '1SA', 'A09': '2SA', 'A10': '1KI', 'A11': '2KI', 'A12': 'ISA', 'A13': 'JER', 'A14': 'EZK', 'A15': 'HOS', 'A16': 'JOL', 'A17': 'AMO', 'A18': 'OBA', 'A19': 'JON', 'A20': 'MIC', 'A21': 'NAM', 'A22': 'HAB', 'A23': 'ZEP', 'A24': 'HAG', 'A25': 'ZEC', 'A26': 'MAL', 'A27': 'PSA', 'A28': 'PRO', 'A29': 'JOB', 'A30': 'SNG', 'A31': 'RUT', 'A32': 'LAM', 'A33': 'ECC', 'A34': 'EST', 'A35': 'DAN', 'A36': 'EZR', 'A37': 'NEH', 'A38': '1CH', 'A39': '2CH'}
return hebrew.get(sequence, None)
def masoretic_tanakh_ot(self, sequence):
return self.HebrewOT(sequence)
def catholic_ot(self, sequence):
catholic = {'A01': 'GEN', 'A02': 'EXO', 'A03': 'LEV', 'A04': 'NUM', 'A05': 'DEU', 'A06': 'JOS', 'A07': 'JDG', 'A08': 'RUT', 'A09': '1SA', 'A10': '2SA', 'A11': '1KI', 'A12': '2KI', 'A13': '1CH', 'A14': '2CH', 'A15': 'EZR', 'A16': 'NEH', 'A17': 'TOB', 'A18': 'JDT', 'A19': 'EST', 'A20': '1MA', 'A21': '2MA', 'A22': 'JOB', 'A23': 'PSA', 'A24': 'PRO', 'A25': 'ECC', 'A26': 'SNG', 'A27': 'WIS', 'A28': 'SIR', 'A29': 'ISA', 'A30': 'JER', 'A31': 'LAM', 'A32': 'BAR', 'A33': 'EZK', 'A34': 'DAG', 'A35': 'HOS', 'A36': 'JOL', 'A37': 'AMO', 'A38': 'OBA', 'A39': 'JON', 'A40': 'MIC', 'A41': 'NAM', 'A42': 'HAB', 'A43': 'ZEP', 'A44': 'HAG', 'A45': 'ZEC', 'A46': 'MAL'}
return catholic.get(sequence, None)
def vulgate_ot(self, sequence):
vulgate = {}
return vulgate.get(sequence, None)
def septuagint_ot(self, sequence):
septuagint = {'A01': 'GEN', 'A02': 'EXO', 'A03': 'LEV', 'A04': 'NUM', 'A05': 'DEU', 'A06': 'JOS', 'A07': 'JDG', 'A08': 'RUT', 'A09': '1SA', 'A10': '2SA', 'A11': '1KI', 'A12': '2KI', 'A13': '1CH', 'A14': '2CH', 'A15': 'EZA', 'A16': 'EZR', 'A17': 'NEH', 'A18': 'TOB', 'A19': 'JDT', 'A20': 'ESG', 'A21': '1MA', 'A22': '2MA', 'A23': '3MA', 'A24': 'PSA', 'A25': 'JOB', 'A26': 'PRO', 'A27': 'ECC', 'A28': 'SNG', 'A29': 'WIS', 'A30': 'SIR', 'A31': 'HOS', 'A32': 'AMO', 'A33': 'MIC', 'A34': 'JOL', 'A35': 'OBA', 'A36': 'JON', 'A37': 'NAM', 'A38': 'HAB', 'A39': 'ZEP', 'A40': 'HAG', 'A41': 'ZEC', 'A42': 'MAL', 'A43': 'ISA', 'A44': 'JER', 'A45': 'BAR', 'A46': 'LAM', 'A47': 'LJE', 'A48': 'EZK', 'A49': 'DAG', 'A50': '4MA'}
return septuagint.get(sequence, None)
def septuagint2_ot(self, sequence):
septuagint = {'A01': 'GEN', 'A02': 'EXO', 'A03': 'LEV', 'A04': 'NUM', 'A05': 'DEU', 'A06': 'JOS', 'A07': 'JDG', 'A08': 'RUT', 'A09': '1SA', 'A10': '2SA', 'A11': '1KI', 'A12': '2KI', 'A13': '1CH', 'A14': '2CH', 'A15': 'EZA', 'A16': 'EZR', 'A17': 'NEH', 'A18': 'TOB', 'A19': 'JDT', 'A20': 'ESG', 'A21': '1MA', 'A22': '2MA', 'A23': '3MA', 'A24': 'PSA', 'A25': 'PS2', 'A26': 'JOB', 'A27': 'PRO', 'A28': 'ECC', 'A29': 'SNG', 'A30': 'WIS', 'A31': 'SIR', 'A32': 'HOS', 'A33': 'AMO', 'A34': 'MIC', 'A35': 'JOL', 'A36': 'OBA', 'A37': 'JON', 'A38': 'NAM', 'A39': 'HAB', 'A40': 'ZEP', 'A41': 'HAG', 'A42': 'ZEC', 'A43': 'MAL', 'A44': 'ISA', 'A45': 'JER', 'A46': 'BAR', 'A47': 'LAM', 'A48': 'LJE', 'A49': 'EZK', 'A50': 'DAG', 'A51': 'SUS', 'A52': '4MA'}
return septuagint.get(sequence, None)
def dutch_traditional_ot(self, sequence):
dutch = {}
return dutch.get(sequence, None)
def trnntmot(self, sequence):
trnntm = {'A01': 'GEN', 'A02': 'EXO', 'A03': 'LEV', 'A04': 'NUM', 'A05': 'DEU', 'A06': 'JOS', 'A07': 'JDG', 'A08': 'RUT', 'A09': '1SA', 'A10': '2SA', 'A11': '1CH', 'A12': '2CH', 'A13': '2KI', 'A14': 'ISA', 'A15': 'JER', 'A16': 'MIC', 'A17': 'HOS', 'A18': 'PSA', 'A19': 'ZEC', 'A20': 'JON'}
return trnntm.get(sequence, None)
def traditional_nt(self, sequence):
traditional = {'B01': 'MAT', 'B02': 'MRK', 'B03': 'LUK', 'B04': 'JHN', 'B05': 'ACT', 'B06': 'ROM', 'B07': '1CO', 'B08': '2CO', 'B09': 'GAL', 'B10': 'EPH', 'B11': 'PHP', 'B12': 'COL', 'B13': '1TH', 'B14': '2TH', 'B15': '1TI', 'B16': '2TI', 'B17': 'TIT', 'B18': 'PHM', 'B19': 'HEB', 'B20': 'JAS', 'B21': '1PE', 'B22': '2PE', 'B23': '1JN', 'B24': '2JN', 'B25': '3JN', 'B26': 'JUD', 'B27': 'REV'}
return traditional.get(sequence, None)
def russian_nt(self, sequence):
russian = {'B01': 'MAT', 'B02': 'MRK', 'B03': 'LUK', 'B04': 'JHN', 'B05': 'ACT', 'B06': 'JAS', 'B07': '1PE', 'B08': '2PE', 'B09': '1JN', 'B10': '2JN', 'B11': '3JN', 'B12': 'JUD', 'B13': 'ROM', 'B14': '1CO', 'B15': '2CO', 'B16': 'GAL', 'B17': 'EPH', 'B18': 'PHP', 'B19': 'COL', 'B20': '1TH', 'B21': '2TH', 'B22': '1TI', 'B23': '2TI', 'B24': 'TIT', 'B25': 'PHM', 'B26': 'HEB', 'B27': 'REV'}
return russian.get(sequence, None)
def plautdietsch_nt(self, sequence):
diestsch = {'B01': 'MAT', 'B02': 'MRK', 'B03': 'LUK', 'B04': 'JHN', 'B05': 'ACT', 'B06': 'ROM', 'B07': '1CO', 'B08': '2CO', 'B09': 'GAL', 'B10': 'EPH', 'B11': 'PHP', 'B12': 'COL', 'B13': '1TH', 'B14': '2TH', 'B15': '1TI', 'B16': '2TI', 'B17': 'TIT', 'B18': 'PHM', 'B19': '1PE', 'B20': '2PE', 'B21': '1JN', 'B22': '2JN', 'B23': '3JN', 'B24': 'HEB', 'B25': 'JAS', 'B26': 'JUD', 'B27': 'REV'}
return diestsch.get(sequence, None)
def finnish_nt(self, sequence):
finnish = {'B01': 'MAT', 'B02': 'MRK', 'B03': 'LUK', 'B04': 'JHN', 'B05': 'ACT', 'B06': 'ROM', 'B07': '1CO', 'B08': '2CO', 'B09': 'GAL', 'B10': 'EPH', 'B11': 'PHP', 'B12': 'COL', 'B13': '1TH', 'B14': '2TH', 'B15': '1TI', 'B16': '2TI', 'B17': 'TIT', 'B18': 'PHM', 'B19': 'HEB', 'B20': '1PE', 'B21': '2PE', 'B22': '1JN', 'B23': '2JN', 'B24': '3JN', 'B25': 'JAS', 'B26': 'JUD', 'B27': 'REV'}
return finnish.get(sequence, None) |
class Blocktype(basestring):
    """
    32-bit|64-bit
    Possible values:
    <ul>
    <li> "32_bit" ,
    <li> "64_bit"
    </ul>
    """
    # NOTE(review): `basestring` is a Python 2-only builtin; this class cannot
    # be defined under Python 3. Confirm the target runtime before porting.
    # Returns the field name used for this type in the API.
    @staticmethod
    def get_api_name():
        return "blocktype"
| class Blocktype(basestring):
"""
32-bit|64-bit
Possible values:
<ul>
<li> "32_bit" ,
<li> "64_bit"
</ul>
"""
@staticmethod
def get_api_name():
return 'blocktype' |
# pylint: disable=missing-function-docstring, missing-module-docstring
a = 4  # starts as an int
a += 5.0  # int += float rebinds `a` to the float 9.0
| a = 4
a += 5.0 |
# encoding: utf-8
class FastCGIError(Exception):
    """Raised when a peer violates the FastCGI wire protocol."""
# Values for the type field of FCGI_Header.
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11

# Maps each record type to the name of the Channel method that handles it.
typeNames = {
    FCGI_BEGIN_REQUEST: 'fcgi_begin_request',
    FCGI_ABORT_REQUEST: 'fcgi_abort_request',
    FCGI_END_REQUEST: 'fcgi_end_request',
    FCGI_PARAMS: 'fcgi_params',
    FCGI_STDIN: 'fcgi_stdin',
    FCGI_STDOUT: 'fcgi_stdout',
    FCGI_STDERR: 'fcgi_stderr',
    FCGI_DATA: 'fcgi_data',
    FCGI_GET_VALUES: 'fcgi_get_values',
    FCGI_GET_VALUES_RESULT: 'fcgi_get_values_result',
    FCGI_UNKNOWN_TYPE: 'fcgi_unknown_type',
}

# Mask for the flags field of FCGI_BeginRequestBody.
FCGI_KEEP_CONN = 1

# Values for the role field of FCGI_BeginRequestBody.
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3

# Values for the protocolStatus field of FCGI_EndRequestBody.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3

# Largest payload that fits in one record (the length field is 16 bits).
FCGI_MAX_PACKET_LEN = 0xFFFF
class Record(object):
    """One FastCGI record: an 8-byte header plus content padded to 8 bytes."""

    def __init__(self, type, reqId, content='', version=1):
        self.version = version
        self.type = type
        self.reqId = reqId
        self.content = content
        self.length = len(content)
        if self.length > FCGI_MAX_PACKET_LEN:
            raise ValueError("Record length too long: %d > %d" %
                             (self.length, FCGI_MAX_PACKET_LEN))
        # Pad the content up to the next 8-byte boundary.
        if self.length % 8 != 0:
            self.padding = 8 - (self.length & 7)
        else:
            self.padding = 0
        self.reserved = 0

    @classmethod
    def fromHeaderString(cls, rec):
        """Parse an 8-byte header into a Record whose content is not yet read."""
        self = object.__new__(cls)
        self.version = ord(rec[0])
        self.type = ord(rec[1])
        self.reqId = (ord(rec[2]) << 8) | ord(rec[3])
        self.length = (ord(rec[4]) << 8) | ord(rec[5])
        self.padding = ord(rec[6])
        self.reserved = ord(rec[7])
        self.content = None
        return self

    def toOutputString(self):
        """Serialize header + content + padding for the wire."""
        header = "%c%c%c%c%c%c%c%c" % (
            self.version, self.type,
            (self.reqId & 0xFF00) >> 8, self.reqId & 0xFF,
            (self.length & 0xFF00) >> 8, self.length & 0xFF,
            self.padding, self.reserved)
        return header + self.content + '\0' * self.padding

    def totalLength(self):
        """Total bytes on the wire: 8-byte header + content + padding."""
        return 8 + self.length + self.padding

    def __repr__(self):
        return "<FastCGIRecord version=%d type=%d(%s) reqId=%d>" % (
            self.version, self.type, typeNames.get(self.type), self.reqId)
def parseNameValues(s):
    '''
    Parse FastCGI name/value pair data.

    @param s: String containing valid name/value data, of the form:
        'namelength + valuelength + name + value' repeated 0 or more
        times. See C{fastcgi.writeNameValue} for how to create this
        string.
    @return: Generator of tuples of the form (name, value)
    '''
    pos = 0
    while pos < len(s):
        lengths = []
        for _ in (0, 1):
            n = ord(s[pos])
            pos += 1
            if n & 0x80:
                # High bit set: 4-byte big-endian length, top bit masked off.
                n = ((n & 0x7F) << 24 | ord(s[pos]) << 16 |
                     ord(s[pos + 1]) << 8 | ord(s[pos + 2]))
                pos += 3
            lengths.append(n)
        nameLen, valueLen = lengths
        yield (s[pos:pos + nameLen], s[pos + nameLen:pos + nameLen + valueLen])
        pos += nameLen + valueLen
def getLenBytes(length):
    """Encode *length* in FastCGI name/value length format (1 byte, or 4 with the high bit set)."""
    if length < 0x80:
        return chr(length)
    if length > 0x7FFFFFFF:
        raise ValueError("Name length too long.")
    return (chr(0x80 | (length >> 24) & 0x7F) + chr((length >> 16) & 0xFF) +
            chr((length >> 8) & 0xFF) + chr(length & 0xFF))
def writeNameValue(name, value):
    """Serialize one name/value pair in FastCGI format (inverse of parseNameValues)."""
    return getLenBytes(len(name)) + getLenBytes(len(value)) + name + value
class Channel(object):
    """
    One FastCGI connection to a web server, serving one request at a time
    (we advertise FCGI_MPXS_CONNS=0, so requests are never multiplexed).

    Incoming records are dispatched by packetReceived() to the fcgi_* handler
    named in typeNames. The class supports both a blocking socket style
    (read/readPacket over self.sock) and a push style (dataReceived over a
    transport), mirroring the original implementation.
    """
    maxConnections = 100  # limit advertised in FCGI_GET_VALUES replies
    reqId = 0             # id of the in-flight request; 0 means idle
    request = None        # active request object, created by makeRequest()

    ## High level protocol
    def packetReceived(self, packet):
        '''
        Dispatch one parsed record to its fcgi_* handler method.

        @param packet: instance of C{fastcgi.Record}.
        @raise FastCGIError: on an unsupported protocol version or a record
            type with no entry in C{typeNames}.
        '''
        if packet.version != 1:
            raise FastCGIError("FastCGI packet received with version != 1")
        funName = typeNames.get(packet.type)
        if funName is None:
            raise FastCGIError("Unknown FastCGI packet type: %d" % packet.type)
        getattr(self, funName)(packet)

    def fcgi_get_values(self, packet):
        """Answer a management record asking for our capability limits."""
        if packet.reqId != 0:
            raise ValueError("Should be 0!")
        content = ""
        for name, value in parseNameValues(packet.content):
            outval = None
            if name == "FCGI_MAX_CONNS":
                outval = str(self.maxConnections)
            elif name == "FCGI_MAX_REQS":
                outval = str(self.maxConnections)
            elif name == "FCGI_MPXS_CONNS":
                outval = "0"  # we never multiplex requests on one connection
            if outval:
                content += writeNameValue(name, outval)
        self.writePacket(Record(FCGI_GET_VALUES_RESULT, 0, content))

    def fcgi_unknown_type(self, packet):
        # Unused, reserved for future expansion
        pass

    def fcgi_begin_request(self, packet):
        """Start a new request; content is 16-bit role + 8-bit flags."""
        role = ord(packet.content[0])<<8 | ord(packet.content[1])
        flags = ord(packet.content[2])
        if packet.reqId == 0:
            raise ValueError("ReqId shouldn't be 0!")
        if self.reqId != 0:
            # A request is already in flight and we advertise no multiplexing.
            # NOTE(review): this rejects but does not return, so the new
            # request still replaces the current one below -- confirm intended.
            self.writePacket(Record(FCGI_END_REQUEST, self.reqId,
                "\0\0\0\0"+chr(FCGI_CANT_MPX_CONN)+"\0\0\0"))
        if role != FCGI_RESPONDER:
            # Only the responder role is implemented.
            # NOTE(review): also falls through after rejecting -- confirm.
            self.writePacket(Record(FCGI_END_REQUEST, self.reqId,
                "\0\0\0\0"+chr(FCGI_UNKNOWN_ROLE)+"\0\0\0"))
        self.reqId = packet.reqId
        self.keepalive = flags & FCGI_KEEP_CONN
        self.params = ""

    def fcgi_abort_request(self, packet):
        """Web server aborted the request; records for other ids are ignored."""
        if packet.reqId != self.reqId:
            return
        self.request.connectionLost()

    def fcgi_params(self, packet):
        """Buffer CGI param data; an empty record ends the stream and starts the request."""
        if packet.reqId != self.reqId:
            return
        # I don't feel like doing the work to incrementally parse this stupid
        # protocol, so we'll just buffer all the params data before parsing.
        if not packet.content:
            self.makeRequest(dict(parseNameValues(self.params)))
            self.request.process()
        self.params += packet.content

    def fcgi_stdin(self, packet):
        """Feed request body data to the request; an empty record means EOF."""
        if packet.reqId != self.reqId:
            return
        if not packet.content:
            self.request.handleContentComplete()
        else:
            self.request.handleContentChunk(packet.content)

    def fcgi_data(self, packet):
        # For filter roles only, which is currently unsupported.
        pass

    def write(self, data):
        """Send *data* to the web server as one or more FCGI_STDOUT records."""
        if len(data) > FCGI_MAX_PACKET_LEN:
            # Split an oversized payload into maximum-size records.
            n = 0
            while 1:
                d = data[n*FCGI_MAX_PACKET_LEN:(n+1)*FCGI_MAX_PACKET_LEN]
                if not d:
                    break
                self.write(d)
                # FIX: advance to the next chunk; previously n was never
                # incremented, so the loop re-sent the first chunk forever.
                n += 1
            return
        self.writePacket(Record(FCGI_STDOUT, self.reqId, data))

    def writeHeaders(self, code, headers):
        """Emit a CGI-style Status line followed by the response headers."""
        l = []
        # NOTE(review): `responsecode` is not defined in this module's visible
        # code; presumably imported at the top of the file -- confirm.
        code_message = responsecode.RESPONSES.get(code, "Unknown Status")
        l.append("Status: %s %s\n" % (code, code_message))
        if headers is not None:
            for name, valuelist in headers.getAllRawHeaders():
                for value in valuelist:
                    l.append("%s: %s\n" % (name, value))
        l.append('\n')
        self.write(''.join(l))

    def finish(self):
        """End the current request; close the connection unless keepalive was requested."""
        if self.request is None:
            raise RuntimeError("Request.finish called when no request was outstanding.")
        self.writePacket(Record(FCGI_END_REQUEST, self.reqId,
            "\0\0\0\0"+chr(FCGI_REQUEST_COMPLETE)+"\0\0\0"))
        # Remove the instance attributes so the class-level idle defaults
        # (reqId=0, request=None) show through again.
        del self.reqId, self.request
        if not self.keepalive:
            self.transport.loseConnection()

    ## Low level protocol
    paused = False      # producer paused: buffer input, don't dispatch
    _lastRecord = None  # header of a record whose content hasn't fully arrived
    recvd = ""          # raw bytes buffered by dataReceived()

    def writePacket(self, packet):
        """Serialize and send one record on the blocking socket."""
        data = packet.toOutputString()
        self.sock.sendall(data)

    def read(self, length):
        """Blocking read of exactly *length* bytes from the socket."""
        s = ''
        while len(s) < length:
            # FIX: accumulate partial reads. Previously each recv() result
            # *replaced* s, silently discarding earlier bytes on short reads.
            # NOTE(review): a closed socket (recv() == '') still loops forever
            # here, as in the original -- confirm desired EOF handling.
            s += self.sock.recv(length - len(s))
        return s

    def readPacket(self, tryrecv=False):
        """Read one complete record; with tryrecv, attempt a non-blocking header read first."""
        if tryrecv:
            try:
                self.sock.setblocking(0)
                s = self.sock.recv(8)
            finally:
                self.sock.setblocking(1)
            if len(s) < 8:
                s += self.read(8-len(s))
        else:
            s = self.read(8)
        record = Record.fromHeaderString(s)
        if record.length:
            record.content = self.read(record.length)
        if record.padding:
            self.read(record.padding)  # discard alignment padding
        return record

    def dataReceived(self, recd):
        """Push-mode feed: buffer incoming bytes and dispatch every complete record."""
        self.recvd = self.recvd + recd
        record = self._lastRecord
        self._lastRecord = None
        while len(self.recvd) >= 8 and not self.paused:
            if record is None:
                record = Record.fromHeaderString(self.recvd[:8])
            if len(self.recvd) < record.totalLength():
                # Content not fully buffered yet; keep the parsed header.
                self._lastRecord = record
                break
            record.content = self.recvd[8:record.length+8]
            self.recvd = self.recvd[record.totalLength():]
            self.packetReceived(record)
            record = None

    def pauseProducing(self):
        self.paused = True
        self.transport.pauseProducing()

    def resumeProducing(self):
        self.paused = False
        self.transport.resumeProducing()
        self.dataReceived('')  # drain anything buffered while paused

    def stopProducing(self):
        self.paused = True
        self.transport.stopProducing()
| class Fastcgierror(Exception):
pass
fcgi_begin_request = 1
fcgi_abort_request = 2
fcgi_end_request = 3
fcgi_params = 4
fcgi_stdin = 5
fcgi_stdout = 6
fcgi_stderr = 7
fcgi_data = 8
fcgi_get_values = 9
fcgi_get_values_result = 10
fcgi_unknown_type = 11
type_names = {FCGI_BEGIN_REQUEST: 'fcgi_begin_request', FCGI_ABORT_REQUEST: 'fcgi_abort_request', FCGI_END_REQUEST: 'fcgi_end_request', FCGI_PARAMS: 'fcgi_params', FCGI_STDIN: 'fcgi_stdin', FCGI_STDOUT: 'fcgi_stdout', FCGI_STDERR: 'fcgi_stderr', FCGI_DATA: 'fcgi_data', FCGI_GET_VALUES: 'fcgi_get_values', FCGI_GET_VALUES_RESULT: 'fcgi_get_values_result', FCGI_UNKNOWN_TYPE: 'fcgi_unknown_type'}
fcgi_keep_conn = 1
fcgi_responder = 1
fcgi_authorizer = 2
fcgi_filter = 3
fcgi_request_complete = 0
fcgi_cant_mpx_conn = 1
fcgi_overloaded = 2
fcgi_unknown_role = 3
fcgi_max_packet_len = 65535
class Record(object):
def __init__(self, type, reqId, content='', version=1):
self.version = version
self.type = type
self.reqId = reqId
self.content = content
self.length = len(content)
if self.length > FCGI_MAX_PACKET_LEN:
raise value_error('Record length too long: %d > %d' % (self.length, FCGI_MAX_PACKET_LEN))
if self.length % 8 != 0:
self.padding = 8 - (self.length & 7)
else:
self.padding = 0
self.reserved = 0
def from_header_string(clz, rec):
self = object.__new__(clz)
self.version = ord(rec[0])
self.type = ord(rec[1])
self.reqId = ord(rec[2]) << 8 | ord(rec[3])
self.length = ord(rec[4]) << 8 | ord(rec[5])
self.padding = ord(rec[6])
self.reserved = ord(rec[7])
self.content = None
return self
from_header_string = classmethod(fromHeaderString)
def to_output_string(self):
return '%c%c%c%c%c%c%c%c' % (self.version, self.type, (self.reqId & 65280) >> 8, self.reqId & 255, (self.length & 65280) >> 8, self.length & 255, self.padding, self.reserved) + self.content + '\x00' * self.padding
def total_length(self):
return 8 + self.length + self.padding
def __repr__(self):
return '<FastCGIRecord version=%d type=%d(%s) reqId=%d>' % (self.version, self.type, typeNames.get(self.type), self.reqId)
def parse_name_values(s):
"""
@param s: String containing valid name/value data, of the form:
'namelength + valuelength + name + value' repeated 0 or more
times. See C{fastcgi.writeNameValue} for how to create this
string.
@return: Generator of tuples of the form (name, value)
"""
off = 0
while off < len(s):
name_len = ord(s[off])
off += 1
if nameLen & 128:
name_len = (nameLen & 127) << 24 | ord(s[off]) << 16 | ord(s[off + 1]) << 8 | ord(s[off + 2])
off += 3
value_len = ord(s[off])
off += 1
if valueLen & 128:
value_len = (valueLen & 127) << 24 | ord(s[off]) << 16 | ord(s[off + 1]) << 8 | ord(s[off + 2])
off += 3
yield (s[off:off + nameLen], s[off + nameLen:off + nameLen + valueLen])
off += nameLen + valueLen
def get_len_bytes(length):
if length < 128:
return chr(length)
elif 0 < length <= 2147483647:
return chr(128 | length >> 24 & 127) + chr(length >> 16 & 255) + chr(length >> 8 & 255) + chr(length & 255)
else:
raise value_error('Name length too long.')
def write_name_value(name, value):
return get_len_bytes(len(name)) + get_len_bytes(len(value)) + name + value
class Channel(object):
max_connections = 100
req_id = 0
request = None
def packet_received(self, packet):
"""
@param packet: instance of C{fastcgi.Record}.
@raise: FastCGIError on invalid version or where the type does not exist
in funName
"""
if packet.version != 1:
raise fast_cgi_error('FastCGI packet received with version != 1')
fun_name = typeNames.get(packet.type)
if funName is None:
raise fast_cgi_error('Unknown FastCGI packet type: %d' % packet.type)
getattr(self, funName)(packet)
def fcgi_get_values(self, packet):
if packet.reqId != 0:
raise value_error('Should be 0!')
content = ''
for (name, value) in parse_name_values(packet.content):
outval = None
if name == 'FCGI_MAX_CONNS':
outval = str(self.maxConnections)
elif name == 'FCGI_MAX_REQS':
outval = str(self.maxConnections)
elif name == 'FCGI_MPXS_CONNS':
outval = '0'
if outval:
content += write_name_value(name, outval)
self.writePacket(record(FCGI_GET_VALUES_RESULT, 0, content))
def fcgi_unknown_type(self, packet):
pass
def fcgi_begin_request(self, packet):
role = ord(packet.content[0]) << 8 | ord(packet.content[1])
flags = ord(packet.content[2])
if packet.reqId == 0:
raise value_error("ReqId shouldn't be 0!")
if self.reqId != 0:
self.writePacket(record(FCGI_END_REQUEST, self.reqId, '\x00\x00\x00\x00' + chr(FCGI_CANT_MPX_CONN) + '\x00\x00\x00'))
if role != FCGI_RESPONDER:
self.writePacket(record(FCGI_END_REQUEST, self.reqId, '\x00\x00\x00\x00' + chr(FCGI_UNKNOWN_ROLE) + '\x00\x00\x00'))
self.reqId = packet.reqId
self.keepalive = flags & FCGI_KEEP_CONN
self.params = ''
def fcgi_abort_request(self, packet):
if packet.reqId != self.reqId:
return
self.request.connectionLost()
def fcgi_params(self, packet):
if packet.reqId != self.reqId:
return
if not packet.content:
self.makeRequest(dict(parse_name_values(self.params)))
self.request.process()
self.params += packet.content
def fcgi_stdin(self, packet):
if packet.reqId != self.reqId:
return
if not packet.content:
self.request.handleContentComplete()
else:
self.request.handleContentChunk(packet.content)
def fcgi_data(self, packet):
pass
def write(self, data):
if len(data) > FCGI_MAX_PACKET_LEN:
n = 0
while 1:
d = data[n * FCGI_MAX_PACKET_LEN:(n + 1) * FCGI_MAX_PACKET_LEN]
if not d:
break
self.write(d)
return
self.writePacket(record(FCGI_STDOUT, self.reqId, data))
def write_headers(self, code, headers):
l = []
code_message = responsecode.RESPONSES.get(code, 'Unknown Status')
l.append('Status: %s %s\n' % (code, code_message))
if headers is not None:
for (name, valuelist) in headers.getAllRawHeaders():
for value in valuelist:
l.append('%s: %s\n' % (name, value))
l.append('\n')
self.write(''.join(l))
def finish(self):
if self.request is None:
raise runtime_error('Request.finish called when no request was outstanding.')
self.writePacket(record(FCGI_END_REQUEST, self.reqId, '\x00\x00\x00\x00' + chr(FCGI_REQUEST_COMPLETE) + '\x00\x00\x00'))
del self.reqId, self.request
if not self.keepalive:
self.transport.loseConnection()
paused = False
_last_record = None
recvd = ''
def write_packet(self, packet):
data = packet.toOutputString()
self.sock.sendall(data)
def read(self, length):
s = ''
while len(s) < length:
s = self.sock.recv(length - len(s))
return s
def read_packet(self, tryrecv=False):
if tryrecv:
try:
self.sock.setblocking(0)
s = self.sock.recv(8)
finally:
self.sock.setblocking(1)
if len(s) < 8:
s += self.read(8 - len(s))
else:
s = self.read(8)
record = Record.fromHeaderString(s)
if record.length:
record.content = self.read(record.length)
if record.padding:
self.read(record.padding)
return record
def data_received(self, recd):
self.recvd = self.recvd + recd
record = self._lastRecord
self._lastRecord = None
while len(self.recvd) >= 8 and (not self.paused):
if record is None:
record = Record.fromHeaderString(self.recvd[:8])
if len(self.recvd) < record.totalLength():
self._lastRecord = record
break
record.content = self.recvd[8:record.length + 8]
self.recvd = self.recvd[record.totalLength():]
self.packetReceived(record)
record = None
def pause_producing(self):
self.paused = True
self.transport.pauseProducing()
def resume_producing(self):
self.paused = False
self.transport.resumeProducing()
self.dataReceived('')
def stop_producing(self):
self.paused = True
self.transport.stopProducing() |
match x:
case Class( ):
pass
case Class( foo=1 ):
pass
case Class( foo=1, bar=2 ):
pass
case Class( foo=1, bar=2, ):
pass
| match x:
case Class():
pass
case Class(foo=1):
pass
case Class(foo=1, bar=2):
pass
case Class(foo=1, bar=2):
pass |
def deep_flatten(in_list):
out_list = []
for i in in_list:
if type(i) == str:
out_list.append(i)
elif hasattr(i,'__iter__'):
out_list.extend(deep_flatten(i))
else:
out_list.append(i)
return out_list
a = [(1,2),3,4,5,[4,4,5,6]]
a = [['cats', ['carl', 'cate']],['dogs', ['darlene', 'doug']]]
b = deep_flatten(a)
print(b) | def deep_flatten(in_list):
out_list = []
for i in in_list:
if type(i) == str:
out_list.append(i)
elif hasattr(i, '__iter__'):
out_list.extend(deep_flatten(i))
else:
out_list.append(i)
return out_list
a = [(1, 2), 3, 4, 5, [4, 4, 5, 6]]
a = [['cats', ['carl', 'cate']], ['dogs', ['darlene', 'doug']]]
b = deep_flatten(a)
print(b) |
"""Arcs BUILD rules."""
load(
"//third_party/bazel_rules/rules_kotlin/kotlin/js:js_library.bzl",
_kt_js_library = "kt_js_library",
)
load(
"//third_party/java/arcs/build_defs/internal:kotlin.bzl",
_arcs_kt_android_library = "arcs_kt_android_library",
_arcs_kt_android_test_suite = "arcs_kt_android_test_suite",
_arcs_kt_js_library = "arcs_kt_js_library",
_arcs_kt_jvm_library = "arcs_kt_jvm_library",
_arcs_kt_jvm_test_suite = "arcs_kt_jvm_test_suite",
_arcs_kt_library = "arcs_kt_library",
_arcs_kt_native_library = "arcs_kt_native_library",
_arcs_kt_particles = "arcs_kt_particles",
_arcs_kt_plan = "arcs_kt_plan",
_arcs_kt_plan_2 = "arcs_kt_plan_2",
)
load(
"//third_party/java/arcs/build_defs/internal:manifest.bzl",
_arcs_manifest = "arcs_manifest",
_arcs_manifest_bundle = "arcs_manifest_bundle",
_arcs_manifest_proto = "arcs_manifest_proto",
_arcs_proto_plan = "arcs_proto_plan",
)
load(
"//third_party/java/arcs/build_defs/internal:schemas.bzl",
_arcs_cc_schema = "arcs_cc_schema",
_arcs_kt_gen = "arcs_kt_gen",
_arcs_kt_schema = "arcs_kt_schema",
)
# Re-export rules from various other files.
# The default Arcs SDK to use.
DEFAULT_ARCS_SDK_DEPS = ["//third_party/java/arcs"]
arcs_cc_schema = _arcs_cc_schema
arcs_kt_android_library = _arcs_kt_android_library
arcs_kt_android_test_suite = _arcs_kt_android_test_suite
def arcs_kt_gen(**kwargs):
"""Wrapper around _arcs_kt_gen that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_gen
"""
kwargs.setdefault("arcs_sdk_deps", DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_gen(**kwargs)
arcs_kt_jvm_library = _arcs_kt_jvm_library
arcs_kt_jvm_test_suite = _arcs_kt_jvm_test_suite
arcs_kt_library = _arcs_kt_library
arcs_kt_js_library = _arcs_kt_js_library
arcs_kt_native_library = _arcs_kt_native_library
def arcs_kt_particles(**kwargs):
"""Wrapper around _arcs_kt_particles that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_particles
"""
kwargs.setdefault("arcs_sdk_deps", DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_particles(**kwargs)
def arcs_kt_plan(**kwargs):
"""Wrapper around _arcs_kt_plan that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_plan
"""
kwargs.setdefault("arcs_sdk_deps", DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_plan(**kwargs)
def arcs_kt_plan_2(**kwargs):
"""Wrapper around _arcs_kt_plan_2 that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_plan_2
"""
kwargs.setdefault("arcs_sdk_deps", DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_plan_2(**kwargs)
def arcs_kt_schema(**kwargs):
"""Wrapper around _arcs_kt_schema that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_schema
"""
kwargs.setdefault("arcs_sdk_deps", DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_schema(**kwargs)
arcs_manifest = _arcs_manifest
arcs_manifest_bundle = _arcs_manifest_bundle
arcs_manifest_proto = _arcs_manifest_proto
arcs_proto_plan = _arcs_proto_plan
kt_js_library = _kt_js_library
| """Arcs BUILD rules."""
load('//third_party/bazel_rules/rules_kotlin/kotlin/js:js_library.bzl', _kt_js_library='kt_js_library')
load('//third_party/java/arcs/build_defs/internal:kotlin.bzl', _arcs_kt_android_library='arcs_kt_android_library', _arcs_kt_android_test_suite='arcs_kt_android_test_suite', _arcs_kt_js_library='arcs_kt_js_library', _arcs_kt_jvm_library='arcs_kt_jvm_library', _arcs_kt_jvm_test_suite='arcs_kt_jvm_test_suite', _arcs_kt_library='arcs_kt_library', _arcs_kt_native_library='arcs_kt_native_library', _arcs_kt_particles='arcs_kt_particles', _arcs_kt_plan='arcs_kt_plan', _arcs_kt_plan_2='arcs_kt_plan_2')
load('//third_party/java/arcs/build_defs/internal:manifest.bzl', _arcs_manifest='arcs_manifest', _arcs_manifest_bundle='arcs_manifest_bundle', _arcs_manifest_proto='arcs_manifest_proto', _arcs_proto_plan='arcs_proto_plan')
load('//third_party/java/arcs/build_defs/internal:schemas.bzl', _arcs_cc_schema='arcs_cc_schema', _arcs_kt_gen='arcs_kt_gen', _arcs_kt_schema='arcs_kt_schema')
default_arcs_sdk_deps = ['//third_party/java/arcs']
arcs_cc_schema = _arcs_cc_schema
arcs_kt_android_library = _arcs_kt_android_library
arcs_kt_android_test_suite = _arcs_kt_android_test_suite
def arcs_kt_gen(**kwargs):
"""Wrapper around _arcs_kt_gen that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_gen
"""
kwargs.setdefault('arcs_sdk_deps', DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_gen(**kwargs)
arcs_kt_jvm_library = _arcs_kt_jvm_library
arcs_kt_jvm_test_suite = _arcs_kt_jvm_test_suite
arcs_kt_library = _arcs_kt_library
arcs_kt_js_library = _arcs_kt_js_library
arcs_kt_native_library = _arcs_kt_native_library
def arcs_kt_particles(**kwargs):
"""Wrapper around _arcs_kt_particles that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_particles
"""
kwargs.setdefault('arcs_sdk_deps', DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_particles(**kwargs)
def arcs_kt_plan(**kwargs):
"""Wrapper around _arcs_kt_plan that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_plan
"""
kwargs.setdefault('arcs_sdk_deps', DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_plan(**kwargs)
def arcs_kt_plan_2(**kwargs):
"""Wrapper around _arcs_kt_plan_2 that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_plan_2
"""
kwargs.setdefault('arcs_sdk_deps', DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_plan_2(**kwargs)
def arcs_kt_schema(**kwargs):
"""Wrapper around _arcs_kt_schema that sets the default Arcs SDK
Args:
**kwargs: Set of args to forward to _arcs_kt_schema
"""
kwargs.setdefault('arcs_sdk_deps', DEFAULT_ARCS_SDK_DEPS)
_arcs_kt_schema(**kwargs)
arcs_manifest = _arcs_manifest
arcs_manifest_bundle = _arcs_manifest_bundle
arcs_manifest_proto = _arcs_manifest_proto
arcs_proto_plan = _arcs_proto_plan
kt_js_library = _kt_js_library |
"""
This file is just for exporting stuffs :)
"""
def my_decorator(f):
return f
@my_decorator
def exported_func(abc: str, b: str = ", World!") -> str:
"""This is the
docstring from
an exported function
:param abc: Describing cool parameter `abc`
"""
return abc + b
class ExportedClass:
pass
| """
This file is just for exporting stuffs :)
"""
def my_decorator(f):
return f
@my_decorator
def exported_func(abc: str, b: str=', World!') -> str:
"""This is the
docstring from
an exported function
:param abc: Describing cool parameter `abc`
"""
return abc + b
class Exportedclass:
pass |
# Default constants
DF_ADSB = 17
LAT_REF = 44.807047 # TODO put reference values
LONG_REF = -0.605526
NZ=15
# Default variables
fe=20*10**6; Te=1/fe
Ds=1*10**6; Ts=1/Ds
Fse=int(round(Ts/Te))
Ns=112 # Number of points of the signal : 1000, 112
Nfft=512 # Number of points of FFT
# Default TEB parameters
SIG_NB_ERROR = 100
MAX_COUNT = 500
Nteb = 11 # OK step of 1 db
# Default sync parameters
Tp = 8*(10**(-6))
Fpe = int(round(Tp/Te))
# Init operators to modify default config
def init_ref(lat_ref, long_ref):
LAT_REF = lat_ref
LONG_REF = long_ref
def init_fe(in_fe): # better : do lamda functions for variable recalculations
fe=in_fe; Te=1/fe
Fse=int(round(Ts/Te))
Fpe = int(round(Tp/Te))
| df_adsb = 17
lat_ref = 44.807047
long_ref = -0.605526
nz = 15
fe = 20 * 10 ** 6
te = 1 / fe
ds = 1 * 10 ** 6
ts = 1 / Ds
fse = int(round(Ts / Te))
ns = 112
nfft = 512
sig_nb_error = 100
max_count = 500
nteb = 11
tp = 8 * 10 ** (-6)
fpe = int(round(Tp / Te))
def init_ref(lat_ref, long_ref):
lat_ref = lat_ref
long_ref = long_ref
def init_fe(in_fe):
fe = in_fe
te = 1 / fe
fse = int(round(Ts / Te))
fpe = int(round(Tp / Te)) |
# names of hurricanes
names = ['Cuba I', 'San Felipe II Okeechobee', 'Bahamas', 'Cuba II', 'CubaBrownsville', 'Tampico', 'Labor Day', 'New England', 'Carol', 'Janet', 'Carla', 'Hattie', 'Beulah', 'Camille', 'Edith', 'Anita', 'David', 'Allen', 'Gilbert', 'Hugo', 'Andrew', 'Mitch', 'Isabel', 'Ivan', 'Emily', 'Katrina', 'Rita', 'Wilma', 'Dean', 'Felix', 'Matthew', 'Irma', 'Maria', 'Michael']
# months of hurricanes
months = ['October', 'September', 'September', 'November', 'August', 'September', 'September', 'September', 'September', 'September', 'September', 'October', 'September', 'August', 'September', 'September', 'August', 'August', 'September', 'September', 'August', 'October', 'September', 'September', 'July', 'August', 'September', 'October', 'August', 'September', 'October', 'September', 'September', 'October']
# years of hurricanes
years = [1924, 1928, 1932, 1932, 1933, 1933, 1935, 1938, 1953, 1955, 1961, 1961, 1967, 1969, 1971, 1977, 1979, 1980, 1988, 1989, 1992, 1998, 2003, 2004, 2005, 2005, 2005, 2005, 2007, 2007, 2016, 2017, 2017, 2018]
# maximum sustained winds (mph) of hurricanes
max_sustained_winds = [165, 160, 160, 175, 160, 160, 185, 160, 160, 175, 175, 160, 160, 175, 160, 175, 175, 190, 185, 160, 175, 180, 165, 165, 160, 175, 180, 185, 175, 175, 165, 180, 175, 160]
# areas affected by each hurricane
areas_affected = [['Central America', 'Mexico', 'Cuba', 'Florida', 'The Bahamas'], ['Lesser Antilles', 'The Bahamas', 'United States East Coast', 'Atlantic Canada'], ['The Bahamas', 'Northeastern United States'], ['Lesser Antilles', 'Jamaica', 'Cayman Islands', 'Cuba', 'The Bahamas', 'Bermuda'], ['The Bahamas', 'Cuba', 'Florida', 'Texas', 'Tamaulipas'], ['Jamaica', 'Yucatn Peninsula'], ['The Bahamas', 'Florida', 'Georgia', 'The Carolinas', 'Virginia'], ['Southeastern United States', 'Northeastern United States', 'Southwestern Quebec'], ['Bermuda', 'New England', 'Atlantic Canada'], ['Lesser Antilles', 'Central America'], ['Texas', 'Louisiana', 'Midwestern United States'], ['Central America'], ['The Caribbean', 'Mexico', 'Texas'], ['Cuba', 'United States Gulf Coast'], ['The Caribbean', 'Central America', 'Mexico', 'United States Gulf Coast'], ['Mexico'], ['The Caribbean', 'United States East coast'], ['The Caribbean', 'Yucatn Peninsula', 'Mexico', 'South Texas'], ['Jamaica', 'Venezuela', 'Central America', 'Hispaniola', 'Mexico'], ['The Caribbean', 'United States East Coast'], ['The Bahamas', 'Florida', 'United States Gulf Coast'], ['Central America', 'Yucatn Peninsula', 'South Florida'], ['Greater Antilles', 'Bahamas', 'Eastern United States', 'Ontario'], ['The Caribbean', 'Venezuela', 'United States Gulf Coast'], ['Windward Islands', 'Jamaica', 'Mexico', 'Texas'], ['Bahamas', 'United States Gulf Coast'], ['Cuba', 'United States Gulf Coast'], ['Greater Antilles', 'Central America', 'Florida'], ['The Caribbean', 'Central America'], ['Nicaragua', 'Honduras'], ['Antilles', 'Venezuela', 'Colombia', 'United States East Coast', 'Atlantic Canada'], ['Cape Verde', 'The Caribbean', 'British Virgin Islands', 'U.S. Virgin Islands', 'Cuba', 'Florida'], ['Lesser Antilles', 'Virgin Islands', 'Puerto Rico', 'Dominican Republic', 'Turks and Caicos Islands'], ['Central America', 'United States Gulf Coast (especially Florida Panhandle)']]
# damages (USD($)) of hurricanes
damages = ['Damages not recorded', '100M', 'Damages not recorded', '40M', '27.9M', '5M', 'Damages not recorded', '306M', '2M', '65.8M', '326M', '60.3M', '208M', '1.42B', '25.4M', 'Damages not recorded', '1.54B', '1.24B', '7.1B', '10B', '26.5B', '6.2B', '5.37B', '23.3B', '1.01B', '125B', '12B', '29.4B', '1.76B', '720M', '15.1B', '64.8B', '91.6B', '25.1B']
# deaths for each hurricane
deaths = [90,4000,16,3103,179,184,408,682,5,1023,43,319,688,259,37,11,2068,269,318,107,65,19325,51,124,17,1836,125,87,45,133,603,138,3057,74]
# 1
# Update Recorded Damages
conversion = {"M": 1000000,
"B": 1000000000}
def convert_values(item):
if item == "Damages not recorded":
return "Damages not recorded"
elif item[-1] == 'M':
return int(float(item[:-1]) * 1000000)
elif item[-1] == 'B':
return int(float(item[:-1]) * 1000000000)
# test function by updating damages
damages = list(map(convert_values, damages))
print(damages)
print("*****************************************************")
# 2
# Create a Table
hurricane_dictionary = {}
for index in range(len(names)):
hurricane_dictionary[names[index]] = {'Name': names[index], 'Month':months[index], 'Year': years[index], 'Max Sustained Wind': max_sustained_winds[index], 'Areas Affected': areas_affected[index], 'Damage': damages[index], 'Deaths': deaths[index]}
print(hurricane_dictionary)
print("*****************************************************")
# Create and view the hurricanes dictionary
new_dictionary = {}
def organize_by_year(hurricanes):
hurricanes_by_year= dict()
for cane in hurricanes:
current_year = hurricanes[cane]['Year']
current_cane = hurricanes[cane]
if current_year not in hurricanes_by_year:
hurricanes_by_year[current_year] = [current_cane]
else:
hurricanes_by_year[current_year].append(current_cane)
return hurricanes_by_year
hurricanes_by_year = organize_by_year(hurricane_dictionary)
print(organize_by_year(hurricane_dictionary))
# 3
# Organizing by Year
# create a new dictionary of hurricanes with year and key
print("*****************************************************")
# 4
# Counting Damaged Areas
def organize_areas_by_count(hurricanes):
hurricanes_by_area = dict()
for cane in hurricanes:
current_areas = hurricanes[cane]['Areas Affected']
for area in current_areas:
if area not in hurricanes_by_area:
hurricanes_by_area[area] = 1
else:
hurricanes_by_area[area] += 1
return hurricanes_by_area
print(organize_areas_by_count(hurricane_dictionary))
# create dictionary of areas to store the number of hurricanes involved in
# 5
# Calculating Maximum Hurricane Count
def find_most_affected(hurricanes):
return list(organize_areas_by_count(hurricanes).items())[0]
# find most frequently affected area and the number of hurricanes involved in
print(find_most_affected(hurricane_dictionary))
print("*****************************************************")
# 6
# Calculating the Deadliest Hurricane
print(hurricane_dictionary)
def find_most_deaths(hurricanes):
highest_death = {'Deaths': 0}
for cane in hurricanes:
if hurricanes[cane].get('Deaths', 0) > int(list(highest_death.values())[0]):
highest_death = {hurricanes[cane]['Name']: hurricanes[cane]['Deaths']}
return highest_death
# find highest mortality hurricane and the number of deaths
print("The deadliest hurricane and the number of deaths: " + str(find_most_deaths(hurricane_dictionary)))
# 7
# Rating Hurricanes by Mortality
print("*****************************************************")
def rate_by_mortality(hurricanes):
new_dictionary = {0:[], 1:[], 2:[], 3:[], 4:[]}
for cane in hurricanes:
if hurricanes[cane]['Deaths'] == 0:
new_dictionary[0].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 100:
new_dictionary[1].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 500:
new_dictionary[2].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 1000:
new_dictionary[3].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 10000:
new_dictionary[4].append(hurricanes[cane])
return new_dictionary
print(rate_by_mortality(hurricane_dictionary))
# categorize hurricanes in new dictionary with mortality severity as key
print("*****************************************************")
# 8 Calculating Hurricane Maximum Damage
def find_most_damage(hurricanes):
highest_death = {'Damage': 0}
for cane in hurricanes:
if hurricanes[cane]['Damage'] == "Damages not recorded":
continue
elif hurricanes[cane]['Damage'] > int(list(highest_death.values())[0]):
highest_death = {hurricanes[cane]['Name']: hurricanes[cane]['Damage']}
return highest_death
# find highest damage inducing hurricane and its total cost
print("The most damaging hurricane and its damages: " + str(find_most_damage(hurricane_dictionary)))
# 9
# Rating Hurricanes by Damage
damage_scale = {0: 0,
1: 100000000,
2: 1000000000,
3: 10000000000,
4: 50000000000}
print("*****************************************************")
def rate_by_damages(hurricanes):
new_dictionary = {0:[], 1:[], 2:[], 3:[], 4:[]}
for cane in hurricanes:
if hurricanes[cane]['Damage'] == 'Damages not recorded':
new_dictionary[0].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] == 0:
new_dictionary[0].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 100000000:
new_dictionary[1].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 1000000000:
new_dictionary[2].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 10000000000:
new_dictionary[3].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 50000000000:
new_dictionary[4].append(hurricanes[cane])
return new_dictionary
print(rate_by_damages(hurricane_dictionary))
# categorize hurricanes in new dictionary with damage severity as key
| names = ['Cuba I', 'San Felipe II Okeechobee', 'Bahamas', 'Cuba II', 'CubaBrownsville', 'Tampico', 'Labor Day', 'New England', 'Carol', 'Janet', 'Carla', 'Hattie', 'Beulah', 'Camille', 'Edith', 'Anita', 'David', 'Allen', 'Gilbert', 'Hugo', 'Andrew', 'Mitch', 'Isabel', 'Ivan', 'Emily', 'Katrina', 'Rita', 'Wilma', 'Dean', 'Felix', 'Matthew', 'Irma', 'Maria', 'Michael']
months = ['October', 'September', 'September', 'November', 'August', 'September', 'September', 'September', 'September', 'September', 'September', 'October', 'September', 'August', 'September', 'September', 'August', 'August', 'September', 'September', 'August', 'October', 'September', 'September', 'July', 'August', 'September', 'October', 'August', 'September', 'October', 'September', 'September', 'October']
years = [1924, 1928, 1932, 1932, 1933, 1933, 1935, 1938, 1953, 1955, 1961, 1961, 1967, 1969, 1971, 1977, 1979, 1980, 1988, 1989, 1992, 1998, 2003, 2004, 2005, 2005, 2005, 2005, 2007, 2007, 2016, 2017, 2017, 2018]
max_sustained_winds = [165, 160, 160, 175, 160, 160, 185, 160, 160, 175, 175, 160, 160, 175, 160, 175, 175, 190, 185, 160, 175, 180, 165, 165, 160, 175, 180, 185, 175, 175, 165, 180, 175, 160]
areas_affected = [['Central America', 'Mexico', 'Cuba', 'Florida', 'The Bahamas'], ['Lesser Antilles', 'The Bahamas', 'United States East Coast', 'Atlantic Canada'], ['The Bahamas', 'Northeastern United States'], ['Lesser Antilles', 'Jamaica', 'Cayman Islands', 'Cuba', 'The Bahamas', 'Bermuda'], ['The Bahamas', 'Cuba', 'Florida', 'Texas', 'Tamaulipas'], ['Jamaica', 'Yucatn Peninsula'], ['The Bahamas', 'Florida', 'Georgia', 'The Carolinas', 'Virginia'], ['Southeastern United States', 'Northeastern United States', 'Southwestern Quebec'], ['Bermuda', 'New England', 'Atlantic Canada'], ['Lesser Antilles', 'Central America'], ['Texas', 'Louisiana', 'Midwestern United States'], ['Central America'], ['The Caribbean', 'Mexico', 'Texas'], ['Cuba', 'United States Gulf Coast'], ['The Caribbean', 'Central America', 'Mexico', 'United States Gulf Coast'], ['Mexico'], ['The Caribbean', 'United States East coast'], ['The Caribbean', 'Yucatn Peninsula', 'Mexico', 'South Texas'], ['Jamaica', 'Venezuela', 'Central America', 'Hispaniola', 'Mexico'], ['The Caribbean', 'United States East Coast'], ['The Bahamas', 'Florida', 'United States Gulf Coast'], ['Central America', 'Yucatn Peninsula', 'South Florida'], ['Greater Antilles', 'Bahamas', 'Eastern United States', 'Ontario'], ['The Caribbean', 'Venezuela', 'United States Gulf Coast'], ['Windward Islands', 'Jamaica', 'Mexico', 'Texas'], ['Bahamas', 'United States Gulf Coast'], ['Cuba', 'United States Gulf Coast'], ['Greater Antilles', 'Central America', 'Florida'], ['The Caribbean', 'Central America'], ['Nicaragua', 'Honduras'], ['Antilles', 'Venezuela', 'Colombia', 'United States East Coast', 'Atlantic Canada'], ['Cape Verde', 'The Caribbean', 'British Virgin Islands', 'U.S. Virgin Islands', 'Cuba', 'Florida'], ['Lesser Antilles', 'Virgin Islands', 'Puerto Rico', 'Dominican Republic', 'Turks and Caicos Islands'], ['Central America', 'United States Gulf Coast (especially Florida Panhandle)']]
damages = ['Damages not recorded', '100M', 'Damages not recorded', '40M', '27.9M', '5M', 'Damages not recorded', '306M', '2M', '65.8M', '326M', '60.3M', '208M', '1.42B', '25.4M', 'Damages not recorded', '1.54B', '1.24B', '7.1B', '10B', '26.5B', '6.2B', '5.37B', '23.3B', '1.01B', '125B', '12B', '29.4B', '1.76B', '720M', '15.1B', '64.8B', '91.6B', '25.1B']
deaths = [90, 4000, 16, 3103, 179, 184, 408, 682, 5, 1023, 43, 319, 688, 259, 37, 11, 2068, 269, 318, 107, 65, 19325, 51, 124, 17, 1836, 125, 87, 45, 133, 603, 138, 3057, 74]
conversion = {'M': 1000000, 'B': 1000000000}
def convert_values(item):
if item == 'Damages not recorded':
return 'Damages not recorded'
elif item[-1] == 'M':
return int(float(item[:-1]) * 1000000)
elif item[-1] == 'B':
return int(float(item[:-1]) * 1000000000)
damages = list(map(convert_values, damages))
print(damages)
print('*****************************************************')
hurricane_dictionary = {}
for index in range(len(names)):
hurricane_dictionary[names[index]] = {'Name': names[index], 'Month': months[index], 'Year': years[index], 'Max Sustained Wind': max_sustained_winds[index], 'Areas Affected': areas_affected[index], 'Damage': damages[index], 'Deaths': deaths[index]}
print(hurricane_dictionary)
print('*****************************************************')
new_dictionary = {}
def organize_by_year(hurricanes):
hurricanes_by_year = dict()
for cane in hurricanes:
current_year = hurricanes[cane]['Year']
current_cane = hurricanes[cane]
if current_year not in hurricanes_by_year:
hurricanes_by_year[current_year] = [current_cane]
else:
hurricanes_by_year[current_year].append(current_cane)
return hurricanes_by_year
hurricanes_by_year = organize_by_year(hurricane_dictionary)
print(organize_by_year(hurricane_dictionary))
print('*****************************************************')
def organize_areas_by_count(hurricanes):
hurricanes_by_area = dict()
for cane in hurricanes:
current_areas = hurricanes[cane]['Areas Affected']
for area in current_areas:
if area not in hurricanes_by_area:
hurricanes_by_area[area] = 1
else:
hurricanes_by_area[area] += 1
return hurricanes_by_area
print(organize_areas_by_count(hurricane_dictionary))
def find_most_affected(hurricanes):
return list(organize_areas_by_count(hurricanes).items())[0]
print(find_most_affected(hurricane_dictionary))
print('*****************************************************')
print(hurricane_dictionary)
def find_most_deaths(hurricanes):
highest_death = {'Deaths': 0}
for cane in hurricanes:
if hurricanes[cane].get('Deaths', 0) > int(list(highest_death.values())[0]):
highest_death = {hurricanes[cane]['Name']: hurricanes[cane]['Deaths']}
return highest_death
print('The deadliest hurricane and the number of deaths: ' + str(find_most_deaths(hurricane_dictionary)))
print('*****************************************************')
def rate_by_mortality(hurricanes):
new_dictionary = {0: [], 1: [], 2: [], 3: [], 4: []}
for cane in hurricanes:
if hurricanes[cane]['Deaths'] == 0:
new_dictionary[0].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 100:
new_dictionary[1].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 500:
new_dictionary[2].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 1000:
new_dictionary[3].append(hurricanes[cane])
elif hurricanes[cane]['Deaths'] <= 10000:
new_dictionary[4].append(hurricanes[cane])
return new_dictionary
print(rate_by_mortality(hurricane_dictionary))
print('*****************************************************')
def find_most_damage(hurricanes):
highest_death = {'Damage': 0}
for cane in hurricanes:
if hurricanes[cane]['Damage'] == 'Damages not recorded':
continue
elif hurricanes[cane]['Damage'] > int(list(highest_death.values())[0]):
highest_death = {hurricanes[cane]['Name']: hurricanes[cane]['Damage']}
return highest_death
print('The most damaging hurricane and its damages: ' + str(find_most_damage(hurricane_dictionary)))
damage_scale = {0: 0, 1: 100000000, 2: 1000000000, 3: 10000000000, 4: 50000000000}
print('*****************************************************')
def rate_by_damages(hurricanes):
new_dictionary = {0: [], 1: [], 2: [], 3: [], 4: []}
for cane in hurricanes:
if hurricanes[cane]['Damage'] == 'Damages not recorded':
new_dictionary[0].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] == 0:
new_dictionary[0].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 100000000:
new_dictionary[1].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 1000000000:
new_dictionary[2].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 10000000000:
new_dictionary[3].append(hurricanes[cane])
elif hurricanes[cane]['Damage'] <= 50000000000:
new_dictionary[4].append(hurricanes[cane])
return new_dictionary
print(rate_by_damages(hurricane_dictionary)) |
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
print(alien)
print('...')
aliens = []
for alien_number in range(30):
new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}
aliens.append(new_alien)
for alien in aliens[:3]:
if alien['color'] == 'green':
alien['color'] = 'yellow'
alien['poins'] = 10
alien['speed'] = 'medium'
elif alien['color'] == 'yellow':
alien['color'] = 'red'
alien['poins'] = 15
alien['speed'] = 'fast'
for alien in aliens[:5]:
print(alien)
print('...')
print('Total number of aliens: ' + str(len(aliens)))
| alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
print(alien)
print('...')
aliens = []
for alien_number in range(30):
new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}
aliens.append(new_alien)
for alien in aliens[:3]:
if alien['color'] == 'green':
alien['color'] = 'yellow'
alien['poins'] = 10
alien['speed'] = 'medium'
elif alien['color'] == 'yellow':
alien['color'] = 'red'
alien['poins'] = 15
alien['speed'] = 'fast'
for alien in aliens[:5]:
print(alien)
print('...')
print('Total number of aliens: ' + str(len(aliens))) |
#!/usr/bin/env python3
with open("day-1/data") as f:
expenses = f.readlines()
founditems = []
def accounting(i):
for j in expenses:
j = int(j.strip('\n'))
for x in expenses:
x = int(x.strip('\n'))
if i + j + x == 2020 and i * j * x not in founditems:
print("3-sum: " + str(i * j * x))
founditems.append(i * j * x)
if i + j == 2020 and i * j not in founditems:
print("2-sum: " + str(i * j))
founditems.append(i * j)
for i in expenses:
i = int(i.strip('\n'))
accounting(i = i)
| with open('day-1/data') as f:
expenses = f.readlines()
founditems = []
def accounting(i):
for j in expenses:
j = int(j.strip('\n'))
for x in expenses:
x = int(x.strip('\n'))
if i + j + x == 2020 and i * j * x not in founditems:
print('3-sum: ' + str(i * j * x))
founditems.append(i * j * x)
if i + j == 2020 and i * j not in founditems:
print('2-sum: ' + str(i * j))
founditems.append(i * j)
for i in expenses:
i = int(i.strip('\n'))
accounting(i=i) |
class Command(object):
CONTINUE = 1
SKIP_REMAINING = 2
def __init__(self, args=None, explicit=False):
self.explicit = explicit
self._args = args or []
@property
def args(self):
return self._args
def add_argument(self, arg):
self._args.append(arg)
def cmdline(self):
return [self.name] + self._args
def check_result(self, result):
return Command.CONTINUE
def __repr__(self):
return "Command(name={}, args={})".format(self.name, self._args)
class SynchronizeCommand(Command):
name = "synchronize"
class DeployCommand(Command):
# Flag constants for deploy return value
REPO_UNCHANGED = "repo_unchanged"
REPO_CHANGED = "repo_changed"
name = "deploy"
def check_result(self, result):
# For backwards compatibility
if not result:
return Command.CONTINUE
changed = any(result[v] == DeployCommand.REPO_CHANGED for v in result)
if not changed:
return Command.SKIP_REMAINING
else:
return Command.CONTINUE
class BuildCommand(Command):
name = "build"
class RestartCommand(Command):
name = "restart"
class WaitUntilComponentsReadyCommand(Command):
name = "wait-until-components-ready"
class GenericCommand(Command):
def __init__(self, name, args=None):
self.name = name
# Generic commands can only be added explicitly from the commandline.
super(GenericCommand, self).__init__(args=args, explicit=True)
| class Command(object):
continue = 1
skip_remaining = 2
def __init__(self, args=None, explicit=False):
self.explicit = explicit
self._args = args or []
@property
def args(self):
return self._args
def add_argument(self, arg):
self._args.append(arg)
def cmdline(self):
return [self.name] + self._args
def check_result(self, result):
return Command.CONTINUE
def __repr__(self):
return 'Command(name={}, args={})'.format(self.name, self._args)
class Synchronizecommand(Command):
name = 'synchronize'
class Deploycommand(Command):
repo_unchanged = 'repo_unchanged'
repo_changed = 'repo_changed'
name = 'deploy'
def check_result(self, result):
if not result:
return Command.CONTINUE
changed = any((result[v] == DeployCommand.REPO_CHANGED for v in result))
if not changed:
return Command.SKIP_REMAINING
else:
return Command.CONTINUE
class Buildcommand(Command):
name = 'build'
class Restartcommand(Command):
name = 'restart'
class Waituntilcomponentsreadycommand(Command):
name = 'wait-until-components-ready'
class Genericcommand(Command):
def __init__(self, name, args=None):
self.name = name
super(GenericCommand, self).__init__(args=args, explicit=True) |
# -*- coding: utf-8 -*-
"""
Created on Mar 23 11:57 2017
@author: Denis Tome'
"""
# Public API of this constants module. NORMALISATION_COEFFICIENT and
# BATCH_SIZE were defined below but missing from the export list; they
# are now included so `from ... import *` exposes every constant.
__all__ = [
    'VISIBLE_PART',
    'MIN_NUM_JOINTS',
    'CENTER_TR',
    'SIGMA',
    'STRIDE',
    'SIGMA_CENTER',
    'INPUT_SIZE',
    'OUTPUT_SIZE',
    'NUM_JOINTS',
    'NUM_OUTPUT',
    'H36M_NUM_JOINTS',
    'JOINT_DRAW_SIZE',
    'LIMB_DRAW_SIZE',
    'NORMALISATION_COEFFICIENT',
    'BATCH_SIZE'
]
# threshold
VISIBLE_PART = 1e-3
MIN_NUM_JOINTS = 5
CENTER_TR = 0.4
# net attributes
SIGMA = 7
STRIDE = 8
SIGMA_CENTER = 21
INPUT_SIZE = 368
OUTPUT_SIZE = 46
NUM_JOINTS = 14
NUM_OUTPUT = NUM_JOINTS + 1  # joints plus one background/center channel
H36M_NUM_JOINTS = 17
# draw options
JOINT_DRAW_SIZE = 3
LIMB_DRAW_SIZE = 2
NORMALISATION_COEFFICIENT = 1280*720
# test options
BATCH_SIZE = 4
| """
Created on Mar 23 11:57 2017
@author: Denis Tome'
"""
__all__ = ['VISIBLE_PART', 'MIN_NUM_JOINTS', 'CENTER_TR', 'SIGMA', 'STRIDE', 'SIGMA_CENTER', 'INPUT_SIZE', 'OUTPUT_SIZE', 'NUM_JOINTS', 'NUM_OUTPUT', 'H36M_NUM_JOINTS', 'JOINT_DRAW_SIZE', 'LIMB_DRAW_SIZE']
visible_part = 0.001
min_num_joints = 5
center_tr = 0.4
sigma = 7
stride = 8
sigma_center = 21
input_size = 368
output_size = 46
num_joints = 14
num_output = NUM_JOINTS + 1
h36_m_num_joints = 17
joint_draw_size = 3
limb_draw_size = 2
normalisation_coefficient = 1280 * 720
batch_size = 4 |
class SGD:
    """
    Implements vanilla SGD update
    """

    def update(self, w, d_w, learning_rate):
        """
        Performs SGD update
        Arguments:
        w, np array - weights
        d_w, np array, same shape as w - gradient
        learning_rate, float - learning rate

        Returns:
        updated_weights, np array same shape as w
        """
        step = learning_rate * d_w
        return w - step
class MomentumSGD:
    """
    Implements Momentum SGD update
    """

    def __init__(self, momentum=0.9):
        self.momentum = momentum
        # Running, exponentially-decayed gradient step from prior updates.
        self.velocity = 0

    def update(self, w, d_w, learning_rate):
        """
        Performs Momentum SGD update
        Arguments:
        w, np array - weights
        d_w, np array, same shape as w - gradient
        learning_rate, float - learning rate

        Returns:
        updated_weights, np array same shape as w
        """
        # v <- momentum * v - lr * grad: decayed history smooths the step.
        new_velocity = self.momentum * self.velocity - learning_rate * d_w
        self.velocity = new_velocity
        return w + new_velocity
| class Sgd:
"""
Implements vanilla SGD update
"""
def update(self, w, d_w, learning_rate):
"""
Performs SGD update
Arguments:
w, np array - weights
d_w, np array, same shape as w - gradient
learning_rate, float - learning rate
Returns:
updated_weights, np array same shape as w
"""
return w - d_w * learning_rate
class Momentumsgd:
"""
Implements Momentum SGD update
"""
def __init__(self, momentum=0.9):
self.momentum = momentum
self.velocity = 0
def update(self, w, d_w, learning_rate):
"""
Performs Momentum SGD update
Arguments:
w, np array - weights
d_w, np array, same shape as w - gradient
learning_rate, float - learning rate
Returns:
updated_weights, np array same shape as w
"""
self.velocity = self.momentum * self.velocity - learning_rate * d_w
return w + self.velocity |
# Cell description for the DFFNSRX4 negative-edge flip-flop with set/reset.
Desc = cellDescClass("DFFNSRX4")
Desc.properties["cell_footprint"] = "dffnsr"
Desc.properties["area"] = "116.424000"
Desc.properties["cell_leakage_power"] = "3723.759540"
Desc.pinOrder = ['CKN', 'D', 'IQ', 'IQN', 'Q', 'QN', 'RN', 'SN', 'next']
# Timing arcs as (source pin, target pin, arc type), in the original order.
for _src, _dst, _arc in [
    ("CKN", "D", "setup_falling"),
    ("CKN", "D", "hold_falling"),
    ("CKN", "SN", "setup_falling"),
    ("CKN", "SN", "hold_falling"),
    ("CKN", "RN", "setup_falling"),
    ("CKN", "RN", "hold_falling"),
    ("CKN", "Q", "falling_edge"),
    ("SN", "Q", "preset"),
    ("RN", "Q", "clear"),
    ("CKN", "QN", "falling_edge"),
    ("SN", "QN", "clear"),
    ("RN", "QN", "preset"),
]:
    Desc.add_arc(_src, _dst, _arc)
Desc.add_param("area", 116.424000)
# Input pins; CKN additionally carries the clock job.
Desc.add_pin("D", "input")
Desc.set_pin_job("CKN", "clock")
Desc.add_pin("CKN", "input")
Desc.add_pin("SN", "input")
Desc.add_pin("RN", "input")
# Output pins, each registered with an "unknown" pin function.
for _pin in ("Q", "QN", "IQ", "IQN", "next"):
    Desc.add_pin(_pin, "output")
    Desc.add_pin_func(_pin, "unknown")
Desc.set_job("flipflop")
CellLib["DFFNSRX4"] = Desc
| desc = cell_desc_class('DFFNSRX4')
Desc.properties['cell_footprint'] = 'dffnsr'
Desc.properties['area'] = '116.424000'
Desc.properties['cell_leakage_power'] = '3723.759540'
Desc.pinOrder = ['CKN', 'D', 'IQ', 'IQN', 'Q', 'QN', 'RN', 'SN', 'next']
Desc.add_arc('CKN', 'D', 'setup_falling')
Desc.add_arc('CKN', 'D', 'hold_falling')
Desc.add_arc('CKN', 'SN', 'setup_falling')
Desc.add_arc('CKN', 'SN', 'hold_falling')
Desc.add_arc('CKN', 'RN', 'setup_falling')
Desc.add_arc('CKN', 'RN', 'hold_falling')
Desc.add_arc('CKN', 'Q', 'falling_edge')
Desc.add_arc('SN', 'Q', 'preset')
Desc.add_arc('RN', 'Q', 'clear')
Desc.add_arc('CKN', 'QN', 'falling_edge')
Desc.add_arc('SN', 'QN', 'clear')
Desc.add_arc('RN', 'QN', 'preset')
Desc.add_param('area', 116.424)
Desc.add_pin('D', 'input')
Desc.set_pin_job('CKN', 'clock')
Desc.add_pin('CKN', 'input')
Desc.add_pin('SN', 'input')
Desc.add_pin('RN', 'input')
Desc.add_pin('Q', 'output')
Desc.add_pin_func('Q', 'unknown')
Desc.add_pin('QN', 'output')
Desc.add_pin_func('QN', 'unknown')
Desc.add_pin('IQ', 'output')
Desc.add_pin_func('IQ', 'unknown')
Desc.add_pin('IQN', 'output')
Desc.add_pin_func('IQN', 'unknown')
Desc.add_pin('next', 'output')
Desc.add_pin_func('next', 'unknown')
Desc.set_job('flipflop')
CellLib['DFFNSRX4'] = Desc |
class AbstractProvider(object):
    """Delegate class to provide requirement interface for the resolver."""
    def identify(self, requirement_or_candidate):
        """Given a requirement, return an identifier for it.
        This is used to identify a requirement, e.g. whether two requirements
        should have their specifier parts merged.
        """
        raise NotImplementedError
    def get_preference(self, identifier, resolutions, candidates, information):
        """Produce a sort key for given requirement based on preference.
        The preference is defined as "I think this requirement should be
        resolved first". The lower the return value is, the more preferred
        this group of arguments is.
        :param identifier: An identifier as returned by ``identify()``. This
            identifies the dependency matches of which should be returned.
        :param resolutions: Mapping of candidates currently pinned by the
            resolver. Each key is an identifier, and the value a candidate.
            The candidate may conflict with requirements from ``information``.
        :param candidates: Mapping of each dependency's possible candidates.
            Each value is an iterator of candidates.
        :param information: Mapping of requirement information of each package.
            Each value is an iterator of *requirement information*.
        A *requirement information* instance is a named tuple with two members:
        * ``requirement`` specifies a requirement contributing to the current
          list of candidates.
        * ``parent`` specifies the candidate that provides (depended on) the
          requirement, or ``None`` to indicate a root requirement.
        The preference could depend on a variety of issues, including (not
        necessarily in this order):
        * Is this package pinned in the current resolution result?
        * How relaxed is the requirement? Stricter ones should probably be
          worked on first? (I don't know, actually.)
        * How many possibilities are there to satisfy this requirement? Those
          with few left should likely be worked on first, I guess?
        * Are there any known conflicts for this requirement? We should
          probably work on those with the most known conflicts.
        A sortable value should be returned (this will be used as the ``key``
        parameter of the built-in sorting function). The smaller the value is,
        the more preferred this requirement is (i.e. the sorting function
        is called with ``reverse=False``).
        """
        raise NotImplementedError
    def find_matches(self, identifier, requirements, incompatibilities):
        """Find all possible candidates that satisfy given constraints.
        :param identifier: An identifier as returned by ``identify()``. This
            identifies the dependency matches of which should be returned.
        :param requirements: A mapping of requirements that all returned
            candidates must satisfy. Each key is an identifier, and the value
            an iterator of requirements for that dependency.
        :param incompatibilities: A mapping of known incompatibilities of
            each dependency. Each key is an identifier, and the value an
            iterator of incompatibilities known to the resolver. All
            incompatibilities *must* be excluded from the return value.
        This should try to get candidates based on the requirements' types.
        For VCS, local, and archive requirements, the one-and-only match is
        returned, and for a "named" requirement, the index(es) should be
        consulted to find concrete candidates for this requirement.
        The return value should produce candidates ordered by preference; the
        most preferred candidate should come first. The return type may be one
        of the following:
        * A callable that returns an iterator that yields candidates.
        * A collection of candidates.
        * An iterable of candidates. This will be consumed immediately into a
          list of candidates.
        """
        raise NotImplementedError
    def is_satisfied_by(self, requirement, candidate):
        """Whether the given requirement can be satisfied by a candidate.
        The candidate is guaranteed to have been generated from the
        requirement.
        A boolean should be returned to indicate whether ``candidate`` is a
        viable solution to the requirement.
        """
        raise NotImplementedError
    def get_dependencies(self, candidate):
        """Get dependencies of a candidate.
        This should return a collection of requirements that `candidate`
        specifies as its dependencies.
        """
        raise NotImplementedError
class AbstractResolver(object):
    """The thing that performs the actual resolution work."""
    # Base class of exceptions raised by ``resolve``; subclasses may narrow it.
    base_exception = Exception
    def __init__(self, provider, reporter):
        self.provider = provider
        self.reporter = reporter
    def resolve(self, requirements, **kwargs):
        """Take a collection of constraints, spit out the resolution result.
        This returns a representation of the final resolution state, with one
        guaranteed attribute ``mapping`` that contains resolved candidates as
        values. The keys are their respective identifiers.
        :param requirements: A collection of constraints.
        :param kwargs: Additional keyword arguments that subclasses may accept.
        :raises: ``self.base_exception`` or its subclass.
        """
        raise NotImplementedError
| class Abstractprovider(object):
"""Delegate class to provide requirement interface for the resolver."""
def identify(self, requirement_or_candidate):
"""Given a requirement, return an identifier for it.
This is used to identify a requirement, e.g. whether two requirements
should have their specifier parts merged.
"""
raise NotImplementedError
def get_preference(self, identifier, resolutions, candidates, information):
"""Produce a sort key for given requirement based on preference.
The preference is defined as "I think this requirement should be
resolved first". The lower the return value is, the more preferred
this group of arguments is.
:param identifier: An identifier as returned by ``identify()``. This
identifies the dependency matches of which should be returned.
:param resolutions: Mapping of candidates currently pinned by the
resolver. Each key is an identifier, and the value a candidate.
The candidate may conflict with requirements from ``information``.
:param candidates: Mapping of each dependency's possible candidates.
Each value is an iterator of candidates.
:param information: Mapping of requirement information of each package.
Each value is an iterator of *requirement information*.
A *requirement information* instance is a named tuple with two members:
* ``requirement`` specifies a requirement contributing to the current
list of candidates.
* ``parent`` specifies the candidate that provides (dependend on) the
requirement, or ``None`` to indicate a root requirement.
The preference could depend on a various of issues, including (not
necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should probably be
worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this requirement? Those
with few left should likely be worked on first, I guess?
* Are there any known conflicts for this requirement? We should
probably work on those with the most known conflicts.
A sortable value should be returned (this will be used as the ``key``
parameter of the built-in sorting function). The smaller the value is,
the more preferred this requirement is (i.e. the sorting function
is called with ``reverse=False``).
"""
raise NotImplementedError
def find_matches(self, identifier, requirements, incompatibilities):
"""Find all possible candidates that satisfy given constraints.
:param identifier: An identifier as returned by ``identify()``. This
identifies the dependency matches of which should be returned.
:param requirements: A mapping of requirements that all returned
candidates must satisfy. Each key is an identifier, and the value
an iterator of requirements for that dependency.
:param incompatibilities: A mapping of known incompatibilities of
each dependency. Each key is an identifier, and the value an
iterator of incompatibilities known to the resolver. All
incompatibilities *must* be excluded from the return value.
This should try to get candidates based on the requirements' types.
For VCS, local, and archive requirements, the one-and-only match is
returned, and for a "named" requirement, the index(es) should be
consulted to find concrete candidates for this requirement.
The return value should produce candidates ordered by preference; the
most preferred candidate should come first. The return type may be one
of the following:
* A callable that returns an iterator that yields candidates.
* An collection of candidates.
* An iterable of candidates. This will be consumed immediately into a
list of candidates.
"""
raise NotImplementedError
def is_satisfied_by(self, requirement, candidate):
"""Whether the given requirement can be satisfied by a candidate.
The candidate is guarenteed to have been generated from the
requirement.
A boolean should be returned to indicate whether ``candidate`` is a
viable solution to the requirement.
"""
raise NotImplementedError
def get_dependencies(self, candidate):
"""Get dependencies of a candidate.
This should return a collection of requirements that `candidate`
specifies as its dependencies.
"""
raise NotImplementedError
class Abstractresolver(object):
"""The thing that performs the actual resolution work."""
base_exception = Exception
def __init__(self, provider, reporter):
self.provider = provider
self.reporter = reporter
def resolve(self, requirements, **kwargs):
"""Take a collection of constraints, spit out the resolution result.
This returns a representation of the final resolution state, with one
guarenteed attribute ``mapping`` that contains resolved candidates as
values. The keys are their respective identifiers.
:param requirements: A collection of constraints.
:param kwargs: Additional keyword arguments that subclasses may accept.
:raises: ``self.base_exception`` or its subclass.
"""
raise NotImplementedError |
def _is_socket(path):  # -S
    """Return True if *path* refers to a UNIX domain socket (shell test -S).

    Accepts a path string, a file-like object (stat'ed by descriptor when
    the platform supports it, otherwise by its ``.name``), or any object
    exposing a pre-computed mode via a ``_mode`` attribute.
    """
    if not path:
        return False
    # Objects carrying a cached mode are answered without touching the OS.
    if hasattr(path, '_mode'):
        return stat.S_ISSOCK(path._mode)
    # File-like objects: prefer stat-by-descriptor where supported,
    # falling back to the file's name; plain paths are stat'ed directly.
    can_stat_fd = os.stat in os.supports_fd
    if hasattr(path, 'fileno') and can_stat_fd:
        target = path.fileno()
    elif hasattr(path, 'name'):
        target = path.name
    else:
        target = path
    return stat.S_ISSOCK(os.stat(target).st_mode)
| def _is_socket(path):
if not path:
return False
if hasattr(path, '_mode'):
return stat.S_ISSOCK(path._mode)
if hasattr(path, 'fileno') and os.stat in os.supports_fd:
path = path.fileno()
elif hasattr(path, 'name'):
path = path.name
return stat.S_ISSOCK(os.stat(path).st_mode) |
"""
This package contains both data models related to points of interest (POIs):
:class:`~cms.models.pois.poi.POI` and
:class:`~cms.models.pois.poi_translation.POITranslation`.
POIs can be used to store information which is relevant for one specific location, e.g. a place where local authorities
offer a public service. They can also be used as location for an :class:`~cms.models.events.event.Event`.
"""
| """
This package contains both data models related to points of interest (POIs):
:class:`~cms.models.pois.poi.POI` and
:class:`~cms.models.pois.poi_translation.POITranslation`.
POIs can be used to store information which is relevant for one specific location, e.g. a place where local authorities
offer a public service. They can also be used as location for an :class:`~cms.models.events.event.Event`.
""" |
#!/usr/bin/env python3
# Write a program that prints the reverse-complement of a DNA sequence
# You must use a loop and conditional
dna = 'ACTGAAAAAAAAAAA'
anti = ""
# Complement each base in turn; any base that is not A/T/C maps to 'C'
# (i.e. G -> C), matching the original conditional chain.
for base in dna:
    if base == 'A':
        anti += 'T'
    elif base == 'T':
        anti += 'A'
    elif base == 'C':
        anti += 'G'
    else:
        anti += 'C'
# [::-1] walks the complemented string backwards, giving the reverse-complement.
print(anti[::-1])
"""
python3 23anti.py
TTTTTTTTTTTCAGT
"""
#Done | dna = 'ACTGAAAAAAAAAAA'
anti = ''
for i in range(len(dna)):
if dna[i] == 'A':
anti += 'T'
elif dna[i] == 'T':
anti += 'A'
elif dna[i] == 'C':
anti += 'G'
else:
anti += 'C'
print(anti[::-1])
'\npython3 23anti.py\nTTTTTTTTTTTCAGT\n' |
"""
Write a modified version of the Matrix class (that was defined
one of the example problems in this section) with an __add__
operation as well as a __sub__ operation. It should add matrices,
assuming that they will be of the same length. Also, the unmodified
Matrix class code will be given.
"""
"""
This is the unmodified Matrix class code.
class Matrix:
def __init__(self,thelist: list):
self.thelist=thelist
for items in range(len(self.thelist)):
assert type(self.thelist[items])==list
assert len(self.thelist[0]) == len(self.thelist[items])
for things in range(len(self.thelist[items])):
assert type(self.thelist[items][things])==int
def __str__(self):
return str(self.thelist)
"""
# write your code below
| """
Write a modified version of the Matrix class(that was defined in
one of the example problems in this section) with an __add__
operation as well as a __sub__ operation. It should add matrices,
assuming that they will be of the same length. Also, the unmodified
Matrix class code will be given.
"""
'\nThis is the unmodified Matrix class code.\n\nclass Matrix:\n def __init__(self,thelist: list):\n self.thelist=thelist\n for items in range(len(self.thelist)):\n assert type(self.thelist[items])==list\n assert len(self.thelist[0]) == len(self.thelist[items])\n for things in range(len(self.thelist[items])):\n assert type(self.thelist[items][things])==int\n\n def __str__(self):\n return str(self.thelist)\n' |
# First addend is fixed; the second is read from standard input.
num1 = 10
num2 = int(input())


def add_sum():
    """Print the sum of the module-level num1 and num2."""
    # Named `total` to avoid shadowing the built-in `sum`.
    total = num1 + num2
    print(total)


add_sum()
| num1 = 10
num2 = int(input())
def add_sum():
sum = num1 + num2
print(sum)
add_sum() |
class ConsoleCommandFailed(Exception):
    """Error signalling that a console command did not succeed."""
    pass
class ConvergeFailed(Exception):
    """Error signalling that a converge run did not succeed."""
    pass
| class Consolecommandfailed(Exception):
pass
class Convergefailed(Exception):
pass |
class Overflow(
    Enum,
    CascadeProperty,
):
    """Allowed values of the ``overflow`` cascade property."""
    Visible = "visible"  # initial (default) value
    Hidden = "hidden"
    Scroll = "scroll"
    Auto = "auto"
| class Overflow(Enum, CascadeProperty):
visible = 'visible'
hidden = 'hidden'
scroll = 'scroll'
auto = 'auto' |
def Y_S(a):
    """Y-plan price for a call of *a* minutes: 0 when a == 0,
    otherwise (a // 30 + 1) * 10."""
    if a == 0:
        return 0
    return (a // 30 + 1) * 10
def M_S(b):
    """M-plan price for a call of *b* minutes: 0 when b == 0,
    otherwise (b // 60 + 1) * 15."""
    if b == 0:
        return 0
    return (b // 60 + 1) * 15
# Number of calls (read to consume the line; the loop is driven by arr).
N=int(input())
ms_add = 0
ys_add = 0
# Per-call durations in minutes.
arr = list(map(int,input().split()))
for i in arr:
    # Price of this call under each tariff plan.
    Y = Y_S(i)
    M = M_S(i)
    ys_add += Y
    ms_add += M
# Report the cheaper plan; ties print both labels with the shared price.
if ys_add < ms_add:
    print('Y',ys_add)
elif ys_add == ms_add:
    print('Y M',ys_add)
else:
    print('M',ms_add)
| def y_s(a):
if a == 0:
return 0
else:
ys = a // 30 * 10 + 10
return ys
def m_s(b):
if b == 0:
return 0
else:
ms = b // 60 * 15 + 15
return ms
n = int(input())
ms_add = 0
ys_add = 0
arr = list(map(int, input().split()))
for i in arr:
y = y_s(i)
m = m_s(i)
ys_add += Y
ms_add += M
if ys_add < ms_add:
print('Y', ys_add)
elif ys_add == ms_add:
print('Y M', ys_add)
else:
print('M', ms_add) |
"""79. Word Search"""
class Solution(object):
    """LeetCode 79. Word Search: decide whether *word* can be spelled by a
    path of horizontally/vertically adjacent board cells, each used once."""

    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # Empty word is trivially present; empty board can hold nothing.
        if not word:
            return True
        if not board:
            return False
        m = len(board)
        n = len(board[0])
        for i in range(m):
            for j in range(n):
                if self.backtrack(board, word, i, j):
                    return True
        return False

    def backtrack(self, board, word, i, j):
        """DFS from (i, j): True if *word* can be spelled starting here.

        The current cell is temporarily blanked to ' ' so a path cannot
        reuse it, and is restored before returning — including on success,
        so exist() always leaves the board unchanged (the previous version
        returned early on success without restoring, and also carried an
        unreachable duplicate implementation after its returns; both fixed).
        """
        m = len(board)
        n = len(board[0])
        if board[i][j] != word[0]:
            return False
        if not word[1:]:
            return True
        board[i][j] = ' '  # mark visited
        found = (
            (i < m - 1 and self.backtrack(board, word[1:], i + 1, j))
            or (i > 0 and self.backtrack(board, word[1:], i - 1, j))
            or (j < n - 1 and self.backtrack(board, word[1:], i, j + 1))
            or (j > 0 and self.backtrack(board, word[1:], i, j - 1))
        )
        board[i][j] = word[0]  # restore
        return found

    def dfs_exist_helper(self, board, word, i, j):
        """Earlier revision of backtrack(); kept for any existing callers.
        Same contract, with the same restore-on-all-paths fix applied."""
        if board[i][j] != word[0]:
            return False
        if not word[1:]:
            return True
        board[i][j] = " "
        found = (
            (i > 0 and self.dfs_exist_helper(board, word[1:], i - 1, j))
            or (i < len(board) - 1 and self.dfs_exist_helper(board, word[1:], i + 1, j))
            or (j > 0 and self.dfs_exist_helper(board, word[1:], i, j - 1))
            or (j < len(board[0]) - 1 and self.dfs_exist_helper(board, word[1:], i, j + 1))
        )
        board[i][j] = word[0]
        return found
| """79. Word Search"""
class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
if not word:
return True
if not board:
return False
m = len(board)
n = len(board[0])
for i in range(m):
for j in range(n):
if self.backtrack(board, word, i, j):
return True
return False
def backtrack(self, board, word, i, j):
m = len(board)
n = len(board[0])
if board[i][j] == word[0]:
if not word[1:]:
return True
board[i][j] = ' '
if i < m - 1 and self.backtrack(board, word[1:], i + 1, j):
return True
if i > 0 and self.backtrack(board, word[1:], i - 1, j):
return True
if j < n - 1 and self.backtrack(board, word[1:], i, j + 1):
return True
if j > 0 and self.backtrack(board, word[1:], i, j - 1):
return True
board[i][j] = word[0]
return False
else:
return False
if not word:
return True
if not board:
return False
for i in range(len(board)):
for j in range(len(board[0])):
if self.dfs_exist_helper(board, word, i, j):
return True
return False
def dfs_exist_helper(self, board, word, i, j):
if board[i][j] == word[0]:
if not word[1:]:
return True
board[i][j] = ' '
if i > 0 and self.dfs_exist_helper(board, word[1:], i - 1, j):
return True
if i < len(board) - 1 and self.dfs_exist_helper(board, word[1:], i + 1, j):
return True
if j > 0 and self.dfs_exist_helper(board, word[1:], i, j - 1):
return True
if j < len(board[0]) - 1 and self.dfs_exist_helper(board, word[1:], i, j + 1):
return True
board[i][j] = word[0]
return False
else:
return False |
"""
File: hailstone.py
Name:
-----------------------
This program should implement a console program that simulates
the execution of the Hailstone sequence, defined by Douglas
Hofstadter. Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
def main():
    """
    1. judge whether n is equal to 1
    2. If n is odd, multiply n by 3 and add 1
       If n is even, divide n by 2
    3. count steps(while)
    """
    print('This program computes Hailstone sequences.')
    number = int(input('Enter a number: '))
    steps = 0
    while number != 1:
        # Compute the next value first, then print old -> new; the previous
        # version reassigned `number` before printing, so both values in the
        # message showed the *updated* number.
        if number % 2 == 0:
            nxt = number // 2
            print(str(number) + ' is even, so I take half: ' + str(nxt))
        else:
            nxt = 3 * number + 1
            print(str(number) + ' is odd, so I take 3n+1: ' + str(nxt))
        number = nxt
        steps += 1
    print('It took ' + str(steps) + ' steps to reach 1.')
    '''
    It took 111 steps for 27 to reach 1.
    '''


###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
    main()
| """
File: hailstone.py
Name:
-----------------------
This program should implement a console program that simulates
the execution of the Hailstone sequence, defined by Douglas
Hofstadter. Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
def main():
"""
1. judge whether n is equal to 1
2. If n is odd, multiply n by 3 and add 1
If n is even, divide n by 2
3. count steps(while)
"""
print('This program computes Hailstone sequences.')
number = int(input('Enter a number: '))
i = 0
while number != 1:
if number % 2 == 0:
number = number // 2
i += 1
print(str(number) + ' is even, so I take half: ' + str(number))
else:
number = 3 * number + 1
i += 1
print(str(number) + ' is odd, so I take 3n+1: ' + str(number))
print('It took ' + str(i) + ' steps to reach 1.')
'\n It took 111 steps for 27 to reach 1.\n '
if __name__ == '__main__':
main() |
animals = ['bear', 'python3.8', 'peacock', 'kangaroo', 'whale', 'platypus']
# Each (label, index) pair reproduces one of the original print statements,
# in the original order and with the original label text.
for label, index in [
    ("The animal at 1: ", 1),
    ("The third animal: ", 2),
    ("The first animal: ", 0),
    ("The animal at 3: ", 3),
    ("The fifth (5th) animal: ", 4),
    ("The animal at 2: ", 2),
    ("The sixth (6th) animal: ", 5),
    ("The animal at 4: ", 4),
]:
    print(label, animals[index])
| animals = ['bear', 'python3.8', 'peacock', 'kangaroo', 'whale', 'platypus']
print('The animal at 1: ', animals[1])
print('The third animal: ', animals[2])
print('The first animal: ', animals[0])
print('The animal at 3: ', animals[3])
print('The fifth (5th) animal: ', animals[4])
print('The animal at 2: ', animals[2])
print('The sixth (6th) animal: ', animals[5])
print('The animal at 4: ', animals[4]) |
#######################
# Dennis MUD #
# rename_item.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
# Command metadata consumed by the command registry.
NAME = "rename item"
CATEGORIES = ["items"]
USAGE = "rename item <item_id> <new_name>"
DESCRIPTION = """Set the name of the item <item_id> which you are holding to <new_name>.
You must own the item and it must be in your inventory in order to rename it.
Wizards can rename any item from anywhere.
Ex. `rename item 4 Blue Shard`"""
def COMMAND(console, args):
    """Handle `rename item`: validate the arguments, check ownership and
    name uniqueness, then persist the rename.

    Returns True on success, False on any validation failure (an error
    message is sent to the console in every failure case).
    """
    # Perform initial checks.
    if not COMMON.check(NAME, console, args, argmin=2):
        return False
    # Perform argument type checks and casts.
    itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)
    if itemid is None:
        return False
    # Lookup the target item and perform item checks.
    thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=True)
    if not thisitem:
        return False
    # Make sure the item name is not an integer, as this would be confusing.
    # We actually want an exception to be raised here.
    if len(args) == 2:
        try:
            int(args[1])
            console.msg("{0}: The item name cannot be an integer.".format(NAME))
            return False
        except ValueError:
            # Not an integer.
            pass
    # Get new item name.
    itemname = ' '.join(args[1:])
    if itemname == "the":
        console.msg("{0}: Very funny.".format(NAME))
        return False
    # Make sure an item by this name does not already exist.
    # Make an exception if that is the item we are renaming. (changing case)
    for item in console.database.items.all():
        if item["name"].lower() == itemname.lower() and item["name"].lower() != thisitem["name"].lower():
            console.msg("{0}: An item by that name already exists.".format(NAME))
            return False
    # Rename the item.
    thisitem["name"] = itemname
    console.database.upsert_item(thisitem)
    # Finished.
    console.msg("{0}: Done.".format(NAME))
    return True
| name = 'rename item'
categories = ['items']
usage = 'rename item <item_id> <new_name>'
description = 'Set the name of the item <item_id> which you are holding to <new_name>.\n\nYou must own the item and it must be in your inventory in order to rename it.\nWizards can rename any item from anywhere.\n\nEx. `rename item 4 Blue Shard`'
def command(console, args):
if not COMMON.check(NAME, console, args, argmin=2):
return False
itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)
if itemid is None:
return False
thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=True)
if not thisitem:
return False
if len(args) == 2:
try:
int(args[1])
console.msg('{0}: The item name cannot be an integer.'.format(NAME))
return False
except ValueError:
pass
itemname = ' '.join(args[1:])
if itemname == 'the':
console.msg('{0}: Very funny.'.format(NAME))
return False
for item in console.database.items.all():
if item['name'].lower() == itemname.lower() and item['name'].lower() != thisitem['name'].lower():
console.msg('{0}: An item by that name already exists.'.format(NAME))
return False
thisitem['name'] = itemname
console.database.upsert_item(thisitem)
console.msg('{0}: Done.'.format(NAME))
return True |
class User:
    """A reddit user together with the comments/posts deemed relevant.

    NOTE(review): `score` starts at 0 and is presumably filled in by
    ranking code elsewhere — not visible in this file.
    """
    def __init__(self, reddit_id, username, karma, relevant_comments, relevant_posts):
        self.reddit_id = reddit_id
        self.username = username
        self.karma = karma
        # Ranking score accumulator; always initialised to zero here.
        self.score = 0
        self.relevant_comments = relevant_comments
        self.relevant_posts = relevant_posts
| class User:
def __init__(self, reddit_id, username, karma, relevant_comments, relevant_posts):
self.reddit_id = reddit_id
self.username = username
self.karma = karma
self.score = 0
self.relevant_comments = relevant_comments
self.relevant_posts = relevant_posts |
# Mathematics > Algebra > Little Gaurav and Sequence
# Help Gaurav in calculating last digit of a sequence.
#
# https://www.hackerrank.com/challenges/little-gaurav-and-sequence/problem
#
def S_brutforce(n):
    """Direct reference implementation: sum of 2**(2**i + 2*j) over all
    i with 2**i <= n and j in [0, n]."""
    total = 0
    i = 0
    while 2 ** i <= n:
        total += sum(2 ** (2 ** i + 2 * j) for j in range(n + 1))
        i += 1
    return total
def S(n):
    """Last digit of the sequence sum via its closed form.

    The sum factors into (sum over i of 2**(2**i)) times
    (sum over j<=n of 4**j); only each factor's last digit matters mod 10.
    """
    # k = number of powers of two not exceeding n (bit length of n).
    remaining = n
    k = 0
    while remaining != 0:
        remaining //= 2
        k += 1
    # Last digit of sum_{i<k} 2**(2**i): 2 for k == 1, then cycles 6,2,8,4,0.
    if k == 1:
        first = 2
    else:
        first = [6, 2, 8, 4, 0][(k - 2) % 5]
    # Last digit of (4**(n+1) - 1) / 3: 1 for even n, 5 for odd n.
    second = 5 if n % 2 else 1
    return (first * second) % 10
def test():
    """Cross-check the closed form S() against S_brutforce() for small n."""
    for n in range(1, 100):
        s = S(n)
        assert (s % 10) == (S_brutforce(n) % 10)
        print("{:4} {}".format(n, s % 10))
# One query per line after the count. For odd n every term of the closed
# form has an even first factor times 5, so the last digit is always 0
# and the computation is skipped.
for _ in range(int(input())):
    n = int(input())
    if n % 2 == 1:
        print(0)
    else:
        s = S(n)
        print(s)
| def s_brutforce(n):
s = 0
i = 0
while 2 ** i <= n:
for j in range(0, n + 1):
s += 2 ** (2 ** i + 2 * j)
i += 1
return s
def s(n):
i = n
k = 0
while i != 0:
i //= 2
k += 1
if k == 1:
s1 = 2
else:
s1 = [6, 2, 8, 4, 0][(k - 2) % 5]
s2 = 1 if n % 2 == 0 else 5
return s1 * s2 % 10
def test():
    """Cross-check s against the brute-force reference for small n.

    Bug fix: the result must not be assigned to the name ``s`` -- doing so
    makes ``s`` a local variable, so the call ``s(n)`` raises
    UnboundLocalError before the function can ever be reached.
    """
    for n in range(1, 100):
        digit = s(n)
        assert digit % 10 == s_brutforce(n) % 10
        print('{:4} {}'.format(n, digit % 10))
# HackerRank driver: first line is the query count, then one n per line.
# Bug fix: the answer must not be bound to the name ``s`` -- that would
# overwrite the function ``s`` at module level and break every later
# even-n query with a "'int' object is not callable" error.
for _ in range(int(input())):
    n = int(input())
    if n % 2 == 1:
        print(0)
    else:
        print(s(n))
"""The configuration class used for testing using no default values"""
class TestConfig(object):  # pylint: disable=too-few-public-methods
    """The configuration class used for testing using no default values."""

    # Azure Table Storage connection settings (dummy values for tests).
    AZURE_STORAGE_ACCOUNT = 'TestApplicationAccount'
    AZURE_STORAGE_KEY = 'TestApplicationKey'
    AZURE_STORAGE_TABLE = 'TestApplicationTable'
    AZURE_STORAGE_TABLE_PARTITION_KEY = 'TestApplicationPartitionKey'
    # Identifiers used when composing row keys for application revisions.
    APP_NAME = 'TestApplicationRowKeyAppName'
    REVISION_PARAMETER = 'TestApplicationRevisionParameter'
| """The configuration class used for testing using no default values"""
class Testconfig(object):
"""The configuration class used for testing using no default values"""
azure_storage_account = 'TestApplicationAccount'
azure_storage_key = 'TestApplicationKey'
azure_storage_table = 'TestApplicationTable'
azure_storage_table_partition_key = 'TestApplicationPartitionKey'
app_name = 'TestApplicationRowKeyAppName'
revision_parameter = 'TestApplicationRevisionParameter' |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""ILS minters."""
def pid_minter(record_uuid, data, provider_cls):
    """Mint a PID for a record and store its value under ``data['pid']``.

    :param record_uuid: UUID of the record object the PID points to.
    :param data: record metadata dict, mutated in place.
    :param provider_cls: PID provider class exposing ``create``.
    :returns: the newly created PID.
    """
    pid = provider_cls.create(object_type="rec", object_uuid=record_uuid).pid
    data["pid"] = pid.pid_value
    return pid
def dummy_pid_minter(record_uuid, data):
    """No-op minter: intentionally mints nothing and leaves ``data`` alone."""
    return None
| """ILS minters."""
def pid_minter(record_uuid, data, provider_cls):
    """Generic ILS PID minter."""
    # Create the provider for this record, then expose the minted value
    # both inside the record metadata and as the return value.
    provider = provider_cls.create(object_type='rec', object_uuid=record_uuid)
    data['pid'] = provider.pid.pid_value
    return provider.pid
def dummy_pid_minter(record_uuid, data):
    """Dummy minter."""
    # Intentionally mints nothing; used where no PID should be created.
    return None
"""Global values for current session"""

# Directory of the active test run; presumably assigned elsewhere during
# start-up -- None until then (TODO confirm against the caller).
testdir = None
# Session-wide settings mapping, filled in by whoever owns the session.
settings = {}
| """Global values for current session"""
testdir = None
settings = {} |
# Solution to Mega Contest 1 Problem: Maximum Sum
# Maximum Sum driver: for each testcase read n, k and two integer arrays,
# then report the best total obtainable from array_a after up to k swaps
# with array_b.
for testcase in range(int(input())):
    n, k = map(int, input().split())
    array_a = list(map(int, input().split()))
    array_b = list(map(int, input().split()))
    if k>0:
        array_a.sort()
        array_b.sort()
        # max_sum = sum(array_a[k:]+array_b[n-k:])
        # Keep a's n-k largest elements; for each of the k smallest slots
        # take the better of a's element and b's matching largest element.
        values = array_a[k:]
        for val in range(k):
            values.append(max(array_a[val], array_b[-val-1]))
        max_sum = sum(values)
    else: max_sum = sum(array_a)
    print(max_sum)
print(max_sum) | for testcase in range(int(input())):
(n, k) = map(int, input().split())
array_a = list(map(int, input().split()))
array_b = list(map(int, input().split()))
if k > 0:
array_a.sort()
array_b.sort()
values = array_a[k:]
for val in range(k):
values.append(max(array_a[val], array_b[-val - 1]))
max_sum = sum(values)
else:
max_sum = sum(array_a)
print(max_sum) |
# Read a count and two equal-length integer lists, then total only the
# positive V[i] - C[i] differences.
N = int(input())
V = list(map(int, input().split()))
C = list(map(int, input().split()))
result = 0
for i in range(N):
    if V[i] > C[i]:
        result += V[i] - C[i]
print(result)
| n = int(input())
# Read the two integer lists, then total the positive v[i] - c[i] gaps.
# Bug fix: after the lower-case rename the loop still read the old names
# N/V/C, which no longer exist, raising NameError on the first iteration.
v = list(map(int, input().split()))
c = list(map(int, input().split()))
result = 0
for i in range(n):
    if v[i] > c[i]:
        result += v[i] - c[i]
print(result)
class Hair(object):
    """A single hair anchored on a sphere of the given radius.

    Written for a Processing-style sketch: it relies on the sketch
    runtime's global helpers (random, noise, millis, beginShape, vertex,
    stroke, TAU, LINES, ...), which are not defined in this file.
    """

    def __init__(self, radius):
        self.radius = radius
        # Random position on the sphere: longitude phi, latitude theta.
        self.phi = random(TAU)
        # Per-hair length multiplier for the tip offset.
        self.slow = random(1.15, 1.2)
        self.theta = asin(random(-self.radius, self.radius) / self.radius)
        self.z = self.radius * sin(self.theta)

    def render(self):
        # Per-frame noise offsets that wobble the hair tip around its root.
        oFF = (noise(millis() * 0.0005, sin(self.phi)) - 0.5) * 0.3
        oFFb = (noise(millis() * 0.0007, sin(self.z) * 0.01) - 0.5) * 0.3
        self.thetaFF = self.theta + oFF
        phiFF = self.phi + oFFb
        # Root point on the sphere surface (spherical -> Cartesian).
        x = self.radius * cos(self.theta) * cos(self.phi)
        y = self.radius * cos(self.theta) * sin(self.phi)
        self.z = self.radius * sin(self.theta)
        # Wobbled point, scaled outwards by ``slow`` to form the tip.
        xo = self.radius * cos(self.thetaFF) * cos(phiFF)
        yo = self.radius * cos(self.thetaFF) * sin(phiFF)
        zo = self.radius * sin(self.thetaFF)
        xb = xo * self.slow
        yb = yo * self.slow
        zb = zo * self.slow
        # Draw the root-to-tip segment; the tip uses a lighter stroke.
        with beginShape(LINES):
            stroke(0)
            vertex(x, y, self.z)
            stroke(200, 150)
            vertex(xb, yb, zb)
| class Hair(object):
def __init__(self, radius):
self.radius = radius
self.phi = random(TAU)
self.slow = random(1.15, 1.2)
self.theta = asin(random(-self.radius, self.radius) / self.radius)
self.z = self.radius * sin(self.theta)
def render(self):
o_ff = (noise(millis() * 0.0005, sin(self.phi)) - 0.5) * 0.3
o_f_fb = (noise(millis() * 0.0007, sin(self.z) * 0.01) - 0.5) * 0.3
self.thetaFF = self.theta + oFF
phi_ff = self.phi + oFFb
x = self.radius * cos(self.theta) * cos(self.phi)
y = self.radius * cos(self.theta) * sin(self.phi)
self.z = self.radius * sin(self.theta)
xo = self.radius * cos(self.thetaFF) * cos(phiFF)
yo = self.radius * cos(self.thetaFF) * sin(phiFF)
zo = self.radius * sin(self.thetaFF)
xb = xo * self.slow
yb = yo * self.slow
zb = zo * self.slow
with begin_shape(LINES):
stroke(0)
vertex(x, y, self.z)
stroke(200, 150)
vertex(xb, yb, zb) |
# Puzzle input: a single line of space-separated integers encoding the tree.
with open("input.txt") as f:
    numbers = list(map(int, f.readline().split()))
def parse_tree_metadata(tree):
    """Recursively sum every metadata entry in the flat tree encoding.

    ``tree`` starts with a two-value header (child count, metadata count),
    followed by the encoded child nodes and then this node's metadata
    entries.  Returns the unconsumed remainder of ``tree`` together with
    the metadata total of this subtree.
    """
    child_count, metadata_count = tree[0], tree[1]
    rest = tree[2:]
    total = 0
    for _ in range(child_count):
        rest, child_total = parse_tree_metadata(rest)
        total += child_total
    total += sum(rest[:metadata_count])
    return rest[metadata_count:], total
# Part 1 answer: total of all metadata entries in the tree.
print(parse_tree_metadata(numbers)[1])
def get_value_root_node(start):
    """Return (value, next_index) for the node encoded at ``numbers[start]``.

    A leaf's value is the sum of its metadata; otherwise each metadata
    entry is a 1-based child index whose value is accumulated.  Indices
    past the last child contribute nothing (same bounds check as the
    original: only an upper-bound test on ``entry - 1``).
    """
    child_count, metadata_count = numbers[start : start + 2]
    cursor = start + 2
    value = 0
    if child_count:
        child_values = []
        for _ in range(child_count):
            child_value, cursor = get_value_root_node(cursor)
            child_values.append(child_value)
        for entry in numbers[cursor : cursor + metadata_count]:
            if entry - 1 < len(child_values):
                value += child_values[entry - 1]
    else:
        value = sum(numbers[cursor : cursor + metadata_count])
    return value, cursor + metadata_count
# Part 2 answer: value of the root node.
print(get_value_root_node(0)[0])
| with open('input.txt') as f:
numbers = list(map(int, f.readline().split()))
def parse_tree_metadata(tree):
    """Return (unconsumed tail, metadata total) for the node at ``tree[0:]``."""
    # Header: number of child nodes, then number of metadata entries.
    num_nodes = tree[0]
    num_metadata = tree[1]
    leafs = tree[2:]
    total = 0
    # Consume each child recursively; what remains starts with our metadata.
    for i in range(num_nodes):
        (leafs, sum_metadata) = parse_tree_metadata(leafs)
        total += sum_metadata
    for i in range(num_metadata):
        total += leafs[i]
    return (leafs[num_metadata:], total)
print(parse_tree_metadata(numbers)[1])
def get_value_root_node(start):
    """Return (node value, index just past this node) per the part-2 rules."""
    node_sum = 0
    (num_nodes, num_metadata) = numbers[start:start + 2]
    next_start = start + 2
    if num_nodes:
        node_values = []
        # Children are encoded back-to-back right after the header.
        for child_node in range(num_nodes):
            (temporary_sum, next_start) = get_value_root_node(next_start)
            node_values.append(temporary_sum)
        # Metadata entries are 1-based child indices; entries beyond the
        # last child add nothing (only the upper bound is checked here).
        for i in numbers[next_start:next_start + num_metadata]:
            if i - 1 < len(node_values):
                node_sum += node_values[i - 1]
    else:
        # Leaf: the value is simply the sum of its metadata entries.
        node_sum += sum(numbers[next_start:next_start + num_metadata])
    return (node_sum, next_start + num_metadata)
print(get_value_root_node(0)[0]) |
class Dough:
    """Dough ingredient described by flour type, baking technique and weight.

    All three fields are stored name-mangled and exposed through plain
    read/write properties.
    """

    def __init__(self, flour_type, baking_technique, weight):
        self.__flour_type = flour_type
        self.__baking_technique = baking_technique
        self.__weight = weight

    @property
    def flour_type(self):
        return self.__flour_type

    @flour_type.setter
    def flour_type(self, value):
        self.__flour_type = value

    @property
    def baking_technique(self):
        return self.__baking_technique

    @baking_technique.setter
    def baking_technique(self, value):
        self.__baking_technique = value

    @property
    def weight(self):
        return self.__weight

    # Setter body continues on the following (shared) line.
    @weight.setter
    def weight(self, value):
self.__weight = value | class Dough:
def __init__(self, flour_type, baking_technique, weight):
self.__flour_type = flour_type
self.__baking_technique = baking_technique
self.__weight = weight
@property
def flour_type(self):
return self.__flour_type
@flour_type.setter
def flour_type(self, value):
self.__flour_type = value
@property
def baking_technique(self):
return self.__baking_technique
@baking_technique.setter
def baking_technique(self, value):
self.__baking_technique = value
@property
def weight(self):
return self.__weight
@weight.setter
def weight(self, value):
self.__weight = value |
"""
Returns starting node of the cycle if cycle exists
"""


def has_cycle(head):
    """Floyd's tortoise/hare: return the cycle's entry node, or None."""
    slow = fast = head
    # Phase 1: advance at 1x and 2x until the pointers meet in the cycle.
    while fast and fast.next and fast.next.next:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            # Phase 2: restart one pointer at the head; stepping both at
            # 1x, they meet exactly at the node where the cycle begins.
            slow = head
            while slow is not fast:
                slow = slow.next
                fast = fast.next
            return slow
    return None
| """
Returns starting node of the cycle if cycle exists
"""
def has_cycle(head):
fast = slow = head
while fast and fast.next and fast.next.next:
(slow, fast) = (slow.next, fast.next.next)
if slow is fast:
slow = head
while slow is not fast:
(slow, fast) = (slow.next, fast.next)
return slow
return None |
# Demonstrate Python's arithmetic operators on a fixed pair of operands.
# Note: print's default separator adds a space after each label.
a, b = 10, 5
print("Add: a+b = ", a+b)
print("Sub: a-b = ", a-b)
print("Mul: a*b = ", a*b)
print("Div: a/b = ", a/b)
print("Mod: a%b = ", a%b)
print("Exp: a**b = ", a**b)
print("Floored Div: a//b = ", a//b) | (a, b) = (10, 5)
print('Add: a+b = ', a + b)
print('Sub: a-b = ', a - b)
print('Mul: a*b = ', a * b)
print('Div: a/b = ', a / b)
print('Mod: a%b = ', a % b)
print('Exp: a**b = ', a ** b)
print('Floored Div: a//b = ', a // b) |
'''
189. Rotate Array
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: [1,2,3,4,5,6,7] and k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: [-1,-100,3,99] and k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]
Note:
Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
Could you do it in-place with O(1) extra space?
'''
# Brute Force Approach - O(n*k) time, O(1) space
def rotate(self, nums, k):
    """Rotate ``nums`` right by ``k`` steps in place, one step at a time."""
    for _ in range(k):
        carry = nums[-1]
        # Shift every element one slot right, carrying the displaced value.
        for idx in range(len(nums)):
            nums[idx], carry = carry, nums[idx]
# Using Extra Array - O(n) time, O(n) space
def rotate(self, nums, k):
    """Return a new list equal to ``nums`` rotated right by ``k`` steps.

    Bug fix: after a right rotation the element at position ``i`` comes
    from index ``(i - k) % len(nums)``.  The previous ``(i + k + 1) %
    len(nums)`` only coincided with that when ``len(nums) == 2*k + 1``
    (e.g. the [1..7], k=3 example) and was wrong otherwise.
    """
    temp = []
    for i in range(len(nums)):
        temp.append(nums[(i - k) % len(nums)])
    return temp[:]
# Reverse Solution - O(n) time, O(1) space
def rotate(self, nums, k):
    """Rotate ``nums`` right by ``k`` steps in place via slice reversal."""
    n = len(nums)
    if k % n != 0:
        k %= n
    # The last k elements (kept in order) move to the front; the first
    # n-k elements follow them.
    head = nums[::-1][:k][::-1]
    tail = nums[::-1][k:][::-1]
    nums[:] = head + tail
| """
189. Rotate Array
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: [1,2,3,4,5,6,7] and k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: [-1,-100,3,99] and k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]
Note:
Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
Could you do it in-place with O(1) extra space?
"""
def rotate(self, nums, k):
(temp, previous) = (0, 0)
for i in range(k):
previous = nums[-1]
for j in range(len(nums)):
temp = nums[j]
nums[j] = previous
previous = temp
def rotate(self, nums, k):
temp = []
for i in range(len(nums)):
temp.append(nums[(i + k + 1) % len(nums)])
return temp[:]
def rotate(self, nums, k):
if k % len(nums) != 0:
k = k % len(nums)
nums[:] = nums[::-1][:k][::-1] + nums[::-1][k:][::-1] |
# Definition for a undirected graph node
# class UndirectedGraphNode:
#     def __init__(self, x):
#         self.label = x
#         self.neighbors = []
class Solution:
    # @param node, a undirected graph node
    # @return a undirected graph node
    def cloneGraph(self, node):
        """Deep-copy the undirected graph reachable from ``node``.

        Assumes labels uniquely identify nodes (the clone map is keyed by
        label) -- TODO confirm against the problem's constraints.
        """
        graph = {}  # label -> cloned node
        visited = set()  # labels whose neighbor lists are already wired up

        def dfs(node, visited, graph):
            # Stop on missing nodes or ones whose clone is already wired.
            if not node or node.label in visited:
                return
            visited |= {node.label}
            if node.label not in graph:
                graph[node.label] = UndirectedGraphNode(node.label)
            newNode = graph[node.label]
            for nbr in node.neighbors:
                # Create the neighbor's clone lazily, then link and recurse.
                if nbr.label not in graph:
                    graph[nbr.label] = UndirectedGraphNode(nbr.label)
                newNode.neighbors.append(graph[nbr.label])
                dfs(nbr, visited, graph)
            return newNode

        return dfs(node, visited, graph)
| class Solution:
def clone_graph(self, node):
graph = {}
visited = set()
def dfs(node, visited, graph):
if not node or node.label in visited:
return
visited |= {node.label}
if node.label not in graph:
graph[node.label] = undirected_graph_node(node.label)
new_node = graph[node.label]
for nbr in node.neighbors:
if nbr.label not in graph:
graph[nbr.label] = undirected_graph_node(nbr.label)
newNode.neighbors.append(graph[nbr.label])
dfs(nbr, visited, graph)
return newNode
return dfs(node, visited, graph) |
"""
Base config file to provide global settings that should be overwritten with
environment specific values.
"""
# Flask
DEBUG = False
TESTING = False
CSRF_ENABLED = True
# Deliberately empty here; each environment must supply its own secret.
SECRET_KEY = ''
# SQL Alchemy
SQLALCHEMY_DATABASE_URI = ''
# Modification-tracking event system is switched off.
SQLALCHEMY_TRACK_MODIFICATIONS = False
| """
Base config file to provide global settings that should be overwritten with
environment specific values.
"""
debug = False
testing = False
csrf_enabled = True
secret_key = ''
sqlalchemy_database_uri = ''
sqlalchemy_track_modifications = False |
# Assignment 1: altitude advisory for the Hyderabad ATC control room.
print("welcome to the Controlroom of Hyderabad ATC")
Altitude = 1500
if Altitude <= 1000:
    print('youre safe to land')
elif Altitude <=5000:
    print('Please Bring Down Altitude to 1000')
else :
    print('it is Danger now to land, please Turn around')
# Assignment 2: print every prime below 200 via trial division; the
# for/else fires only when no divisor was found.
for num in range(0,200):
    if num > 1:
        for i in range(2,num):
            if (num % i)== 0:
                break
        else :
            print(num)
print(num) | print('welcome to the Controlroom of Hyderabad ATC')
# Altitude advisory.  Bug fix: the variable is lower-case ``altitude``;
# the comparisons below previously read ``Altitude`` and raised NameError.
altitude = 1500
if altitude <= 1000:
    print('youre safe to land')
elif altitude <= 5000:
    print('Please Bring Down Altitude to 1000')
else:
    print('it is Danger now to land, please Turn around')
# Print every prime below 200 via trial division (the for/else fires only
# when no divisor was found).
for num in range(0, 200):
    if num > 1:
        for i in range(2, num):
            if num % i == 0:
                break
        else:
            print(num)
# Tally the distinct integers in the sample string, smallest first.
s = "2 3 4 5 66 74 33 2 3"
s = s.split()
s = [int(x) for x in s]
# Unique values in ascending order (same result as dedupe-then-sort).
s1 = sorted(set(s))
print(s1)
for i in s1:
    print(i, s.count(i))
| s = '2 3 4 5 66 74 33 2 3'
s = s.split()
s = [int(x) for x in s]
s1 = []
for i in s:
if i not in s1:
s1.append(i)
s1 = sorted(s1)
print(s1)
for i in s1:
print(i, s.count(i)) |
# Module banner: printed on import as well as on direct execution.
print('This is the dummy Controller')


def dummy_example():
    """Print a marker line proving the dummy controller was invoked."""
    print('This is a function in the dummy Controller')


# Extra output (and the call below, on the next line) only when run directly.
if __name__ == '__main__':
    print('This is printed only from __main__')
dummy_example() | print('This is the dummy Controller')
def dummy_example():
print('This is a function in the dummy Controller')
if __name__ == '__main__':
print('This is printed only from __main__')
dummy_example() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.