content stringlengths 7 1.05M |
|---|
"""*****************************************************************************
* Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
# --- Memory-layout discovery -------------------------------------------------
# Reads the device ATDF tree (ATDF global supplied by the MCC/Harmony
# framework) to locate the flash and RAM regions used to place the bootloader.
# NOTE(review): these module-level "global" statements are no-ops at module
# scope; kept as-is from the original.
global flashNames
global ram_start
global ram_size
global flash_start
global flash_size
global flash_erase_size
global btl_start
flash_start = 0
flash_size = 0
flash_erase_size = 0
btl_start = "0x0"
# Peripheral names that act as the NVM controller on supported families.
NvmMemoryNames = ["NVM", "NVMCTRL", "EFC", "HEFC"]
# Address-space segment names used for program flash.
FlashNames = ["FLASH", "IFLASH"]
# Address-space segment names used for RAM.
RamNames = ["HSRAM", "HRAMC0", "HMCRAMC0", "IRAM", "FlexRAM"]
addr_space = ATDF.getNode("/avr-tools-device-file/devices/device/address-spaces/address-space")
addr_space_children = addr_space.getChildren()
periphNode = ATDF.getNode("/avr-tools-device-file/devices/device/peripherals")
peripherals = periphNode.getChildren()
for mem_idx in range(0, len(addr_space_children)):
    mem_seg = addr_space_children[mem_idx].getAttribute("name")
    mem_type = addr_space_children[mem_idx].getAttribute("type")
    # Flash bounds are converted to int (ATDF attributes are hex strings).
    if ((any(x == mem_seg for x in FlashNames) == True) and (mem_type == "flash")):
        flash_start = int(addr_space_children[mem_idx].getAttribute("start"), 16)
        flash_size = int(addr_space_children[mem_idx].getAttribute("size"), 16)
    # RAM bounds stay as hex strings; they feed string symbols directly.
    if ((any(x == mem_seg for x in RamNames) == True) and (mem_type == "ram")):
        ram_start = addr_space_children[mem_idx].getAttribute("start")
        ram_size = addr_space_children[mem_idx].getAttribute("size")
# The bootloader is placed at the start of flash.
btl_start = str(flash_start)
def activateAndConnectDependencies(component):
    """Activate the device's NVM memory component and connect it to the
    bootloader component's memory dependency.

    :param component: id of the bootloader component that owns the dependency.
    """
    nvmMemoryName = ""
    # Find the first peripheral whose name matches a known NVM controller.
    for module in range (0, len(peripherals)):
        periphName = str(peripherals[module].getAttribute("name"))
        if ((any(x == periphName for x in NvmMemoryNames) == True)):
            nvmMemoryName = periphName.lower()
            break
    # Capability id follows the "<PERIPH>_MEMORY" convention.
    nvmMemoryCapabilityId = nvmMemoryName.upper() + "_MEMORY"
    btlActivateTable = [nvmMemoryName]
    btlConnectTable = [
        [component, "btl_MEMORY_dependency", nvmMemoryName, nvmMemoryCapabilityId]
    ]
    res = Database.activateComponents(btlActivateTable)
    res = Database.connectDependencies(btlConnectTable)
def calcBootloaderSize():
    """Return the bootloader size in bytes.

    The size is the larger of the protocol's maximum bootloader size for the
    current core architecture and the flash erase-block size (the bootloader
    must occupy whole erase blocks). Returns 0 when the architecture is not
    in ``btlSizes`` (expected from the bootloader-protocol script) or when
    the erase size is unknown.
    """
    global flash_erase_size
    coreArch = Database.getSymbolValue("core", "CoreArchitecture")
    # Get the Maximum bootloader size value defined in bootloader protocol python file
    if (coreArch in btlSizes):
        max_usb_btl_size = btlSizes[coreArch][0]
    else:
        return 0
    btl_size = 0
    if (flash_erase_size != 0):
        # Never report less than one erase block.
        if (flash_erase_size >= max_usb_btl_size):
            btl_size = flash_erase_size
        else:
            btl_size = max_usb_btl_size
    return btl_size
def setBootloaderSize(symbol, event):
    """Callback: recompute the bootloader size, store it in *symbol*, and
    show the symbol only when a non-zero size could be determined.

    Fix: the original converted the size to a string first and then compared
    that string against the int 0, which is always unequal, so the symbol
    was unconditionally visible. Compare the numeric value instead.
    """
    btl_size = calcBootloaderSize()
    symbol.setValue(str(btl_size))
    symbol.setVisible(btl_size != 0)
def setAppStartAndCommentVisible(symbol, event):
    """Callback: keep core.APP_START_ADDRESS in sync with the bootloader size
    and surface a warning comment when the user's address is invalid.

    Triggered by "BTL_SIZE" changes (recompute the application start address)
    or by "core.APP_START_ADDRESS" changes (validate the supplied address).
    """
    global flash_start
    global flash_size
    global flash_erase_size
    if (event["id"] == "BTL_SIZE"):
        btlSize = int(event["value"],10)
        if ((btlSize != 0) and (flash_erase_size != 0)):
            appStartAligned = btlSize
            # If the bootloader size is not aligned to Erase Block Size
            if ((btlSize % flash_erase_size) != 0):
                appStartAligned = btlSize + (flash_erase_size - (btlSize % flash_erase_size))
            custom_app_start_addr = str(hex(flash_start + appStartAligned))
        else:
            custom_app_start_addr = str(hex(flash_start))
        # Strip the "0x" prefix: the core symbol stores a bare hex string.
        Database.setSymbolValue("core", "APP_START_ADDRESS", custom_app_start_addr[2:])
    else:
        comment_enable = True
        custom_app_start_addr = int(Database.getSymbolValue("core", "APP_START_ADDRESS"), 16)
        btl_size = calcBootloaderSize()
        # Validation order: below bootloader end, past end of flash,
        # misaligned to the erase block — first failing check wins.
        if (custom_app_start_addr < (flash_start + btl_size)):
            symbol.setLabel("WARNING!!! Application Start Address Should be equal to or Greater than Bootloader Size !!!")
        elif (custom_app_start_addr >= (flash_start + flash_size)):
            symbol.setLabel("WARNING!!! Application Start Address is exceeding the Flash Memory Space !!!")
        elif ((flash_erase_size != 0) and (custom_app_start_addr % flash_erase_size != 0)):
            symbol.setLabel("WARNING!!! Application Start Address should be aligned to Erase block size ( "+ str(flash_erase_size) + " bytes ) of Flash memory !!!")
        else:
            comment_enable = False
        symbol.setVisible(comment_enable)
def setTriggerLenVisible(symbol, event):
    """Callback: show the trigger-length symbol only while the boolean
    BTL_TRIGGER_ENABLE event value is true."""
    symbol.setVisible(event["value"])
def generateCommonSymbols(bootloaderComponent):
    """Create the configuration symbols shared by every bootloader variant:
    memory used, type, start address, size, application-start warning
    comment, firmware-trigger settings and RAM bounds.

    :param bootloaderComponent: the bootloader component to attach symbols to.
    """
    global ram_start
    global ram_size
    global btl_start
    # btl_type is expected to be defined by the variant-specific script
    # that loads this file.
    global btl_type
    btlMemUsed = bootloaderComponent.createStringSymbol("MEM_USED", None)
    btlMemUsed.setLabel("Bootloader Memory Used")
    btlMemUsed.setReadOnly(True)
    btlMemUsed.setDefaultValue("")
    btlType = bootloaderComponent.createStringSymbol("BTL_TYPE", None)
    btlType.setLabel("Bootloader Type")
    btlType.setReadOnly(True)
    btlType.setVisible(False)
    btlType.setDefaultValue(btl_type)
    btlStart = bootloaderComponent.createStringSymbol("BTL_START", None)
    btlStart.setLabel("Bootloader Start Address")
    btlStart.setVisible(False)
    btlStart.setDefaultValue(btl_start)
    btl_size = calcBootloaderSize()
    btlSize = bootloaderComponent.createStringSymbol("BTL_SIZE", None)
    btlSize.setLabel("Bootloader Size (Bytes)")
    btlSize.setVisible(False)
    btlSize.setDefaultValue(str(btl_size))
    # Recalculate the size whenever the memory-used symbol changes.
    btlSize.setDependencies(setBootloaderSize, ["MEM_USED"])
    # Hidden comment that surfaces warnings when the application start
    # address conflicts with the bootloader placement.
    btlAppAddrComment = bootloaderComponent.createCommentSymbol("BTL_APP_START_ADDR_COMMENT", None)
    btlAppAddrComment.setVisible(False)
    btlAppAddrComment.setDependencies(setAppStartAndCommentVisible, ["core.APP_START_ADDRESS", "BTL_SIZE"])
    btlTriggerEnable = bootloaderComponent.createBooleanSymbol("BTL_TRIGGER_ENABLE", None)
    btlTriggerEnable.setLabel("Enable Bootloader Trigger From Firmware")
    btlTriggerEnable.setDescription("This Option can be used to Force Trigger bootloader from application firmware after a soft reset.")
    btlTriggerLenDesc = "This option adds the provided offset to RAM Start address in bootloader linker script. \
Application firmware can store some pattern in the reserved bytes region from RAM start for bootloader \
to check at reset."
    btlTriggerLen = bootloaderComponent.createStringSymbol("BTL_TRIGGER_LEN", btlTriggerEnable)
    btlTriggerLen.setLabel("Number Of Bytes To Reserve From Start Of RAM")
    btlTriggerLen.setVisible((btlTriggerEnable.getValue() == True))
    btlTriggerLen.setDefaultValue("0")
    btlTriggerLen.setDependencies(setTriggerLenVisible, ["BTL_TRIGGER_ENABLE"])
    btlTriggerLen.setDescription(btlTriggerLenDesc)
    btlRamStart = bootloaderComponent.createStringSymbol("BTL_RAM_START", None)
    btlRamStart.setDefaultValue(ram_start)
    btlRamStart.setReadOnly(True)
    btlRamStart.setVisible(False)
    btlRamSize = bootloaderComponent.createStringSymbol("BTL_RAM_SIZE", None)
    btlRamSize.setDefaultValue(ram_size)
    btlRamSize.setReadOnly(True)
    btlRamSize.setVisible(False)
def generateHwCRCGeneratorSymbol(bootloaderComponent):
    """Enable PAC, activate the DSU component when present, and create the
    read-only BTL_HW_CRC_GEN symbol (True only when DSU was found)."""
    crcEnable = False
    coreComponent = Database.getComponentByID("core")
    # Enable PAC and DSU component if present
    for module in range (0, len(peripherals)):
        periphName = str(peripherals[module].getAttribute("name"))
        if (periphName == "PAC"):
            coreComponent.getSymbolByID("PAC_USE").setValue(True)
            # NOTE(review): "INTERRRUPT" (triple R) matches the core symbol id
            # used here; do not "fix" the spelling without checking the core.
            if (Database.getSymbolValue("core", "PAC_INTERRRUPT_MODE") != None):
                coreComponent.getSymbolByID("PAC_INTERRRUPT_MODE").setValue(False)
        elif (periphName == "DSU"):
            res = Database.activateComponents(["dsu"])
            crcEnable = True
    btlHwCrc = bootloaderComponent.createBooleanSymbol("BTL_HW_CRC_GEN", None)
    btlHwCrc.setLabel("Bootloader Hardware CRC Generator")
    btlHwCrc.setReadOnly(True)
    btlHwCrc.setVisible(False)
    btlHwCrc.setDefaultValue(crcEnable)
def generateLinkerFileSymbol(bootloaderComponent):
    """Replace the default linker script with the bootloader-specific one
    rendered from the optimized freemarker template."""
    # Disable Default linker script generation
    Database.setSymbolValue("core", "ADD_LINKER_FILE", False)
    # Freemarker template for the size-optimized bootloader linker script.
    btlLinkerPath = "../bootloader/templates/arm/bootloader_linker_optimized.ld.ftl"
    # Generate Bootloader Linker Script
    btlLinkerFile = bootloaderComponent.createFileSymbol("BOOTLOADER_LINKER_FILE", None)
    btlLinkerFile.setSourcePath(btlLinkerPath)
    btlLinkerFile.setOutputName("btl.ld")
    btlLinkerFile.setMarkup(True)
    btlLinkerFile.setOverwrite(True)
    btlLinkerFile.setType("LINKER")
def generateXC32SettingsAndFileSymbol(bootloaderComponent):
    """Emit the optimized startup file and the XC32 toolchain settings the
    bootloader needs (skip CRT0, keep data out of per-symbol sections)."""
    configName = Variables.get("__CONFIGURATION_NAME")
    # generate startup_xc32.c file
    btlStartSourceFile = bootloaderComponent.createFileSymbol("STARTUP_BOOTLOADER_C", None)
    btlStartSourceFile.setSourcePath("../bootloader/templates/arm/startup_xc32_optimized.c.ftl")
    btlStartSourceFile.setOutputName("startup_xc32.c")
    btlStartSourceFile.setMarkup(True)
    btlStartSourceFile.setOverwrite(True)
    btlStartSourceFile.setDestPath("")
    btlStartSourceFile.setProjectPath("config/" + configName + "/")
    btlStartSourceFile.setType("SOURCE")
    # set XC32 option to not use the CRT0 startup code
    xc32NoCRT0StartupCodeSym = bootloaderComponent.createSettingSymbol("XC32_NO_CRT0_STARTUP_CODE", None)
    xc32NoCRT0StartupCodeSym.setCategory("C32-LD")
    xc32NoCRT0StartupCodeSym.setKey("no-startup-files")
    xc32NoCRT0StartupCodeSym.setValue("true")
    # Clear Placing data into its own section
    xc32ClearDataSection = bootloaderComponent.createSettingSymbol("XC32_CLEAR_DATA_SECTION", None)
    xc32ClearDataSection.setCategory("C32")
    xc32ClearDataSection.setKey("place-data-into-section")
    xc32ClearDataSection.setValue("false")
|
"""
739. Daily Temperatures
temperatures = [73, 74, 75, 71, 69, 72, 76, 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0]
"""
#ascending stack: each push is smaller; larger values stay behind it — pop while the stack top is not greater than the current temperature
class Solution:
    def dailyTemperatures(self, temperatures):
        """
        Scan right-to-left keeping a stack of indices whose temperatures are
        strictly increasing toward the bottom; the surviving stack top is the
        nearest warmer day to the right.
        :type temperatures: List[int]
        :rtype: List[int]
        """
        count = len(temperatures)
        answer = [0] * count
        warmer = []  # indices of days strictly warmer than the current one
        for day in reversed(range(count)):
            # Discard days that are not warmer than today.
            while warmer and temperatures[day] >= temperatures[warmer[-1]]:
                warmer.pop()
            if warmer:
                answer[day] = warmer[-1] - day
            warmer.append(day)
        return answer
# Descending stack: pushed indices hold strictly decreasing temperatures;
# a warmer day resolves every colder index still waiting on the stack.
class Solution:
    def dailyTemperatures(self, temperatures):
        """
        :type temperatures: List[int]
        :rtype: List[int]
        """
        answer = [0] * len(temperatures)
        pending = []  # indices still waiting for a warmer temperature
        for day, temp in enumerate(temperatures):
            while pending and temp > temperatures[pending[-1]]:
                earlier = pending.pop()
                answer[earlier] = day - earlier
            pending.append(day)
        return answer
def ft_filter(function_to_apply, list_of_inputs):
    """Yield the items of *list_of_inputs* kept by *function_to_apply*.

    Mirrors the built-in filter(): any truthy predicate result keeps the
    element, and a None predicate keeps elements that are themselves truthy.

    Fix: the original tested ``res is True``, which silently dropped elements
    whose predicate returned a truthy non-bool value (e.g. a matched
    substring or a non-zero count).
    """
    for elem in list_of_inputs:
        keep = elem if function_to_apply is None else function_to_apply(elem)
        if keep:
            yield elem
|
# The program computes an integral with the parabola (Simpson) method and the
# right-rectangle method, each with two subdivision counts. It identifies the
# least accurate method and finds the subdivision count needed for accuracy eps.
# a, b           - integration interval
# n1, n2, n      - subdivision counts
# h              - step size
# y              - integrand value
# I1, I2, I3, I4 - results of the methods
# average        - their mean
# kkk            - number of the least accurate method
a, b = map(int,input('Введите границы отрезка через пробел: ').split(' '))
# Normalize so that a <= b.
if a > b:
    a, b = b, a
n1, n2 = map(int, input('N1 N2: ').split(' '))
def f(x):
    """Integrand being integrated: f(x) = x^2."""
    return x * x
def parabola(n):
    """Simpson's-rule approximation of the integral of f over [a, b]
    using n subintervals (a, b and f are module globals).

    Fix: removed the unused local ``m = n*2`` left over in the original.
    """
    s = 0
    h = (b-a)/n  # width of one subinterval
    for i in range(n):
        # Simpson on [a+i*h, a+(i+1)*h]: endpoints plus 4x the midpoint.
        s += f(a+i*h) + 4*f(a+i*h+h/2) + f(a+i*h+h)
    s *= h/6
    return s
def prav_pr(n):
    """Rectangle-rule approximation of the integral of f over [a, b]
    with n subintervals (a, b and f are module globals).

    NOTE(review): despite the name ("right rectangles"), this samples f at
    the LEFT endpoint of each subinterval (a + i*h, i = 0..n-1) — confirm.
    """
    h = (b - a) / n
    total = 0
    for i in range(n):
        total += f(a + i * h)
    return total * h
# Evaluate both methods at both subdivision counts.
I1 = parabola(n1)
I2 = parabola(n2)
I3 = prav_pr(n1)
I4 = prav_pr(n2)
I = [I1, I2, I3, I4]
average = sum(I)/4
if I1 != I3 and I2 != I4:
    # Deviation of each result from the mean: the largest deviation marks
    # the least accurate method (1 = parabola, 2 = rectangles).
    for i in range(len(I)):
        I[i] = abs(I[i] - average)
    kkk = (I.index(max(I)) + 2)//2
else:
    # All results agree: flag "all methods accurate".
    kkk = 3
print('\n N1, N2: |', '{}'.format(n1).center(20), '|', '{}'.format(n2).center(20) + '\n' + '-'*69)
print(' Параболы: |', '{:.5}'.format(I1).center(20), '|', '{:.5}'.format(I2).center(20) + '\n' + '-'*69)
print(' Пр. прямоугольники: |', '{:.5}'.format(I3).center(20), '|', '{:.5}'.format(I4).center(20))
if kkk == 3:
    print('\nВсе методы точны.')
else:
    print('\nСамый не точный метод:', kkk)
eps = float(input('\nВведите eps: '))
# Double the subdivision count until two successive estimates differ by <= eps.
n = 2
if kkk == 1:
    while abs(parabola(2*n) - parabola(n)) > eps:
        n *= 2
elif kkk == 2:
    while abs(prav_pr(2*n) - prav_pr(n)) > eps:
        n *= 2
print('Точность eps при', n)
|
# ASCII control bytes used by the PMS <-> PC interface protocol.
STX: bytes = b"\x02" # Start of text, indicates the start of a message.
ETX: bytes = b"\x03" # End of text, indicates the end of a message
ENQ: bytes = b"\x05" # Enquiry about the PC interface being ready to receive a new message.
ACK: bytes = b"\x06" # Positive acknowledgement to a PMS message or enquiry (ENQ).
NAK: bytes = b"\x15" # Negative acknowledgement to a PMS message or enquiry (ENQ).
LRC_SKIP: bytes = b"\x0D" # The PMS can avoid LRC calculation by sending a 0DH value (return character)
def lrc(message: bytes) -> bytes:
    """Return the one-byte longitudinal redundancy check for *message*.

    The checksum XORs every payload byte and the trailing ETX; STX is
    deliberately excluded from the calculation.
    """
    checksum = 0
    for value in message:
        checksum ^= value
    checksum ^= ETX[0]
    return bytes([checksum])
|
class Interval:
    """A numeric interval whose endpoints are normalized so start <= end."""
    def __init__(self, start, end):
        # Accept the endpoints in either order.
        if start < end:
            self.start = start
            self.end = end
        else:
            self.start = end
            self.end = start
    def overlap(self, other):
        """Return True when the two intervals share any interior point.

        Fix: the original only tested whether *self*'s endpoints fell inside
        *other*, so a.overlap(b) could disagree with b.overlap(a) whenever
        one interval contained the other. Endpoint-only contact
        (a.end == b.start) still does not count, matching the original's
        strict comparisons.
        """
        return self.start < other.end and other.start < self.end
    def __repr__(self):
        return "Interval(start={}, end={})".format(self.start, self.end)
    def __add__(self, other):
        """Return the smallest interval covering both operands."""
        return Interval(start=min(self.start, other.start), end=max(self.end, other.end))
|
# Count down from 10 to 1, then announce liftoff.
counter = 10
while counter > 0:
    print(counter)
    counter -= 1
print('Blastoff!!')
"""Providers for interop between JS rules.
This file has to live in the built-in so that all rules can load() the providers
even if users haven't installed any of the packages/*
These providers allows rules to interoperate without knowledge
of each other.
You can think of a provider as a message bus.
A rule "publishes" a message as an instance of the provider, and some other rule
subscribes to these by having a (possibly transitive) dependency on the publisher.
## Debugging
Debug output is considered orthogonal to these providers.
Any output may or may not have user debugging affordances provided, such as
readable minification.
We expect that rules will have a boolean `debug` attribute, and/or accept the `DEBUG`
environment variable.
Note that this means a given build either produces debug or non-debug output.
If users really need to produce both in a single build, they'll need two rules with
differing 'debug' attributes.
"""
# Provider published by rules that emit modern-syntax (ESModule) JavaScript;
# downstream rules subscribe by reading it off their deps.
JSEcmaScriptModuleInfo = provider(
    doc = """JavaScript files (and sourcemaps) that are intended to be consumed by downstream tooling.
They should use modern syntax and ESModules.
These files should typically be named "foo.mjs"
TODO: should we require that?
Historical note: this was the typescript.es6_sources output""",
    fields = {
        "sources": "depset of direct and transitive JavaScript files and sourcemaps",
    },
)
def transitive_js_ecma_script_module_info(sources, deps = []):
    """Constructs a JSEcmaScriptModuleInfo including all transitive sources from JSEcmaScriptModuleInfo providers in a list of deps.

    Args:
        sources: depset of this target's own JavaScript files and sourcemaps.
        deps: targets whose JSEcmaScriptModuleInfo providers are merged in.

    Returns a single JSEcmaScriptModuleInfo.
    """
    return combine_js_ecma_script_module_info([JSEcmaScriptModuleInfo(sources = sources)] + collect_js_ecma_script_module_infos(deps))
def combine_js_ecma_script_module_info(modules):
    """Combines all JavaScript sources and sourcemaps from a list of JSEcmaScriptModuleInfo providers.

    Args:
        modules: list of JSEcmaScriptModuleInfo providers to merge.

    Returns a single JSEcmaScriptModuleInfo whose sources depset is the
    transitive union of every input provider's sources.
    """
    sources_depsets = []
    for module in modules:
        # append() the single element (the original wrapped it in a
        # one-item list just to extend()).
        sources_depsets.append(module.sources)
    return JSEcmaScriptModuleInfo(
        sources = depset(transitive = sources_depsets),
    )
def collect_js_ecma_script_module_infos(deps):
    """Collects all JSEcmaScriptModuleInfo providers from a list of deps.

    Args:
        deps: targets to scan; those without the provider are skipped.

    Returns a list of JSEcmaScriptModuleInfo providers.
    """
    modules = []
    for dep in deps:
        if JSEcmaScriptModuleInfo in dep:
            # append() the provider directly (the original extend()-ed a
            # one-item list).
            modules.append(dep[JSEcmaScriptModuleInfo])
    return modules
|
# Program: read a character and report whether it is a vowel.
caracter = input("Digite un caracter: ").lower()  # lower() so uppercase input also matches
# Fix: the original tested "a","b","c","d","e" — the first five letters of
# the alphabet — instead of the actual vowels a, e, i, o, u.
if caracter in ("a", "e", "i", "o", "u"):
    print("Es Una vocal")
else:
    print("No es una vocal")
|
# Demo of the full try/except/else/finally chain: prompt for an integer and
# divide 8 by it, handling bad input, division by zero and anything else.
try:
    num = int(input("请输入整数:"))
    result = 8 / num
    print(result)
except ValueError:
    print("请输入正确的整数")
except ZeroDivisionError:
    print("除 0 错误")
except Exception as result:
    print("未知错误 %s" % result)
else:
    # Runs only when no exception was raised.
    print('没有异常才会执行的代码')
    pass
finally:
    # Runs whether or not an exception occurred.
    print("无论是否有异常,都会执行的代码")
# Reference template for the general form:
# try:
#     # code to attempt
#     pass
# except ErrorType1:
#     # handling for error type 1
#     pass
# except ErrorType2:
#     # handling for error type 2
#     pass
# except (ErrorType3, ErrorType4):
#     # handling for error types 3 and 4
#     pass
# except Exception as result:
#     # print the error information
#     print(result)
# else:
#     # code that runs only when no exception was raised
#     pass
# finally:
#     # code that runs whether or not an exception occurred
#     print("...")
|
grocery_list = ["fish", "tomato", "apples"] # Create new list
print("tomato" in grocery_list) # Membership test on a list scans elements (O(n))
grocery_dict = {"fish": 1, "tomato": 6, "apples": 3} # create new dictionary
print("fish" in grocery_dict) # Membership test on a dict checks its keys (O(1))
|
def _impl(ctx):
    """Concatenate ctx.files.chunks into ctx.outputs.out via the merge tool.

    Arguments are passed through an @args-file so the action can be executed
    by a persistent worker (see execution_requirements below).
    """
    # args-file layout: output path first, then one input path per line.
    args = [ctx.outputs.out.path] + [f.path for f in ctx.files.chunks]
    args_file = ctx.actions.declare_file(ctx.label.name + ".args")
    ctx.actions.write(
        output = args_file,
        content = "\n".join(args),
    )
    ctx.actions.run(
        mnemonic = "Concat",
        inputs = ctx.files.chunks + [args_file],
        outputs = [ctx.outputs.out],
        arguments = ["@" + args_file.path],
        executable = ctx.executable.merge_tool,
        execution_requirements = {
            # Run the tool as a persistent worker speaking the JSON protocol.
            "supports-workers": "1",
            "requires-worker-protocol": "json",
        },
    )
# Rule: concatenates `chunks` into `out` using `merge_tool`, a worker-capable
# binary that defaults to //:persistent_worker.
concat = rule(
    implementation = _impl,
    attrs = {
        "chunks": attr.label_list(allow_files = True),
        "out": attr.output(mandatory = True),
        "merge_tool": attr.label(
            executable = True,
            cfg = "exec",
            allow_files = True,
            default = Label("//:persistent_worker"),
        ),
    },
)
|
# "Captain's Room": every group's room number appears k times in the list,
# the captain's room number appears exactly once; print the captain's room.
k = int(input())
r = list(map(int, input().split()))
p = set(r)
# stores the sum of all unique elements x total no of groups
a = sum(p)*k
# stores the sum of elements in the list
b = sum(r)
# (a - b) = (k - 1)*captains_room_no
# prints the difference of a and b divided by k-1
print((a-b)//(k-1))
|
"""
Tema: Herencia.
Curso: Curso de python, video 31.
Plataforma: Youtube.
Profesor: Juan diaz - Pildoras informaticas.
Alumno: @edinsonrequena.
"""
# Base vehicle: brand, model and three boolean state flags.
class Vehiculo:
    def __init__(self, marca, modelo):
        self.marca = marca
        self.modelo = modelo
        self.enmarcha = False  # running/started
        self.acelera = False   # accelerating
        self.frena = False     # braking
    # State mutators: each simply raises the corresponding flag.
    def arrancar(self): self.enmarcha = True
    def acelerar(self): self.acelera = True
    def frenar(self): self.frena = True
    # Print a summary of all attributes.
    def estado(self):
        print(f"""Marca: {self.marca} \n Modelo: {self.modelo} \n En marcha:
{self.enmarcha} \nAcelera: {self.acelera},\n Frena: {self.frena} \n""") # TODO #13
class Moto(Vehiculo):
    # Message set by caballito(); empty until a wheelie has been performed.
    hcaballito = ''
    # Record that a wheelie ("caballito") is being performed.
    def caballito(self): self.hcaballito = 'Voy haciendo caballito'
    # Print the base vehicle state plus the wheelie status.
    def estado(self):
        super().estado()
        print(f'Caballito: {self.hcaballito}')
# Electric vehicle: adds a battery range ("autonomia") to Vehiculo.
class VElecetricos(Vehiculo):
    def __init__(self, autonomia, marcaElec, modeloElec):
        super().__init__(marcaElec, modeloElec)
        self.autonomia = autonomia
    def estado(self):
        # Base state plus the electric-specific range.
        super().estado()
        print(f'Autonomia: {self.autonomia}')
    # NOTE(review): `cargando` is only created here, so reading it before
    # cargarEnergia() is called raises AttributeError — confirm intended.
    def cargarEnergia(self): self.cargando = True
# Electric bicycle: inherits everything from VElecetricos (listing Vehiculo
# again is redundant — VElecetricos already derives from it).
class BiciElectrica(VElecetricos, Vehiculo): pass
class Furgoneta(Vehiculo):
    # Record whether the van carries cargo and return the matching message.
    def llevaCarga(self, carga):
        self.carga = carga
        self.siCarga = 'La furgoneta esta cargada'
        self.noCarga = 'La furgoneta no esta cargada'
        return self.siCarga if self.carga else self.noCarga
    def estado(self): # TODO #22: the cargo status is never printed after the label
        super().estado()
        print(f'Lleva carga?: ')
class App: # TODO #16
    """Demo driver: instantiate each vehicle type and print its state."""
    def crear(self):
        # Electric bicycle object
        bici = BiciElectrica(100, 'Test marca', 'Test Modelo')
        bici.estado()
        # Van object
        furgoneta = Furgoneta('Test', 'Test2')
        furgoneta.estado()
        # Motorbike object
        moto = Moto('Test Moto', 'Test moto')
        moto.caballito()
        moto.estado()
# Script entry point.
if __name__ == '__main__':
    App().crear()
|
def swap(arr, i, j):
    """Exchange the elements of *arr* at indices *i* and *j* in place."""
    arr[i], arr[j] = arr[j], arr[i]
def selectionSort(arr):
    """Sort *arr* in place in ascending order and return it.

    Classic selection sort, O(n^2) comparisons but at most one swap per
    outer pass. Fixes in this revision: removed the per-comparison debug
    prints left in the original, and replaced its swap-on-every-improvement
    behavior (which called a sibling swap() helper repeatedly) with a single
    swap of the minimum element per pass.
    """
    for i in range(len(arr)):
        # Index of the smallest remaining element.
        min_idx = i
        for j in range(i + 1, len(arr)):
            if arr[j] < arr[min_idx]:
                min_idx = j
        if min_idx != i:
            arr[i], arr[min_idx] = arr[min_idx], arr[i]
    return arr
# Driver code: sort a sample array in place and print it before and after.
array = [10, 7, 8, 9, 1, 5]
print(f'Given array: {array}')
selectionSort(array)
print(f'Sorted array: {array}')
|
# Read the puzzle input, one binary string per line.
# NOTE(review): the name `input` shadows the built-in input() from here on.
with open('input.txt', 'r') as reader:
    input = [line for line in reader.read().splitlines()]
def part1(numbers):
    """Gamma * epsilon from equal-length bit strings (AoC 2021 day 3 part 1)."""
    width = len(numbers[0])
    # Count of 1-bits per column.
    ones = [0] * width
    for number in numbers:
        for pos in range(width):
            ones[pos] += int(number[pos])
    # Majority bit per column; a tie or minority of ones yields 0.
    gamma_bits = [1 if count > len(numbers) / 2 else 0 for count in ones]
    gamma = bin_arr_to_int(gamma_bits)
    # Epsilon is gamma with every bit inverted within the word width.
    return gamma * (gamma ^ int("1" * width, 2))
def part2(numbers):
    """Oxygen-generator rating * CO2-scrubber rating (AoC 2021 day 3 part 2)."""
    def rating(keep_most_common):
        # Repeatedly filter candidate indices on the current bit position
        # until a single number remains.
        candidates = list(range(len(numbers)))
        pos = 0
        while len(candidates) > 1:
            ones = sum(int(numbers[i][pos]) for i in candidates)
            if keep_most_common:
                wanted = "1" if ones >= len(candidates) / 2 else "0"
            else:
                wanted = "1" if ones < len(candidates) / 2 else "0"
            candidates = [i for i in candidates if numbers[i][pos] == wanted]
            pos += 1
        return bin_arr_to_int(numbers[candidates[0]])
    return rating(True) * rating(False)
def bin_arr_to_int(binary):
    """Interpret an iterable of bits (ints or one-char strings) as a binary number."""
    digits = "".join(str(bit) for bit in binary)
    return int(digits, 2)
# Solve both parts against the puzzle input read above.
print(part1(input))
print(part2(input))
|
class Solution(object):
    """Check whether n is a power of three (LeetCode 326)."""
    def isPowerOfThree(self, n):
        """Binary-search the exponent in [0, 19] (3**19 exceeds 2**31 - 1).

        Fix: the original computed the midpoint with true division ("/"),
        which yields fractional exponents in Python 3 — e.g. n = 1 (3**0)
        returned False because mid never landed on an integer.
        :type n: int
        :rtype: bool
        """
        left, right = 0, 19
        while left <= right:
            mid = left + (right - left) // 2
            if 3 ** mid == n:
                return True
            elif 3 ** mid > n:
                right = mid - 1
            else:
                left = mid + 1
        return False
    def isPowerOfThree2(self, n):
        """Repeatedly divide by 3; n is a power of three iff we end at 1.

        Fix: floor division keeps n an int ("/=" drifted to float in Py3).
        :type n: int
        :rtype: bool
        """
        while n > 1 and n % 3 == 0:
            n //= 3
        return n == 1
"""Top-level package for QBOB."""
__author__ = """Guen Prawiroatmodjo"""
__email__ = 'guenp@microsoft.com'
__version__ = '0.1.0'
|
def sieve(n):
checkArray = [1] * (n+1)
checkArray[0] = 0
checkArray[1] = 0
for i in range(2,n+1):
j = i + i
step = j + i
for j in range(j,n+1,i):
checkArray[j] = 0
for i in range(0,n+1):
if checkArray[i] != 0:
print(i)
if __name__ == "__main__":
n = int(input())
sieve(n) |
"""Different environment settings."""
# TODO: Add correct objects
LOCATION_TYPE_MAP = {
"floor": {"color": "gray", "gray_color": "gray", "mesh": ":/objects/floor.obj",},
"rack": {"color": "#62ca5f", "gray_color": "gray", "mesh": ":/objects/rack.obj",},
"wall": {"color": "black", "gray_color": "black", "mesh": ":/objects/floor.obj",},
"inbound_door": {
"color": "#fde724",
"gray_color": "#222",
"mesh": ":/objects/floor.obj",
},
"outbound_door": {
"color": "#fd8a24",
"gray_color": "#aaa",
"mesh": ":/objects/floor.obj",
},
"staging_area": {
"color": "#2c728e",
"gray_color": "#ddd",
"mesh": ":/objects/floor.obj",
},
"custom": {
"color": "#ca5f5f",
"gray_color": "white",
"mesh": ":/objects/floor.obj",
},
}
|
# Printing a reverse (upside-down) pyramid of stars, e.g. for n = 5:
'''
* * * * *
* * * *
* * *
* *
*
'''
n=int(input())
for i in range(n,0,-1): # rows, from widest (n stars) down to 1
    for j in range(0,n-i): # leading spaces shift each row right
        print(end=' ')
    for j in range(0,i): # stars in this row
        print("*",end=' ')
    print()
# Alternative: build each row with string repetition.
n=int(input())
for i in range(n, 0, -1):
    print(' ' * (n - i) + '* ' * (i))
|
class StepParseError(Exception):
    """Raised when a step fails to parse."""
    pass
class RatDisconnectedError(Exception):
    """Raised when the rat connection is lost."""
    pass
class InvalidTimeoutExceptionError(Exception):
    """Raised when a supplied timeout value is invalid."""
    pass
class RatCallbackTimeoutError(Exception):
    """Raised when a rat callback does not complete in time."""
    pass
class MissingFileError(Exception):
    """Raised when an expected file is absent."""
    pass
|
# Class: simple car model with state, speed and optional nitro.
class carro:
    # Constructor: nitro, state and name have defaults.
    def __init__(self, Ano, Velocidade_maxima, Velocidade_atual , Nitro = 'Sem nitro', Estado = 'Desligado', Nome = 'Sem Nome'):
        self.Estado = Estado  # 'Ligado' (on) / 'Desligado' (off)
        self.Nome = Nome
        self.Ano = Ano
        self.Velocidade_maxima = Velocidade_maxima  # top speed, km/h
        self.Velocidade_atual = Velocidade_atual    # current speed, km/h
        self.Nitro = Nitro
    # Methods:
    # Set the on/off state.
    def ligar_desligar(self,Estado):
        self.Estado = Estado
    # Rename the car.
    def mudar_nome(self,Nome):
        self.Nome = Nome
    # Increase speed by 1 km/h while the car is on and below its maximum.
    def acelerar(self):
        if self.Estado == 'Ligado':
            if self.Velocidade_atual < self.Velocidade_maxima:
                self.Velocidade_atual += 1
                print(f'A velocidade atual acelerada agora é de {self.Velocidade_atual} km/h')
            if self.Velocidade_atual == self.Velocidade_maxima:
                print(f'Velocidade maxima de {self.Velocidade_maxima} km/h atingida!')
    # Decrease speed by 1 km/h while the car is on and moving.
    def frear(self):
        if self.Estado == 'Ligado' and self.Velocidade_atual > 0:
            self.Velocidade_atual -= 1
            print(f'Velocidade atual reduzida para {self.Velocidade_atual} km/h')
# Object 01: basic car without nitro.
Meu_carro = carro( 2005, 100, 0, Estado='Ligado', Nome='Golf')
print(f'Nome: {Meu_carro.Nome}')
print(f'Ano: {Meu_carro.Ano}')
print(f'Velocidade maxima: {Meu_carro.Velocidade_maxima}')
print(f'Estado: {Meu_carro.Estado}')
print(f'Status de Nitro: {Meu_carro.Nitro}')
print(f'Velocidade atual : {Meu_carro.Velocidade_atual}')
Meu_carro.acelerar()
Meu_carro.acelerar()
Meu_carro.frear()
# Object 02: sports car with nitro (rebinds the same variable).
Meu_carro = carro(2020, 400, 0, Estado='Ligado', Nitro= 'Nitro Seco x8', Nome= 'Koenigsegg', )
print(f'Nome: {Meu_carro.Nome}')
print(f'Ano: {Meu_carro.Ano}')
print(f'Velocidade maxima: {Meu_carro.Velocidade_maxima}')
print(f'Estado: {Meu_carro.Estado}')
print(f'Status de Nitro: {Meu_carro.Nitro}')
print(f'Velocidade atual : {Meu_carro.Velocidade_atual}')
Meu_carro.acelerar()
Meu_carro.acelerar()
Meu_carro.acelerar()
Meu_carro.acelerar()
Meu_carro.frear()
|
def max_of_maximums(dTEmax, dsRNAmax):
    """Merge two {scaffold: max-value} dicts, keeping the larger value for
    scaffolds present in both; scaffolds unique to either dict are copied.
    Scaffolds whose merged value is 0 are printed, as before.

    Fixes: the original iterated the two key sets as nested loops, so if
    either dict was empty the loop body never ran and EVERY key was lost;
    it also did redundant O(len(dTEmax) * len(dsRNAmax)) work. Two
    independent passes handle all cases in linear time.
    """
    print('Finding max of maximums')
    dmax = {}
    for key, value in dsRNAmax.items():
        # Take the max with dTEmax when the scaffold exists there too.
        dmax[key] = max(value, dTEmax[key]) if key in dTEmax else value
    for key, value in dTEmax.items():
        if key not in dsRNAmax:
            dmax[key] = value  # scaffold present only in dTEmax
    for k, v in list(dmax.items()):
        if v == 0:
            print(k)
    return dmax
def max_coords_per_scaffold(d):
    """Return {scaffold: max coordinate}, where the coordinate is read from
    tab-separated field index 3 of each line in the scaffold's line list.

    Scaffolds whose line list is empty are omitted from the result.
    """
    print('Calcualting maximum coordinates per scaffold')
    dscaff_max = {}
    for scaff, lines in d.items():
        if lines:
            # Field index 3 holds the coordinate in these records.
            dscaff_max[scaff] = max(int(line.split('\t')[3]) for line in lines)
    return dscaff_max
# -*- coding: utf-8 -*-
'''
Utility for chinese
'''
# Mapping from Chinese numeral characters to their ASCII digit strings.
NUM_MAP = {
    "零": "0",
    "一": "1",
    "二": "2",
    "三": "3",
    "四": "4",
    "五": "5",
    "六": "6",
    "七": "7",
    "八": "8",
    "九": "9",
    "十": "10"
}
def chinese_num_replace(s):
    """Replace each Chinese numeral character in *s* with its ASCII digits.

    Characters without a mapping pass through unchanged; falsy inputs
    (empty string, None) are returned as-is.
    """
    if not s:
        return s
    converted = (NUM_MAP.get(ch, ch) for ch in s)
    return ''.join(converted)
|
# Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license.
# See LICENSE in the project root for license information.
# Client ID and secret.
# SECURITY NOTE(review): credentials are hard-coded in source. They should be
# loaded from configuration or a secret store, and rotated if ever real.
client_id = '07c53e00-1adb-4fa7-8933-fd98f6a4da84'
client_secret = '7CmTo1brGWMmh5RoFiTdO0n'
|
# Service registry: maps each service name to its exposed ports by protocol.
registry = {
    "centivize_service": {
        "grpc": 7003,
    },
}
|
''' additional_datastructures.py: File containing custom utility data structures for use in simple_rl. '''
class SimpleRLStack(object):
    ''' Implementation for a basic Stack data structure (list-backed LIFO) '''
    def __init__(self, _list=None):
        '''
        Args:
            _list (list) : underlying elements in the stack; a fresh empty
                list is used when omitted.
        '''
        self._list = [] if _list is None else _list
    def __repr__(self):
        # Comma-separated elements with a trailing ", " (historical format).
        return ''.join(str(item) + ', ' for item in self._list)
    def push(self, element):
        '''Place *element* on top of the stack.'''
        return self._list.append(element)
    def pop(self):
        '''Remove and return the top element, or None when empty.'''
        if not self._list:
            return None
        return self._list.pop()
    def peek(self):
        '''Return the top element without removing it, or None when empty.'''
        if not self._list:
            return None
        return self._list[-1]
    def is_empty(self):
        '''Return True when the stack holds no elements.'''
        return len(self._list) == 0
    def size(self):
        '''Return the number of elements currently on the stack.'''
        return len(self._list)
|
# Collect and echo data for 10 patients: name, age, weight, height,
# SUS card number and diagnosis.
cont = 0
while cont < 10:
    cont = cont + 1
    nome = input('Digite o nome do paciente: ')
    idade = int(input('Digite a idade do paciente: '))
    peso = float(input('Digite o peso do paciente: '))
    altura = float(input('Digite a altura do paciente: '))
    sus = int(input('Digite o número do cartão do SUS do paciente (15 algarismos): '))
    diag = input('Digite o diagnóstico do paciente: ')
    print(f'{nome},{altura},{idade},{peso},{sus},{diag}')
# lit/FileCheck test: the RUN lines execute this file with llvmPy and pipe
# the output through FileCheck, which matches the CHECK patterns in order.
# The RUN/CHECK directives below are functional — do not edit them casually.
# RUN: llvmPy %s > %t1
# RUN: cat -n %t1 >&2
# RUN: cat %t1 | FileCheck %s
print(1 * 2)
# CHECK: 2
print(2 * 2)
# CHECK-NEXT: 4
print(0 * 2)
# CHECK-NEXT: 0
|
# 1st solution: enumerate the magical numbers within one period (up to
# lcm(a, b)) by merging the two multiple streams, then use periodicity.
class Solution:
    def nthMagicalNumber(self, n: int, a: int, b: int) -> int:
        """Return the n-th positive integer divisible by a or b, mod 1e9+7."""
        g = self.gcd(a, b)
        # lcm(a, b): the magical-number pattern repeats with this period.
        largest = a // g * b
        lst = []
        numOne, numTwo = a, b
        # Two-pointer merge of the multiples of a and b up to the lcm,
        # emitting each magical number exactly once.
        while numOne <= largest and numTwo <= largest:
            if numOne < numTwo:
                lst.append(numOne)
                numOne += a
            elif numOne > numTwo:
                lst.append(numTwo)
                numTwo += b
            else:
                # Common multiple: advance both streams, record once.
                lst.append(numOne)
                numOne += a
                numTwo += b
        mod = 10**9 + 7
        # n = k full periods plus r extra terms into the next period.
        k, r = n // len(lst), n % len(lst)
        if r == 0:
            ans = k * lst[-1]
        else:
            ans = k * lst[-1] + lst[r - 1]
        return ans % mod
    def gcd(self, a, b):
        """Greatest common divisor via Euclid's algorithm."""
        while a:
            a, b = b % a, a
        return b
# 2nd solution, binary search
# O(log(N*min(a, b))) time | O(1) space
class Solution:
    def nthMagicalNumber(self, n: int, a: int, b: int) -> int:
        """Binary-search the smallest x with >= n magical numbers <= x."""
        MOD = 10 ** 9 + 7
        lcm = a // self.gcd(a, b) * b
        def reaches_n(x):
            # Inclusion-exclusion count of numbers <= x divisible by a or b.
            return x // a + x // b - x // lcm >= n
        lo, hi = 0, n * min(a, b)
        while lo < hi:
            mid = (lo + hi) // 2
            if reaches_n(mid):
                hi = mid
            else:
                lo = mid + 1
        return lo % MOD
    def gcd(self, a, b):
        """Greatest common divisor via Euclid's algorithm."""
        while a:
            a, b = b % a, a
        return b
# 3rd solution, mathematical
# O(a + b) time | O(1) space
class Solution:
    def nthMagicalNumber(self, n: int, a: int, b: int) -> int:
        """Return the n-th positive integer divisible by a or b, mod 1e9+7."""
        mod = 10**9 + 7
        g = self.gcd(a, b)
        # lcm(a, b): the magical numbers repeat with this period.
        largest = a // g * b
        # Magical numbers per period, by inclusion-exclusion (the lcm itself
        # is counted once, hence the -1).
        m = largest // a + largest // b - 1
        q, r = divmod(n, m)
        if r == 0:
            return q * largest % mod
        # Walk the first r magical numbers of the next period by advancing
        # whichever multiple stream is currently smaller.
        heads = [a, b]
        for _ in range(r - 1):
            if heads[0] <= heads[1]:
                heads[0] += a
            else:
                heads[1] += b
        return (q * largest + min(heads)) % mod
    def gcd(self, a, b):
        """Greatest common divisor via Euclid's algorithm."""
        while a:
            a, b = b % a, a
        return b
class Stopper:
    """Base class for implementing a Tune experiment stopper.
    Allows users to implement experiment-level stopping via ``stop_all``. By
    default, this class does not stop any trials. Subclasses need to
    implement ``__call__`` and ``stop_all``.
    .. code-block:: python
        import time
        from ray import tune
        from ray.tune import Stopper
        class TimeStopper(Stopper):
            def __init__(self):
                self._start = time.time()
                self._deadline = 300
            def __call__(self, trial_id, result):
                return False
            def stop_all(self):
                return time.time() - self._start > self._deadline
        tune.run(Trainable, num_samples=200, stop=TimeStopper())
    """
    def __call__(self, trial_id, result):
        """Returns true if the trial should be terminated given the result."""
        raise NotImplementedError
    def stop_all(self):
        """Returns true if the experiment should be terminated."""
        raise NotImplementedError
class NoopStopper(Stopper):
    """Stopper that never stops any trial or the experiment."""
    def __call__(self, trial_id, result):
        return False
    def stop_all(self):
        return False
class FunctionStopper(Stopper):
    """Adapts a plain callable ``fn(trial_id, result) -> bool`` into a
    Stopper. Per-trial stopping delegates to the wrapped function;
    experiment-level stopping is never requested."""
    def __init__(self, function):
        self._fn = function
    def __call__(self, trial_id, result):
        return self._fn(trial_id, result)
    def stop_all(self):
        return False
    @classmethod
    def is_valid_function(cls, fn):
        """Return True when *fn* is a plain callable (not a Stopper subclass).

        Raises ValueError for callables that also define ``stop_all``: those
        were probably meant to be Stopper subclasses and would otherwise be
        wrapped here, silently losing their stop_all behavior.
        """
        is_function = callable(fn) and not issubclass(type(fn), Stopper)
        if is_function and hasattr(fn, "stop_all"):
            raise ValueError(
                "Stop object must be ray.tune.Stopper subclass to be detected "
                "correctly.")
        return is_function
|
def old_exponent(n, k):
    """
    n is base, k is exponent.

    Naive O(k) repeated multiplication, kept for comparison with
    newExponent. NOTE(review): prints the result instead of returning it
    (the function returns None).
    """
    answer = 1
    for i in range(k):
        answer *= n
    print(answer)
def newExponent(n, k):
    """Return n**k for integer k >= 0.

    Exponentiation by squaring: recurse on k // 2 once and square the
    result, giving O(log k) multiplications. Fixes in this revision: the
    original recursed on BOTH halves of k (still O(k) multiplications) and
    printed a debug trace on every call; handling for k == 0 is also made
    explicit before the k == 1 base case.
    """
    if k == 0:
        return 1
    if k == 1:
        return n
    half = newExponent(n, k // 2)
    result = half * half
    # One extra factor of n when k is odd.
    if k % 2:
        result *= n
    return result
|
# Default parameters:
# A default value (e.g. last_name='unknown') may be given to trailing
# parameters, i.e. those after the required ones.
# If every parameter had a default, the signature would read:
#     def user_info(first_name='unknown', last_name='unknown', age=None)
# None is the conventional default for numbers; 'unknown' for strings.
def user_info(first_name, last_name, age):
    """Print a short profile: first name, last name, and age."""
    fields = (("First name", first_name), ("last name", last_name), ("age", age))
    for label, value in fields:
        print(f"Your {label} is: {value} ")


user_info("Beenash", "Pervaiz", 20)
quant = int(input('Digite a quantidade de termos que você quer ver: '))
# Seed terms of the Fibonacci sequence.
anterior, atual = 0, 1
print(anterior, atual, end=' ')
# The first two terms are already printed, so emit quant - 2 more.
for _ in range(quant - 2):
    anterior, atual = atual, anterior + atual
    print(atual, end=' ')
'''
Calculate an optimized configuration based on overall memory, CPU, and sensible defaults.
Will configure Spark and YARN.
The formula is run on a 5-node t2.large cluster hosted in AWS. t2.large nodes have 2 vCPUs and 8 GB of memory each.
4 nodes are designated as YARN node managers. The formula runs with default values.
'''
def constants(cluster, log):
    '''
    Sets the constants needed for calculating the formula.
    :param cluster: Cluster configuration connection
    :param log: simple log interface with log.info, log.error, log.warning, log.fatal, log.debug
    :return: a dictionary with all constants
    '''
    # Entries are either literal values or single-argument lambdas that
    # validate/normalize a user override and fall back to a default.
    const = {
        "NUM_NM_WORKERS": len(cluster.yarn.nodemanager.hosts.all),
        # The number of workers with the node manager role
        # 4 in our sample cluster
        "NM_WORKER_CORES": cluster.yarn.nodemanager.hosts.max_cores,
        # The max number of cores for any node manager host
        # 2 in our sample cluster
        "NM_WORKER_MEM": cluster.yarn.nodemanager.hosts.max_memory,
        # The max amount of memory available on any node manager host (bytes)
        # 7933714432 bytes in our sample cluster
        "MIN_NM_MEMORY": gb_to_bytes(2),
        "MEM_FRACTION_FOR_OTHER_SERVICES": lambda x: x if x is not None and x >= 0 and x < 1 else 0.125,
        # percentage of overall system memory that should be preserved for other services besides yarn.
        # Default is 12.5 %
        "MAPREDUCE_JOB_COUNTERS_MAX": lambda x: x if x is not None and x >= 120 else 500,
        # max number of map reduce counters. The yarn default is 120, we recommend 500.
        "SPARK_DRIVER_MAXPERMSIZE": lambda x: x if x is not None and x >= 512 else 512,
        # The spark driver's max perm size (MB), i.e.
        # spark-submit --driver-java-options " -XX:MaxPermSize=SOME_SIZE M "
        "ENABLE_SPARK_SHUFFLE_SERVICE": lambda x: False if str(x).lower() == "false" else True,
        # Enable spark shuffle service so we can use dynamic allocation
        "NUM_THREADS": lambda x: x if x is not None and x > 0 else 1,
        # The number of threads being supported by this configuration
        "YARN_SCHEDULER_MINIMUM_ALLOCATION_MB": lambda x: x if x is not None and x >= 1024 else 1024,
        # NOTE(review): (x / 512) * 512 only rounds down to a 512 multiple under
        # integer division (Python 2). Under Python 3 true division it is a
        # no-op; confirm the target interpreter (should likely be x // 512 * 512).
        "MAPREDUCE_MINIMUM_AM_MEMORY_MB": lambda x: (x / 512) * 512 if x is not None and x >= 1024 else 1024,
        # The amount of memory the MR AppMaster needs.
        # https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml
        "MAX_HEAP_PERCENT": lambda x: x if x is not None else 0.75,
        # how much memory should be allocated to java heap. i.e. mapreduce.map.memory.mb * 0.75,
        # mapreduce.reduce.java.opts.max.heap * 0.75
        "MAPREDUCE_MINIMUM_EXECUTOR_MEMORY_MB": lambda x: (x / 512) * 512 if x is not None and x >= 1024 else 1024,
        # minimum executor memory (MB); same rounding caveat as above
        "SPARK_MEMORY_OVERHEAD_MIN": 384,
        # minimum amount of memory (MB) allocated for spark overhead
        "MEMORY_OVERHEAD": 0.10,
        # The fraction of off-heap memory to be allocated for
        # spark.yarn.executor.memoryOverhead, spark.yarn.driver.memoryOverhead, spark.yarn.am.memoryOverhead.
        # variable from 6 to 10 percent
        # http://spark.apache.org/docs/latest/running-on-yarn.html#spark-properties
        "YARN_INCREMENT_ALLOCATION_MB": 512
        # memory increment when requesting containers
    }
    if (const["NM_WORKER_MEM"] < (const["MIN_NM_MEMORY"])):
        # NOTE(review): MIN_NM_MEMORY is in bytes but the message formats it as
        # "{0}GB" — the logged number will be the raw byte count; verify intent.
        log.fatal(
            "Running the toolkit with less than {0}GB memory for YARN is not supported.".format(const["MIN_NM_MEMORY"]))
    elif (const["NM_WORKER_MEM"] <= (gb_to_bytes(256))):
        const["MAX_JVM_MEMORY"] = const["NM_WORKER_MEM"] / 4
        '''
        Java Heap Size should not go over 25% of total memory per node manager
        In our sample cluster this would be 7933714432 / 4 = 1983428608
        '''
    else:
        # for node managers with greater than 256 GB RAM, JVM memory should still be at most 64GB
        const["MAX_JVM_MEMORY"] = gb_to_bytes(64)
    return const
def formula(cluster, log, constants):
    """
    formula for calculating the optimized configuration
    :param cluster: Cluster configuration connection
    :param log: simple log interface with log.info, log.error, log.warning, log.fatal, log.debug
    :param constants: the calculated constants with any user overrides from formula-args
    :return: a dictionary with cdh configurations
    """
    cdh = {}
    # Validate user defined parameters in formula-args.yaml file
    if (bytes_to_mb(constants["MAX_JVM_MEMORY"]) < constants["MAPREDUCE_MINIMUM_EXECUTOR_MEMORY_MB"]):
        '''
        Make sure the user provided value is within the bounds of the cluster resources.
        '''
        log.warning("Container larger than {0}MB are not supported".format(constants["MAX_JVM_MEMORY"]))
    if constants["MEM_FRACTION_FOR_OTHER_SERVICES"] < 0:
        # NOTE(review): "non-nagative" is a typo, but it is a runtime log string,
        # so it is deliberately left unchanged in this documentation pass.
        log.fatal("{0} must be non-nagative".format("MEM_FRACTION_FOR_OTHER_SERVICES"))
    constants["SPARK_YARN_DRIVER_MEMORYOVERHEAD"] = \
        max(constants["SPARK_MEMORY_OVERHEAD_MIN"], constants["MAPREDUCE_MINIMUM_AM_MEMORY_MB"] * constants["MEMORY_OVERHEAD"])
    '''
    6-10 percent of driver memory with a minimum of 384
    in our sample cluster
    max(384, 1024 * 0.10) = 384
    '''
    constants["SPARK_YARN_EXECUTOR_MEMORYOVERHEAD"] = \
        max(constants["SPARK_MEMORY_OVERHEAD_MIN"], constants["MAPREDUCE_MINIMUM_EXECUTOR_MEMORY_MB"] * constants["MEMORY_OVERHEAD"])
    '''
    6-10 percent of executor memory with a minimum of 384
    in our sample cluster
    max(384, 1024 * 0.10 ) = 384
    '''
    cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.YARN_NODEMANAGER_RESOURCE_CPU_VCORES"] = constants["NM_WORKER_CORES"]
    '''
    yarn.nodemanager.resource.cpu-vcores
    Number of CPU cores that can be allocated for containers. Typically set to the number of physical cores on each machine.
    in our sample cluster
    2
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.YARN_APP_MAPREDUCE_AM_RESOURCE_CPU_VCORES"] = 1
    '''
    yarn.app.mapreduce.am.resource.cpu-vcores
    The number of virtual cores the mapreduce application master needs
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_JOB_COUNTERS_LIMIT"] = constants["MAPREDUCE_JOB_COUNTERS_MAX"]
    '''
    mapreduce.job.counters.limit
    yarn's default is 120, our default is 500.
    Limit on the number of user counters allowed per job.
    (https://hadoop.apache.org/docs/r2.6.0/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml)
    '''
    cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.NODEMANAGER_MAPRED_SAFETY_VALVE"] = \
        "<property><name>mapreduce.job.counters.max</name><value>%d</value></property>" % (
            constants["MAPREDUCE_JOB_COUNTERS_MAX"])
    '''
    mapreduce.job.counters.max
    yarn's default is 120, our default is 500. This sets the old max counters value from hadoop 1.x.
    Limit on the number of user counters allowed per job.
    (https://hadoop.apache.org/docs/r2.4.1/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml)
    '''
    cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.RESOURCEMANAGER_MAPRED_SAFETY_VALVE"] = \
        cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.NODEMANAGER_MAPRED_SAFETY_VALVE"]
    '''
    Set the same old counters max value for resource managers
    '''
    cdh["YARN.JOBHISTORY.JOBHISTORY_BASE.JOBHISTORY_MAPRED_SAFETY_VALVE"] = \
        cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.NODEMANAGER_MAPRED_SAFETY_VALVE"]
    '''
    Set the same old counters max value for job history server
    '''
    MEM_FOR_OTHER_SERVICES = int(constants["NM_WORKER_MEM"] * constants["MEM_FRACTION_FOR_OTHER_SERVICES"])
    '''
    Memory reserved for other cluster services.
    the default calculation is total worker memory * 0.125
    in our sample cluster
    (7933714432 * 0.125 )= 991714304
    '''
    MEM_PER_NM = constants["NM_WORKER_MEM"] - MEM_FOR_OTHER_SERVICES
    '''
    Total worker memory minus the percentage reserved for other services
    in our sample cluster
    7933714432 - 991714304 = 6942000128
    '''
    cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"] = constants["YARN_INCREMENT_ALLOCATION_MB"]
    '''
    yarn.scheduler.increment-allocation-mb
    https://hadoop.apache.org/docs/r2.6.0/hadoop-yarn/hadoop-yarn-common/yarn-default.xml
    in our sample cluster
    512
    '''
    cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_MAXIMUM_ALLOCATION_MB"] = \
        (
            int(bytes_to_mb(MEM_PER_NM) -
                max(
                    constants["SPARK_DRIVER_MAXPERMSIZE"],
                    constants["SPARK_YARN_DRIVER_MEMORYOVERHEAD"],
                    constants["SPARK_YARN_EXECUTOR_MEMORYOVERHEAD"]
                ) * 3
                ) / cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"]
        ) * cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"]
    '''
    yarn.scheduler.maximum-allocation-mb
    take available worker memory, account for memory overheads, and round to the nearest yarn.scheduler.increment-allocation-mb.
    (https://hadoop.apache.org/docs/r2.6.0/hadoop-yarn/hadoop-yarn-common/yarn-default.xml)
    in our sample cluster
    ( int( 6620 - max( 512, 512, 384) * 3 )/ 512 ) * 512 = 4608
    NOTE(review): the rounding only takes effect under integer division
    (Python 2); under Python 3 true division it is a no-op — confirm interpreter.
    '''
    cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.YARN_NODEMANAGER_RESOURCE_MEMORY_MB"] = \
        cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_MAXIMUM_ALLOCATION_MB"]
    '''
    yarn.nodemanager.resource.memory-mb
    Amount of physical memory, in MB, that can be allocated for containers. we use the previous calculation from
    yarn.scheduler.maximum-allocation-mb
    https://hadoop.apache.org/docs/r2.6.1/hadoop-yarn/hadoop-yarn-common/yarn-default.xml
    in our sample cluster
    4608
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_MAP_MEMORY_MB"] = \
        max(
            min((
                (cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_MAXIMUM_ALLOCATION_MB"] / constants["NM_WORKER_CORES"]) /
                cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"]) *
                cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"],
                bytes_to_mb(constants["MAX_JVM_MEMORY"])
            ),
            constants["MAPREDUCE_MINIMUM_EXECUTOR_MEMORY_MB"]
        )
    '''
    mapreduce.map.memory.mb
    The amount of memory to request from the scheduler for each map task.
    https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml
    result is floored at MAPREDUCE_MINIMUM_EXECUTOR_MEMORY_MB
    In our sample cluster
    max( min( ( ( 4608/4 ) / 512 ) * 512, 1891), 1024) = 1891
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_REDUCE_MEMORY_MB"] = \
        2 * min(
            cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_MAP_MEMORY_MB"],
            bytes_to_mb(constants["MAX_JVM_MEMORY"])
        )
    '''
    mapreduce.reduce.memory.mb
    The amount of memory to request from the scheduler for each reduce task.
    (https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml)
    in our sample cluster
    2 * min( 1891, 1891 ) = 3782
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_MAP_JAVA_OPTS_MAX_HEAP"] = \
        mb_to_bytes(int(cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_MAP_MEMORY_MB"] * constants["MAX_HEAP_PERCENT"]))
    '''
    mapreduce.map.java.opts.max.heap
    max java heap size for the map task. Standard practice is to make this 75 percent of mapreduce.map.memory.mb.
    in our sample cluster
    mb_to_bytes(int( 1891 * 0.75 )) = 1486880768 bytes or 1418 MB
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_REDUCE_JAVA_OPTS_MAX_HEAP"] = \
        2 * cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_MAP_JAVA_OPTS_MAX_HEAP"]
    '''
    mapreduce.reduce.java.opts.max.heap
    max java heap size for the reduce task. Standard practice is to make this 75 percent of mapreduce.reduce.memory.mb
    in our sample cluster
    2 * 1486880768 = 2973761536 or 2836
    '''
    cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_MINIMUM_ALLOCATION_MB"] = \
        constants["YARN_SCHEDULER_MINIMUM_ALLOCATION_MB"]
    '''
    yarn.scheduler.minimum-allocation-mb
    The minimum allocation for every container request at the RM, in MBs.
    Memory requests lower than this won't take effect, and the specified value will get allocated at minimum.
    https://hadoop.apache.org/docs/r2.6.0/hadoop-yarn/hadoop-yarn-common/yarn-default.xml
    in our cluster
    1024
    '''
    cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_MAXIMUM_ALLOCATION_VCORES"] = \
        cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.YARN_NODEMANAGER_RESOURCE_CPU_VCORES"]
    '''
    yarn.scheduler.maximum-allocation-vcores
    The maximum allocation for every container request at the RM, in terms of virtual CPU cores.
    Requests higher than this won't take effect, and will get capped to this value.
    https://hadoop.apache.org/docs/r2.6.0/hadoop-yarn/hadoop-yarn-common/yarn-default.xml
    in our cluster
    2
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.YARN_APP_MAPREDUCE_AM_RESOURCE_MB"] = \
        constants["MAPREDUCE_MINIMUM_AM_MEMORY_MB"]
    '''
    yarn.app.mapreduce.am.resource.mb
    The amount of memory the MR AppMaster needs.
    https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml
    in our sample cluster
    1024
    '''
    cdh["YARN.GATEWAY.GATEWAY_BASE.YARN_APP_MAPREDUCE_AM_MAX_HEAP"] = \
        mb_to_bytes(
            int(cdh["YARN.GATEWAY.GATEWAY_BASE.YARN_APP_MAPREDUCE_AM_RESOURCE_MB"] * constants["MAX_HEAP_PERCENT"]))
    '''
    YARN_APP_MAPREDUCE_AM_MAX_HEAP
    The maximum heap size, in bytes, of the Java MapReduce ApplicationMaster.
    https://www.cloudera.com/documentation/enterprise/5-7-x/topics/cm_props_cdh570_yarn_mr2included_.html
    in our sample cluster
    mb_to_bytes(int( 1024 * 0.75)) = 805306368
    '''
    CONTAINERS_ACCROSS_CLUSTER = \
        int(cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.YARN_NODEMANAGER_RESOURCE_MEMORY_MB"] \
            / (
                (
                    cdh["YARN.GATEWAY.GATEWAY_BASE.MAPREDUCE_MAP_MEMORY_MB"] + (
                        2 *
                        max(
                            constants["SPARK_YARN_DRIVER_MEMORYOVERHEAD"],
                            constants["SPARK_YARN_EXECUTOR_MEMORYOVERHEAD"],
                            cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"]
                        ) / cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"]
                    ) * cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.YARN_SCHEDULER_INCREMENT_ALLOCATION_MB"]
                )
            ) * constants["NUM_NM_WORKERS"])
    '''
    calculate the total number of containers that can run across the cluster. This value is not used in this formula but
    it's useful as a reference if you want to create yarn client configuration and need to know how many containers
    you can request.
    in our sample cluster
    int(4608 / ( (1891 + (2 * max(384, 384, 512) / 512) * 512 ) ) * 4) = 4
    '''
    '''
    Check if the user provided num threads falls within the capacity of the cluster.
    '''
    if constants["NUM_THREADS"] > (CONTAINERS_ACCROSS_CLUSTER / 2):
        # NOTE(review): min(X, X) below is redundant — both arguments are the
        # same expression; presumably one was meant to be a different quantity.
        log.fatal("Number of concurrent threads should be at most {0}"
                  .format((min(CONTAINERS_ACCROSS_CLUSTER, CONTAINERS_ACCROSS_CLUSTER) / 2)))
        log.info("{0} could be as large as {1} for multi-tenacty".format("NUM_THREADS", (CONTAINERS_ACCROSS_CLUSTER / 2)))
    # NOTE(review): EXECUTORS_PER_THREAD is computed but neither used below nor
    # returned — confirm whether it should be part of the result.
    EXECUTORS_PER_THREAD = int((CONTAINERS_ACCROSS_CLUSTER - constants["NUM_THREADS"]) / constants["NUM_THREADS"])
    if (constants["ENABLE_SPARK_SHUFFLE_SERVICE"]):
        cdh["YARN.JOBHISTORY.JOBHISTORY_BASE.JOBHISTORY_CONFIG_SAFETY_VALVE"] = \
            "{0}\n{1}".format(
                "<property>"
                "<name>""yarn.nodemanager.aux-services""</name>"
                "<value>""spark_shuffle,mapreduce_shuffle""</value>"
                "</property>",
                "<property>"
                "<name>yarn.nodemanager.aux-services.spark_shuffle.class</name>"
                "<value>""org.apache.spark.network.yarn.YarnShuffleService</value>"
                "</property>")
        '''
        Set the configuration for the spark shuffle service if enabled
        '''
        cdh["YARN.NODEMANAGER.NODEMANAGER_BASE.NODEMANAGER_CONFIG_SAFETY_VALVE"] = \
            cdh["YARN.JOBHISTORY.JOBHISTORY_BASE.JOBHISTORY_CONFIG_SAFETY_VALVE"]
        cdh["YARN.RESOURCEMANAGER.RESOURCEMANAGER_BASE.RESOURCEMANAGER_CONFIG_SAFETY_VALVE"] = \
            cdh["YARN.JOBHISTORY.JOBHISTORY_BASE.JOBHISTORY_CONFIG_SAFETY_VALVE"]
    return {"cdh": cdh }
def total(lists):
    """Return the list of running (prefix) sums of the given numbers.

    E.g. total([1, 2, 3]) -> [1, 3, 6].
    """
    prefix_sums = []
    running = 0  # renamed from 'sum', which shadowed the builtin
    for value in lists:
        running += value
        prefix_sums.append(running)
    return prefix_sums


listss = [1, 2, 3, 5, 5, 4]
print(total(listss))
|
# NICOS setup file for the ma11 'dom' axis: declares a single EPICS motor device.
description = 'Setup for the ma11 dom motor'
devices = dict(
    dom = device('nicos_ess.devices.epics.motor.EpicsMotor',
        description = 'Sample stick rotation',
        # PV names: the motor record itself plus its message-text PV for errors.
        motorpv = 'SQ:SANS:ma11:dom',
        errormsgpv = 'SQ:SANS:ma11:dom-MsgTxt',
        precision = 0.1,
    ),
)
|
with open("input.txt", "r") as f:
    values = [int(e) for e in f.readlines()]

# Sum of each 3-measurement sliding window.
windowsSum = [a + b + c for a, b, c in zip(values, values[1:], values[2:])]

# Count how many window sums increase over the previous window sum.
counter = 0
for previous, current in zip(windowsSum, windowsSum[1:]):
    if previous < current:
        counter += 1
print("Total increment = ", counter)
|
# Bot configuration. Do not commit a real token to version control.
TOKEN = "<Your Bot Token Here>"
LOG_LEVEL_STDOUT = "DEBUG"  # Console log level
LOG_LEVEL_FILE = "INFO"  # File log level
|
# Bus Routes
# each routes[i] is a bus route that the i-th bus repeats forever
# we start at bus stop S and we want to reach bus stop T
# travelling by buses only, find the least number of buses to take
class Solution(object):
    def numBusesToDestination(self, routes, S, T):
        """
        :type routes: List[List[int]]
        :type S: int
        :type T: int
        :rtype: int

        BFS over stops, where crossing onto a not-yet-used route costs one bus.
        """
        # Map each stop to the set of route indices serving it.
        routes_at_stop = collections.defaultdict(set)
        for route_id, stops in enumerate(routes):
            for s in stops:
                routes_at_stop[s].add(route_id)

        seen_stops = {S}
        seen_routes = set()
        frontier = collections.deque([(S, 0)])
        while frontier:
            stop, buses = frontier.popleft()
            if stop == T:
                return buses
            for route_id in routes_at_stop[stop]:
                if route_id in seen_routes:
                    continue
                seen_routes.add(route_id)
                # Boarding this route reaches every stop it serves.
                for nxt in routes[route_id]:
                    if nxt not in seen_stops:
                        seen_stops.add(nxt)
                        frontier.append((nxt, buses + 1))
        return -1
|
class Solution:
    """
    @param matrix: the given matrix
    @return: True if and only if the matrix is Toeplitz
    """
    def isToeplitzMatrix(self, matrix):
        # A matrix is Toeplitz when every cell equals its upper-left neighbour.
        n_cols = len(matrix[0])
        n_rows = len(matrix)
        return all(
            matrix[r][c] == matrix[r - 1][c - 1]
            for r in range(1, n_rows)
            for c in range(1, n_cols)
        )
# MMDetection config: RetinaNet on Cityscapes, derived from the R-50 base
# config, with the backbone swapped for a pretrained ResNet-101.
_base_ = './retinanet_r50_fpn_1x_cityscapes.py'
model = dict(
    backbone=dict(
        depth=101,  # override the base config's ResNet-50 depth
        init_cfg=dict(type='Pretrained',
                      checkpoint='checkpoints/resnet101-63fe2227.pth')))
# load_from="checkpoints/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth"
# Title  : Multiply all odd numbers
# Author : Kiran Raj R.
# Date   : 06-11-2020
def multiply_odd(num):
    """Return the product of all odd numbers below the given limit."""
    product = 1
    odd = 1
    while odd < num:
        product *= odd
        odd += 2
    return product


print(multiply_odd(10))
def multiply_even(num):
    """Return the product of all even numbers below the given limit."""
    product = 1
    even = 2
    while even < num:
        product *= even
        even += 2
    return product


print(multiply_even(11))
class Player:
    """A combatant with a name, hit points, and an attack stat."""

    def __init__(self, name, life_value, attack_value):
        self.name = name
        self.life_value = life_value
        self.attack_value = attack_value

    def attack(self, enemy_player: 'Player'):
        """Deal this player's attack value as damage to the enemy."""
        enemy_player.life_value -= self.attack_value

    def is_alive(self):
        """Return True while this player still has positive life."""
        return self.life_value > 0
""" Implémentation simple du rendu monnaie (pas de limite de nombre de pièces/billets)
"""
def RenduMonnaie(p, a, pieces):
    """Greedy change-making (no limit on the number of coins/notes).

    PARAMETERS:
      - p : int — price
      - a : int — amount handed over by the customer
      - pieces : iterable — available coin/note denominations
    RETURNS:
      - rendu : list — coins/notes given back (fewest possible for
        canonical denomination systems)

    Bug fixed: the original rescanned the denomination list from the top on
    every coin and looped `while sum(rendu) < r`, which never terminated when
    no denomination fit the remainder (e.g. pieces=[2] with a remainder of 3).
    Iterating denominations largest-first and exhausting each one terminates
    always and yields the identical coin list whenever exact change exists.
    """
    rendu = []
    # No change due when the customer paid the exact price or too little.
    r = a - p
    if r <= 0:
        return rendu
    remaining = r
    for coin in sorted(pieces, reverse=True):
        while coin <= remaining:
            rendu.append(coin)
            remaining -= coin
    return rendu
if __name__=='__main__':  # quick interactive test of RenduMonnaie
    prix = int(input("prix : "))
    argent = int(input("Argent donné par le client : "))
    # Euro-style canonical denominations (in cents/units).
    pieces = [1, 2, 5, 10, 20, 50, 100, 200, 500]
    rendu = RenduMonnaie(prix, argent, pieces)
    print(f"Pieces à rendre : {rendu}")
# Define physical constants
P0 = 1000.  # Ground pressure level. Unit: hPa
SCALE_HEIGHT = 7000.  # Atmospheric scale height. Unit: m
CP = 1004.  # specific heat at constant pressure for air (cp) = 1004 J/kg-K
DRY_GAS_CONSTANT = 287.  # Gas constant for dry air. Unit: J/kg-K
EARTH_RADIUS = 6.378e+6  # Unit: m
EARTH_OMEGA = 7.29e-5  # Earth's rotation rate. Unit: rad/s
# Count 3-measurement sliding-window sums that increase over the previous one.
# Fix: the file handle was bound to the name `input`, shadowing the builtin.
with open('./input.txt') as infile:
    lines = [int(s.strip()) for s in infile.readlines()]
last = 999999999999  # sentinel larger than any realistic first window sum
counter = 0
for (a, b, c) in zip(lines[0:-2], lines[1:-1], lines[2:]):
    current = a + b + c
    if (current > last):
        counter += 1
    last = current
print(counter)  # 1378
|
fileName = input("What's the name of the file? ../logs/")
# Each log line is space-separated: fields[0] = score, fields[2] = win flag.
# Fix: the file handle was never closed; 'with' guarantees it.
with open('../logs/' + fileName, 'r') as log_file:
    results = list(map(lambda e: e.split(" "), log_file.readlines()))
"""
Interesting statistics:
 - Average score of all runs
 - Worst score
 - Best score
 - Average of worst 20% of scores
 - Average of best 20% of scores
 - Win %
"""
results = sorted(results, key=lambda r: int(r[0]))
scores = [int(r[0]) for r in results]
wins = [int(r[2].strip()) for r in results]
print("Average score:", sum(scores) / len(scores))
print("Best score:", scores[-1])
print("Worst score:", scores[0])
worstTwenty = scores[:(len(scores) // 5)]
print("Average of bottom 20%:", sum(worstTwenty) / len(worstTwenty))
bestTwenty = scores[int(len(scores) * 0.8):]
print("Average of top 20%:", sum(bestTwenty) / len(bestTwenty))
winrate = sum(wins) / len(wins)
print("Winrate: ", winrate * 100, "%", sep="")
|
# This program saves a list of numbers to a file.
def main():
    """Write the numbers 1-7 to 'numberlist.txt', one per line."""
    # Create a list of numbers.
    numbers = [1, 2, 3, 4, 5, 6, 7]
    # 'with' guarantees the file is closed even if a write fails
    # (the original used an explicit open()/close() pair).
    with open('numberlist.txt', 'w') as outfile:
        # Write the list to the file.
        for item in numbers:
            outfile.write(str(item) + '\n')


# Call the main function.
main()
|
x = int(input('Enter your Age: '))
print('****************')
# Fix: the original wrapped this chain in `for i in range(0, 1):`, a
# single-iteration loop that added nothing; a plain if/elif is equivalent.
if x >= 18:
    print('You can watch content with R-rating')
elif x >= 13:
    print('You can watch movies under parental guidance ')
else:
    print('Cartoons permitted')
print(' Thanks! ')
|
"""
author:Wenquan Yang
time:2020/6/9 1:36
content:配置文件
"""
BLOCK_SIZE = 512 # 磁盘块大小Bytes
BLOCK_NUM = 2560 # 磁盘块总数量
SUPER_BLOCK_NUM = 2 # 超级块占用的块数
INODE_BLOCK_NUM = 256 # 索引占用的块数
DATA_BLOCK_NUM = BLOCK_NUM - SUPER_BLOCK_NUM - INODE_BLOCK_NUM
INODE_BLOCK_START_ID = SUPER_BLOCK_NUM
DATA_BLOCK_START_ID = SUPER_BLOCK_NUM + INODE_BLOCK_NUM + 1 # 数据块的起始地址
INODE_SIZE = 512 # INODE占用的块区大小
DISK_SIZE = BLOCK_SIZE * BLOCK_NUM # 磁盘大小
DISK_NAME = "../fms.pfs"
DIR_NUM = 128 # 每个目录锁包含的最大文件数
FREE_NODE_CNT = 50 # 超级块中空闲节点的最大块数
FREE_BLOCK_CNT = 100 # 超级块中空闲数据块的最大块数
BASE_NAME = "base" # 根目录名
FILE_TYPE = 0 # 文件类型
DIR_TYPE = 1 # 目录类型
ROOT_ID = 0
ROOT = 'root'
INIT_DIRS = ['root', 'home', 'etc']
VERSION = "V 1.2"
LOGO = """
____ ______ _____
/ __ \ / ____// ___/
/ /_/ // /_ \__ \\
/ ____// __/ ___/ /
/_/ /_/ /____/
"""
# color
FILE_COLOR_F = "37" # 文件名前景色
FILE_COLOR_B = "40" # 文件名背景色
DIR_COLOR_F = "32"
DIR_COLOR_B = "40"
|
def internal_consistency_check(Reports_dict, reportnos=None):
    """Run per-report arithmetic consistency checks on each report DataFrame.

    Depending on the report number, verifies that state rows add down to the
    US/total row, that quarterly columns add across to TOTAL, and/or that a
    reported ratio equals its recomputed quotient.

    :param Reports_dict: mapping of report number (str) -> DataFrame
    :param reportnos: optional subset of report numbers to check; all by default
    :return: dict of report number -> mismatch structures from the checkers
    """
    return_dict = {}
    if reportnos:
        search_list = reportnos
    else:
        search_list = list(Reports_dict.keys())
    for reportno in search_list:
        rdf = pd.DataFrame()
        rdf = Reports_dict[reportno].copy()
        print('REPORT', reportno)
        if not rdf.empty:
            if reportno == '1':
                add_down_dont_match = check_add_down(rdf=rdf, tot_col='GEO', columns_to_add=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'], groupby_vars=['YEAR', 'DRUG_CODE', 'STATE'])
                add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
                return_dict[reportno] = (add_down_dont_match, add_across_dont_match)
                # Report 1 is expected to be fully consistent.
                assert return_dict[reportno] == ({}, {})
            elif reportno == '2':
                add_down_dont_match = check_add_down(rdf=rdf, tot_col='GEO', columns_to_add=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'], groupby_vars=['YEAR', 'DRUG_CODE'])
                add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
                return_dict[reportno] = (add_down_dont_match, add_across_dont_match)
                assert return_dict[reportno] == ({}, {})
            elif reportno == '3':
                add_across_dont_match = check_add_across(rdf=rdf, columns_to_add=['Q1', 'Q2', 'Q3', 'Q4'], col_tot=['TOTAL'])
                return_dict[reportno] = (add_across_dont_match)
                assert return_dict[reportno] == {}
            elif reportno == '4':
                # 4 is the only report with unexplainable internal inconsistencies, for American Samoa in 2002
                add_down_dont_match = check_add_down(rdf=rdf, tot_col='STATE', columns_to_add=['TOTAL GRAMS'], groupby_vars=['YEAR', 'DRUG_CODE'])
                # Coalesce whichever census population column is present.
                # NOTE(review): chained indexing (rdf['POP'][mask] = ...) relies on
                # writing through a view; pandas may emit SettingWithCopyWarning.
                rdf['POP'] = np.nan
                if '2000 POP' in list(rdf.columns):
                    rdf['POP'][rdf['2000 POP'].notnull()] = rdf['2000 POP']
                if '2010 POP' in list(rdf.columns):
                    rdf['POP'][rdf['2010 POP'].notnull()] = rdf['2010 POP']
                divisor_dont_match = check_divide(rdf, 'TOTAL GRAMS', 'POP', 'GRAMS/100K POP', 100000)
                assert all(divisor_dont_match[('TOTAL GRAMS', 'POP', 'GRAMS/100K POP')]['STATE'] == 'AMERICAN SAMOA')
                return_dict[reportno] = (add_down_dont_match, divisor_dont_match)
            elif reportno == '5' or reportno == '7':
                divisor_dont_match = check_divide(rdf, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS')
                return_dict[reportno] = (divisor_dont_match)
                assert return_dict[reportno] == {}
    return return_dict
def across_consistency_check(Reports_dict, reportlist):
    """Cross-check pairs/groups of reports that should agree with each other.

    The supported values of ``reportlist`` are exactly ['5', '7'],
    ['2', '3', '4'], ['1', '2'] and ['2', '5']; each branch aggregates the
    finer-grained report and compares it to the coarser one.

    :param Reports_dict: mapping of report number (str) -> DataFrame
    :param reportlist: which group of reports to cross-check
    :return: dict keyed by report-number tuples with mismatch details
    """
    returndict = {}
    if reportlist == ['5', '7']:
        # There are some errors, almost entirely in 2011 but a few in buyers in 2014
        rdf5, rdf7 = pd.DataFrame(), pd.DataFrame()
        rdf5, rdf7 = Reports_dict['5'].copy(), Reports_dict['7'].copy()
        assert check_divide(rdf5, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS') == {}
        assert check_divide(rdf7, 'TOTAL GRAMS', 'BUYERS', 'AVG GRAMS') == {}
        returndict[('5', '7')] = groupby_across_sheets(big_df=rdf5[list(set(rdf5.columns) - {'AVG GRAMS'})], small_df=rdf7[list(set(rdf7.columns) - {'AVG GRAMS'})], groupby_vars=['YEAR', 'DRUG CODE', 'BUSINESS ACTIVITY'], compare_cols=['TOTAL GRAMS', 'BUYERS'])
        returndict2, returnlist_unmatch = returndict[('5', '7')]
        assert all(returnlist_unmatch['YEAR'] == '2011')
        for key in returndict2:
            assert sum(returndict2[key]['YEAR'] == '2011') + sum(returndict2[key]['YEAR'] == '2014') == len(returndict2[key])
    if reportlist == ['2', '3', '4']:
        # All errors are in US totals only.
        rdf2, rdf3, rdf4 = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
        rdf2, rdf3, rdf4 = Reports_dict['2'].copy(), Reports_dict['3'].copy(), Reports_dict['4'].copy()
        # Normalize every non-state (national total) row label to one title.
        target_us_row_title = list(set(rdf3['GEO']) - set(statelist))[0]
        for item in set(rdf2['GEO']) - set(statelist):
            rdf2['GEO'].loc[rdf2['GEO'] == item] = target_us_row_title
        for item in set(rdf3['GEO']) - set(statelist):
            rdf3['GEO'].loc[rdf3['GEO'] == item] = target_us_row_title
        for item in set(rdf4['STATE']) - set(statelist):
            rdf4['STATE'].loc[rdf4['STATE'] == item] = target_us_row_title
        assert all(rdf3.columns == rdf2.columns)
        for col in ['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL']:
            rdf3[col] = rdf3[col].apply(lambda x: float(str(x).replace(',', '')))
            # 2011 figures are reported in different units; rescale to match.
            rdf3[col][rdf3['YEAR'] == '2011'] = rdf3[col] / 1000
        # Coalesce whichever census population column is present.
        rdf4['POP'] = np.nan
        if '2000 POP' in list(rdf4.columns):
            rdf4['POP'][rdf4['2000 POP'].notnull()] = rdf4['2000 POP']
        if '2010 POP' in list(rdf4.columns):
            rdf4['POP'][rdf4['2010 POP'].notnull()] = rdf4['2010 POP']
        df_pop = pd.DataFrame([(float(str(x[0]).replace(',', '')), x[1], x[2]) for x in list(set([tuple(x) for x in rdf4[['POP', 'YEAR', 'STATE']].values]))])
        df_pop.columns = ['POP', 'YEAR', 'GEO']
        rdf2 = pd.merge(rdf2, df_pop, on=['GEO', 'YEAR'])
        merged = pd.merge(rdf2, rdf3, how='outer', on=['DRUG_CODE', 'GEO', 'YEAR'], indicator=True)
        missing_entries = merged[merged['_merge'] == 'right_only']
        returndict[('5', '7', 'missing_entries')] = missing_entries
        merged2 = pd.merge(rdf2, rdf3, how='inner', on=['DRUG_CODE', 'GEO', 'YEAR'], )
        for s1 in range(1, 5):
            # Per-quarter rate check: Qn per 100k population.
            d = check_divide(merged2, 'Q%s_x' % s1, 'POP', 'Q%s_y' % s1, 100000)
            assert set(list(d.values())[0]['GEO']) == {target_us_row_title}
            returndict[('5', '7', 'Q%s' % s1)] = d
    if reportlist == ['1', '2']:
        rdf1, rdf2 = pd.DataFrame(), pd.DataFrame()
        rdf1, rdf2 = Reports_dict['1'].copy(), Reports_dict['2'].copy()
        # Drop total rows from report 1 and national rows from report 2
        # so both frames carry state-level detail only.
        for tot in [x for x in set(rdf1['GEO']) if x.isdigit() is False]:
            rdf1 = rdf1.loc[rdf1['GEO'] != tot]
        for us in [x for x in set(rdf2['GEO']) if x not in statelist]:
            rdf2 = rdf2.loc[rdf2['GEO'] != us]
        rdf2['STATE'] = rdf2['GEO']
        returndict[('1', '2')] = groupby_across_sheets(big_df=rdf1, small_df=rdf2, groupby_vars=['YEAR', 'DRUG_CODE', 'STATE'], compare_cols=['Q1', 'Q2', 'Q3', 'Q4', 'TOTAL'])
    if reportlist == ['2', '5']:
        rdf2, rdf5 = pd.DataFrame(), pd.DataFrame()
        rdf2, rdf5 = Reports_dict['2'].copy(), Reports_dict['5'].copy()
        rdf2 = rdf2.loc[rdf2['GEO'] != 'UNITED STATES']
        # Align report 2's column names with report 5's before comparing.
        rdf2['STATE'] = rdf2['GEO']
        rdf2['TOTAL GRAMS'] = rdf2['TOTAL']
        rdf2['DRUG CODE'] = rdf2['DRUG_CODE']
        returndict[('2', '5')] = groupby_across_sheets(big_df=rdf5, small_df=rdf2, groupby_vars=['YEAR', 'DRUG CODE', 'STATE'], compare_cols=['TOTAL GRAMS'])
    return returndict
def groupby_across_sheets(big_df, small_df, groupby_vars, compare_cols):
    """Aggregate the fine-grained frame and compare it column-wise to the coarse one.

    Bug fixed: every call site in this module passes the frames as
    ``big_df=``/``small_df=`` keyword arguments, but the parameters were named
    ``bigdf``/``smalldf``, so each call raised TypeError. The parameters are
    renamed to match the callers.

    :param big_df: detail-level DataFrame to be summed over groupby_vars
    :param small_df: aggregate-level DataFrame to compare against
    :param groupby_vars: key columns shared by both frames
    :param compare_cols: numeric columns whose sums should match
    :return: (dict of column -> mismatching rows, DataFrame of unmatched keys)
    """
    work_big = big_df.copy()
    work_small = small_df.copy()
    returndict2 = {}
    # Strip thousands separators and coerce the compared columns to float.
    for col in compare_cols:
        work_big[col] = work_big[col].apply(lambda x: float(str(x).replace(',', '')))
        work_small[col] = work_small[col].apply(lambda x: float(str(x).replace(',', '')))
    big_grouped = work_big.groupby(groupby_vars).sum()
    # Outer merge first, to surface keys present in only one of the frames.
    merged_rdf = pd.merge(big_grouped, work_small, right_on=groupby_vars, left_index=True, how='outer', indicator=True)
    returnlist_unmatch = merged_rdf[merged_rdf['_merge'] != 'both']
    merged_rdf = pd.merge(big_grouped, work_small, right_on=groupby_vars, left_index=True, how='inner', indicator=True)
    for col in compare_cols:
        colx = col + '_x'
        coly = col + '_y'
        df_nonmatch = merged_rdf[merged_rdf.apply(lambda x: are_close(x[colx], x[coly], 0.015) is False, axis=1)]
        if len(df_nonmatch) > 0:
            returndict2[col] = df_nonmatch
    return returndict2, returnlist_unmatch
def check_add_down(rdf, tot_col, columns_to_add, groupby_vars):
    """Check that detail rows sum to the corresponding total rows.

    Rows are split into total vs detail via membership of ``tot_col`` values in
    the module-level ``totallist``, then both partitions are summed per group
    and compared via pct_change.

    :param rdf: report DataFrame
    :param tot_col: column whose values distinguish total rows from detail rows
    :param columns_to_add: numeric columns whose sums are compared
    :param groupby_vars: grouping key columns
    :return: dict of column -> grouped rows whose sums do not match
    """
    rdfa = pd.DataFrame()
    rdfa = rdf.copy()
    # Values of tot_col that denote "total" rows (depends on global totallist).
    tot_loc = {tot_col: [x for x in list(set(rdfa[tot_col].tolist())) if x in totallist]}
    add_down_dont_match = {}
    tot_strings = list(tot_loc.values())[0]
    for col in columns_to_add:
        rdfa[col] = rdfa[col].apply(lambda x: float(x.replace(',', '')))
    # 'bin' is True for total rows, False for detail rows.
    rdfa['bin'] = rdfa[list(tot_loc.keys())[0]].apply(lambda x: x in tot_strings)
    rdf_test = rdfa.groupby(groupby_vars + ['bin']).sum()
    # pct_change between the detail-sum row and the total row of each group;
    # 0 means they match exactly.
    pctc = pd.DataFrame(round(abs(rdf_test.groupby(groupby_vars).pct_change())))
    # Sanity check: the number of comparable entries should cover 60-100% of
    # the theoretical group count.
    totdiv = 0
    for year in set(rdfa['YEAR']):
        div = len(set(rdfa['bin'][rdfa['YEAR'] == year]))
        for v in groupby_vars:
            div = div * len(set(rdfa[v][rdfa['YEAR'] == year]))
        totdiv = totdiv + div
    entries = len(pctc) / totdiv
    assert 0.6 <= entries <= 1
    for column_to_add in columns_to_add:
        # NOTE(review): '&' binds tighter than '!=', so this evaluates as
        # (notnull() & pctc[col]) != 0 rather than notnull() & (pctc[col] != 0);
        # confirm whether the parenthesized form was intended.
        r = rdf_test.loc[pctc[column_to_add].notnull() & pctc[column_to_add] != 0]
        if len(r) > 0:
            add_down_dont_match[column_to_add] = r
    return add_down_dont_match
def check_add_across(rdf, columns_to_add, col_tot):
    """Check that the row-wise sum of ``columns_to_add`` equals the total column.

    Bug fixed: the mismatch dict was keyed with ``[tuple(...)]`` — a *list*
    wrapping the tuple — which is unhashable and raised TypeError the first
    time a mismatch was found. The key is now the tuple itself.

    :param rdf: report DataFrame (numeric columns may contain comma separators)
    :param columns_to_add: columns summed across each row
    :param col_tot: single-element list naming the total column
    :return: dict mapping the column tuple -> rows where the sums disagree
    """
    rdfa = rdf.copy()
    add_across_dont_match = {}
    # Strip thousands separators and coerce to float.
    for col in columns_to_add + col_tot:
        rdfa[col] = rdfa[col].apply(lambda x: float(x.replace(',', '')))
    # Rows whose component sum differs from the total (beyond 0.1 rounding).
    r = rdfa.loc[round(rdfa[columns_to_add].sum(axis=1) - rdfa[col_tot[0]], 1) != 0]
    if len(r) > 0:
        add_across_dont_match[tuple(columns_to_add + col_tot)] = r
    return add_across_dont_match
def check_divide(rdf, top_divisor, bot_divisor, equals_to, multiplier=1, tolerance=0.02):
    """Verify that ``equals_to`` ≈ multiplier * top_divisor / bot_divisor per row.

    Bug fixed: the original inverted the boolean 'CLOSE' column with unary
    minus (``-rdfa['CLOSE']``), which raises TypeError on modern pandas/NumPy;
    ``~`` is the supported boolean inversion for a Series.

    :param rdf: DataFrame holding the three columns (may contain comma separators)
    :param top_divisor: numerator column name
    :param bot_divisor: denominator column name
    :param equals_to: column holding the reported quotient
    :param multiplier: scale factor applied to the quotient (e.g. 100000 per-capita)
    :param tolerance: relative tolerance forwarded to the closeness check
    :return: dict mapping (top, bot, equals) -> rows where the quotient is off
    """
    rdfa = rdf.copy()
    divide_dont_match = {}
    for col in [top_divisor, bot_divisor, equals_to]:
        rdfa[col] = rdfa[col].apply(lambda x: float(str(x).replace(',', '')))
    rdfa['CALCULATED'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, 'calc'), axis=1)
    rdfa['BOOL'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, 'bool'), axis=1)
    rdfa['CLOSE'] = rdfa.apply(lambda x: custom_lambda(x, top_divisor, bot_divisor, equals_to, multiplier, tolerance), axis=1)
    if len(rdfa.loc[~rdfa['CLOSE']]) > 0:
        # Keep un-suffixed columns plus the three being checked.
        columns_list = [x for x in list(rdfa.columns) if ('_x' not in x and '_y' not in x) or x in [top_divisor, bot_divisor, equals_to]]
        divide_dont_match[(top_divisor, bot_divisor, equals_to)] = rdfa[columns_list].loc[~rdfa['CLOSE']]
    return divide_dont_match
def custom_lambda(df, top_divisor, bot_divisor, equals_to, multiplier, returntype):
    """Row helper: recompute multiplier*top/bot and compare to the reported value.

    ``returntype`` selects the output: 'bool' -> equality flag, a float ->
    are_close() with that tolerance, anything else -> the rounded-value tuple.
    """
    quotient = multiplier * df[top_divisor] / df[bot_divisor]
    is_equal, rounded_values = are_equal(quotient, df[equals_to])
    if returntype == 'bool':
        return is_equal
    if type(returntype) is float:
        return are_close(quotient, df[equals_to], returntype)
    return rounded_values
def are_equal(val_compare, reference_val):
    """Compare two numbers at a precision derived from the reference value.

    Returns (matched, (val, comp, val1, comp1)): the pair rounded at the
    derived precision and at one digit finer; matched is True when either
    rounding makes them equal.
    """
    digits = return_round(reference_val)
    pairs = [(round(val_compare, d), round(reference_val, d))
             for d in (digits, digits + 1)]
    matched = any(a == b for a, b in pairs)
    (val, comp), (val1, comp1) = pairs
    return matched, (val, comp, val1, comp1)
def are_close(val_compare, reference_val, tolerance):
    """Like are_equal, but additionally accept a relative gap (or, for a
    zero reference, an absolute gap) no larger than ``tolerance``."""
    equal, (val, comp, _val1, _comp1) = are_equal(val_compare, reference_val)
    if equal:
        return True
    if reference_val == 0:
        return abs(val - comp) <= tolerance
    relative_ok = abs(val_compare - reference_val) / reference_val <= tolerance
    return relative_ok or abs(val - comp) <= tolerance
def return_round(x):
    """Return how many decimal digits to round to for a non-negative number:
    more digits for small magnitudes, fewer for large ones. Raises for x < 0.
    """
    if x == 0:
        return 2
    if x < 0:
        raise Exception("shouldn't be less than zero")
    log = math.log10(x)
    magn = int(log)
    # Exact negative powers of ten (0.1, 0.01, ...) count one magnitude up.
    if magn == log and magn < 0:
        magn += 1
    if magn < 0:
        return 1 - magn
    if magn <= 2:
        return 2
    if magn <= 4:
        return 1
    return 0
|
# Read N and print the exponent of the largest power of two not exceeding N
# (0 when N < 1): repeatedly double until we pass N, counting the doublings.
N = int(input())
power = 1
doublings = 0
while power <= N:
    power *= 2
    doublings += 1
print(max(0, doublings - 1))
# Ancient Greek accentuation data (polytonic Unicode).
# Proclitics: unaccented words that attach to the following word
# (articles, prepositions, conjunctions, negations).
PROCLITICS = {
    "ὁ", "ἡ", "οἱ", "αἱ",
    "ἐν", "εἰς", "ἐξ", "ἐκ",
    "εἰ", "ὡς",
    "οὐ", "οὐκ", "οὐχ",
}
# Enclitics: words that throw their accent back onto the preceding word.
ENCLITICS = {
    # personal pronouns
    "μου", "μοι", "με",
    "σου", "σοι", "σε",
    # indefinite pronouns
    "τὶς", "τὶ", "τινός", "τινί", "τινά", "τινές", "τινάς", "τινῶν", "τισίν",
    "τισί",
    # indefinite adverbs
    "πού", "ποτέ", "πώ", "πώς",
    # dissyllabic forms of εἰμί
    "εἰμί", "εἰσίν", "εἰσί", "ἐσμέν", "ἐστέ", "ἐστίν", "ἐστί",
    # dissyllabic forms of φημί
    "φησίν", "φημί", "φασίν",
    # certain particles
    "γέ", "τέ", "τοι",
}
# Elided (apostrophised) forms mapped to their full spelling.
ELISION = {
    "ἀλλ’": "ἀλλά",
    "ἀνθ’": "ἀντί",
    "ἀπ’": "ἀπό",
    "ἀφ’": "ἀπό",
    "γ’": "γε",
    "γένοιτ’": "γένοιτο",
    "δ’": "δέ",
    "δεῦρ’": "δεῦρο",
    "δι’": "διά",
    "δύναιτ’": "δύναιτο",
    "εἶτ’": "εἶτα",
    "ἐπ’": "ἐπί",
    "ἔτ’": "ἔτι",
    "ἐφ’": "ἐπί",
    "ἡγοῖντ’": "ἡγοῖντο",
    "ἵν’": "ἵνα",
    "καθ’": "κατά",
    "κατ’": "κατά",
    "μ’": "με",
    "μεθ’": "μετά",
    "μετ’": "μετά",
    "μηδ’": "μηδέ",
    "μήδ’": "μηδέ",  # @@@ (flagged by the original author)
    "ὅτ’": "ὅτε",
    "οὐδ’": "οὐδέ",
    "πάνθ’": "πάντα",
    "πάντ’": "πάντα",
    "παρ’": "παρά",
    "ποτ’": "ποτε",
    "σ’": "σε",
    "τ’": "τε",
    "ταῦθ’": "ταῦτα",
    "ταῦτ’": "ταῦτα",
    "τοῦτ’": "τοῦτο",
    "ὑπ’": "ὑπό",
    "ὑφ’": "ὑπό",
    # "ἔσθ’": "???",
    # "θεῖ’": "???",
}
# Movable-consonant variants mapped to their base form.
MOVABLE = {
    "ἐξ": "ἐκ",
    "οὐκ": "οὐ",
    "οὐχ": "οὐ",
}
|
# 177-sample input vector. It is extremely sparse, so it is built from the
# non-zero entries only; every unlisted index holds 0.0.
_NONZERO = {0: 3.0, 1: 1.0, 4: 1.0, 5: 6.0, 6: 1.0, 8: 1.0, 10: 3280.0,
            17: 1.0, 22: 1.0, 39: 1.0, 96: 1.0, 129: 1.0, 141: 1.0,
            172: 1.0, 176: 1.0}
data_in = [_NONZERO.get(i, 0.0) for i in range(177)]
# Parsers
def parse_name(name):
    """Normalize a field name: cast to str, spaces -> underscores, lowercase.

    Replaces the original lambda assignment (PEP 8 E731) with a def so the
    callable has a proper name in tracebacks; the interface is unchanged.
    """
    return str(name).replace(' ', '_').lower()
|
"""
# IMPLEMENT POW(X, N)
Implement pow(x, n), which calculates x raised to the power n (i.e. xn).
Example 1:
Input: x = 2.00000, n = 10
Output: 1024.00000
Example 2:
Input: x = 2.10000, n = 3
Output: 9.26100
Example 3:
Input: x = 2.00000, n = -2
Output: 0.25000
Explanation: 2-2 = 1/22 = 1/4 = 0.25
Constraints:
-100.0 < x < 100.0
-231 <= n <= 231-1
-104 <= xn <= 104
"""
def myPow(x, n):
if n == 0:
return 1
elif n < 0:
return 1 / myPow(x, abs(n))
elif n % 2 != 0:
return x * myPow(x, n-1)
else:
return myPow(x*x, n/2) |
# Oct 2021
# Class for extraction.py
class FoundExpression:
    """Value object recording an expression located during extraction:
    what was matched, which file and language it came from, and on
    which line."""

    def __init__(self, expression: str,
                 file: str,
                 language: str,
                 line_no: int):
        self.expression = expression
        self.language = language
        self.file = file
        self.line_no = line_no
"""
@author: magician
@date: 2019/12/24
@file: rotate_array.py
"""
def rotate(nums, k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
# nums = nums[k + 1:] + nums[:k + 1]
for i in range(k):
nums.insert(0, nums[-1])
nums.pop()
return nums
if __name__ == '__main__':
assert rotate([1,2,3,4,5,6,7], 3) == [5,6,7,1,2,3,4]
|
class Solution:
    def minOperations(self, nums: List[int]) -> int:
        """Minimum replacements to turn nums into consecutive integers.

        For each distinct value as a window start, count how many distinct
        values fall inside a window of width len(nums); the answer is the
        elements left uncovered by the best window.
        """
        total = len(nums)
        distinct = sorted(set(nums))
        best = total
        for left, low in enumerate(distinct):
            high = low + total - 1
            covered = bisect_right(distinct, high) - left
            best = min(best, total - covered)
        return best
|
"""
git-flow -- A collection of Git extensions to provide high-level
repository operations for Vincent Driessen's branching model.
"""
#
# This file is part of `gitflow`.
# Copyright (c) 2010-2011 Vincent Driessen
# Copyright (c) 2012 Hartmut Goebel
# Distributed under a BSD-like license. For full terms see the file LICENSE.txt
#
VERSION = (0, 6, 3)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Vincent Driessen, Hartmut Goebel"
__contact__ = "vincent@datafox.nl, h.goebel@goebel-consult.de"
__homepage__ = "http://github.com/nvie/gitflow/"
__docformat__ = "restructuredtext"
__copyright__ = "2010-2011 Vincent Driessen; 2012 Hartmut Goebel"
__license__ = "BSD"
|
# Romanizations for one 256-entry chunk of the Hangul Syllables Unicode
# block. Every entry is a syllable base followed by one of the 28 coda
# (final consonant) suffixes in standard Hangul coda order. The chunk
# starts four codas into the "ju" row (its first four entries live in the
# previous chunk) and ends eight codas into the "jjae" row, hence the
# [4:260] slice of the full 10x28 product.
_BASES = ('ju', 'jweo', 'jwe', 'jwi', 'jyu', 'jeu', 'jyi', 'ji', 'jja', 'jjae')
_CODAS = ('', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd', 'l', 'lg', 'lm', 'lb',
          'ls', 'lt', 'lp', 'lh', 'm', 'b', 'bs', 's', 'ss', 'ng', 'j', 'c',
          'k', 't', 'p', 'h')
data = tuple(base + coda for base in _BASES for coda in _CODAS)[4:260]
|
def calculate_area(side_length=10):
    """Print the area of a square with the given side length (default 10)."""
    area = side_length ** 2
    print(f"The area of a square with sides of length {side_length} is {area}.")
# Prompt for a side length; non-positive input falls back to the
# default square of side 10.
length = int(input("Enter side length: "))
calculate_area(length if length > 0 else 10)
def find_max(num1, num2):
    """Return the largest x in [num1, num2] that has exactly two digits
    (sign ignored), a digit sum divisible by 3, and is divisible by 5;
    -1 when no candidate exists or when num2 <= num1."""
    best = -1
    if num2 > num1:
        for candidate in range(num1, num2 + 1):
            digits = [int(ch) for ch in str(abs(candidate))]
            if len(digits) == 2 and sum(digits) % 3 == 0 and candidate % 5 == 0:
                # Ascending scan: the last match is automatically the max.
                best = candidate
    return best
#Provide different values for num1 and num2 and test your program.
# Largest qualifying value between 10 and 100 (expected: 90).
max_num=find_max(10,100)
print(max_num)
def calc():
    """Prompt for two integers and an operation name, then print the result.

    Bug fix: the Division branch previously computed ``numOne / numThree``
    (dividing by the operation *string*), which raised a TypeError; it now
    divides by the second number.
    """
    numOne = int(input("What is the first number of your problem?"))
    numTwo = int(input("What is the second number of your problem?"))
    numThree = input("What type of Math Problem is it, Addition, Subtraction, Multiplication, Division, Remainder, or Exponents? Type exactly.")
    if numThree == 'Addition':
        print(numOne + numTwo)
    elif numThree == 'Subtraction':
        print(numOne - numTwo)
    elif numThree == 'Multiplication':
        print(numOne * numTwo)
    elif numThree == 'Division':
        print(numOne / numTwo)
    elif numThree == 'Remainder':
        print(numOne % numTwo)
    elif numThree == 'Exponents':
        print(numOne ** numTwo)
    else:
        print("Not acceptable format. Restart program and run again.")
# Run the calculator forever; there is no quit command, stop with Ctrl-C.
while True:
    calc()
|
class Solution:
    def nthUglyNumber(self, n):
        """
        :type n: int
        :rtype: int

        Three-pointer construction: each prime keeps a cursor into the
        sequence built so far; the next ugly number is the smallest
        candidate, and every cursor that produced it advances (which also
        collapses duplicates such as 2*3 == 3*2).
        """
        primes = [2, 3, 5]
        cursors = [0, 0, 0]
        sequence = [1]
        for _ in range(n):
            candidates = [p * sequence[c] for p, c in zip(primes, cursors)]
            smallest = min(candidates)
            for slot, value in enumerate(candidates):
                if value == smallest:
                    cursors[slot] += 1
            sequence.append(smallest)
        return sequence[n - 1]
if __name__ == "__main__":
    print(Solution().nthUglyNumber(10))
    print(Solution().nthUglyNumber(11))
|
"""Codewars problem to find even index."""
def find_even_index(arr):
"""Return the index where sum of both sides are equal."""
if len(arr) == 0:
return 0
for i in range(0, len(arr)):
sum1 = 0
sum2 = 0
for j in range(0, i):
sum1 += arr[j]
for k in range(i + 1, len(arr)):
sum2 += arr[k]
if sum1 == sum2:
return i
return -1
|
# Russian UI strings (labels for a melanoma-screening interface).
# English translations are given in the trailing comments; the string
# values themselves must stay in Russian (they are user-facing).
nan = 'Неизвестно'  # "Unknown"
age = 'Возраст'  # "Age"
gender = 'Пол'  # "Sex"
male = 'Мужчина'  # "Male"
female = 'Женщина'  # "Female"
check = 'Проверить'  # "Check"
site = 'Место'  # "Site"
torso = 'Живот'  # "Abdomen"
head_neck = 'Голова / Шея'  # "Head / Neck"
palms_soles = 'Ладони / Ступни'  # "Palms / Soles"
oral_genital = 'Полость рта / Гениталии'  # "Oral cavity / Genitals"
lateral_torso = 'Бок'  # "Side (lateral torso)"
anterior_torso = 'Грудь'  # "Chest (anterior torso)"
lower_extremity = 'Нижние конечности'  # "Lower extremities"
posterior_torso = 'Спина'  # "Back (posterior torso)"
upper_extremity = 'Верхние конечности'  # "Upper extremities"
melanoma_diagnosis = 'Диагностирование меланомы'  # "Melanoma diagnosis"
photo_needed = 'Необходимо загрузить фотографию'  # "A photo must be uploaded"
high_prob = 'Высокая вероятность наличия заболевания'  # "High probability of disease"
low_prob = 'Низкая вероятность наличия заболевания'  # "Low probability of disease"
|
def read_pwscf_in(filepath):
    """
    Note: read parameters from pwscf input template

    Parse the &control, &system, &electrons, &ions and &cell namelists of a
    pw.x input file and return them as a tuple of five dicts. Each value is
    the first whitespace token after "=" for single-value variables, or the
    full token list otherwise; inline "#" comments are ignored.

    Fixes over the original:
    - the &control branch wrote into a misspelled name ("contorl"), raising
      NameError for any single-value &control variable;
    - blank lines inside a namelist no longer raise IndexError (the
      original's emptiness check ran after the crash point and was dead);
    - the five near-identical parsing loops are factored into one helper.
    """
    with open(filepath, 'r') as fin:
        lines = fin.readlines()

    def parse_namelist(start):
        """Collect key/value pairs from the namelist whose "&name" header is
        at lines[start]; parsing stops at the "/" terminator line."""
        params = {}
        j = start + 1
        while lines[j].split()[0:1] != ["/"]:
            if len(lines[j].split()) == 0:
                j += 1
                continue  # blank line inside the namelist
            # Strip the trailing newline and any inline "#" comment.
            body = lines[j].split("\n")[0].split("#")[0]
            fields = body.split("=")
            key = fields[0].split()[0]
            if len(fields) == 2:
                # single-value variable, e.g. "calculation = 'scf'"
                params[key] = fields[1].split()[0]
            else:
                # multi-token value (extra "=" signs in the line)
                params[key] = fields[1].split()
            j += 1
        return params

    sections = {"&control": {}, "&system": {}, "&electrons": {},
                "&ions": {}, "&cell": {}}
    for i in range(len(lines)):
        head = lines[i].split()
        if head and head[0].lower() in sections:
            sections[head[0].lower()] = parse_namelist(i)
    return (sections["&control"], sections["&system"], sections["&electrons"],
            sections["&ions"], sections["&cell"])
def read_neb_in(filepath):
    """
    Note: read parameters from neb.x input template

    Scan the file for the &PATH namelist and return its variables as a
    dict: single values as one token, multi-token values as a list;
    inline "#" comments are dropped.
    """
    with open(filepath, 'r') as fin:
        lines = fin.readlines()
    path = {}
    for i, line in enumerate(lines):
        if line.split()[0].lower() != "&path":
            continue
        j = i + 1
        while lines[j].split()[0] != "/":
            if len(lines[j].split()) == 0:
                pass
            # Strip the trailing newline and any inline "#" comment.
            assignment = lines[j].split("\n")[0].split("#")[0].split("=")
            key = lines[j].split("=")[0].split()[0]
            if len(assignment) == 2:
                # single-value &PATH variable
                path[key] = assignment[1].split()[0]
            else:
                path[key] = assignment[1].split()
            j += 1
    return path
def read_ph_in(filepath):
    """
    Note: read parameters from ph.x input template

    Scan the file for the &INPUTPH namelist and return its variables as a
    dict: single values as one token, multi-token values as a list;
    inline "#" comments are dropped.
    """
    with open(filepath, 'r') as fin:
        lines = fin.readlines()
    ph = {}
    for i, line in enumerate(lines):
        if line.split()[0].lower() != "&inputph":
            continue
        j = i + 1
        while lines[j].split()[0] != "/":
            if len(lines[j].split()) == 0:
                pass
            # Strip the trailing newline and any inline "#" comment.
            assignment = lines[j].split("\n")[0].split("#")[0].split("=")
            key = lines[j].split("=")[0].split()[0]
            if len(assignment) == 2:
                # single-value &INPUTPH variable
                ph[key] = assignment[1].split()[0]
            else:
                ph[key] = assignment[1].split()
            j += 1
    return ph
# Plot the learned linear decision boundary over the train and test scatter
# plots. Endpoints solve w0*x + w1*y + b = 0 for y at x = -2 and x = +2.
x_min = -2
# BUG FIX: one term referenced a non-existent name "model_params"
# (underscore) instead of "modelparams", raising NameError.
y_min = (-(modelparams['weights'][0] * x_min) / modelparams['weights'][1] -
         (modelparams['bias'][0] / modelparams['weights'][1]))
x_max = 2
y_max = (-(modelparams['weights'][0] * x_max) / modelparams['weights'][1] -
         (modelparams['bias'][0] / modelparams['weights'][1]))
fig, ax = plt.subplots(1, 2, sharex=True, figsize=(7, 3))
# Same boundary on both panels: training set (left), test set (right).
ax[0].plot([x_min, x_max], [y_min, y_max])
ax[1].plot([x_min, x_max], [y_min, y_max])
ax[0].scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1],
              label='class 0', marker='o')
ax[0].scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1],
              label='class 1', marker='s')
ax[1].scatter(X_test[y_test == 0, 0], X_test[y_test == 0, 1],
              label='class 0', marker='o')
ax[1].scatter(X_test[y_test == 1, 0], X_test[y_test == 1, 1],
              label='class 1', marker='s')
ax[1].legend(loc='upper left')
plt.show()
# The TensorFlow model performs better on the test set just by random chance.
# Remember, the perceptron algorithm stops learning as soon as it classifies
# the training set perfectly.
# Possible explanations why there is a difference between the NumPy and
# TensorFlow outcomes could thus be numerical precision, or slight differences
# in our implementation.
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 27 11:00:56 2021
@author: BRUNO
"""
# Function that computes the symbolic binomial expansion of (x+y)^n.
def main():
    # x and y stay as raw strings: they appear verbatim in the symbolic
    # output; only the exponent n must be an int.
    x = input('Digite o x: ')
    y = input('Digite o y: ')
    n = int(input('Digite o n: '))
    print('O resultado é: ')
    print(pascal(x,y,n))
def fat(n):
    """Return n! computed iteratively ('fat' = fatorial)."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def binomial(m, n):
    """Return the binomial coefficient C(m, n) as a float (true division)."""
    return fat(m) / (fat(n) * fat(m - n))
def pascal(x, y, n):
    """Return the expansion of (x+y)^n as a '+'-joined string of
    'coef*(x^i*y^j)' terms, with coefficients from the binomial formula."""
    terms = [f'{int(binomial(n, k))}*({x}^{n - k}*{y}^{k})'
             for k in range(n + 1)]
    return '+'.join(terms)
# Run the interactive prompt only when executed as a script.
if __name__ == '__main__':
    main()
|
# Even numbers 0..200 inclusive, one per line.
for even in range(0, 201, 2):
    print(even)
# Multiples of three below 100, one per line.
for multiple in range(0, 100, 3):
    print(multiple)
|
# Special-token indices shared by all vocabularies.
PAD = 0
UNK = 1
BOS = 2
EOS = 3
# Surface forms of the special tokens.
PAD_WORD = "<blank>"
UNK_WORD = "unk"
BOS_WORD = "<s>"
EOS_WORD = "</s>"
# 64 MiB buffer size.
BUFFER_SIZE = 64 * (1024 ** 2)
# Vocabulary identifiers.
TOKEN_VOCAB = "token"
TYPE_VOCAB = "type"
CHAR_VOCAB = "char"
# dataset file fields
MENTION = "mention_span"
RIGHT_CTX = "right_context_token"
LEFT_CTX = "left_context_token"
TYPE = "y_str"
# Label-granularity flags: coarse / fine / ultra-fine.
COARSE_FLAG = 0
FINE_FLAG = 1
UF_FLAG = 2
# Coarse-grained entity type labels.
COARSE = {'person', 'group', 'organization', 'location', 'entity', 'time', 'object', 'event', 'place'}
# Fine-grained entity type labels.
FINE = {'accident', 'actor', 'agency', 'airline', 'airplane', 'airport', 'animal', 'architect', 'army', 'art',
        'artist', 'athlete', 'attack', 'author', 'award', 'biology', 'body_part', 'bridge', 'broadcast',
        'broadcast_station', 'building', 'car', 'cemetery', 'chemistry', 'city', 'coach', 'company', 'computer',
        'conflict', 'country', 'county', 'currency', 'degree', 'department', 'director', 'disease', 'doctor', 'drug',
        'education', 'election', 'engineer', 'ethnic_group', 'facility', 'film', 'finance', 'food', 'game', 'geography',
        'god', 'government', 'health', 'heritage', 'holiday', 'hospital', 'hotel', 'institution', 'instrument',
        'internet', 'island', 'language', 'law', 'lawyer', 'league', 'leisure', 'library', 'living_thing',
        'mass_transit', 'medicine', 'military', 'mobile_phone', 'monarch', 'mountain', 'music', 'musician',
        'music_school', 'natural_disaster', 'news', 'news_agency', 'park', 'planet', 'play', 'political_party',
        'politician', 'product', 'programming_language', 'protest', 'province', 'rail', 'railway', 'religion',
        'religious_leader', 'restaurant', 'road', 'scientific_method', 'ship', 'sign', 'society', 'software', 'soldier',
        'spacecraft', 'sport', 'stage', 'stock_exchange', 'structure', 'subway', 'team', 'television_channel',
        'television_network', 'television_program', 'theater', 'title', 'train', 'transit', 'transportation',
        'treatment', 'water', 'weapon', 'website', 'writing'}
# Character inventory for the char vocabulary (includes some full-width and
# non-ASCII symbols seen in the data).
CHARS = ['!', '"', '#', '$', '%', '&', "'", '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8',
         '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
         'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd',
         'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
         '{', '}', '~', '·', 'Ì', 'Û', 'à', 'ò', 'ö', '˙', 'ِ', '’', '→', '■', '□', '●', '【', '】', 'の', '・', '一', '(',
         ')', '*', ':', '¥']
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
r"""A defined list of constants available to QuickRelease users.
The are some important difference between QuickRelease's L{config items<quickrelease.config>} and C{constants}:
1. C{constants} may be accessed without a L{ConfigSpec<quickrelease.config.ConfigSpec>} reference. This makes them useful in places where it may be difficult to obtain such a reference.
2. C{constants} can be overriden by the environment. This can be useful, but should be used sparingly, since the override is not yet logged anywhere. It's mostly intended to redefine paths to executables in different situations. For instance, if you have a debug version of the C{unzip} utility that you would like to have a L{Process<quickrelease.process.Process>} use. You can set the C{UNZIP} environment variable, and if your process is using a constant, it will be picked up. (This is similar to L{ConfigSpec<quickrelease.config.ConfigSpec>}'s overrides, but cannot currently be disabled.)
3. C{constant} can return complex Python types (lists, dictionaries, etc.)
"""
# Default values for QuickRelease's named constants. Per the module
# docstring, each may be overridden by an identically-named environment
# variable; non-string values need an entry in CONSTANTS_FROM_ENV_HANDLERS.
QUICKRELEASE_CONSTANTS = {
    # Names/paths of external tools invoked by release processes.
    'ANT' : 'ant',
    # This has the _PROG suffix because bzip2 uses the BZIP/BZIP2 env
    # variables as another way to read its arguments (!!?)
    'BZIP_PROG': 'bzip2',
    'GIT' : 'git',
    'GPG' : 'gpg',
    'JAR' : 'jar',
    'MAKE' : 'make',
    'MD5SUM' : 'md5sum',
    'MV' : 'mv',
    'PERL' : 'perl',
    'S3CURL' : 's3curl.pl',
    'SVN' : 'svn',
    'RSYNC' : 'rsync',
    'TAR' : 'tar',
    'UNZIP' : 'unzip',
    'WGET' : 'wget',
    'ZIP' : 'zip',
    # OS-architecture identifiers mapped to short platform tags.
    'BUILD_PLATFORMS_MAP': { 'Windows-i686': 'win32',
                             'Windows-AMD64': 'win64',
                             'Darwin-i686': 'mac',
                             'Darwin-x86_64': 'mac',
                             'Linux-i686': 'linux',
                             'Linux-x86_64': 'linux-x64',
                           },
    # Installer file extension for each platform tag.
    'BUILD_PLATFORM_EXTENSIONS': { 'win32': 'exe',
                                   'mac': 'dmg',
                                   'linux': 'tar.gz',
                                   'linux-x64': 'tar.gz',
                                 },
    # in seconds, so five minutes
    'RUN_SHELL_COMMAND_DEFAULT_TIMEOUT': 60 * 5,
    # A way to increase the default via the environment for instances
    # where you're running in slow(er) environments, e.g. virtualization
    'RUN_SHELL_COMMAND_TIMEOUT_FACTOR': 1,
    # Number of output lines, by default, for quickrelease.command to store
    # in memory before dumping to a file backing-store.
    'RUN_SHELL_COMMAND_IN_MEM_LINES': 20000,
    # in seconds, so 10 minutes.
    'S3_PUSH_TIMEOUT': 60 * 10,
    # Content-Type values by file extension for S3 uploads.
    'S3_MIME_TYPES': { 'asc' : 'text/plain',
                       'bz2' : 'application/x-bzip2',
                       'dmg' : 'application/x-apple-diskimage',
                       'exe' : 'application/octet-stream',
                       'mar' : 'application/octet-stream',
                       'md5' : 'text/plain',
                       'tar.gz' : 'application/x-gzip',
                       'txt': 'text/plain',
                       'zip': 'application/zip',
                     },
}
"""
Various constants that can be useful for QuickRelease L{Process<quickrelease.process.Process>}es.
"""
QUICKRELEASE_CONSTANTS['BUILD_PLATFORMS'] = QUICKRELEASE_CONSTANTS['BUILD_PLATFORMS_MAP'].values()
CONSTANTS_FROM_ENV_HANDLERS = {
    'BUILD_PLATFORMS': lambda val: tuple(val.split()),
    'RUN_SHELL_COMMAND_DEFAULT_TIMEOUT': lambda val: int(val),
    'RUN_SHELL_COMMAND_TIMEOUT_FACTOR': lambda val: int(val),
    'RUN_SHELL_COMMAND_IN_MEM_LINES': lambda val: int(val),
    'S3_PUSH_TIMEOUT': lambda val: int(val),
    # NOTE(review): these two lambdas *return* a NotImplementedError instance
    # rather than raising it (a lambda body cannot contain a raise
    # statement) — confirm callers check the returned value, otherwise the
    # "error" passes through silently.
    'BUILD_PLATFORM_EXTENSIONS': lambda val: NotImplementedError("Need to turn BUILD_PLATFORM_EXTENSIONS overloads into a dict!"),
    'S3_MIME_TYPES': lambda val: NotImplementedError("Need to turn S3_MIME_TYPES overloads into a dict!"),
}
"""A dictionary of named constants -> handlers to convert an environment
variable string into the expected Python type. The type should match
what the named constant in L{QUICKRELEASE_CONSTANTS<quickrelease.constants.QUICKRELEASE_CONSTANTS>} returns.
"""
#
# Application-related constants; probably not a good idea to change these
#
# Bit flags selecting which output streams to pipe when running commands.
_PIPE_STDOUT = 1
_PIPE_STDERR = 2
|
def reverse_number(n: int) -> int:
    """ This function takes in input 'n' and returns 'n' with all digits reversed. """
    if len(str(n)) == 1:
        return n
    # Reverse the digit string of the magnitude, then re-apply the sign.
    # (Leading zeros vanish when converting back: 100 -> "001" -> 1.)
    reversed_magnitude = int(str(abs(n))[::-1])
    return reversed_magnitude if n > 0 else -reversed_magnitude
|
# This is all about using strings: demonstrates the \t (tab) and
# \n (newline) escape sequences inside string literals.
stg_1 = "this is the first message without a tab"
print(stg_1)
stg_2 = "\t this is the second message with a tab"
print(stg_2)
# The trailing \n makes print emit an extra blank line.
stg_3 = "this is another message with a newline\n"
print(stg_3)
|
#!/usr/bin/python
# unicode.py
# Builds a Cyrillic string from \u escapes — it decodes to
# "Лев Николаевич Толстой:\nАнна Каренина" — and prints it.
# The trailing backslashes are line continuations inside one literal.
text = u'\u041b\u0435\u0432 \u041d\u0438\u043a\u043e\u043b\u0430\
\u0435\u0432\u0438\u0447 \u0422\u043e\u043b\u0441\u0442\u043e\u0439: \n\
\u0410\u043d\u043d\u0430 \u041a\u0430\u0440\u0435\u043d\u0438\u043d\u0430'
print (text)
|
# To have some interaction we need a loop that looks for actions.
# Processing (Python mode) sketch: setup() runs once, draw() every frame.
def setup():
    # 400x400 pixel canvas.
    size(400, 400)
    # executed once
    println("This is the setup. Executed once. Initiate things here")
# Executed continuously, once per frame.
def draw():
    # Background color transformation: a borderless full-canvas rect whose
    # gray level is mouseX remapped from [width, 0] to [0, width]
    # (i.e. inverted horizontally), drawn with alpha 100.
    noStroke()
    fill(map(mouseX, width, 0, 0, width), 100)
    rect(0, 0, width, height)
    # Circle color transformation: gray level follows mouseX directly.
    fill(mouseX)
    # Draw a 10px ellipse at the cursor position.
    ellipse(mouseX, mouseY, 10, 10)
    # Print frame and cursor diagnostics to the console.
    println("Frame number: " + frameCount)
    print("mouse x: " + mouseX )
    println(" mouse y: " + mouseY )
|
##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# CubicWeb configuration options for the "piws" cube, as (name, spec) pairs.
# Fixes to user-facing help strings only: missing spaces at implicit string
# concatenations ("3.20,otherwise", "de-authenticationwill") and the typos
# "occured" -> "occurred", "memebers" -> "members".
options = (
    ("documentation_folder", {
        "type": "string",
        "default": None,
        "help": "the folder containing the documentation of the project.",
        "group": "piws",
        "level": 1,
    }),
    ("show_user_status", {
        "type": "yn",
        "default": True,
        "help": "Show or not the user status link on the website.",
        "group": "piws",
        "level": 1,
    }),
    ("ldap_groups_dn", {
        "type": "string",
        "default": None,
        "help": "LDAP groups dn for LDAP groups synchronisation in CW <= 3.20, "
                "otherwise not required.",
        "group": "piws",
        "level": 1,
    }),
    ('apache-cleanup-session-time',
     {'type': 'time',
      'default': None,
      'help': ('Duration of inactivity after which an apache de-authentication '
               'will be triggered'),
      'group': 'piws',
      'level': 1,
     }),
    ('deauth-redirect-url',
     {'type': 'string',
      'default': None,
      'help': 'Redirection url after apache deauthentication occurred.',
      'group': 'piws',
      'level': 1,
     }),
    ("enable-cwusers-watcher", {
        "type": "string",
        "default": 'no',
        "help": ("If 'yes', an email is sent (this email address has to be "
                 "set in the [MAIL] all-in-one section) when a CW user is "
                 "created or deleted."),
        "group": "piws",
        "level": 1,
    }),
    ('enable-apache-logout',
     {'type': 'yn',
      'default': False,
      'help': 'Enable Apache logout',
      'group': 'piws',
      'level': 1,
     }),
    ('logo',
     {'type': 'string',
      'default': 'images/nsap.png',
      'help': 'Navigation bar logo',
      'group': 'piws',
      'level': 1,
     }),
    ('enable-upload',
     {'type': 'yn',
      'default': False,
      'help': ('If true enable the upload, ie relax security on user and '
               'group entities. The database must be regenerated if this '
               'option is modified.'),
      'group': 'piws',
      'level': 1,
     }),
    ('authorized-upload-groups',
     {'type': 'csv',
      'default': 'users',
      'help': 'A list of groups that will be able to upload data.',
      'group': 'piws',
      'level': 1,
     }),
    ('share_group_uploads',
     {'type': 'yn',
      'default': False,
      'help': 'If true, share uploads between the members of a group.',
      'group': 'piws',
      'level': 1,
     }),
    ("metagen_url", {
        "type": "string",
        "default": None,
        "help": "the URL to the metagen bioresource.",
        "group": "piws",
        "level": 1,
    }),
    ("allow-inline-relations", {
        "type": "yn",
        "default": True,
        "help": ("if False remove inline relations from the schema: inline "
                 "relations are not compatible with the massive store."),
        "group": "piws",
        "level": 1,
    }),
)
|
class Command:
    """A named command with a human-readable description and argument list."""

    def __init__(self, name, desc="", args=None):
        """
        :param name: command name.
        :param desc: human-readable description.
        :param args: list of argument specs. BUG FIX: the original used a
            mutable default (``args=[]``), so every Command created without
            ``args`` shared — and mutated — one single list; a fresh list is
            now created per instance.
        """
        self.name = name
        self.desc = desc
        self.args = [] if args is None else args
|
"""
Return nth catalan number.
Recursive Formula of Catalan Numbers says:
C of (n+1) = summation of C of i* C of n-i, for range i=0 to i=n
Therefore, for C of n formula becomes
C of (n) = summation of C of i* C of n-1-i, for range i=0 to i=n-1
"""
def getCatalan(n,dp_arr):
# Lookup
if (dp_arr[n] is not None):
return dp_arr[n]
#Base Case
if (n==0):
return 1
#Rec Case
Cn = 0
for i in range(0,n):
Cn = Cn + ( getCatalan(i, dp_arr) * getCatalan(n-1-i, dp_arr) )
dp_arr[n] = Cn
return Cn
#Driver Code
if __name__ == '__main__':
    # Memo table sized generously; entries fill in on demand.
    dp_arr : list = [None] * 100
    n = 5
    returned = getCatalan(n, dp_arr)
    print(returned)  # C(5) == 42
|
#!/usr/bin/python3
# Interleave the characters of two equal-length strings
# ("パトカー" + "タクシー" -> "パタトクカシーー") and print the result.
s0="パトカー"
s1="タクシー"
ret = "".join(a + b for a, b in zip(s0, s1))
print(ret)
|
class SSLUnavailable(Exception):
    """Raised when a CNAME zone was not verified within the grace period
    (a week) — after that it can't be verified any more."""
class CustomHostnameNotFound(Exception):
    """Raised when a requested custom hostname cannot be found."""
|
# Read n1 (columns) and n2 (rows), then n2 rows of n1 floats, and print the
# mean of each column, one per line.
n1,n2=map(int,input().split())
a=[]
for i in range(n2):
    a.append(list(map(float,input().split())))
# zip(*a) transposes the rows into column tuples.
for i in zip(*a):
    print(sum(i)/n2)
def read_txt_file_str(filename):
    """Return the full contents of text_files/<filename> as one string.

    Uses a ``with`` block so the file handle is closed even if the read
    raises (the original left the handle open on error).
    """
    with open('text_files/' + filename, "r") as f:
        return f.read()
def read_txt_file_list(filename):
    """Return the lines of text_files/<filename> as a list (newlines kept).

    Uses a ``with`` block so the file handle is closed even if the read
    raises (the original left the handle open on error).
    """
    with open('text_files/' + filename, "r") as f:
        return f.readlines()
# Author: Jocelino F.G.
# Read an integer N and print ten labelled values N[0]..N[9].
n = int(input())
vetor = [n]  # vetor[k] ends up holding n * 2**k
dobro = n
for i in range(0, 10):
    dobro = dobro * 2
    vetor.append(dobro)
    # NOTE(review): vetor[i] lags the append by one step, so N[0] is the
    # original value and N[9] is n*2**9 — confirm the exercise doesn't
    # expect the series to start at the first double instead.
    print("N[{}] = {}".format(i, vetor[i]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rule_engine/errors.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
class _UNDEFINED(object):
    __name__ = 'UNDEFINED'

    def __bool__(self):
        return False
    # Python 2 truthiness hook aliases the Python 3 one.
    __nonzero__ = __bool__

    def __repr__(self):
        return self.__name__
UNDEFINED = _UNDEFINED()
"""
A sentinel value to specify that something is undefined. When evaluated, the
value is falsy.
.. versionadded:: 2.0.0
"""
class EngineError(Exception):
    """
    The base exception class from which other exceptions within this package
    inherit.
    """
    def __init__(self, message=''):
        """
        :param str message: A text description of what error occurred.
        """
        # Text description of the error, exposed for programmatic access.
        self.message = message
class EvaluationError(EngineError):
    """
    An error raised for issues which occur while the rule is being evaluated.
    This can occur at parse time while AST nodes are being evaluated during
    the reduction phase.
    """
# NOTE: this deliberately reuses the name SyntaxError, shadowing the
# builtin within this module's namespace.
class SyntaxError(EngineError):
    """A base error for syntax related issues."""
class DatetimeSyntaxError(SyntaxError):
    """
    An error raised for issues regarding the use of improperly formatted
    datetime expressions.
    """
    def __init__(self, message, value):
        """
        :param str message: A text description of what error occurred.
        :param str value: The datetime value whose syntax error caused this exception to be raised.
        """
        super(DatetimeSyntaxError, self).__init__(message)
        # The offending datetime string, kept for error reporting.
        self.value = value
class RuleSyntaxError(SyntaxError):
    """
    An error raised for issues identified while parsing the grammar of the
    rule text.
    """
    def __init__(self, message, token=None):
        """
        :param str message: A text description of what error occurred.
        :param token: The PLY token (if available) which is related to the syntax error.
        """
        # No token means the parser ran off the end of the input.
        location = 'EOF' if token is None else "line {0}:{1}".format(token.lineno, token.lexpos)
        super(RuleSyntaxError, self).__init__(message + ' at: ' + location)
        # The PLY token (if available) related to the syntax error.
        self.token = token
class RegexSyntaxError(SyntaxError):
    """
    An error raised for issues regarding the use of improper regular
    expression syntax.
    """
    def __init__(self, message, error, value):
        """
        :param str message: A text description of what error occurred.
        :param error: The :py:exc:`re.error` exception from which this error was triggered.
        :type error: :py:exc:`re.error`
        :param str value: The regular expression whose syntax error caused this exception to be raised.
        """
        super(RegexSyntaxError, self).__init__(message)
        # Original re.error, kept so callers can inspect the root cause.
        self.error = error
        # The offending regular expression string.
        self.value = value
class AttributeResolutionError(EvaluationError):
    """
    An error raised when an attribute can not be resolved to a value.
    .. versionadded:: 2.0.0
    """
    def __init__(self, attribute_name, object_, thing=UNDEFINED):
        """
        :param str attribute_name: The name of the attribute that can not be resolved.
        :param object_: The value that *attribute_name* was used as an attribute for.
        :param thing: The root-object that was used to resolve *object*.
        """
        # Attribute name, owning object and root object are all kept so the
        # caller can report exactly where resolution failed.
        self.attribute_name = attribute_name
        self.object = object_
        self.thing = thing
        super(AttributeResolutionError, self).__init__("unknown attribute: {0!r}".format(attribute_name))
class AttributeTypeError(EvaluationError):
    """
    An error raised when an attribute with type information is resolved to a
    Python value that is not of that type.
    """
    def __init__(self, attribute_name, object_type, is_value, is_type, expected_type):
        """
        :param str attribute_name: The name of the attribute that is of an incompatible type.
        :param object_type: The object type on which the attribute was resolved.
        :param is_value: The native Python value of the incompatible attribute.
        :param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible attribute.
        :param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this attribute.
        """
        self.attribute_name = attribute_name
        self.object_type = object_type
        self.is_value = is_value
        self.is_type = is_type
        self.expected_type = expected_type
        super(AttributeTypeError, self).__init__(
            "attribute {0!r} resolved to incorrect datatype (is: {1}, expected: {2})".format(
                attribute_name, is_type.name, expected_type.name
            )
        )
class SymbolResolutionError(EvaluationError):
    """
    An error raised when a symbol name is not able to be resolved to a value.
    """
    def __init__(self, symbol_name, symbol_scope=None, thing=UNDEFINED):
        """
        :param str symbol_name: The name of the symbol that can not be resolved.
        :param str symbol_scope: The scope of where the symbol should be valid for resolution.
        :param thing: The root-object that was used to resolve the symbol.
        .. versionchanged:: 2.0.0
           Added the *thing* parameter.
        """
        # Name, scope and root object are kept for precise error reporting.
        self.symbol_name = symbol_name
        self.symbol_scope = symbol_scope
        self.thing = thing
        super(SymbolResolutionError, self).__init__("unknown symbol: {0!r}".format(symbol_name))
class SymbolTypeError(EvaluationError):
    """
    An error raised when a symbol with type information is resolved to a
    Python value that is not of that type.
    """
    def __init__(self, symbol_name, is_value, is_type, expected_type):
        """
        :param str symbol_name: The name of the symbol that is of an incompatible type.
        :param is_value: The native Python value of the incompatible symbol.
        :param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible symbol.
        :param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this symbol.
        """
        self.symbol_name = symbol_name
        self.is_value = is_value
        self.is_type = is_type
        self.expected_type = expected_type
        super(SymbolTypeError, self).__init__(
            "symbol {0!r} resolved to incorrect datatype (is: {1}, expected: {2})".format(
                symbol_name, is_type.name, expected_type.name
            )
        )
|
def is_even(number):
    """Return True when *number* is evenly divisible by 2."""
    return divmod(number, 2)[1] == 0
|
class Solution(object):
    def matrixReshape(self, nums, r, c):
        """
        Reshape the 2-D matrix *nums* into *r* rows and *c* columns,
        preserving row-major order.

        :type nums: List[List[int]]
        :type r: int
        :type c: int
        :rtype: List[List[int]] -- the reshaped matrix, or *nums* unchanged
                when the element counts do not match.
        """
        # Reshape is only legal when both shapes hold the same element count.
        if len(nums) * len(nums[0]) != r * c:
            return nums
        # Flatten row-major, then slice back into r rows of length c.
        # (Replaces the manual index loop; also drops an unused counter.)
        flat = [item for row in nums for item in row]
        return [flat[i * c:(i + 1) * c] for i in range(r)]
|
class InvalidOperationError(Exception):
    """Raised when an operation is not allowed on an empty collection.

    Fixed to derive from Exception rather than BaseException: BaseException
    is reserved for system-exiting events (KeyboardInterrupt, SystemExit),
    and deriving from it lets this error slip past `except Exception`
    handlers.
    """
class Node:
    """Singly-linked list node holding *value* and an optional *next* link."""

    def __init__(self, value, next=None):
        self.value = value
        self.next = next
class Stack():
    """LIFO stack backed by a singly-linked chain of Node objects."""

    def __init__(self, node=None):
        # `top` points at the most recently pushed node (None when empty).
        self.top = node

    def __len__(self):
        # Walk the chain; O(n) in the number of stored items.
        total = 0
        cursor = self.top
        while cursor is not None:
            total += 1
            cursor = cursor.next
        return total

    def push(self, value):
        """Place *value* on top of the stack."""
        self.top = Node(value, self.top)

    def pop(self):
        """Remove and return the top value; raise when the stack is empty."""
        if self.is_empty():
            raise InvalidOperationError("Method not allowed on empty collection")
        popped = self.top
        self.top = popped.next
        return popped.value

    def peek(self):
        """Return the top value without removing it; raise when empty."""
        if self.is_empty():
            raise InvalidOperationError("Method not allowed on empty collection")
        return self.top.value

    def is_empty(self):
        """Return True when the stack holds no items."""
        return self.top is None
class Queue():
    """FIFO queue backed by a singly-linked chain of Node objects."""

    def __init__(self):
        self.front = None  # next node to dequeue
        self.rear = None   # last node enqueued

    def enqueue(self, value):
        """Append *value* at the rear of the queue."""
        node = Node(value)
        if not self.front:
            # Empty queue: the new node is both front and rear.
            self.front, self.rear = node, node
        else:
            self.rear.next = node
            self.rear = node

    def dequeue(self):
        """Remove and return the front value; raise when empty."""
        if self.is_empty():
            raise InvalidOperationError("Method not allowed on empty collection")
        node = self.front
        self.front = self.front.next
        # rear is intentionally left stale when the queue empties; enqueue
        # resets both pointers via its `not self.front` check.
        return node.value

    def peek(self):
        """Return the front value without removing it; raise when empty."""
        if self.is_empty():
            raise InvalidOperationError("Method not allowed on empty collection")
        return self.front.value

    def is_empty(self):
        """Return whether the queue holds no items.

        Fixed: the original returned True for an empty queue but fell off
        the end (returning None) for a non-empty one; now always a bool.
        """
        return self.front is None
|
"""
The file provides default secret parameters used as a reference for creating your
own secret.py or in testing.
Make sure to create your own secret.py (in the same folder) with appropriate values for when deploying
the website!
"""
SECRET_KEY = "2r4-$a^!rs=^glu=a8m=e5a$5*wg2uxjjob!diff-z*wzdx+4y"
"""
Set these if mysql is used as a database backend
"""
MYSQL_USERNAME = ""
MYSQL_PASSWORD = ""
"""
Set these if you're sending e-mail through Gmail using Google's API
"""
SECRET_GMAIL_API_CLIENT_ID = 'google_assigned_id'
SECRET_GMAIL_API_CLIENT_SECRET = 'google_assigned_secret'
SECRET_GMAIL_API_REFRESH_TOKEN = 'google_assigned_token'
"""
Set these if you're sending e-mail through SMTP
"""
SECRET_EMAIL_HOST_USER = 'username'
SECRET_EMAIL_HOST_PASSWORD = 'password'
|
# Teaching exercises: float division, operator precedence, string operations
# and formatting. Each "Given" section poses a small task answered by the
# print() calls that follow.
# Given
x = 10000.0
y = 3.0
print(x / y)
print(10000 / 3)
# What is happening?
# NOTE(review): under Python 3 both lines print the same float (true
# division); under Python 2 the second would be integer division.
# Given
print(x - 1 / y)
print((x - 1) / y)
# What is happening?
# (Division binds tighter than subtraction, so the first is x - (1/y).)
# Given
x = 'foo'
y = 'bar'
# Create 'foobar' using x and y
s = x + y
print(s)
# Create 'foo -> bar' using x and y
print(x + " -> " + y)
# Given
x = 'hello world'
# from x create 'HELLO WORLD'
print(x.upper())
# from x create 'hellX wXrld'
print(x.replace('o', 'X'))
# Given
x = 10000.0
y = 3.0
# print "10000 / 3 = 3333" using x and y
# NOTE(review): with float inputs this actually prints
# "10000.0 / 3.0 = 3333.3333333333335", not the comment's literal text.
print("{x} / {y} = {z}".format(x=x, y=y, z=x/y))
# Given
s = ['hello', 'world']
# print 'helloworld'
print(s[0] + s[1])
# print 'hello world'
print(s[0] , s[1])
# print 'hello
print(s[0])
# world'
print(s[1])
# Given
x = "Monty Python and the Holy Grail"
# create the list ['Monty', 'Python', 'and', 'the', 'Holy', 'Grail']
print(x.split())
y = "one,two,three,four"
# create the list ['one', 'two', 'three', 'four'
print(y.split(','))
|
class Config:
    """Paths, run-mode switches and hyper-parameters for the poetry model."""

    def __init__(self):
        # Data locations. 'peot' spelling presumably a typo for 'poet', but
        # the files on disk use this name -- do not rename without moving them.
        self.data_dir = './data/'
        self.data_path = self.data_dir + 'peot.txt'
        self.pickle_path = self.data_dir + 'tang.npz'
        # Checkpoint paths (load and save point at the same file).
        self.load_path = './checkpoints/peot9.pt'
        self.save_path = './checkpoints/peot9.pt'
        # Run-mode switches: this configuration loads a model and predicts.
        self.do_train = False
        self.do_test = False
        self.do_predict = True
        self.do_load_model = True
        # Training hyper-parameters.
        self.num_epoch = 40
        self.batch_size = 128
        self.lr = 1e-3
        self.weight_decay = 1e-4
        # Sequence lengths: cap on generated output; max_len is presumably
        # the maximum training sequence length -- confirm against the loader.
        self.max_gen_len = 200
        self.max_len = 125
        # Network sizes.
        self.embedding_dim = 300
        self.hidden_dim = 256
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Indexer objects.
The choice to add the bug to the function rather than to the object was that
the indexer may be run on many bugs/items/etc so I didn't want the object to
become dependent on the bug it was manipulating.
"""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
class Error(Exception):
    """Base exception for indexer-related failures."""
class IndexerBase(object):
    """Abstract base class for bug indexers.

    Indexers are responsible for creating search indices for bugs from a
    specific provider; subclasses must override :meth:`Index`.
    """

    def __init__(self):
        pass

    def Index(self, bug):
        """Build the search index for *bug*; must be overridden."""
        raise NotImplementedError
|
'''
@Author: Ofey Chan
@Date: 2020-03-03 19:23:15
@LastEditors: Ofey Chan
@LastEditTime: 2020-03-03 20:07:31
@Description: General permutation group class.
@Reference:
'''
|
# Forcing recursion for no good reason. But it passed so....
def solution_r(n):
    """Recursively sum the multiples of 3 or 5 in [1, n].

    Quirk preserved from the original: for n <= 0 this returns n itself,
    and deep inputs will hit Python's recursion limit.
    """
    if n <= 0:
        return n
    rest = solution_r(n - 1)
    # Count n itself only when it is divisible by 3 or by 5.
    return rest + n if n % 3 == 0 or n % 5 == 0 else rest
def solution(number):
    """Return the sum of the multiples of 3 or 5 below *number*, in O(1).

    Uses inclusion-exclusion on arithmetic series instead of the previous
    one-call-per-integer recursion, which raised RecursionError for
    number around 1000 and above.

    :param int number: exclusive upper bound.
    :return: the sum, or 0 when number <= 0 (no positive multiples exist;
        the old code returned a meaningless negative value here).
    """
    if number <= 0:
        return 0

    def _series_sum(k):
        # Sum of k + 2k + ... + mk for the largest mk strictly below number.
        m = (number - 1) // k
        return k * m * (m + 1) // 2

    # Multiples of 15 are counted by both the 3- and 5-series; subtract once.
    return _series_sum(3) + _series_sum(5) - _series_sum(15)
assert solution(10) == 23, "Oops, recursion is the devil"
|
class Solution:
    def minJumps(self, arr: List[int]) -> int:
        """Minimum number of jumps from index 0 to the last index of *arr*.

        From index i one may move to i-1, i+1, or any j with arr[j] == arr[i].
        Breadth-first search over indices gives the shortest jump count.
        """
        last = len(arr) - 1
        # Map each value to every index where it occurs.
        value_to_indices = defaultdict(list)
        for idx, val in enumerate(arr):
            value_to_indices[val].append(idx)
        seen = {0}
        frontier = deque([(0, 0)])
        while frontier:
            idx, steps = frontier.popleft()
            if idx == last:
                return steps
            # Same neighbor order as before: step moves first, then
            # equal-value indices in reverse insertion order.
            for nxt in [idx - 1, idx + 1] + value_to_indices[arr[idx]][::-1]:
                if 0 <= nxt <= last and nxt != idx and nxt not in seen:
                    seen.add(nxt)
                    if nxt == last:
                        # Early exit: the child is the destination.
                        return steps + 1
                    frontier.append((nxt, steps + 1))
        return -1
|
def transitions(y, x):
    """Yield the four orthogonal neighbours of (y, x) in a fixed order:
    down, right, up, left."""
    for dy, dx in ((1, 0), (0, 1), (-1, 0), (0, -1)):
        yield y + dy, x + dx
def valid_transitions(arr):
    """Return a neighbour generator bound to the grid *arr*.

    The returned function yields only the orthogonal neighbours of (y0, x0)
    that are inside the grid and are not blocked ('-') cells.
    """
    height = len(arr)
    width = len(arr[0])

    def neighbours(y0, x0):
        for y, x in transitions(y0, x0):
            if 0 <= y < height and 0 <= x < width and arr[y][x] != "-":
                yield y, x

    return neighbours
def opp(player):
    """Return the opposing colour: 'W' <-> 'B' (any non-'W' input maps to 'W')."""
    return "B" if player == "W" else "W"
def dfs(board, init, visited, tran_fn, ans):
    """Flood-fill from *init*, writing alternating 'W'/'B' labels into *ans*.

    Iterative depth-first traversal with an explicit stack: *init* is
    labelled 'W' and each step to a neighbour flips the colour via opp().
    Cells already in *visited* (possibly from a previous call) are skipped.
    NOTE(review): the colour written depends on path parity from *init*;
    the grid adjacency is bipartite, so this is consistent regardless of
    visit order.
    """
    q = [(init, 'W')]
    while q:
        (y,x), player = q.pop()
        if (y,x) not in visited:
            visited.add((y,x))
            ans[y][x] = player
            for yn,xn in tran_fn(y,x):
                # print((y,x), (yn,xn))
                item = (yn,xn), opp(player)
                q.append(item)
# Read the board from stdin: first line "rows cols", then one string per row.
Y,X = [int(x) for x in input().split()]
board = []
for y in range(Y):
    s = input()
    board.append(s)
def run(board):
    """Colour every '.' region of *board* with alternating 'W'/'B' cells
    and print the resulting grid; blocked '-' cells remain '-'.
    """
    tran_fn = valid_transitions(board)
    Y = len(board)
    X = len(board[0])
    # Output grid starts fully blocked; dfs overwrites reachable cells.
    ans = [["-" for _ in range(X)] for _ in range(Y)]
    visited = set()
    for y in range(Y):
        for x in range(X):
            if board[y][x] == '.':
                dfs(board, (y,x), visited, tran_fn, ans)
    ans = ["".join(xs) for xs in ans]
    print("\n".join(ans))
    # print(board)
run(board)
# print(list(valid_transitions(board)(0,0)))
|
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Mandatory common configuration (required).
sharedWs = "{buildDir}/shared_ws"
XSCT_BUILD_SOURCE = "" # build source type: XSCT default, or git (set XSCT_BUILD_SOURCE = "git")
version = "2020.2" # installed Vitis version to use
vitisPath = "" # path where Vitis is installed on the user's system
outoftreebuild = True # presumably builds outside the source tree -- confirm
# Number of parallel build threads.
parallel_make = 20
# Deploy artifacts location.
deploy_artifacts = "{buildDir}/{machine}/deploy/"
# Run-test configuration.
rootfs_path = "{ROOT}/build/{machine}/deploy/rootfs.cpio.gz.u-boot"
boot_scr_path = ""
deployDir = "{ROOT}/build/{machine}/deploy"
# Local board configuration.
# Serial communication configuration: used to talk to a board connected to
# this host machine over a serial UART.
"""
These below configurations will used to communicate,
with board which was connected to your host machine by using serial uart
"""
board_interface = "host_target"
com = "/dev/ttyUSB0" # set the proper com port (ttyUSB0/ttyUSB1/ttyUSB2/ttyUSB3)
baudrate = "115200"
# Remote host configuration: enable remote_host (inside the string below)
# when the target is connected to a remote host machine.
"""
This below configuration need to enable if target connected to remote host machine.
remote_host = ""
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.