content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import argparse
def parser():
    """Build the training CLI and parse ``sys.argv``.

    Returns:
        argparse.Namespace with cfg, checkpoint, samples_list,
        destination and outputs attributes.
    """
    arg_parser = argparse.ArgumentParser(description="Train the model")
    arg_parser.add_argument(
        "--cfg",
        help="Path to the config file defining testing",
        type=str,
        default="/data/land_cover_tracking/config/weighted_loss.yml",
    )
    arg_parser.add_argument(
        "--checkpoint",
        help="Path to the config file",
        type=str,
        default="/data/land_cover_tracking/weights/cfg_weighted_loss_best_f1.pth",
    )
    arg_parser.add_argument(
        "--samples_list",
        help="Path to the list of samples for inference",
        type=str,
        default="test",
    )
    arg_parser.add_argument(
        "--destination",
        help="Path for saving results",
        type=str,
        default="/data/seg_data/inference",
    )
    arg_parser.add_argument(
        "--outputs",
        nargs="+",
        default=["raster"],
        help="What kind of outputs to generate "
        + "from ['alphablend','raster','alphablended_raster', 'raw_raster']",
    )
    return arg_parser.parse_args()
def create_locations(client, create_smb=False, create_s3=False):
    """
    Convenience helper that creates DataSync locations.
    Locations must exist before tasks can be created.

    Returns a dict with 'smb_arn' and 's3_arn' keys; each value is the
    created LocationArn, or None when that location was not requested.
    """
    arns = {"smb_arn": None, "s3_arn": None}
    if create_smb:
        smb_response = client.create_location_smb(
            ServerHostname="host",
            Subdirectory="somewhere",
            User="",
            Password="",
            AgentArns=["stuff"],
        )
        arns["smb_arn"] = smb_response["LocationArn"]
    if create_s3:
        s3_response = client.create_location_s3(
            S3BucketArn="arn:aws:s3:::my_bucket",
            Subdirectory="dir",
            S3Config={"BucketAccessRoleArn": "role"},
        )
        arns["s3_arn"] = s3_response["LocationArn"]
    return arns
import subprocess
def check_code_lang(branch):
    """
    Detect the programming languages touched by this PR.

    Runs ``git diff --name-only`` against the given remote branch and maps
    file extensions to (language, lint checker) pairs. Each language is
    reported at most once.

    Returns:
        (langs, checkers) parallel lists.
    """
    langs = []
    checkers = []
    diff_output = subprocess.getoutput(
        "git diff --name-only remotes/origin/{}..".format(branch))
    for changed_file in diff_output.splitlines():
        if changed_file.endswith(".py") and "Python" not in langs:
            langs.append("Python")
            checkers.append("pylint-3")
        elif changed_file.endswith(".go") and "GO" not in langs:
            langs.append("GO")
            checkers.append("golint")
        elif changed_file.endswith((".c", ".cpp", ".h")) and "C/C++" not in langs:
            langs.append("C/C++")
            checkers.append("pclint")
    return langs, checkers
from typing import IO
import io
import codecs
def with_encoding_utf8(text_stream: IO[str]) -> bool:
    """
    Return whether ``text_stream`` is a text stream whose encoding is UTF-8.

    :raises TypeError: if ``text_stream`` is not a text stream
    """
    if isinstance(text_stream, io.StringIO):
        # 'StringIO' keeps unicode strings in memory, so it has no encoding
        # attribute and never needs one — treat it as UTF-8 compatible.
        # https://stackoverflow.com/questions/9368865/io-stringio-encoding-in-python3/9368909#9368909
        return True
    try:
        encoding: str = text_stream.encoding  # type: ignore
    except AttributeError as exc:
        raise TypeError("Value is not a text stream.") from exc
    if encoding is None:
        # e.g. the strange case of `tempfile.SpooledTemporaryFile(mode='rt', encoding='utf-8')`
        return False
    try:
        # Normalize through the codec registry so aliases like 'UTF8' match.
        return codecs.lookup(encoding).name == 'utf-8'
    except LookupError:
        return False
def valores_posicion(matrix, nxn, isrow, pos):
    """Return info about a specific row/column of the matrix.

    For each non-empty cell, appends the number of empty cells seen since
    the previous value, followed by the value itself.

    Parameters:
        matrix -- nxn matrix of strings, "" marks an empty cell
        nxn    -- matrix dimension
        isrow  -- True to scan row `pos`, False to scan column `pos`
        pos    -- row/column index to scan

    Returns:
        list alternating [gap, value, gap, value, ...]; when the whole
        row/column is empty, returns [nxn, "0"].
    """
    valores = []
    espacios = 0
    for i in range(nxn):
        cell = matrix[pos][i] if isrow else matrix[i][pos]
        if cell != "":
            valores.append(espacios)
            valores.append(cell)
            # Bug fix: the original row branch never reset the gap counter
            # (the column branch did), so later values in a row reported
            # cumulative instead of per-value gaps.
            espacios = 0
        else:
            espacios += 1
    # If the row/column is entirely empty, report nxn empty cells
    if espacios == nxn:
        valores.append(espacios)
        valores.append("0")
    return valores
def namestr_bytestring(member_header_bytestring):
    """
    Slice the namestr record out of a member header.

    Namestrs start after 5 header lines of 80 bytes each and are
    140 bytes long.
    """
    offset = 80 * 5
    length = 140
    return member_header_bytestring[offset:offset + length]
def projectPartners(n):
    """Number of unique pairs among n participants, i.e. n choose 2.

    project_partners == PEP8 (forced mixedCase by CodeWars)
    """
    pair_count = n * (n - 1)
    return pair_count / 2
def _bytes(_str):
    """
    Convert ordinary Python string (utf-8) into byte array (should be considered
    as c-string).
    @rtype: bytes
    flaming edit:
    python 2.5 doesn't have bytes() or even bytearray() so we just bootleg it
    with str(), heh.

    NOTE(review): on Python 3 this returns `str`, not `bytes` — the str()
    fallback only made sense for the Python 2.5 target described above.
    Deliberately left as-is to preserve the original compatibility shim.
    """
    return str(_str)
import re
def turnApost(sentence):
    """
    Replace every right single quotation mark (’) preceded by a non-digit
    with a plain ASCII apostrophe (').
    """
    # \1 keeps the preceding non-digit character in place.
    return re.sub(r'(\D)’', r"\1'", sentence)
def weblog_matches(pattern, data):
    """
    Match weblog data line by line.

    Searches `pattern` against each line of the first 20000 characters of
    `data` (a readable, seekable text stream), summing the number of
    captured groups per matching line. The stream is rewound afterwards.

    Returns:
        Total number of captured groups over all matching lines.
        (Bug fix: the original always returned 0, discarding `total`.)
    """
    total = 0
    for line in data.read()[:20000].splitlines():
        p = pattern.search(line)
        if p:
            total += len(p.groups())
    data.seek(0)
    return total
import copy
def threshold(
    array,
    thresh_max=None,
    thresh_min=None,
    val_max=None,
    val_min=None,
    inPlace_pref=False):
    """
    Clamp values in an array to defined replacement values.
    RH 2021

    Args:
        array (np.ndarray): input array whose values are thresholded
        thresh_max (number, scalar): values above this are set to val_max
        thresh_min (number, scalar): values below this are set to val_min
        val_max (number, scalar): replacement for values above thresh_max
            (defaults to thresh_max itself)
        val_min (number, scalar): replacement for values below thresh_min
            (defaults to thresh_min itself)
        inPlace_pref (bool): modify the input array directly instead of
            working on a deep copy

    Return:
        output_array (np.ndarray): input array with values thresholded
    """
    if val_max is None:
        val_max = thresh_max
    if val_min is None:
        val_min = thresh_min
    output_array = array if inPlace_pref else copy.deepcopy(array)
    # Apply whichever bounds were supplied; both may be active at once.
    if thresh_max is None:
        output_array[output_array < thresh_min] = val_min
    elif thresh_min is None:
        output_array[output_array > thresh_max] = val_max
    else:
        output_array[output_array < thresh_min] = val_min
        output_array[output_array > thresh_max] = val_max
    return output_array
def read_txt_file(path):
    """
    For a given .txt path, reads the file and returns a string
    :param path: The path to the .txt file
    :return: string of the file
    """
    # Bug fix: the original opened the file without ever closing it,
    # leaking the handle; 'with' guarantees closure even if read() raises.
    with open(path, "r", encoding="utf-8") as file:
        return file.read()
def findall(s, sub):
    """Return every index at which 'sub' occurs within 's', as a list.

    Overlapping occurrences are included (search resumes one character
    past each hit).
    """
    positions = []
    start = 0
    while True:
        hit = s.find(sub, start)
        if hit < 0:
            break
        positions.append(hit)
        start = hit + 1
    return positions
def unzip(l):
    """Unzips a list of pairs into a tuple of two lists.

    e.g. [(1, 2), (3, 4)] -> ([1, 3], [2, 4]); an empty input
    yields ([], []).
    """
    firsts = []
    seconds = []
    for pair in l:
        firsts.append(pair[0])
        seconds.append(pair[1])
    return firsts, seconds
def _CurrentRolesForAccount(project_iam_policy, account):
"""Returns a set containing the roles for `account`.
Args:
project_iam_policy: The response from GetIamPolicy.
account: A string with the identifier of an account.
"""
return set(binding.role
for binding in project_iam_policy.bindings
if account in binding.members) | 64bb0a51600778580016e3545fe7230949b46d63 | 39,627 |
def switch(n, m):
    """
    Generates a generic switch symbol for an nPsT sort of switch.
    Probably won't generate a useful pin numbering when T>2.

    :param n: number of poles (P)
    :param m: number of throws per pole (T)
    :return: list of strings — one legacy KiCad symbol-library (.lib)
        definition for the switch, line by line
    """
    out = []
    # Convert to stupid letters for 1 and 2
    name_letters = {1: "S", 2: "D"}
    name_n = name_letters[n] if n in name_letters else str(n)
    name_m = name_letters[m] if m in name_letters else str(m)
    # Number of pins on the right is n*m, plus one per pole for spacing,
    # minus the final spacing (n starts at 1), rounded up to nearest odd
    # number so that half the height is on the 100mil grid.
    n_pins_right = n * m + n - 1
    if n_pins_right % 2 == 0:
        n_pins_right += 1
    height = 100 * (n_pins_right - 1)
    hheight = height // 2
    # Ref goes at the top, 100 above the top pin, unless only one throw
    # in which case we also need to clear the switch graphic
    refheight = hheight + 100
    if m == 1:
        refheight += 50
    # Value/name goes below, unless m is even, in which case the bottom spacer
    # isn't there so needs to be ignored
    valheight = -(hheight + 100)
    if n % 2 == 1 and m % 2 == 0:
        valheight += 100
    # Output component header
    name = "SWITCH_{}P{}T".format(name_n, name_m)
    out.append("#\n# {}\n#".format(name))
    out.append('DEF {} SW 0 1 Y N 1 F N'.format(name))
    out.append('F0 "SW" 0 {} 50 H V C CNN'.format(refheight))
    out.append('F1 "{}" 0 {} 50 H V C CNN'.format(name, valheight))
    out.append('F2 "" 0 0 50 H I C CNN')
    out.append('F3 "" 0 0 50 H I C CNN')
    out.append('DRAW')
    # Output drawing
    pole_top = hheight
    for pole in range(n):
        # Draw pole: the common pin on the left, its contact circle, and
        # the lever line reaching toward the throws.
        pole_num = pole*(m+1) + 2
        pole_y = pole_top - (100 * (m - 1))//2
        if m % 2 == 0:
            pole_y -= 50
        out.append('X "~" {} -100 {} 40 R 50 50 1 1 P'
                   .format(pole_num, pole_y))
        out.append('C -50 {} 10 1 1 0 N'.format(pole_y))
        out.append('P 2 1 1 0 -50 {} 50 {} N'
                   .format(pole_y + 10, pole_y + 90))
        for throw in range(m):
            # Draw throws: one right-hand pin plus contact circle each.
            # Note the pin numbering skips over the pole's own number.
            throw_num = pole_num + throw - 1
            throw_y = pole_top - 100 * throw
            if throw > 0:
                throw_num += 1
            out.append('X "~" {} 100 {} 40 L 50 50 1 1 P'
                       .format(throw_num, throw_y))
            out.append('C 50 {} 10 1 1 0 N'.format(throw_y))
        # Move down for next pole
        pole_top -= 100 * (m + 1)
    # Draw connecting dashed line (mechanical link between ganged poles)
    if n > 1:
        pole_y = hheight - (100 * (m - 1))//2 + 50
        if m % 2 == 0:
            pole_y -= 50
        # Dashes are 5 mil long with a 20 mil pitch.
        for _ in range(5*(m+1)*(n-1)):
            out.append('P 2 1 1 0 0 {} 0 {} N'
                       .format(pole_y, pole_y - 5))
            pole_y -= 20
    # Done
    out.append('ENDDRAW\nENDDEF\n')
    return out
def add_to_group(client, profile, user, group_name):
    """ Adds the specified user to the specified Cognito group.

    On success prints a confirmation and returns the service response;
    on known client errors prints a diagnostic and returns the error's
    response payload instead of raising.
    """
    try:
        response = client.admin_add_user_to_group(
            UserPoolId=profile["user_pool_id"],
            Username=user.email,
            GroupName=group_name,
        )
    except client.exceptions.UserNotFoundException as error:
        print(f"User {user.email} does not exist")
        return error.response
    except client.exceptions.ResourceNotFoundException as error:
        print(f"Group {group_name} does not exist")
        return error.response
    except client.exceptions.ClientError as error:
        print(f"Fail to add user {user.email} to group {group_name}")
        return error.response
    if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
        print(f"User {user.email} added to group {group_name}")
        return response
def parse_response(request, verbose=False, return_request=False):
    """Return a dictionnary containing at least 'status' and 'message' keys.

    The goal is that the response is always the same, whether the output type
    of the WIMS server is set to JSON or WIMS (plain text: first line is
    "STATUS [code]", the remainder is the message).

    /!\ Warning: output must be set 'ident_type=json' in
    'WIMS_HOME/log/classes/.connections/IDENT' for this API to work properly.

    :param request: a requests.Response-like object (.json() and .text)
    :param verbose: include the raw response text in the error message
    :param return_request: return the request object instead of raising
        when the response is not a valid adm/raw response
    :raises ValueError: when the response is not an adm/raw response and
        return_request is False
    """
    try:
        response = request.json()
    # Bug fix: was a bare 'except:' which also swallowed KeyboardInterrupt
    # and programming errors; .json() signals malformed JSON via ValueError
    # (requests' JSONDecodeError subclasses it).
    except ValueError:
        status, message = request.text.split('\n', maxsplit=1)
        code = "N/A"
        if ' ' in status:
            status, code = status.split(' ', maxsplit=1)
        response = {
            'status': status,
            # Drop the trailing newline kept by split(maxsplit=1).
            'message': message if '\n' not in message else message[:-1],
            'code': code,
        }
    if response['status'] not in ["ERROR", "OK"]:
        if not return_request:
            raise ValueError("Not a adm/raw response, maybe the URL is incorrect. "
                             + ("Use verbose=True to see the received response content "
                                + "or use return_request=True to get the request object."
                                if not verbose else "Received:\n\n" + request.text))
        else:
            return request
    return response
def extract_clas(the_sidd):
    """
    Extract the classification string from a SIDD as appropriate for NITF Security
    tags CLAS attribute.

    Parameters
    ----------
    the_sidd : SIDDType|SIDDType1

    Returns
    -------
    str
        First character of the classification, or 'U' (unclassified)
        when no classification is set.
    """
    class_str = the_sidd.ProductCreation.Classification.classification
    if not class_str:
        return 'U'
    return class_str[:1]
def fs_begin_open_shift(self):
    """
    Begin opening a shift (fiscal-storage command 0xFF41), authenticated
    with the administrator password.
    """
    OPEN_SHIFT_OPCODE = 0xFF41
    return self.protocol.command(OPEN_SHIFT_OPCODE, self.admin_password)
def auto_cmap(labels):
    """
    Pick an appropriate categorical color map for the given labels:
    Category10 for up to 10 labels, Category20 for up to 20.
    """
    assert len(labels) <= 20, "Too many labels to support"
    if len(labels) <= 10:
        return "Category10_10"
    return "Category20_20"
def get_cicle_outer_square(cicle):
    """
    Bounding (circumscribed) square of a circle.

    :param cicle: sequence (center_x, center_y, radius)
    :return: [[x0, y0], [x1, y1]] — top-left and bottom-right corners
    """
    center_x = int(cicle[0])
    center_y = int(cicle[1])
    radius = int(cicle[2])
    top_left = [center_x - radius, center_y - radius]
    bottom_right = [center_x + radius, center_y + radius]
    return [top_left, bottom_right]
import re
def create_target_population(cov_pop_burden_df, param_df, index):
    """Adds a new column to cov_pop_burden_df which is the target population

    Inputs:
        cov_pop_burden_df - a df which must contain 'incidence_number' and
            'pop_0-0' columns
        param_df - a df of parameters which must contain the column
            'intervention_type' where all of the values are 'Therapeutic',
            'Device', 'Rapid diagnostic test' or 'Vaccine'
        index - a string that is one of the indexes of param_df
    Returns:
        a cov_pop_burden_df with target_pop column added
    Raises:
        ValueError - if intervention_type is not one of the known kinds
    """
    intervention_type = param_df.loc[index, 'intervention_type']
    # Incidence is the target population for therapeutics and diagnostics
    if re.search('Therapeutic', intervention_type):
        cov_pop_burden_df['target_pop'] = cov_pop_burden_df['incidence_number']
    elif intervention_type in ['Device', 'Rapid diagnostic test']:
        cov_pop_burden_df['target_pop'] = cov_pop_burden_df['incidence_number']
    # Select population column if it is a vaccine
    #~ This assumes it is an infant vaccination
    elif intervention_type == 'Vaccine':
        cov_pop_burden_df['target_pop'] = cov_pop_burden_df['pop_0-0']
    else:
        # Bug fix: the original message read "...for is not valid" — the
        # offending index was missing; also reuse the already-fetched
        # intervention_type instead of re-querying param_df.
        raise ValueError(
            "The value of intervention_type for {} is not valid".format(index))
    return cov_pop_burden_df
def create_dll(root_head):
    """Algorithm to Conver Binary Tree to Doubly Linked List

    Walks the tree starting from the left-most node and appends nodes to
    `doubly_linked_list`, following "triangle" shapes (node, its .prev
    parent, and that parent's .right child).

    NOTE(review): nodes are expected to expose .left, .right and .prev
    (parent) attributes — confirm against the tree builder; also note the
    traversal terminates when a node is appended twice, and several
    branches look order-sensitive, so this is documented rather than
    restructured.
    """
    doubly_linked_list = []
    # --- Find DLL Head NOTE: Just need to go left till hit NONE
    head = None
    current = root_head
    while True:
        if current.left:
            current = current.left
        else:
            head = current
            break
    stack = [head]
    visited = set()
    def add_visited(*args):
        # Mark several nodes visited at once.
        for item in args:
            visited.add(item)
    # --- Crawls back up, following each Triangle Shape from left Vertices to Right
    while True:
        try:
            current = stack.pop()
        except IndexError:
            # NOTE(review): on an empty stack this keeps the previous
            # `current` and relies on the duplicate check below to stop —
            # fragile but preserved as-is.
            pass
        if current in doubly_linked_list:
            break
        if current.left and current.left not in visited:  # Goes Down to Next Triangle Shape
            stack.append(current.left)
            continue
        elif current.prev and current.prev.right:
            if current not in visited:
                doubly_linked_list.append(current)
            if current.prev.right.left and current.prev.right.left not in visited:  # If finds a Left Vertices, Goes down
                add_visited(current, current.prev, current.prev.right)
                doubly_linked_list.append(current.prev)
                if current.prev.prev:
                    stack.append(current.prev.prev)
                stack.append(current.prev.right.left)
                continue
            elif current.prev.right.right:  # Checks if Right Vertices Has Left or right
                if current.prev.right.right.left:
                    # NOTE(review): likely intended
                    # current.prev.right.right.left here (pattern of the
                    # surrounding branches) — confirm before changing.
                    stack.append(current.right.right.left)
                    continue
                doubly_linked_list.append(current.prev.right.right)
            else:
                doubly_linked_list.append(current.prev)
                doubly_linked_list.append(current.prev.right)
                add_visited(current, current.prev, current.prev.right)
                if current.prev.prev and current.prev.prev not in visited:
                    stack.append(current.prev.prev)
        elif current.prev:
            doubly_linked_list.append(current.prev)
            add_visited(current)
            if current.prev.prev:
                stack.append(current.prev.prev)
        elif current.right:
            doubly_linked_list.append(current)
            add_visited(current)
            if current.right.left:
                stack.append(current.right.left)
                continue
            elif current.right.right:
                if current.right.right.left:
                    stack.append(current.right.right.left)
                    continue
                doubly_linked_list.append(current.right)
                doubly_linked_list.append(current.right.right)
                add_visited(current.right, current.right.right)
            else:
                add_visited(current.right)
                doubly_linked_list.append(current.right)
    return doubly_linked_list
def append_space(prompt):
    """Adds a space to the end of the given string if none is present."""
    return prompt if prompt.endswith(' ') else prompt + ' '
import torch
def gather_nd(params, indices):
    """Torch analogue of tf.gather_nd.

    params is of "n" dimensions and has size [x1, x2, x3, ..., xn],
    indices is of 2 dimensions and has size [num_samples, m] (m <= n):
    each row of `indices` selects one element/slice of `params`.

    :raises TypeError: if indices is not a torch.Tensor
    """
    # Idiom fixes: isinstance instead of `type(x) == T`, raise instead of
    # `assert` (which vanishes under -O), and Tensor.tolist() directly
    # instead of the needless .numpy().tolist() round-trip.
    if not isinstance(indices, torch.Tensor):
        raise TypeError("indices must be a torch.Tensor, got %r" % type(indices))
    return params[indices.transpose(0, 1).long().tolist()]
import sys
def flexibleInput(prompt=''):
    """Use different input function depending on Python version.

    On Python 2 uses raw_input() (py2's input() would eval the text);
    on Python 3 uses input(). Returns the line read, without the newline.
    """
    if sys.version_info[0] < 3:
        # raw_input only exists on Python 2; NameError here would mean the
        # version check is wrong.
        return raw_input(prompt)
    else:
        return input(prompt)
def remove_page(api, assessment_id):
    """Delete every page whose name starts with `assessment_id`.

    Always returns True once all matching pages have been deleted.
    """
    for page in api.pages.get():
        if page.name.startswith(assessment_id):
            api.pages.delete(page.id)
    return True
def var_name(var, glb):
    """
    Look up the global name bound to a given object (by identity).

    e.g.
        in: a = 5
        in: var_name(a, globals())
        out: 'a'

    :param var: the object to look up
    :param glb: globals() mapping to search
    :return: the first name bound to `var`, or 'unkonw' if none is found
        (NOTE(review): the 'unkonw' typo is a returned value and is
        preserved in case callers compare against it)
    """
    for candidate in glb:
        if glb[candidate] is var:
            return candidate
    return 'unkonw'
def getHosts(config):
    """Collects the host IPs in a list to be iterated over.

    Args:
        config: mapping produced by configparser.ConfigParser().read(),
            expected to contain a "Hosts" section.

    Returns:
        list[str]: IP addresses of PDUs, in section order.
    """
    hosts_section = config["Hosts"]
    return [hosts_section[key] for key in hosts_section]
def get_registers(frame, kind):
    """Returns the register set of the given kind from an lldb frame.

    Matches case-insensitively against each register set's name.
    Returns None if there's no such kind.
    """
    wanted = kind.lower()
    for register_set in frame.GetRegisters():  # SBValueList
        if wanted in register_set.GetName().lower():
            return register_set
    return None
import requests
import json
def photo_meetup(repo_data, matches):
    """
    Fetch the download URL of a meetup photo from our GitHub repository.

    :param repo_data: dataclass with .url and .headers for the GitHub API
    :param matches: regex match groups; matches[1], when present, is the
        1-based meetup number requested
    :return: (download_url, meetup_number) or (None, None) for an
        out-of-range number
    """
    contents_url = f"{repo_data.url}/contents/palestras?ref=master"
    listing = json.loads(
        requests.get(contents_url, headers=repo_data.headers).content)
    total = len(listing)
    if len(matches) == 1:
        # bare /meetup: always the latest one
        return listing[-1].get("download_url"), (total)
    requested = int(matches[1])
    if requested < total:
        # /meetup \d{1,2}
        return listing[requested - 1].get("download_url"), requested
    # invalid number
    return None, None
def _replace_bucket_unit(match_obj):
"""Replace the intern('unit') in `bucket()` with just the string
literal, because the unit determines the return type of the column and the
function would not be able to validate a unit if it was interned."""
full = match_obj.group(0)
interned = match_obj.group(1)
unit = match_obj.group(2)
# from "bucket(col, intern('unit'))" to "bucket(col, 'unit')"
return "{0}'{1}')".format(full[0 : full.index(interned)], unit) | 2fd0fa094bb816cf3842a410eb668e202031bde9 | 39,654 |
def calculate_chksum(buffer):
    """Calculate a simple checksum by XOR'ing each hex digit in the buffer.

    Newline characters are skipped. Returns the checksum as an int.
    """
    checksum = 0
    for digit in buffer:
        if digit == '\n':
            continue
        checksum ^= int(digit, 16)
    return checksum
def remove_char(fasta_d):
    """
    Strip leading '>' characters from fasta headers in the dictionary
    values (left over from empty lines in the fastas).

    Parameters
    ----------
    fasta_d : dict
        Fasta dictionary whose values are header strings.

    Returns
    -------
    The same dictionary, mutated in place, with leading '>' removed
    from every value.
    """
    for name in fasta_d:
        fasta_d[name] = fasta_d[name].lstrip(">")
    return fasta_d
from datetime import datetime
def convert_to_datetime(date: str, time: str) -> datetime:
    """
    Combine a 'YYYY-MM-DD' date and an 'HH:MM AM/PM' time string into a
    datetime object.
    """
    combined = date + time
    return datetime.strptime(combined, '%Y-%m-%d%I:%M %p')
def get_final_sector_classification():
    """Return the list of sectors to be used in the new multiregional
    Input-Output table.

    Outputs:
        - list of sector codes 'secA' .. 'secI'
    """
    return ["sec" + letter for letter in "ABCDEFGHI"]
def mean(seq):
    r"""Returns the arithmetic mean of the sequence *seq* =
    :math:`\{x_1,\ldots,x_n\}` as :math:`A = \frac{1}{n} \sum_{i=1}^n x_i`.

    Raises ZeroDivisionError for an empty sequence.
    """
    return sum(seq) / len(seq)
def categoryEncode(categories):
    """
    Assign each category a numeric code, starting from 1.

    Duplicate categories keep the code of their last occurrence (codes
    keep incrementing per item, matching the original behaviour).
    """
    return {category: code
            for code, category in enumerate(categories, start=1)}
def _make_list_default(value, defval):
"""
Converts value into a list and uses default if the value is not passed.
:param value: a single value (that will be converted into a list with one item) or a list or tuple of values
:param defval: the default that is used if value is empty
:return: list of values
"""
if value and not isinstance(value, tuple) and not isinstance(value, list):
value = (value,)
return list(value or defval) | 1b79c429b6251cbc6ed7093ef3ead83b225ab916 | 39,668 |
import os
import errno
def makedirs(path):
    """
    Create directory `path`, including its parent directories if they do
    not already exist. Return True if the directory did not exist and was
    created, or False if it already existed. Any other OS error is
    propagated.
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        # Python 3 maps errno.EEXIST to FileExistsError.
        return False
    return True
def add_new_subspecies(ctx, user, species, subspecies_id):
    """
    Register a brand-new SubSpecies under a Species and announce it.

    Should only be called the first time a subspecies is detected, since
    it issues an ADD chip via send_chips().
    """
    new_subspecies = species.subspecies.create_child(
        subspecies_id=subspecies_id,
        species_type=species.type,
    )
    # Announce the newly added subspecies.
    new_subspecies.send_chips(ctx, user)
    return new_subspecies
def valuelist_streams(streams, element):
    """
    Template tag building the context for a stream value list, including
    each stream's distance to the source, for detail pages.
    """
    valuelist = [
        {
            'pk': stream.pk,
            'text': stream.name_display,
            'distance_to_source': stream.distance_to_source(element),
        }
        for stream in streams
    ]
    return {
        'valuelist': valuelist,
        'modelname': 'stream'
    }
def reduce_keys(dict_long_keys):
    """Shorten keys generated in the multiqc report.

    Keeps only the part before the first underscore — which is always the
    lims sample ID. Returns a new dict.
    """
    shortened = {}
    for long_key, value in dict_long_keys.items():
        shortened[long_key.split('_')[0]] = value
    return shortened
def process_molecules_for_final_mdl(molecules):
    """Grab molecule definitions from mdlr and render them for the final MDL.

    Each entry in `molecules` is [[name, component_dict, ...], info_dict].
    Components with both 'componentLoc' and 'componentRot' are rendered as
    name{loc=[x, y, z], rot=[x, y, z, angle]}; others as their bare name.

    Returns the rendered molecule section as a single string.
    """
    # Cleanup vs. original: removed the unused enumerate index and the
    # unused (and failure-prone) molecule[1]['diffusionFunction'] access
    # that fed only commented-out code.
    molecule_str = ""
    for molecule in molecules:
        molecule_name = molecule[0][0]
        component_parts = []
        for component in molecule[0][1:]:
            c_name = component['componentName']
            try:
                c_loc = component['componentLoc']
                c_rot = component['componentRot']
                component_parts.append(
                    "{}{{loc=[{}, {}, {}], rot=[{}, {}, {}, {}]}}".format(
                        c_name, c_loc[0], c_loc[1], c_loc[2],
                        c_rot[0], c_rot[1], c_rot[2], c_rot[3]))
            except KeyError:
                # Position/rotation are optional; fall back to the name.
                component_parts.append(c_name)
        molecule_str += "\t{}({})\n".format(molecule_name, ", ".join(component_parts))
    return molecule_str
import atexit
def init_context(dev):
    """
    Create a CUDA context that will be cleaned up properly.

    Creates a context on the specified device and registers its pop()
    method with atexit so it is torn down at interpreter shutdown.

    Parameters
    ----------
    dev : pycuda.driver.Device
        GPU device.

    Returns
    -------
    ctx : pycuda.driver.Context
        Created context.
    """
    ctx = dev.make_context()
    # Guarantee the context is popped when the interpreter exits.
    atexit.register(ctx.pop)
    return ctx
import math
def slope_from_angle(angle, inverse=False):
    """Returns the function slope for a given angle in degrees.

    Returns ``float("inf")`` when the angle is a multiple of 180. When the
    origin (0,0) of the matrix is in the upper left corner (as opposed to
    bottom left), set `inverse` to True to flip the sign.
    """
    if angle % 180 == 0:
        slope = float("inf")
    else:
        slope = 1 / math.tan(math.radians(angle))
    return -slope if inverse else slope
def well(project):
    """ A Well subsetted from the loaded Project object "" """
    # Returns the first well of the project; assumes the project is
    # non-empty — TODO confirm upstream fixtures guarantee at least one well.
    return project[0]
import struct
def _ctx_to_int64(ctx):
"""Pack context into int64 in native endian"""
data = struct.pack("=ii", ctx.device_type, ctx.device_id)
return struct.unpack("=q", data)[0] | d5e1564c374a713ce756a5679480ea5ebcd6eed4 | 39,679 |
def every_n_steps(step, n):
    """True on every n-th step. Step starts from 0, so steps
    n-1, 2n-1, ... return True."""
    return step % n == n - 1
def my_compute_weights(context):
    """
    Compute ordering weights: an even target weight for each position in
    context.stocks_worst.
    """
    short_count = len(context.stocks_worst)
    return 1.00 / short_count
import sympy
def get_idxs(exprs):
    """
    Collect every sympy.tensor.indexed.Idx instance appearing in the
    expressions, returned sorted by string representation.
    """
    found = set()
    for expression in exprs:
        found.update(expression.find(sympy.Idx))
    return sorted(found, key=str)
def qscleaner(w):
    """
    Strip every '?' character from a word.
    """
    return w.replace('?', '')
def check_sequence_signing_type(sequence):
    """
    Verify that no certificate entry defines both signing_key and
    self_signed at the same time.

    :param sequence: iterable of entity dicts, each optionally carrying a
        'certificate' dict
    :return: dict with 'result' (bool) and a human-readable 'message'
    """
    for entity in sequence:
        cert = entity.get('certificate')
        # Entities without a certificate are not checked.
        if cert is None:
            continue
        if 'signing_key' in cert and 'self_signed' in cert:
            return {
                'result': False,
                'message': ("The certificate '%s' can't define signing_key and self_signed at the "
                            "same time.") % entity['name']
            }
    return {
        'result': True,
        'message': "All certificates have a correct private key attribute."
    }
def calculateGeneCountProbability(numGenes, pMother, pFather):
    """
    Probability that the parents pass on exactly `numGenes` mutated genes,
    given each parent's probability of passing one on.
    """
    if numGenes == 0:
        # neither parent passes a gene
        return 1.0 * (1 - pMother) * (1 - pFather)
    if numGenes == 1:
        # exactly one parent passes a gene
        return 1.0 * (1 - pMother) * pFather + (1 - pFather) * pMother
    # both parents pass a gene
    return 1.0 * pMother * pFather
def updateWeights(m, weights, D, alpha):
    """
    Apply one gradient-descent step to the weights, in place.

    Parameters
    ----------
    m: labels num (unused here, kept for interface compatibility)
    weights: list of weight/theta values or matrices
    D: gradients, parallel to `weights`
    alpha: learning rate

    Returns
    -------
    The same `weights` list, updated as w[i] = w[i] - alpha * D[i].
    """
    for idx in range(len(weights)):
        weights[idx] = weights[idx] - alpha * D[idx]
    return weights
import struct
def read_subheader(subheader):
    """
    Decode a 32-byte subheader into a list.

    Parameters
    ----------
    subheader : bytes
        32-byte record in the subheader format.

    Returns
    -------
    list
        10-item list:
            [0] subflgs   [1] subexp    [2] subindx  [3] subtime
            [4] subnext   [5] subnois   [6] subnpts  [7] subscan
            [8] subwlevel [9] subresv
        The first two single-byte fields are converted to ints via ord().
    """
    layout = "<cchfffiif4s"
    fields = struct.unpack(layout.encode('utf8'), subheader)
    decoded = [ord(flag) for flag in fields[:2]]
    decoded.extend(fields[2:])
    return decoded
import time
def enrich_raw(msg):
    """Enriches a raw websocket message with a receive timestamp.

    Messages arrive one at a time: dict messages get a
    'receive_timestamp' key, list messages get the timestamp appended.
    The timestamp is milliseconds since the epoch. The message is
    mutated in place and returned.
    """
    stamp_ms = int(time.time() * 10**3)
    if isinstance(msg, dict):
        msg['receive_timestamp'] = stamp_ms
    elif isinstance(msg, list):
        msg.append(stamp_ms)
    else:
        raise TypeError(f"enriching raw data of type {type(msg)} not supported")
    return msg
def is_safe_to_wait(transfer, reveal_timeout, block_number):
    """ True if there are more than enough blocks to safely settle on chain and
    waiting is safe.

    A node may wait for a new balance proof only while more than
    reveal_timeout blocks remain before the transfer expires; at that
    block and onwards it is not safe to wait.
    """
    return transfer.expiration - reveal_timeout > block_number
def mutateScript(context, script, mutator):
    """Apply `mutator` to every command in the `script` list of strings.

    The mutator is called with `context` and the string to mutate and must
    return the modified string. Before each call, `context.tmpBase` is set
    to a path unique to that command (the original base plus "-<index>"
    when the script has more than one command).
    """
    base = context.tmpBase
    multiple = len(script) > 1
    mutated_script = []
    for index, command in enumerate(script):
        suffix = "-%s" % (index,) if multiple else ""
        context.tmpBase = base + suffix
        mutated_script.append(mutator(context, command))
    return mutated_script
from datetime import datetime
def get_date(date_str: str):
    """Parse a DD/MM string into a datetime pinned to the year 2016.

    Returns None when the string does not match the expected format.
    """
    try:
        parsed = datetime.strptime(date_str, "%d/%m")
    except ValueError:
        return None
    return parsed.replace(year=2016)
def get_fep_atom_pdbindexes(wtfep):
    """Return the PDB indices of FEP atoms.

    Parses the [atoms] section of a Q fepfile; '#' and '!' both start
    comments.

    :param wtfep: path to the wild-type fepfile
    :type wtfep: str
    :return: list of int PDB atom indices listed in the fepfile
    """
    # Fixes vs. original: the file handle is now closed via 'with' (it was
    # left to the garbage collector), and the docstring no longer documents
    # a nonexistent 'wtpdb' parameter.
    indexes = []
    in_atoms_section = False
    with open(wtfep) as fep_file:
        for raw_line in fep_file:
            # Normalize both comment markers, then strip the comment.
            line = raw_line.replace('#', '!').split('!')[0].strip()
            if line == '':
                continue
            if line.lower() == '[atoms]':
                in_atoms_section = True
            elif '[' in line:
                # Any other section header ends the atoms section.
                in_atoms_section = False
            elif in_atoms_section:
                qnum, pdbnum = line.split()
                indexes.append(int(pdbnum))
    return indexes
def file_contains_markers(path, start, end):
    """
    :return: True if both the :start and :end markers appear anywhere in
        the file at `path`
    """
    with open(path, 'r') as handle:
        contents = handle.read()
    return start in contents and end in contents
from datetime import datetime
def set_correct_session(request, k):
    """Fetch key *k* from the session's 'last_kwargs' entry, with fallbacks.

    Keeps MonthView filtering working when the session has no stored
    kwargs yet: 'contract' falls back to '00', 'year'/'month' to the
    current date parts, anything else to None.

    :param request: request object (must expose a ``session`` mapping)
    :param k: key to look up in the stored kwargs
    :return: the stored value, '00', a formatted date part, or None
    """
    try:
        return request.session["last_kwargs"][k]
    except KeyError:
        pass
    now = datetime.now()
    fallbacks = {
        "contract": "00",
        "year": now.strftime("%Y"),
        "month": now.strftime("%m"),
    }
    return fallbacks.get(k)
def getClosestD(df, d):
    """Return the row of *df* whose 'distance' value is closest to *d*.

    Used for interpolation.

    Args:
        df: pandas DataFrame containing a 'distance' column
        d: target distance (integer)
    Returns:
        the single closest row of df
    """
    closest_pos = (df['distance'] - d).abs().argmin()
    return df.iloc[closest_pos]
def minimal_shear_reinforcement(values, model, concrete_type, b):
    """Calculate the minimal necessary shear reinforcement.

    Parameters
    ----------
    values : object
        provides material data via ``concrete()`` and ``steel()``
    model : class
        class method that contains the Finite Element Analysis
    concrete_type : str
        string that defines the concrete type, e.g. 'c3037'
    b : float
        section width
    Returns
    -------
    asw_min : float
        minimal necessary shear reinforcement
    """
    fctm = values.concrete(concrete_type)['fctm']
    fyk = values.steel()
    # Minimum reinforcement ratio from the code formula, scaled by the
    # section width and a factor of 10000.
    return (0.16 * fctm / fyk) * b * 10000
def ans(x):
    """(func) -> float

    Return the known derivative of fun() evaluated at ``x``, i.e. 6*x.
    """
    return x * 6
from typing import List
def ingresar_notas(n: int = 10, min_: int = 0, max_: int = 10) -> List[int]:
    """Prompt for the grades of ``n`` students and return them as a list.

    Keeps asking until ``n`` valid integer grades within
    ``[min_, max_]`` have been entered; invalid entries are reported
    and re-requested.

    :param n: number of students.
    :param min_: minimum allowed grade.
    :param max_: maximum allowed grade.
    :return: list with the students' grades.
    :rtype: list
    """
    notas: List[int] = []
    while len(notas) < n:
        raw = input(f'[{len(notas) + 1}] Ingrese la nota: ')
        try:
            nota = int(raw)
        except ValueError:
            print('La nota debe ser un número.')
            continue
        if min_ <= nota <= max_:
            notas.append(nota)
        else:
            print('Nota fuera de rango')
    return notas
def bordered(text):
    """Wrap *text* in a box drawn with single-line box characters.

    ┌─┐
    │ │
    └─┘
    """
    lines = text.splitlines()
    width = max(len(line) for line in lines)
    top = '┌' + '─' * width + '┐'
    bottom = '└' + '─' * width + '┘'
    # Pad each line to the box width (no line exceeds it, since the
    # width is the maximum line length).
    body = ['│' + line.ljust(width) + '│' for line in lines]
    return '\n'.join([top] + body + [bottom])
import torch
def build_mask_mat_for_batch(seq_length):
    """Precompute span masks used by the soft-labeling functions.

    ``mask_mat[i, j]`` is a float vector of length ``seq_length`` that
    is 1 on positions i..j (inclusive) and 0 elsewhere.
    Ex: i=1, j=3, seq_length=5 -> [0, 1, 1, 1, 0].  Spans with i > j
    are all zeros.

    Arguments:
        seq_length (int) : length of a sequence(s)
    Returns:
        tensor : seq_length x seq_length x seq_length float tensor
    """
    positions = torch.arange(seq_length)
    # Position p belongs to span (i, j) iff i <= p <= j; broadcasting
    # builds all spans at once.
    starts = positions.view(-1, 1, 1)
    ends = positions.view(1, -1, 1)
    covered = (starts <= positions.view(1, 1, -1)) & (positions.view(1, 1, -1) <= ends)
    return covered.float()
def valida_cpf_v1(cpf):
    """Validate a Brazilian CPF by recomputing its two check digits.

    The first check digit weights the first 9 digits by 10..2, the
    second weights the first 10 digits by 11..2; in both cases the
    digit is ``sum * 10 % 11`` with 10 mapped to 0.

    Args:
        cpf (string): the CPF digits as a string
    Returns:
        bool: True when both verification digits match, False otherwise
    """
    def check_digit(num_digits, start_weight):
        total = sum(int(cpf[i]) * (start_weight - i) for i in range(num_digits))
        digit = total * 10 % 11
        # A remainder of 10 maps to a check digit of 0.
        return 0 if digit == 10 else digit

    first = check_digit(9, 10)
    second = check_digit(10, 11)
    return int(cpf[-2]) == first and int(cpf[-1]) == second
from typing import List
def scalar_multiply(matrix: List[list], n: int) -> List[list]:
    """Multiply every element of *matrix* by the scalar *n*.

    >>> scalar_multiply([[1,2],[3,4]],5)
    [[5, 10], [15, 20]]
    >>> scalar_multiply([[1.4,2.3],[3,4]],5)
    [[7.0, 11.5], [15, 20]]
    """
    scaled = []
    for row in matrix:
        scaled.append([element * n for element in row])
    return scaled
from typing import List
import setuptools
def get_packages() -> List[str]:
    """Returns subpackages to include in this package.

    Prefixes every package found by ``setuptools.find_packages()``
    with the base package name and puts the base package itself first.

    NOTE(review): the result depends on the current working directory
    at call time — presumably the directory containing setup.py;
    verify against the build invocation.
    """
    base_package = "gazoo_device.tests"
    sub_packages = [base_package + "." + sub_package
                    for sub_package in setuptools.find_packages()]
    return [base_package] + sub_packages
def __key2str(key):
    """
    Take a key and return in string format.

    Tuple keys are flattened into a space-separated string; any other
    key is returned unchanged.
    """
    # Exact type check kept from the original: tuple subclasses
    # (e.g. namedtuples) fall through and are returned as-is.
    if type(key) is not tuple:
        return key
    return " ".join(key)
def minOrMax():
    """
    Ask which type of problem this is.

    Returns:
        [int] -- [1 - maximisation (<=), 2 - minimisation (>=)]
    """
    while True:
        print("O PROBLEMA É DE:")
        print("1. Maximizacao")
        print("2. Minimizacao")
        op = int(input())
        if op in (1, 2):
            return op
        print("Opcao invalida")
import requests
def get_lihkg_response(resp):
    """
    get_lihkg_response(resp)

    Extract the payload dictionary from a LIHKG API response.

    Return:
    -------
    A dictionary: the 'response' payload on success; NOTE that when the
    HTTP status is 200 but the body's 'success' flag is not 1, the full
    JSON body is returned unchanged; an empty dict on non-200 status.
    """
    if not isinstance(resp, requests.models.Response):
        raise TypeError('resp must be a \'requests.models.Response\' object.')
    payload = dict()
    if resp.status_code == 200:
        payload = resp.json()
        if payload.get('success', 0) == 1:
            payload = payload.get('response', dict())
    return payload
def api_v1_index():
    """
    API index route

    Maps resource names to their v1 endpoint paths.
    """
    routes = {}
    routes['recipes'] = '/api/v1/recipes'
    routes['recipe'] = '/api/v1/recipe/:id'
    return routes
def ensure_traversal_connected(graph, path):
    """
    Validate a traversal to make sure that it covers all unpinned
    nodes. This may not be the case if there are two disconnected
    components in the query graph.
    """
    all_nodes = set(graph["nodes"])
    pinned = {
        name
        for name, node in graph["nodes"].items()
        if node.get("id", None) is not None
    }
    visited = {node for node in path if node in graph["nodes"]}
    # The pinned nodes plus the traversed nodes must cover every node
    # in the graph (parentheses added for clarity; `|` already binds
    # tighter than `==`).
    return (pinned | visited) == all_nodes
def get_resources(connection):
    """ Do an RTSP-DESCRIBE request, then parse out available resources from the response """
    lines = connection.describe(verbose=False).split('\r\n')
    # Keep control lines, dropping the aggregate '*' entry, and strip
    # the attribute prefix.
    return [
        line.replace('a=control:', '')
        for line in lines
        if 'control:' in line and line[-1] != '*'
    ]
import socket
def check_connection(addr: str, port: int, *, timeout: float = 0.1) -> bool:
    """
    Attempt to make a TCP connection to ``(addr, port)``.

    Return True if the connection succeeds within ``timeout`` seconds,
    False on any failure (refused, timed out, unresolvable, ...).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with sock:
        sock.settimeout(float(timeout))
        try:
            sock.connect((addr, port))
        except Exception:
            return False
    return True
def status_code() -> int:
    """Return the fixed status code used by the checks and tests."""
    return 328
def write_sqlcreate(data, name):
    """SQL create statement

    Consumes the first row of *data* (an iterator of namedtuples) and
    builds a CREATE TABLE statement from its field names.
    """
    columns = next(data)._fields
    quoted = ",".join("`%s`" % column for column in columns)
    return "CREATE TABLE %s (%s);\n\n" % (name, quoted)
from typing import List
from typing import Any
from typing import Iterable
def as_list(item_or_sequence) -> List[Any]:
    """Turns an arbitrary sequence or a single item into a list. In case of
    a single item, the list contains this element as its sole item.

    Strings and bytes are treated as single items rather than as
    sequences of characters.
    """
    # BUGFIX: str/bytes are Iterable, so the original exploded them into
    # individual characters -- wrap them as a single item instead.
    if isinstance(item_or_sequence, (str, bytes)):
        return [item_or_sequence]
    if isinstance(item_or_sequence, Iterable):
        return list(item_or_sequence)
    return [item_or_sequence]
def exec_post_hooks(hooks, cli):
    """
    Post hooks.

    Runs hook callbacks after a command finishes.  Two accepted shapes:

    * a single hook ``(trigger, func)`` -- detected by ``len(hooks) == 2``
      with a callable second element (only the no-argument form is
      recognised here);
    * a list of hooks, each itself a ``(trigger, func, *args)``
      list or tuple.

    A hook fires only when its trigger string is empty/falsy or occurs
    somewhere in ``cli``.  Returns False only when the single-hook form
    crashes; otherwise True.
    """
    # For 1 hook
    try:
        if len(hooks) == 2 and callable(hooks[1]):
            hstrg = hooks[0]
            # Skip when a trigger is given but absent from the CLI string.
            if hstrg and (not hstrg in cli):
                return True
            hfunc = hooks[1]
            hpara = hooks[2:]  # always empty here, since len(hooks) == 2
            try:
                hfunc(*hpara)
            except Exception as e:
                print('Post-Hook `{}` crashed! error: `{}`!'.format(hfunc, e))
                return False
            return True
    except Exception:
        # hooks may not support len()/indexing; fall through and treat it
        # as a list of hooks instead.
        pass
    # For a list of hooks ...
    for hook in hooks:
        if isinstance(hook, list) or isinstance(hook, tuple):
            hstrg = hook[0]
            # Same trigger rule as above, but a crash only skips this hook.
            if hstrg and (not hstrg in cli):
                continue
            hfunc = hook[1]
            hpara = hook[2:]
            try:
                hfunc(*hpara)
            except Exception as e:
                print('Post-Hook `{}` crashed! error: `{}`!'.format(hfunc, e))
                continue
        else:
            print('Invalid Post-Hook `{}`! Cannot execute!'.format(hook))
            continue
    return True
import json
def LoadPropDatabase(filename):
    """Loads a propellor database from a .json file."""
    with open(filename, 'r') as database_file:
        return json.load(database_file)
import time
def elapsed_time(t0):
    """Given a start time (time.time() object), computes and returns elapsed
    time as a string.

    Keyword arguments
    =================
    :param t0: output of time.time()
        Start time to compute elapsed time from (no default)
    :return: str
        Elapsed time, formatted in seconds, minutes or hours.
    """
    t = time.time() - t0
    if t < 60:
        return "{:2.1f} sec.".format(t)
    # BUGFIX: the original tested `60 < t < 3600`, so t == 60 exactly fell
    # through to the hours branch and produced "0.0 hr.".
    if t < 3600:
        return "{:2.1f} min.".format(t / 60)
    return "{:2.1f} hr.".format(t / 3600)
def format_attribute(name, val, lng):
    """Format a string for displaying the name and value of an attribute.

    Args:
        name: name of the attribute to display.
        val: value of the attribute to display, or None for name only.
        lng: length of the string to be returned, in number of
            characters. Blank space will be padded with '-' characters.

    Returns: a string.
    """
    label = name + ' '
    if val is None:
        # Name only: left-align, pad with spaces, truncate to lng.
        return '{:<{pad}.{trunc}}'.format(label, pad=lng, trunc=lng)
    value = ' ' + val
    # Reserve room for the value, dash-pad the name into the rest.
    width = lng - len(value)
    return '{:-<{pad}.{trunc}}{}'.format(label, value, pad=width, trunc=width)
from typing import Union
def seconds_to_datetime(seconds: Union[float, int]) -> str:
    """Convert a duration in seconds to a 'DD:HH:MM:SS' string."""
    # NOTE(xames3): Inspired from `timedelta` class of datetime module.
    total = int(seconds)
    ss = total % 60
    mm = (total // 60) % 60
    hh = (total // 3600) % 24
    dd = total // 86400
    return f"{dd:02d}:{hh:02d}:{mm:02d}:{ss:02d}"
import os
def find_images(dir_path, bands):
    """
    Collect per-band image paths from a directory.

    Parameters:
        dir_path (str): path to directory containing images.
        bands (lst): list of band name substrings.
    Returns:
        band_list (lst): paths of files whose name contains each band,
        grouped band by band.
    """
    filenames = os.listdir(dir_path)
    return [
        os.path.join(dir_path, filename)
        for band in bands
        for filename in filenames
        if band in filename
    ]
def rssError(yArr, yHatArr):
    """Residual sum of squares between true and predicted values.

    Parameters:
        yArr - ground-truth values (array)
        yHatArr - predicted values (array)
    Returns:
        the summed squared error
    """
    residuals = yArr - yHatArr
    return (residuals ** 2).sum()
import os
def get_test_profile_name():
    """ Read name of test profile from environment variable.

    Reads the 'AIIDA_TEST_PROFILE' environment variable.  If set, that
    profile is used for running the tests (instead of setting up a
    temporary profile).

    :returns: content of environment variable or `None`
    """
    return os.environ.get('AIIDA_TEST_PROFILE')
def unpack_and_add(l, c):
    """Return a new list with the items of *l* followed by *c*,
    leaving *l* itself unmodified."""
    return list(l) + [c]
def reg_n_correct(prediction, y, significance=None):
    """Calculates the number of correct predictions made by a conformal
    regression model.

    ``prediction`` holds per-sample [lower, upper] interval bounds; when
    ``significance`` is given, the matching slice of a 3-d prediction
    array is selected first.  A prediction counts as correct when y
    falls inside its interval.
    """
    if significance is not None:
        idx = int(significance * 100 - 1)
        prediction = prediction[:, :, idx]
    inside = (y >= prediction[:, 0]) & (y <= prediction[:, 1])
    return y[inside].size
def LockReadProcessWriteUnlock(client, file, id, func):
    """
    This method will lock a file, read the contents and pass it to
    the user defined function. This function should return the
    return item and the new contents of the file. The contents is
    writing and the file unlocked.

    :param client: object providing LockFile/ReadFile/WriteFile/UnlockFile
    :param file: file identifier understood by the client
    :param id: lock owner id passed to LockFile/UnlockFile
    :param func: callable taking the file lines and returning
        ``(return_item, new_lines)``
    :return: the first element returned by ``func``, or None when the
        file was empty
    """
    client.LockFile(file, id)
    try:
        lines = client.ReadFile(file)
        return_item = None
        if lines:
            return_item, lines = func(lines)
        # NOTE(review): WriteFile runs even when the file was empty,
        # rewriting the original contents unchanged — presumably intentional.
        client.WriteFile(lines, file)
    finally:
        # Always release the lock, even if read/process/write raised.
        client.UnlockFile(file, id)
    return return_item
def rename_tax_rows(df, index, tax_names, suffixes=("current", "prior", "total")):
    """Label consecutive dataframe rows with '<tax>_<suffix>' names.

    For each tax name, writes '<tax>_current', '<tax>_prior' and
    '<tax>_total' (or the given suffixes) into column 0 of three
    consecutive rows starting at *index*.

    :param df: dataframe to label in place
    :param index: first row label to write to
    :param tax_names: iterable of tax names
    :param suffixes: the three row suffixes (a tuple default replaces the
        original mutable list default)
    :return: the row index just past the last labelled row
    """
    for tax_name in tax_names:
        for offset in range(3):
            df.loc[index + offset, 0] = f"{tax_name}_{suffixes[offset]}"
        index = index + 3
    return index
def color_from_code(code):
    """Generate a color based on a simple code

    Args:
        code (int): an integer going from 0 to 999; 0 maps to white
    Returns:
        [tuple]: the rgb color code
    """
    if code == 0:
        return (255, 255, 255)
    assert code < 1000
    # Each decimal digit (hundreds, tens, units) drives one channel,
    # scaled so digit 9 maps to 252 (255 // 9 == 28).
    # BUGFIX: the original returned a *list* here but a tuple for code 0;
    # always return a tuple, as the docstring promises.
    return tuple((255 // 9) * ((code // div) % 10) for div in (100, 10, 1))
def compareEvents(test, actualEvents, expectedEvents):
    """
    Compare two sequences of log events, examining only the keys which are
    present in both.

    @param test: a test case doing the comparison
    @type test: L{unittest.TestCase}

    @param actualEvents: A list of log events that were emitted by a logger.
    @type actualEvents: L{list} of L{dict}

    @param expectedEvents: A list of log events that were expected by a test.
    @type expectedEvents: L{list} of L{dict}
    """
    if len(actualEvents) != len(expectedEvents):
        test.assertEqual(actualEvents, expectedEvents)
    expectedKeys = set()
    for expected in expectedEvents:
        expectedKeys.update(expected.keys())
    # Drop keys the expectations never mention, then compare directly.
    trimmedActual = [
        {key: value for key, value in event.items() if key in expectedKeys}
        for event in actualEvents
    ]
    test.assertEqual(trimmedActual, expectedEvents)
def arg_name(arg_index=lambda ctx: ctx["operands"][0].value):
    """
    Returns a lambda that gets the name of the argument at the given index.
    The index defaults to the first operand's value.  Falls back to a
    synthetic "var<N>" name when the index is out of range.
    """
    def resolve(ctx):
        index = arg_index(ctx)
        names = ctx["arg_names"]
        if index < len(names):
            return names[index]
        return "var%s" % index
    return resolve
def bool(anon, obj, field, val):
    """
    Returns a random boolean value (True/False)

    NOTE(review): shadows the builtin ``bool``; presumably intentional
    so the anonymiser can look this generator up by name — verify.

    :param anon: anonymiser providing the ``faker`` instance
    :param obj: object being anonymised (unused here)
    :param field: model field the value is generated for
    :param val: current value (unused here)
    """
    return anon.faker.bool(field=field)
def delete_cmd_prefix(arg: str):
    """Strip a single leading '/' command marker so quick-entry commands
    can be used as plain arguments."""
    return arg[1:] if arg.startswith("/") else arg
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.