content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import multiprocessing
def get_result(num):
    """Multiply *num* by 10, logging which worker process handled it."""
    worker = multiprocessing.current_process().name
    print("Current process:", worker, ", Input Number:", num)
    return 10 * num
def get_inputs():
    """Prompt the user for a primary and (optional) secondary pokemon type.

    Returns:
        The two lower-cased type names entered by the user.
    """
    primary = input("What pokemon type do you want to know about?: ").lower()
    secondary = input("What is the secondary type? (If none, say none, or press enter to skip): ").lower()
    return primary, secondary
def format_includes(includes):
    """
    Format related-resource includes for an API query
    (``{'include': '<foo>,<bar>,<bat>'}``).

    :param includes: str or list (may be None): related resources to include
    :return: dict: the formatted includes, or {} when nothing usable was given
    """
    if isinstance(includes, str):
        return {'include': includes}
    if isinstance(includes, list):
        return {'include': ','.join(includes)}
    return {}
import base64
import gzip
import json
def json_to_comp_str(jsonobj):
    """Serialize *jsonobj* to JSON, gzip it, and return the base64 text form."""
    raw = json.dumps(jsonobj).encode("utf-8")
    return base64.b64encode(gzip.compress(raw)).decode()
import numpy
def fixminus(x):
    """
    Return a copy of array *x* in which nonphysical negative values are
    replaced by their absolute values. The original array is not modified.
    """
    fixed = x.copy()
    negatives = numpy.where(fixed < 0)
    fixed[negatives] = numpy.abs(x[negatives])
    return fixed
def top_k_predictions(model, probs, k: int):
    """Return the labels and probabilities of the `k` most probable classes.

    Args:
        model: Fastai Learner trained for multi-label classification
            (only ``model.data.classes`` is read).
        probs: Tensor of class probabilities.
        k: number of classes/probabilities to return (capped at the total
            number of classes).

    Returns:
        (labels, probabilities): top-k class names and their probabilities.
    """
    class_names = model.data.classes
    k = min(k, len(class_names))
    # Indices of the k highest-probability classes, best first.
    ranked = probs.argsort(descending=True)[:k]
    labels = [class_names[idx] for idx in ranked]
    probabilities = [probs[idx] for idx in ranked]
    return labels, probabilities
def sparse_batch_mm(m1, m2):
    """
    Batched matmul between a sparse matrix and a dense batch
    (https://github.com/pytorch/pytorch/issues/14489).

    m1: sparse matrix of size N x M
    m2: dense matrix of size B x M x K
    returns m1@m2 matrix of size B x N x K
    """
    batch = m2.shape[0]
    # Fold the batch into columns: (B, M, K) -> (M, B*K), do one mm,
    # then unfold back into (B, N, K).
    folded = m2.transpose(0, 1).reshape(m1.shape[1], -1)
    product = m1.mm(folded)
    return product.reshape(m1.shape[0], batch, -1).transpose(1, 0)
def asum(a, b):
    """
    Return the sum of a and b.

    :param a: first addend (int)
    :param b: second addend (int)
    :return: a + b
    """
    return b + a
def summarizekeys(d, counts=None, base=''):
    """Summarizes keys in the given dict, recursively.

    This means counting how many fields exist at each level.
    Returns keys of the form ``key0.key1`` and values of ints.
    Checks if `d` is instance of dict before doing anything.

    BUG FIX: the previous ``counts={}`` default was a shared mutable object,
    so counts accumulated across unrelated calls; a fresh dict is now
    created per top-level call.
    """
    if counts is None:
        counts = {}
    if not isinstance(d, dict): return counts
    for k, v in d.items():
        k = '.'.join((base, k)) if base else k
        if k not in counts:
            counts[k] = 0
        counts[k] += 1
        # Recurse into nested dicts, accumulating into the same counts.
        summarizekeys(v, counts=counts, base=k)
    return counts
def set_piece(board, index, val):
    """Return *board* with the decimal digit at position *index* replaced by
    *val*; the sign of the board is preserved."""
    sign = -1 if board < 0 else 1
    magnitude = abs(board)
    factor = 10 ** index
    upper = magnitude // factor
    lower = magnitude % factor
    # Swap the lowest digit of `upper` (the targeted piece) for `val`.
    upper = upper - (upper % 10) + val
    return sign * (upper * factor + lower)
import hashlib
def hash_password(pw_in):
    """SHA-1 hash the given password and split the hex digest into a 5-char
    prefix and the remaining 35 chars for later use."""
    digest = hashlib.sha1(pw_in.encode()).hexdigest()
    return [digest[:5], digest[5:]]
def NumGems(gems):
    """Return the total number of gems in the defaultdict(int)."""
    total = 0
    for count in gems.values():
        total += count
    return total
def harvestMethods(cls):
    """
    Obtain the Monte methods of a class.

    The class must have already been harvested; the repacker has already
    placed ``_monteMethods_`` in the correct location. Walks the MRO from
    base to derived so subclass entries override base-class ones.
    NOT_RPYTHON
    """
    methods = {}
    for klass in reversed(cls.__mro__):
        methods.update(getattr(klass, "_monteMethods_", {}))
    return methods
import torch
import numpy
def update(output, batch_size, beam_size, device, **kwargs):
    """
    Update the inputs for next inference.

    Rebuilds both the torch input dict and the ONNX Runtime (numpy) input
    dict for the next beam-search decoding step from the previous step's
    *output* sequence. Each entry of *output* may be either a numpy array
    or a torch tensor; both are normalized onto *device*.

    Assumed output layout (verify against the exporting model):
    [last_state, past_0..past_{num_layer-1}, ..., beam_select_idx,
     input_log_probs, input_unfinished_sents, prev_step_scores].

    Returns:
        (inputs, ort_inputs, past): torch inputs, numpy inputs, past list.
    """
    num_layer = kwargs['num_layer']
    # NOTE(review): unlike every other tensor below, the torch branch moves
    # last_state to CPU (.cpu()) before .to(device) is applied via input_ids —
    # confirm this asymmetry is intentional.
    last_state = (torch.from_numpy(output[0]).to(device)
                  if isinstance(output[0], numpy.ndarray) else output[0].clone().detach().cpu())
    input_ids = last_state.view(batch_size * beam_size, -1).to(device)
    # Beam-search bookkeeping tensors live at fixed offsets from the end.
    input_unfinished_sents_id = -2
    beam_select_idx = (torch.from_numpy(output[input_unfinished_sents_id - 2]).to(device) if isinstance(
        output[input_unfinished_sents_id - 2], numpy.ndarray) else output[input_unfinished_sents_id - 2].clone().detach().to(device))
    input_log_probs = (torch.from_numpy(output[input_unfinished_sents_id - 1]).to(device) if isinstance(
        output[input_unfinished_sents_id - 1], numpy.ndarray) else output[input_unfinished_sents_id - 1].clone().detach().to(device))
    input_unfinished_sents = (torch.from_numpy(output[input_unfinished_sents_id]).to(device) if isinstance(
        output[input_unfinished_sents_id], numpy.ndarray) else
                              output[input_unfinished_sents_id].clone().detach().to(device))
    prev_step_scores = (torch.from_numpy(output[-1]).to(device)
                        if isinstance(output[-1], numpy.ndarray) else output[-1].clone().detach().to(device))
    # Collect the per-layer "past" key/value states following last_state.
    past = []
    if isinstance(output[1], tuple):  # past in torch output is tuple
        past = list(output[1])
    else:
        for i in range(num_layer):
            past_i = (torch.from_numpy(output[i + 1])
                      if isinstance(output[i + 1], numpy.ndarray) else output[i + 1].clone().detach())
            past.append(past_i.to(device))
    inputs = {
        'input_ids': input_ids,
        'beam_select_idx': beam_select_idx,
        'input_log_probs': input_log_probs,
        'input_unfinished_sents': input_unfinished_sents,
        'prev_step_scores': prev_step_scores,
    }
    # Mirror of `inputs` as contiguous numpy arrays for ONNX Runtime.
    ort_inputs = {
        'input_ids': numpy.ascontiguousarray(input_ids.cpu().numpy()),
        'beam_select_idx': numpy.ascontiguousarray(beam_select_idx.cpu().numpy()),
        'input_log_probs': numpy.ascontiguousarray(input_log_probs.cpu().numpy()),
        'input_unfinished_sents': numpy.ascontiguousarray(input_unfinished_sents.cpu().numpy()),
        'prev_step_scores': numpy.ascontiguousarray(prev_step_scores.cpu().numpy()),
    }
    for i, past_i in enumerate(past):
        ort_inputs[f'past_{i}'] = numpy.ascontiguousarray(past_i.cpu().numpy())
    return inputs, ort_inputs, past
def tree_attribute(identifier):
    """
    Predicate that returns True for custom attributes added to AttrTrees
    that are not methods, properties or internal attributes.

    Custom attributes start with a capitalized character when the leading
    character distinguishes case (not applicable to underscore or certain
    unicode characters).
    """
    first = identifier[0]
    if first.upper().isupper() or first == '_':
        # Case-bearing alphabet or internal name: require a capital letter.
        return first.isupper()
    return True
def get_sim_name(obj):
    """
    Get an in-simulation object name: if ``obj`` has a non-None
    ``__sim_name__`` attribute it is returned, otherwise the standard
    ``__name__`` attribute.

    Args:
        obj: an object to get name of
    Returns: object name
    """
    sim_name = getattr(obj, '__sim_name__', None)
    return sim_name if sim_name is not None else obj.__name__
import json
def build_payload(call):
    """
    Build the JSON-RPC payload string used to run *call* against the cluster.
    """
    return json.dumps({"method": call, "params": {"force": True}, "id": 1})
def scoreSorter(order):
    """Build the Elasticsearch sort clause for relevance (_score) sorting.

    For ``order == 'asc'`` the bare field name ``"_score"`` is returned;
    for any other value a dict forcing ascending order is returned.

    NOTE(review): the branches look swapped — requesting 'asc' yields the
    bare field (Elasticsearch's default _score order), while everything
    else forces ascending. Verify the intended mapping against the callers
    before relying on this.
    """
    if order == 'asc':
        return "_score"
    return {"_score": {"order": "asc"}}
import os
def find_all_ckpts(log_ckpt_path, only_filename=True) -> list:
    """
    Find every stored checkpoint in the given folder (non-recursive).
    Intended to work with checkpoints written by Keras' ModelCheckpoint
    callback.

    Returns the bare filenames; when ``only_filename`` is False, returns
    full paths of files with a ``.h5``/``.h5df`` extension instead (and
    prints how many were found).
    """
    filenames = next(os.walk(log_ckpt_path))[2]
    if only_filename:
        return filenames
    ckpts_list = [
        os.path.join(log_ckpt_path, name)
        for name in filenames
        if os.path.splitext(name)[-1] in (".h5", ".h5df")
    ]
    print("Find %d checkpoint" % len(ckpts_list))
    return ckpts_list
def braced(s):
    """Wrap the given string in literal braces (awkward with str.format)."""
    return ''.join(('{', s, '}'))
from bs4 import BeautifulSoup
def query_cct(provided_ioc, session):
    """Search cybercrime-tracker.net for specific information about panels.

    Args (assumed — verify against callers):
        provided_ioc: indicator of compromise to search for (interpolated
            into the search URL).
        session: a requests-style session with a ``get(url, timeout=)`` method.

    Returns:
        list[dict]: one dict per scraped table row (date/url/ip/type plus
        VirusTotal convenience links), or ``[{"no data": ioc}]`` when the
        lookup yields nothing or the HTTP status is not 200.
    """
    api = "http://cybercrime-tracker.net/index.php?search={}&s=0&m=10000"
    vt_latest = "https://www.virustotal.com/latest-scan/http://{}"
    vt_ip = "https://www.virustotal.com/en/ip-address/{}/information/"
    base_url = api.format(provided_ioc)
    resp = session.get(base_url, timeout=180)
    cct_dicts = []
    if resp.status_code == 200:
        # Scrape the results table; first row is the header, skip it.
        soup = BeautifulSoup(resp.content, "html.parser")
        table = soup.findAll("table", attrs={"class": "ExploitTable"})[0]
        rows = table.find_all(["tr"])[1:]
        if len(rows) == 0:
            cct_dicts.append({"no data": provided_ioc})
            return cct_dicts
        for row in rows:
            cells = row.find_all("td", limit=5)
            if len(cells) > 0:
                tmp = {
                    "date": cells[0].text,
                    "url": cells[1].text,
                    "ip": cells[2].text,
                    "type": cells[3].text,
                    "vt latest scan": vt_latest.format(cells[1].text),
                    "vt ip info": None
                }
                if tmp["ip"] != "":
                    tmp["vt ip info"] = vt_ip.format(tmp["ip"])
                # Deduplicate identical rows.
                if tmp not in cct_dicts:
                    cct_dicts.append(tmp)
    else:
        cct_dicts.append({"no data": provided_ioc})
    return cct_dicts
import os
def remove_extensions(target_name):
    """Strip the directory part and every trailing file extension from a name.

    e.g. ``/a/b/c.tar.gz`` -> ``c``.

    BUG FIX: the previous ``while "." in name`` loop hung forever on
    dotfiles such as ``.bashrc``, because ``os.path.splitext`` treats a
    leading dot as part of the root, never removing it. We now loop only
    while splitext actually yields a non-empty extension.
    """
    target_name = os.path.basename(target_name)
    root, ext = os.path.splitext(target_name)
    while ext:
        target_name = root
        root, ext = os.path.splitext(target_name)
    return target_name
import hashlib
def configsignature(repo, includetemp=True):
    """Obtain the signature string for the current sparse configuration.

    This is used to construct a cache key for matchers.

    The signature is the SHA-1 of the repo's ``sparse`` file, optionally
    combined with the SHA-1 of ``tempsparse`` when ``includetemp`` is True;
    both hashes are memoized in ``repo._sparsesignaturecache``.

    NOTE(review): cache keys and return value are plain strings here —
    presumably Python-2-era Mercurial code; confirm bytes/str handling
    before porting.
    """
    cache = repo._sparsesignaturecache
    signature = cache.get('signature')
    if includetemp:
        tempsignature = cache.get('tempsignature')
    else:
        # Fixed placeholder so the composite key stays two tokens wide.
        tempsignature = '0'
    if signature is None or (includetemp and tempsignature is None):
        # Cache miss: (re)hash the sparse config file(s).
        signature = hashlib.sha1(repo.vfs.tryread('sparse')).hexdigest()
        cache['signature'] = signature
        if includetemp:
            raw = repo.vfs.tryread('tempsparse')
            tempsignature = hashlib.sha1(raw).hexdigest()
            cache['tempsignature'] = tempsignature
    return '%s %s' % (signature, tempsignature)
def clean(str_in: str, keep_char='', remove_char='', command_name='()', str_name='') -> str:
    """clean(str)
    Keep every character found in *keep_char*, silently drop those in
    *remove_char*, and raise TypeError for anything else.
    """
    kept = []
    for nibble in str_in:
        if nibble in keep_char:
            kept.append(nibble)
        elif nibble not in remove_char:
            if str_name == '':
                raise TypeError(F"{command_name}: Unknown nibble: {nibble}")
            else:
                raise TypeError(
                    F"{command_name}: Unknown nibble in {str_name}: {nibble}")
    return ''.join(kept)
import math
def __get_views(views):
    """Convert a view count to a human readable string (e.g. 1500 -> '2k').

    BUG FIX: the zero check used ``int(views)`` — implying numeric strings
    are acceptable input — but ``abs(views)`` and the final division then
    failed on strings. The count is now normalized to int up front.
    """
    views = int(views)
    if views == 0:
        return '0'
    millnames = ['', 'k', 'M', 'Billion', 'Trillion']
    # Pick the thousands-group suffix from the magnitude of the count.
    millidx = max(0, min(len(millnames) - 1,
                         int(math.floor(math.log10(abs(views)) / 3.0))))
    return '%.0f%s' % (views / 10 ** (3 * millidx), millnames[millidx])
def EXACT(string1, string2):
    """
    Tests whether two strings are identical. Same as `string1 == string2`.
    >>> EXACT("word", "word")
    True
    >>> EXACT("Word", "word")
    False
    >>> EXACT("w ord", "word")
    False
    """
    return string1 == string2
import json
def putTableAll(obj):
    """
    Returns table as string showing standings of league with all data
    Parameters:
    -----------
    obj: dict
        JSON object of league standings obtained from API/cache
    Returns:
    --------
    str
        Standings as a text code block (to get monospaced text) showing all
        data, or 'Error!' when *obj* is not a dict
    """
    try:
        assert(type(obj) == dict)
        # FIX: use a context manager so the mapping file is closed even if
        # json.load or the formatting below raises (previously the handle
        # leaked on any exception between open() and close()).
        with open('source/teamcodes.json', 'r') as fin:
            mapper = json.load(fin)
        str_re = '```\nLEAGUE: ' + str(obj['competition']['name']) +\
            ' ' * (45 - 2 - 8 - 10 - len(str(obj['competition']['name']))) +\
            'MATCHDAY: ' + str(obj['season']['currentMatchday']) + '\n'
        str_re += 'โโโโโโคโโโโโโโคโโโโโคโโโโโคโโโโโคโโโโโคโโโโโโคโโโโโโ\n'
        str_re += 'โ SN โ TEAM โ M  โ W  โ D  โ L  โ PTS โ GD  โ\n'
        str_re += 'โ โโโโโชโโโโโโโชโโโโโชโโโโโชโโโโโชโโโโโชโโโโโโชโโโโโโฃ\n'
        for team in obj['standings'][0]['table']:
            # Abbreviate team names via the mapping file, falling back to
            # the first four characters of the full name.
            text = 'โ %-2d โ %-4s โ %-2d โ %-2d โ %-2d โ %-2d โ %-3d โ %+-3d โ\n'\
                % (team['position'], mapper.get(team['team']['name'], team['team']['name'][:4])[:4], team['playedGames'], team['won'],
                   team['draw'], team['lost'], team['points'], team['goalDifference'])
            str_re += text
        str_re += 'โโโโโโงโโโโโโโงโโโโโงโโโโโงโโโโโงโโโโโงโโโโโโงโโโโโโ```'
        return str_re
    except AssertionError:
        return 'Error!'
def aperture(n, xs):
    """Return a new list of the consecutive sliding n-element windows of *xs*.

    If n is greater than the length of the list, an empty list is returned.

    BUG FIX: the previous bound ``int(len(xs)/n)*n`` produced too many
    windows for most lengths, so the trailing windows were shorter than n
    (e.g. n=3, len=7 yielded six windows, the last only two elements long).
    A window may start at every index up to ``len(xs) - n``.
    """
    return [xs[i : i + n] for i in range(len(xs) - n + 1)]
import re
def proto_pretty(line):
"""fixes up inconsistent spaces in C function prototypes"""
line = re.sub(r',', ' , ', line)
line = re.sub(r'\(', ' ( ', line)
line = re.sub(r'\)', ' ) ', line)
line = re.sub(r'\*', ' * ', line)
line = re.sub(r'\s+', ' ', line)
line = re.sub(r'\(\s+\*', '(*', line)
line = re.sub(r' ,', ',', line)
line = re.sub(r' \(', '(', line)
line = re.sub(r'\) ', ')', line)
line = re.sub(r' \* ', ' *', line)
line = re.sub('^\s*', '', line)
return line | 8c80ec721b1761162e618c76ed75d697141c3965 | 39,100 |
def _fully_qualified_name(t: type) -> str:
    """Retrieves the fully qualified name of the provided type.

    Args:
        t (type): The type whose fully qualified name shall be retrieved.
    Returns:
        str: The fully qualified name of ``t``.
    Raises:
        TypeError: If ``t`` is not an instance of ``type``.
    """
    if not isinstance(t, type):
        raise TypeError(
            "The parameter <t> has to be a type, but is an instance of {}!".format(_fully_qualified_name(type(t)))
        )
    if hasattr(t, "__module__"):
        return t.__module__ + "." + t.__name__
    return t.__name__
def compact_regions(regions):
    """Return the regions as the compact list-of-lists form used for updates:
    ``[label, xmin, ymin, xmax, ymax]`` per region."""
    keys = ("label", "xmin", "ymin", "xmax", "ymax")
    return [[region.get(key) for key in keys] for region in regions]
import argparse
def collect_args() -> argparse.Namespace:
    """Parse the command line (currently just the ``--config`` option)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Config file", type=str)
    return parser.parse_args()
def version_string(version):
    """Return a dotted version string from a version tuple/list (strings
    pass through unchanged)."""
    if isinstance(version, str):
        return version
    return ".".join(f"{part}" for part in version)
import subprocess
def style_transfer(path_name,style,gpu_ids='1'):
    """Run CycleGAN inference to restyle images under ``tmp/<path_name>``.

    @params:
        path_name: str — working directory name under ``tmp/``
        style: str — target style; selects the ``raw2<style>`` model
        gpu_ids: str — GPU id(s) passed through to test.py
    @return:
        p: int — return code of the shell command (0 on success)

    NOTE(review): the command is built by string interpolation and run with
    shell=True — do not pass untrusted path_name/style values.
    """
    cmd1='cd ./DoFace/pytorchCycleGANandPix2Pix/'
    cmd2=f'python test.py --dataroot ../../tmp/{path_name}/testA --results_dir ../../tmp/{path_name}/result --name raw2{style} --model test --no_dropout --gpu_ids {gpu_ids}'
    cmd=cmd1+' && '+cmd2
    p=subprocess.call(cmd,shell=True)
    return p
def spritecollideany(sprite, group, collided=None):
    """finds any sprites in a group that collide with the given sprite

    pygame.sprite.spritecollideany(sprite, group): return sprite

    Returns the first sprite in *group* that collides with *sprite*, or
    None when there is no collision. A faster, reduced-feature variant of
    spritecollide.

    *collided* is an optional callback taking two sprites and returning a
    bool. When omitted, every sprite must expose a ``rect`` attribute,
    which is used for rectangle collision.
    """
    if collided:
        for candidate in group:
            if collided(sprite, candidate):
                return candidate
        return None
    # Special case old behaviour for speed: bind the rect test once.
    hit_test = sprite.rect.colliderect
    for candidate in group:
        if hit_test(candidate.rect):
            return candidate
    return None
import re
def fixture_readme_code_result(readme_content: str) -> str:
    """Get the README.md example result content.

    :param readme_content: plain text content of README.md
    :return: the body of the first ```text fenced block, left-stripped
    """
    found = re.search(r"```text([^`]*)```", readme_content, flags=re.DOTALL)
    assert found
    return found.group(1).lstrip()
def divide_list(list, perc=0.5):
    """
    Split *list* into two new lists; *perc* is the first list's share
    (perc=0.6 gives the first list 60 percent of the elements).

    example : f, s = divide_list([1,2,3,4,5,6,7], perc=0.7)
    """
    cut = int(perc * len(list))
    return list[:cut], list[cut:]
def rotate(message):
    """
    Return the ROT13 transformation of *message* (encode and decode are the
    same operation); non-alphabetic characters pass through unchanged.
    """
    pieces = []
    for ch in message:
        if 'a' <= ch <= 'z':
            pieces.append(chr((ord(ch) - ord('a') + 13) % 26 + ord('a')))
        elif 'A' <= ch <= 'Z':
            pieces.append(chr((ord(ch) - ord('A') + 13) % 26 + ord('A')))
        else:
            pieces.append(ch)
    return ''.join(pieces)
def _arg_names(f):
    """Return the positional-argument names of function *f*.

    BUG FIX: ``f.func_code`` is Python-2-only syntax and raises
    AttributeError on Python 3; ``f.__code__`` works on 2.6+ and 3.
    """
    code = f.__code__
    return code.co_varnames[:code.co_argcount]
import hashlib
def process_password(cont):
    """Password processing: return the MD5 hex digest of *cont*.

    NOTE(review): MD5 is unsuitable for password storage; kept only for
    compatibility with whatever consumes these digests.
    """
    return hashlib.md5(cont.encode("utf-8")).hexdigest()
def collate_eigenvalues(input_data, trivial_modes=True):
    """Collect normal-mode eigenvalues from different structural forms into
    one DataFrame. {0: apo, 1: holo1, 2: holo2}

    Args (assumed — verify against callers):
        input_data: dict of pandas DataFrames keyed by form index, each with
            'mode_number' and 'eigenvalue' columns.
        trivial_modes: when True, modes 1-6 (presumably the rigid-body
            modes — confirm) are dropped and the remainder renumbered from 1.

    Returns:
        DataFrame with 'mode_number' plus one 'eigenvalue_<idx>' column per
        form.
    """
    if trivial_modes:
        # Keep only modes beyond the first six of form 0.
        output_data = input_data[0]['mode_number'][input_data[0]['mode_number'] > 6].to_frame()
    else:
        output_data = input_data[0]['mode_number'].to_frame()
    for form_idx, df in input_data.items():
        # Align each form's eigenvalues on the retained mode numbers
        # (pandas aligns on index here — rows are matched positionally by
        # index label, not by mode number).
        output_data['eigenvalue_{}'.format(form_idx)] = df['eigenvalue'][df['mode_number'].isin(output_data['mode_number'])]
    if trivial_modes:
        # Renumber the surviving modes 1..N.
        output_data['mode_number'] = range(1, output_data.shape[0]+1)
    output_data.reset_index(drop=True, inplace=True)
    return output_data
def nuclear_charge(sym):
    """Return the nuclear charge (atomic number) for an element symbol;
    the lookup is case-insensitive."""
    charges = {
        'H': 1, 'HE': 2,
        'C': 6, 'N': 7, 'O': 8,
        'F': 9, 'NE': 10,
        'S': 16, 'CL': 17, 'AR': 18,
    }
    return charges[sym.upper()]
import argparse
def get_config_from_cmd(key, default=None, key_type=None, kvdict=None, convert_to_list=False):
    """
    Resolve a config value: from *kvdict* when present there, otherwise
    from a ``--<key>`` command-line option (falling back to *default*).
    If *default* is None, *key_type* should be given.
    """
    if kvdict is not None and key in kvdict:
        return kvdict[key]
    parser = argparse.ArgumentParser(allow_abbrev=False)
    arg_type = type(default) if key_type is None else key_type
    parser.add_argument('--{}'.format(key), type=arg_type, default=default)
    known, _unknown = parser.parse_known_args()
    value = known.__dict__[key]
    if convert_to_list:
        value = value.split(',') if len(value) > 0 else []
    return value
def out_path(tmp_path):
    """Create and return a temporary ``output`` folder under *tmp_path*."""
    result = tmp_path / "output"
    result.mkdir()
    return result
def _get_ftrack_secure_key(hostname, key):
    """Build the secure-storage item key for the given ftrack hostname."""
    return "ftrack" + "/" + hostname + "/" + key
def minNumArrayRotate(array):
    """Return the smallest number in a rotated sorted array via binary
    search; returns None when the array is empty."""
    lo, hi = 0, len(array) - 1
    if lo == hi:
        return array[0]
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if hi - lo == 1:
            # Two candidates left: the smaller one is the answer.
            return array[hi] if array[lo] > array[hi] else array[lo]
        if array[mid] <= array[hi]:
            # Right half is sorted; the minimum is at mid or to its left.
            hi = mid
        else:
            lo = mid
    return None
import posixpath
def get_full_path(stack_path: str, resource_id: str) -> str:
    """
    Return the unique posix path-like identifier used to identify a
    resource among resources in a multi-stack situation. An empty
    *stack_path* yields the bare resource id.
    """
    return posixpath.join(stack_path, resource_id) if stack_path else resource_id
def decorator(fun):
    """Identity decorator: returns *fun* unchanged (no-op placeholder)."""
    return fun
def dec_hex(val):
    """
    Format an integer as hex.

    :param val: integer value
    :return: string of hexadecimal representation (``0x`` prefixed,
        upper-case digits)
    """
    return f'0x{val:X}'
def convert_tagObject(tagObject, text, mode):
    """Convert a tagObject into a dictionary of its relevant fields.

    *mode* selects the key layout: 'target' (includes the sentence text)
    or 'modifier'; any other value raises.
    """
    if mode == "target":
        return {
            "var": tagObject.categoryString(),
            "subtype": tagObject.getLiteral(),
            "phrase": tagObject.getPhrase(),
            "span": tagObject.getSpan(),
            "scope": tagObject.getScope(),
            "sentence": text,
        }
    if mode == "modifier":
        return {
            "category": tagObject.categoryString(),
            "term": tagObject.getLiteral(),
            "phrase": tagObject.getPhrase(),
            "span": tagObject.getSpan(),
            "scope": tagObject.getScope(),
        }
    raise Exception(
        "No valid Mode is selected",
        "Choose either 'target', or 'modifier'.",
        "Current mode set:", mode,
        "text:", text)
import torch
def reconstruction_loss(prediction, target, size_average=False):
    """
    Reconstruction loss between the decoder output and the original data
    (essentially MSE): per-sample sums of squared error, then summed over
    the batch (or averaged when *size_average* is True).
    """
    diff = (prediction - target).view(prediction.size(0), -1)
    per_sample = torch.sum(diff ** 2, dim=-1)
    return per_sample.mean() if size_average else per_sample.sum()
import random
def powerRandomInt(max_val):
    """Return a random integer from [0, max_val] using a power-law
    distribution.

    The underlying distribution is P(X >= n) = (c/(n+c))^4 for integer
    n >= 0 with c=20 (mean ~6, variance ~4.4); values above max_val are
    clamped to max_val. For sufficiently large max_val, roughly:
    P(0)=0.177, P(1)=0.139, P(2)=0.111, falling below 1% around n=16.

    Args:
        max_val {number} A positive number. All returned values will be
            less than this.
    Returns:
        {int} A random integer in the range [0, max_val].
    """
    sample = int(20 * random.paretovariate(4) - 20)
    # Clamp into [0, max_val].
    return max(0, min(sample, max_val))
import argparse
def parse_args():
    """
    Parse arguments provided through the command line: two mandatory file
    positions (motif, logo) and an optional reverse-complement flag.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("motif_file", metavar="motif.jaspar")
    parser.add_argument("logo_file", metavar="logo.png")
    parser.add_argument("-r", "--rev-complement", action="store_true",
                        help="plot the reverse complement logo")
    return parser.parse_args()
from typing import List
from typing import Union
def _parse_field_names(fields: List[Union[str, dict]]) -> List[str]:
    """Parse field names.

    Args:
        fields: Either field names or field descriptors with a `name` key.
    Returns:
        Field names.
    """
    return [entry if isinstance(entry, str) else entry["name"] for entry in fields]
import os
def insert_userdir(path):
    """
    Return *path* prefixed with the ``$USER_PWD`` environment variable
    (as provided by the ``makeself`` command).

    If *path* is a list of strings, the operation is performed over all
    entries and the list itself is modified in place. Returns the same
    object (str or list) but modified. If ``$USER_PWD`` is not set, the
    input is returned unchanged.

    PARAMETERS :
        path : str or list(str)
    RETURNS :
        str or list(str)

    BUG FIX: the previous checks ``path is str`` / ``path is list`` were
    identity comparisons against the type objects themselves and were
    never true for actual values, so every call with USER_PWD set raised;
    isinstance is the correct test.
    """
    if 'USER_PWD' in os.environ:
        base = os.environ['USER_PWD']
        if isinstance(path, str):
            path = os.path.join(base, path)
        elif isinstance(path, list):
            for i in range(len(path)):
                path[i] = os.path.join(base, path[i])
        else:
            raise Exception("Path must be str or list(str)")
    return path
from typing import Counter
def apply(rules, start, n):
    """Return the bag of letter pairs after applying *rules* n times to
    *start*.

    The string doubles in length with each rewrite, so a compact
    representation is used: a Counter of each unique adjacent pair.
    E.g. start string NNCB is represented as {'NN': 1, 'NC': 1, 'CB': 1}.
    """
    pairs = Counter(start[i:i + 2] for i in range(len(start) - 1))
    for _ in range(n):
        rewritten = Counter()
        for pair, count in pairs.items():
            if pair in rules:  # a rule XY -> L splits XY into XL and LY
                letter = rules[pair]
                rewritten[pair[0] + letter] += count
                rewritten[letter + pair[1]] += count
            else:
                rewritten[pair] = count
        pairs = rewritten
    return pairs
from typing import Dict
from typing import Any
def check_action(reply: Dict[str, Any]) -> Dict[str, Any]:
    """Check that the reply reports success and return it unchanged;
    raise RuntimeError with the reply's error text otherwise."""
    if reply["success"]:
        return reply
    raise RuntimeError(f"Error communicating with the large object storage:\n{reply['error']}")
import json
def _parseNative(logs, needle):
    """Parse console logs from Chrome and return the decoded JSON that
    follows *needle* in the last matching message (or None).

    Args:
        logs: Chrome log object (iterable of dicts with a 'message' key)
        needle (str): the string leading the actual JSON.
    Example:
        >>> _parseNative([{'message':'a=b'},{'message':'ac={"a":[1,2]}'}],'c=')
        {u'a': [1, 2]}
    """
    decoded = None
    for entry in logs:
        text = entry['message']
        pos = text.find(needle)
        if pos >= 0:
            decoded = json.loads(text[pos + len(needle):])
    return decoded
def add_round_key(block, key):
    """Bitwise-XOR a state block with the transposed round key.

    Parameters
    ----------
    block : np.ndarray
        4x4 column major block matrix.
    key : np.ndarray
        4x4 column major block matrix.
    Returns
    -------
    np.ndarray
        4x4 result block matrix
    """
    return key.T ^ block
import requests
def check_status(address, text):
    """Sends request to address and checks if text is present in response.

    Args:
        address (str): site address
        text (str): text to be checked in response
    Returns:
        (status, elapsed): (tuple (str, float)) status string and response
        time in seconds; elapsed stays 0 when the request itself failed.
    """
    elapsed = 0
    try:
        r = requests.get(address)
    except requests.ConnectionError:
        status = "Error: CONNECTION_ERROR"
    except Exception as e:
        # Any other failure (timeout, invalid URL, ...) is reported verbatim.
        status = 'Error: {}'.format(str(e))
    else:
        if r.status_code == 200:
            status = 'OK' if text in r.text else 'Error: CON_OK_MISSING_TEXT'
        else:
            status = 'Error: {}'.format(r.status_code)
        elapsed = r.elapsed.total_seconds()
    return status, elapsed
def get_passed_tests(log_file_path):
    """Return the names of tests marked with the OK status in a log file."""
    marker = "[ OK ] "
    passed = []
    with open(log_file_path) as handle:
        for line in handle:
            if marker in line:
                passed.append(line.split(marker)[1])
    return passed
import sys
def err(func, id, arg, info=''):
    """
    Raise specific errors for internal checks.

    With :py:data:`func` and :py:data:`id` a dictionary of error functions is accessed.

    Args:
        func (str): name of function in which error occured
        id (int): number of dictionary entry for function :py:data:`func`
        arg (list): list of arguments specific for each error (can vary in length and type)
        info (str, optional): text displayed in error message
    Returns:
        :py:func:`sys.exit()` displaying an error message
    """
    # define functions returning the right error message
    def _err_tra_index_1(args):
        return ("INVALID NUMBER OF SNAPSHOTS\n%-24s%d\n%-24s%d"
                % ("SELECTED NUMBER N:", args[0], "TOTAL STEPS:", args[1]))
    def _err_tra_index_2(args):
        return ("T2 NEEDS TO BE LARGER THAN T1\n%-24s%.8f\n%-24s%.8f\n"
                % ("SELECTED TIME T1:", args[0], "SELECTED TIME T2:", args[1]))
    def _err_tra_index_3(args):
        return ("INVALID INTERVAL SELECTION\n%-24s%.8f\n%-24s%.8f\n%-24s%.8f\n%-24s%.8f"
                % ("SELECTED TIME T1:", args[0], "SELECTED TIME T2:",
                   args[1], "FIRST TIME STEP:", args[2], "LAST TIME STEP:", args[3]))
    def _err_tra_index_4(args):
        return ("INVALID NUMBER OF SNAPSHOTS FOR INTERVAL\n%-24s%d\n%-24s%d"
                % ("SELECTED NUMBER N:", args[0], "STEPS IN INTERVAL:", args[1]))
    def _err_pbc_apply3x3(args):
        return "INVALID ARGUMENTS\nEITHER SELECTION BY ID OR NAME, NOT BOTH"
    def _err_ion_single1(args):
        return "%s\n%-24s%d" % ("ONLY ONE ION ALLOWED", "IONS SELECTED:", args[0])
    def _err_ion_single2(args):
        return ("%s\n%-24s%-6s%-6s%-6s"
                % ("THREE DIFFERENT SPECIES NEEDED", "SELECTED SPECIES:", args[0], args[1], args[2]))
    def _err_water_single(args):
        return "%s\n%-24s%-6s%-6s" % ("TWO DIFFERENT SPECIES NEEDED", "SELECTED SPECIES:", args[0], args[1])
    def _err_hbonds_load1(args):
        return ("%s\n%-6d%s\n%-6d%s"
                % ("NUMBER OF SNAPSHOTS DOES NOT MATCH IN FILES:", args[0], args[1], args[2], args[3]))
    def _err_hbonds_load2(args):
        return "%s\n%s\n%s\n%s" % ("DATA OF FILES DOES NOT MATCH", "ITERATION VALUES NOT COMPATIBLE", args[0], args[1])
    def _err_scntl_text1(args):
        return "MORE BRACKETS CLOSED THAN OPENED"
    def _err_scntl_text2(args):
        return "%s\n%d %s" % ("MORE BRACKETS OPENED THAN CLOSED", args[0], "STILL OPEN")
    def _err_scntl_read1(args):
        return "MISSING PARAMETER IN %s\nPLEASE PROVIDE PARAMETER" % args[0]
    def _err_scntl_read2(args):
        return "PLEASE PROVIDE BLOCK !TRA IN %s.scntl" % args[0]
    def _err_argcheck1(args):
        return "WRONG NUMBER OF ARGUMENTS GIVEN\n%-12s%d\n%-12s%d" % ("EXPECTED:", 1, "GIVEN:", args[0])
    def _err_argcheck2(args):
        return "WRONG FILE ENDING\nPLEASE USE %s" % args[0]
    # store functions in dictionary (function name = key, id = position in list)
    error = {
        'tra_index': [_err_tra_index_1, _err_tra_index_2, _err_tra_index_3, _err_tra_index_4],
        'pbc_apply3x3': [_err_pbc_apply3x3],
        'ion_single': [_err_ion_single1, _err_ion_single2],
        'water_single': [_err_water_single],
        'hbonds_load': [_err_hbonds_load1, _err_hbonds_load2],
        'scntl_text': [_err_scntl_text1, _err_scntl_text2],
        'scntl_read': [_err_scntl_read1, _err_scntl_read2],
        'argcheck': [_err_argcheck1, _err_argcheck2],
        'structure_water': [_err_hbonds_load1, _err_hbonds_load2]
    }
    try:
        sys.exit("\nPROGRAM TERMINATED\nERROR IN FUNCTION: %s\n\n%s\n%s" % (func, error[func][id](arg), info))
    # BUG FIX: the original `except KeyError and IndexError:` evaluated the
    # boolean expression to IndexError only, so an unknown `func` (KeyError)
    # escaped the fallback. Catch both with a proper tuple.
    except (KeyError, IndexError):  # catch wrong parameters while raising errors
        sys.exit("WRONG INTERNAL ERROR HANDLING\nNO ERROR MESSAGE FOR FUNCTION %s WITH ID %d AND ARGUMENTS %s"
                 % (func, id, str(arg)))
def _clean(request_body):
""" Removes unused text from the request body for proper parsing.
For example: AnimalsCollectRequest(ids:[1,2,3]) --> ids:[1,2,3]
:param request_body: the request body
:type request_body: str
:returns: a cleaned request_body that is simpler to parse
:rtype: str
"""
try:
colon_index = request_body.index(':')
open_paren_index = request_body.index('(')
if open_paren_index < colon_index:
close_paren_index = request_body.rindex(')')
request_body = request_body[open_paren_index+1:close_paren_index]
except ValueError:
pass
return request_body | 36a3f717fc0f360a7552cb5e9d2c23473dbe198f | 39,148 |
def field_fmt(item):
    """Return the item's format string, wrapped in double quotes unless it already contains one."""
    fmt = item['fmt']
    if '"' in fmt:
        return fmt
    return '"{:s}"'.format(fmt)
def adjust_after_shuffle(interval):
    """
    Adjusts name and strand to correct name and strand after shuffling (assumes use of shuffle_transcriptome method)
    """
    # Fields 11 and 13 carry the name/strand that belonged to the second
    # record before the shuffle; copy them back onto this interval.
    interval.name, interval.strand = interval[11], interval[13]
    return interval
import json


def safeJson(result, path=()):
    """Take the result of a requests call and format it into a structured dict.

    :param result: a ``requests``-style response (anything exposing
        ``status_code``, ``text`` and ``json()``)
    :param path: optional sequence of keys/indices used to drill into the
        decoded JSON body (default: empty, i.e. return the whole body;
        an immutable tuple avoids the mutable-default-argument pitfall)
    :returns: the value found at ``path``, or a ``{'code': ..., 'content': ...}``
        dict describing the failure
    """
    if result.status_code != 200:
        output = {'code': result.status_code, 'content': result.text}
        print("ConfigManager: get secret failed (token expired?)")
        print(json.dumps(output, indent=2))
        return output
    output = result.json()
    for level in path:
        try:
            output = output[level]
        # BUG FIX: was a bare `except:` that swallowed everything, including
        # KeyboardInterrupt; only lookup failures should mean "path not found".
        except (KeyError, IndexError, TypeError):
            return {'code': 200, 'content': "path not found: {0}".format(path)}
    return output
def reference_value_for_covariate_mean_all_values(cov_df):
    """
    Strategy for choosing reference value for country covariate.
    This one takes the mean of all incoming covariate values.
    """
    mean_of_means = cov_df["mean_value"].mean()
    return float(mean_of_means)
import os


def new_file_name(file_name, values_str):
    """
    Return the file name of the new configuration file: the original base
    name with ``values_str`` appended.  If the original file had an
    extension (the part of the name after the last dot), it is kept at the
    end of the new name.
    """
    base_name, dot, ext = os.path.basename(file_name).rpartition('.')
    # BUG FIX: with no dot, rpartition returns ('', '', name), which used to
    # yield '-<values><name>' instead of '<name>-<values>'.
    if not dot:
        return ext + '-' + values_str
    return base_name + '-' + values_str + dot + ext
def determine(hand):
    """Returns a list of values, a set of values, a list of suits, and a list of cards within a hand."""
    values = []
    vset = set()
    suits = []
    all_cards = []
    for card in hand:
        value = int(card)
        values.append(value)
        vset.add(value)
        suits.append(card.suit)
        all_cards.append(card)
    # Values come back highest-first; the other collections keep hand order.
    return sorted(values, reverse=True), vset, suits, all_cards
def osm_zoom_level_to_pixels_per_meter(
    zoom_level: float, equator_length: float
) -> float:
    """
    Convert OSM zoom level to pixels per meter on Equator. See
    https://wiki.openstreetmap.org/wiki/Zoom_levels

    :param zoom_level: integer number usually not bigger than 20, but this
        function allows any non-negative float value
    :param equator_length: celestial body equator length in meters
    """
    # One zoom-0 tile (256 px) spans the whole equator; each level doubles it.
    tiles = 2.0 ** zoom_level / equator_length
    return tiles * 256.0
import random


def split_train_val_dataframe(df_all, split_frac=0.2, verbose=False):
    """Split train valid dataset.

    The split is stratified per construction site: each site's sample ids
    are shuffled and roughly ``split_frac`` of them go to validation.
    """
    train_ids = []
    val_ids = []
    for site, site_total in list(df_all.construction_site.value_counts().items()):
        ids = list(df_all[df_all.construction_site == site].sample_id.values)
        assert len(ids) == site_total
        cut = int(site_total * split_frac)
        random.shuffle(ids)
        val_ids.extend(ids[:cut])
        train_ids.extend(ids[cut:])
        if verbose:
            print(f'Construction site {site}, total samples {site_total}')
            print(f'train samples {len(ids[cut:])} , valid samples {len(ids[:cut])}')
    return train_ids, val_ids
def subtract(minuend, subtrahend):
    """Subtracts subtrahend from minuend.

    This function subtracts subtrahend from minuend only when they are
    either integers or floats.

    Args:
        minuend(int/float): The quantity or number from which another
            is to be subtracted
        subtrahend(int/float): The quantity or number to be subtracted
            from another

    >>> subtract(10, 6)
    4

    Returns:
        The difference, i.e. minuend minus subtrahend

    Raises:
        TypeError if subtrahend or minuend is neither an integer nor a
        float
    """
    # (Docstring fixed: it previously described the operation backwards.)
    if not isinstance(subtrahend, (int, float)):
        raise TypeError(f"{subtrahend} is not an integer or float")
    if not isinstance(minuend, (int, float)):
        raise TypeError(f"{minuend} is not an integer or float")
    return minuend - subtrahend
def strip(listed):
    """Strip each string in *listed* and return the results as a list.

    Previously this returned a lazy ``map`` object (a Python-2 leftover),
    which could only be consumed once; a list matches the docstring and is
    safe to iterate repeatedly.
    """
    return [item.strip() for item in listed]
def decode_remote_id(msg):
    """Decode a 3-byte big-endian remote id into its decimal string form.

    >>> decode_remote_id([0x01, 0xe2, 0x40])
    '123456'
    >>> decode_remote_id([0x03, 0x42, 0x2a])
    '213546'
    >>> decode_remote_id([0x0c, 0x89, 0x92])
    '821650'
    """
    combined = (msg[0] << 16) + (msg[1] << 8) + msg[2]
    return str(combined)
import hashlib


def get_email_id(row):
    """
    Creates a unique identifier for each email based on the
    date received and a header ID
    """
    fingerprint = row['X-GM-THRID'] + str(row['Date'])
    digest = hashlib.md5()
    digest.update(fingerprint.encode('utf-8'))
    return digest.hexdigest()
def split_by_unescaped_sep(text, sep=':'):
    """Split string at sep but only if not escaped.

    A separator counts as escaped when the text before it ends in an odd
    number of backslashes; escaped separators stay inside their piece
    (backslashes are preserved, not consumed).
    """
    pieces = []
    for piece in text.split(sep):
        if pieces:
            previous = pieces[-1]
            trailing_escapes = len(previous) - len(previous.rstrip('\\'))
            if trailing_escapes % 2 == 1:
                # The split point was escaped: glue this piece back on.
                pieces[-1] = previous + sep + piece
                continue
        pieces.append(piece)
    return pieces
import copy


def v4_to_v3_package(v4_package):
    """Converts a v4 package to a v3 package

    :param v4_package: a v4 package
    :type v4_package: dict
    :return: a v3 package (the input is left untouched)
    :rtype: dict
    """
    v3_package = copy.deepcopy(v4_package)
    # These keys only exist in the v4 schema.
    for v4_only_key in ('upgradesFrom', 'downgradesTo'):
        v3_package.pop(v4_only_key, None)
    v3_package["packagingVersion"] = "3.0"
    return v3_package
import pandas
from typing import Collection
from typing import Hashable


def cast_multiindex(obj: pandas.MultiIndex, brief_dims: Collection[Hashable]):
    """Single dispatch specialised variant of :func:`cast` for
    :class:`pandas.MultiIndex`.

    Maps the index to its level names plus a set of row tuples.  Levels
    are positional within each tuple; using a set makes two indices
    compare equal regardless of row order.
    """
    row_tuples = set(obj.tolist())
    return {"names": obj.names, "data": row_tuples}
import os


def rootname(path: str) -> str:
    """Get 'root' part of path (filename without ext)."""
    return os.path.splitext(os.path.basename(path))[0]
def sovatoms_to_tokens(sovatoms: int) -> float:
    """Convert sovatoms to tokens (one token is 100,000,000 sovatoms)."""
    sovatoms_per_token = 100000000
    return sovatoms / sovatoms_per_token
def plot_pareto(
    self,
    x_symbol,
    y_symbol,
    c_symbol=None,
    cmap=None,
    ax=None,
    title=None,
    grid=False,
    is_show_fig=True,
    save_path=None,
):
    """Plot the pareto front for 2 objective functions.

    Thin wrapper: delegates to ``self.parent.plot_pareto`` with the same
    arguments, in the same order.

    Parameters
    ----------
    self : XOutput
    x_symbol : str
        symbol of the first objective function
    y_symbol: str
        symbol of the second objective function
    c_symbol: str
        optional symbol to set the plot colors
    cmap: colormap
        optional colormap
    ax : matplotlib axes, optional
        axes to draw on; presumably a new figure is created when None -- TODO confirm in parent
    title : str, optional
        title for the plot
    grid : bool
        True to draw a grid on the plot
    is_show_fig : bool
        True to show figure after plot
    save_path : str
        full path of the png file where the figure is saved if save_path is not None

    Returns
    -------
    Whatever ``self.parent.plot_pareto`` returns.
    """
    return self.parent.plot_pareto(
        x_symbol,
        y_symbol,
        c_symbol,
        cmap,
        ax,
        title,
        grid,
        is_show_fig,
        save_path,
    )
import os


def open_detector_file(destination_dir, detector_file_name):
    """ Opens a new detector file (write mode) in the given directory and returns the handle; the caller is responsible for closing it. """
    detector_path = os.path.join(destination_dir, detector_file_name)
    return open(detector_path, "w")
import argparse
import configparser


def parse():
    """Parse config file, update with command line arguments."""
    # Hard-coded fallbacks, used when neither the config file nor the
    # command line provide a value.
    defaults = {"stratfile": "strat.txt"}
    # First pass: only look for -c/--conf_file, ignore everything else.
    conf_parser = argparse.ArgumentParser(
        description=__doc__,  # printed with -h/--help
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Turn off help, so we print all options in response to -h
        add_help=False
    )
    conf_parser.add_argument("-c", "--conf_file",
                             help="Specify config file", metavar="FILE")
    conf_args, remaining_argv = conf_parser.parse_known_args()
    if conf_args.conf_file:
        config = configparser.ConfigParser()
        config.read([conf_args.conf_file])
        defaults.update(dict(config.items("Defaults")))
    # Second pass: the real parser, seeded with the (possibly file-derived)
    # defaults and inheriting the -c option for the help text.
    parser = argparse.ArgumentParser(
        parents=[conf_parser]
    )
    parser.set_defaults(**defaults)
    parser.add_argument("--stratfile", help="""File containing the Backtrace strings.
    """)
    return parser.parse_args(remaining_argv)
def reverse_invert(lst):
    """Reverse the digits of each integer and flip its sign; drop non-ints.

    Non-integer elements (floats, strings, bools) are skipped, matching the
    original filtering behaviour.

    Example: [231, -15, 'a'] -> [-132, 51]
    """
    res = []
    for e in lst:
        if type(e) is int:  # `type(...) is int` deliberately excludes bool
            # BUG FIX: math.copysign returned floats and silently lost
            # digits for |e| > 2**53; pure int arithmetic is exact.
            reversed_magnitude = int(str(abs(e))[::-1])
            res.append(-reversed_magnitude if e >= 0 else reversed_magnitude)
    return res
def get_seqname(
        locus_text: str) -> str:
    """
    Args:
        locus_text:
            The LOCUS (i.e. header section) of genbank file

    Returns:
        The seqname of chromosome (first token after the LOCUS keyword)
    """
    # BUG FIX: str.find returns -1 when there is no newline, and slicing
    # with [:-1] then chopped off the last character of the name;
    # split('\n', 1) handles the single-line case correctly.
    line1 = locus_text.split('\n', 1)[0]
    return line1[len('LOCUS'):].lstrip().split(' '*3)[0]
def filter_json(source, fields):
    """
    Keep only the given fields in each dict of ``source``.

    :param source: iterable of dicts
    :param fields: collection of keys to keep
    :return: list of dicts containing only the requested keys
    """
    # Comprehensions replace the original nested map/filter/lambda chain;
    # a set makes the per-key membership test O(1).
    wanted = set(fields)
    return [{key: value for key, value in record.items() if key in wanted}
            for record in source]
def overlap(interval1, interval2):
    """
    Returns the total amount of overlap between two intervals in the format of (x,y)
    Example:
        input: (0,10) , (5,10)
        returns: 5
    """
    latest_start = max(interval1[0], interval2[0])
    earliest_end = min(interval1[1], interval2[1])
    # Disjoint intervals yield a negative span; clamp to zero.
    return max(0, earliest_end - latest_start)
def filt_dct(dct):
    """Filter None values from dict."""
    return {key: value for key, value in dct.items() if value is not None}
from typing import Sequence


def is_same_shape(a: Sequence, b: Sequence) -> bool:
    """
    Compares two shapes a and b, returning True if they are the same
    (their ranks and corresponding lengths match) and False otherwise.
    """
    # Normalising both to tuples lets list/tuple mixtures compare equal.
    shape_a = tuple(a)
    shape_b = tuple(b)
    return shape_a == shape_b
import os


def check_path(path, base_directory=''):
    """
    Verifies Path for storing external files.

    :param path: candidate path
    :param base_directory: directory the path must stay inside; when empty
        (the default) any path is accepted, preserving the old behaviour
    :returns: the normalised path
    :raises Exception: if ``path`` lies outside ``base_directory``
    """
    file_path = os.path.normpath(path)
    if base_directory:
        base = os.path.normpath(base_directory)
        prefix = base if base.endswith(os.sep) else base + os.sep
        # SECURITY FIX: a bare str.startswith(base) wrongly accepted sibling
        # paths such as '/base-evil/x' for base '/base'; require the base
        # itself or base plus a path separator.
        if file_path != base and not file_path.startswith(prefix):
            raise Exception('%s is not a valid path for storing external files!' % path)
    return file_path
import requests
from bs4 import BeautifulSoup


def get_link_to_text(project_id):
    """
    Look up the download link for the submitted text of a draft law on
    sozd.duma.gov.ru.

    :param project_id: bill identifier on the site
    :returns: download URL of the submitted text, or None when it cannot
        be found or the request fails
    """
    url = 'https://sozd.duma.gov.ru/bill/' + project_id
    try:
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'html.parser')
        registration_stage = soup.find_all('div', {'data-eventnum': '1.1'})
        # BUG FIX: index [1] is used below, so at least two matches are
        # required; the old `len(...) > 0` guard let a single match raise
        # an uncaught IndexError.
        if registration_stage and len(registration_stage) > 1:
            docs = registration_stage[1]
            docs_links = docs.find_all('a')
            project_texts = [x for x in docs_links
                             if x.text.strip().startswith('Текст внесенного')]
            if project_texts:
                doc_guid = project_texts[0].attrs['id']
                return 'https://sozd.duma.gov.ru/download/' + doc_guid
    except requests.exceptions.RequestException as e:
        print(e)
    return None
def get_cp(fs_id):
    """use ID to determine patient or control"""
    # The fourth underscore-separated token encodes the subject group.
    group_tag = fs_id.split("_")[3]
    if group_tag in ("FCD", "fcd"):
        return "patient"
    if group_tag in ("C", "c"):
        return "control"
    print("subject " + fs_id + " cannot be identified as either patient or control...")
    print("Please double check the IDs in the list of subjects")
    return "false"
def get_holders(ipr):
    """Recursive function to follow chain of disclosure updates and return holder emails"""
    # Own email (if any) first, then each updated disclosure's chain in order.
    emails = [ipr.holder_contact_email] if hasattr(ipr, 'holder_contact_email') else []
    for update in ipr.updates:
        emails.extend(get_holders(update.target.get_child()))
    return emails
from typing import List


def max_sub_array(nums: List[int]) -> int:
    """
    Largest sum of any contiguous subarray (Kadane's algorithm).

    BUG FIX: the previous version overwrote ``nums`` in place, corrupting
    the caller's list; this one leaves the input untouched.

    :param nums: non-empty list of integers
    :return: maximum subarray sum

    >>> max_sub_array([-2,1,-3,4,-1,2,1,-5,4])
    6

    >>> max_sub_array([-2,1])
    1
    """
    best = current = nums[0]
    for value in nums[1:]:
        # Extend the running sum only while it helps; otherwise restart here.
        current = value + max(current, 0)
        best = max(best, current)
    return best
def has_n_leading_zeroes(num_zeroes, digest):
    """Check if the given digest has the required number of leading zeroes."""
    return digest.startswith('0' * num_zeroes)
import math


def stringify_path(signed_hashes, encoding):
    """Return a printable, line-per-entry rendering of a sequence of signed hashes.

    Each hash is decoded with ``encoding`` unless it is already a string.
    The output of this function is meant to be passed to ``print``.

    :param signed_hashes: a sequence of signed hashes
    :type signed_hashes: tuple of (+1/-1, bytes) pairs
    :param encoding: type to be used for decoding
    :type encoding: str
    :rtype: str
    """
    if signed_hashes is None:
        return ''
    rendered = []
    for index, (sign, checksum) in enumerate(signed_hashes):
        # Padding shrinks as the index gains digits so the columns line up.
        magnitude = 0 if index == 0 else int(math.log10(index))
        sign_text = str(sign) if sign < 0 else '+' + str(sign)
        hash_text = checksum if isinstance(checksum, str) \
            else checksum.decode(encoding=encoding)
        rendered.append('\n' + ' ' * (7 - magnitude) + '[%d]' % index
                        + ' ' * 3 + sign_text + ' ' * 2 + hash_text)
    return ''.join(rendered)
import os


def override_fail_retries(env_var, default_value=None):
    """Used to patch environment.getEnv to set FAIL_RETRIES."""
    # FAIL_RETRIES is pinned to 1; all other variables read the real env.
    if env_var == 'FAIL_RETRIES':
        return 1
    return os.getenv(env_var, default=default_value)
import torch


def tangent_vectors(normals):
    """Returns a pair of vector fields u and v to complete the orthonormal basis [n,u,v].

    normals -> uv
    (N, 3) or (N, S, 3) -> (N, 2, 3) or (N, S, 2, 3)

    This routine assumes that the 3D "normal" vectors are normalized.
    It is based on the 2017 paper from Pixar, "Building an orthonormal basis, revisited".

    Args:
        normals (Tensor): (N,3) or (N,S,3) normals `n_i`, i.e. unit-norm 3D vectors.

    Returns:
        (Tensor): (N,2,3) or (N,S,2,3) unit vectors `u_i` and `v_i` to complete
            the tangent coordinate systems `[n_i,u_i,v_i].
    """
    nx, ny, nz = normals[..., 0], normals[..., 1], normals[..., 2]
    sign = (2 * (nz >= 0)) - 1.0  # = nz.sign(), but =1. if nz=0.
    inv = -1 / (sign + nz)
    mixed = nx * ny * inv
    basis = torch.stack(
        (1 + sign * nx * nx * inv, sign * mixed, -sign * nx,
         mixed, sign + ny * ny * inv, -ny),
        dim=-1,
    )
    return basis.view(basis.shape[:-1] + (2, 3))
from typing import List
from typing import Dict
def _tags_as_dict(tags: List[Dict]) -> Dict[str, str]:
"""
Convert a list of tags to a dictionary
:param tags: the list of tags
:return: the dictionary of tags
"""
return {tag["Key"]: tag.get("Value", "") for tag in tags} | f909f1f4fb6773cf3b1e32067d963738b6e7023d | 39,196 |
import functools
import unittest


def system_must_validate_cert(f):
    """Skip the test on TLS certificate validation failures."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            f(*args, **kwargs)
        except OSError as err:
            # Only certificate-verification failures become skips; any
            # other OSError is re-raised untouched.
            if "CERTIFICATE_VERIFY_FAILED" not in str(err):
                raise
            raise unittest.SkipTest("system does not contain "
                                    "necessary certificates")
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.