content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def calculaNovaSerieForm(modelo, serie, anoReferencia, tabelaSerie):
    """
    Given a student's situation from a form, compute the appropriate grade
    ("serie") in which that student can be enrolled. If the target grade is
    not active, or the student has already passed the last grade served by
    the NGO, there is no appropriate grade and ``None`` is returned.

    Args:
        modelo: planning model exposing ``anoPlanejamento`` (planning year)
            and ``ordemUltimaSerie`` (order of the last grade served).
        serie: label of the student's current grade (row index of ``tabelaSerie``).
        anoReferencia: year the form data refers to.
        tabelaSerie: table indexed by grade label with an ``ordem`` column
            (ordinal position) and an ``ativa`` column (0 = inactive)
            -- presumably a pandas DataFrame; TODO confirm.

    Returns:
        The label of the new grade, or ``None`` when no active grade applies.
    """
    # Advance the grade order by the number of years elapsed between the
    # reference year and the planning year.
    ordem = tabelaSerie['ordem'][serie] + modelo.anoPlanejamento - anoReferencia
    if ordem <= modelo.ordemUltimaSerie:
        # Look up the grade label whose 'ordem' equals the computed order.
        novaSerie = tabelaSerie[(tabelaSerie['ordem'] == ordem)].index[0]
        # An inactive grade (ativa == 0) is not a valid destination.
        if tabelaSerie['ativa'][novaSerie] == 0:
            novaSerie = None
    else:
        novaSerie = None
    return novaSerie | 900631802c34301c53fe63c3675fe6d1466406ba | 47,236 |
import torch
def get_optimal_reference_mic(
    bf_mat: torch.Tensor,
    target_scm: torch.Tensor,
    noise_scm: torch.Tensor,
    eps: float = 1e-6,
):
    """Compute the optimal reference mic given the a posteriori SNR, see [1].
    Args:
        bf_mat: (batch, freq, mics, mics)
        target_scm (torch.ComplexTensor): (batch, freqs, mics, mics)
        noise_scm (torch.ComplexTensor): (batch, freqs, mics, mics)
        eps: value to clip the denominator.
    Returns:
        torch.LongTensor: index of the best reference mic per batch entry,
        i.e. the argmax of the a posteriori SNR over the mic dimension.
    References
        Erdogan et al. 2016: "Improved MVDR beamforming using single-channel maskprediction networks"
        https://www.merl.com/publications/docs/TR2016-072.pdf
    """
    # Residual noise power per candidate reference mic, w^H Phi_nn w, with the
    # freq and mic dims contracted by the einsum; clamped so division is safe.
    den = torch.clamp(
        torch.einsum("...flm,...fln,...fnm->...m", bf_mat.conj(), noise_scm, bf_mat).real, min=eps
    )
    # A posteriori SNR per mic: (w^H Phi_ss w) / (w^H Phi_nn w).
    snr_post = (
        torch.einsum("...flm,...fln,...fnm->...m", bf_mat.conj(), target_scm, bf_mat).real / den
    )
    assert torch.all(torch.isfinite(snr_post)), snr_post
    return torch.argmax(snr_post, dim=-1) | 16f2c7b5b91987e487c8643b61628c2e8ea5f9d4 | 47,237 |
import os
def get_notesdir():
    """Gets the fullpath to the main app directory
    :param: none
    :return: the app's home folder (either NOTESDIR or $HOME/.notes)
    :rtype: str
    """
    if "NOTESDIR" in os.environ:
        notesdir = os.environ["NOTESDIR"]
    else:
        # Fall back to a hidden folder in the user's home directory; use
        # os.path.join instead of manual "/" concatenation for portability.
        notesdir = os.path.join(os.environ["HOME"], ".notes")  # pragma: no cover
    return os.path.realpath(notesdir)
def tree_display(input_dict: dict, depth: int = 0) -> str:
    """
    Displays the dict in a treefied format.

    Nested dicts are rendered recursively: keys are wrapped in <b> tags,
    the last entry at each level gets the closing branch character, and
    each nesting level is prefixed with a vertical skip character.

    :param input_dict: python dict to be treefied (non-dict values are
        rendered with str())
    :param depth: depth to start at for the dict
    :return: treefied message
    """
    FINAL_CHAR = " └ "
    ENTRY_CHAR = " ├ "
    SKIP_CHAR = " ┊ "
    KEY_CHAR = ": "
    NEWLINE_CHAR = "\n"
    # Leaf case: anything that is not a dict is simply stringified.
    if not isinstance(input_dict, dict):
        return str(input_dict)
    out_str = ""
    final_index = len(input_dict) - 1
    for current_index, (key, value) in enumerate(input_dict.items()):
        # One skip character per nesting level, then the branch character
        # (closing variant for the last sibling).
        out_str += SKIP_CHAR * depth
        out_str += FINAL_CHAR if current_index == final_index else ENTRY_CHAR
        if isinstance(value, dict):
            # Sub-dict: key on its own line, children rendered one level deeper.
            out_str += "<b>" + key + "</b>" + NEWLINE_CHAR + tree_display(value, depth + 1)
        else:
            # Plain value: "key: value" on a single line.
            out_str += (
                "<b>" + key + "</b>" + KEY_CHAR + tree_display(value, depth + 1) + NEWLINE_CHAR
            )
    return out_str
import subprocess
def git_commit(directory):
    """Runs the git command to commit everything in a given directory

    Note: returns immediately with the spawned process; the caller is
    responsible for waiting on it (e.g. ``.wait()``) and checking the
    exit code.

    :param directory: working directory in which to run ``git commit``
    :return: the ``subprocess.Popen`` handle of the running git process
    """
    return subprocess.Popen(["git", "commit", "--all", "-m", "pypear init"], cwd=directory) | b37229ac74fa42e7b830d1f13b57921d665bd7f80 | 47,240 |
import os
def availProg(prog, myEnv):
    """Check whether *prog* resolves to an executable file on myEnv's PATH.

    :param prog: program name to look for
    :param myEnv: environment mapping providing a "PATH" entry
    :return: True if an executable file named prog exists on the PATH
    """
    for directory in myEnv["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, prog)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return True
    return False
import torch
def _get_model_analysis_input(cfg, use_train_input):
"""
Return a dummy input for model analysis with batch size 1. The input is
used for analyzing the model (counting flops and activations etc.).
Args:
cfg (Config): the global config object.
use_train_input (bool): if True, return the input for training. Otherwise,
return the input for testing.
Returns:
Args: the input for model analysis.
"""
rgb_dimension = 3
if use_train_input:
input_tensors = torch.rand(
rgb_dimension,
cfg.DATA.NUM_INPUT_FRAMES,
cfg.DATA.TRAIN_CROP_SIZE,
cfg.DATA.TRAIN_CROP_SIZE,
)
else:
input_tensors = torch.rand(
rgb_dimension,
cfg.DATA.NUM_INPUT_FRAMES,
cfg.DATA.TEST_CROP_SIZE,
cfg.DATA.TEST_CROP_SIZE,
)
model_inputs = input_tensors.unsqueeze(0)
if cfg.NUM_GPUS:
model_inputs = model_inputs.cuda(non_blocking=True)
inputs = {"video": model_inputs}
return inputs | 6a6b85e11182001a2596acda9bfbdbf3616311b8 | 47,242 |
import difflib
def diff(s1, s2):
    """Compute the difference between two strings normalized by the length
    of the longest of the two strings.

    Returns 0.0 for identical strings. Two empty strings are identical,
    so 0.0 is returned instead of raising ZeroDivisionError (which the
    previous version did).
    """
    longest = max((s1, s2), key=len)
    if not longest:
        # Both strings are empty: no difference by definition.
        return 0.0
    # Count every ndiff line that is not an "unchanged" line (' ' prefix).
    return sum(d[0] != ' ' for d in difflib.ndiff(s1, s2)) / len(longest)
from typing import List
from typing import Tuple
def parse_data(file_name: str) -> List[List[Tuple[int, int]]]:
    """
    Read coordinate pairs from a file into a nested list.

    Each line has the form ``x1,y1 -> x2,y2`` and yields one entry
    ``[(x1, y1), (x2, y2)]``.

    Args:
        file_name (str): name of file in folder / or absolute path
    Returns:
        List[List[Tuple[int, int]]]: a nested list of coordinates
    """
    coordinates: list = []
    with open(file_name, encoding="utf-8") as handle:
        for raw_line in handle:
            left, right = raw_line.rstrip().split("->")
            start = tuple(int(part) for part in left.split(","))
            end = tuple(int(part) for part in right.split(","))
            coordinates.append([start, end])
    return coordinates
def _check(n):
    """
    This is a method decorator that checks before executing an operation. The
    decorated method returns True if the operator's impact is known at
    runtime, and False otherwise. The decorator removes the values from the
    stack and passes them on to the original method.

    :param n: number of operands the wrapped operation consumes
    """
    def dec(f):
        def _(self):
            # Not enough operands on the stack: impact unknown, do nothing.
            if len(self.stack) < n:
                return False
            # Pop n operands; note they are passed in pop order, i.e. the
            # TOP of the stack becomes the FIRST positional argument.
            args = [self.stack.pop() for _ in range(n)]
            res = f(self, *args)
            # A non-None result is pushed back as the operation's value.
            if res is not None:
                self.stack.append(res)
            return True
        return _
    return dec | 3fde59519daa481e8318963a5b096351184fdf73 | 47,248 |
def convertDogPeaks(peaks, params):
    """
    This is an old libcv2 function that is no longer used.

    Convert an (N, 3) peak array with rows (row, col, scale) into a list of
    dicts with image coordinates scaled by the binning factor.

    Args:
        peaks: array of shape (N, 3); columns are (row, col, scale).
        params: dict with a 'bin' entry holding the binning factor.
    Returns:
        list of dicts with 'xcoord', 'ycoord' and 'size' keys.
    """
    # Named 'binning' to avoid shadowing the builtin bin().
    binning = params['bin']
    dictpeaks = []
    for i in range(peaks.shape[0]):
        # Build a fresh dict per peak (the old code reused one dict and
        # relied on .copy(), which is easy to get wrong).
        dictpeaks.append({
            'xcoord': peaks[i, 1] * binning,
            'ycoord': peaks[i, 0] * binning,
            'size': peaks[i, 2],
        })
    return dictpeaks
import time
def blockingCalculation(a, b):
    """
    Returns a*b, slowly. This is an example of a function that
    blocks. Note that it has no special decorations -- this could
    just as easily be a standard python disk or network function. But
    time.sleep is enough.
    """
    # Stand-in for an expensive blocking call (disk, network, ...).
    time.sleep(2.)
    product = a * b
    return product
import json
def read_json_source(filepath: str):
    """ Read Json Configuration Template """
    with open(filepath) as config_file:
        return json.load(config_file)
def compute_pll_clkregs(divide, duty, phase):
    """
    Returns a string of 0s and 1s with the following layout. Bits are indexed
    starting from LSB.
    CLKREG2: RESERVED[6:0], MX[1:0], EDGE, NO_COUNT, DELAY_TIME[5:0]
    CLKREG1: PHASE_MUX[3:0], RESERVED, HIGH_TIME[5:0], LOW_TIME[5:0]

    :param divide: integer output divider, 1..128
    :param duty: duty cycle as a fraction, strictly between 0.0 and 1.0
    :param phase: phase shift in degrees, strictly between -360.0 and 360.0
    :return: 32-character '0'/'1' string, LSB first (note the final reversal)
    """
    # Fixed-point fractional precision used for duty/phase arithmetic.
    PLL_FRAC_PRECISION = 10
    def round_frac(x, n):
        # Round x to n decimal places of the fixed-point scale.
        scale = (1 << PLL_FRAC_PRECISION)
        return int(round(x / scale, n) * scale)
    # Sanity check argument types and values
    assert isinstance(divide, int), type(divide)
    assert divide >= 1 and divide <= 128, divide
    assert isinstance(duty, int) or isinstance(duty, float), type(duty)
    assert duty > 0.0 and duty < 1.0, duty
    assert isinstance(phase, int) or isinstance(phase, float), type(phase)
    assert phase > -360.0 and phase < 360.0, phase
    # Phase shift register fields
    # Normalize negative phases into [0, 360).
    if phase < 0.0:
        phase += 360.0
    phase_fixed = int(phase * (1 << PLL_FRAC_PRECISION))
    phase_in_cycles = round_frac((phase_fixed * divide) // 360, 3)
    # PHASE_MUX: 3-bit sub-cycle phase; DELAY_TIME: 6-bit whole-cycle delay.
    phase_mux = (phase_in_cycles >> (PLL_FRAC_PRECISION - 3)) & 0x07
    delay_tim = (phase_in_cycles >> (PLL_FRAC_PRECISION)) & 0x3F
    # Divider register fields
    if divide > 1:
        # For large dividers the achievable duty range narrows; clamp it.
        if divide >= 64:
            duty_min = (divide - 64.0) / divide
            duty_max = 64.0 / divide
            duty = max(duty_min, min(duty, duty_max))
        duty_fix = int(duty * (1 << PLL_FRAC_PRECISION))
        temp = round_frac(duty_fix * divide, 1)
        # HIGH_TIME: whole cycles high; w_edge: half-cycle rounding bit.
        high_time = (temp >> PLL_FRAC_PRECISION) & 0x7F
        w_edge = (temp >> (PLL_FRAC_PRECISION - 1)) & 0x01
        # Keep high/low times within 1..divide-1 cycles.
        if high_time == 0:
            high_time = 1
            w_edge = 0
        elif high_time == divide:
            high_time = divide - 1
            w_edge = 1
        low_time = divide - high_time
        no_count = 0
    # No division
    else:
        w_edge = 0
        no_count = 1
        high_time = 1
        low_time = 1
    # Truncate to the 6-bit register fields.
    high_time &= 0x3F
    low_time &= 0x3F
    # Assemble the final clock registers content
    clkregs = low_time
    clkregs |= high_time << 6
    clkregs |= phase_mux << 13
    clkregs |= delay_tim << 16
    clkregs |= no_count << 22
    clkregs |= w_edge << 23
    # Reverse the binary string so bit 0 (LSB) is the first character.
    clkregs = "{:032b}".format(clkregs)[::-1]
    return clkregs | 0835523879b824638b710577477ec128aaef892c | 47,253 |
def numberWithCommas(number):
    """
    -----------------------------
    Purpose:
      - Formats a numeric value into a string with comma separating thousands place
    Arguments:
      - number: the numeric value to be formatted
    Return Value:
      - Returns a formatted string with two decimal places, e.g. 1234.5 -> '1,234.50'
    """
    return "{:,.2f}".format(number)
from pathlib import Path
from textwrap import dedent
def _default_config(config_dir=None):
    """Default configuration Python file, with a plugin placeholder.

    :param config_dir: base directory for the config (defaults to ~/.phy);
        must support the ``/`` operator, i.e. a ``pathlib.Path``.
    :return: the default config file contents as a string, with the
        plugins directory baked into ``c.Plugins.dirs``.
    """
    if not config_dir:  # pragma: no cover
        config_dir = Path.home() / '.phy'
    # Plugins live in a 'plugins' subfolder of the config directory.
    path = config_dir / 'plugins'
    return dedent("""
    # You can also put your plugins in ~/.phy/plugins/.
    from phy import IPlugin
    # Plugin example:
    #
    # class MyPlugin(IPlugin):
    #     def attach_to_cli(self, cli):
    #         # you can create phy subcommands here with click
    #         pass
    c = get_config()
    c.Plugins.dirs = [r'{}']
    """.format(path)) | bfce2eda98682734d9295e5a491c2946062d5f0e | 47,255 |
import re
def find_failing_lines(exception_message):
    """Parse which lines are failing and return the line numbers.

    Scans a bytes message for ``row:col`` markers and collects the second
    number of the first marker found on each line.
    """
    pattern = re.compile(br'\d+\:\d+', re.IGNORECASE)
    failing = []
    for raw_line in exception_message.splitlines():
        markers = pattern.findall(raw_line)
        if markers:
            # Unpack the two numbers of the first marker; keep the second.
            _first, second = re.findall(br'\d+', markers[0])
            failing.append(int(second))
    return failing
def _totalWords(dataset, index):
"""
Given a dataset, compute the total number of words at the given index.
GIVEN:
dataset (list) list of lists, where each sublist is a document
index (int) index in dataset to count words
RETURN:
total_words (int) total number of words in the dataset
"""
total_words = 0
for d in dataset:
words = d[index].split(" ")
total_words += len(words)
return total_words | 10ad7f04da68310a5c0321c62203e6fc1a6e8cc7 | 47,258 |
import os
def collect_all_classes(path):
    """Collects all class names from the given folder. The class names must be folders in the given folder."""
    # Each directory entry is treated as one class name.
    return list(os.listdir(path))
def GetDeferGroups(env):
    """Returns the dict of defer groups from the root defer environment.
    Args:
      env: Environment context.
    Returns:
      The dict of defer groups from the root defer environment.
    """
    root_env = env.GetDeferRoot()
    return root_env['_DEFER_GROUPS']
def nzds(M, fns, word):
    """Computes the Non Zero Dimensions Score for @word.
    Computes the count of total unique cooccurences for the given word divided by the total of words.
    The result is the fraction of the words that @word stands in cooccurence with.
    """
    row = M[fns.index(word)]
    # Number of non-zero entries in the word's context vector.
    nonzero_count = len(row.nonzero()[0])
    return nonzero_count / len(fns)
import functools
def gen_to_list(func):
    """
    Transforms a function that would return a generator into a function that
    returns a list of the generated values, ergo, do not use this decorator
    with infinite generators.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Drain the generator eagerly into a list.
        return [item for item in func(*args, **kwargs)]
    return wrapper
import argparse
def ParseArguments():
    """Parse arguments.

    Builds the 'normalize' sub-command plus a family of 'check_*'
    sub-commands; each check_* command takes a single positional file
    argument, so they are declared from a table instead of five nearly
    identical copy-pasted stanzas.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(
        description='Helper functions for end to end tests.')
    subparsers = parser.add_subparsers(
        title='commands', description='valid sub-commands', dest='command')
    normalize = subparsers.add_parser(
        'normalize', help='Helps normalize a GCS URL')
    normalize.add_argument('url', type=str, help='the GCS URL to normalize')
    # (command name, positional arg name, command help, arg help)
    check_commands = (
        ('check_stamp', 'stamp',
         'Checks the content of a GiftStick Stamp file',
         'the stamp.json file to check'),
        ('check_system_info', 'system_info',
         'Checks the content of the system_info.txt file',
         'the system_info.txt file to check'),
        ('check_lsblk', 'lsblk',
         'Checks the content of the lsblk.txt file',
         'the lsblk.txt file to check'),
        ('check_hash', 'hash',
         'Checks the content of the sdb.hash file',
         'the sdb.hash file to check'),
        ('check_udevadm', 'udevadm',
         'Checks the content of the sdb.udevadm.txt file',
         'the sdb.udevadm.txt file to check'),
    )
    for name, arg_name, cmd_help, arg_help in check_commands:
        sub = subparsers.add_parser(name, help=cmd_help)
        sub.add_argument(arg_name, type=str, help=arg_help)
    return parser.parse_args()
def ordenar(datos):
    """Sort a dictionary according to a criterion chosen interactively
    from a menu printed to stdout: by name, by first grade, by second
    grade, or by the total of both grades.

    :param datos: dict mapping a name to an indexable sequence of grades
    :return: a new dict with entries in the chosen order
    """
    # Print the menu of sorting criteria (user-facing text is Spanish).
    def print_menu():
        print('elija un criterio para ordenar')
        print(' 0: por nombre')
        print(' 1: por la nota de la evaluacion 1')
        print(' 2: por la nota de la evaluacion 2')
        print(' 3: por la suma de ambas evaluaciones')
    # Convert to a list of items, sort it, and rebuild a dictionary.
    # num == -1 means "sort by name" (plain tuple sort on the items).
    def ordenar(datos, num):
        lista = list(datos.items())
        if num != -1:
            # num == 2 is the "total" column; a value of -1 there flags
            # that the total has not been computed yet (warn, then sort anyway).
            if num == 2 and lista[0][1][num] == -1:
                print('--! Estas tratando de ordenar por el total, pero aun no fue calculado !--')
            lista.sort(key = lambda lista: lista[1][num])
        else:
            lista.sort()
        return dict(lista)
    print_menu()
    # Menu options are offset by one relative to the grade indices.
    return ordenar( datos, int(input()) - 1 ) | 6c5b1b74707b3affaf709fe21c26f53e7a5ebd8c | 47,265 |
import random
import math
def exponentielle(lx):
    """
    Simulate an exponential law with parameter :math:`\\lambda` via
    inverse-transform sampling of a uniform draw.
    """
    draw = random.random()
    return - 1.0 / lx * math.log(1.0 - draw)
def objects_init_params(fake_request):
    """Init parameters for Icinga2Objects."""
    result_entry = {
        "name": "objectname",
        "type": "Objecttype",
        "attrs": {"state": 1, "host_name": "Hostname"},
    }
    return {"results": (result_entry,), "request": fake_request}
def indexend(k):
    """ Return end index in papers (e.g. 0 or 1-based)

    Identity mapping: the end index is used unchanged (presumably an
    ``indexstart`` counterpart applies any offset -- TODO confirm).
    """
    return k | 36c365c42f25f58dd2b3e700113a72f2f17f3c1c | 47,270 |
def calcUserMeanRating(userRatingGroup):
    """Calculate the average rating of a user.

    Takes a (userID, ratings) pair where ratings is a sequence whose
    entries carry the rating at index 1; returns (userID, mean rating),
    or (userID, 0.0) when there are no ratings.
    """
    userID, ratings = userRatingGroup[0], userRatingGroup[1]
    if len(ratings) == 0:
        return (userID, 0.0)
    total = 0.0
    for entry in ratings:
        total += entry[1]
    return (userID, 1.0 * total / len(ratings))
import shlex
import subprocess
def run_annovar(annovar_path, input_vcf_path, output_csv_path):
    """Run Annovar as subprocess

    Launches ``table_annovar.pl`` (via ``sudo perl``) on the given VCF and
    returns immediately -- the Popen handle is never waited on, so the
    annotation may still be running when this function returns.

    NOTE(review): the command is built from raw path strings and runs under
    sudo; confirm callers fully control these values (untrusted input here
    would be security-sensitive).

    :param annovar_path: directory containing table_annovar.pl and humandb/
    :param input_vcf_path: path of the input VCF file
    :param output_csv_path: directory/prefix for the generated CSV output
    :return: a status string (not the subprocess result)
    """
    # Derive the output basename from the VCF filename (without extension).
    if '/' in input_vcf_path:
        output_name = input_vcf_path.split('/')[-1].split('.')[0]
    else:
        output_name = input_vcf_path.split('.')[0]
    output_csv_path_name = output_csv_path + output_name
    args_str = "sudo perl " + annovar_path + "table_annovar.pl " + input_vcf_path + " " + annovar_path + "humandb/ -buildver hg19 -out " + output_csv_path_name + " -remove -protocol knownGene,tfbsConsSites,cytoBand,targetScanS,genomicSuperDups,gwasCatalog,esp6500siv2_all,1000g2015aug_all,snp138,ljb26_all,cg46,cg69,popfreq_all,clinvar_20140929,cosmic70,nci60 -operation g,r,r,r,r,r,f,f,f,f,f,f,f,f,f,f -nastring . -vcfinput -csvout"
    args = shlex.split(args_str)
    # Fire-and-forget: the process handle is intentionally not awaited.
    p = subprocess.Popen(args, stdout=subprocess.PIPE)
    #run_handler(output_csv_path)
    return 'Finished running ANNOVAR on {}'.format(input_vcf_path) | ed4fbee84a8d9d502b6a54ce64e3c84de75c8562 | 47,272 |
def quicksort(l):
    """Sort list using quick sort.
    Complexity: O(n log n). Worst: O(n2)
    @param l list to sort.
    @returns sorted list.
    """
    if len(l) <= 1:
        return l
    pivot = l[0]
    # Three-way partition around the pivot.
    less = [e for e in l if e < pivot]
    equal = [e for e in l if e == pivot]
    greater = [e for e in l if e > pivot]
    return quicksort(less) + equal + quicksort(greater)
def parse_video_time_format(s, fps=30.):
    """Parse a 'MM:SS.f' timestamp into seconds (f is a frame number)."""
    minutes_part, rest = s.split(":")
    seconds_part, frames_part = rest.split(".")
    m = float(minutes_part)
    sec = float(seconds_part)
    frames = float(frames_part)
    # Frames contribute 1/fps seconds each.
    return m * 60. + sec + 1. / fps * frames
def subset(ds, X=None, Y=None):
    """Subset model output horizontally using isel, properly accounting for horizontal grids.
    Inputs
    ------
    ds: xarray Dataset
        Dataset of ROMS model output. Assumes that full regular grid setup is
        available and has been read in using xroms so that dimension names
        have been updated.
    X: slice, optional
        Slice in X dimension using form `X=slice(start, stop, step)`. For example,
        >>> X=slice(20,40,2)
        Indices are used for rho grid, and psi grid is reduced accordingly.
        Open-ended slices (start and/or stop of None) are allowed.
    Y: slice, optional
        Slice in Y dimension using form `Y=slice(start, stop, step)`. For example,
        >>> Y=slice(20,40,2)
        Indices are used for rho grid, and psi grid is reduced accordingly.
        Open-ended slices (start and/or stop of None) are allowed.
    Returns
    -------
    Dataset with form as if model had been run at the subsetted size. That is, the outermost
    cells of the rho grid are like ghost cells and the psi grid is one inward from this size
    in each direction.
    Notes
    -----
    X and Y must be slices, not single numbers.
    Example usage
    -------------
    Subset only in Y direction:
    >>> xroms.subset(ds, Y=slice(50,100))
    Subset in X and Y:
    >>> xroms.subset(ds, X=slice(20,40), Y=slice(50,100))
    """
    if X is not None:
        assert isinstance(X, slice), "X must be a slice, e.g., slice(50,100)"
        # The u grid has one fewer point than the rho grid, so shrink the
        # stop index by one; keep open-ended slices open instead of crashing
        # on None - 1 (the previous version raised TypeError for stop=None).
        u_stop = X.stop - 1 if X.stop is not None else None
        ds = ds.isel(xi_rho=X, xi_u=slice(X.start, u_stop))
    if Y is not None:
        assert isinstance(Y, slice), "Y must be a slice, e.g., slice(50,100)"
        v_stop = Y.stop - 1 if Y.stop is not None else None
        ds = ds.isel(eta_rho=Y, eta_v=slice(Y.start, v_stop))
    return ds
def person_path_weight():
    """
    <enumeratedValueSet variable="person_path_weight"> <value value="2"/> </enumeratedValueSet>
    Float from 0.0 to 2.0 in steps of 0.1
    """
    weight = 2.0
    return ('<enumeratedValueSet variable="person_path_weight">'
            f' <value value="{weight}"/> </enumeratedValueSet>')
def contains_sep(name):
    """ Test if name contains a mode name, e.g. TEM, ITG, ETG"""
    for mode in ("TEM", "ITG", "ETG"):
        if mode in name:
            return True
    return False
def get_smallest_divs(soup):
    """Return the smallest (i.e. innermost, un-nested) `div` HTML tags."""
    smallest = []
    for div in soup.find_all("div"):
        # Keep only divs with no nested div and non-whitespace text.
        if not div.find("div") and div.text.strip():
            smallest.append(div)
    return smallest
def get_dict_values(dicts, keys, return_dict=False):
    """Get values from `dicts` specified by `keys`.
    When `return_dict` is True, return values are in dictionary format.
    Parameters
    ----------
    dicts : dict
    keys : list
    return_dict : bool
    Returns
    -------
    dict or list
    Examples
    --------
    >>> get_dict_values({"a":1,"b":2,"c":3}, ["b"])
    [2]
    >>> get_dict_values({"a":1,"b":2,"c":3}, ["b", "d"], True)
    {'b': 2}
    """
    # Missing keys are silently skipped.
    selected = {key: dicts[key] for key in keys if key in dicts}
    # Note: the identity check is deliberate -- only the literal False
    # triggers list output.
    if return_dict is False:
        return list(selected.values())
    return selected
def triangulate(poly):
    """
    This function return the triangulated given polygon mesh
    @type poly: hostApp object mesh
    @param poly: the hostApp obj to be decompose
    @rtype: hostApp object mesh
    @return: triangulated polygon mesh

    NOTE(review): placeholder implementation -- no triangulation is
    performed yet; the input mesh is returned unchanged.
    """
    #put the host code here
    return poly | 0f40d1867f87c62db34af2ce952c553bb31c44b4 | 47,283 |
def testInjResults(clip):
"""
Look at what was found and determine if it matches the input.
"""
pdiff=1 - (clip.inject.period_days / clip.trapFit.period_days)
if pdiff < .01:
clip.inject['wasFound'] = True
else:
clip.inject['wasFound'] = False
return clip | 062d4620bad30c24a1bca7a63a75ca6582e6702a | 47,284 |
def simplified_tag(t):
    """
    Returns a simplified POS tag:
      NP-SBJ -> NP
      PP=4   -> PP
      -RRB-  -> -RRB-
    """
    if t is None:
        return None
    # Bracket tags like -RRB- start with '-' and are kept verbatim.
    if t[0:1] == "-":
        return t
    # Strip everything from the first '-' and then from the first '='.
    without_dash = t.partition("-")[0]
    return without_dash.partition("=")[0]
import json
def load_metadata(metadata_path: str='/autograder/submission_metadata.json'):
    """Load JSON data from metadata file.

    Uses a context manager so the file handle is closed deterministically
    (the previous version leaked the handle returned by open()).

    :param metadata_path: path of the JSON metadata file
    :return: the parsed JSON content
    """
    with open(metadata_path) as metadata_file:
        return json.load(metadata_file)
from datetime import datetime
def datetimefstr(dtime_list, dtformat):
    """
    converts a datetime (as one or several string elements of a list) to
    a datetimeobject
    removes "used" elements of list
    :returns: a datetime
    :rtype: datetime.datetime
    """
    # The number of space-separated pieces in the format tells us how many
    # list elements the timestamp occupies.
    parts = dtformat.count(' ') + 1
    dtstart = datetime.strptime(' '.join(dtime_list[:parts]), dtformat)
    # Consume the parsed elements in place so the caller sees the remainder.
    del dtime_list[:parts]
    return dtstart
def to_usd(my_price):
    """
    Converts a float or int to to usd format.
    Param: my_price (any int or float) like 14 or 14.849285
    """
    return "${:,.2f}".format(my_price)
def _windows_extract_compute_hws(winsize):
"""This returns the interval [i-hws1:i+hws1] suitable for indexing"""
if winsize % 2 == 1:
hws1 = (winsize - 1) // 2
hws2 = hws1 + 1
else:
hws1 = winsize // 2
hws2 = hws1
assert hws1 + hws2 == winsize
return hws1, hws2 | bb75d9a960893317dd9715cb9489e820694f59f3 | 47,291 |
def first_non_null(*args):
    """return the first non null value in the arguments supplied"""
    # "Null" here means the empty string; other falsy values pass through.
    return next((value for value in args if value != ''), '')
def limit_vector(vector, bottom_limit, upper_limit):
    """
    Keep only the values of ``vector`` lying within [bottom_limit, upper_limit].
    Parameters
    ----------
    vector : list
        The vector that will be cut
    upper_limit : float
        The maximum value of the vector.
    bottom_limit : float
        The minimum value of the vector
    Returns
    -------
    vector : iterable
        The limited vector
    """
    return [value for value in vector if bottom_limit <= value <= upper_limit]
import json
def xml_to_json(element, tag, prefix=''):
    """Converts a layer of xml to a json string. Handles multiple instances of the
    specified tag and any schema prefix which they may have.
    Args:
        element (:obj:`xml.etree.ElementTree.Element`): xml element containing the data
        tag (str): target tag
        prefix (str): schema prefix on the name of the tag
    Returns:
        (str): json of the original object with the supplied tag as the key and a list
        of all instances of this tag as the value.
    """
    tag = ''.join([prefix, tag])
    # Element.getiterator() was removed in Python 3.9; Element.iter() is the
    # long-standing equivalent and behaves identically here.
    all_data = [{field.tag[len(prefix):]: field.text
                 for field in fields.iter()
                 if field.tag != tag}
                for fields in element.iter(tag)]
    return json.dumps(all_data)
def n_hits_filter(frame, reco_pulse_series_names, n_hits=8):
    """filter out frames with fewer than `n_hits` hits"""
    total_hits = 0
    for series_name in reco_pulse_series_names:
        pulses = frame[series_name]
        try:
            # Some pulse maps must be unpacked via frame.apply first.
            pulses = frame.apply(pulses)
        except AttributeError:
            pass
        for series in pulses.values():
            total_hits += len(series)
    print(f"found {total_hits} hits")
    return total_hits >= n_hits
def _to_http_uri(s: str) -> str:
"""Prefix the string with 'http://' if there is no schema."""
if not s.startswith(('http://', 'https://')):
return 'http://' + s
return s | 5ad67d12bbfbca13143dfbacd4ea96f53b9919e9 | 47,297 |
def _is_iterable(val):
"""
Checks if the input if an iterable. This function will return False if a
string is passed due to its use in pmutt.
Parameters
----------
val : iterable or non-iterable
Value to check if iterable
Returns
-------
is_iterable : bool
True if iterable. False if not iterable or string.
"""
if isinstance(val, str):
return False
else:
# If it's not a string, check if it's iterable
try:
iter(val)
except TypeError:
return False
else:
return True | 34f140c9bc6fce9f06f05c8b3a5aa1aabe3df840 | 47,300 |
def clean_locals(params):
    """
    Clean up locals dict, remove empty and self params.
    :param params: locals dicts from a function.
    :type params: dict
    :returns: cleaned locals dict to use as params for functions
    :rtype: dict
    """
    return {key: value for key, value in params.items()
            if value is not None and key != 'self'}
def get_state_dict(task):
    """
    Build a UI status dict for a given (Celery-style) task.

    :param task: task object exposing ``state`` and, optionally, ``info``
        (a dict with 'status', 'current' and 'total' entries)
    :return: dict with keys state, class, loading, message, current, total
        and classsuffix, suitable for rendering a progress widget
    """
    # Characters stripped from the status message to avoid markup injection.
    no_chars = str.maketrans("", "", "<>/()[]")
    response = {
        "state": task.state,
        "class": "",
        "loading": "Loading...",
        "message": "No status message probably an error",
        "current": 0,
        "total": 1,
    }
    loadingd = {
        "SUCCESS": "Done!",
        "FAILURE": "Failed."
    }
    # CSS class suffix per Celery state. BUGFIX: the failed state is named
    # "FAILURE"; the previous "FAILED" key never matched, so failed tasks
    # silently fell back to the empty suffix.
    classsuffixdict = {
        "PENDING": "-info",
        "RECEIVED": "-info",
        "STARTED": "-info",
        "PROGRESS": "-default",
        "WARNING": "-warning",
        "SUCCESS": "-success",
        "FAILURE": "-danger",
    }
    msgdict = {
        "PENDING": "Pending...",
        "RECEIVED": "Received by processing server...",
        "STARTED": "Started...",
    }
    if hasattr(task, 'info') and type(task.info) is dict:
        # Keep only the last ':'-separated piece of the sanitized status text.
        response['message'] = str(task.info.get('status', "").translate(no_chars).split(":")[-1].strip())
        response['current'] = task.info.get("current", 0)
        response['total'] = task.info.get("total", 1)
    # State-specific boilerplate overrides the generic values where defined.
    response['message'] = msgdict.get(task.state, response['message'])
    response['classsuffix'] = classsuffixdict.get(task.state, "")
    response['loading'] = loadingd.get(task.state, response['loading'])
    return response
def filter_instruction_forms(instruction_forms):
    """Removes the instruction forms that are currently not supported"""
    # Operand types the pipeline cannot handle yet.
    unsupported = {"r8l", "r16l", "r32l", "moffs32", "moffs64"}
    return [form for form in instruction_forms
            if not any(operand.type in unsupported for operand in form.operands)]
def Make_a_Frame(s_to_frame, i_max_lenght):
    """Create a frame for a string and place it in the middle
    to get a clean display of values

    :param s_to_frame: text to centre inside the frame
    :param i_max_lenght: inner width the framed line should match
    :return: the text padded with spaces and wrapped in '|' characters

    NOTE(review): the parity check below measures a '_'-padded variant of
    the line while the returned line pads with spaces -- presumably
    intentional since both paddings have equal length; confirm.
    """
    i_positioner = int((i_max_lenght - len(s_to_frame)) / 2)
    #prevent misalignments when strings are sized differently
    if (len("|" + "_" * i_positioner + s_to_frame + "_" * i_positioner + "|") % 2) != 0:
        return "|" + " " * i_positioner + s_to_frame + " " * (i_positioner - 1) + "|"
    return "|" + " " * i_positioner + s_to_frame + " " * i_positioner + "|" | a613711c6e2ba7f3f5460cbf2d2789c31b9bf632 | 47,305 |
import os
import yaml
def generate_std_options_file(outdir: str,
                              filename: str = '',
                              save=True):
    """
    Generates a standard options-.yaml file, where the user can specify
    her/his preferred microscopy/experiment settings.

    outdir: str
        directory where the options.yaml file will be saved.
    filename: str
        add-ons for the filename: options_ADD_ON_FILENAME.yaml
    save: bool
        when True, the settings dict is also written to
        ``<outdir>/options<filename>.yaml``; when False, it is only returned.

    Returns the dict of default settings in both cases.
    """
    # Default processing settings; paths are rooted in outdir.
    user_setting = {'1_czidir': os.path.join(outdir,"dataXYZ-CYC{:02}.czi"),
                    '1_outdir': outdir,
                    '1_channelnames_dir': os.path.join(outdir,"channelnames.txt"),
                    '1_overwrite_exposure_times': False,
                    '1_out_template': "1_{m:05}_Z{z:03}_CH{c:03}",
                    'codex_instrument': "CODEX instrument",
                    'tilingMode': "gridrows",
                    'referenceCycle': 2,
                    'referenceChannel': 1,
                    'numSubTiles': 1,
                    'deconvolutionIterations': 25,
                    'deconvolutionModel': "vectorial",
                    'useBackgroundSubtraction': True,
                    'useDeconvolution': True,
                    'useExtendedDepthOfField': True,
                    'useShadingCorrection': True,
                    'use3dDriftCompensation': True,
                    'useBleachMinimizingCrop': False,
                    'useBlindDeconvolution': False,
                    'useDiagnosticMode': False,
                    'num_z_planes': 1,
                    'tile_width_minus_overlap': None,
                    'tile_height_minus_overlap': None,
                    'wavelengths': [1,2,3,4]}
    # Write YAML file
    if save:
        with open(os.path.join(outdir, 'options' + filename + '.yaml'), 'w',
                  encoding='utf-8') as yaml_file:
            yaml.dump(user_setting, yaml_file)
        print("...finished generating the standard options.yaml file. \n"
              "Saved in "
              f"{os.path.join(outdir,'options' + filename + '.yaml')}")
    # json.dump(user_setting, json_file, ensure_ascii=False, indent=4)
    return user_setting | 69a4e8084648a0e66052ada5e60ef52cb710ac6a | 47,306 |
def class_counts(rows, label):
    """
    find the frequency of items for each class in a dataset.
    PARAMETERS
    ==========
    rows: list
        A list of lists to store the rows whose
        predictions is to be determined.
    label: integer
        The index of the last column
    RETURNS
    =======
    counts
        A dictionary of predictions
    """
    counts = {}  # maps label -> occurrence count
    for row in rows:
        # In this dataset format the class label lives at the given column.
        lbl = row[label]
        counts[lbl] = counts.get(lbl, 0) + 1
    return counts
import json
def extract_data_request(request):
    """
    Extract resource, category and query from a request.
    Arguments:
        request {Request} -- Django request
    Returns a (resource, category, query-dict) tuple for GET requests,
    None for any other method.
    """
    if request.method != 'GET':
        return None
    # 'query' is optional and defaults to an empty JSON object.
    return request.GET['resource'], request.GET['category'], json.loads(request.GET.get('query', '{}'))
import random
import pathlib
def _random_name() -> str:
    """ Pick (hopefully unique) random name for a machine
    credit for the words_alpha.txt file https://github.com/dwyl/english-words

    Reads the word list bundled next to this module and returns one random
    word; the trailing newline is stripped via ``[:-1]``.
    """
    words_path = pathlib.Path(__file__).parent / "words_alpha.txt"
    # Use a context manager so the file handle is closed (the previous
    # version left the handle from open() dangling).
    with open(words_path) as word_file:
        words = list(word_file)
    return random.choice(words)[:-1]
def dict_drop(my_dict, keys):
    """
    You've guessed it right - returns a new dictionary with `keys`
    removed from `my_dict`
    """
    # Wrap a single key (e.g. a bare string) so the filter below works.
    drop = keys if isinstance(keys, (list, tuple)) else [keys]
    return {key: value for key, value in my_dict.items() if key not in drop}
def iterate_array(client, url, http_method='GET', limit=100, offset=0, params=None):
    """
    Fetch all objects from a paginated Podio API endpoint and return them
    as a single list. (The old docstring claimed a generator was provided,
    but the function has always materialized the full result.)

    e.g. to read all the items of one app use:
        url = 'https://api.podio.com/comment/item/{}/'.format(item_id)
        for item in iterate_array(client, url, 'GET'):
            print(item)

    :param client: HTTP client exposing .get(url, params=...) and
        .post(url, data=...); responses must have .status_code and .json()
    :param url: endpoint URL
    :param http_method: 'GET' or 'POST'
    :param limit: page size requested per call
    :param offset: starting offset
    :param params: optional dict of extra request parameters; mutated to
        carry 'limit' and 'offset'
    :raises Exception: on an unsupported method or a non-200 response
    :return: list of all fetched objects
    """
    all_elements = []
    if params is None:
        params = dict(limit=limit, offset=offset)
    else:
        params['limit'] = limit
        params['offset'] = offset
    while True:
        if http_method == 'POST':
            api_resp = client.post(url, data=params)
        elif http_method == 'GET':
            api_resp = client.get(url, params=params)
        else:
            raise Exception("Method not supported.")
        if api_resp.status_code != 200:
            raise Exception('Podio API response was bad: {}'.format(api_resp.content))
        page = api_resp.json()
        params['offset'] += limit
        all_elements.extend(page)
        # A short (or empty) page means the last page has been reached.
        if len(page) < limit or len(page) <= 0:
            break
    return all_elements
def get_appliance_info(tintri):
    """Collect basic details about a Tintri appliance.

    Args:
        tintri (obj): Connected Tintri API object.

    Returns:
        dict: Appliance details with 'product' and 'model' keys.
    """
    info = tintri.get_appliance_info()
    if tintri.is_vmstore():
        product = 'Tintri VMstore'
    elif tintri.is_tgc():
        product = 'Tintri Global Center'
    else:
        product = None
    return {'product': product, 'model': info.get('modelName')}
import math
def grid_to_angle(grid_point):
    """
    Convert a 2-D displacement vector into a pair of angles (theta, phi).

    :param grid_point: sequence whose first two entries are the x and y
        displacement components (dv_x, dv_y)
    :return: tuple ``(theta, phi)``; ``(0.0, 0.0)`` for a zero displacement

    NOTE(review): the original docstring described an unrelated thresholding
    function; replaced with what the code actually does.
    """
    out1 = 0.
    out2 = 0.
    # Extract the displacement components
    dv_x = grid_point[0]
    dv_y = grid_point[1]
    # Declare and initialise angles phi and theta
    phi = 0.
    theta = 0.
    # General case: dv_y is non-zero
    if dv_y != 0.0:
        # phi from the component ratio, then shifted into [0, 2*pi)
        phi = math.atan(dv_x / dv_y)
        if dv_y < 0:
            phi = phi + math.pi
        if phi < 0.:
            phi = phi + 2 * math.pi
        # theta derived from dv_y and the already-computed phi
        theta = math.atan(dv_y / math.cos(phi))
        # Store the output angles
        out1 = theta
        out2 = phi
    # Degenerate case: dv_y is zero
    else:
        if dv_x != 0.0:
            # NOTE(review): phi is set to +/- 2*pi here (not pi/2 as one
            # might expect for a pure-x vector), and the "keep in range"
            # correction adds only pi; preserved as-is -- confirm against
            # the algorithm this was ported from.
            if dv_x > 0.:
                phi = math.pi * 2.0
            else:
                phi = -2.0 * math.pi
            # To keep phi between [0; 360]
            if phi < 0.:
                phi = phi + math.pi
            # theta from the magnitude of dv_x
            theta = math.atan(math.fabs(dv_x))
            # Store the output angles
            out1 = theta
            out2 = phi
        else:
            # Zero displacement: both angles default to 0
            out1 = 0.
            out2 = 0.
    return (out1, out2)
def lowest_pending_jobs(backends):
    """Return the available backend with the fewest pending jobs.

    Backends whose status does not report ``available`` as truthy are
    ignored; ties keep the earliest backend in the input order.
    """
    available = [b for b in backends if b.status.get('available', False)]
    ranked = sorted(available, key=lambda b: b.status['pending_jobs'])
    return ranked[0]
def find_last_good_vel(j, n, azipos, vflag, nfilter):
    """
    Look backwards for the last good (i.e. processed) velocity in a slice.

    Parameters:
    ===========
    j: int
        Position in dimension 1
    n: int
        Position in dimension 2
    azipos: array
        Array of available positions in dimension 1
    vflag: ndarray <azimuth, range>
        Flag array (-3: missing, 0: unprocessed, 1: processed, 2: processed and unfolded)
    nfilter: int
        Size of filter (how far back to search).

    Returns:
    ========
    idx_ref: int
        Index of the last valid value in the slice, or -999 when none of
        the ``nfilter`` previous positions holds a processed velocity.
    """
    for step in range(1, nfilter + 1):
        candidate = j - step
        if vflag[azipos[candidate], n] > 0:
            return candidate
    return -999
def to_3types_full(graph, old2new=None, augment_net = True):
    """
    This transformation differs from to_3types in that even non-existent
    node-types receive an id, e.g. if node 2 receives no positive link,
    2in- still occupies a new id. An existing ``old2new`` mapper can be
    supplied to reuse a previous conversion instead of building a new one.

    :param graph: adjacency dict ``graph[u][v] = {'weight': w}``
    :param old2new: convert the graph with the given id map (do not build a new one)
    :param augment_net: unused here; kept for interface compatibility
    :return: (new_graph, new2old, old2new)
    :rtype: (dict, dict, dict)
    """
    new_graph = {}  # new_graph[new id 1][new id 2] = {'weight': w}
    new2old = {}    # new2old[new id] = {id: old id, type: out / in+ / in- / out_dummy}
    old2new = {} if old2new is None else old2new  # old2new[old id][type] = new id
    new_id = 0      # running counter for freshly assigned ids

    if len(old2new) == 0:
        # Assign four new ids (out, in+, in-, out_dummy) to every node seen,
        # whether or not each role ends up used by an edge.
        # (Renamed from `map` in the original, which shadowed the builtin.)
        def _assign_ids(old_id, next_id, old_new, new_old):
            """Map old_id to four typed ids; return the updated counter."""
            if old_id in old_new:
                return next_id
            id_out, id_inp, id_inn, id_dummy = (str(next_id + k) for k in range(4))
            old_new[old_id] = {'out': id_out, 'in+': id_inp,
                               'in-': id_inn, 'out_dummy': id_dummy}
            new_old[id_out] = {"id": old_id, "type": "out"}
            new_old[id_inp] = {"id": old_id, "type": "in+"}
            new_old[id_inn] = {"id": old_id, "type": "in-"}
            new_old[id_dummy] = {"id": old_id, "type": "out_dummy"}
            return next_id + 4

        for node, neighbors in graph.items():
            new_id = _assign_ids(node, new_id, old2new, new2old)
            for neighbor in neighbors:
                new_id = _assign_ids(neighbor, new_id, old2new, new2old)

    def _add_edge(v1, v2, weight, mat):
        """Insert edge (v1, v2) = weight into the adjacency dict mat."""
        mat.setdefault(v1, {})[v2] = {'weight': weight}

    # Build the new graph: positive edges feed in+, negative edges feed in-
    # (with |weight|); the out_dummy node mirrors each edge onto the input
    # of the opposite sign.
    for node, neighbors in graph.items():
        for neighbor, attr in neighbors.items():
            weight = attr['weight']
            if weight > 0:
                _add_edge(old2new[node]["out"], old2new[neighbor]["in+"], weight, new_graph)
                _add_edge(old2new[node]["out_dummy"], old2new[neighbor]["in-"], weight, new_graph)
            elif weight < 0:
                _add_edge(old2new[node]["out"], old2new[neighbor]["in-"], -weight, new_graph)
                _add_edge(old2new[node]["out_dummy"], old2new[neighbor]["in+"], -weight, new_graph)
    return new_graph, new2old, old2new
def key_type(secret):
    """Return the keytype string for the given ``secret`` flag.

    Possible values returned are: `secret`, `public`.

    .. Usage::

        >>> key_type(secret=True)
        'secret'
        >>> key_type(secret=False)
        'public'
    """
    if secret:
        return "secret"
    return "public"
def case(text, casingformat='sentence'):
    """
    Change the casing of some text.

    :type text: string
    :param text: The text to change the casing of.

    :type casingformat: string
    :param casingformat: The format of casing to apply to the text.
        Can be 'uppercase', 'lowercase', 'sentence' or 'caterpillar'
        (case-insensitive).

    :raises ValueError: Invalid text format specified.

    >>> case("HELLO world", "uppercase")
    'HELLO WORLD'
    """
    # Normalise the format once instead of lowercasing it in every branch.
    fmt = casingformat.lower()
    if fmt == 'uppercase':
        return str(text.upper())
    if fmt == 'lowercase':
        return str(text.lower())
    if fmt == 'sentence':
        # Guard against empty input, which used to raise IndexError.
        if not text:
            return str(text)
        return str(text[0].upper()) + str(text[1:])
    if fmt == 'caterpillar':
        return str(text.lower().replace(" ", "_"))
    raise ValueError("Invalid text format specified.")
import os
def path(filename):
    """
    Return the path of the given filename, with trailing separator.

    Parameters
    ----------
    filename : :class:`str`
        Name of the file (may contain absolute path and extension) the
        path should be returned for.

    Returns
    -------
    path : :class:`str`
        Path corresponding to the filename provided as input, always
        ending in the OS path separator.
    """
    directory, _ = os.path.split(filename)
    return directory + os.sep
def get_coords(ds, topology="PPN",):
    """
    Construct the coords dict for ds to be passed to xgcm.Grid.

    Flat dimensions (F) are treated the same as Periodic (P) ones and map
    F-points to 'left'; any other topology letter marks the dimension as
    non-periodic, mapping F-points to 'outer'.

    Note: ``ds`` is currently unused but kept for interface stability.
    The original contained two dead scalar assignments to per/nper that
    were immediately overwritten; they have been removed.
    """
    per = {dim: dict(left=f"{dim}F", center=f"{dim}C") for dim in "xyz"}
    nper = {dim: dict(outer=f"{dim}F", center=f"{dim}C") for dim in "xyz"}
    coords = {dim: per[dim] if top in "FP" else nper[dim]
              for dim, top in zip("xyz", topology)}
    return coords
def train_batch(player, X_list, y_list, winners, lr):
    """Given the outcomes of a mini-batch of play against a fixed opponent,
    update the weights with reinforcement learning.

    Args:
        player -- player object with policy weights to be updated
        X_list -- List of one-hot encoded states.
        y_list -- List of one-hot encoded actions (to pair with X_list).
        winners -- List of winners corresponding to each item in
            training_pairs_list
        lr -- Keras learning rate
    Return:
        player -- same player, with updated weights.
    """
    for X, y, winner in zip(X_list, y_list, winners):
        # Update weights in + direction if player won, and - direction if player lost.
        # Setting learning rate negative is hack for negative weights update.
        # NOTE(review): winner == -1 is treated as a loss for `player`; any
        # other value (including draws) uses the positive rate -- confirm.
        if winner == -1:
            player.policy.model.optimizer.lr.set_value(-lr)
        else:
            player.policy.model.optimizer.lr.set_value(lr)
        # `nb_epoch` is the legacy Keras 1.x argument name (now `epochs`).
        player.policy.model.fit(X, y, nb_epoch=1, batch_size=len(X))
    return player
def get_pad(shape, *, to):
    """Compute per-dimension padding that grows ``shape`` to ``to``.

    Details
    -------
    Pairs are emitted starting from the last dimension, matching the
    argument order expected by ``torch.nn.functional.pad``. Padding is
    split symmetrically; when an odd amount is required the smaller half
    goes first (leaning towards the origin).
    """
    deficits = [target - size for size, target in zip(shape, to)]
    assert all(d >= 0 for d in deficits)  # target must not shrink any dim
    pad = []
    # start from the last dimension and move forward (see `F.pad`)
    for deficit in reversed(deficits):
        before = deficit // 2
        pad.extend((before, deficit - before))
    return pad
def comparator(a, b, rel_tol=1e-3, abs_tol=1e-3, verbose=False):
    """
    Check that ``a`` and ``b`` agree within BOTH an absolute and a
    relative tolerance.

    The relative difference is measured against ``b`` (which must be
    non-zero). NOTE(review): the original divides by ``b`` rather than
    ``abs(b)``, so a negative ``b`` makes the relative check vacuous;
    behavior preserved -- confirm intent.

    Returns:
        bool: True when |a-b| <= abs_tol and |a-b|/b <= rel_tol.
    """
    abs_diff = abs(a - b)
    rel_diff = abs(a - b) / b
    if verbose:
        print('\n Comparing a=', a, ' b=', b)
        print(' abs_diff = ', abs_diff)
        print(' rel_diff = ', rel_diff)
    # Replaces the original's mutable `value` flag and `== True` test.
    return abs_diff <= abs_tol and rel_diff <= rel_tol
def get_auth_token(cloud, account, username, password, expected_status_code=None):
    """
    Get an auth token from a user login.

    :param cloud: Cloud API object
    :param account: Account id
    :param username: User id
    :param password: Password
    :param expected_status_code: Asserted inside ``authenticate_user``
    :return: Auth token string extracted from the JSON response
    """
    response = cloud.iam.authenticate_user(
        account=account,
        username=username,
        password=password,
        expected_status_code=expected_status_code,
    )
    return response.json()['token']
def check_fields_valid(passport, validity_checks):
    """Run each validity check against its passport field; True iff all pass."""
    for field, check in validity_checks.items():
        if not check(passport[field]):
            return False
    return True
import re
def format_text(text, max_len, prefix="", min_indent=None):
    """
    Greedily wrap a text into the longest lines allowed by a maximum
    length, with all continuation lines indented to the prefix width.

    Args:
        text (`str`): The text to format (whitespace is collapsed first).
        max_len (`int`): The maximum length per line to use.
        prefix (`str`, *optional*, defaults to `""`): A prefix added to the
            first line. The prefix doesn't count toward the indent (like a
            - introducing a list).
        min_indent (`int`, *optional*): The minimum indent of the text.
            If not set, will default to the length of the `prefix`.

    Returns:
        `str`: The formatted text.
    """
    collapsed = re.sub(r"\s+", " ", text).strip()
    if min_indent is not None and len(prefix) < min_indent:
        # left-pad the prefix so the text starts at min_indent
        prefix = " " * (min_indent - len(prefix)) + prefix
    indent = " " * len(prefix)
    words = collapsed.split(" ")
    lines = []
    line = f"{prefix}{words[0]}"
    for word in words[1:]:
        candidate = f"{line} {word}"
        if len(candidate) > max_len:
            lines.append(line)
            line = f"{indent}{word}"
        else:
            line = candidate
    lines.append(line)
    return "\n".join(lines)
import hashlib
import json
import base64
def hash_dict(obj):
    """
    Hash the JSON representation of ``obj`` using SHA-256.

    Keys are sorted before serialization, so logically-equal dicts hash
    identically regardless of insertion order.

    @param obj: dict to be hashed
    @return: base64-encoded SHA-256 hash of the dict, as str
    """
    canonical = json.dumps(obj, sort_keys=True).encode('utf-8')
    digest = hashlib.sha256(canonical).digest()
    return base64.b64encode(digest).decode('utf-8')
def add(coord1, coord2):
    """
    A 'smart' tuple adder: element-wise sum that validates the result
    stays on the chessboard. Returns None when any component leaves 0..7.
    """
    summed = tuple(coord1[i] + coord2[i] for i in range(len(coord1)))
    if any(not 0 <= value <= 7 for value in summed):
        return None
    return summed
def singleton(my_class):
    """
    Class decorator realizing the Singleton pattern: the returned callable
    constructs the instance once and hands back the same object thereafter.
    """
    cache = {}

    def get_instance():
        if my_class not in cache:
            cache[my_class] = my_class()
        return cache[my_class]

    return get_instance
def determinant(tup):
    """Calculate the determinant of a 3x3 matrix.

    Args:
        tup (tuple): row-major entries (m11, m12, m13, m21, m22, m23, m31, m32, m33)
    Returns:
        det (int): the determinant, via cofactor expansion along row 1
    """
    a, b, c, d, e, f, g, h, i = tup
    return (a * (e * i - f * h)
            - b * (d * i - f * g)
            + c * (d * h - e * g))
def string_strip(lst):
    """Apply ``strip`` to each string in an iterable of strings.

    ARGS:
        lst (list): an iterable containing strings
    Returns a new list; the input is left untouched.
    """
    return [item.strip() for item in lst]
def octal(value):
    """
    Interpret ``value`` (an integer or a string containing an integer) as
    base-8 and return the resulting int. Raises a ValueError if the value
    cannot be converted.
    """
    as_text = str(value)
    return int(as_text, 8)
def execute(opts, data, func, args, kwargs):
    """
    Directly call ``func`` with the given positional and keyword arguments.

    ``opts`` and ``data`` are accepted for interface compatibility but are
    not used by this executor.
    """
    result = func(*args, **kwargs)
    return result
def get_region_total_dispatch(m, region_id, trade_type):
    """
    Compute the total dispatch for the given trade type within a region,
    by summing the trader total-offer variables whose trader belongs to
    ``region_id``.

    NOTE(review): the original docstring claimed the result is "rounded to
    two decimal places", but no rounding is performed here -- confirm
    whether callers expect a rounded value.
    """
    total = sum(m.V_TRADER_TOTAL_OFFER[i, j].value
                for i, j in m.S_TRADER_OFFERS
                if (j == trade_type) and (m.P_TRADER_REGION[i] == region_id))

    return total
def fib(n):
    """
    Return the nth Fibonacci number (fib(0) == 0, fib(1) == 1).

    Computed iteratively rather than by double recursion.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
import struct
def _pack(keyparts):
    """
    Pack parts into an SSH key blob.

    Each part is emitted as a 4-byte big-endian length followed by the
    raw bytes of the part.
    """
    blob = bytearray()
    for part in keyparts:
        blob += struct.pack('>I', len(part))
        blob += part
    return bytes(blob)
def clean_id(id):
    """
    Normalize an identifier string: strip leading/trailing whitespace,
    collapse internal whitespace runs to a single blank, and drop any
    nonprintable characters.
    """
    cleaned = ""
    for ch in id.strip():
        if ch.isspace():
            ch = " "
        if not ch.isprintable():
            continue
        # suppress a blank at the start or right after another blank
        if ch == " " and (not cleaned or cleaned.endswith(" ")):
            continue
        cleaned += ch
    return cleaned
def parse_id(i):
    """Coerce an appid (int or string) to an int, or None when impossible.

    Since we deal with both strings and ints, force appid to be correct.
    The original used a bare ``except`` which would also swallow
    KeyboardInterrupt and programming errors; only conversion failures
    are caught now.
    """
    try:
        return int(str(i).strip())
    except (ValueError, TypeError):
        return None
import socket
def get_hostname_ip():
    """Return (hostname, ip) of the current machine, or None on failure.

    DNS resolution of the local hostname can fail (e.g. no resolver
    configured); in that case a message is printed and None is returned.
    The original bare ``except`` is narrowed to OSError, which covers
    ``socket.error``/``gaierror``.
    """
    try:
        host_name = socket.gethostname()
        host_ip = socket.gethostbyname(host_name)
        return host_name, host_ip
    except OSError:
        print("Unable to retrieve hostname and ip.")
        return None
import sys
import inspect
import os
def get_frame_dir(depth=0):
    """Return the source file directory of a frame in the call stack.

    Args:
        depth: how many frames up from the *caller* to inspect
            (0 = the frame of the function calling get_frame_dir).

    Raises:
        NotImplementedError: on interpreters lacking ``sys._getframe``
            (CPython only).
    """
    if hasattr(sys, "_getframe"):
        frame = sys._getframe(depth + 1)  # +1 for this function itself
    else:
        raise NotImplementedError("Support CPython only.")
    file = inspect.getabsfile(frame)
    return os.path.dirname(file)
def actions_side_effect(state):
    """Side effect for actions: return the actions available for a state.

    States 'a'-'d' each expose three numbered actions; every other state
    (the leaves e-m) has no possible actions.

    :param state: the current state
    :return: fresh list of actions available for the given state
    """
    action_table = {
        "a": ["a1", "a2", "a3"],
        "b": ["b1", "b2", "b3"],
        "c": ["c1", "c2", "c3"],
        "d": ["d1", "d2", "d3"],
    }
    # copy so callers get a fresh list each call, as the match version did
    return list(action_table.get(state, []))
import torch
def gpu_device(gpu):
    """
    Resolve a torch.device from a bool/int GPU spec.

    Parameters
    ----------
    gpu: bool or int
        If an int, the returned device is the GPU with that ID (asserted
        to be a valid index). If False, the CPU device is returned. If
        True, CUDA device 0 is returned (CUDA must be available).
        NOTE(review): the original docstring promised "the GPU with the
        highest amount of free memory" for True, but the code returns
        device 0 -- documented as implemented.

    Returns
    -------
    torch.device
        A PyTorch device.
    """
    if isinstance(gpu, bool):
        if gpu:
            assert torch.cuda.is_available()
            return torch.device('cuda', 0)
        return torch.device('cpu')
    assert gpu < torch.cuda.device_count()
    return torch.device('cuda', gpu)
import torch
def torch_image_to_numpy(image_torch, inplace=False):
    """Convert a PyTorch image tensor to a Numpy array.

    :param image_torch: PyTorch float CHW Tensor in range [0..1].
    :param inplace: when True, the scaling mutates the input tensor.
    :returns: Numpy uint8 HWC array in range [0..255]."""
    work = image_torch if inplace else image_torch.clone()
    work.mul_(255).add_(0.5).clamp_(0, 255)
    return work.permute(1, 2, 0).to('cpu', torch.uint8).numpy()
def calculateCoefficients( c1=0,
                           c2=0,
                           c3=0,
                           c4=0,
                           r2=0,
                           r3=0,
                           r4=0,
                           flt_type='passive'):
    """ Return loop-filter coefficients as a list where
    a[0] = a0, a[1] = a1, etc.

    c1..c4 / r2..r4 -- filter capacitor and resistor values; the 'passive'
    branch yields three coefficients, any other flt_type uses the
    active-filter formulas and yields four.

    NOTE(review): component semantics inferred from the names only --
    confirm against the filter-design reference these formulas came from.
    """
    a = []
    if flt_type == 'passive':
        # a0: total capacitance
        a.append( c1 + c2 + c3 + c4 )
        # a1
        a.append( c2*r2*(c1 + c3 + c4) + r3*(c1 + c2)*(c3 + c4) +\
                  c4*r4*(c1 + c2 + c3) )
        # a2
        a.append( c1*c2*r2*r3*(c3 + c4) +\
                  c4*r4*(c2*c3*r3 + c1*c3*r3 + c1*c2*r2 + c2*c3*r2) )
    else:
        # active filter: four coefficients a0..a3
        a.append(c1 + c2)
        a.append( (c1*c2*r2) + (c1 + c2) * (c3*r3 + c4*r4 + c4*r3) )
        a.append( c3*c4*r3*r4 * (c1 + c2) + c1*c2*r2*(c3*r3 + c4*r4 + c4*r3) )
        a.append(c1*c2*c3*c4*r2*r3*r4)
    return a
def _strip_tweet_hashtags(status_text: str) -> str:
    """Strip out words from a tweet that are hashtags (i.e. begin with #).

    Note: whitespace runs collapse to single spaces as a side effect of
    re-joining the remaining tokens.
    """
    kept = (word for word in status_text.split() if not word.startswith("#"))
    return " ".join(kept)
def get_all_inputs(ax):
    """Read selected input pins, analog and digital.

    Args:
        ax: device wrapper exposing a ``connection`` with analog/digital I/O.

    Returns:
        tuple: (A, T, V) raw analog reading, temperature and voltage.
        NOTE(review): the digital inputs ``D`` are read but not returned --
        confirm whether the read is needed for its device side effects.
    """
    con = ax.connection
    con.setDigitalOutput(1)
    A = con.analogInput(0)  # N0 GIO 0, 1 0. . . 4095 (raw ADC counts)
    T = con.analogInput(9)  # Temperature GIO 9, 1 [degrees C] -- assumed unit, confirm
    V = con.analogInput(8)  # Voltage GIO 8, 1 [1/10V]
    # print(f'A:{A} T:{T} V:{V}')
    # I1, I2, I3 = (con.digitalInput(i) for i in [1,2,3])
    D = f'{con.digitalInput(255):04b}'  # four digital inputs as a bit string
    D = list(int(i) for i in D)        # ...then as a list of 0/1 ints
    return A, T, V
def apply_to_feature(feature_df, groupby_func_name=None, function=None):
    """
    Apply a function to the entries for each feature.

    feature_df ... dataframe with index (chrom, feature_name, pos)
                   (such as the output of data_per_feature())
    groupby_func_name ... name of a groupby-object method to apply to the
                   data; faster than applying a function object.
    function ... alternatively: function object to apply per group.

    Raises ValueError when neither argument is given.
    """
    grouped = feature_df.groupby(lambda idx: idx[1])
    if groupby_func_name is not None:
        return getattr(grouped, groupby_func_name)()
    if function is not None:
        return grouped.apply(function)
    raise ValueError("Either groupby_func_name or function have to be given.")
def getPrecedence(operator):
    """
    Return the precedence for operators for use in toReversePolish(),
    where higher numbers bind tighter: '^' -> 3, '*' '/' -> 2,
    '+' '-' -> 1. Any other token (e.g. a parenthesis) yields None,
    matching the original's implicit fall-through.

    :param operator: operator token data
    :return: number representing the precedence, or None
    """
    precedence_table = {"^": 3, "*": 2, "/": 2, "+": 1, "-": 1}
    return precedence_table.get(operator)
import os
def getFileSizeMB(filepath):
    """
    Get the filesize (in MB) of the given file.

    GIVEN:
      filepath (str) -- absolute or relative path to a file
    RETURN:
      size_mb (float) -- size of the file in MB
    """
    return os.path.getsize(filepath) / (1024 * 1024)
def import_from_string(path):
    """
    Dynamically load an attribute specified by a dotted string,
    e.g. 'path.to.my.Class'.
    """
    module_path, _, attr_name = path.rpartition('.')
    module = __import__(module_path, fromlist=[attr_name])
    return getattr(module, attr_name)
def thread_get_stats(client):
    """Query thread statistics over the client's RPC interface.

    Returns:
        Whatever the 'thread_get_stats' RPC call returns.
    """
    response = client.call('thread_get_stats')
    return response
def format_bad_blocks(bad_blocks):
    """Render block results one per line, sorted, with backslashes
    normalized to forward slashes."""
    normalized = [str(block).replace("\\", "/") for block in bad_blocks]
    normalized.sort()
    return "\n".join(normalized)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.