content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def both_set_and_different(first, second):
    """Return True only when both arguments are set (not None) and unequal.

    Returns:
        bool: True if both arguments are set and different.
    """
    if first is None or second is None:
        return False
    return first != second
def erg_cm_minus_2_s_minus_1__to__W_m_minus_2():
    """Convert erg per square centimeter and second to Watt per square meter.

    Returns a conversion template string (1 erg/cm^2/s == 1e-3 W/m^2); the
    ``{kind}`` and ``{var}`` placeholders are presumably filled in later by a
    code/expression generator -- TODO confirm against the caller.
    """
    # NOTE(review): returns a format template, not a numeric factor.
    return '1.0E-3{kind}*{var}'
def CtoF(t):
    """Conversion: Celsius to Fahrenheit."""
    fahrenheit = (t * 9) / 5 + 32
    return fahrenheit
def l2_norm_sqr(w):
    """Return the squared L2 norm of the given matrix ``w``.

    The squared L2 norm of a matrix is simply the sum of the squares of its
    elements.

    @input: w, a theano shared variable.
    @output: L2 norm of w
    """
    squared = w ** 2
    return squared.sum()
def gen_tpod_image_label_line(annotations):
    """Generate a line in label_list from annotations in one image.

    Lost annotations are skipped; each remaining box is rendered as
    "xmin,ymin,w,h" and all boxes are joined by ';'.
    """
    boxes = []
    for _, ann in annotations.iterrows():
        if ann['lost']:
            continue
        xmin, ymin = int(ann['xmin']), int(ann['ymin'])
        xmax, ymax = int(ann['xmax']), int(ann['ymax'])
        boxes.append('{},{},{},{}'.format(xmin, ymin, xmax - xmin, ymax - ymin))
    # Only emit a line when at least one box survived filtering.
    return [';'.join(boxes)] if boxes else []
def decrypt(encrypted_string, key_val):
    """Decrypt ``encrypted_string`` by shifting each character by ``key_val``.

    The input is upper-cased first; characters shifted past 'Z' are wrapped
    back into the alphabet range.

    Args:
        encrypted_string (str): Text to decrypt.
        key_val (int): Shift applied to each character.

    Returns:
        str: The decrypted string.
    """
    # Bug fix: the accumulator previously started as '"' (a stray quote
    # character), which prefixed every result with a double quote.
    final = ''
    encrypted_string = encrypted_string.upper()
    for ch in encrypted_string:
        decrypt_char = ord(ch) + key_val
        if decrypt_char > 90:
            # NOTE(review): this wrap formula matches the original code's
            # convention -- confirm against the paired encrypt routine.
            decrypt_char = 65 - (90 - decrypt_char)
        final += chr(decrypt_char)
    return final
def get_alt_sxx(x_sum: float, sqd_x_sum: float, n: int) -> float:
    """Calculate sxx (sum of squared deviations) from precomputed sums."""
    cross_term = 2 * x_sum * x_sum / n
    correction = x_sum ** 2 / n
    return sqd_x_sum - cross_term + correction
def load_external_ids(path):
    """Read a file of integer ids (one per line) into a list."""
    ids = []
    with open(path) as handle:
        for line in handle:
            ids.append(int(line.strip()))
    return ids
def second_to_human(time_in_sec):
    """Return a humanized string for a duration given in seconds.

    The output rounds up to days, hours, minutes, or seconds, e.g.
    "4 days 5 hours" or "4 hours 3 mins".  Returns None when nothing
    could be formatted.
    """
    # Bug fix: use floor division (//); plain "/" is float division in
    # Python 3 and produced fractional day/hour/minute counts (and garbled
    # output like "1.5 days").
    days = int(time_in_sec) // 3600 // 24
    hours = int(time_in_sec - 3600 * 24 * days) // 3600
    minutes = int(time_in_sec - 3600 * 24 * days - 3600 * hours) % 3600 // 60
    seconds = time_in_sec % 3600 % 60
    ss = ""
    if days > 0:
        ss += " %s %s" % (days, "day" if days == 1 else "days")
    if hours > 0:
        ss += " %s %s" % (hours, "hour" if hours == 1 else "hours")
    if minutes > 0:
        ss += " %s %s" % (minutes, "min" if minutes == 1 else "mins")
    # Show seconds when non-zero, or when the duration is zero overall.
    if seconds > 0 or (seconds == 0 and days == 0 and hours == 0 and minutes == 0):
        ss += " %g %s" % (seconds, "sec" if seconds <= 1 else "secs")
    ss = ss.strip()
    return ss if ss else None
def bootstrap_button(text, **kwargs):
    """Build the template context for rendering a Bootstrap button.

    Recognized kwargs: type, size, disabled, enabled, icon, url.
    """
    btn_type = kwargs.get('type', '')
    btn_size = kwargs.get('size', '')
    is_disabled = kwargs.get('disabled', False) and kwargs.get('enabled', True)
    icon = kwargs.get('icon', '')
    # Assemble the space-separated button CSS classes.
    classes = ['btn']
    if btn_type:
        classes.append('btn-' + btn_type)
    if btn_size:
        classes.append('btn-' + btn_size)
    if is_disabled:
        classes.append('disabled')
    # Assemble the icon CSS classes; non-link buttons get a white icon.
    icon_class = ''
    if icon:
        icon_class = 'icon-' + icon
        if btn_type and btn_type != 'link':
            icon_class += ' icon-white'
    return {
        'text': text,
        'url': kwargs.get('url', '#'),
        'button_class': ' '.join(classes),
        'icon_class': icon_class,
    }
import warnings
def is_equal(v1, v2):
    """
    Compare two values for equality without ever raising.

    Parameters
    ----------
    v1: Any
        first value
    v2: Any
        second value

    Returns
    -------
    result : bool
        ``True`` if ``v1`` and ``v2`` compare equal; ``False`` if not, or
        if the comparison itself raised.

    Warns
    -----
    UserWarning
        If an exception is raised during comparison.
    """
    try:
        result = v1 == v2
    except Exception as e:
        warnings.warn(
            "Comparison method failed. Returned False. "
            f"There may be need to define custom compare methods in __equality_checks__ dictionary. Exception {e}"
        )
        return False
    return bool(result)
import math
def raw_euclidean_distance_between_blocks(blocks1, blocks2=None):
    """Returns the euclidean distance between the two sequence of blocks.
    >For any pair of action sequences, we define the “raw action dissimilarity” as the mean Euclidean distance between corresponding pairs of [x, y, w, h] action vectors (Fig. 4A, light). When two sequences are of different lengths, we evaluate this metric over the first k actions in both, where k represents the length of the shorter sequence.
    """
    if blocks2 is None:
        # Special case for parallelization: a single tuple argument holds
        # both sequences.
        blocks1, blocks2 = blocks1
    total = 0
    # zip truncates to the shorter sequence, matching the "first k actions".
    for b1, b2 in zip(blocks1, blocks2):
        total += math.sqrt(
            (b1.x - b2.x) ** 2
            + (b1.y - b2.y) ** 2
            + (b1.width - b2.width) ** 2
            + (b1.height - b2.height) ** 2
        )
    return total
def rows_to_dict(
    rows,
    main_key_position=0,
    null_value="delete",
    header_line=0,
    contains_open_ends=False,
):
    """
    Convert a row of rows (e.g. csv) to dictionary
    Parameters
    ----------
    rows : list
        the row based data to convert to `dict`
    main_key_position : int, optional
        if the main_key is not on the top left, its position can be specified
    null_value : any, optional
        if an emtpy field in the lists shall be represented somehow in the dictionary
    header_line : int, optional
        if the header_line is not the first one, its position can be specified
    contains_open_ends : bool, optional
        if each row is not in the same length (due to last set entry as last element in row),
        a length check for corrupted data can be ignored
    Returns
    -------
    dict
        dictionary containing the information from row-based data
    """
    data = dict()
    header = rows[header_line]
    # Every row below the header becomes one entry keyed by its main-key cell.
    for row in rows[header_line + 1 :]:
        sub_data = dict()
        for i in range(len(header)):
            if i == main_key_position:
                # The main key becomes the outer dict key, not a field.
                continue
            elif i >= len(row):
                # Row is shorter than the header: corrupt data unless
                # "open ends" are explicitly allowed.
                if not contains_open_ends:
                    raise IndexError("not all elements are the same length")
                elif null_value != "delete":
                    sub_data[header[i]] = null_value
            elif not row[i] and null_value != "delete":
                # Empty cell represented by the caller-chosen null_value.
                sub_data[header[i]] = null_value
            elif not row[i]:
                # Empty cell with null_value == "delete": omit the field.
                continue
            else:
                sub_data[header[i]] = row[i]
        data[row[main_key_position]] = sub_data
    # Wrap everything under the header name of the main-key column.
    data = {header[main_key_position]: data}
    return data
def trimHighscores(arr):
    """Returns 5 greatest scores from 'arr'."""
    ranked = sorted(arr, key=lambda record: int(record[1]), reverse=True)
    return ranked[:5]
def modexp5(b, m):
    """Compute b**5 mod m via an addition chain (two squarings, one multiply)."""
    sq = (b * b) % m
    fourth = (sq * sq) % m
    fifth = (b * fourth) % m
    # Sanity check against the builtin modular exponentiation.
    assert fifth == pow(b, 5, m)
    return fifth
def moeda(preco=0, moeda="R$"):
    """
    -> Format a price with a currency prefix and a comma decimal separator.
    :param preco: the price to format.
    :param moeda: the currency symbol to prepend.
    :return: the formatted value.
    """
    texto = f'{moeda} {preco:.2f}'
    return texto.replace('.', ',')
import json
def load_database(filename):
    """Load a JSON database file from disk and return the parsed object."""
    print("[database] Loading database from '" + filename + "'")
    with open(filename) as handle:
        data = json.load(handle)
    return data
def linecol_to_pos(text, line, col):
    """Return the offset of this line and column in text.
    Lines are one-based, columns zero-based.
    This is how Jedi wants it. Don't ask me why.
    Raises ValueError when the position lies outside the text.
    """
    line_start = 0
    # Walk past line-1 newlines to find the start offset of the target line.
    for _ in range(line - 1):
        next_newline = text.find("\n", line_start)
        if next_newline < 0:
            raise ValueError("Text does not have {0} lines."
                             .format(line))
        line_start = next_newline + 1
    offset = line_start + col
    if offset > len(text):
        raise ValueError("Line {0} column {1} is not within the text"
                         .format(line, col))
    return offset
def set_transformer_in_out(transformer, inputCol, outputCol):
    """Set input and output column(s) of a transformer instance.

    Works on a copy of ``transformer``; tries the single-column setter first
    and falls back to the list-based setter (Spark ML exposes one or the
    other depending on the transformer).

    Raises:
        ValueError: if the transformer has neither setInputCol nor
            setInputCols.
    """
    transformer = transformer.copy()
    try:
        transformer.setInputCol(inputCol)
    except AttributeError:
        try:
            transformer.setInputCols([inputCol])
        except AttributeError:
            # NOTE(review): message is a tuple, so the ValueError text renders
            # as a tuple repr -- possibly unintended.
            message = (
                "Invalid transformer (doesn't have setInputCol or setInputCols): ",
                str(transformer.__class__)
            )
            raise ValueError(message)
    try:
        transformer.setOutputCol(outputCol)
    except AttributeError:
        try:
            transformer.setOutputCols([outputCol])
        except AttributeError:
            # we accept transformers that do not have an outputCol
            # (as ColumnDropper)
            pass
    return transformer
def join_bpe(lst: list, s: str):
    """Join words together that are prefaced with '##'. To be used with `reduce`."""
    if not s.startswith("##"):
        return lst + [s]
    # Merge the continuation piece into the previous token.
    # (A leading '##' on the very first term would pop from an empty list.)
    prev = lst.pop()
    return lst + [prev + s.strip("#")]
def mock_tensor_history():
    """Mock tensor history: two tensor records plus debugger metadata."""
    output_tensor = {"name": "Default/TransData-op99:0",
                     "full_name": "Default/TransData-op99:0",
                     "graph_name": "kernel_graph_0",
                     "node_type": "TransData",
                     "type": "output",
                     "step": 0,
                     "dtype": "DT_FLOAT32",
                     "shape": [2, 3],
                     "has_prev_step": False,
                     "value": "click to view"}
    input_tensor = {"name": "Default/args0:0",
                    "full_name": "Default/args0:0",
                    "graph_name": "kernel_graph_0",
                    "node_type": "Parameter",
                    "type": "input",
                    "step": 0,
                    "dtype": "DT_FLOAT32",
                    "shape": [2, 3],
                    "has_prev_step": False,
                    "value": "click to view"}
    metadata = {"state": "waiting",
                "step": 0,
                "device_name": "0",
                "pos": "0",
                "ip": "127.0.0.1:57492",
                "node_name": "",
                "backend": "Ascend"}
    return {"tensor_history": [output_tensor, input_tensor],
            "metadata": metadata}
def convert_date(date):
    """
    Converts the date from the DD/MM/YYYY format to YYYY-MM-DD
    >>> convert_date("13/04/2018")
    '2018-04-13'
    """
    parts = date.split("/")
    return "-".join([parts[2], parts[1], parts[0]])
import functools
import logging
import time
def RetryOnException(exc_type, retries):
  """Decorator to retry running a function if an exception is raised.
  Implements exponential backoff to wait between each retry attempt, starting
  with 1 second.
  Note: the default number of retries is defined on the decorator, the decorated
  function *must* also receive a "retries" argument (although its assigned
  default value is ignored), and clients of the funtion may override the actual
  number of retries at the call site.
  The "unused" retries argument on the decorated function must be given to
  keep pylint happy and to avoid breaking the Principle of Least Astonishment
  if the decorator were to change the signature of the function.
  For example:
    @retry_util.RetryOnException(OSError, retries=3)  # default no. of retries
    def ProcessSomething(thing, retries=None):  # this default value is ignored
      del retries  # Unused. Handled by the decorator.
      # Do your thing processing here, maybe sometimes raising exeptions.
    ProcessSomething(a_thing)  # retries 3 times.
    ProcessSomething(b_thing, retries=5)  # retries 5 times.
  Args:
    exc_type: An exception type (or a tuple of them), on which to retry.
    retries: Default number of extra attempts to try, the caller may also
      override this number. If an exception is raised during the last try,
      then the exception is not caught and passed back to the caller.
  """
  def Decorator(f):
    @functools.wraps(f)
    def Wrapper(*args, **kwargs):
      # Exponential backoff, starting at 1 second and doubling per retry.
      wait = 1
      # The caller may override the retry count at the call site.
      kwargs.setdefault('retries', retries)
      for _ in range(kwargs['retries']):
        try:
          return f(*args, **kwargs)
        except exc_type as exc:
          logging.warning(
              '%s raised %s, will retry in %d second%s ...',
              f.__name__, type(exc).__name__, wait, '' if wait == 1 else 's')
          time.sleep(wait)
          wait *= 2
      # Last try with no exception catching.
      return f(*args, **kwargs)
    return Wrapper
  return Decorator
import csv
def read_csv(csv_path):
    """Assume that csv file is formatted as:
    name1,label1
    name2,label2
    ...
    Returns a list of (name, int(label)) tuples.
    """
    with open(csv_path) as handle:
        rows = csv.reader(handle)
        return [(name, int(label)) for name, label in rows]
from typing import Any
from typing import Set
def get_all_subclasses(cls: Any) -> Set[Any]:
    """Get all subclasses of the given class, transitively."""
    result: Set[Any] = set()
    for sub in cls.__subclasses__():
        result.add(sub)
        result |= get_all_subclasses(sub)
    return result
import numpy as np
def uniform(low, high, size=1):
    """
    Draw samples from a uniform distribution.

    ATTENTION: the interval is [low, high)

    :param low: inclusive lower bound
    :param high: exclusive upper bound
    :param size: output size (defaults to 1)
    :return: ndarray of uniformly distributed samples
    """
    samples = np.random.uniform(low, high, size)
    return samples
def surfaceZetaFormatToTextureFormat(fmt, swizzled, is_float):
    """Convert nv2a zeta format to the equivalent Texture format.

    Args:
        fmt: nv2a zeta (depth/stencil) surface format code (0x1 = Z16,
            0x2 = Z24S8).
        swizzled: whether the surface uses swizzled layout.
        is_float: whether depth values are floating point.

    Returns:
        The texture format code for the same layout.  The returned constants
        presumably match the nv2a texture-format table -- TODO confirm
        against the hardware documentation.

    Raises:
        Exception: for any other zeta format code.
    """
    if fmt == 0x1: # Z16
        if is_float:
            return 0x2D if swizzled else 0x31
        else:
            return 0x2C if swizzled else 0x30
    elif fmt == 0x2: # Z24S8
        if is_float:
            return 0x2B if swizzled else 0x2F
        else:
            return 0x2A if swizzled else 0x2E
    else:
        raise Exception("Unknown zeta fmt %d (0x%X) %s %s" % (fmt, fmt, "float" if is_float else "fixed", "swizzled" if swizzled else "unswizzled"))
def table_type_validator(type):
    """
    Property: TableInput.TableType

    Validate that ``type`` is a recognized table type.

    Returns:
        The validated value.

    Raises:
        ValueError: if ``type`` is not a valid table type.
    """
    valid_types = [
        "EXTERNAL_TABLE",
        "VIRTUAL_VIEW",
    ]
    if type not in valid_types:
        # Bug fix: the format string was "%" instead of "%s", which raised
        # TypeError ("%i format: a number is required") instead of the
        # intended ValueError for invalid input.
        raise ValueError("%s is not a valid value for TableType" % type)
    return type
def gcd(x: int, y: int) -> int:
    """Greatest common divisor (Euclid's algorithm)."""
    while x % y != 0:
        x, y = y, x % y
    return abs(y)
def getDatalist(dataGraph={}, varGraph={}, variable=""):
    """Produce a data list for a named variable from dataset and variable dicts.

    ``dataGraph`` maps row keys to row lists; key ``0`` holds the header row
    and is skipped.  ``varGraph`` maps a variable name to a list whose first
    element is that variable's column index.  Values are returned as-is
    (categorical); convert the list afterwards to treat data as int/float.

    NOTE: the mutable-dict defaults are kept for interface compatibility;
    they are never mutated here.
    """
    data = dataGraph
    datalist = []
    # Bug fix: dict.keys() returns a view in Python 3, which has no
    # .remove(); materialize it as a list first.
    keys = list(data.keys())
    keys.remove(0)
    for key in keys:
        datalist.append(data[key][varGraph[variable][0]])
    return datalist
def _resnames_match(resnames, allowed_resnames):
    """
    Return True if at least one element of ``resnames`` is also present in
    ``allowed_resnames``.

    Parameters
    ----------
    resnames: `abc.iterable`
    allowed_resnames: `abc.iterable`
    """
    return any(name in allowed_resnames for name in resnames)
def is_pandigital(x):
    """
    Checks if x contains all digits between 1 and 9 (inclusive) exactly once
    """
    digits = sorted(int(ch) for ch in str(x))
    return digits == list(range(1, 10))
def calc_rel_pos(player1, player2):
    """
    player1 slippi Port
    player2 slippi Port
    Given our players we will compute the relative position between themselves.

    The x offset is multiplied by the player's facing direction, so positive
    x means the opponent is in front of that player, negative means behind.
    returns rel_pos_1, rel_pos_2 -- each an (x, y) tuple.
    """
    rel_pos_1=((player1.leader.post.position.x-player2.leader.post.position.x)*int(player1.leader.post.direction), player1.leader.post.position.y-player2.leader.post.position.y)
    #here we multiply times where player1 is facing to know whether the other player is in front of him or on the back
    rel_pos_2=((player2.leader.post.position.x-player1.leader.post.position.x)*int(player2.leader.post.direction), player2.leader.post.position.y-player1.leader.post.position.y)
    return rel_pos_1, rel_pos_2
def __edge_exists__(i, j, g):
    """
    Checks if the edge i --> j exists in the graph, g.
    :param i: Index of a node.
    :param j: Index of a node.
    :param g: Graph.
    :return: A boolean indicating if j is a successor of i.
    """
    successors = list(g.successors(i))
    return j in successors
import torch
def load_model(save_path, model_class=None, model=None):
    """
    Load the model from the path directory provided.

    Args:
        save_path: checkpoint path (as saved by ``torch.save``); must contain
            a 'state_dict' key plus any number of metric entries.
        model_class: class used to construct a fresh model when ``model`` is
            not supplied (must be constructible with no arguments).
        model: an existing model instance to load the weights into.

    Returns:
        (model, metrics): the model with weights loaded, and a dict of all
        checkpoint entries except 'state_dict'.

    Raises:
        ValueError: if neither ``model`` nor ``model_class`` is given.
    """
    if model is None:
        if model_class is None:
            raise ValueError("No model to construct!")
        model = model_class()
    checkpoint = torch.load(save_path)
    model_state_dict = checkpoint['state_dict']
    model.load_state_dict(model_state_dict)
    # Everything stored alongside the weights (epochs, losses, ...) is
    # returned as metrics.
    metrics = {k:checkpoint[k] for k in checkpoint if k!='state_dict'}
    return model, metrics
def matrika_nicel(n, m):
    """
    Build an m x n matrix of zeros (list of m rows, each containing n zeros).
    """
    return [[0] * n for _ in range(m)]
def iteritems(d):
    """Python 2, 3 compatibility: iterate over (key, value) pairs."""
    if hasattr(d, "items"):
        return d.items()
    return d.iteritems()
def phone_number(pstr):
    """ Extract the extension from the phone number if it exists. """
    # Some records hold several numbers separated by ';' -- keep only the
    # first one (the others are deliberately thrown away).
    if ';' in pstr:
        pstr = pstr.split(';')[0]
    # Try the known extension markers in order.
    for marker in (' x ', 'ext.', 'ext'):
        if marker in pstr:
            number, ext = pstr.split(marker)
            return (number.strip(), ext.strip())
    return (pstr.strip(), None)
from typing import Callable
from typing import Dict
import uuid
def _guid_replacer() -> Callable[[str], str]:
    """
    Closure for replace_guid.

    Returns
    -------
    Callable[[str], str]
        replace_guid function
    """
    # Shared mapping captured by the closure: each original GUID maps to the
    # same random replacement on every call.
    mapping: Dict[str, str] = {}

    def _replace_guid(guid: str) -> str:
        """
        Replace GUID/UUID with mapped random UUID.

        Parameters
        ----------
        guid : str
            Input UUID.

        Returns
        -------
        str
            Mapped UUID (non-string/empty input is returned unchanged).
        """
        if not guid or not isinstance(guid, str):
            return guid
        if guid not in mapping:
            mapping[guid] = str(uuid.uuid4())
        return mapping[guid]

    return _replace_guid
import itertools
def compound_group_correlation(sensitive_attributes, non_sensitive_attributes):
    """
    Encode sensitive-group correlation for the learning encoding: pair every
    combination of sensitive attribute values with every non-sensitive
    attribute.  A singleton group [v] is expanded to [v, -v].
    """
    expanded = [
        group if len(group) != 1 else [group[0], -1 * group[0]]
        for group in sensitive_attributes
    ]
    combos = list(itertools.product(*expanded))
    return [(combo, attribute)
            for attribute in non_sensitive_attributes
            for combo in combos]
import argparse
def arg_bool(arg):
    """ Verify the specified argument is either true or false """
    lowered = arg.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise argparse.ArgumentTypeError("not a boolean: %s" % arg)
def _count_mines(grid, x, y):
    """
    Helper function to count mines around a given cell.

    NOTE(review): a neighbouring cell equal to 0 is counted as a mine --
    confirm that 0 is the mine marker in this grid encoding.
    Returns the neighbour count, or None when it is zero (callers apparently
    treat "no adjacent mines" specially).
    """
    # The eight neighbours of (x, y).
    surrounding_cells = [(x, y-1),
                         (x, y+1),
                         (x-1, y+1),
                         (x-1, y),
                         (x-1, y-1),
                         (x+1, y+1),
                         (x+1, y),
                         (x+1, y-1)]
    count = 0
    for dx, dy in surrounding_cells:
        # Negative indices would wrap to the other side in Python; skip them.
        if dx < 0 or dy < 0:
            continue
        try:
            count += 1 if grid[dy][dx] == 0 else 0
        except IndexError:
            # Off the bottom/right edge of the grid.
            continue
    return count if count > 0 else None
def query_phantom(view, pid):
    """Query phantom.

    Thin wrapper delegating to ``view.query_phantom``; returns whatever the
    view's API returns for phantom id ``pid``.
    """
    return view.query_phantom(pid)
from pathlib import Path
def filename(filepath):
    """Returns the filename at the end of filepath.

    Args:
        filepath (str): A path to a file

    Returns:
        str: The filename
    """
    path_obj = Path(filepath)
    return path_obj.name
def clean_labels(_labels, keep=1):
    """Split numeric string labels on '-' and keep one part.

    Args:
        _labels (list): strings of the form "<label>-<value>".
        keep (int, optional): index of the split part to keep; when truthy
            the kept part is cast to int. Defaults to 1.

    Returns:
        list: cleaned-up strings or integers.
    """
    parts = [label.split('-')[keep] for label in _labels]
    return [int(p) for p in parts] if keep else parts
def filter_alpha(Input_Dict):
    """
    Clamp the alpha parameter into [0, 1].

    :param Input_Dict: input parameters dictionary
    :type Input_Dict : dict
    :return: modified dictionary (returned unchanged on any error)
    """
    try:
        if Input_Dict["alpha"] > 1:
            Input_Dict["alpha"] = 1
            print("[Warning] Opem Automatically Set Alpha To Maximum Value (1) ")
        elif Input_Dict["alpha"] < 0:
            Input_Dict["alpha"] = 0
            # Bug fix: this warning previously said "Maximum Value (0)" for
            # the lower clamp.
            print("[Warning] Opem Automatically Set Alpha To Minimum Value (0) ")
        return Input_Dict
    except Exception:
        # Best effort: malformed input (missing key, non-numeric alpha) is
        # returned unchanged.
        return Input_Dict
def generate_dict_vpn_ip_nexthops(nexthops):
    """
    Generate the configuration block for VPN IP nexthops used by the CLI
    template to generate the VPN Feature Template.

    :param nexthops: iterable of nexthop variable names
    :return: list of vipValue entries, one per nexthop
    """
    # Dead code removed: an unreachable "pass" followed the return statement.
    return [
        {
            "address": {
                "vipObjectType": "object",
                "vipType": "variableName",
                "vipValue": "",
                "vipVariableName": nexthop,
            }
        }
        for nexthop in nexthops
    ]
def getTypesWithName(types, names):
    """Return all types whose ``name`` attribute appears in ``names``.

    Keyword arguments:
    types -- list of model.Type instances
    names -- list of strings with names
    """
    matching = []
    for candidate in types:
        if candidate.name in names:
            matching.append(candidate)
    return matching
def getCenter(x, y, blockSize):
    """
    Determines center of a block with x, y as top left corner coordinates and blockSize as blockSize
    :return: x, y coordinates of center of a block
    """
    half = blockSize / 2
    return (int(x + half), int(y + half))
def read_arguments(t_input, split1="\n", split2="\""):
    """Slave to read_cfg: pull quoted argument values out of config lines."""
    values = []
    for line in str(t_input).split(split1):
        pieces = line.split(split2)
        # A well-formed line splits into exactly three pieces around the
        # quoted value; anything else (e.g. an empty line) is skipped.
        if len(pieces) == 3:
            values.append(pieces[1])
    return values
import csv
def read_objects_csv(filename):
    """Takes a CSV with headings and converts each row to a dict. Useful for populating create_network()

    :param filename: Full filename of the CSV
    :type filename: str
    :return: A list of dicts, each dict is a row from the CSV, with the heading as key and column as value
    :rtype: list
    """
    with open(filename) as objects_csv:
        return list(csv.DictReader(objects_csv))
def threshold(pred, param):
    """
    Takes the predicted image "pred", thresholds it with the determined
    param, returns binary image.

    NOTE: the thresholding is done in place -- the caller's array is
    modified, and the same array object is returned.

    Parameters
    ----------
    pred : np.array
        Prediction image
    param : float
        Threshold for the input image

    Returns
    -------
    np.ndarray
        Binary np.array (values >= param become 1, the rest 0)
    """
    pred[pred >= param] = 1
    pred[pred < param] = 0
    return pred
from functools import reduce
def PostUploadHook(cl, change, output_api):
  """git cl upload will call this hook after the issue is created/modified.
  This will add extra trybot coverage for non-default Android architectures
  that have a history of breaking with Seccomp changes.
  """
  def affects_seccomp(f):
    # Path fragments that identify the Seccomp implementation.
    seccomp_paths = [
        'bpf_dsl/',
        'seccomp-bpf/',
        'seccomp-bpf-helpers/',
        'system_headers/',
        'tests/'
    ]
    # If the file path contains any of the above fragments, it affects
    # the Seccomp implementation.
    affected_any = map(lambda sp: sp in f.LocalPath(), seccomp_paths)
    return reduce(lambda a, b: a or b, affected_any)
  # No Seccomp-related files touched: nothing extra to request.
  if not change.AffectedFiles(file_filter=affects_seccomp):
    return []
  return output_api.EnsureCQIncludeTrybotsAreAdded(
      cl,
      [
        'master.tryserver.chromium.android:android_arm64_dbg_recipe',
        'master.tryserver.chromium.android:android_compile_x64_dbg',
        'master.tryserver.chromium.android:android_compile_x86_dbg',
      ],
      'Automatically added Android multi-arch compile bots to run on CQ.')
async def read_until(socket, messages):
    """
    Reads from socket until all messages from the list are received.
    Returns the list of ALL messages read while waiting, including any that
    were not in ``messages``.
    """
    # Work on a copy so the caller's list is not consumed.
    messages = messages.copy()
    res = []
    while messages:
        data = await socket.receive_json()
        res += [data]
        # Tick off each expected message the first time it is seen.
        if data in messages:
            messages.remove(data)
    return res
def _DefaultRunnable(test_runner):
  """A default runnable for a PythonTestRunner.

  Simply delegates to the runner's RunTests().

  Args:
    test_runner: A PythonTestRunner which will run tests.

  Returns:
    The test results.
  """
  return test_runner.RunTests()
def update_model_paths():
    """
    Update the current MILP model with new link-path matrix.

    NOTE(review): placeholder implementation -- performs no work and always
    reports success.
    """
    return True
def rekey_dict(d, key_map):
    """
    Renames the keys in `d` based on `key_map`.

    `d` is a dictionary whose keys are a superset of the keys in `key_map`.
    `key_map` is a dictionary whose keys match at least some of the keys in `d`
    and whose values are the new key names for `d`.
    For example:
        rekey_dict({'a': 1, 'b': 2}, {'a': 'b', 'b': 'c'}) =
        {'b': 1, 'c': 2}
    """
    # Remapped key/value pairs first.
    new_dict = {new_key: d[old_key]
                for old_key, new_key in key_map.items()
                if old_key in d}
    # Bug fix: previously the carry-over step only excluded keys colliding
    # with *new* names, so remapped old keys leaked back in (the docstring
    # example actually returned {'b': 1, 'c': 2, 'a': 1}). Only carry over
    # keys that were not remapped and do not collide with a new name.
    for key, value in d.items():
        if key not in key_map and key not in new_dict:
            new_dict[key] = value
    return new_dict
def merge_themes_occasions(df):
""" Megre themes and occasions columns in df"""
for index, row in df.iterrows():
oc = row['occasions']
themes = row['themes']
if not isinstance(oc, float) and not isinstance(themes, float) and len(oc) > 0:
for o in oc:
themes.append(o)
elif not isinstance(oc, float) and isinstance(themes, float):
row['themes'] = oc
return df.drop('occasions', 1) | 74c5e85429e1a48cc3cc286570fcfe45a2731d5e | 35,023 |
def markdown_paragraph(text: str) -> str:
    """Wrap *text* in blank lines so it renders as a Markdown paragraph."""
    return "\n" + text + "\n\n"
def FIX_docSetCompressMode(doc, ratio):
    """get the compression ratio for a document, ZLIB based

    NOTE(review): the actual libxml2 call is commented out below, so this
    currently only validates ``ratio`` (0-9) and always returns None.
    """
    #traceln("ratio :", ratio)
    assert ratio in [0,1,2,3,4,5,6,7,8,9], "Internal SW Error zlib in Component.py: ratio=%s"%ratio
    ret = None #libxml2.libxml2mod.xmlSetDocCompressMode(doc._o, ratio)
    return ret
def region_from(ctg_name, ctg_start=None, ctg_end=None):
    """
    Build a 1-based region string: "name" or "name:start-end" ([start, end]).
    Returns "" when the name is missing or only one of start/end is given.
    """
    if ctg_name is None:
        return ""
    have_start = ctg_start is not None
    have_end = ctg_end is not None
    if have_start != have_end:
        return ""
    if not have_start:
        return "%s" % ctg_name
    return "%s:%s-%s" % (ctg_name, ctg_start, ctg_end)
def patch(string):
    """
    patch for boolean variable from tojson
    """
    replacements = {"false": "False", "true": "True"}
    return replacements.get(string, string)
def join_set(item_list, length):
    """ Join a set with itself and return the n-element (length) itemsets.

    Args:
    --------
        item_list: current list of itemsets
        length: generate new items of this length

    Returns:
    --------
        sorted list of unique itemsets of length elements
    """
    joined_sets = []
    count = len(item_list)
    for left in range(count):
        for right in range(left + 1, count):
            union = set(item_list[left]) | set(item_list[right])
            if len(union) == length:
                candidate = sorted(union)
                # Keep only unique itemsets.
                if candidate not in joined_sets:
                    joined_sets.append(candidate)
    return sorted(joined_sets)
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import GroupNorm, LayerNorm
def assert_is_norm_layer(module) -> bool:
    """Check if the module is a norm layer.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the module is a norm layer.
    """
    candidates = (GroupNorm, LayerNorm, _BatchNorm, _InstanceNorm)
    return isinstance(module, candidates)
import six
def is_integer(obj):
    """Helper to determine if the provided ``obj`` is an integer type or not.

    Uses ``six.integer_types`` so that Python 2's ``long`` is also accepted;
    booleans are explicitly rejected even though they subclass int.
    """
    # DEV: We have to make sure it is an integer and not a boolean
    # >>> type(True)
    # <class 'bool'>
    # >>> isinstance(True, int)
    # True
    return isinstance(obj, six.integer_types) and not isinstance(obj, bool)
def get_square(i: int, j: int) -> list:
    """
    Get the index pairs of the 3x3 sudoku square containing cell (i, j).

    Returns:
        list of (row, col) tuples in row-major order.
    """
    # Compute the square's top-left corner directly instead of materializing
    # all nine squares on every call and searching them.
    row0 = (i // 3) * 3
    col0 = (j // 3) * 3
    return [(r, c) for r in range(row0, row0 + 3) for c in range(col0, col0 + 3)]
def confirm(msg):
    """Simple confirmation through user input

    msg(str): Message expecting a yes or no answer
    Returns True if answer is "y" and False otherwise.
    """
    question = f'{msg} (y/n) '
    answer = ''
    # Keep prompting until the user types exactly 'y' or 'n' (case-insensitive).
    while answer not in ('y', 'n'):
        answer = input(question).lower()
    return answer == 'y'
def MakePartition(block_dev, part):
  """Helper function to build Linux device path for storage partition."""
  # Devices whose name ends in a digit (e.g. mmcblk0) need a 'p' separator.
  separator = 'p' if block_dev[-1].isdigit() else ''
  return '%s%s%s' % (block_dev, separator, part)
def apl_singleton(ip: str) -> str:
    """
    Convert a single IP or net/mask to APL item form.

    IPv6 gets prefix "2:", IPv4 gets "1:".  A default mask (/32 for IPv4,
    /128 for IPv6) is appended only when the input carries no mask.
    """
    # str.find never raises, so no try/except is needed here.
    prefix = "2:" if ":" in ip else "1:"
    # Bug fix: the original computed whether a mask was present but ignored
    # the result and always appended the default mask, producing items like
    # "1:10.0.0.0/8/32". Append the default only when no mask is given.
    if "/" not in ip:
        ip += "/32" if prefix == "1:" else "/128"
    return prefix + ip
import torch
def project(x):
    """Project onto the hyperboloid embedded in n+1 dimensions."""
    # Time-like coordinate sqrt(1 + |x|^2) is prepended along dim 1.
    time_coord = torch.sqrt(1.0 + torch.sum(x * x, 1, keepdim=True))
    return torch.cat([time_coord, x], 1)
def last_commits(df, num_commits=500):
    """Return (as a list) the last ``num_commits`` unique commit ids.

    ``unique()`` preserves first-appearance order, so the result keeps the
    original commit ordering.
    """
    return [commit for commit in df['commit'].unique()[-num_commits:]]
import re
import random
def parseDice(args):
    """ Parse a dice expression (e.g., 3d10-1d6) and evaluate the dice
    (e.g., to the string 18-2), replacing each NdM term with the total of
    N rolls of an M-sided die.

    Raises:
        ValueError: for more than 100 dice, zero-sided dice, or
            negative-sided dice.
    """
    m = re.search(r'(\d*)d(\d+)', args)
    while m is not None:
        # An omitted quantity ("d6") means a single die.
        # BUG FIX: the original used `m.group(1) is ""`, an identity test
        # that only works through CPython string interning; compare by value.
        qty = 1 if m.group(1) == "" else int(m.group(1))
        val = int(m.group(2))
        if qty > 100:
            # prevent abusive requests
            raise ValueError("I can't hold {} dice in my robotic hands!"
                             .format(qty))
        if val == 0:
            raise ValueError("A zero-sided die! "
                             "Quite the existential conundrum.")
        if val < 0:
            raise ValueError("You want me to roll a die with negative sides?")
        total = sum(random.randint(1, val) for _ in range(qty))
        args = args[:m.start()] + " " + str(total) + " " + args[m.end():]
        m = re.search(r'(\d*)d(\d+)', args)
    return args
import os
def get_list_parquet(path, parameters=None):
    """
    Gets the list of Parquets contained into a dataset

    Parameters
    -------------
    path
        Path where the dataset is
    parameters
        Possible parameters of the algorithm

    Returns
    -------------
    paths
        List of paths
    """
    if parameters is None:
        parameters = {}
    found = []
    # Collect *.parquet files at the top level and one directory deep.
    for entry in os.listdir(path):
        full_entry = os.path.join(path, entry)
        if os.path.isdir(full_entry):
            for nested in os.listdir(full_entry):
                nested_path = os.path.join(full_entry, nested)
                if nested.endswith(".parquet") and os.path.isfile(nested_path):
                    found.append(nested_path)
        elif entry.endswith(".parquet") and os.path.isfile(full_entry):
            found.append(full_entry)
    return found
def getDigit(num, n, base=10):
    """
    Return the nth least-significant digit of integer num (n=0 returns the
    ones place) in the specified base.  Intended for non-negative num.

    Uses floor division rather than true (float) division so arbitrarily
    large integers keep exact precision: the original `int(num / base**n)`
    silently loses digits for values beyond 2**53.
    """
    return (num // base ** n) % base
def build_json_from_bed(chrom_starts, chrom_ends, chromosomes):
    """ Converts a parsed bed file into a json string in GA4GH schema.

    Args:
        chrom_starts (list): start range values
        chrom_ends (list): end range values
        chromosomes (list): chromosome range values

    Returns:
        str: JSON of the form
            {"features":[{"referenceName":"...", "start":"...", "end":"..."}, ...]}
    """
    features = [
        '{{"referenceName":"{}", "start":"{}", "end":"{}"}}'.format(chrom, start, end)
        for chrom, start, end in zip(chromosomes, chrom_starts, chrom_ends)
    ]
    # Joining avoids the quadratic string concatenation of the original and
    # also fixes the empty-input case, which previously chopped the opening
    # bracket off and produced the malformed string '{"features":]}'.
    return '{"features":[' + ",".join(features) + ']}'
def get_verb_root(regular_verb):
    """
    Return the regular verb root/stem: the verb with its last three
    characters (the infinitive ending) sliced off.
    """
    stem = regular_verb[:-3]
    return stem
def cut_phrasedict (story_phrasefreq_dict, phrasefreq_min):
    """Filter phrases by the minimum number of matches (frequency threshold)."""
    return {
        phrase: freq
        for phrase, freq in story_phrasefreq_dict.items()
        if freq >= phrasefreq_min
    }
def _remove_duplicates(list):
"""
Returns list of unique dummies.
"""
result = []
while list:
i = list.pop()
if i in result:
pass
else:
result.append(i)
result.reverse()
return result | 3e0fcd010af543a04c3c9e057fca4d514aa15754 | 35,056 |
import os
import glob
def find_files(seek_dir, pattern):
    """
    Finds all files within the directory specified that match
    the glob-style pattern.

    :parameter: seek_dir: directory to be searched.
    :parameter: pattern: Unix shell pattern for finding files.

    :return: list of matching paths (may be empty).
    """
    return list(glob.glob(os.path.join(seek_dir, pattern)))
import requests
def query_bulk_games_endpoint(username, year, month):
    """
    Get data from the chess.com bulk game API endpoint.

    Args:
        username (str): A valid chess.com username.
        year (str or int): Year in a YYYY format.
        month (str or int): Month in a MM format.

    Returns:
        requests.response: A ``requests.response`` object from the
        chess.com bulk download API.
    """
    # BUG FIX: the docstring promises a "MM"-formatted *string* is accepted,
    # but the original f-string's {month:02d} only works for ints; coerce
    # first so both str and int months are handled.
    url = (
        f"https://api.chess.com/pub/player/{username}"
        f"/games/{year}/{int(month):02d}/pgn"
    )
    return requests.get(
        url=url,
        headers={
            "Content-Type": "application/x-chess-pgn",
            "Content-Disposition": 'attachment; filename="ChessCom_username_YYYYMM.pgn"',
        },
    )
def param_string(pdict):
    """A function for creating a reduced parameter input file.

    Keys of ``pdict`` look like "genus.species"; output is one
    "genus = {species: value, ...}" line per genus.
    """
    # Group the flat "genus.species" keys into nested dicts per genus.
    grouped = {}
    for key, value in pdict.items():
        pieces = key.split('.')
        genus, species = pieces[0], pieces[1]
        grouped.setdefault(genus, {})[species] = value
    # Render one line per genus; dict repr matches the original str() output.
    lines = ["{} = {}\n".format(genus, species_map)
             for genus, species_map in grouped.items()]
    return "".join(lines)
def average_armor_reduction(armor):
    """Returns the proportion of base damage blocked by the given armor value"""
    # Guard clauses for the valid range [0, 200].
    if armor < 0:
        raise Exception("Armor cannot be less than 0")
    if armor > 200:
        raise Exception("Armor cannot be more than 200")
    if armor <= 100:
        # Linear regime below 100 armor.
        return (armor+armor/2)/2/100
    # Diminishing-returns regime for 100 < armor <= 200.
    return 2-.0025*armor - 100/armor
def cols_by_type(df, dtype):
    """ Return all column names in `df` whose dtype equals `dtype`.

    BUG FIX: the original zip target reused the name ``dtype``, shadowing
    the parameter, so the filter compared each column's dtype with itself
    and always returned every column.
    """
    return [col for col, col_dtype in zip(df.columns, df.dtypes) if col_dtype == dtype]
def ceilsius_to_kelvin(t: float) -> float:
    """Converts the temperature from Celsius to Kelvin.

    Args:
        t (float): Air temperature [°C].

    Returns:
        float: Air temperature [K].
    """
    # BUG FIX: 0 °C is defined as 273.15 K; the original added 273.16,
    # which is the triple point of water and over-converts by 0.01 K.
    return t + 273.15
import struct
def _pack_uint32(val):
""" Integer to 32-bit little-end bytes """
return struct.pack("<I", val) | 32f4e6d74d572a716d723af4f7b7a6911a38b17c | 35,069 |
from datetime import datetime
import statistics
def _compute_timestamp_offset(cam, timestamp_offset_iterations):
    """ Gets timestamp offset in seconds from input camera """
    # The camera clock starts at power-on, so sample the difference between
    # host epoch time and the latched camera timer several times and take
    # the median to suppress sampling jitter (observed accuracy ~1e-3 s).
    samples = []
    for _ in range(timestamp_offset_iterations):
        # Latch ("freeze") the current camera timer so TimestampLatchValue
        # can read a stable value.
        cam.TimestampLatch.Execute()
        # Latched value is in nanoseconds; convert to seconds.
        samples.append(
            datetime.now().timestamp() - cam.TimestampLatchValue.GetValue() / 1e9
        )
    return statistics.median(samples)
def calculate_basal_method(temp_list):
    """ Calculate ovulation based on temperature """
    # NOTE(review): temp_list appears to be a chronologically ordered list of
    # dicts with 'temperature' and 'date' keys -- confirm with callers.
    # Results are only printed, never returned (except the sentinel below).
    if len(temp_list) < 9:
        return 'not_enough_measure_points'
    print(temp_list[0:1])
    for i in range(len(temp_list)):
        # Need at least 9 prior readings before a window can be evaluated.
        if i < 9:
            continue
        lower_list = []
        higher_list = []
        # Compare the 6 readings before the shift window (i-8..i-3) with the
        # 3 most recent readings (i-2..i).
        for low in temp_list[i-8:i-2]:
            lower_list.append(low['temperature'])
        for high in temp_list[i-2:i+1]:
            higher_list.append(high['temperature'])
        # Main rule: all 3 recent temps exceed the previous 6 AND the newest
        # reading is at least 0.2 above the prior maximum.
        if max(lower_list) < min(higher_list) and higher_list[len(higher_list)-1] - max(lower_list) >= 0.2:
            print("Value higher than previous 6")
            print(lower_list)
            print(max(lower_list), min(higher_list), higher_list[len(higher_list)-1] - max(lower_list))
            print(higher_list, temp_list[i]['date'], temp_list[i]['temperature'])
        else:
            if i > 9:
                # Exception rules: widen to a 4-reading recent window
                # (i-3..i) against the 6 readings before it (i-9..i-4).
                larger_low = []
                larger_high = []
                larger_reduce = []
                for low in temp_list[i-9:i-3]:
                    larger_low.append(low['temperature'])
                for high in temp_list[i-3:i+1]:
                    larger_high.append(high['temperature'])
                is_removed = False
                # larger_reduce: the last 4 readings with at most the first
                # non-exceeding one discarded.
                for high in temp_list[i-3:i+1]:
                    if not is_removed and max(larger_low) >= high['temperature']:
                        is_removed = True
                        continue
                    larger_reduce.append(high['temperature'])
                if (max(larger_low) < min(larger_high)):
                    print('Exception 4 values higher than previous 6')
                    print(temp_list[i]['date'], temp_list[i]['temperature'])
                elif (max(larger_low) < min(larger_reduce)):
                    print("Exception one of 4 values lower than")
                    print(temp_list[i]['date'], temp_list[i]['temperature']) | 24004828e1c8f00072c432d252df97d41ac3ac85 | 35,072 |
def indent(s, indentation="    "):
    """
    helper function to indent text

    @param s the text, as an iterable of lines (note: passing a plain
        string would indent every *character* individually)
    @param indentation the desired indentation prefix for each line
    @return list of indented lines
    """
    return [indentation + _ for _ in s] | 59ec5d6751f906b84c2d46a51777f0831116082a | 35,074 |
def findSecondMinimumValue(self, root):
    """
    Return the second-smallest distinct value in the binary tree rooted at
    ``root``, or -1 if the tree is empty or has fewer than two distinct
    values.

    :type root: TreeNode
    :rtype: int
    """
    self.res = []
    if not root:
        return -1

    def dfs(node):
        # Pre-order traversal collecting every node value.
        if node:
            self.res.append(node.val)
            if node.left:
                dfs(node.left)
            if node.right:
                dfs(node.right)

    dfs(root)
    distinct = sorted(set(self.res))
    # BUG FIX: the original used `len(res_set) is 1`, an identity check on
    # an int that only works via CPython small-int caching; compare with ==.
    return -1 if len(distinct) == 1 else distinct[1]
def IC_fit_range(name):
    """
    Look up the spectral fit range for the given component name.

    :param name: one of 'blind', 'ISP', 'LSP', 'HSP'
    :return: two-element list [low, high]
    """
    # Rebuilt on every call so each caller gets a fresh list.
    ranges = {
        'blind': [16, 28],
        'ISP': [17, 28],
        'LSP': [17, 28],
        'HSP': [22, 28],
    }
    return ranges[name]
def FixupMimeType(mime_type):
    """Helper function that normalizes platform differences in the mime type
    returned by the Python's mimetypes.guess_type API.
    """
    # Unmapped types pass through unchanged.
    return {'image/x-png': 'image/png'}.get(mime_type, mime_type)
def insert(base_seq, positions, sub_seq):
    """
    Inserts a subsequence into a base sequence

    Characters of ``sub_seq`` overwrite ``base_seq`` at the given
    1-based ``positions``.
    """
    chars = list(base_seq)
    for idx, pos in enumerate(positions):
        chars[pos - 1] = sub_seq[idx]
    return "".join(chars)
import pathlib
def is_config_valid(config):
    """Check if the config is valid"""
    env = config["environment"]
    root = env["engine_root_path"]
    # An empty root usually means the Unreal install was never registered.
    if not root:
        print(r"Engine root key not found in config")
        print(r"This usually means that the installation of unreal was not registerd in the registry")
        print(r"It can be fixed by either Adding an entry to the registry SOFTWARE\EpicGames\Unreal Engine or re-installing UE4")
        return False
    # The registered path must actually exist on disk.
    if not pathlib.Path(root).exists():
        print("Engine Path Not found at:" + pathlib.Path(root).as_posix())
        return False
    return True
import json
def open_file(file: str) -> dict:
    """Open the file, if present, if not, return an empty dict."""
    try:
        handle = open(file)
    except FileNotFoundError:
        return {}
    with handle:
        return json.load(handle)
from sys import intern
import logging
from typing import OrderedDict
def expand_attribute_strings(
        attribute_strings,
        quote_char='\"',
        missing_value="",
        usecols=None):
    """The last column of GTF has a variable number of key value pairs of the
    format: "key1 value1; key2 value2;" Parse these into a dictionary mapping
    each key onto a list of values, where the value is None for any row where
    the key was missing.

    Args:
        attribute_strings (list of str):
        quote_char (str): Quote character to remove from values
        missing_value (any): If an attribute is missing from a row, give it this
            value.
        usecols (list of str or None): If not None, then only expand columns
            included in this set, otherwise use all columns.

    Returns:
        OrderedDict of str -> list: one list per attribute name (in order of
        first appearance), each of length len(attribute_strings); rows that
        lack the attribute hold ``missing_value`` and repeated attributes
        are comma-joined.
    """
    n = len(attribute_strings)

    extra_columns = {}
    column_order = []

    #
    # SOME NOTES ABOUT THE BIZARRE STRING INTERNING GOING ON BELOW
    #
    # While parsing millions of repeated strings (e.g. "gene_id" and "TP53"),
    # we can save a lot of memory by making sure there's only one string
    # object per unique string. The canonical way to do this is using
    # the 'intern' function. One problem is that Py2 won't let you intern
    # unicode objects, so to get around this we call intern(str(...)).
    #
    # It also turns out to be faster to check interned strings ourselves
    # using a local dictionary, hence the two dictionaries below
    # and pair of try/except blocks in the loop.
    column_interned_strings = {}
    value_interned_strings = {}

    for (i, attribute_string) in enumerate(attribute_strings):
        for kv in attribute_string.split(";"):
            # We're slicing the first two elements out of split() because
            # Ensembl release 79 added values like:
            #   transcript_support_level "1 (assigned to previous version 5)";
            # ...which gets mangled by splitting on spaces.
            parts = kv.strip().split(" ", 2)[:2]

            if len(parts) != 2:
                continue

            column_name, value = parts

            try:
                column_name = column_interned_strings[column_name]
            except KeyError:
                column_name = intern(str(column_name))
                column_interned_strings[column_name] = column_name

            if usecols is not None and column_name not in usecols:
                continue

            try:
                column = extra_columns[column_name]
            except KeyError:
                # First time this attribute is seen: lazily create its
                # full-length column pre-filled with missing_value.
                column = [missing_value] * n
                extra_columns[column_name] = column
                column_order.append(column_name)

            # Strip the surrounding quote character from quoted values only.
            value = value.replace(quote_char, "") if value.startswith(quote_char) else value

            try:
                value = value_interned_strings[value]
            except KeyError:
                value = intern(str(value))
                value_interned_strings[value] = value

            # if an attribute is used repeatedly then
            # keep track of all its values in a list
            old_value = column[i]
            if old_value is missing_value:
                column[i] = value
            else:
                column[i] = "%s,%s" % (old_value, value)

    logging.info("Extracted GTF attributes: %s" % column_order)
    return OrderedDict(
        (column_name, extra_columns[column_name])
        for column_name in column_order) | 42f36e0260fd80b0d0d37b489b34ea4a144094d3 | 35,083 |
def getMultiFastaOffsets(fasta):
    """
    Scan a multi-FASTA file and return the character offset of the first
    sequence character following each '>' header line.

    (The original docstring claimed this "returns columns iteratively",
    which did not match the behavior.)

    :param fasta: path to the FASTA file
    :return: list of integer offsets, one per header encountered
    """
    offsets = []
    # "with" guarantees the handle is closed even if reading fails
    # (the original leaked the handle on an exception).
    with open(fasta, 'r') as f:
        i = 0
        j = f.read(1)
        while j != '':
            i += 1
            if j == '>':
                # Extra increment accounts for the newline consumed by the
                # inner loop's final (uncounted) read.
                i += 1
                while f.read(1) != '\n':
                    i += 1
                offsets.append(i)
            j = f.read(1)
    return offsets
def toft_schabel():
    """
    The geometry of this phantom is based on the 2D phantom shown in [3] and [4].
    (Maybe this is the original Shepp-Logan head phantom?)
    In [5], the intensities of the Shepp-Logan are modified
    to yield higher contrast in the image.
    It is known as 'Modified Shepp-Logan' of the `phantom` function of "Image Processing Toolbox" for MATLAB
    In [6], it is extended to the 3D version. The parameters are as below.
    The formula of geometry transfom for this option is the same as of [6] to reproduce the result,
    while for other options, kak-slaney and yu-ye-wang, it is different.

    Returns:
        list of 10 ellipsoid parameter rows, each
        [a, b, c, x0, y0, z0, phi1, phi2, phi3, A] where a/b/c are the
        semi-axes, (x0, y0, z0) the center, phi1-3 Euler angles in degrees,
        and A the additive intensity of the ellipsoid.

    Ref:
    [3] Kak AC, Slaney M, Principles of Computerized Tomographic Imaging, 1988. p.55
    [4] Jain, Anil K., Fundamentals of Digital Image Processing, Englewood Cliffs, NJ, Prentice Hall, 1989, p. 439.
    [5] Toft P, The Radon Transform: Theory and Implementation, 1996.
    [6] Matthias Schabel (2021). 3D Shepp-Logan phantom
        (https://www.mathworks.com/matlabcentral/fileexchange/9416-3d-shepp-logan-phantom),
        MATLAB Central File Exchange. Retrieved April 29, 2021.
    """
    #                 a       b       c     x0      y0      z0    phi1  phi2 phi3   A
    # -----------------------------------------------------------------
    ells = [
        [0.6900, 0.9200, 0.810, 0, 0, 0, 0, 0, 0, 1.0],
        [0.6624, 0.8740, 0.780, 0, -0.0184, 0, 0, 0, 0, -0.8],
        [0.1100, 0.3100, 0.220, 0.22, 0, 0, -18, 0, 10, -0.2],
        [0.1600, 0.4100, 0.280, -0.22, 0, 0, 18, 0, 10, -0.2],
        [0.2100, 0.2500, 0.410, 0, 0.35, -0.15, 0, 0, 0, 0.1],
        [0.0460, 0.0460, 0.050, 0, 0.1, 0.25, 0, 0, 0, 0.1],
        [0.0460, 0.0460, 0.050, 0, -0.1, 0.25, 0, 0, 0, 0.1],
        [0.0460, 0.0230, 0.050, -0.08, -0.605, 0, 0, 0, 0, 0.1],
        [0.0230, 0.0230, 0.020, 0, -0.606, 0, 0, 0, 0, 0.1],
        [0.0230, 0.0460, 0.020, 0.06, -0.605, 0, 0, 0, 0, 0.1],
    ]
    return ells | bbdda2e68e744517e47c26dc24b100cebe185878 | 35,086 |
import os
def pid_touch(path):
    """ Touch the pid. """
    # Update the pidfile's access/modification times to "now"; re-raise a
    # clearer OSError if the file cannot be touched.
    try:
        os.utime(path, None)
    except OSError:
        raise OSError("cannot utime pidfile %s" % path)
    return True
def get_node_text(nodes):
    """Recursively read text from a node tree."""
    format_tags = {"literal": "`", "strong": "**", "emphasis": "*"}
    pieces = []
    for node in nodes:
        is_element = node.nodeType == node.ELEMENT_NODE
        if is_element and node.tagName in format_tags:
            # Inline formatting elements are wrapped in their markup chars.
            marker = format_tags[node.tagName]
            pieces.append(marker + get_node_text(node.childNodes) + marker)
        elif node.nodeType == node.TEXT_NODE:
            pieces.append(node.data)
        elif is_element:
            # Any other element: descend without adding markup.
            pieces.append(get_node_text(node.childNodes))
    return "".join(pieces)
def normalized(z):
    """Returns the complex number with the same argument/phase
    but with a magnitude of 1."""
    try:
        return z / abs(z)
    except ZeroDivisionError:
        # Re-raise with a friendlier message for the zero input.
        raise ZeroDivisionError("Cannot normalize 0.")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.