content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from typing import Iterable
from typing import Callable
def compose_filters(filters: Iterable[Callable[..., bool]]) -> Callable[..., bool]:
    """
    Compose a sequence of filter functions into a single predicate.

    The composed function returns True only when every filter returns True
    for the given arguments. With no filters it always returns True; a
    single filter is returned unchanged.
    """
    filter_tuple = tuple(filters)
    if len(filter_tuple) == 0:
        return lambda *args: True
    if len(filter_tuple) == 1:
        return filter_tuple[0]
    return lambda *args: all(f(*args) for f in filter_tuple)
|
f6e9700c4ebd691f403e747e55a31e8876aea9d0
| 54,884
|
def get_action_response(author, game):
    """Build a human-readable description of the action card just played.

    :param author: display name of the player who played the card
    :param game: game state exposing ``current_card`` (colour, value) and
        ``previous_player.player_name``
    :return: message string describing the effect of the card
    """
    colour, value = game.current_card
    target = game.previous_player.player_name
    if colour in ("wild", "wild+4"):
        message = f"{author} changed the colour to {value}"
        if colour != "wild":
            message += f" and made {target} draw 4 cards"
        return message
    if value == "+2":
        return f"{author} made {target} draw 2 cards"
    if value == "skip":
        return f"{author} skipped {target}"
    if value == "reverse":
        return f"{author} reversed the turn rotation"
    return f"{author} played a card"
|
5d46b43d56a65a78762e99cf018eed9a4ab87154
| 54,886
|
def concatenate(elems, operator='OR', class_to_embed_elem=None):
    """
    Fold a list of parser elements into one combined element.

    Order matters: with 'OR' (``|``) the first match wins, with
    'LONGEST_OR' (``^``) the longest match wins, and 'AND' (``&``)
    requires all elements in sequence.

    :param elems: non-empty list of elements to combine
    :param operator: 'OR', 'AND' or 'LONGEST_OR'
    :param class_to_embed_elem: optional class used to wrap each element
    :return: the combined element
    """
    combine = {
        'OR': lambda a, b: a | b,
        'AND': lambda a, b: a & b,
        'LONGEST_OR': lambda a, b: a ^ b,
    }.get(operator)
    wrap = class_to_embed_elem if class_to_embed_elem else (lambda e: e)
    combined = wrap(elems[0])
    for elem in elems[1:]:
        wrapped = wrap(elem)
        if combine is not None:
            combined = combine(combined, wrapped)
    return combined
|
dfebffaa9a9e3ee4c6e28325bed417805e08e810
| 54,891
|
def _convert_to_string(srs):
"""Convert series to string.
Args:
srs (pd.Series): A series.
Returns:
pd.Series: An series with string values.
"""
return srs.map(lambda x: str(x))
|
6451ae6cace844e3eb57340e7babb23fc76370e8
| 54,893
|
import time
import math
def last_modified_from_epoch_ns(epoch_ns):
    """
    Convert a Unix timestamp to an IMF-Fixdate timestamp.

    :param epoch_ns: Unix time, expressed as an integral number of
        nanoseconds since the epoch (not the usual Unix convention of a
        *real* number of *seconds*).
    :returns: Last-Modified header value in IMF-Fixdate format as specified
        in RFC 7231 section 7.1.1.1.
    """
    # Round up to the next whole second so the reported time is never
    # earlier than the actual modification time.
    whole_seconds = math.ceil(epoch_ns / 1000000000.0)
    return time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(whole_seconds))
|
556f545de60ac5faeabdfb6b537c81f2b5f133c3
| 54,896
|
import re
def template_title_regex(tpl_page):
    """
    Return a regex that matches to variations of the template title.

    It supports the transcluding variant as well as localized namespaces and
    case-insensitivity depending on the namespace.

    @param tpl_page: The template page
    @type tpl_page: Page
    """
    # Namespace object for the page; id 10 is the Template namespace.
    ns = tpl_page.site.namespaces[tpl_page.namespace()]
    # Templates are transcluded without a namespace prefix, so the prefix
    # group is made optional ('?') for the Template namespace only.
    marker = '?' if ns.id == 10 else ''
    title = tpl_page.title(with_ns=False)
    if ns.case != 'case-sensitive':
        # First letter is case-insensitive in this namespace: accept both.
        title = '[{}{}]{}'.format(re.escape(title[0].upper()),
                                  re.escape(title[0].lower()),
                                  re.escape(title[1:]))
    else:
        title = re.escape(title)
    # '|'.join(ns) joins the namespace's aliases into the prefix alternation
    # -- presumably iterating a Namespace yields its localized names; TODO
    # confirm against pywikibot's Namespace API.
    return re.compile(r'(?:(?:%s):)%s%s' % ('|'.join(ns), marker, title))
|
2ab3a49afda3aa06ee21fde3a8e4972b9ac0d8d3
| 54,898
|
def _trim_barcode(barcode: str) -> str:
"""Trim the trailing 1 or 2 ASCII NULL (0x00) characters off barcode."""
if barcode[11] != chr(0):
return barcode
if barcode[10] != chr(0):
return barcode[:11]
return barcode[:10]
|
141763a632247375ba70621b1ad269a3d90c0f42
| 54,902
|
def parse_int_list(string):
    """
    Parse a string of numbers and ranges into a list of integers.

    Numbers are separated by commas and/or spaces; ranges use a dash and
    are inclusive of both endpoints.

    Example:
        parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
    """
    values = []
    # Normalising commas to spaces lets a single split handle both separators
    # and silently drops empty tokens.
    for token in string.replace(",", " ").split():
        if "-" in token:
            start, _, end = token.partition("-")
            values.extend(range(int(start.strip()), int(end.strip()) + 1))
        else:
            values.append(int(token.strip()))
    return values
|
837086eacea64517cd57a3a127ccc63071d137af
| 54,904
|
from typing import List
def parse_accuracies_from_log_file(file_name: str) -> List[str]:
    """
    Read a log file produced by VISSL and extract the list of accuracies
    as logged by VISSL (a string representation of a dictionary).
    """
    tag = "accuracy_list_meter, value:"
    results: List[str] = []
    with open(file_name, "r") as log_file:
        for line in log_file:
            position = line.find(tag)
            if position >= 0:
                # Keep everything after the tag, trimmed of whitespace.
                results.append(line[position + len(tag):].strip())
    return results
|
6e595e08f49e303d79ff675e639c8ea46add188b
| 54,906
|
def category_directory_path(instance, filename):
    """Return the upload path for a photo: "<category name>/<filename>".

    Args:
        instance: model instance exposing ``category.name``
        filename: original name of the uploaded file (previously ignored,
            which made every upload collide on a literal "(unknown)" path)
    """
    return f"{instance.category.name}/{filename}"
|
4f510406794be24341cb13decbecbf399ae7b435
| 54,907
|
def wrap_lines(lines, width=80, sep=' ', ellipsis="...", force_ellipsis=False, rjust_ellipsis=False):
    """ Wraps given list of lines into a single line of specified width
    while they can fit. Parts are separated with sep string.
    If first line does not fit and part of it cannot be displayed,
    or there are other lines that that cannot be displayed, displays ellipsis string
    at the end (may squeeze line even more to fit into max width).
    If rjust_ellipsis=True, puts ellipsis at the rightest possible position,
    filling gaps with spaces. Otherwise sticks it to the text.
    Returns pair (<number of lines displayed>, <result full line>).
    If first line does not fully fit and some part of it cannot be displayed,
    first number in the pair will be negative and it's abs will be equal to
    amount of characters that are displayed.
    """
    if not lines:
        return 0, None
    # Single line that fits entirely: no ellipsis needed.
    if not force_ellipsis and len(lines) == 1 and len(lines[0]) <= width:
        return 0, lines[0]
    result = lines[0]
    # Even the first line (plus the ellipsis) does not fit: truncate it and
    # report the visible character count as a negative number.
    if len(result) + len(ellipsis) > width:
        result = result[:width-len(ellipsis)] + ellipsis
        return -(width - len(ellipsis)), result
    # Greedily append further lines while they still fit together with a
    # separator and a potential trailing ellipsis.
    to_remove = 1
    while len(lines) > to_remove and len(result) + len(sep) + len(lines[to_remove]) + len(ellipsis) <= width:
        result += sep + lines[to_remove]
        to_remove += 1
    # Every line was consumed: drop the ellipsis unless explicitly forced.
    if not force_ellipsis and len(lines) == to_remove:
        to_remove = 0
    if to_remove:
        if rjust_ellipsis:
            # Pad with spaces so the ellipsis lands at the right edge.
            result = result.ljust(width-len(ellipsis))
        result += ellipsis
    return to_remove, result
|
ecf2dac1c693687b19c058f13187ba1a4968c0de
| 54,911
|
def calculatePointsWon(difficulty_level, word):
    """
    Compute the score for a word: its length scaled by the difficulty level.

    Returns an integer.
    """
    score = difficulty_level * len(word)
    return score
|
cc5dcad0974bcc4f2b30674672e8a34cb093c916
| 54,912
|
def make_swagger_name(group: str, version: str, name: str) -> str:
    """
    Create a properly formatted swagger name for an object.

    :param group: group the object belongs to; when None, the group segment
        is omitted
    :param version: version the object belongs to
    :param name: name of the object (class)
    :return: dot-joined combination of the inputs; can be fed to
        process_swagger_name() to recover the original parts
    """
    parts = [version, name] if group is None else [group, version, name]
    return ".".join(parts)
|
fe2a4331477f7cfc6daae146537edb6884ed535a
| 54,914
|
def is_not_null(value):
    """
    Test for None and empty string.

    :param value: any value
    :return: True if value is neither None nor empty when stringified
    """
    if value is None:
        return False
    return len(str(value)) > 0
|
2da5a06eccd5c87510c7cf6ac7b28947c361f805
| 54,921
|
def flatten(array, level=1):
    """Flatten a nested list by the given number of levels."""
    for _ in range(level):
        merged = []
        for sublist in array:
            merged.extend(sublist)
        array = merged
    return array
|
e126110e4c3b49fc7320faab24e8715d7173e5ef
| 54,923
|
def disintegrate(obr):
    """Split an image into its R, G and B channel images; return them as a list."""
    red, green, blue = obr.split()
    return [red, green, blue]
|
a7ec300d7089f2bde7a09f798252cdb1ca2b3443
| 54,925
|
import random
import string
def gen_random_char_string(n, base_s=""):
"""Generate a random character string of length n"""
if n == 0:
return base_s
c = random.choice(string.ascii_letters)
return gen_random_char_string(n-1, base_s + c)
|
8bb064d270d29660d80c79022af83a33d363a762
| 54,934
|
def _add_leading_dimension(x):
"""Add a new dimension to an array-like"""
return x[None, ...]
|
60929db6b5d2ddff5a94d00f475a83d8bb267f44
| 54,935
|
def is_whitespace(string):
    """
    Return True when the string is empty or contains only whitespace.
    """
    return not string.strip()
|
2dc6461b26d0da8c91eefa77026cc1f026f6ab13
| 54,943
|
def tag_key(tagname: str) -> int:
    """
    Convert a weekly tag name ("w_YYYY_NN" or "w_YYYY_N") into a sortable key.

    "w_2017_1" -> 201701
    "w_2017_01" -> 201701
    "w_2017_10" -> 201710
    """
    parts = tagname.split("_")
    year, week = int(parts[1]), int(parts[2])
    return year * 100 + week
|
91f3559519299ced3952a430dfdc166a42997d0b
| 54,945
|
def _test_metadata(line):
"""Returns a tuple indicating whether a line contains recipe metadata
as well as the attribute name and value.
A line is considered to contain recipe metadata if it contains a colon
and also contains some non-whitespace before the colon. The attribute name
comes before the colon and the value comes after.
"""
index = line.find(':')
if index >= 0:
attribute = line[:index].strip()
value = line[index+1:].strip()
if attribute:
return True, attribute, value
else:
return False, '', ''
else:
return False, '', ''
|
f5de783fdcea546a4432ad8bcffcbf26c768d38a
| 54,948
|
def norm_name(name):
    """Normalize a colour name: uppercase it and spell '#' as 'HEX'."""
    return name.replace("#", "HEX").upper()
|
2640e355ae494a2f9ac5ac93e8b150f9b9c10e94
| 54,952
|
def blaney_criddle(tmean, p, k=0.85):
    """Potential evaporation after Blaney & Criddle (1952).

    Implements PE = k * p * (0.46 * Ta + 8.13), equation 6 in Xu & Singh
    (2001).

    Parameters
    ----------
    tmean: pandas.Series, optional
        average day temperature [°C]
    p: pandas.Series/float, optional
        bright sunshine (hour day-1)
    k: float, optional
        calibration coefficient [-]

    Returns
    -------
    pandas.Series containing the calculated evaporation.

    Examples
    --------
    >>> et_blaney_criddle = blaney_criddle(tmean)

    References
    ----------
    .. [blaney_1952] Blaney, H. F. (1952). Determining water requirements in
        irrigated areas from climatological and irrigation data.
    .. [xu_2001] Xu, C. Y., & Singh, V. P. (2001). Evaluation and
        generalization of temperature-based methods for calculating
        evaporation. Hydrological processes, 15(2), 305-319.
    """
    return k * p * (0.46 * tmean + 8.13)
|
7666d7d4d36dce06331227331391bc5b4918cc16
| 54,958
|
def get_lomb_signif(lomb_model):
    """
    Return the significance (in sigmas) of the first frequency from a fitted
    Lomb-Scargle model.
    """
    first_frequency = lomb_model['freq_fits'][0]
    return first_frequency['signif']
|
876c29687fa3a538d859425a0d7030d17b7d9b2e
| 54,960
|
def process(data):
    """
    Lower-case and reverse a string.

    Arguments:
        data (str): The string to transform
    Returns:
        result (str): The lower-cased, reversed string
    """
    return data[::-1].lower()
|
a7ef9633dc15aa4c6874f5c75addf11068a112c1
| 54,961
|
def compute_bounds(values):
    """
    Return the bounding box of a list of 3-d offsets as
    ((min_i, max_i), (min_j, max_j), (min_k, max_k)).
    """
    i_vals = [offset[0] for offset in values]
    j_vals = [offset[1] for offset in values]
    k_vals = [offset[2] for offset in values]
    return ((min(i_vals), max(i_vals)),
            (min(j_vals), max(j_vals)),
            (min(k_vals), max(k_vals)))
|
49cf046c5655655487ef612ef9af92d4bf971fad
| 54,962
|
import torch
def tokens_to_lang(tokens, vocab, skip_tokens=None, join=True):
    """
    Convert token id sequence(s) into human-readable words.

    Accepts either a single sequence of ints (or a torch tensor) or a list
    of such sequences. Tokens in ``skip_tokens`` are dropped; when ``join``
    is True the words are space-joined into one string per sequence.
    """
    skip = {} if skip_tokens is None else skip_tokens
    def decode(seq):
        if isinstance(seq, torch.Tensor):
            seq = seq.tolist()
        words = [vocab.index2word(tok) for tok in seq if tok not in skip]
        return " ".join(words) if join else words
    if isinstance(tokens[0], int):
        # A flat list of ints: a single sequence was provided.
        return decode(tokens)
    # Otherwise a list of sequences was provided.
    return [decode(seq) for seq in tokens]
|
934153b3062e99a069b70109c332402c8099c378
| 54,964
|
import random
def generate_ksat_expression(n, m, k):
"""
Generate an arbitrary k-SAT expression according to the given parameters.
Args:
n: The number of groups
m: The number of variables in a group
k: The number of variables
Returns: A Boolean expression
"""
if m > k:
raise ValueError("m > k not possible for kSAT")
alphabet = []
for i in range(k):
alphabet.append(chr(97 + i))
expression = ""
for i in range(n):
literals = random.sample(alphabet, m)
expression += " and ({}".format(literals[0])
for l in literals[1:]:
if random.random() < 0.5:
expression += " or not({})".format(l)
else:
expression += " or {}".format(l)
expression += ")"
return expression.lstrip("and ")
|
06d75d1cc1879e3908c76d1962fb54b8d17bfba9
| 54,966
|
def checkCurrJackClicked(game, clickX, clickY):
    """
    Return the points card whose attached jack was clicked, else None.

    All jacks attached to the current player's points cards are considered
    theirs; the first jack whose image contains the click wins.
    """
    for card, jacks in game.currPlayer.jacks.items():
        for jack in jacks:
            if jack.imageObj.collidepoint(clickX, clickY):
                return card
    return None
|
b4c3f12fabd0f99035641be5078865ad386bfff7
| 54,973
|
def format_d_mon_yyyy(date):
    """Return the date formatted like "1 February 2016" (day not zero-padded)."""
    return f"{date.day} {date.strftime('%B %Y')}"
|
8fcf59aa1ec93fe1ddc57a33d8ab6bea10ace269
| 54,977
|
def operator(o):
    """Extract the head (outermost operator) of a symbolic expression."""
    head = o.func
    return head
|
faf102303a274f06e5093eb220cdce86a5581234
| 54,980
|
def pivot(v):
    """
    :param v: vector (array-like supporting element-wise abs)
    :return: the entry with the largest absolute value, original sign kept;
        ties are broken by the first occurrence. Corresponds to the main
        direction for each column of an orientation matrix.
    """
    magnitudes = list(abs(v))
    return v[magnitudes.index(max(magnitudes))]
|
99ea5099cd02296f92cf0e5ad9b620ba5d8e3d8f
| 54,988
|
def palindrome(value: str) -> bool:
    """
    Determine whether a word or phrase is a palindrome, ignoring case
    and spaces.

    :param value: A string
    :return: True when the normalized string reads the same backwards
    """
    normalized = value.lower().replace(" ", "")
    return normalized == normalized[::-1]
|
4c4515c1d29ab110e57e8323994f33355cb66163
| 54,991
|
def _rescale(x, xlim, ylim):
"""Rescale values to new bounds.
Parameters
----------
x : ndarray
Values to rescale
xlim : tuple
Original value bounds (xmin, xmax)
ylim : float
New value bounds (ymin, ymax)
Returns
-------
ndarray
Rescaled values
"""
m = (ylim[1] - ylim[0]) / (xlim[1] - xlim[0])
c = ylim[1] - m * xlim[1]
y = m * x + c
return y
|
94d388a4d9a076cfc3c3ae21768b60ab0a594e5c
| 54,992
|
def trim_obd_value(v):
    """
    Strip the 4-character header from an OBD response.

    :param str v: raw response; None/empty/short values yield ''
    :return str: the payload after the first four characters
    """
    if v and len(v) >= 4:
        return v[4:]
    return ''
|
69e4243d235d701475e8ba9e474f43d36555b2d1
| 54,995
|
def make_list(n):
    """ Return a list of n zeros, for the given argument n. """
    return [0] * n
|
9bd25f29007304aa16474d502ddb6598945a0bbe
| 55,006
|
def _adjust_component(component: int) -> int:
""" Developed by Trong Nguyen, 100848232
Reviewed by Ahmed Abdellah, 101163588
Return the midpoint value of the quadrant in which an input component
lies as defined by the range of 0 to 255, inclusive in four equal-size
quadrants.
>>> _adjust_component(r)
"""
if component < 64:
midpoint = 31
elif component < 128:
midpoint = 95
elif component < 192:
midpoint = 159
else:
midpoint = 223
return midpoint
|
34eb2142a97f515efeabf5b3258d2112e2a4a89c
| 55,009
|
import yaml
def _load_config_file(config_path):
    """Load a YAML config file.

    Args:
        config_path: (str) Path to config.yaml file
    Returns:
        Dictionary with configs from config.yaml
    """
    with open(config_path, 'r') as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
|
b0ff510b58bd2b05e19a8f1fe4eee5b8769c344e
| 55,012
|
def build_intent_for_api(intent_name, questions, answer):
    """Build the structure of an intent for the Dialogflow API.

    :param intent_name: str, name of intent
    :param questions: iterable of training questions
    :param answer: str, answer to the question
    :return: dict, intent payload for the API
    """
    training_phrases = [{'parts': [{'text': question}]} for question in questions]
    return {
        'display_name': intent_name,
        'messages': [{'text': {'text': [answer]}}],
        'training_phrases': training_phrases,
    }
|
7008918d02b404320ca58e970aec6c2880759ebd
| 55,014
|
from typing import Any
import uuid
def is_valid_uuid(uuid_str: Any) -> bool:
"""
Return true if `uuid_str` is a value GUID/UUID.
Parameters
----------
uuid_str : Any
String to test
Returns
-------
bool
True if valid GUID/UUID.
"""
if not uuid_str:
return False
try:
uuid.UUID(uuid_str)
except (ValueError, TypeError):
return False
return True
|
2cb9607c6cd42910179e2d09b5170c2dfaf95126
| 55,023
|
def read_file(file_path):
    """Read a pinned Python requirements file.

    Empty lines, comments ('#...') and editable requirements ('-e ...') are
    ignored; every remaining line must look like ``project==version``.

    :param str file_path: path to requirements file
    :returns: mapping from a project to its pinned version
    :rtype: dict
    :raises ValueError: if a non-ignored line is not a ``project==version`` pin
    """
    data = {}
    with open(file_path) as file_h:
        for line in file_h:
            line = line.strip()
            if not line or line.startswith('#') or line.startswith('-e'):
                continue
            parts = line.split('==')
            # Validate before unpacking: previously a line without '==' raised
            # a bare tuple-unpacking ValueError instead of the intended
            # "Unexpected syntax" message.
            if len(parts) != 2 or not parts[1]:
                raise ValueError("Unexpected syntax '{0}'".format(line))
            data[parts[0]] = parts[1]
    return data
|
4a3cc35ad1a59f6fb569ffb01a30b729a6963164
| 55,031
|
def _create_match(rs, ttp):
""" Takes a ruleset and ttp and returns (name, rs_path, ttp_path) """
return rs[0] + "_" + ttp[0], rs[1], ttp[1]
|
adb8677db626e4b64178295942d0613e446438d1
| 55,034
|
def mass_to_mix(value, param, unit='ppb', **kwargs):
    """Convert units from ug/m3 to ppb or ppm. The conversion assumes an ambient
    pressure of 1 atmosphere and ambient temperature of 25 degC.

    The lookup table holds the mass of 1 ppb of each gas in ug/m3, so the
    mixing ratio is obtained by *dividing* by the factor.

    :param value: the concentration in ug/m3
    :param param: the parameter to convert {'co', 'no', 'no2', 'so2', 'o3'};
        unknown parameters return the input value unchanged
    :param unit: the desired output unit {'ppb', 'ppm'}
    :type value: float
    :type param: string
    :type unit: string
    :returns: value
    :Example:

    >>> import openaq
    >>> conc_ugm3 = 100
    >>> conc_ppb = openaq.utils.mass_to_mix(conc_ugm3, param='co', unit='ppb')
    >>> round(conc_ppb, 2)
    87.34
    """
    # ug/m3 per ppb for each gas at 25 degC and 1 atm.
    lookup = {
        'co': 1.145,
        'no': 1.25,
        'no2': 1.88,
        'so2': 2.62,
        'o3': 2.0
    }
    param = param.lower()
    if param not in lookup.keys():
        return value
    value = value / lookup[param]
    if unit.lower() == 'ppm':
        # 1 ppm == 1000 ppb
        value *= 1e-3
    return value
|
a196400d352420e99da00a6e5df82b36f79e5c0e
| 55,035
|
import itertools
def flatten_list(items):
    """
    Flatten a list of lists using itertools (faster than nested loops).

    Args:
        items: the list of lists [[item1], [item2]]
    Returns:
        The flattened list, or None when items is None or empty (preserving
        the original implicit-None behaviour for those inputs).
    """
    # Check for None *before* calling len(): the previous condition order
    # raised TypeError for items=None instead of returning None.
    if items is not None and len(items) > 0:
        return list(itertools.chain.from_iterable(items))
|
5711480009a12e0cab72fecc8b51bc23a750e345
| 55,036
|
def publish_msg(rmq, f_name, **kwargs):
    """Function: publish_msg

    Description: Publish the contents of a file as a RabbitMQ message.

    Arguments:
        (input) rmq -> RabbitMQ Publisher instance
        (input) f_name -> Full path and file name of test file.
        (output) status -> True|False - Status of publish.
        (output) err_msg -> Error message, or None on success.
    """
    with open(f_name, "r") as msg_file:
        body = msg_file.read()
    if rmq.publish_msg(body):
        return True, None
    return False, "\tError: Failed to publish message to RabbitMQ."
|
abe8ef099b53966c34c3a0535372b5916f5aa3ec
| 55,040
|
def get_connection_retry_time(time: int) -> int:
    """Return the next connection-retry wait time.

    The wait doubles on every retry and is capped once it reaches 16.

    Args:
        time (int): current retry wait time
    Returns:
        int: next retry wait time
    """
    if time == 16:
        return time
    return time * 2
|
a2c80fa9e265fbb543dd1441b739d2a1f85058a4
| 55,041
|
def _band_power(mean_ds, f, center, bandwidth=2):
"""Sum DS values over a band of frequencies."""
low_f = center - bandwidth
high_f = center + bandwidth
f_idx = (low_f <= f) & (f <= high_f)
return mean_ds[f_idx].sum(axis=0)
|
5c11b912908f677f123b99c67018b0bcaee9658a
| 55,042
|
def add_s(i):
    """ Return a plural 's' when the count 'i' is greater than one. """
    return 's' if i > 1 else ''
|
ed4b88d750f69b305f5dc3d4c488bb0e4ecb8eff
| 55,045
|
def scanner(height, time):
    """Return the position of a scanner of the given height at a given time.

    The scanner bounces between positions 0 and height-1 with period
    2*(height-1). On the return sweep the position mirrors back down
    (previously this branch returned the constant period value, which is
    not a valid position at all).
    """
    period = (height - 1) * 2
    offset = time % period
    if offset > height - 1:
        # On the way back up: mirror the offset into the 1..height-2 range.
        return period - offset
    return offset
|
5c0e2e29955368bf789b66be66dd67a0e0fcf189
| 55,049
|
from pathlib import Path
def test_image_path() -> Path:
    """Locate and return the test.png fixture that sits next to this module."""
    image_file: Path = Path(__file__).parent / "test.png"
    assert image_file.is_file()
    return image_file
|
7c9ca81d5b5325d2f0e44ba194bb9d470e7447ea
| 55,052
|
def start_stop_service(connection, login, password, name, id, address, action="START", error_msg=None):
    """Start or stop registered service.

    Args:
        connection(object): MicroStrategy connection object returned by
            'connection.Connection()'
        login (string): login for SSH operation
        password (string): password for SSH operation
        name(string): name of the service
        id(string): id of the service
        address(string): address of the node the service runs on
        action(string): action to do on the service (available are "START" and "STOP")
        error_msg (string, optional): Custom Error Message for Error Handling.
            NOTE(review): currently unused by this function -- confirm whether
            callers rely on it before removing.
    Returns:
        Complete HTTP response object.
    """
    # Credentials and target are sent in the request body; the endpoint
    # performs the service control operation server-side.
    body = {
        "name": name,
        "id": id,
        "action": action,
        "address": address,
        "login": login,
        "password": password
    }
    response = connection.session.post(url=connection.base_url + '/api/registrations/services/control',
                                       json=body)
    return response
|
f358a366945bfac44f0ff6202a8f88004e67df89
| 55,054
|
import hashlib
def password_hasher(username, pwd):
    """ Generate a unique SHA-256 hex digest of a given auth combo.

    Accepts ``str`` (encoded as UTF-8) or ``bytes`` for both arguments;
    previously ``str`` inputs raised TypeError because hashlib.update()
    requires bytes.
    """
    if isinstance(username, str):
        username = username.encode("utf-8")
    if isinstance(pwd, str):
        pwd = pwd.encode("utf-8")
    hasher = hashlib.sha256()
    hasher.update(username)
    hasher.update(pwd)
    return hasher.hexdigest()
|
27d48b1593d635d06a6c536f65ec61d58373eb0e
| 55,055
|
import torch
from typing import Tuple
def to_3dim(X: torch.Tensor, target_size: Tuple[int, int, int], dtype=torch.float32) -> torch.Tensor:
    """
    Rearrange a data matrix X of size (n_styles*dim_x, n_contents)
    into a tensor of size (n_styles, n_contents, dim_x).

    Args:
        - X: torch.Tensor of 2dim data matrix
        - target_size: tuple of n_styles, n_contents, dim_x
        - dtype: dtype of the returned tensor (default: torch.float32)
    """
    assert X.ndim == 2
    n_styles, n_contents, dim_x = target_size
    assert X.shape[0] == n_styles * dim_x
    assert X.shape[1] == n_contents
    # Row s*dim_x + d, column c maps to target[s, c, d]; a reshape plus a
    # transpose replaces the previous O(n_styles * n_contents) Python loop.
    return X.reshape(n_styles, dim_x, n_contents).permute(0, 2, 1).contiguous().to(dtype)
|
333386b5abd60d93daa378b3c5185923b31b4900
| 55,057
|
def remove_duplicates(duplist):
    """Remove all duplicates from a list, keeping one of each item.

    Note: order is not preserved (set-based deduplication).
    """
    return list(set(duplist))
|
0f78f897e3e829734e60dd95828af24f6898957e
| 55,059
|
def convert(number: int) -> str:
    """
    Similar to FizzBuzz: build a string from the factors of the given number.

    Appends 'Pling' for factor 3, 'Plang' for 5 and 'Plong' for 7; when the
    number has none of these factors, its decimal representation is returned.
    """
    parts = [sound
             for factor, sound in ((3, 'Pling'), (5, 'Plang'), (7, 'Plong'))
             if number % factor == 0]
    return "".join(parts) or str(number)
|
df785d42c4341a203e53e6c4da3b780347d67a43
| 55,060
|
def normalize_case(s):
    """
    Convert to lower case if possible; used internally to allow keys that
    are not strings. Prefers casefold (Python >= 3.3) over lower; values
    supporting neither are returned unchanged.
    """
    for method_name in ("casefold", "lower"):
        method = getattr(s, method_name, None)
        if method is not None:
            return method()
    return s
|
b913f620c28fd90e72bb761b4a80f1d84d60305f
| 55,064
|
import platform
from pathlib import Path
def path_to_python(basedir: str, envname: str) -> str:
    """Return the path to the virtualenv's python executable.

    Windows keeps executables under "Scripts"; every other platform uses "bin".
    """
    bin_dir = "Scripts" if platform.system() == "Windows" else "bin"
    return str(Path(basedir) / envname / bin_dir / "python")
|
e413f158df852b1e62c9dfca132e2c518acc0bba
| 55,065
|
def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):
    """
    Takes as input two sequences seq_x and seq_y whose elements share a common
    alphabet with the scoring matrix scores. The function computes and
    returns the alignment matrix for seq_x and seq_y. If global_flag is True,
    each entry of the matrix can be negative (global alignment). If
    global_flag is False, then we will force each entry to be non-negative
    (local alignment).
    """
    rows = len(seq_x)
    cols = len(seq_y)
    # (rows+1) x (cols+1) DP table; entry [i][j] is the best score aligning
    # the first i elements of seq_x with the first j elements of seq_y.
    alignment_matrix = [[0 for dummy_j in range(cols+1)] for dummy_i in range(rows+1)]
    # First column: seq_x prefix aligned entirely against gaps ('-').
    for idx_i in range(1, rows+1):
        score = alignment_matrix[idx_i-1][0] + scoring_matrix[seq_x[idx_i-1]]['-']
        if global_flag:
            alignment_matrix[idx_i][0] = score
        else:
            alignment_matrix[idx_i][0] = max(0, score)
    # First row: seq_y prefix aligned entirely against gaps.
    for idx_j in range(1, cols+1):
        score = alignment_matrix[0][idx_j-1] + scoring_matrix['-'][seq_y[idx_j-1]]
        if global_flag:
            alignment_matrix[0][idx_j] = score
        else:
            alignment_matrix[0][idx_j] = max(0, score)
    # Interior cells: best of the diagonal (pairing x[i-1] with y[j-1]),
    # left (gap in seq_x) and up (gap in seq_y) moves.
    for idx_i in range(1, rows+1):
        for idx_j in range(1, cols+1):
            score_i_1_j_1 = alignment_matrix[idx_i-1][idx_j-1] + scoring_matrix[seq_x[idx_i-1]][seq_y[idx_j-1]]
            score_i_j_1 = alignment_matrix[idx_i][idx_j-1] + scoring_matrix['-'][seq_y[idx_j-1]]
            score_i_1_j = alignment_matrix[idx_i-1][idx_j] + scoring_matrix[seq_x[idx_i-1]]['-']
            if global_flag:
                alignment_matrix[idx_i][idx_j] = max([score_i_1_j_1, score_i_j_1, score_i_1_j])
            else:
                alignment_matrix[idx_i][idx_j] = max([max(0, score_i_1_j_1), max(0, score_i_j_1), max(0, score_i_1_j)])
    return alignment_matrix
|
76cec408385c0097bb966d53d59c792750858f71
| 55,077
|
import math
def uni_comp_strength(c, Phi):
    """
    Return the Uniaxial Compressive Strength of rock given the Cohesion (c)
    and the Friction Angle Phi (in degrees).
    """
    phi_rad = math.radians(Phi)
    return (2 * c * math.cos(phi_rad)) / (1 - math.sin(phi_rad))
|
4db4cdbf494a8f4f063ca5827e7459b7ec5558d1
| 55,079
|
def writable(verb):
    """Does the given verb require a writable file descriptor?"""
    requires_write = (verb == "write")
    return requires_write
|
b1581d796186f4ac6a50dd7158dbb478efe24b06
| 55,080
|
def file_number_of_lines(file_name):
    """Count the number of lines in a file.

    Returns 0 when the file cannot be opened, and 0 (not 1, as the previous
    (0, None) sentinel produced) for an empty file.
    """
    try:
        count = 0
        with open(file_name) as file_handler:
            # enumerate(..., start=1) makes count the running line number.
            for count, _ in enumerate(file_handler, start=1):
                pass
        return count
    except IOError:
        return 0
|
e0aafc299c7fb0eadcf2988664affa899c160f10
| 55,081
|
def max_sum_subarray(arr, k):
    """Find maximum sum of any contiguous subarray of size k.

    Returns 0 when the array has fewer than k elements or k <= 0
    (preserving the previous behaviour for those inputs).

    Time: O(n)
    Space: O(1)
    """
    if k <= 0 or len(arr) < k:
        return 0
    # Seed with the first complete window instead of 0: the previous
    # zero-initialisation returned a wrong (too high) answer whenever every
    # size-k window sums to a negative value.
    win_sum = sum(arr[:k])
    max_sum = win_sum
    for i in range(k, len(arr)):
        win_sum += arr[i] - arr[i - k]
        if win_sum > max_sum:
            max_sum = win_sum
    return max_sum
|
0ce9e5045c2cce94b6534fe01e455f1919713a37
| 55,085
|
import math
def lat_long_zoom_to_pixel_coords(lat, lon, zoom):
    """Web-Mercator pixel coordinates of a lat/lon point at a zoom level
    (256-pixel tiles)."""
    sin_lat = math.sin(lat * math.pi / 180.0)
    x = int(math.floor(((lon + 180) / 360) * 256 * math.pow(2, zoom)))
    # Mercator projection of latitude, normalised to [0, 1] top-to-bottom.
    mercator_y = 0.5 - math.log((1 + sin_lat) / (1 - sin_lat)) / (4 * math.pi)
    y = int(math.floor(mercator_y * 256 * math.pow(2, zoom)))
    return (x, y)
|
bd176b1a16c0db45228ffe314a71f91358046e28
| 55,086
|
def find_my_y(your_x, data_x, data_y, logged_data=False):
    """Takes an input x, linear interpolates the data and produces a corresponding y(s).

    Parameters
    ----------
    your_x : float
        A single number, of which you want the corresponding y value(s) through linear interpolation of the data
        given (data_x, data_y).
    data_x : 1-d list/array
        This is the original set of x values.
    data_y : 1-d list/array
        This is the original set of y values.
    logged_data : Bool
        If the data is logged (base 10) coming in and you want linear values back out set this to True.
        Default: False

    Returns
    -------
    A list of corresponding y(s) to the input your_x, or None (after printing
    a message) when your_x lies outside the range covered by data_x.
    """
    your_x_between = []
    # Collect every consecutive coordinate pair whose x-interval brackets
    # your_x. data_x need not be monotonic, so one your_x can fall inside
    # several intervals and therefore map to several y values.
    for dx in range(len(data_x)-1):
        if dx == 0: #so the first one isnt completely discounted
            # First interval: both endpoints inclusive.
            if data_x[dx] <= your_x <= data_x[dx+1]:
                #append the coordinates of the range
                your_x_between.append([[data_x[dx], data_y[dx]], [data_x[dx+1], data_y[dx+1]]])
        else:
            # Later intervals: left endpoint exclusive so a point exactly on
            # a shared boundary is not counted twice; both ascending and
            # descending intervals are handled.
            if (data_x[dx] < your_x <= data_x[dx+1]) or (data_x[dx] > your_x >= data_x[dx+1]):
                your_x_between.append([[data_x[dx], data_y[dx]], [data_x[dx+1], data_y[dx+1]]])
    #no extrapolation, if your_x is not within the set of x's given (data_x) then this won't work
    if your_x_between == []:
        print('Your x is out of range of this data_x.')
        return
    #make a straight line betwen the points and plug your x value in
    found_y = []
    for coords in your_x_between:
        coord1 = coords[0]
        coord2 = coords[1]
        grad = (coord1[1] - coord2[1]) / (coord1[0] - coord2[0])
        _found_y = grad * (your_x - coord1[0]) + coord1[1]
        found_y.append(_found_y)
    #return all the y's found, no guarentee the there is a one-to-one mapping
    if logged_data == True:
        # Undo the base-10 log so linear values are returned.
        return [10**y for y in found_y]
    else:
        return found_y
|
ecf46ff49747e767d004b46c7994cb5f23610c97
| 55,087
|
def _escape_markdown_special_characters(string_to_escape: str) -> str:
"""Escapes the special characters used for Markdown formatting in a provided string.
Args:
string_to_escape: The string that needs to have Markdown special characters escaped.
Returns:
A string with the special characters used for Markdown formatting escaped.
"""
for character in r"\`*_{}[]()#+-.!":
string_to_escape = string_to_escape.replace(character, f"\\{character}")
return string_to_escape
|
562936c57bc40d940ee7ddba91102a84e698428f
| 55,088
|
def read_dict_text(fn):
    """
    Read a dictionary with several sections from plain text format. A section
    header is a line whose first character is '#'; following lines belong to
    that section. Returns a dictionary of lists.

    :param fn: filename
    :return: Dictionary with a list of (stripped) lines for each section
    """
    sections = {}  # renamed from 'dict' to avoid shadowing the builtin
    lines = []
    current_section = ""
    with open(fn) as f:
        for raw_line in f.readlines():
            text = raw_line.strip()
            # startswith() is safe on empty lines; the previous text[0]
            # check raised IndexError for any blank line.
            if text.startswith('#'):  # section header
                if len(lines) > 0 and current_section != "":
                    sections[current_section] = lines
                lines = []
                current_section = text[1:]
            else:
                lines.append(text)
    if len(lines) > 0 and current_section != "":
        sections[current_section] = lines
    return sections
|
8a5c185b5fbb209b8f276493a05c2c090a44e29d
| 55,096
|
def getClassLabel(file):
    """
    Extract a class label from a file name: the part before the first '.'
    with digits stripped from both ends.
    @param file: The file name or the file path
    @returns the class label as string
    """
    stem = file.split('.')[0]
    return stem.strip("0123456789")
|
cc604a09aed4067fc8e9a71d774822dbc409a736
| 55,099
|
def range_map(val, in_min, in_max, out_min, out_max, rnd=0):
    """
    Linearly map a value from one range of possible values onto a second
    range of possible values.

    Example 1: range_map(555, 0, 1023, 0, 100)
    This will output a value of 54.252199413489734
    Example 2: range_map(555, 0, 1023, 0, 100, 2)
    This will output a value of 54.25
    (the previous docstring's second example used the input 55, which
    actually maps to about 5.38, not 54.25)

    Parameters:
        val: the value to map to a new range
        in_min: the minimum value of the original range
        in_max: the maximum value of the original range
        out_min: the minimum value of the output range
        out_max: the maximum value of the output range
        rnd: the number of decimal places to round the result to;
            0 (the default) means no rounding at all
    """
    mapped = (val - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
    return mapped if rnd == 0 else round(mapped, rnd)
|
b5d1fa8f157cac546a127d89794e394c72be1829
| 55,100
|
def get_stepsize(traj, min_points=200):
    """Return a step size that keeps at least min_points frames on the graph."""
    frames = traj.n_frames
    return 1 if frames <= min_points else frames // min_points
|
fd3b372970f6af63ef3153531abad37af6e9a117
| 55,102
|
import math
def _gon2rad(angle):
""" Convert GON to RAD
"""
return angle / 200.0 * math.pi
|
bb8de29386cdddef4d33170b0cc91d62d25df1fe
| 55,109
|
def make_train_state(save_file):
    """Create a fresh training-state/history dictionary.

    Args:
        save_file (str): path where the model checkpoint is saved
    """
    state = {
        'epoch_idx': 0,
        'model_save_file': save_file,
        'best_joint_acc': 0,
        'best_joint_loss': 1e9,
        'best_epoch_idx_loss': 0,
        'best_epoch_idx_acc': 0,
        'run_time': 0,
        'LDT_test_score': 0,
        'test_loss_l1': -1,
        'test_acc_l1': -1,
        'test_loss_l2': -1,
        'test_acc_l2': -1,
    }
    # per-epoch history series all start out empty (each gets its own list)
    for series in ('train_loss', 'train_acc',
                   'val_loss_l1', 'val_acc_l1',
                   'val_loss_l2', 'val_acc_l2',
                   'LDT_train_score', 'LDT_val_score'):
        state[series] = []
    return state
|
5527731fa05b02a8246331f1be64ffb6bf0236d6
| 55,110
|
def cls_name(cls):
    """Return the fully qualified name of a class (module plus class name)."""
    return f"{cls.__module__}.{cls.__name__}"
|
4eec8a1eab33d6e9333894c15cbf012faea10bac
| 55,113
|
import re
def split_indices(s):
    """Split a string on whitespace, yielding each token with its indices.

    @:return generator((word, (start_idx, end_idx)), ...) where end_idx is
    the index of the token's last character (inclusive).
    """
    for match in re.finditer(r'\S+', s):
        yield match.group(0), (match.start(), match.end() - 1)
|
777c57764e746d2d172b44f49940141cdce265c8
| 55,118
|
def _GetViewSpecs(view_args, width_per_col, grid_width, col_width=None):
"""Convert a single view into its JSON specifications.
Args:
view_args: A tuple of (title, widgets[, properties]).
width_per_col: The width for each column.
grid_width: The width of the canvas (in CSS style).
col_width: The pre-determined width of the view
(in units defined by layout._DESIRED_VIEW_COLS).
Returns:
A dict with parameters describing the view.
"""
if len(view_args) == 2:
title, widgets = view_args
properties = {}
else:
assert len(view_args) == 3
title, widgets, properties = view_args
view = {
'name': title,
'cols': 1, # By default, each view occupies one column.
'indicators': []
}
view.update(properties)
if col_width:
view['cols'] = col_width
view['grid_width'] = view['cols'] * width_per_col
for widget in widgets:
# Note that each grid cell is logically split into a finer grid.
# Let each widget span horizontally across the entire view.
widget.SetCols(grid_width)
view['indicators'] += widget.Indicators()
return view
|
32f36272c8bc843f5f34f625b646f2fbb10542b3
| 55,120
|
import random
def roll_dice(adv, disadv, seed):
    """
    Roll a d20.  With the advantage or disadvantage flag set, roll twice
    and keep the higher (adv) or lower (disadv) result; the kept value is
    returned first, followed by both raw rolls.
    """
    if seed:
        random.seed(seed)
    if not (adv or disadv):
        return [random.randint(1, 20)]
    first = random.randint(1, 20)
    second = random.randint(1, 20)
    kept = max(first, second) if adv else min(first, second)
    return [kept, first, second]
|
3931a79dc4ff49c3fc75eae4a5054e0056b7165a
| 55,123
|
def main(digits):
    """
    Find the index of the first term in the Fibonacci sequence (F1 = 1,
    F2 = 1) to contain the given number of digits.

    Bug fix: the previous implementation extrapolated the answer from a
    hard-coded 14-entry digit-count pattern, which drifts from the real
    sequence — e.g. main(5) returned 20 although F20 = 6765 has only
    4 digits (the first 5-digit term is F21 = 10946).  Computing the
    sequence directly is exact for every input and still fast: main(1000)
    needs fewer than 5000 big-integer additions.

    :param digits: Number of digits that the requested term should contain.
    :rtype: int
    """
    if digits <= 1:
        # F1 = 1 is the first 1-digit term (also the result the old code
        # gave for digits < 1).
        return 1
    limit = 10 ** (digits - 1)  # smallest integer with `digits` digits
    previous, current = 1, 1    # F1, F2
    index = 2
    while current < limit:
        previous, current = current, previous + current
        index += 1
    return index
|
274176089dd1accdd0b3b2356787fed100d4aab3
| 55,125
|
def shell_sort(arr: list) -> list:
    """Sort a list in place with a shell-sort variant and return it.

    Args:
        arr (list): the list to be sorted
    Returns:
        list: the same list, sorted ascending
    """
    # Start with half the list length and halve the gap each pass.
    gap = len(arr) // 2
    while gap:
        left, right = 0, gap
        # Sweep every gap-separated pair from the front of the list.
        while right < len(arr):
            if arr[left] > arr[right]:
                arr[left], arr[right] = arr[right], arr[left]
            left += 1
            right += 1
            # Bubble the just-visited prefix: walk backwards comparing
            # elements that sit `gap` apart, swapping out-of-order pairs.
            back = left
            while back >= gap:
                if arr[back - gap] > arr[back]:
                    arr[back], arr[back - gap] = arr[back - gap], arr[back]
                back -= 1
        gap //= 2
    return arr
|
7ba2e36000c8fb3f4b23aa020d65c2b78a94ad4a
| 55,127
|
import re
def split_args(arg_string):
    """
    Split a string into a list on whitespace, keeping quoted phrases intact.

    :param arg_string: Command-line arguments as a single string
    :return: list of strings with the surrounding quotes removed
    """
    token_pattern = re.compile(r'(?:"[^"]*"|[^\s"])+')
    return [token.strip('"') for token in token_pattern.findall(arg_string)]
|
c0043bfd323052f7b5118aec171e2f0e1e20e28b
| 55,138
|
def _prepare_shape_for_squeeze(shape, axes):
"""
Creates the squeezed new shape based on the tensor and given axes.
Args:
shape (tuple): the shape of the tensor
axes Union[None, int, tuple(int), list(int)]: the axes with dimensions squeezed.
Returns:
new_shape(tuple): the shape with dimensions squeezed.
"""
new_shape = []
ndim = len(shape)
# Convert to set
if isinstance(axes, int):
if axes >= ndim or axes < -ndim:
raise ValueError(
f"axis {axes} is out of bounds for tensor of dimension {ndim}")
axes = {axes}
elif isinstance(axes, (list, tuple)):
for axis in axes:
if axis >= ndim or axis < -ndim:
raise ValueError(
f"axis {axis} is out of bounds for tensor of dimension {ndim}")
axes = set(axes)
elif axes is not None:
raise TypeError(
f"only int, tuple and list are allowed for axes, but got {type(axes)}")
if axes is None:
new_shape = [s for s in shape if s != 1]
else:
for idx, s in enumerate(shape):
if s != 1 or (idx not in axes) and (idx - ndim not in axes):
new_shape.append(s)
# if an axis is selected with shape entry greater than one, an error is raised.
if s != 1 and ((idx in axes) or (idx - ndim in axes)):
raise ValueError(
f"axis {axes} has shape entry {s} > 1, cannot be squeezed.")
return tuple(new_shape)
|
0f984a9eab2b5897556c3001e2ca0c607035534f
| 55,141
|
def bar_label_formatter(x, pos):
    """Return the tick label for a bar chart: the value truncated to int.

    The (x, pos) signature matches a matplotlib ``FuncFormatter`` callback;
    ``pos`` is accepted but unused.
    """
    label = int(x)
    return label
|
b1e6d1534d1a5b403d7ca037eb8f445771c26683
| 55,142
|
def remove_suffix(files, suffix):
    """Strip ``suffix`` from each file name that ends with it.

    Bug fix: the previous version sliced ``f[:-len(suffix)]`` unconditionally,
    so an empty suffix produced ``f[:0]`` and wiped every name, and names that
    did not carry the suffix were silently truncated anyway.  Now the suffix
    is removed only when actually present; other names pass through unchanged.

    :param files: iterable of file-name strings
    :param suffix: the suffix to remove (may be empty, which is a no-op)
    :return: list of file names with the suffix stripped where it matched
    """
    return [f[:-len(suffix)] if suffix and f.endswith(suffix) else f
            for f in files]
|
c57486f406141564844527b3a0cc763360ca48d8
| 55,146
|
def summarize_id(id):
    """
    Abbreviate a full Docker object ID the way the ``docker`` CLI does.

    Docker's API reports full hexadecimal IDs for containers, images and
    other objects, but the ``docker`` program shows only the first 12
    characters in its user interface; we do the same because it makes the
    output more user friendly.

    :param id: A full hexadecimal Docker ID.
    :returns: The first 12 characters of ``id``.
    """
    short_length = 12
    return id[:short_length]
|
9aeca7bb9a1c0a03d449369f1cc9ab801fe6a23a
| 55,153
|
def _file_name_builder(test_name):
"""Build a name for the output file."""
try:
file_name = (test_name + ".dat").replace(' ', '_')
except TypeError:
file_name = ("NoName" + ".dat")
return file_name
|
b69f56b0e01c72b7530287cebbf09407bcc2bbe2
| 55,154
|
from datetime import datetime
def to_datetime(timeline, fmt='%Y-%m-%d'):
    """Convert a list of date strings into Python ``date`` objects.

    Args:
        timeline (list): strings representing dates.
        fmt (str): the date format (e.g. ``"%Y-%m-%d"``).
    Returns:
        list: ``datetime.date`` values parsed from ``timeline``.
    """
    parsed = []
    for stamp in timeline:
        parsed.append(datetime.strptime(stamp, fmt).date())
    return parsed
|
daaf0684e2b2354da4ce3b70745d038cf7c391fa
| 55,159
|
def quick_exponent_with_mod(base, power, modulo):
    """Compute base**power modulo ``modulo`` by repeated squaring.

    A modulo is applied after every multiplication so intermediate values
    stay within the modulus range."""
    # square_chain[k] == base**(2**k) (mod modulo), e.g. for base==3:
    # [3, 3^2, 3^4, 3^8, ...]
    square_chain = [base]
    step = 2
    while step <= power:
        square_chain.append((square_chain[-1] * square_chain[-1]) % modulo)
        step *= 2
    # Bits of the exponent, least-significant first: bit k says whether
    # base**(2**k) contributes to the product.
    bits = [digit == '1' for digit in reversed(bin(power)[2:])]
    product = 1
    for include, square in zip(bits, square_chain):
        if include:
            product = (product * square) % modulo
    return product
|
404373115f14b3d751c9bb7fb77b101aa7a7bd70
| 55,160
|
def merge_on_pitch(gt_df, trans_df, offset=True):
    """
    Merge the given ground truth and transcribed dfs on pitch, with
    corresponding suffixes, and possibly offset columns.

    Parameters
    ----------
    gt_df : pd.DataFrame
        The ground truth data frame.
    trans_df : pd.DataFrame
        The transcription data frame.
    offset : boolean
        Calculate offset columns pre-merge.

    Results
    -------
    merge_df : pd.DataFrame
        The gt and trans DataFrames, merged on equal pitches, with index
        columns added for each pre-merge, and _gt and _trans suffixes added
        to the resulting columns. If offset is True, offset columns are
        calculated pre-merge.
    """
    # reset_index() both copies each frame and materializes an "index"
    # column that survives the merge, identifying the source rows.
    gt_df = gt_df.reset_index()
    trans_df = trans_df.reset_index()
    # Pre-calculate offset time once (offset == onset + duration).
    if offset:
        gt_df["offset"] = gt_df.onset + gt_df.dur
        trans_df["offset"] = trans_df.onset + trans_df.dur
    # Bug fix: the original called .reset_index() a second time on each
    # already-reset frame here, which injected redundant
    # "level_0_trans"/"level_0_gt" columns into the result.
    # Merge notes with equal pitch -- keep all pairs.
    return trans_df.merge(gt_df, on="pitch", suffixes=("_trans", "_gt"))
|
0e146f8df713413c0027c6e8d8aab84f5017865f
| 55,164
|
def swap_nibbles(data: str) -> str:
    """
    Swap each pair of nibbles (semi-octets) in a PDU hex string.

    Example:
        >>> swap_nibbles('0123')
        '1032'
    """
    return ''.join(data[k + 1] + data[k] for k in range(0, len(data), 2))
|
6dc3294309c55a29af1f59b5716c24092655978f
| 55,170
|
def get_end(model):
    """
    Get the end position of a (feature) location.  For point locations the
    position value is returned; for an end range the (maximum) end
    position is returned.  Any other location type yields None.
    """
    # Descend into the embedded location, if the model carries one.
    location = model.get("location") or model
    kind = location["type"]
    if kind == "range":
        return location["end"]["position"]
    if kind == "point":
        return location["position"]
|
29bf438c48dcf4a1f549bbe0b9e91b5574e7166e
| 55,181
|
def closest(v, L):
    """
    Returns the element in L (a sorted numpy array) which is closest to v
    (ties go to the smaller element).
    >>> R = np.array([9,3,6])
    >>> R.sort()
    >>> R
    array([3, 6, 9])
    >>> [closest(i, R) for i in range(1,12)]
    [3, 3, 3, 3, 6, 6, 6, 9, 9, 9, 9]
    """
    i = L.searchsorted(v)
    if i == len(L):
        return L[-1]
    if i == 0:
        return L[0]
    left, right = L[i - 1], L[i]
    return right if v - left > right - v else left
|
8930e49e36bb191c17e0dfe4e3cdd98da67f69d0
| 55,184
|
def fiscal_to_calendar(fiscal_year, fiscal_mo):
    """Convert a fiscal year/month into a calendar (year, month) tuple for
    graphing purposes.  Fiscal months 7-12 map onto months 1-6 of the same
    calendar year; fiscal months 1-6 map onto months 7-12 of the prior year.
    """
    if fiscal_mo > 6:
        return (fiscal_year, fiscal_mo - 6)
    return (fiscal_year - 1, fiscal_mo + 6)
|
84f072dca3b4083e231344bd75365daab254fe9d
| 55,188
|
import pickle
def o2p(obj):
    """
    Pickle an object, returning the empty string instead for an empty
    list/tuple/dict.
    """
    is_empty_container = isinstance(obj, (list, tuple, dict)) and not obj
    return "" if is_empty_container else pickle.dumps(obj)
|
b3bd8b30a5c9dd815291bbdc69edb5554cb3ba4f
| 55,189
|
def get_local_file_name(cur_key):
    """
    Return the local file name for a given cost usage report key.

    Behaviorally this takes the final path component of the key, e.g.
    ``/koku/20180701-20180801/koku-Manifest.json`` -> ``koku-Manifest.json``.

    NOTE(review): the original docstring claimed an assemblyID present in
    the key would be prepended to the file name; the code never did that —
    confirm against callers which behavior is intended.

    Args:
        cur_key (String): reportKey value from manifest file.
    Returns:
        (String): the final path component of ``cur_key``.
    """
    *_, basename = cur_key.split("/")
    return basename
|
d169fc69f276b3a1003278430497dd3d6d822a28
| 55,190
|
def gather_processedfiles_for_expset(expset):
    """Collect all processed files (those carrying a ``higlass_uid``) for an
    Experiment Set.

    Args:
        expset(dict): Contains the embedded Experiment Set data.

    Returns:
        A dictionary with the following keys:
        genome_assembly(string, optional, default=""): The genome assembly all
            of the files use. Blank if there is an error or no files are found.
        files(list): {"accession", "status"} dicts for the discovered files.
        auto_generated_higlass_view_config(string, optional, default=None):
            uuid of the Higlass Item generated by a previous check, if any.
        manual_higlass_view_config(string, optional, default=None): uuid of a
            Higlass Item that wasn't automatically generated, if any.
        error(string, optional, default=""): Describes any errors generated.
    """
    # Collect processed files attached directly to the Experiment Set...
    processed_files = []
    if "processed_files" in expset:
        processed_files = [pf for pf in expset["processed_files"] if "higlass_uid" in pf]
    # ...and those attached to each Experiment in the set.
    if "experiments_in_set" in expset:
        for experiment in [exp for exp in expset["experiments_in_set"] if "processed_files" in exp]:
            processed_files += [pf for pf in experiment["processed_files"] if "higlass_uid" in pf]
    if not processed_files:
        return {
            "error": "No processed files found",
            "files": [],
            "genome_assembly": "",
        }
    # All files must agree on a single genome assembly.
    genome_assembly_set = {pf["genome_assembly"] for pf in processed_files if "genome_assembly" in pf}
    if len(genome_assembly_set) > 1:
        return {
            "error": "Too many genome assemblies {gas}".format(gas=genome_assembly_set),
            "files": [],
            "genome_assembly": ""
        }
    # Fix: removed the dead `unique_accessions` set the original computed
    # and never used.  As before, duplicate accessions are NOT de-duplicated.
    unique_files = [{"accession": pf["accession"], "status": pf["status"]} for pf in processed_files]
    # Look for a previously generated (or manually created) Higlass view
    # conf among the set's static content tabs.
    auto_generated_higlass_view_config = None
    manual_higlass_view_config = None
    if expset.get("static_content", None):
        processed_file_tabs = [sc for sc in expset["static_content"] if sc["location"] == "tab:processed-files"]
        auto_processed_file_tabs = [sc for sc in processed_file_tabs if sc["description"] == "auto_generated_higlass_view_config"]
        if auto_processed_file_tabs:
            auto_generated_higlass_view_config = auto_processed_file_tabs[0]["content"]["uuid"]
        elif processed_file_tabs:
            manual_higlass_view_config = processed_file_tabs[0]["uuid"]
    return {
        "error": "",
        "files": unique_files,
        "auto_generated_higlass_view_config": auto_generated_higlass_view_config,
        "manual_higlass_view_config": manual_higlass_view_config,
        "genome_assembly": processed_files[0]["genome_assembly"]
    }
|
7c717027c429924f3605a171ea83a0e166c09176
| 55,200
|
def find_word(string, start=0):
    """Locate the first whitespace-delimited word at or after `start`.

    Return the word together with the index of its first character and the
    index one past its last character.
    """
    length = len(string)
    begin = start
    # Skip any leading whitespace.
    while begin < length and string[begin].isspace():
        begin += 1
    # Advance to the end of the word.
    stop = begin
    while stop < length and not string[stop].isspace():
        stop += 1
    return string[begin:stop], begin, stop
|
cd0f6f02d2797c5f7bd04b3307fe34c29e3be748
| 55,202
|
def factor_signatures(hash_value1, signature1, hash_value2, signature2):
    """
    Given two hashes and their signatures that used the same random nonce `k`,
    return the signer's private key. That's why randomness is important, folks.
    """
    # NOTE(review): the closing line is the classic nonce-reuse key-recovery
    # formula d = (s2*h1 - s1*h2) / (r*(s1 - s2)), which is only exact when
    # the division is a modular inverse over the curve-order field.  For
    # plain Python ints `/` is float true division — this presumably relies
    # on signature components being field-element objects whose overloaded
    # `/` performs modular inversion; confirm the caller's types.
    r1, s1 = signature1
    r2, s2 = signature2
    assert r1 == r2  # Did they really reuse the nonce?
    r = int(r1)
    return (s2 * hash_value1 - s1 * hash_value2) / (r * (s1 - s2))
|
5b722af03a32628af432cda501d212fbd6685f50
| 55,214
|
def _find_positions(hash_list, pHash, pattern, text):
"""
Function for finding position of pattern in text
Args:
hash_list: list of hashes from text
pHash: hash of pattern
pattern: pattern itself
text: text itself
Returns:
list of positions where pattern was found
"""
result = []
for i in range(0, len(text) - len(pattern) + 1):
if pHash != hash_list[i]:
continue
if text[i:i + len(pattern)] == pattern: # Only if hashes are the same we compare text symbol by symbol
result.append(i)
return result
|
7ecb9a22036ff5aa21738c6205c62c21a000c604
| 55,215
|
def next_bar(spec, env):
    """
    Returns a function mapping a datetime to the next bar.

    Daily specs backed by minute data get the close of the next trading
    day; other daily specs get the next trading day itself; everything
    else advances by one market minute.
    """
    if spec.frequency.unit_str != 'd':
        return env.next_market_minute
    if spec.frequency.data_frequency != 'minute':
        return env.next_trading_day
    return lambda dt: env.get_open_and_close(env.next_trading_day(dt))[1]
|
682d8b4c6e7aafb83bf50d2ea1df8e1ac8db3817
| 55,221
|
def return_dataframe_html(dataframe, print_index=False):
    """
    Render a pandas DataFrame as HTML markup.

    By default the index labels are omitted (``print_index=False``) so the
    table shows only the data columns; pandas renders column names bold.

    :param dataframe: a pandas dataframe
    :param print_index: whether to include the index column in the output
    :return: a str of html markup
    """
    return dataframe.to_html(index=print_index)
|
97f0264fd88bd5e5dbac151834ad001776aeee39
| 55,222
|
def Q_feed(deltaT, F_mass, Cp, phi_vapor, r_feed):
    """
    Calculate the heat load of the feed heat exchanger: the sensible heat
    to change the mix temperature plus the latent heat of the vapor part.

    Parameters
    ----------
    deltaT : float
        Temperature difference between initial and final mix temperature, [deg C]
    F_mass : float
        Mass flow rate of feed, [kg/s]
    Cp : float
        Heat capacity of the mix, [J/(kg * deg C)]
    phi_vapor : float
        Vapor fraction of the feed, [dimensionless]
    r_feed : float
        Heat of vaporization of the mix, [J/kg]

    Returns
    -------
    Q_feed : float
        Heat load of the heat exchanger, [W] = [J/s]

    References
    ----------
    Dytnersky, eq. 2.2, p. 45
    """
    sensible = Cp * deltaT
    latent = phi_vapor * r_feed
    return F_mass * (sensible + latent)
|
1db00a02b07b2dd3cf0b7b6ef063104d537e842b
| 55,224
|
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
|
0ab54369cb4dc2596e3b799736dc878321ec3136
| 55,226
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.