content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_ip_from_host_name(name):
    """Extract ip from host name.
    Host name is in this form: ip-xxx-xxx-xxx-xxx.ec2.internal
    Args:
        name (string): Host name.
    Returns:
        (string): ip address in xxx.xxx.xxx.xxx form.
    """
    pieces = name.split('-')
    # The fourth octet carries the domain suffix; drop everything after the dot.
    octets = pieces[1:4] + [pieces[4].split('.')[0]]
    return '.'.join(octets)
def load_field(field_file):
    """
    Load a field (grid of characters) from a file.

    Each line of the file becomes one row; whitespace is stripped and
    interior spaces removed before splitting into characters.
    """
    with open(field_file, 'r') as handle:
        return [list(line.strip().replace(' ', '')) for line in handle]
def port_label(port):
    """Return normalized port for consistency in file names.
    If empty, assume 5432.
    :param port: port from the config file (may be empty or None)
    :rtype: str
    """
    return str(port) if port else '5432'
def convert_newlines(msg):
    """A routine that mimics Python's universal_newlines conversion.

    CRLF first, then any remaining lone CR, are normalized to LF.
    """
    for ending in ('\r\n', '\r'):
        msg = msg.replace(ending, '\n')
    return msg
def _give_full_lines(list):
"""Returns all structured_data fields of a list of logfile_entry objects."""
return [x.structured_data for x in list] | 449f4209e8d5ad31a2d328da3d4535d7f1a70861 | 126,293 |
def fix_jvm(fig):
    """Normalize memory usage reported by Java NMT to KB.

    Args:
        fig (str): a figure such as '100MB', '50KB' or '2048B'; the suffix
            is matched case-insensitively.
    Returns:
        The amount in KB (int for MB/KB inputs, float for B inputs), or
        None when no recognized suffix is present.
    """
    upper = fig.upper()
    # The old code used str.strip('MB'), which strips *characters* (any run
    # of 'M'/'B') rather than the suffix, and only worked for upper-case
    # suffixes even though the endswith() check was case-insensitive.
    # Slicing the suffix off is both correct and case-insensitive.
    if upper.endswith('MB'):
        return int(fig[:-2]) * 1024
    if upper.endswith('KB'):
        return int(fig[:-2])
    if upper.endswith('B'):
        return int(fig[:-1]) / 1024
def _regressLiteralType(x):
"""The function returns the literal with its proper type, such as int,
float, or string from the input string x
Examples
--------
>>> _regressLiteralType("1")->1
>>> _regressLiteralType("1.0")->1.0
>>> _regressLiteralType("1e-3")->0.001
>>> _regressLiteralType("YUV")->'YUV'
>>> _regressLiteralType("YO8")->'YO8'
"""
try:
float(x) # Test for numeric or string
lit = float(x) if set(['.','e','E']).intersection(x) else int(x)
except ValueError:
lit = str(x)
return lit | 358475f2b5a989a2ef325e34b97e69fd0176b0e9 | 126,299 |
def _replaceBR(mo):
"""Replaces <br> tags with '::' (useful for some akas)"""
txt = mo.group(0)
return txt.replace('<br>', '::') | d0916596af7a43df25431e1b125002ed1a19abc8 | 126,301 |
def get_adi_ids(metadata):
    """Returns a list of the adipose shell IDs for each sample
    Parameters
    ----------
    metadata : array_like
        Array of metadata dicts for each sample
    Returns
    -------
    adi_ids : array_like
        Array of the adipose IDs for each sample in the metadata
    """
    adi_ids = []
    for md in metadata:
        # The adipose ID is the phantom ID up to the first 'F'.
        adi_ids.append(md['phant_id'].split('F')[0])
    return adi_ids
import itertools
def merge_datum_pages(datum_pages):
    """
    Combines an iterable of datum_pages into a single datum_page.
    Parameters
    ----------
    datum_pages: Iterable
        An iterable of datum_pages
    Returns
    ------
    datum_page : dict
    """
    pages = list(datum_pages)
    # Single page: nothing to merge.
    if len(pages) == 1:
        return pages[0]

    def _concat(per_page_lists):
        # Flatten the per-page lists into one list.
        return list(itertools.chain.from_iterable(per_page_lists))

    merged = {'resource': pages[0]['resource']}
    merged['datum_id'] = _concat(page['datum_id'] for page in pages)
    merged['datum_kwargs'] = {
        key: _concat(page['datum_kwargs'][key] for page in pages)
        for key in pages[0]['datum_kwargs'].keys()
    }
    return merged
def main(inlet_temp, ambient_temp, FRtaualpha, FRUL, irradiance, surface_area):
"""
Calculate heating energy generated from a solar thermal collector
:param inlet_temp: Inlet temperature into the collector [°C], time series
:param ambient_temp: Ambient air temperature at the collector [°C], time series
:param FRtaualpha: Optical efficiency [-], constant
:param FRUL: Heat loss coefficient [W/m2K], constant
:param irradiance: Irradiance on the collector [W/m2], time series
:param surface_area: Surface area of the solar thermal collector [m2]
:returns: heating energy [kWh] time resolved, efficiency [-] time resolved
"""
horizon = len(inlet_temp)
if horizon > len(ambient_temp):
horizon = len(ambient_temp)
if horizon > len(irradiance):
horizon = len(irradiance)
eta = [0.0] * horizon
heating = [0.0] * horizon
for i in range(horizon):
eta_temp = FRtaualpha - ((FRUL * (inlet_temp[i] - ambient_temp[i])) / irradiance[i])
eta[i] = max(0, eta_temp)
heating[i] = (irradiance[i] * eta[i] * surface_area) / 1000.0
return heating, eta | 6396c3f283d41e9aabb1da770b93a2ae8e8b6448 | 126,312 |
def get_sub_value(dictionary, aliases):
    """
    :param dictionary: a dictionary to check in for aliases
    :param aliases: list of keys to check in dictionary for value retrieval
    :return: returns value if alias found else none
    """
    # The original guard `(dictionary and aliases) is not None` short-circuits
    # on a falsy dictionary, so ({}, None) slipped through and crashed with
    # `for alias in None`. Check both arguments explicitly instead.
    if dictionary is None or aliases is None:
        return None
    for alias in aliases:
        if alias in dictionary:
            return dictionary[alias]
    return None
import math
def sparse_cosine_similarity(A, B):
    """
    Function computing cosine similarity on sparse weighted sets represented
    as python dicts.
    Runs in O(n), n being the sum of A & B's sizes.
    Args:
        A (Counter): First weighted set.
        B (Counter): Second weighted set.
    Returns:
        float: Cosine similarity between A & B.
    Example:
        from fog.metrics import sparse_cosine_similarity
        # Basic
        sparse_cosine_similarity({'apple': 34, 'pear': 3}, {'pear': 1, 'orange': 1})
        >>> ~0.062
    """
    # The very same object is trivially similar to itself.
    if A is B:
        return 1.0
    # An empty vector has no direction, hence zero similarity.
    if len(A) == 0 or len(B) == 0:
        return 0.0
    # Iterate over the smaller set to minimize dictionary lookups.
    if len(A) > len(B):
        A, B = B, A
    dot = 0.0
    norm_a = 0.0
    for key, weight in A.items():
        norm_a += weight * weight
        other = B.get(key)
        if other is not None:
            dot += weight * other
    norm_b = 0.0
    for weight in B.values():
        norm_b += weight * weight
    return dot / math.sqrt(norm_a * norm_b)
def expand_position(position, length=4):
    """Take a tuple with length <= 4 and pad it with ``None``'s up to
    length.
    Example, if ``length=4`` and ``position=(1, 0)``, then the output
    is ``(1, 0, None, None)``.
    """
    padding = length - len(position)
    return tuple(position) + (None,) * padding
def find_atoms(molecule, attr, value):
    """
    Find all nodes of a `vermouth.molecule.Molecule` that have the
    attribute `attr` with the corresponding value of `value`.
    Parameters
    ----------
    molecule: :class:vermouth.molecule.Molecule
    attr: str
        attribute that a node needs to have
    value:
        corresponding value
    Returns
    ----------
    list
        list of nodes found
    """
    # The explicit `attr in ...` check keeps nodes lacking the attribute
    # from matching a `value` of None.
    return [node for node in molecule.nodes
            if attr in molecule.nodes[node] and molecule.nodes[node][attr] == value]
import re
def get_number_in_a_string(
        string: str
) -> int:
    """
    Return the first number (int) present in a string.
    Parameters
    ----------
    string : str
        String containing a number.
    Returns
    -------
    int
        First run of digits in the string, as an integer.
    """
    digit_runs = re.findall(r'\d+', string)
    # IndexError is raised (as before) when the string holds no digits.
    return int(digit_runs[0])
from functools import reduce
def uniquify(iterable):
    """ Return the unique elements of *iterable* as a list, first-seen order kept.

    Works for unhashable elements too (membership is tested with ==).

    >>> uniquify(([1, 1, 2, 3, 3, 3, 4, 5, 5]))
    [1, 2, 3, 4, 5]
    """
    # The old reduce(l if x in l else l + [x]) rebuilt the whole accumulator
    # list for every new element; appending in place avoids that quadratic
    # copying while keeping identical results.
    unique = []
    for item in iterable:
        if item not in unique:
            unique.append(item)
    return unique
import yaml
def yaml_to_dict(yaml_file):
    """Convert YAML file into a dictionary
    Arguments
        yaml_file (str)   YAML file
    returns (dict)
    """
    with open(yaml_file, "rt") as stream:
        contents = stream.read()
    return yaml.safe_load(contents)
def count_holidays(filename):
    """Returns the number of holidays listed inside the given csv file
    (every line except the header row counts as one holiday)."""
    with open(filename) as csv_file:
        return sum(1 for _ in csv_file) - 1
def _to_latex(string):
"""Latex-decorate a string."""
return ('$' + string + '$') | fc03b93086393c772aa932a37560f472d0624c6f | 126,334 |
def validate_dialog(dialog, max_turns):
    """ Check if the dialog consists of multiple turns with equal or
    less than max_turns by two users without truncated tweets.
    Args:
        dialog: target dialog
        max_turns: upper bound of #turns per dialog
    Return:
        True if the conditions are all satisfied
        False, otherwise
    """
    if len(dialog) > max_turns:
        return False
    # Reject dialogs containing truncated tweets, collecting the
    # participating user ids along the way.
    users = set()
    for utterance in dialog:
        for tweet in utterance.values():
            if tweet['truncated'] == True:
                return False
            users.add(tweet['user']['id'])
    # Exactly two distinct users are required.
    return len(users) == 2
from typing import Dict
import pickle
def download_dict_hash_cards(path: str) -> Dict[int, int]:
    """Load a dictionary of the form {card: hash_card} from a pickle file.

    Args:
        path (str): file path
    Returns:
        dict: dictionary of the form {card: hash_card}
    """
    # A context manager guarantees the handle is closed even when
    # pickle.load raises; the manual open/close version leaked it.
    with open(path, 'rb') as filehandler:
        return pickle.load(filehandler)
from typing import Optional
def _format_titlecase(raw_title: Optional[str]) -> str:
"""Format string in titlecase replacing underscores with spaces.
Args:
raw_title: original string title. Typically the filename stem
Returns:
str: formatted string
"""
return raw_title.replace('_', ' ').strip().title() if raw_title else '' | 5cc5433b15d9057fea9845c2fd318f5ce915cc24 | 126,339 |
from datetime import datetime
def duration(entry):
    """
    Calculates the time difference between the triggered time of the event and its updated time.
    :param entry: A dict with 'triggered' and 'updated' keys holding '%d-%m-%Y %H:%M:%S' strings.
    :type entry: Dict
    :return: A human readable string representing the time difference between the two events.
    :raises TypeError: In case the parameter is not a dictionary.
    :raises KeyError: In case the parameter doesn't have the correct keys.
    """
    if type(entry) is not dict:
        raise TypeError("Parameter must be a dictionary!")
    if "triggered" not in entry:
        raise KeyError("No 'triggered' key in the dict!")
    if "updated" not in entry:
        raise KeyError("No 'updated' key in the dict!")
    triggered = datetime.strptime(entry["triggered"], "%d-%m-%Y %H:%M:%S")
    updated = datetime.strptime(entry["updated"], "%d-%m-%Y %H:%M:%S")
    delta = updated - triggered
    # Use floor division: under Python 3, `/` produced float hour/minute
    # components ("0.0125:...") instead of whole numbers.
    return "{} days, {}:{}:{}".format(delta.days, delta.seconds // 3600,
                                      (delta.seconds // 60) % 60,
                                      delta.seconds % 60)
def dictlist(dict_):
    """
    Convert dict to a flat list alternating keys and values.
    """
    flattened = []
    for key, value in dict_.items():
        flattened.append(key)
        flattened.append(value)
    return flattened
def split_lists(original_list, max_slices):
""" Split a list into a list of small lists given the desired number of sub lists """
slices = max_slices - 1
original_list_size = len(original_list)
split_index = int(original_list_size / slices)
return [original_list[x:x + split_index] for x in range(0, len(original_list), split_index)] | 644e47c276856ef69055fd62b2eec2c70930e6a1 | 126,351 |
def invalid_request_error(e):
    """Generates a valid ELG "failure" response (and HTTP 400) if the request cannot be parsed"""
    error = {'code': 'elg.request.invalid', 'text': 'Invalid request message'}
    body = {'failure': {'errors': [error]}}
    return body, 400
def graph_key_from_tag(tag, entity_index):
    """Returns a key from a tag entity
    Args:
        tag (tag) : this is the tag selected to get the key from
        entity_index (int) : this is the index of the tagged entity
    Returns:
        str : String representing the key for the given tagged entity.
    """
    entity = tag.get('entities', [])[entity_index]
    # Key format: "<start_token>-<entity key>-<confidence>".
    return '-'.join([str(tag.get('start_token')),
                     entity.get('key'),
                     str(entity.get('confidence'))])
def valid_eyr(eyr):
    """eyr (Expiration Year) - four digits; at least 2020 and at most 2030."""
    return len(eyr) == 4 and 2020 <= int(eyr) <= 2030
def newton_raphson_solver(
    target_function,
    target_function_derivative,
    initial_point,
    maximum_relative_error=0.0000000001,
    max_iterations=100,
):
    """Numerical solver based on Newton-Raphson approximation method.
    The Newton-Raphson method allows algorithmic approximation for the root
    of a differentiable function, given its derivative and an initial point at
    which this derivative does not vanish.
    Let :math:`f:\\left[a,b\\right]\\longrightarrow\\mathbb{R}` a
    differentiable function, :math:`f^{\\prime}` its derivative function and
    :math:`x_0\\in\\left[a,b\\right]`. A root of :math:`f` is then iteratively
    approximated by the recurrence
    .. math::
        x_n := x_{n-1} - \\frac{f(x_{n-1})}{f^{\\prime}(x_{n-1})}, n\\geq 1.
    The *relative error* associated with the :math:`n`-th iteration is
    .. math::
        e_n := | \\frac{x_n - x_{n-1}}{x_{n-1}} |, n \\geq 1.
    The approximation stops once :math:`e_n` < `maximum_relative_error`
    or after `max_iterations` iterations.
    """
    def _step(x):
        # One Newton-Raphson iteration: x - f(x)/f'(x).
        return x - target_function(x) / target_function_derivative(x)

    def _relative_error(previous, current):
        return abs((current - previous) / previous)

    previous = initial_point
    current = _step(previous)
    # One step was already taken above, so at most max_iterations - 1 remain.
    for _ in range(max_iterations - 1):
        if _relative_error(previous, current) < maximum_relative_error:
            break
        previous, current = current, _step(current)
    return current
def metrics_dict(data_list):
    """Organize calculated ECG data into a dictionary
    The ECG test characteristics of concern are extracted from the metrics
    list and organized into a dictionary to make the data more readable
    and easier to navigate.
    :param data_list: list of ECG metrics
    :returns: dictionary of metrics containing the keywords: 'duration',
        'voltage_extremes', 'num_beats', 'mean_hr_bpm', and 'beats'
    """
    keys = ('duration', 'voltage_extremes', 'num_beats', 'mean_hr_bpm', 'beats')
    # Positional mapping: data_list[i] pairs with keys[i].
    return {key: data_list[i] for i, key in enumerate(keys)}
def print_square(num: int = 20) -> str:
    """Return a sentence giving the square of *num*.

    The previous ``{num:.2}`` format spec is invalid for integers
    (precision is not allowed in integer format specifiers), so every
    call raised ValueError; plain interpolation fixes that.
    """
    result = num ** 2
    return f"The square of {num} is {result}."
def _stringify(**parameters):
"""Converts query parameters to a query string."""
return "&".join([f"{k}={v}" for k, v in parameters.items() if v is not None]) | 520a06e52c3876c4d2a32234cfecdfce37fb6f05 | 126,371 |
def calculate_iou(ground_truth, prediction):
    """Calculate the IoU of a single predicted vs. ground truth box.

    Boxes are (x1, x2, y1, y2) tuples of inclusive pixel coordinates.
    """
    x1_gt, x2_gt, y1_gt, y2_gt = ground_truth
    x1_p, x2_p, y1_p, y2_p = prediction
    if x1_p > x2_p or y1_p > y2_p:
        raise AssertionError("Prediction box is malformed? {}".format(prediction))
    if x1_gt > x2_gt or y1_gt > y2_gt:
        raise AssertionError("Ground truth box is malformed? {}".format(ground_truth))
    # Boxes that do not touch at all have zero overlap.
    if x2_gt < x1_p or x2_p < x1_gt or y2_gt < y1_p or y2_p < y1_gt:
        return 0.0

    def _pixel_area(x1, x2, y1, y2):
        # +1 because the coordinates are inclusive pixel indices.
        return (x2 - x1 + 1) * (y2 - y1 + 1)

    intersection = _pixel_area(max(x1_gt, x1_p), min(x2_gt, x2_p),
                               max(y1_gt, y1_p), min(y2_gt, y2_p))
    union = _pixel_area(*ground_truth) + _pixel_area(*prediction) - intersection
    return float(intersection) / union
def HttpRequestToString(req, include_data=True):
    """Converts a urllib2.Request to a string.

    Note: relies on the Python 2 urllib2.Request accessors
    (get_method/get_selector/get_type/get_host/get_data).
    Args:
        req: urllib2.Request
    Returns:
        Multi-line string representing the request.
    """
    headers = "".join("%s: %s\n" % (name, value)
                      for name, value in req.header_items())
    template = ("%(method)s %(selector)s %(type)s/1.1\n"
                "Host: %(host)s\n"
                "%(headers)s")
    if include_data:
        template += "\n%(data)s"
    return template % {
        'method': req.get_method(),
        'selector': req.get_selector(),
        'type': req.get_type().upper(),
        'host': req.get_host(),
        'headers': headers,
        'data': req.get_data(),
    }
def take(n, collection):
    """Returns at most n items from the collection in a list
    >>> take(4, range(100000, 1000000, 4))
    [100000, 100004, 100008, 100012]
    >>> take(10, ['hello', 'world'])
    ['hello', 'world']
    """
    result = []
    for item in collection:
        # Stop once n items are collected (also handles n <= 0 immediately).
        if len(result) >= n:
            break
        result.append(item)
    return result
import random
def choose_idx(arr):
    """Choose an index of the given array arr at random, using its entries as weights.
    """
    # Draw a point in [0, total weight) and find which entry's cumulative
    # range contains it.
    threshold = random.uniform(0, sum(arr))
    cumulative = 0
    for idx, weight in enumerate(arr):
        cumulative += weight
        if cumulative > threshold:
            return idx
    raise IndexError('Failed to generate valid index for array')
import uuid
def get(name):
    """
    Returns a unique experiment ID composed of the prefix and a UUID.
    :param name: [string] Prefix of the experiment.
    """
    return '{}_{}'.format(name, uuid.uuid1())
def text2binary(string):
    """
    Converts text to a binary string, 8 bits per character.
    >>> text2binary('Hi')
    '0100100001101001'
    """
    bits = []
    for char in string:
        bits.append(format(ord(char), '08b'))
    return ''.join(bits)
def _gen_offsets(F, dcn_kernel=3, dcn_pad=1):
    """ Generate offset for deformable convolutions.
    :param F: tensor-op namespace providing arange/repeat/tile/stack/reshape
        (presumably megengine.functional or similar -- TODO confirm)
    :param dcn_kernel: the kernel of deformable convolutions
    :param dcn_pad: the padding of deformable convolutions
    :return: the offsets
    """
    # Tap coordinates along one axis of the kernel window: -pad..pad.
    dcn_base = F.arange(-dcn_pad, dcn_pad + 1)
    # y repeats each coordinate, x tiles the whole row, giving the full
    # (y, x) grid of the kernel taps.
    dcn_base_y = F.repeat(dcn_base, dcn_kernel)
    dcn_base_x = F.tile(dcn_base, dcn_kernel)
    # Interleave y/x pairs and reshape to (1, 2*k*k, 1, 1).
    dcn_base_offset = F.reshape(F.stack(dcn_base_y, dcn_base_x, axis=1), (1, -1, 1, 1))
    return dcn_base_offset
import yaml
def get_creation_options(config: str, driver: str):
    """
    Gets a list of options for a specific format or returns None.
    :param config: The configuration for a datasource (YAML string).
    :param driver: The file format to look for specific creation options.
    :return: A tuple (warp creation options, translate creation options);
        both elements are None when the config is empty or has no entry."""
    if not config:
        return None, None
    conf = yaml.safe_load(config) or dict()
    params = conf.get("formats", {}).get(driver, {})
    return params.get("warp_params"), params.get("translate_params")
def urljoin(*args):
    """
    Joins given arguments into an url. Trailing but not leading slashes are
    stripped for each argument.
    """
    trimmed = [str(piece).rstrip('/') for piece in args]
    url = "/".join(trimmed)
    # Re-append a trailing slash when the last argument contained one.
    if '/' in args[-1]:
        url += '/'
    # Prepend a leading slash unless the first argument already has one.
    if '/' not in args[0]:
        url = '/' + url
    # Collapse doubled slashes (preserved quirk of the original).
    return url.replace('//', '/')
def factorial_loop(integer) -> int:
    """Compute integer! iteratively (returns 1 for integer <= 0)."""
    product = 1
    i = 1
    while i <= integer:
        product *= i
        i += 1
    return product
def _bencode_bytes(value, encoding='utf-8'):
""" Encode a bytestring (strings as UTF-8), eg 'hello' -> 5:hello """
if isinstance(value, str):
value = value.encode(encoding)
return str(len(value)).encode(encoding) + b':' + value | ee676971431074eaea3c313f12064ed324cae346 | 126,400 |
def _non_projective(u, v, w, x):
""" Checks if an edge pair is non-projective """
mnu = min(u, v)
mxu = max(u, v)
mnw = min(w, x)
mxw = max(w, x)
if mnu < mnw:
return (mxu < mxw) and (mxu > mnw)
elif mxu > mxw:
return (mnu > mnw) and (mnu < mxw) | 90dff04e127cf26ff37c7948e27c86ddbdfa3d37 | 126,401 |
def field_identifier(field):
    """
    Given a ``field`` of the format {'name': NAME, 'type': TYPE},
    this function converts it to ``TYPE NAME``
    """
    return f"{field['type']} {field['name']}"
def is_lower_case_letter(string):
    """Return True when *string* is a single lower-case character.

    Falsy inputs (None, '') return False. The original guard's
    ``string is None`` clause was dead code -- ``not string`` already
    covers None.
    """
    if not string:
        return False
    return len(string) == 1 and string.islower()
def create_huc_ids_list(np_array_huc_ids):
    """
    Create a Python list of HUC_IDs from the SuperPRZM output numpy array of HUC_IDs
    :param np_array_huc_ids: numpy array, [['0', '5', '0', '0', '1', '1', '0', '5', '4', '2', '1', '1'], [...]]
    :return: list
    """
    return ["".join(char_array) for char_array in np_array_huc_ids]
import yaml
def create_hparams(hparams_string=None, verbose=False):
    """Create model hyperparameters. Parse nondefault from given string.

    NOTE(review): despite the docstring, `hparams_string` and `verbose`
    are currently ignored -- hyperparameters are always read verbatim
    from 'config.yaml' in the working directory. Confirm whether
    string-override support was dropped intentionally.
    """
    with open('config.yaml') as f:
        hparams = yaml.safe_load(f)
    return hparams
def get_song_info(soup):
    """
    Retrieve the song name (english / japanese) and anime name from the
    breadcrumb list ("crumbs") of a lyrics page.
    :param BeautifulSoup soup: BeautifulSoup4 object of lyrics url
    :rtype [str, str]
    :return Tuple of song and anime name
    """
    breadcrumbs = soup.find("ul", {"id": "crumbs"}).find_all("li")
    # Last crumb holds the song title (possibly "english - japanese").
    song_name = [part.strip() for part in breadcrumbs[-1].get_text().split("-")]
    # Second-to-last crumb holds the anime name.
    anime_name = breadcrumbs[-2].get_text()
    return (song_name, anime_name)
def _length_checker(length: int, content: str) -> str:
"""Helper function to check if a string is shorter than expected length of not.
Args:
length: Maximum length of an expected string.
content: A string to be validated.
Returns:
A string of error message if validation fails, or an empty string if validation succeeds.
"""
if len(content) > length:
return 'Invalid label: %s has %s characters. The maximum is %s.\n' % (
content,
len(content),
length,
)
else:
return '' | 093504911d31afa2e458ca0559f5da58ac31b910 | 126,421 |
def _convert_schedule_to_task_rng(schedule):
"""Convert the schedule to dict{task: starting time range}"""
task_start_rng = dict()
for record in schedule:
# parse schedule entry
task = record[4]
t_start = record[5] # process start time
task_start_rng[task] = (t_start, t_start+1)
return task_start_rng | 908abdd503d3713b16a45762684c0347c29a1add | 126,425 |
def _escape_js_template_tags(s):
"""
Jinja Filter to escape javascript template variables.
"""
return '{{ ' + str(s) + ' }}' | 576e61e308996a55080f3af717c7ad84df159bbf | 126,428 |
def parse_direction(direction):
    """
    Use this to standardize parsing the traffic direction strings.
    :param direction: str; The direction value to parse.
    :return: Optional[str]; One of 'ingress', 'egress', or 'both'. Returns None if it could not parse the value.
    """
    aliases = {
        'ingress': ('ingress', 'incoming', 'inbound', 'in', 'i'),
        'egress': ('egress', 'outgoing', 'outbound', 'out', 'o'),
        'both': ('both', 'b', 'all', 'a'),
    }
    lowered = direction.lower()
    for canonical, names in aliases.items():
        if lowered in names:
            return canonical
    return None
import functools
def _fix(param_name, fixture, func):
"""
Decorates `func` to inject the `fixture` callable result as `param_name`.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs[param_name] = fixture()
return func(*args, **kwargs)
return wrapper | 36d7ffa9c1b531866a75fda874414aead741f1a8 | 126,432 |
def oneline(string: str) -> str:
    """Helper method that transforms a multiline string to one line for grepable output"""
    stripped_lines = (line.strip() for line in string.splitlines())
    return " ".join(stripped_lines)
import re
def snake_case(identifier: str) -> str:
    """Make a JavaScript identifier into a Python one
    Parameters
    ----------
    identifier: str
        Javascript identifier in CapitalSnakeCase or lowerSnakeCase
    Returns
    -------
    pythid: str
        Python-standard identifier in snake_case
    """
    converted = re.sub(r"[A-Z]", lambda m: "_" + m.group(0).lower(), identifier)
    # A leading capital produces a spurious leading underscore; drop it.
    if converted[0] == "_":
        converted = converted[1:]
    return converted
def find_groups(context, node):
    """
    Returns a list of all groups (from the modeling instance) whose
    member_node_ids contain the given node's id.
    """
    # dict.itervalues() is Python 2 only; .values() works on both 2 and 3.
    groups = []
    for group in context.modeling.instance.groups.values():
        if node.id in group.member_node_ids:
            groups.append(group)
    return groups
from typing import List
from typing import Any
import fnmatch
def is_list_element_fnmatching(l_elements: List[Any], s_fnmatch_searchpattern: str) -> bool:
    """True if at least one string element matches the fnmatch searchpattern.

    Non-string elements are ignored.

    >>> is_list_element_fnmatching([], 'bc')
    False
    >>> is_list_element_fnmatching(['abcd', 'def', 1, None], '*bc*')
    True
    >>> is_list_element_fnmatching(['abcd', 'def', 1, None], '*1*')
    False
    """
    # `(not l) or (l is None)` was redundant (None is already falsy), and
    # the flag-and-break loop is just any() spelled out.
    if not l_elements:
        return False
    return any(isinstance(element, str) and fnmatch.fnmatch(element, s_fnmatch_searchpattern)
               for element in l_elements)
def src_from_cell_col(cell_name_col, verbose=False):
    """
    Takes a column of sample names, extracts each source name (text before
    the first '.', lower-cased) and returns the resulting Series, named
    'source'. Prints value_counts when verbose=True.
    """
    sources = cell_name_col.map(lambda cell: cell.split('.')[0].lower())
    sources.name = 'source'
    if verbose:
        print(sources.value_counts())
    return sources
def get_tool_info(whitelist, tool_name):
    """
    Search the whitelist by tool name.
    :returns: The matching tool dict (annotated in place with its
        "category"), or None if no tool matching that name was found.
    :rtype: dict or None
    """
    for category, tools_in_category in whitelist.items():
        for info in tools_in_category:
            if info.get("name") == tool_name:
                # Record which category the tool was found under.
                info.update({"category": category})
                return info
    return None
def rivers_with_stations(stations):
    """Given a list of MonitoringStation objects, returns a sorted list of names
    of rivers with at least 1 monitoring station"""
    return sorted({station.river for station in stations})
def make_set_from_list(word_list):
    """
    Make a set of unique lower-cased elements from a list
    :type word_list: list(str)
    :param word_list: The list of word to transform into a set
    """
    return {word.lower() for word in word_list}
def iso_format(dt):
    """
    Converts a python datetime to an ISO-8601 standard string.
    The Octopus Energy API does not accept UTC represented as +00:00, so the
    "+00:00" suffix is rewritten to the "Z" syntax. e.g:
    datetime.datetime(2021, 1, 1, 23, 30, 0, tzinfo=pytz.utc) => "2021-01-01T23:30:00Z"
    :param datetime.datetime dt: The datetime to format.
    :returns: An ISO-8601 formatted date string
    :rtype: str
    """
    iso = dt.isoformat()
    return iso.replace('+00:00', 'Z')
import bisect
def get_index_before_and_after(pos, position_list):
    """
    Finds the index of the positions before and after the given position.
    position_list must be sorted ascending; the "before" index may be -1
    when pos sorts before every element.
    Parameters
    ----------
    pos : int
    position_list : list of int
    Returns
    -------
    (int, int)
    """
    after = bisect.bisect_left(position_list, pos)
    return after - 1, after
def cast_no_data_value(no_data_value, dtype):
    """Handles casting nodata values to the correct type (int for integer
    dtypes, float otherwise)."""
    integer_dtypes = {'uint8', 'uint16', 'int16', 'uint32', 'int32'}
    caster = int if dtype in integer_dtypes else float
    return caster(no_data_value)
def combine_names(msg_name, new_name):
    """combine msg-name with a new name (returns new_name alone when msg_name is falsy)."""
    if not msg_name:
        return new_name
    return "{0}_{1}".format(msg_name, new_name)
def duty_compare(duty, top):
    """ Returns the timer compare value for a given duty cycle and top value.
    Duty is in % """
    return int(top * duty / 100)
def follow_path(game_data, ship_name):
    """
    Make a ship follow a path, found by the path finding.
    Parameters
    ----------
    game_data: data of the game (dic).
    ship_name: name of the space ship (str).
    Return
    ------
    order: the order to do in order to follow the path <none|left|right|faster|slower>(str).
    Version
    -------
    Specification: Nicolas Van Bossuyt (v1. 28/04/17)
    Implementation: Nicolas Van Bossuyt (v1. 28/04/17)
    """
    path = game_data['ships'][ship_name]['objective_path']
    if not path:
        return 'none'
    # Consume the next node of the path and report its action.
    return path.pop(0)['to_do']
def index_on_length(patterns: list[str]) -> dict[int, list[str]]:
    """
    Group patterns into buckets keyed by their length.

    >>> index_on_length(['abcd', 'cde', 'agd'])
    {4: ['abcd'], 3: ['cde', 'agd']}
    """
    buckets: dict[int, list[str]] = {}
    for pattern in patterns:
        bucket = buckets.get(len(pattern))
        if bucket is None:
            buckets[len(pattern)] = [pattern]
        else:
            bucket.append(pattern)
    return buckets
def cmpid(a, b):
    """Three-way compare two objects by their Python id: 1, -1 or 0."""
    # bool - bool yields the conventional cmp-style -1/0/1.
    return (id(a) > id(b)) - (id(a) < id(b))
from typing import List
def get_header_from_scope(
        scope: dict, header_name: str
) -> List[str]:
    """Retrieve HTTP header values from the ASGI scope.

    Returns:
        A list of every value whose header key equals header_name
        (empty when the header is absent).
    """
    matches = []
    for key, value in scope['headers']:
        if key == header_name:
            matches.append(value)
    return matches
import socket
from contextlib import closing
def is_port_free(port, host="localhost"):
    """Checks whether nothing is accepting TCP connections on host:port."""
    # based on http://stackoverflow.com/a/35370008/952600
    # connect_ex returns 0 on a successful connection, i.e. the port is taken.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        return sock.connect_ex((host, port)) != 0
def IsLoopEnter(op):
    """Returns true if `op` is an Enter (or RefEnter)."""
    return op.type in ("Enter", "RefEnter")
def getThrusterFiringIntervalBefore(tfIndices, minDuration = 10):
    """Returns range of points between last two thruster firings in input
    See getThrusterFiringIntervalAfter() for more details. This
    function does the same job, but returns the last two indices
    in the array meeting the criteria.

    Returns (None, None) when no pair of firings at least minDuration
    apart is found.
    """
    nTfi = len(tfIndices)
    if nTfi == 0:
        return None, None
    # With a single firing, use the start of the series as the lower bound.
    if nTfi == 1:
        return 0, tfIndices[0]
    i = len(tfIndices)-1
    # Scan backwards for the last pair separated by at least minDuration.
    # NOTE(review): the loop stops at i == 2, so the pair
    # (tfIndices[0], tfIndices[1]) is never examined -- possibly an
    # off-by-one (`while i > 0`?); compare with
    # getThrusterFiringIntervalAfter() to confirm intent.
    while i > 1:
        if tfIndices[i-1] + minDuration < tfIndices[i]:
            return tfIndices[i-1], tfIndices[i]
        i -= 1
    return None, None
def int_to_bin(num_int, length):
    """Convert an int to a binary string of exactly the given length.

    Longer representations keep only the `length` least significant bits;
    shorter ones are zero-padded on the left.
    """
    bits = bin(num_int)[2:]
    if len(bits) >= length:
        return bits[len(bits) - length:]
    return bits.zfill(length)
def bfs_shortest_path(graph, start, goal):
    """Find the shortest path between 2 nodes in search space using BFS.

    Args:
        graph (dict): Search space as an adjacency mapping of
            node -> list of neighbour nodes.
        start: Starting state.
        goal: Goal state.

    Returns:
        list: The states that bring you from `start` to `goal` along a
        shortest path; or a message string when start == goal or when
        no connecting path exists.
    """
    from collections import deque

    # return message if start is goal
    if start == goal:
        return "That was easy! Start == goal"
    # keep track of explored nodes; a set gives O(1) membership tests
    explored = set()
    # Each queue entry is a full path from `start`.  Seeding with the
    # one-element list [start] (instead of the bare node) makes
    # multi-character node names work: the original `list(path)` on a
    # string only behaved for single-character names.  deque gives
    # O(1) popleft instead of list.pop(0)'s O(n).
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node in explored:
            continue
        # get neighbours if node is present, otherwise default to empty list
        for neighbour in graph.get(node, []):
            new_path = path + [neighbour]
            queue.append(new_path)
            # return path as soon as the goal is reached
            if neighbour == goal:
                return new_path
        explored.add(node)
    # in case there's no path between the 2 nodes
    return "So sorry, but a connecting path doesn't exist :("
def _fetch_none(_, __):
"""Fetch no rows and return `None`."""
return None | 95e786f4473e10db0bfaf5af7fcae7b904b456af | 126,498 |
def ewma_generator(dataframe_name, fast = 12, slow = 26):
    """Append fast and slow exponential moving averages of 'close'.

    Args:
        dataframe_name (df): Single security dataframe containing at least a 'close' column
        fast (int): Desired timeframe window used for fast exponential moving averages
        slow (int): Desired timeframe window used for slow exponential moving averages

    Returns:
        The same dataframe with two columns added in place:
            ewma_fast (flt): Column of values for fast average
            ewma_slow (flt): Column of values for slow average

    Tip:
        fast = 5 and slow = 35 will provide greater sensitivity
    """
    # Thank you Camden Kirkland - https://www.youtube.com/watch?v=-o7ByZc0UN8
    close = dataframe_name['close']
    for column, span in (('ewma_fast', fast), ('ewma_slow', slow)):
        dataframe_name[column] = close.ewm(span=span).mean()
    return dataframe_name
def breakpoint_A_for_period(binary_occupancy):
    """Find breakpoint A.

    Breakpoint A is the first timestep at which the detector has a
    static car on it that stays there until the queue discharges; we
    detect it as the first index where the detector is continuously
    occupied for 4 consecutive timesteps (looking forward).
    """
    # Reverse, take a 4-wide rolling "all", and reverse back, so that
    # position j answers: are the 4 values starting at j all occupied?
    # Positions too close to the end (incomplete window) become NaN and
    # are filled with 0 (not continuously occupied).
    forward_all = (binary_occupancy[::-1]
                   .rolling(4)
                   .agg(lambda w: w.all())[::-1]
                   .fillna(0))
    hits = forward_all[forward_all == 1]
    if hits.empty:
        # No breakpoint: the detector is never continuously occupied.
        return None
    return hits.index[0]
import typing
import pathlib
def get_pkgs_from_requirement(requirement: typing.Union[str, pathlib.Path]) -> typing.List[str]:
    """Return the raw lines (one per package, newline included) of a requirements file."""
    with open(requirement, mode='r') as handle:
        # Iterating a text file yields its lines, so list(f) == f.readlines().
        return list(handle)
from collections import Counter
from typing import List
def check_magazine(magazine: List[str], note: List[str]) -> str:
    """Decide whether `note` can be assembled from the words in `magazine`.

    >>> check_magazine(['give', 'me', 'one', 'grand', 'today', 'night'],
    ...                ['give', 'one', 'grand', 'today'])
    'Yes'
    >>> check_magazine(['two', 'times', 'three', 'is', 'not', 'four'],
    ...                ['two', 'times', 'two', 'is', 'four'])
    'No'
    """
    available = Counter(magazine)
    needed = Counter(note)
    # Counter subtraction drops non-positive counts, so the result is
    # empty exactly when every needed word is available often enough.
    missing = needed - available
    if missing:
        return "No"
    return "Yes"
import csv
def read_vote_data(filename):
    """Read a CSV file of vote data into a list of dicts.

    Each CSV row produces a dict with 'date', 'number', 'motion',
    'name' and 'result' keys taken from fixed column positions.
    """
    votes = []
    with open(filename, encoding='utf-8') as handle:
        for row in csv.reader(handle):
            votes.append({
                'date': row[0],
                'number': row[3],
                'motion': row[4],
                'name': row[6],
                'result': row[5],
            })
    return votes
def get_A2_hom(s):
    r"""
    Build A2 for the spatial error GM estimation with homoscedasticity
    as in Anselin (2011) [Anselin2011]_

    .. math::

        A_2 = \dfrac{(W + W')}{2}

    Parameters
    ----------
    s : csr_matrix
        PySAL W object converted into Scipy sparse matrix

    Returns
    -------
    csr_matrix
        Symmetrised matrix (W + W') / 2 in scipy sparse format
    """
    transposed = s.T
    return (s + transposed) / 2.
def dict2list(thedict, parmnames, default=None):
    """Map each name in `parmnames` to its value in `thedict`.

    Names missing from `thedict` fall back to `default`.
    """
    values = []
    for name in parmnames:
        values.append(thedict.get(name, default))
    return values
def is_unique(df, cols, ignore_null=False):
    """Fast determination of multi-column uniqueness.

    Args:
        df: DataFrame to check.
        cols: Column label or list of labels whose combined values must
            be unique per row.
        ignore_null (bool): When True, rows with nulls in `cols` are
            excluded from the check.

    Returns:
        bool: True when no duplicate combination of `cols` exists.
    """
    if ignore_null:
        # Rebind to a filtered copy instead of dropna(inplace=True),
        # which destructively mutated the caller's DataFrame as a
        # hidden side effect of what should be a pure query.
        df = df.dropna(subset=cols)
    return not df.duplicated(subset=cols).any()
import json
def load_json_fixture(path):
    """ Return the Python representation of the JSON fixture stored in path.

    :param path: Local path to JSON fixture file.
    :type: str
    :return: Python representation of JSON content.
    :rtype: object
    """
    with open(path, 'r') as fixture_file:
        content = fixture_file.read()
    return json.loads(content)
import torch
def order_parameter_cos(x):
    """
    Order parameter computed with the cosine formula from the paper.

    :param x: the state (phases) of the oscillators, oscillator axis last
    :return: The r value
    """
    n = x.shape[-1]
    # Pairwise phase differences over the last axis, then cos-summed
    # across both pairwise dimensions at once.
    pairwise = x.unsqueeze(-1) - x.unsqueeze(-2)
    total = torch.cos(pairwise).sum(dim=(-1, -2))
    return (1 / n) * (total ** (1 / 2))
def _get_shape(x):
""" Get the shape of a Tensor. """
return x.get_shape().as_list() | 9586ec7f7ea6e26b146ab0bc3f71c571b8b6da80 | 126,535 |
def add_key_value(contents, key, start_line, new_value):
    """Return a copy of `contents` with a "key: value" entry inserted.

    The new entry is placed just before index `start_line` and is
    followed by a blank line.  `contents` itself is not modified.
    """
    inserted = ["%s: %s" % (key, new_value), ""]
    return contents[:start_line] + inserted + contents[start_line:]
def spol(g1, g2):
    """
    Return the S-Polynomial of ``g_1`` and ``g_2``.

    Let `a_i t_i` be `LT(g_i)`, `b_i = a/a_i` with `a = LCM(a_i,a_j)`,
    and `s_i = t/t_i` with `t = LCM(t_i,t_j)`. Then the S-Polynomial
    is defined as: `b_1s_1g_1 - b_2s_2g_2`.

    INPUT:

    - ``g1`` -- polynomial

    - ``g2`` -- polynomial

    EXAMPLES::

        sage: from sage.rings.polynomial.toy_d_basis import spol
        sage: P.<x, y, z> = PolynomialRing(IntegerRing(), 3, order='lex')
        sage: f = x^2 - 1
        sage: g = 2*x*y - z
        sage: spol(f,g)
        x*z - 2*y
    """
    # Leading coefficients and their least common multiple.
    lc1, lc2 = g1.lc(), g2.lc()
    lc_lcm = lc1.lcm(lc2)
    # Leading monomials and their least common multiple.
    lm1, lm2 = g1.lm(), g2.lm()
    lm_lcm = lm1.parent().monomial_lcm(lm1, lm2)
    # b_i = a / a_i and s_i = t / t_i for each generator.
    return (lc_lcm // lc1) * (lm_lcm // lm1) * g1 - (lc_lcm // lc2) * (lm_lcm // lm2) * g2
def old_hindu_lunar_date(year, month, leap, day):
    """Build an Old Hindu lunar date as a [year, month, leap, day] list."""
    date = [year, month, leap, day]
    return date
def escape_latex(s):
    r"""Escape characters that are special in LaTeX.

    Based on http://stackoverflow.com/a/16264094/2570866, with the
    tilde, caret and backslash mappings corrected: ``\~`` and ``\^``
    are accent commands that modify the *next* character rather than
    printing the symbol, and a run of backslashes renders a line break
    rather than a printable backslash, so the ``\textasciitilde{}``,
    ``\textasciicircum{}`` and ``\textbackslash{}`` forms are used.
    """
    CHARS = {
        '&': r'\&',
        '%': r'\%',
        '$': r'\$',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '~': r'\textasciitilde{}',
        '^': r'\textasciicircum{}',
        '\\': r'\textbackslash{}',
        '\n': r'\\',  # try to preserve newlines
    }
    return "".join(CHARS.get(char, char) for char in s)
def formatError(error):
    """
    Format an error as a string. Write the error type as prefix.
    Eg. "[ValueError] invalid value".
    """
    return "[%s] %s" % (type(error).__name__, error)
def _sort_set_none_first(set_):
"""Sort a set, sorting ``None`` before other elements, if present.
"""
if None in set_:
set_.remove(None)
out = [None]
out.extend(sorted(set_))
set_.add(None)
return out
else:
return sorted(set_) | 7d83db3a5a6a36e9c4f5f0cb9641fa58e5d06f01 | 126,546 |
def hr_uptime(uptime):
    """
    Human readable uptime (in days/hours/minutes)

    Notes:
        http://unix.stackexchange.com/a/27014

    Args:
        uptime (int): uptime value in seconds

    Returns:
        string: human readable string, e.g. u'1d 2h 3m'
    """
    # Use integer divmod chains; the original relied on Python 3 true
    # division producing floats that '%d' then truncated, which is
    # fragile and loses precision for very large uptimes.
    minutes, _seconds = divmod(uptime, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return u'%dd %dh %dm' % (days, hours, minutes)
import base64
import json
def encode_additional_data(additional_data):
    """Serialise a dict to JSON and return it base64-encoded as a str.

    Raises ValueError when the argument is not a dict.
    """
    if not isinstance(additional_data, dict):
        raise ValueError("additional_data must be of type dict")
    payload = json.dumps(additional_data).encode()
    return base64.b64encode(payload).decode('utf-8')
import json
def print_json(analysis: dict):
    """
    Converts the any analysis dictionary into prettified JSON output
    """
    return json.dumps(analysis, sort_keys=True, indent=4)
def _convert_grad_position_type(grad_position):
"""Check and convert the type and size of grad position index."""
if isinstance(grad_position, tuple):
for gp in grad_position:
if not isinstance(gp, int):
raise TypeError(f"For 'F.grad', the element in 'grad_position' should be int, "
f"but got {type(gp).__name__}")
if gp < 0:
raise ValueError("The element in grad_position must be >= 0.")
elif isinstance(grad_position, int):
if grad_position < 0:
raise ValueError("grad_position must be >= 0.")
grad_position = (grad_position,)
else:
raise TypeError(f"For 'F.grad', the 'grad_position' should be int or tuple, "
f"but got {type(grad_position).__name__}")
return grad_position | bc8992732769851840233c8979907949a13b38ea | 126,556 |
def compare(date_1, date_2):
    """Compare two dates.

    Args:
        date_1 (Date): The first date.
        date_2 (Date): The second date.

    Returns:
        int: 0 if date_1 and date_2 are equal, -1 if date_2 is greater
        than date_1, 1 if date_1 is greater than date_2.
    """
    if date_1 == date_2:
        return 0
    return -1 if date_1 < date_2 else 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.