content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import math
def deg2rad(ang_deg):
"""
Converts degrees to radians
"""
return math.pi*ang_deg/180.0 | 190dbd8bfccba8ffe206ef5bb089145026bb9f07 | 103,596 |
from typing import Union
from typing import List
def scope_to_list(scope: Union[str, List]) -> List:
"""Convert a token scope to a list if necessary.
A scope in a theme should always be a string or list,
but just in case return an empty list if not
:param scope: The scope
:returns: Scope as list
"""
if isinstance(scope, list):
return scope
if isinstance(scope, str):
return [scope]
return [] | 41da6f9d70da060074e2bf58ad3782563b897949 | 103,597 |
def _num_extreme_words(words, extreme_words, average=True):
""" Count the number of common words
Inputs:
words (list of string): to be checked
extreme_words (set of string or dict[string] -> float): common words set
Returns:
tuple or list of int: # of extreme words in each extreme polars
"""
if not isinstance(extreme_words, (dict, set)):
raise Exception('[ERROR] common/rare word list should be set!')
elif isinstance(extreme_words, list):
extreme_words = set(extreme_words)
if not len(extreme_words) > 0:
raise Exception('[ERROR] no words found!!')
res = 0
for word in words:
if word in extreme_words:
res += 1
if average:
res /= len(extreme_words)
return res | 7e77fc34ca9908fdd1f11365b6844c096a142812 | 103,598 |
def issubclass(C, B): # real signature unknown; restored from __doc__
"""
issubclass(C, B) -> bool
Return whether class C is a subclass (i.e., a derived class) of class B.
When using a tuple as the second argument issubclass(X, (A, B, ...)),
is a shortcut for issubclass(X, A) or issubclass(X, B) or ... (etc.).
"""
return False | 0bd2ac3cdc9e21e713238dea61fe4c2f54ce7455 | 103,599 |
def parse_pointer(pointer):
""" Return the 3 bytes that make up the pointer in reversed order (little endian) """
str_repr = str(hex(pointer)).split("x")[1]
assert (
len(str_repr) == 6
), f"New location should only be 3 bytes. Was actually {str_repr}"
return int(str_repr[-2:], 16), int(str_repr[2:4], 16), int(str_repr[:2], 16) | 9fa5552f5bc47017c13057ff10a64bcc1a660735 | 103,601 |
def check_cut_condition(n, demands_across_cuts, capacities):
"""
Checks whether the cut condition is satisfied for the given demands across cuts and capacities.
:param n: ring size
:param demands_across_cuts: SymmetricMatrix containing demands across cuts
:param capacities: np.array containing capacities
:return: whether the cut condition is satisfied
"""
for i in range(0, n - 1):
for j in range(i + 1, n):
if demands_across_cuts[i, j] > capacities[i] + capacities[j]:
print(f"Cut condition violated for cut {(i, j)}!")
return False
return True | 67507601aaea8d00a5515bbda0a5e8f546db72fe | 103,603 |
from typing import Optional
def should_execute_now(step: int, step_every: Optional[int]) -> bool:
"""Returns true every step_every steps."""
if not step_every:
return False
return step % step_every == 0 | 26923fa512fcedc170772f7dad1028055084df4f | 103,604 |
def get_all_senders(df):
"""
Returns all unique sender names
"""
return df.sender.values | 0c44d4c362d34a6e40c77f7398b6dc1722583abe | 103,605 |
def _create_from_bytes(data, protobuf_klass):
"""Return a deserialized protobuf class from the passed in bytes data
and the protobuf class.
Args:
data (bytes): The serialized protobuf.
protobuf_klass (callable): The protobuf class. Either PolicyList or
RolesList.
Returns:
Either RolesList or PolicyList
"""
protobuf_instance = protobuf_klass()
protobuf_instance.ParseFromString(data)
return protobuf_instance | 28b6031a2630553b5f7ea6c6fb22ff53c69187cc | 103,606 |
def docfile(request):
"""Return all documentation files with Python code to be tested."""
return request.param | 09eef9dfce715b7415a8fd605ccb9d68c8c672e1 | 103,608 |
import re
def parse_size(size, binary=False):
"""
Parse a human readable data size and return the number of bytes.
Match humanfriendly.parse_size
:param size: The human readable file size to parse (a string).
:param binary: :data:`True` to use binary multiples of bytes (base-2) for
ambiguous unit symbols and names, :data:`False` to use
decimal multiples of bytes (base-10).
:returns: The corresponding size in bytes (an integer).
:raises: :exc:`InvalidSize` when the input can't be parsed.
This function knows how to parse sizes in bytes, kilobytes, megabytes,
gigabytes, terabytes and petabytes. Some examples:
>>> parse_size('42')
42
>>> parse_size('13b')
13
>>> parse_size('5 bytes')
5
>>> parse_size('1 KB')
1000
>>> parse_size('1 kilobyte')
1000
>>> parse_size('1 KiB')
1024
>>> parse_size('1 KB', binary=True)
1024
>>> parse_size('1.5 GB')
1500000000
>>> parse_size('1.5 GB', binary=True)
1610612736
"""
def tokenize(text):
tokenized_input = []
for token in re.split(r'(\d+(?:\.\d+)?)', text):
token = token.strip()
if re.match(r'\d+\.\d+', token):
tokenized_input.append(float(token))
elif token.isdigit():
tokenized_input.append(int(token))
elif token:
tokenized_input.append(token)
return tokenized_input
tokens = tokenize(str(size))
if tokens and isinstance(tokens[0], (int, float)):
disk_size_units_b = \
(('B', 'bytes'), ('KiB', 'kibibyte'), ('MiB', 'mebibyte'), ('GiB', 'gibibyte'),
('TiB', 'tebibyte'), ('PiB', 'pebibyte'))
disk_size_units_d = \
(('B', 'bytes'), ('KB', 'kilobyte'), ('MB', 'megabyte'), ('GB', 'gigabyte'),
('TB', 'terabyte'), ('PB', 'petabyte'))
disk_size_units_b = [(1024 ** i, s[0], s[1]) for i, s in enumerate(disk_size_units_b)]
k = 1024 if binary else 1000
disk_size_units_d = [(k ** i, s[0], s[1]) for i, s in enumerate(disk_size_units_d)]
disk_size_units = (disk_size_units_b + disk_size_units_d) \
if binary else (disk_size_units_d + disk_size_units_b)
# Get the normalized unit (if any) from the tokenized input.
normalized_unit = tokens[1].lower() if len(tokens) == 2 and isinstance(tokens[1], str) else ''
# If the input contains only a number, it's assumed to be the number of
# bytes. The second token can also explicitly reference the unit bytes.
if len(tokens) == 1 or normalized_unit.startswith('b'):
return int(tokens[0])
# Otherwise we expect two tokens: A number and a unit.
if normalized_unit:
# Convert plural units to singular units, for details:
# https://github.com/xolox/python-humanfriendly/issues/26
normalized_unit = normalized_unit.rstrip('s')
for k, low, high in disk_size_units:
# First we check for unambiguous symbols (KiB, MiB, GiB, etc)
# and names (kibibyte, mebibyte, gibibyte, etc) because their
# handling is always the same.
if normalized_unit in (low.lower(), high.lower()):
return int(tokens[0] * k)
# Now we will deal with ambiguous prefixes (K, M, G, etc),
# symbols (KB, MB, GB, etc) and names (kilobyte, megabyte,
# gigabyte, etc) according to the caller's preference.
if (normalized_unit in (low.lower(), high.lower()) or
normalized_unit.startswith(low.lower())):
return int(tokens[0] * k)
raise ValueError("Failed to parse size! (input {} was tokenized as {})".format(size, tokens)) | fc53c07d81df5e4fb39b5dca7c9a62c4a9a6f3b2 | 103,609 |
import re
def to_regex(regex, flags=0):
"""
Given a string, this function returns a new re.RegexObject.
Given a re.RegexObject, this function just returns the same object.
:type regex: string|re.RegexObject
:param regex: A regex or a re.RegexObject
:type flags: int
:param flags: See Python's re.compile().
:rtype: re.RegexObject
:return: The Python regex object.
"""
if regex is None:
raise TypeError('None can not be cast to re.RegexObject')
if hasattr(regex, 'match'):
return regex
return re.compile(regex, flags) | 1d2e81ce5f14fb63cc9a6d8945b381df919ccc05 | 103,610 |
def is_gcs(path):
"""
Check if path is to a google cloud storage directory or a local one. Determined from the presence of 'gs'
at the beginning of the path.
Arguments:
path (str): path to assess
Returns:
bool: True if path is on gcs and False if local
"""
if path[:2] == "gs":
return True
else:
return False | 8d29960bec296bd5a246adbf3b53bdd679f50560 | 103,611 |
def translate(x, y):
"""
Generate an SVG transform statement representing a simple translation.
"""
return "translate(%i %i)" % (x, y) | 82351dbbc0fe22b7db4141acd28d2fd40cc35162 | 103,613 |
def sphinx_doi_link(doi):
"""
Generate a string with a restructured text link to a given DOI.
Parameters
----------
doi : :class:`str`
Returns
--------
:class:`str`
String with doi link.
"""
return "`{} <https://dx.doi.org/{}>`__".format(doi, doi) | 561d1892361865786f47a9a1d3f8e18f6475da77 | 103,614 |
def faceToInt(face):
"""Transforms str-like face to int"""
if face == "A":
return 14
elif face=="K":
return 13
elif face=="Q":
return 12
elif face=="J":
return 11
elif face=="T":
return 10
else: return int(face) | 43137ca9bea239e00af158b11e8a06deec408158 | 103,620 |
def primary_function(x1, y1, x2, y2):
"""
a = (y2- y1) / (x2 -x1)
b = y1 - ax1
Return
y = ax + b
----------
a: float
b: float
"""
a = (y2 -y1) / ((x2 -x1))
b = y1 - a * x1
return [a, b] | 4b77b0d4f55d7a3365cbae3278d79908bc71cfec | 103,622 |
def mps_to_knots(mps):
"""Convert meters per second to knots."""
if mps is None:
return None
return mps / 0.51444 | ff46178b6aaf60c701b45879167285c9b6cdc803 | 103,623 |
def str2tuple(s):
"""
Returns tuple representation of product state.
Parameters
----------
s : str
Product state.
"""
return tuple( pos for pos, char in enumerate(s) if char == "1" ) | 9dfc66c382dc038bef8cd494508f7afa1b19e25d | 103,627 |
def pad_int_str(int_to_pad: int, n: int = 2) -> str:
"""Converts an integer to a string, padding with leading zeros
to get n characters. Intended for date and time formatting.
"""
base = str(int_to_pad)
for k in range(n - 1):
if int_to_pad < 10 ** (k + 1):
base = "0" + base
return base | b073efda364452570bd4f8598525c66f7518dc98 | 103,630 |
def get_most_popular(series):
"""
Gets the most repeated data entry in a pandas Series and its occurrences' count
Args:
(Series) series - Pandas Series from which the most repeated data entry is returned
Returns:
most_popular - the most common data entry in series
count - the count of occurrences of most_popular in series
"""
most_popular = series.mode()[0]
count = sum(series == most_popular)
return most_popular, count | 07a183f7cf92784c5951bd49b64502678afbd6f3 | 103,631 |
def find_largest_digit_helper(num, max_num):
"""
:param num: the number that we should pick the largest digit from.
:param max_num: current maximum number
:return: the largest number
"""
# there is no number left
if num == 0:
return max_num
else:
# pick the last number to compare
if num % 10 > max_num:
max_num = num % 10
# delete the last number and compare again
return find_largest_digit_helper(num//10, max_num)
else:
return find_largest_digit_helper(num//10, max_num) | 51bc059fdd9cb3ab73f88bf3e16f1530c669b180 | 103,632 |
import re
def parse_checker_config(config_dump):
"""
Return the parsed clang-tidy config options as a list of
(flag, default_value) tuples.
config_dump -- clang-tidy config options YAML dump.
"""
reg = re.compile(r'key:\s+(\S+)\s+value:\s+([^\n]+)')
return re.findall(reg, config_dump) | 4d95c7144bcfe981407aad302df2cd17d6c450c2 | 103,633 |
from typing import Tuple
def unit_scale(val: float) -> Tuple[float, str]:
"""Rescale value using scale units
Args:
val: input value
Returns:
tuple of rescaled value and unit
"""
if val // 1e12 > 0:
return val / 1e12, 'T'
elif val // 1e9 > 0:
return val / 1e9, 'G'
elif val // 1e6 > 0:
return val / 1e6, 'M'
elif val // 1e3 > 0:
return val / 1e3, 'k'
else:
return val, '' | cd16d9ee1d06dd275ef7553b5f47908d89039425 | 103,634 |
def string_to_number(df, column):
"""
Convert string into float
:param df: dataframe to convert
:param column: column index to convert
:return: converted dataframe
"""
# Check if first value (0) of the column (column) is a string
if isinstance(df.iloc[0, df.columns.get_loc(column)], str):
# Replace comma with an empty
df[column] = df[column].str.replace(",", "")
# Convert string into float
df[column] = df[column].astype(float)
# return converted dataframe
return df | e64d91474bd5b72c6174af5db726a56422af3917 | 103,635 |
import torch
def random_rectangles(size, num_rects, max_sidelen=20):
"""
>>> rects, masks = random_rectangles(256, 10)
>>> tuple(rects.shape)
(10, 4)
>>> tuple(masks.shape)
(10, 256, 256)
>>> bool(torch.all((0 <= rects[:, 0]) * (rects[:, 2] < 256)))
True
>>> bool(torch.all((0 <= rects[:, 1]) * (rects[:, 3] < 256)))
True
>>> bool(torch.all(rects[:, 0] < rects[:, 2]))
True
>>> bool(torch.all(rects[:, 1] < rects[:, 3]))
True
"""
# We use integer-tensor to make sure there's no issue with precision
xs, ys = torch.LongTensor(2, num_rects).random_(0, size - max_sidelen - 1)
ws, hs = torch.LongTensor(2, num_rects).random_(1, max_sidelen)
masks = torch.zeros(num_rects, size, size, dtype=torch.float32)
for mask, x, y, w, h in zip(masks, xs, ys, ws, hs):
mask[y : y + h + 1, x : x + w + 1] = 1
return torch.stack((xs, ys, xs + ws, ys + hs), dim=-1), masks | b565a21050f58636e54fde7f5b930d79d9832248 | 103,636 |
def contrast(lum1, lum2):
"""Get contrast ratio."""
return (lum1 + 0.05) / (lum2 + 0.05) if (lum1 > lum2) else (lum2 + 0.05) / (lum1 + 0.05) | 406b653fc192e5c63dc4669c21a8d1f04c7f1cfa | 103,637 |
import hashlib
def tx_compute_hash(tx):
"""
Compute the SHA256 double hash of a transaction.
Arguments:
tx (string): transaction data as an ASCII hex string
Return:
string: transaction hash as an ASCII hex string
"""
return hashlib.sha256(hashlib.sha256(bytes.fromhex(tx)).digest()).digest()[::-1].hex() | 41f31c8db656fee9469060793a4fcc165b4b1889 | 103,640 |
def dfs_iter(graph: dict, target, start=None) -> bool:
"""Iterative DFS.
Args:
graph: keys are vertices, values are lists of adjacent vertices
target: node value to search for
start: node to start search from
Examples:
>>> dfs_iter(graph, 7)
True
>>> dfs_iter(graph, 8)
False
Returns:
True if `target` found. Otherwise, False.
"""
if not graph or target not in graph or (start is not None and start not in graph):
return False
if start is None:
start = list(graph.keys())[0]
stack = [start]
visited = []
while stack:
v = stack.pop()
if v == target:
return True
if v not in visited:
visited.append(v)
for adj in graph[v]:
stack.append(adj)
return False | 491551b22f35abcd58eb277a011c49ee6090fd5a | 103,641 |
import hashlib
def generate_md5_hash(data: str):
"""Generate a md5 hash of the input str
:param data: the input str
:return: the md5 hash string
"""
return hashlib.md5(data.encode()).hexdigest() | 138bff30424f9d6a842091a1da68969fae21a6d7 | 103,643 |
from typing import Dict
from typing import List
from typing import Tuple
def count_bags(
rules: Dict[str, List[Tuple[str, int]]], bag_name: str, multiplier: int
) -> int:
"""
Count the number of bags necessarily contained in `multipler` bags of
type `bag_name` according to the `rules`.
Note that this includes the outer bags themselves!
"""
return multiplier * (
1 + sum(count_bags(rules, name, mult) for name, mult in rules[bag_name])
) | fd4c8d95e14f7cdf70517e3469adaf21ef4f2f08 | 103,645 |
def bin(s):
"""
Convert an integer into a binary string.
Included for portability, Python 3 has this function built-in.
"""
return str(s) if s<=1 else bin(s>>1) + str(s&1) | 7e90e56e2ad0a979c7583c10f616db50ae73a631 | 103,647 |
def check_if_nested(data):
"""Check whether a data structure contains lists/dicts."""
if isinstance(data, dict):
for k in data:
if isinstance(data[k], (list, dict)):
return True
elif isinstance(data, list):
for i in data:
if isinstance(i, (list, dict)):
return True
return False | 1fffc1a16d3993479a601d5f40ffae8c7f7e7b7c | 103,648 |
from typing import Dict
def acquire_labels(rules: Dict) -> Dict:
"""
Extract labels from rules.
:rules (Dict) A dictionary containing the rules
Return a dictionary containing the labels as key:value pairs
"""
labels = {}
for ruleset in rules['rules']:
for label in ruleset.get('labelSet', {}).keys():
if label not in labels:
labels[label] = ''
return labels | 5e03b0d799df9fcfbb62ed52f2303411dda3e226 | 103,649 |
def locate_candidate_recommendation(recommendation_list):
"""
Locates a candidate recommendation for workflow testing in our recommendation list.
Args:
recommendation_list (list[Recommendation]): List of possible recommendations to be our candidate.
Returns:
Recommendation: A candidate recommendation for testing. Looks for a recommendation with NEW status by
preference, then REJECTED, and then ACCEPTED. If none is found, returns None.
"""
for initial_status in ['NEW', 'REJECTED', 'ACCEPTED']:
rec_candidates = [rec for rec in recommendation_list
if rec.rule_type == 'reputation_override' and rec.workflow_.status == initial_status]
if len(rec_candidates) > 0:
return rec_candidates[0]
return None | 85ea14f4d017f2d6a3dd162d34d72d08e092864b | 103,652 |
def header_file_model_code(modelname, dyear,c_cofs,s_cofs,c_secvars,s_secvars):
"""return the code defining the WMM coefficents and model
Stored in a ConstModel, index=((2*maxdegree-m+1)*m)/2+n
typedef struct {
const float epoch;
const float Main_Field_Coeff_C[NUMCOF];
const float Main_Field_Coeff_S[NUMCOF];
const float Secular_Var_Coeff_C[NUMCOF];
const float Secular_Var_Coeff_S[NUMCOF];
} ConstModel;
Args:
modelname(str, a valid C++ name): name of the model
dyear(positive float): the year of the magnetic model, ex 2015.0
c_cofs,s_cofs,c_secvars,s_secvars(list of floats): coefficents of the model"""
head="""constexpr
#ifdef PROGMEM
PROGMEM
#endif /* PROGMEM */
ConstModel %s = {%f,\n"""%(modelname,dyear)
modeltail= '};\n\n'
cs='{'
ss='{'
csec='{'
ssec='{'
for i in range(len(c_cofs)):
cs+= repr(c_cofs[i])+','
ss+= repr(s_cofs[i])+','
csec+= repr(c_secvars[i])+','
ssec+= repr(s_secvars[i])+','
cs= cs[:-1]+'}'
ss= ss[:-1]+'}'
csec= csec[:-1]+'}'
ssec= ssec[:-1]+'}'
return head+cs+',\n'+ss+',\n'+csec+',\n'+ssec+modeltail | 24f8965a417ac0eddff403af6b4fae83eafc3db0 | 103,661 |
import re
def strip_tags(s):
"""
Remove tags from `s` using RE_TAGS.
"""
RE_TAGS = re.compile(r"<([^>]+)>", re.UNICODE)
return RE_TAGS.sub("", s) | 19cb5ae8a909deab8e65b2f8b3e017f034ec354a | 103,663 |
def processTermId(termInfo):
"""Parse the term ID column. Return the termId and termUrl. """
if not termInfo.has_key("href"):
termId = termInfo.text
termUrl = "missing"
else:
termId = termInfo.text
termUrl = termInfo["href"]
return((termId, termUrl)) | c29272dcf7da762c051181ff91853b114fe1bab2 | 103,680 |
import hashlib
def _sha256_hash(filename, blocksize=65536):
"""Hash the contents of an open file handle with SHA256"""
hash_obj = hashlib.sha256()
with open(filename, "rb") as file_obj:
block = file_obj.read(blocksize)
while block:
hash_obj.update(block)
block = file_obj.read(blocksize)
return hash_obj.hexdigest() | ad720bd9186dbd222a4b2483fa0084a3a30eb1e2 | 103,685 |
def transform_input_kernel(kernel):
"""Transforms weights of a single CuDNN input kernel into the regular Keras format."""
return kernel.T.reshape(kernel.shape, order='F') | fc2b69664c364e6989566c8f9538109c7a7833b5 | 103,686 |
def _try_format_numeric(text):
"""remove leading/trailing zeros, leading "+", etc. from numbers. Non numeric values are left untouched."""
try:
numeric = float(text)
if int(numeric) == numeric: # remove trailing .0
numeric = int(numeric)
text = str(numeric)
except ValueError:
pass
return text | c21760fccf6e4f340a5f47ec79af925508df9b84 | 103,687 |
def calculate_cardinality(feature_vector, indices=None):
"""
Calculate cardinality of each features
:param feature_vector: three dimensional vector with features
:param indices: list of indices of the features to be extracted
:return: a map where each key is the index of the feature and the value is a map feature_value,
value_index.
For example
[(0, {'feature1': 1, 'feature2': 2})]
indicates that the feature is at index 0 and has two values, features1 and features2 with two
unique index.
NOTE: the features are indexed from 1 to n + 1. The 0 value is reserved as padding
"""
columns_length = []
index = 0
if not len(feature_vector) > 0:
return []
for index_column in range(index, len(feature_vector[0][0])):
if indices and index_column not in indices:
index += 1
continue
values = set()
for index_document in range(0, len(feature_vector)):
for index_row in range(0, len(feature_vector[index_document])):
value = feature_vector[index_document][index_row][index_column]
if value != " ":
values.add(value)
values = sorted(values)
values_cardinality = len(values)
values_list = list(values)
values_to_int = {}
for val_num in range(0, values_cardinality):
# We reserve the 0 for the unseen features so the indexes will go from 1 to cardinality + 1
values_to_int[values_list[val_num]] = val_num + 1
columns_length.append((index, values_to_int))
index += 1
return columns_length | e54562a0272e5f6c10dbad3c41e02a81db912ca8 | 103,688 |
def difference_between_sum_of_squares_and_sum_of_n_first_natural_numbers(n: int) -> int:
"""
Difference between A and B^2, given that:
Sum of all natural numbers up to n:
B = n*(n + 1)/2
Sum of the squares of all natural numbers up to n:
A = n(n + 1)(2n + 1)/6
"""
# A = n * (n + 1) * ((2 * n) + 1) / 6
# B = n * (n + 1) / 2
# B_squared = B ** 2
#
# Algebraically simplified:
result = 3 * (n ** 4 - n ** 2) + 2 * (n ** 3 - n)
return int(result / 12) | 99021bf24a1d9b813e747a4f3d7e366835c99377 | 103,695 |
def strip_str(string):
"""Strip string."""
return string.strip('"').strip("'").strip() | 5aad4db2403b598471de0b8099281cf04b2c9784 | 103,696 |
def get_raw_warning(raw_input_file, initial_index):
"""This function returns the total text of a gbuild warning message.
Inputs:
- raw_input_file: Absolute path to the raw gbuild compiler log containing warnings [string]
- initial_index: The numeric index of where the warnings was found in the log file [int]
Outputs:
- warning_text: Full text of the warning starting at initial_index [string]
"""
# Read in the input file
with open(raw_input_file, 'r') as input_fh:
input_data = input_fh.readlines()
# Initialize variables
start_point = 0
end_point = len(input_data)
# Get the initial line
current_index = initial_index
line = input_data[current_index]
# Get the start point
while (not line.startswith('\n')) and ('output from compiling' not in line.lower()):
# Update the start point
start_point = current_index
# Update the current line
current_index = current_index - 1
line = input_data[current_index]
# Get the initial line
current_index = initial_index
line = input_data[current_index]
# Get the end point
while not line == '\n':
# Update the end point
end_point = current_index
# Update the current line
current_index = current_index + 1
line = input_data[current_index]
# Store the warning text
warning_text = []
for i in range(start_point, end_point):
warning_text.append(input_data[i])
return warning_text | dfb6302b48d328668e443b3c7f406bd312dd3d52 | 103,697 |
def ms_to_mph(ms):
"""
Convert miles per hour to meters per second
:param ms: meters per second (ms)
:type ms: float
:return: miles per hour
:rtype: float
"""
return ms / 0.44704 | e9de3ae8242a01989bd86ba1fcafba393ca7d1d8 | 103,699 |
import itertools
def list_of_first_n(v,n):
"""
Given an iterator v, return first n elements it produces as a list.
INPUT:
- ``v`` - an iterator
- ``n`` - an integer
OUTPUT:
- a list
EXAMPLES::
sage: sage.server.notebook.interact.list_of_first_n(Primes(), 10)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
sage: sage.server.notebook.interact.list_of_first_n((1..5), 10)
[1, 2, 3, 4, 5]
sage: sage.server.notebook.interact.list_of_first_n(QQ, 10)
[0, 1, -1, 1/2, -1/2, 2, -2, 1/3, -1/3, 3]
"""
return list(itertools.islice(v, 0, n)) | c649458cba391c37fcf50b03d947fac061396e5c | 103,700 |
def _linear(x):
"""Linear activation. Do nothing on the input."""
return x | 262d377419410b1891f002112bbc2b4315cc0fe2 | 103,703 |
from typing import Sized
from typing import Iterable
from typing import Mapping
def is_listy(x):
"""
returns a boolean indicating whether the passed object is "listy",
which we define as a sized iterable which is not a map or string
"""
return isinstance(x, Sized) and isinstance(x, Iterable) and not isinstance(x, (Mapping, type(b''), type(''))) | 9261569c6a68695f1c67da6b5fe100f8bf0c1f47 | 103,704 |
import hashlib
def get_checksum_from_picture(picture: bytes, method: str = "md5") -> str:
"""Calculate the checksum of the provided picture, using the desired method.
Available methods can be fetched using the the algorithms_available function.
:param picture: picture as bytes
:param method: hashing method as string (optional, default=md5)
:return: checksum as string
"""
h = hashlib.new(method.lower())
h.update(picture)
return h.hexdigest() | 2a0ea3c47d55fffab4f9276381954d09d83c5fe2 | 103,705 |
def sum_array(lst: list):
"""Returns the sum of the numbers in the list.
Example:
>>> list(range(0,10))\n
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> myrange = list(range(0,10))\n
>>> sum_array(myrange)\n
45
>>> sum(list(range(0,10)))\n
45
"""
return sum(lst) | e4af18f200bc686475c11b0823b9e53e023563fb | 103,717 |
def new_cov_when_divide_x(cov_xy, x, B, B_err):
"""Analogous to the above, but when x is divided by independent data.
Need to give x/B and B."""
# when doing e.g. RV = AV / EBV, the covariance gets a factor 1 / EBV
new_cov = cov_xy.copy()
new_cov[:, 0, 1] /= B
new_cov[:, 1, 0] /= B
# NH_AV is not affected
# V(RV) = V(x / B) becomes (x / B)**2 * ( (Vx / x)**2 + (VB / B)**2 )
# (value * relative error)
Vx = cov_xy[:, 0, 0]
new_cov[:, 0, 0] = (x / B) ** 2 * (Vx / x ** 2 + (B_err / B) ** 2)
return new_cov | 6f5b54eeecf8d3ccf5ce37f146a165210ae172ff | 103,719 |
def duplicate_check(x, y, counter, queue):
"""
(int, int, int, list of tuple) -> bool
Return true if and only if queue does not contain an item with the same
x, y and a lower or equal counter
"""
for item in queue:
if item[0] == x and item[1] == y and item[2] <= counter:
return False
return True | 3f6d66b0d9b282ad8fe76b485d0e2fbea7cedb0f | 103,720 |
import importlib
def _can_import(name: str) -> bool:
"""Attempt to import a module. Return a bool indicating success."""
try:
importlib.import_module(name)
return True
except ImportError:
return False | 3febd8fed42279265c5faa26340997824ae6c8b5 | 103,725 |
import hashlib
def md5(s):
"""Compute MD5 Hash."""
_md5 = hashlib.md5()
_md5.update(s.encode())
return _md5.hexdigest() | c27271e80c8e5516a059871bbc73917b16498b19 | 103,730 |
import torch
def embeddings_to_cosine_similarity_matrix(z):
"""Converts a a tensor of n embeddings to an (n, n) tensor of similarities.
"""
cosine_similarity = torch.matmul(z, z.t())
embedding_norms = torch.norm(z, p=2, dim=1)
embedding_norms_mat = embedding_norms.unsqueeze(0)*embedding_norms.unsqueeze(1)
cosine_similarity = cosine_similarity / (embedding_norms_mat)
return cosine_similarity | f68dcdcceb4eea59d4525650111f3b2720ea878f | 103,732 |
def get_commands_to_remove_vpc(domain):
"""Gets commands to remove vpc domain
Args:
domain (str): vpc domain ID
Returns:
list: ordered list of commands to remove vpc domain
Note:
Specific for Ansible module(s). Not to be called otherwise.
"""
commands = []
commands.append('no vpc domain ' + domain)
return commands | 9fa31aa6ca84da1e7221d25d9b2a7d2490cbaa8d | 103,733 |
import struct
def parse_boolean(s):
"""Parses a single boolean value from a single byte"""
return struct.unpack('<?', s)[0] | 09322cbca1ab9cb04fce03ee3ca976bbef2fa53b | 103,734 |
async def root():
"""Ping the API."""
return True | 1fcb89d4a997d08f327f1acb633785cb09bd3841 | 103,739 |
def _object_to_dict(obj):
"""
Recursively get the attributes
of an object as a dictionary.
:param obj: Object whose attributes are to be extracted.
:returns: A dictionary or the object.
"""
# https://stackoverflow.com/a/71366813/7175713
data = {}
if getattr(obj, "__dict__", None):
for key, value in obj.__dict__.items():
data[key] = _object_to_dict(value)
return data
return obj | 00e25f3dc1111c314f946725902cfec3f612eb85 | 103,743 |
def transform_phone(number):
"""Expected phone number format (555)555-5555. Changes to spaces only."""
phone = number.replace("(", "").replace(")", " ").replace("-", " ")
return phone | 02f5f407a8920aea5f37dadd18ae2d81280a78b6 | 103,745 |
def get_facts_dict(junos_module):
"""Retreive PyEZ facts and convert to a standard dict w/o custom types.
Ansible >= 2.0 doesn't like custom objects in a modules return value.
Because PyEZ facts are a custom object rather than a true dict they must be
converted to a standard dict. Since facts are read-only, we must begin by
copying facts into a dict. Since PyEZ facts are "on-demand", the
junos_module.dev instance must be an open PyEZ Device instance ojbect
before this function is called.
Args:
junos_module: An instance of a JuniperJunosModule.
Returns:
A dict containing the device facts.
"""
# Retrieve all PyEZ-supported facts and copy to a standard dict.
facts = dict(junos_module.dev.facts)
# Add two useful facts that are implement as PyEZ Device attributes.
facts['re_name'] = junos_module.dev.re_name
facts['master_state'] = junos_module.dev.master
# Ansible doesn't allow keys starting with numbers.
# Replace the '2RE' key with the 'has_2RE' key.
if '2RE' in facts:
facts['has_2RE'] = facts['2RE']
del facts['2RE']
# The value of the 'version_info' key is a custom junos.version_info
# object. Convert this value to a dict.
if 'version_info' in facts and facts['version_info'] is not None:
facts['version_info'] = dict(facts['version_info'])
# The values of the ['junos_info'][re_name]['object'] keys are
# custom junos.version_info objects. Convert all of these to dicts.
if 'junos_info' in facts and facts['junos_info'] is not None:
for key in facts['junos_info']:
facts['junos_info'][key]['object'] = dict(
facts['junos_info'][key]['object'])
return facts | ed75ad2c0f6a341b66eea388e085014dce888cd1 | 103,750 |
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list[int], optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
numpy.transpose
Availability
--------
Multiple GPUs, Multiple CPUs
"""
return a.transpose(axes=axes) | 608619bd4453bfe1c2105bdd49febbb167c2322a | 103,751 |
def reorderDFColumns(df, start=None, end=None):
"""
This function reorder columns of a DataFrame.
It takes columns given in the list `start` and move them to the left.
Its also takes columns in `end` and move them to the right.
"""
if start is None:
start = []
if end is None:
end = []
assert isinstance(start, list) and isinstance(end, list)
cols = list(df.columns)
for c in start:
if c not in cols:
start.remove(c)
for c in end:
if c not in cols or c in start:
end.remove(c)
for c in start + end:
cols.remove(c)
cols = list(start) + cols + list(end)
return df[cols] | 9062507b86f9b410cdc76c7b676dfe9d60d499e5 | 103,753 |
def _int_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, int):
value = str(value)
return value | 7ce6ab8854b170b68eb9e792118482728a982b00 | 103,756 |
def make_lex_dict(lexicon_file):
"""
Convert lexicon file to a dictionary
"""
lex_dict = {}
for line in lexicon_file.split('\n'):
(word, measure) = line.strip().split('\t')[0:2]
lex_dict[word] = float(measure)
return lex_dict | 440f3d675100d4b3ca9546fba9774a18c6bc8067 | 103,757 |
def get_with_default(install_config, parameter_name, default_value):
"""
:param install_config: A dict represents the install section inside the configuration file
:param parameter_name: Specific key inside the install section
:param default_value: Default value in cases that the key cannot be found
:return: The value of the key in the configuration file or default value if key cannot be found
"""
return install_config[parameter_name] if install_config and parameter_name in install_config else default_value | d4fbbb4311433b0f1f19b842a71e0d3157ede586 | 103,758 |
def xor_bytes(a, b):
"""
XOR on two bytes objects
Args:
a (bytes): object 1
b (bytes): object 2
Returns:
bytes: The XOR result
"""
assert isinstance(a, bytes)
assert isinstance(b, bytes)
assert len(a) == len(b)
res = bytearray()
for i in range(len(a)):
res.append(a[i] ^ b[i])
return bytes(res) | 49a09b51e042e11fcf8f8570e3381d6bea751eac | 103,761 |
def make_arousals(events, time, s_freq):
"""Create dict for each arousal, based on events of time points.
Parameters
----------
events : ndarray (dtype='int')
N x 5 matrix with start, end samples
data : ndarray (dtype='float')
vector with the data
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
Returns
-------
list of dict
list of all the arousals, with information about start, end,
duration (s),
"""
arousals = []
for ev in events:
one_ar = {'start': time[ev[0]],
'end': time[ev[1] - 1],
'dur': (ev[1] - ev[0]) / s_freq,
}
arousals.append(one_ar)
return arousals | fb5bbd7b34ccf39c8f017d8cf6316e253345e9a1 | 103,766 |
from typing import Dict
from typing import Any
def interface() -> Dict[str, Any]:
"""
Returns:
The schema for the `interface` block
"""
data = {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"name": {"type": "string", "required": True},
"description": {"type": "string"},
"before": {"type": "string"},
"after": {"type": "string"},
"before_each": {"type": "string"},
"after_each": {"type": "string"},
"modules": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"name": {"type": "string"},
"before": {"type": "string"},
"after": {"type": "string"},
},
},
},
},
},
}
return data | 469aa834d11959ea6049d04a820418015821d086 | 103,767 |
def count_inversion(left, right):
"""
This function use merge sort algorithm to count the number of
inversions in a permutation of two parts (left, right).
Parameters
----------
left: ndarray
The first part of the permutation
right: ndarray
The second part of the permutation
Returns
-------
result: ndarray
The sorted permutation of the two parts
count: int
The number of inversions in these two parts
"""
result = []
count = 0
i, j = 0, 0
left_len = len(left)
while i < left_len and j < len(right):
if left[i] <= right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
count += left_len - i
j += 1
result += left[i:]
result += right[j:]
return result, count | df3e12c6aea6a8242eafefa55521253b84c5e5a5 | 103,768 |
def reverse_string(phrase):
"""Reverse string"""
return phrase[::-1] | f5ec642af5a1db50562f796374139d0a22d299ea | 103,771 |
def paginate(text: str):
"""Simple generator that paginates text."""
last = 0
pages = []
appd_index = 0
curr = 0
for curr in range(0, len(text)):
if curr % 1980 == 0:
pages.append(text[last:curr])
last = curr
appd_index = curr
if appd_index != len(text) - 1:
pages.append(text[last:curr])
return list(filter(lambda a: a != '', pages)) | 55874c9fc9448f896c142479595a099cf96109fc | 103,773 |
import requests
def get_swapi_resource(resource, params=None, timeout=20):
"""
This function initiates an HTTP GET request to the SWAPI service in order to return a
representation of a resource. `params` is not included in the request if no params is passed to this
function during the function call. Once a response is received, it is converted to a python dict.
Parameters:
resource (string): a url that specifies the resource.
params (dict): optional dictionary of querystring arguments. The default value is None.
timeout (int): timeout value in seconds. The default value is 20.
Returns:
dict: dictionary representation of the decoded JSON.
"""
if params:
response = requests.get(resource, params=params,timeout=timeout)
else:
response = requests.get(resource, timeout=timeout)
return response.json() | acae62b1fbcf1eaa34fdd1a55f76a8ea9ccaec91 | 103,775 |
def getStyle( oDrawDoc, cStyleFamily, cStyleName ):
"""Lookup and return a style from the document.
"""
return oDrawDoc.getStyleFamilies().getByName( cStyleFamily ).getByName( cStyleName ) | ef7435d959c10a070e555ee5c61caa1b06594a58 | 103,777 |
def absmax(i):
"""
Returns the largest absolute value present in an array in its raw form
(e.g. in [-2, 0, 1] it returns -2, in [-2,0,3] it returns 3.)
"""
# Use the absolute largest value in its raw form
if max(i) > abs(min(i)):
return max(i)
elif abs(min(i)) >= max(i):
return min(i)
else:
raise ValueError() | 98592e2156d5956ec7769ad61546b6a84882eb96 | 103,779 |
def find_substring_occurences(xs, item):
"""Can be used to get the indexes of the required substring within a list of strings.
>>> find_substring_occurences(['ab', 'bcd', 'de'], 'd')
[1, 2]
"""
idxs = [i for (i, x) in enumerate(xs) if item in x]
return idxs | 8e329e25d4c3d81710c1ddd407286d1da74268fe | 103,783 |
import time
def task_storage(storage):
"""
Demonstrates a periodic task accessing and modifying storage.
"""
# Values in storage persist from call to call. Local variables do not
# In case storage values haven't been passed in, set defaults
storage.setdefault("a", 1)
storage.setdefault("b", "A")
# Do work
print(storage)
# Operate on storage
storage["a"] += 1
# Fetch from storage, operate
tmp = storage["b"]
tmp = chr((ord(tmp) - ord("A") + 1) % 26 + ord("A"))
# Update storage
# Note: for value to persist, it must be assigned back to storage
storage["b"] = tmp
# Sleep allows other threads to run
time.sleep(1)
return True | 53c4ef1f2acbe75af55dbffad7d321b18832579e | 103,784 |
def msd_average_precision(
recommended_songs_for_user,
user_library,
rank_limit=500
):
"""
Calculates average precision as in:
The Million Song Dataset Challenge, McFee, B., Bertin-Mahieux. T., Ellis, D.P.W., and Lanckriet, G.R.G.
4th International Workshop on Advances in Music Information Research (AdMIRe)
https://bmcfee.github.io/papers/msdchallenge.pdf
"""
library_length = len(user_library)
num_positives = 0
score = 0
recommended_songs_for_user = recommended_songs_for_user[:rank_limit]
for i, song in enumerate(recommended_songs_for_user[:4]):
if song in user_library:
num_positives += 1
score += num_positives/(i+1)
return score / min(library_length, rank_limit) | 17eeb26b39e8557e6882c6238662ca1a1942ea5a | 103,785 |
import hashlib
def calcChecksum(filepath):
""" Calculate MD5 of relevant information
Returns tuple of filename and calculated hash
"""
with open(filepath, "r") as fil:
cnt = fil.readlines()
relinfo = []
for line in cnt:
atm = line[12:16].rstrip().lstrip()
altloc = line[16]
if (atm == "CA"):
if (altloc == ' '):
AA = line[17:20].rstrip().lstrip()
relinfo.append(AA)
else:
return None
return (filepath, hashlib.md5("".join(relinfo).encode("utf-8")).hexdigest()) | 36a84ca868dd12f1ae42a037f057f507d395455a | 103,790 |
def normalize_vector(vec):
""" Normalize a vector by its L2 norm. """
norm = (vec ** 2).sum() ** 0.5
return (vec / norm) | b2f78e828b4fe07f1ee07d5883414cf7340d488b | 103,791 |
def simulation_paths(sim_id, conf):
"""
Get paths to simulation files.
:param sim_id: the simulation id
:param conf: configuration
"""
return {'log_path' : conf['logs_path'] + '/' + sim_id + '.log' ,
'json_path' : conf['jobs_path'] + '/' + sim_id + '.json',
'state_path' : conf['sims_path'] + '/' + sim_id + '.json',
'run_script' : conf['jobs_path'] + '/' + sim_id + '.sh'} | d6c2c67521299181fe5a2e596736874336cde845 | 103,795 |
def calc_t_s(p_in, p_atm, p_s, v, L):
"""
Calculates time at which pressure reaches saturation pressure.
"""
return (p_in - p_s)/(p_in - p_atm)*L/v | ae711e9ce8413e728be9c2cf38092257ea3102fd | 103,796 |
import logging
def get_default_logger(name):
"""
Get a logger from default logging manager. If no handler
is associated, add a default NullHandler
"""
logger = logging.getLogger(name)
if not logger.hasHandlers():
# If logging is not configured in the current project, configure
# this logger to discard all logs messages. This will avoid redirecting
# errors to the default 'lastResort' StreamHandler
logger.addHandler(logging.NullHandler())
return logger | 7427cde4aaa28f5dccdcdd921ee5819b896ab63c | 103,798 |
import json
def write_json_file(filename, data):
"""Write data to the file in JSON format.
Return True on success and False on failure.
"""
try:
with open(filename, "w") as file:
json.dump(data, file, indent=2)
return True
except Exception as e:
print(e)
return False | b256b399eec8ddf78d33f8b08bc45c759818809b | 103,799 |
import hashlib
def get_file_md5_hash(file_path):
""" Hashes a file.
:param file_path: Union[str, pathlib.Path] -> path of the file to hash
:return: str -> file hash
"""
etag = hashlib.md5()
with open(file_path, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
etag.update(byte_block)
return etag.hexdigest() | f01636526b28a3d71d0ef4539f18b7c673edafea | 103,800 |
from typing import Iterable
from typing import Optional
from typing import Iterator
def join(strings: Iterable[str], seperator: str = '', final: Optional[str] = None) -> str:
"""Join strings with seperators and optionally a different final seperator."""
string: str = ''
iterator: Iterator[str] = iter(strings)
try:
string = next(iterator)
except StopIteration:
return string
try:
next_string: str = next(iterator)
except StopIteration:
return string
while True:
try:
next_next_string = next(iterator)
except StopIteration:
final = final if final is not None else seperator
string += final + next_string
break
string += seperator + next_string
next_string = next_next_string
return string | db97b26980d166e2869ad87199e4350205ee938a | 103,801 |
def vecSub(vec1, vec2):
"""Vector substraction."""
return tuple(x - y for x, y in zip(vec1, vec2)) | ddd721c5503d561d8fb824a7ab6a365b156b468a | 103,802 |
def get_training_pairs(data_dict, pairs):
"""Returns the subset of pairs which have more than 3 reviews, i.e.
the set for which we will be able to perform training using `n-1` reviews
and testing for the last review."""
training_pairs = []
for u_id, l_id in pairs:
if len(data_dict[u_id][l_id]) >= 3:
training_pairs.append((u_id, l_id))
return training_pairs | 04152e73db557a949de41287f6b087130f1d38ae | 103,809 |
def _strip_dedup_section_from_pnx(pnx_xml):
""" We must strip the dedup section from a PNX record before re-inserting it """
for dedup_section in pnx_xml.findall('dedup'):
pnx_xml.remove(dedup_section)
return(pnx_xml) | a2d74cbc64828a7d0d0c92968144b8db7cf56e45 | 103,811 |
def normalize(ratings_dict, average): # Done
"""
Returns the normalized ratings of the user.
"""
normalized_dict = {}
for i in ratings_dict:
normalized_dict.update({i : ratings_dict[i] - average})
return normalized_dict | e8bb016da41f17e8f8bf698ca4ac78dc0d5d8db1 | 103,813 |
def find_nodes_with_degree(graph, degree):
""" Find nodes with degree N in a graph and return a list """
degrees = graph.degree()
nodes = list()
for node in degrees:
if degrees[node] == degree:
nodes.append(node)
return nodes | 846cacd69425c73db312dc2ac09fe4a2192114eb | 103,821 |
def GetFullPathForJavaFrame(function):
"""Uses java function package name to normalize and generate full file path.
Args:
function: Java function, for example, 'org.chromium.CrAct.onDestroy'.
Returns:
A string of normalized full path, for example, org/chromium/CrAct.java
"""
return '%s.java' % '/'.join(function.split('.')[:-1]) | b79653f4bee9cf2bed6ad9c6503094f92d58527c | 103,823 |
def scalar_mul(c, X):
"""Multiply vector by scalar."""
return [c*X[0], c*X[1]] | d8a1e53536d1d30e83abaf4f399726add447ebd0 | 103,824 |
def name(function):
"""Returns the name of a function"""
return function.__name__ | 476d59de4891d56894e7ca8bb91b929439074188 | 103,828 |
def get_template(path):
"""
Gets the HTML template and replaces patterns in a "template-like" manner
:param str path: Path to the initial HTML template/file
:return: The updated HTML content as string
:rtype: str
"""
with open(path, "r", encoding="utf-8") as f:
f_content = f.read()
# Getting the store list and placing it in the template
for element, pattern in zip(["a", "b"], ["{{pattern1}}", "{{pattern2}}"]):
f_content = f_content.replace(pattern, element)
return f_content | c6bd9af2ce702f2ac9db0daac1a14983406d1dc1 | 103,832 |
def sum_of_x(x):
"""Sums all the values in the list x"""
return sum(x) | 4b772bc7dd0643e630f86a367f0e73eb64eac7dc | 103,834 |
import csv
def csvtodict(csvfile, key, split_strings=True, fieldnames=None, delimiter=',', quotechar='"'):
"""
Translates a CSV into a dictionary using the values in one of the columns as the keys for the dictionary.
Args:
csvfile: The csv file to be converted.
Must be a list, file, or file-like object that supports the iterator protocol that
returns strings as values.
A column from the csv that contains strings will be considered a list delimited by
the same delimiter as the columns themselves.
key: The column from which the dict keys are to be collected from.
`key` has to be present in the dictionary headers as defined either by `fieldnames` or the first line
of the csv.
split_strings: If True, splits strings on the delimiter for the file into lists.
Does not affect the keys of the returned dictionary, just the values.
fieldnames: A list of headers, to be used if the csv does not have headers of their own.
If None, the first row of the csv is presumed to be the header columns.
delimiter: The delimiter to be used to separate values in the csv.
quotechar: The character which indicates the start and end of strings (lists).
Returns:
A dict of dicts representing the csv.
Note:
The returned dictionary is unordered.
Each "row" will include its own key in addition to all other items.
Example usage:
a_csv = ['a,b', '1,0', '"2,0",0']
ret = csvtodict(a_csv, 'a')
# ret is equal to {'1': {'a': '1', b: '0'}, '"2,0"': {'a': '"2,0"', 'b': '0'}}
"""
csvreader = csv.DictReader(csvfile, fieldnames=fieldnames, delimiter=delimiter, quotechar=quotechar)
translated = {}
for line in csvreader:
translated[line[key]] = {}
for header in line:
s = line[header]
if split_strings and delimiter in s:
s = s.split(delimiter)
translated[line[key]][header] = s
return translated | 62f2488bac124efcceff3728df5f87035a8afe70 | 103,840 |
def split_filter_part(filter_part):
"""
Transform dash.datatable filter part into pandas.DataFrame filter (source code : Dash documentation)
Parameters
----------
filter_part : str
filter apply on a column of the datatable
Returns
-------
tuple :
column, operator, value of the filter part
"""
operators = [['ge ', '>='],
['le ', '<='],
['lt ', '<'],
['gt ', '>'],
['ne ', '!='],
['eq ', '='],
['contains '],
['datestartswith ']]
for operator_type in operators:
for operator in operator_type:
if operator in filter_part:
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
value_part = value_part.strip()
v0 = value_part[0]
if v0 == value_part[-1] and v0 in ("'", '"', '`'):
value = value_part[1: -1].replace('\\' + v0, v0)
else:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, operator_type[0].strip(), value
return [None] * 3 | 0ba11722d7151c9c48e619e18780e1d025a258ed | 103,842 |
def procurar(expressao, procura):
"""
Uma sub função utilizada na função "precedente", com a finalidade de saber se ainda existe um operador a ser
utilizado na expressão lógica
:param expressao: expressão em que será procurado
:param procura: elemento procurado
:return: primeira posição do elemento procurado | = -1 se não existir
"""
for pos, elemento in enumerate(expressao):
if elemento[0] == procura:
return pos
return -1 | e0a0dd198d610e20b12428a8548ecbbe8c997c30 | 103,845 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.