content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def mode(ary):
    """Return the most frequent element of an iterable.

    :param ary: any iterable of hashable values
    :return: the value that occurs most often (ties broken arbitrarily
        by dict iteration order, as in the original implementation)
    """
    counts = {}
    for item in ary:
        # dict.get with a default collapses the membership test + branch.
        counts[item] = counts.get(item, 0) + 1
    return max(counts, key=counts.get)
def _SecToUsec(t):
"""Converts a time in seconds since the epoch to usec since the epoch.
Args:
t: Time in seconds since the unix epoch
Returns:
An integer containing the number of usec since the unix epoch.
"""
return int(t * 1e6) | 45dbe09583e294e0c8178dfb7c0dcd00b1dc46b1 | 101,722 |
def check_if_directory_has_files(dirPath):
    """Report whether a directory contains any entries (files or folders).

    :param dirPath: a pathlib.Path (or any object exposing ``iterdir()``)
    :return: True if the directory has at least one entry, else False
    """
    # any() short-circuits on the first entry, so large directories are cheap.
    return any(dirPath.iterdir())
from typing import Tuple
from typing import List
def load_hurdat(filepath: str) -> Tuple[List[str], List[str]]:
    """Extract hurricane data from a hurdat2.txt file.

    Lines starting with 'AL' are hurricane header rows (ID, name, point
    count); all other lines are track/measurement rows that follow their
    header without repeating the ID, so the two kinds are collected into
    separate lists.

    Parameters
    ----------
    filepath : The pathname to the hurdat2.txt file

    Returns
    -------
    tracks : List[str]
        All geographical measurement lines from hurdat2.txt.
    hurricanes : List[str]
        All hurricane header lines (ID, name, number of data points).
    """
    tracks: List[str] = []
    hurricanes: List[str] = []
    with open(filepath) as text:
        for line in text:
            # Header rows carry the basin-prefixed storm ID ('AL...').
            target = hurricanes if line.startswith('AL') else tracks
            target.append(line)
    # Debug output preserved from the original implementation.
    print(hurricanes[:5])
    print('\n')
    print(tracks[:15])
    print('\n')
    print('There are {} different hurricanes, for a total of {} measurements.'
          .format(len(hurricanes), len(tracks)))
    print('\n')
    return tracks, hurricanes
def ComptageMot(tree):
    """Return the number of words in the tree ``tree``.

    Simply delegates to the tree object's own ``number_words()`` method.
    """
    word_count = tree.number_words()
    return word_count
def convert_df_to_array(df_Data, column):
    """Extract one or more dataframe columns as a raw array.

    Parameters
    ----------
    df_Data : pandas.DataFrame
        Input dataframe.
    column : str or list
        Column name(s) of the input dataframe to extract.

    Returns
    -------
    numpy.ndarray
        The ``.values`` array backing the selected column(s).
    """
    selected = df_Data[column]
    return selected.values
import ctypes
def _get_openblas_version(openblas_dynlib):
"""Return the OpenBLAS version
None means OpenBLAS is not loaded or version < 0.3.4, since OpenBLAS
did not expose its version before that.
"""
get_config = getattr(openblas_dynlib, "openblas_get_config")
get_config.restype = ctypes.c_char_p
config = get_config().split()
if config[0] == b"OpenBLAS":
return config[1].decode('utf-8')
return None | b53d1c03f7da36d5b2b62e3f7464d3e2ce89d197 | 101,734 |
def constrain(value, minv, maxv):
    """Clamp ``value`` into the range [minv, maxv].

    NOTE: the lower bound is applied first and the upper bound second, so
    if minv > maxv the result is maxv (matching min(max(value, minv), maxv)).
    """
    lower_bounded = max(value, minv)
    return min(lower_bounded, maxv)
import torch
def get_mask(idx, num_nodes):
    """Build a boolean node mask from a tensor of indices.

    Given a tensor of ids and a node count, return a bool tensor of size
    ``num_nodes`` that is True at every position listed in ``idx`` and
    False everywhere else.
    """
    selected = torch.zeros(num_nodes, dtype=torch.bool)
    selected[idx] = True
    return selected
def get_class(class_name, module_name):
    """Look up an attribute (typically a class) by name from a module.

    Returns None when either argument is not a string, when the module
    cannot be imported, or when the module lacks the requested attribute.
    """
    if not (isinstance(class_name, str) and isinstance(module_name, str)):
        return None
    try:
        # fromlist=[''] makes __import__ return the leaf module itself
        # rather than the top-level package.
        module = __import__(module_name, fromlist=[''])
    except ImportError:
        return None
    # Missing attribute falls through to an implicit None, as before.
    return getattr(module, class_name, None)
def format_file(path, tool):
    """Run the clang-format tool in-place on ``path`` and return True.

    ``tool`` is a callable invoked as tool("-i", path); "-i" asks
    clang-format to edit the file in place.
    """
    tool("-i", path)
    return True
def mem(data):
    """Total memory used by data, as a human-readable string.

    Parameters
    ----------
    data : dict of pandas.DataFrames or pandas.DataFrame
        A single dataframe, or a dict mapping names to dataframes.

    Returns
    -------
    str
        Human readable amount of memory used with unit (KB, MB, GB, ...).
    """
    # isinstance instead of `type(data) == dict` (also accepts dict
    # subclasses such as OrderedDict).
    if isinstance(data, dict):
        num = sum(frame.memory_usage(index=True).sum() for frame in data.values())
    else:
        num = data.memory_usage(index=True).sum()
    # Walk up the unit ladder until the value fits below 1024.
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, unit)
        num /= 1024.0
    return "%3.1f %s" % (num, 'PB')
def coerce_tag(tag):
    """Coerce a BeautifulSoup tag to its stripped text contents.

    Joins all text nodes of the tag and strips leading/trailing
    whitespace; any failure (e.g. a non-tag object) yields ''.
    Used by :func:`parse_status_table()` to get the text values of HTML tags.
    """
    try:
        text_nodes = tag.findAll(text=True)
        return u''.join(text_nodes).strip()
    except Exception:
        # Broad catch kept on purpose: any malformed tag maps to ''.
        return ''
import random
def fillTwoOrFour(board, iter=1):
    """Randomly place a 2 or 4 on empty cells of a 4x4 game board.

    Parameters:
        board (list): 4x4 game board (list of lists of ints, 0 = empty)
        iter (int): number of tiles to place

    Returns:
        board (list): the same board, updated in place
    """
    for _ in range(iter):
        # Re-roll coordinates until an empty cell is found.
        row = random.randint(0, 3)
        col = random.randint(0, 3)
        while board[row][col] != 0:
            row = random.randint(0, 3)
            col = random.randint(0, 3)
        board_total = sum(cell for line in board for cell in line)
        if board_total in (0, 2):
            # The first couple of tiles are always 2s.
            board[row][col] = 2
        else:
            board[row][col] = random.choice((2, 4))
    return board
import secrets
import base64
def generate_password(length):
    """Generate a secure random password.

    Equivalent to ``openssl rand -base64 <length>``: draws ``length``
    cryptographically secure random bytes and base64-encodes them, so the
    returned string is 4*ceil(length/3) characters long.
    """
    raw = secrets.token_bytes(length)
    return base64.b64encode(raw).decode()
def int2hexstrings(number_list):
    """Convert a list of integers to a list of 4-digit uppercase hex strings.

    >>> int2hexstrings([0, 1, 65535])
    ['0000', '0001', 'FFFF']
    """
    # f-string formatting replaces the redundant str("...".format(...)).
    return [f"{n:04X}" for n in number_list]
def presence_of_all_elements_located(locator):
    """Expectation that at least one element is present on a web page.

    ``locator`` is the (by, value) pair used to find the elements; the
    returned predicate yields the list of WebElements once located
    (an empty list is falsy, so waits keep polling).
    """
    def _predicate(driver):
        return driver.find_elements(*locator)
    return _predicate
def relu_prime(z):
    """Derivative of the Rectified Linear Unit activation function.

    :param z: pre-activation array
    :return: element-wise 1 where z > 0 and 0 elsewhere, cast back to
        z's own dtype
    """
    positive = z > 0
    return positive.astype(z.dtype)
def load_forecastMock(key, lat, lon, units, lang):  # pylint: disable=invalid-name
    """Mock for darksky forecast loading; ignores all arguments."""
    # Always an empty payload — this stand-in never hits the network.
    return ""
def get_short_usage_id(full_usage_id):
    """Extract the low 16 bits (short usage id) from a 32-bit usage id."""
    low_16_bits = full_usage_id & 0xFFFF
    return low_16_bits
def bubblesort(x):
"""
Sort array by looping through array, swapping unordered
adjacent elements until the array is sorted
"""
assign, cond = 0, 0 # track assignments and conditionals
swapped = True # variable to determine whether elements in array have been swapped
assign += 1
# continue sorting until array is looped through once and no swap is made
while swapped == True:
swapped = False
assign += 1
# loop through array
for i in range(0, len(x)-1):
cond += 1
# if elements are unordered swap them
if x[i] > x[i+1]:
temp = x[i]
x[i], x[i+1] = x[i+1], temp
swapped = True
assign += 4
return x, assign, cond | 5f99a8be64f87a7a71d7e01253dff4bf32530c68 | 101,793 |
import ast
def is_docstring(node):
"""Checks if a Python AST node is a docstring"""
return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str) | 48aafe94c616982d2c0be23c5453b91a98c16fe8 | 101,794 |
from typing import Tuple
def split_grab_pattern(pattern: str, default_target: str) -> Tuple[str, str]:
    """Split a grab pattern of the form "source=>target".

    Fills in ``default_target`` when the pattern provides no target of
    its own (no "=>" separator present).

    Args:
        pattern (str): The pattern to split.
        default_target (str): Target auto-filled when none is given.

    Returns:
        (str, str): The source part and the resolved target.
    """
    # Only the first "=>" splits; anything after it belongs to the target.
    source, separator, target = pattern.partition('=>')
    if not separator:
        target = default_target
    return source, target
def get_energy_stats(stats, energy):
    """Return the rows of ``stats`` matching a given energy type.

    ``stats`` is a dataframe with an 'energy' column; returns None when
    ``stats`` itself is None.
    """
    if stats is None:
        return None
    matches_energy = stats['energy'] == energy
    return stats[matches_energy]
def _set_process_name(func, process_name):
"""_set_process_name
If process_name is not set on configuration file,
func.__name__ is used as the process_name
:param func:
:param process_name:
"""
if len(process_name) == 0:
process_name = func.__name__
return process_name | 4b68274bd3cbe336b03be7458e80c39a282fa13c | 101,805 |
def most_frequent(fills):
    """Return the most frequent element of ``fills``, or '' when empty."""
    if not fills:
        return ''
    # max over the distinct values keyed by their count in the original list.
    return max(set(fills), key=fills.count)
import json
def json_dump(obj, path):
    """Serialize ``obj`` as pretty-printed JSON (2-space indent) to ``path``."""
    with open(path, 'w') as handle:
        # json.dump returns None; the return mirrors the original shape.
        return json.dump(obj, handle, indent=2)
def groupByThree(lst):
    """Group a flat list into consecutive triples (lazily, as a zip).

    Given input [1,2,3,4,5,6,7,8,9]
    outputs: [(1,2,3),(4,5,6),(7,8,9)]
    Any trailing partial group is dropped (zip stops at the shortest slice).
    """
    return zip(lst[0::3], lst[1::3], lst[2::3])
def fmt(message, prefix):
    """Prefix each line of ``message`` with ``prefix``.

    Multi-line messages are split on their end-of-line terminators
    (which are preserved) and the prefix is added to every line except
    completely blank continuation lines; an empty message yields just
    the prefix.
    """
    lines = str(message).splitlines(True)
    if not lines:
        return prefix
    pieces = []
    for index, line in enumerate(lines):
        # Blank lines after the first keep no prefix, to avoid trailing junk.
        if index and not line.strip():
            pieces.append(line)
        else:
            pieces.append("{} {}".format(prefix, line))
    return "".join(pieces)
import re
def _out_slugify_string(key: str) -> str:
"""Convert a string to snake_case."""
string = re.sub(r"[\-\.\s]", "_", str(key))
return (string[0]).lower() + re.sub(
r"[A-Z]", lambda matched: f"_{matched.group(0).lower()}", string[1:]
) | f8936467c5e5e7ff7bcbdbd4db2f6cb8c0a48522 | 101,832 |
def _complete(states):
"""Returns as per Task.complete depending on whether ALL elements of the given list are at least that state
Priority is "none", "skipped", "revealed", "complete"; this returns the lowest priority complete value that any
list entry has.
"""
toReturn = "complete"
for state in states:
if state == "none": return "none"
if state == "skipped": toReturn = "skipped"
if state == "revealed" and toReturn != "skipped": toReturn = "revealed"
return toReturn | 5a25cda918d00307b52cfb94e26e504eb749633c | 101,834 |
def get_user_input(validation_function, defaut=''):
    """Read console input repeatedly until it validates.

    Asks the user for input and validates it with ``validation_function``;
    loops until the validator returns a truthy (corrected) value, which is
    then returned. Exceptions raised by ``input()`` or the validator
    propagate to the caller.

    :param validation_function: function the user input is validated against
    :type validation_function: function
    :param defaut: default return value (sic — kept for interface
        compatibility; NOTE(review): currently unused by the body)
    :type defaut: str
    :return: the validated/corrected user input
    :rtype: str
    """
    # The original wrapped this loop in `try: ... except Exception as e:
    # raise e`, a no-op re-raise that only obscured tracebacks; letting
    # exceptions propagate naturally is equivalent and clearer.
    while True:
        user_input = input()
        corrected_value = validation_function(user_input)
        if corrected_value:
            return corrected_value
def to_string(data, use_repr=True, verbatim=False):
    """Convert a single datum into a string.

    Strings are returned as-is (or wrapped in double backticks when
    ``verbatim``); sized objects (anything with ``__len__``) have their
    elements converted recursively and joined with ", " — except an empty
    list, which renders as "[]"; everything else falls back to repr()/str().

    Args:
        data (object):
            The object to stringify.
        use_repr (:obj:`bool`, optional):
            Use the object's repr; otherwise a direct str() conversion.
        verbatim (:obj:`bool`, optional):
            Wrap strings in backticks to indicate fixed-width rendering.

    Returns:
        :obj:`str`: A string representation of ``data``.
    """
    if isinstance(data, str):
        return '``' + data + '``' if verbatim else data
    if hasattr(data, '__len__'):
        if isinstance(data, list) and len(data) == 0:
            return '[]'
        # NOTE: iterating a dict here yields its keys, as in the original.
        return ', '.join(to_string(item, use_repr=use_repr, verbatim=verbatim)
                         for item in data)
    return data.__repr__() if use_repr else str(data)
def getByName(list, name):
    """Return the first element of ``list`` whose 'name' key equals ``name``.

    Returns None when either argument is None or no element matches.
    (Parameter name ``list`` shadows the builtin but is kept for
    interface compatibility.)
    """
    if list is None or name is None:
        return None
    for entry in list:
        # Entries without a 'name' key yield None here, which can never
        # equal the non-None `name`, so they are skipped implicitly.
        if entry.get('name') == name:
            return entry
    return None
import traceback
def safer_format_traceback(exc_typ, exc_val, exc_tb):
    """Safely format an exception traceback into a safe string.

    Log-injection attacks can smuggle arbitrary data into server logs
    via crafted exception payloads; formatting the exception type and
    value with "%r" passes them through repr() so special characters
    are escaped before they reach the log.
    """
    parts = ["Uncaught exception:\n"]
    parts += traceback.format_tb(exc_tb)
    parts.append("%r\n" % (exc_typ,))
    parts.append("%r\n" % (exc_val,))
    return "".join(parts)
def BodyForce(f, a):
    """Add a body force/acceleration to the particle accelerations.

    Parameters
    ----------
    f: float, force/acceleration to add
    a: array, acceleration of the particles (modified IN PLACE via `+=`
       when `a` supports in-place addition, e.g. a numpy array)

    Returns
    -------
    The updated acceleration array (same object as `a` for ndarrays).
    """
    a += f
    return a
import copy
def nested_element(element, depth):
    """Build a self-nested `etree.Element` from a single element.

    With `depth=2` the tree looks like:

    ```xml
    <element>
      <element>
        <element>
        </element>
      </element>
    </element>
    ```

    Args:
      element: The `etree.Element` used as the repeating node.
      depth: An `int` nesting depth; the result contains `element`
        nested `depth` times.

    Returns:
      The (mutated) `element`, now carrying the nested structure.
    """
    if depth <= 0:
        return element
    # Deep-copy BEFORE appending so the child snapshot has no children yet.
    element.append(nested_element(copy.deepcopy(element), depth=depth - 1))
    return element
def lift(x):
    """Lift an object of a quotient ring `R/I` to `R`.

    Delegates to the object's own ``lift()`` method, e.g.::

        sage: Mod(2,3).lift()
        2
        sage: R.<x> = QQ['x']
        sage: S.<xmod> = R.quo(x^2 + 1)
        sage: lift(xmod-7)
        x - 7

    Raises ArithmeticError when ``x`` has no ``lift`` method (any
    AttributeError raised during the call is converted too, matching
    the original behavior).
    """
    try:
        lifted = x.lift()
    except AttributeError:
        raise ArithmeticError("no lift defined.")
    else:
        return lifted
import pathlib
def path_to_str(source: pathlib.Path) -> str:
    """Convert a pathlib.Path to its plain string representation."""
    text = str(source)
    return text
def combine_real_imag(real_data, imag_data):
    """Combine two float arrays into one complex array (real + i*imag)."""
    return real_data + imag_data * 1j
def get_sequence(pdbfile):
    """Return the amino-acid sequence of a PDB file as a list.

    Reads ATOM records and appends the 3-letter residue name
    (columns 18-20) each time the residue number (columns 23-26)
    changes, yielding one entry per residue.

    :param pdbfile: path to the PDB file
    :return: list of residue-name strings in file order
    """
    seq = []
    prev_resnum = -1
    # `with` closes the file deterministically; the original left the
    # handle from `for line in open(...)` unclosed (resource leak).
    with open(pdbfile, 'r') as handle:
        for line in handle:
            if line[:4] != 'ATOM':
                continue
            resnum = int(line[22:26])
            if resnum != prev_resnum:
                seq.append(line[17:20])
                prev_resnum = resnum
    return seq
import yaml
def parse_config(file_list):
    """Parse required options from a list of configuration files.

    :param file_list: list
        list of config file entries to parse; each entry is indexable
        and its first item is the YAML path (entry[0])
    :return: tuple
        Dictionaries of the required simulation inputs, in the order
        (CORSIKA, shower simulation, telescope simulation, fit).
    """
    corsika_dict = dict()
    simulation_input = dict()
    fit_input = dict()
    telescope_input = dict()
    # Later files override earlier ones because dict.update is applied
    # in file order.
    section_targets = (
        ("CORSIKA", corsika_dict),
        ("ShowerSimulation", simulation_input),
        ("TelescopeSimulation", telescope_input),
        ("Fit", fit_input),
    )
    for entry in file_list:
        with open(entry[0], "r") as yaml_file:
            content = yaml.safe_load(yaml_file)
        for section, target in section_targets:
            if section in content:
                target.update(content[section])
    return corsika_dict, simulation_input, telescope_input, fit_input
def MakeFindPackage(modules):
    """Build a useful CMake find_package(VTK ...) command string.

    Module names of the form 'VTK::Foo' are rewritten to 'vtkFoo' and
    listed in sorted order, one per indented line.
    """
    body = ['  ' + module.replace('VTK::', 'vtk') for module in sorted(modules)]
    return '\n'.join(['find_package(VTK COMPONENTS'] + body + [')'])
def is_directory_automatically_created(folder: str):
    """Check whether a folder name looks auto-generated.

    Returns True when the name contains one of the "(NN)MonthName"
    (French month) markers used by the auto-creation scheme, else False.
    """
    months = (
        "(01)Janvier",
        "(02)Fevrier",
        "(03)Mars",
        "(04)Avril",
        "(05)Mai",
        "(06)Juin",
        "(07)Juillet",
        "(08)Aout",
        "(09)Septembre",
        "(10)Octobre",
        "(11)Novembre",
        "(12)Decembre",
    )
    for month_tag in months:
        if month_tag in folder:
            return True
    return False
def unionIntervals(intervals):
    """Merge overlapping/touching intervals into their union.

    E.g. [[1,2], [4,6], [5,8]] -> [[1,2], [4,8]].

    PARAMETERS
        intervals: list or sequence-like
            list of lists/tuples [[start, end], ...]

    RETURN
        list of lists defining the union interval(s), sorted by start.
    """
    merged = []
    # Sorting guarantees each new interval can only overlap the last
    # interval already merged.
    for current in sorted(intervals):
        current = list(current)
        if merged and merged[-1][1] >= current[0]:
            merged[-1][1] = max(merged[-1][1], current[1])
        else:
            merged.append(current)
    return merged
def power_ranger(power: int, minimum: int, maximum: int) -> int:
    """Count perfect ``power``-th powers inside [minimum, maximum].

    Counts how many bases in 1..maximum have base**power within the
    inclusive range. The original computed base**power three times per
    base and materialized a list only to take its length; this computes
    the power once and sums a generator.
    """
    return sum(1 for base in range(1, maximum + 1)
               if minimum <= base ** power <= maximum)
def regularize_name(name,salutation_set={"Prof."}):
    """Regularize a professor name into last-name-first form.

    Handles names in the forms:
        'Special' (e.g., 'DGS')         -> unchanged
        'Last, First [Middle]'          -> unchanged (re-joined)
        'First [Middle] Last'           -> 'Last, First [Middle]'
        'Prof. First [Middle] Last'     -> 'Last, First [Middle]'

    Arguments:
        name (string): the name
        salutation_set (set of string, optional): salutations to strip
            (default is never mutated, so the shared-default pitfall
            does not apply here)

    Returns:
        (string): the regularized name
    """
    tokens = name.split()
    if len(tokens) == 1:
        # One-word "special" names (e.g., 'DGS') pass through untouched.
        return name
    if tokens[0].endswith(","):
        # Already 'Last, First [Middle]'.
        return " ".join(tokens)
    if tokens[0] in salutation_set:
        # 'Salutation First [Middle] Last' — drop the salutation.
        return "{}, {}".format(tokens[-1]," ".join(tokens[1:-1]))
    # 'First [Middle] Last'.
    return "{}, {}".format(tokens[-1]," ".join(tokens[0:-1]))
def get_usergraph_feature_names(osn_name):
    """Return the set of user-graph engineered feature names.

    :param osn_name: the dataset name (i.e. reddit, slashdot, barrapunto);
        currently unused — the feature set is identical for all datasets
    :return: the set of feature names
    """
    return {
        "user_graph_user_count",
        "user_graph_hirsch_index",
        "user_graph_randic_index",
        "user_graph_outdegree_entropy",
        "user_graph_outdegree_normalized_entropy",
        "user_graph_indegree_entropy",
        "user_graph_indegree_normalized_entropy",
    }
def row_to_dict(field_names, data, null_to_empty_string=False):
    """Convert a cx_Oracle result row tuple into a dict keyed by column name.

    :param field_names: the result-set column names (list)
    :param data: this row's values (tuple)
    :param null_to_empty_string: replace None values with "" (bool)
    :return: one row of results (dict)
    """
    if null_to_empty_string:
        data = ["" if value is None else value for value in data]
    return dict(zip(field_names, data))
def bounded_integer(lower, upper):
    """Return a parser accepting an integer string in [lower, upper]."""
    # type: (int, int) -> Callable[str, int]
    def parse(s):
        # type: (str) -> int
        value = int(s)
        if not (lower <= value <= upper):
            raise ValueError("{} is not in [{}, {}]".format(value, lower, upper))
        return value
    return parse
def assert_one_click(session):
    """Assert exactly one click was recorded and return it as a tuple.

    Reads ``window.clicks`` from the page via the session and fails the
    assertion if there is not exactly one entry.
    """
    clicks = session.execute_script("return window.clicks")
    assert len(clicks) == 1
    first_click = clicks[0]
    return tuple(first_click)
def resolve_translation(obj, _):
    """Resolver converting ``obj.translation`` from bytes to str."""
    raw_bytes = obj.translation
    return raw_bytes.decode()
import time
def obfuscate_api_key(key, verbose=False):
    """Generate the obfuscated API key to attach while logging in.

    Uses the current millisecond timestamp: its last 6 digits, plus
    those digits right-shifted by one (zero-padded to 6), index into
    the seed key to build a 12-character obfuscated key.

    Args:
        key (String): The API key (the "seed"; must be at least 12 chars).
        verbose (bool): If True, print the result.

    Returns:
        int: Timestamp (ms) used for generation.
        str: Obfuscated key.
    """
    seed = key
    now = int(time.time() * 1000)
    tail_digits = str(now)[-6:]
    shifted_digits = str(int(tail_digits) >> 1).zfill(6)
    # First 6 chars come from the raw digits, next 6 from the shifted
    # digits offset by 2 — exactly the Zscaler login scheme.
    obfuscated = "".join(seed[int(digit)] for digit in tail_digits)
    obfuscated += "".join(seed[int(digit) + 2] for digit in shifted_digits)
    if verbose:
        print("Timestamp:", now, "\tKey:", obfuscated)
    return now, obfuscated
def load_default_OSINT_dict(avail_hosts):
    """Load the default OSINT for the CybORG environment.

    Every host whose name contains "PublicFacing" is assumed to expose
    its IP publicly.

    Arguments:
        avail_hosts: dict of all available hosts, names as keys
    Returns:
        dict mapping each public-facing host name to "IP"
    """
    print("OSINT: No OSINT specified so using publicly facing hosts")
    return {host: "IP" for host in avail_hosts if "PublicFacing" in host}
def docs_modified(file_paths):
    """Report whether any file in the Doc directory was changed.

    Simply tests whether the list of changed paths is non-empty.
    """
    return True if file_paths else False
def unpack_row(row, cols):
    """Convert a suds PQL row object into a serializable dict.

    Transforms a row of suds objects received from the DFP API's
    Publisher Query Language Service into a plain Python dict.

    Args:
        row: a suds row object holding an array under 'values'.
        cols: the column names, in order.

    Returns:
        dict: column name -> value, or {} when the row has no values.
    """
    try:
        # Raises AttributeError when the row lacks a usable 'values' array.
        unpacked = [entry['value'] for entry in row['values']]
    except AttributeError:
        return {}
    return dict(zip(cols, unpacked))
def set_support_dimensions(loading_span=10e-3,support_span=20e-3):
    """Set the jig dimensions.

    inputs:
        loading_span: default 10mm (10e-3 m)
        support_span: default 20mm (20e-3 m)

    Returns a dict with both spans plus their ratio under 'spanratio'.
    """
    return {
        'loading_span': loading_span,
        'support_span': support_span,
        'spanratio': loading_span / support_span,
    }
def roman_month(date):
    """Return the month field (index 1) of Roman date ``date``."""
    month = date[1]
    return month
import itertools
def prettylist(name, mylist, N=10, prefix='\t'):
    """Return a formatted str: first N items of a list or generator, with prefix & name.

    Sliceable inputs are shown via ``mylist[:N]``; non-sliceable iterables
    (generators) fall back to ``itertools.islice``. The original formatted
    the islice object itself (printing ``<itertools.islice object ...>``);
    materializing it as a list shows the actual items.
    """
    try:
        return('{}{}: {}...'.format(prefix, name, mylist[:N]))
    except TypeError:
        # Generators aren't sliceable — take N items lazily, then realize
        # them so the repr shows values rather than the islice object.
        ans = list(itertools.islice(mylist, N))
        return('{}{}: {}...'.format(prefix, name, ans))
def make_splits(df,
                target,
                time_step=0,
                ntrain=0.8,
                nval=0.15,
                ntest=0.05):
    """Split a dataframe into train/validation/test feature/target sets.

    For models that are not autoregressive, the target column is shifted
    by ``time_step`` to create lagged observations, so the model learns
    the relationship between target y(t+n) and features x(t). Rows made
    NaN by the shift are dropped.

    Args:
        df: DataFrame.
        target: Target column name.
        time_step: Number of lags (the shift applied to the target).
        ntrain: Fraction of rows for the training set (from the start).
        nval: Fraction of rows for the validation set (after train).
        ntest: Fraction of rows for the test set (from the end).

    Returns:
        dict of the form {train:{X, y}, val:{X, y}, test:{X, y}} where
        X drops the shifted target column and y is that column.
    """
    frame = df.copy()
    shifted_name = f'{target}_(t+{time_step})'
    # Shift the target backwards so row t carries y(t+time_step).
    frame[shifted_name] = frame[target].shift(-int(time_step))
    frame = frame.drop(columns=[target])
    # Drop rows that picked up NaNs from the shift.
    frame = frame[~frame.isna().any(axis='columns')]

    total = len(frame)
    train_df = frame[0:int(total * ntrain)]
    val_df = frame[int(total * ntrain):int(total * (ntrain + nval))]
    # Note: test is taken from the tail, so it may overlap val if the
    # fractions don't sum to 1 (preserved from the original).
    test_df = frame[int(total * (1 - ntest)):]

    def _xy(part):
        # Separate features from the shifted target for one partition.
        return {'X': part.drop(columns=shifted_name), 'y': part[shifted_name]}

    return {'train': _xy(train_df), 'val': _xy(val_df), 'test': _xy(test_df)}
def compute_mean_C (counts, i, j):
    """Compute the mean value <psi| Z_i Z_j |psi> from measurement counts.

    Z has eigenvalue +1 on |0> and -1 on |1>, so Z_i Z_j contributes +1
    when qubits i and j agree (00 or 11) and -1 when they differ (01 or
    10). The original subtracted p_11 as well, which is incorrect for
    this expectation value; it is added here.

    ``counts`` maps bitstrings (qubit 0 rightmost, little-endian as in
    Qiskit) to shot counts; ``i`` and ``j`` are qubit indices.
    """
    total = sum(counts.values())
    n = len( list(counts.keys())[0] )
    # Probabilities of having ( psi & (2^i + 2^j) ) = ab with a, b in {0,1}
    p_00 = 0
    p_01 = 0
    p_10 = 0
    p_11 = 0
    for key in counts:
        if key[n-1-i] == '0' and key[n-1-j] == '0':
            p_00 += counts[key]
        elif key[n-1-i] == '0' and key[n-1-j] == '1':
            p_01 += counts[key]
        elif key[n-1-i] == '1' and key[n-1-j] == '0':
            p_10 += counts[key]
        else:
            p_11 += counts[key]
    # <Z_i Z_j> = P(agree) - P(disagree)
    return (p_00 - p_01 - p_10 + p_11) / total
def _camel_case_to_snake_case(s: str) -> str:
"""Converts a CamelCase string to snake_case."""
out = []
last_was_uppercase = False
for c in s:
if c.isupper():
out.append(c.lower())
else:
if last_was_uppercase and len(out) > 1:
out[-1] = "_" + out[-1]
out.append(c)
last_was_uppercase = c.isupper()
return "".join(out) | 36abed4db978bb6cf05124a74cea38e1faef1cf2 | 101,940 |
def best_model_compare_fn(best_eval_result, current_eval_result, key):
    """Compare two evaluation results; True if the current one is greater.

    Both evaluation results must contain ``key``, the metric compared.

    Args:
        best_eval_result: best eval metrics.
        current_eval_result: current eval metrics.
        key: key to the value used for comparison.

    Returns:
        True if best_eval_result[key] < current_eval_result[key].

    Raises:
        ValueError: If either eval result is empty or lacks ``key``.
    """
    if not best_eval_result or key not in best_eval_result:
        raise ValueError(f'best_eval_result cannot be empty or key "{key}" is '
                         f'not found.')
    if not current_eval_result or key not in current_eval_result:
        # Fixed: this message previously blamed best_eval_result.
        raise ValueError(f'current_eval_result cannot be empty or key "{key}" is '
                         f'not found.')
    return best_eval_result[key] < current_eval_result[key]
def _decimal_to_ddm(dd: float) -> tuple[int, float]:
"""Convert decimal degrees to degrees, decimal minutes"""
degrees, minutes = divmod(abs(dd) * 60, 60)
return int(degrees), minutes | 2e2fd81ea5e78c8b545b35b8365379ff5ef137fe | 101,945 |
def cursors_line_num(view):
    """Return the 0-based line numbers of the cursor(s) in a Sublime view.

    Uses each selection region's end point (``region.b``) and the view's
    ``rowcol`` to map buffer offsets to (row, col), keeping only rows.
    """
    rows = []
    for region in view.sel():
        row, _col = view.rowcol(region.b)
        rows.append(row)
    return rows
def expose(func=None, label=None, client=False):
    """Function decorator which exposes the target to the API.

    Works both bare (``@expose``) and with arguments
    (``@expose(label="x", client=True)``): when called without ``func``,
    it returns a decorator closed over the keyword options.

    Arguments:
        func -- The function to be decorated.

    Keyword arguments:
        label  -- The name under which to expose this function, if
                  different from its real name.
        client -- If True, the function will receive a keyword argument
                  "client" corresponding to the client which initiated
                  the function call.
    """
    if func is not None:  # `is not None`, not `!= None` (identity check)
        if not label:
            label = func.__name__
        func.__api_exposed__ = True
        func.__api_label__ = label
        if client:
            func.__api_pass_client__ = True
        return func
    # Called with only keyword options: return the real decorator.
    def partial(func):
        return expose(func, label=label, client=client)
    return partial
from dateutil import tz
from datetime import datetime
def get_human_time(timestamp, timezone=None):
    """Get a human-readable timestamp from a UNIX UTC timestamp.

    Args:
        timestamp (int): UNIX UTC timestamp.
        timezone (optional): Explicit timezone (e.g: "America/Chicago").

    Returns:
        str: ISO-format date converted into the requested (or local)
        timezone, with a colon inserted in the UTC offset.

    Example:
        >>> get_human_time(1565092435, timezone='Europe/Paris')
        >>> 2019-08-06T11:53:55.000000+02:00
    """
    if timezone is not None:  # get timezone from arg
        to_zone = tz.gettz(timezone)
    else:  # auto-detect locale
        to_zone = tz.tzlocal()
    # Convert the epoch into the target zone. The original used
    # utcfromtimestamp(...).replace(tzinfo=to_zone), which keeps the UTC
    # wall-clock digits and merely relabels them with the zone — giving
    # 09:53:55+02:00 instead of the documented 11:53:55+02:00.
    dt_tz = datetime.fromtimestamp(timestamp, tz=to_zone)
    timeformat = '%Y-%m-%dT%H:%M:%S.%f%z'
    date_str = datetime.strftime(dt_tz, timeformat)
    core_str = date_str[:-2]
    tz_str = date_str[-2:]
    # %z yields "+0200"; splice in a colon for "+02:00".
    return f'{core_str}:{tz_str}'
def mapper_class(relation):
    """Return the mapped class behind an ORM relation attribute."""
    relation_property = relation.property
    return relation_property.mapper.class_
import itertools
import random
def select_item(items_with_profit_ratio):
    """Select an item index proportionally to its profitability ratio.

    Given tuples (item_index, item_profitability_ratio, item), draw a
    point uniformly along the cumulative ratio line and binary-search for
    the entry containing it (roulette-wheel selection; the search mirrors
    bisect.bisect_right over the cumulative sums).
    """
    cumulative = list(itertools.accumulate(
        entry[1] for entry in items_with_profit_ratio))
    total_ratio = cumulative[-1]
    # One uniform draw along [0, total_ratio].
    target = random.uniform(0, total_ratio)
    lo = 0
    hi = len(items_with_profit_ratio)
    while lo < hi:
        mid = (lo + hi) // 2
        if cumulative[mid] <= target:
            lo = mid + 1
        else:
            hi = mid
    return lo
def is_dirty(store, dataset):
    """Check whether a dataset's repository has uncommitted changes."""
    return store.get_dataset(dataset).repo.dirty
def _clean_delim(raw_delim):
"""Cleans a run delimiter.
"""
if raw_delim == '=':
return '=='
else:
return raw_delim | 7305bf93e859a979da3508fa3910901d2555e072 | 101,962 |
import torch
def get_3d_box(tensor):
    """Calculate 3D bounding box corners from its parameterization.

    Input:
        tensor: 1-D tensor of length 7 laid out as
            (l, h, w, cx, cy, cz, heading_angle) — box size first, then
            center coordinates, then heading (rad) about the y axis.
    Output:
        corners_3d: torch tensor of shape (8, 3) holding the 3D box corners.
    """
    center = tensor[3:6]
    l,h,w = tensor[:3]
    heading_angle = tensor[-1]
    def roty(t):
        # 3x3 rotation matrix about the y axis by angle t (rad).
        c = torch.cos(t)
        s = torch.sin(t)
        return torch.tensor([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    R = roty(heading_angle)
    # Corner offsets from the box center (before rotation/translation):
    # x spans +-l/2, y spans +-h/2, z spans +-w/2.
    x_corners = torch.tensor([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2])
    y_corners = torch.tensor([h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2])
    z_corners = torch.tensor([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2])
    # Rotate the (3, 8) offset matrix, then translate each axis by the center.
    corners_3d = torch.mm(R, torch.vstack([x_corners, y_corners, z_corners]))
    corners_3d[0, :] = corners_3d[0, :] + center[0]
    corners_3d[1, :] = corners_3d[1, :] + center[1]
    corners_3d[2, :] = corners_3d[2, :] + center[2]
    # (3, 8) -> (8, 3): one corner per row.
    corners_3d = corners_3d.transpose(-1,-2)
    return corners_3d
def static_vars(**kwargs):
    """Decorator that attaches static variables (as attributes) to a function.

    Usage:
    ```python
    @static_vars(i = 0)
    def function(x):
        function.i += 1
        return x + function.i
    function(0)   # 1
    function(0)   # 2
    function(10)  # 13
    ```
    """
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
import re
def parse_chunked_data(data):
    """Parse the body of an HTTP message transmitted with chunked transfer encoding.

    Parameters
    ----------
    data : str or None
        Raw message body: a sequence of "<hex-size>\\r\\n<chunk>" records,
        normally terminated by a zero-size chunk.

    Returns
    -------
    str
        The concatenated chunk payloads; '' for None/empty input or when
        the first chunk header is malformed.
    """
    data = (data or "").strip()
    chunks = []
    while data:
        # Chunk sizes are hexadecimal.  Bug fix: the previous pattern
        # '[0-9a-zA-Z]+' also accepted letters g-z and then crashed in
        # int(..., 16); a malformed header now just ends parsing.
        header = re.match(r"^([0-9a-fA-F]+)\r\n.*", data)
        if not header:
            break
        length = int(header.group(1), 16)
        data = data.partition("\r\n")[2]
        chunks.append(data[:length])
        # Drop the chunk's trailing CRLF before reading the next header.
        data = data[length:].strip()
    return "".join(chunks)
from typing import Dict
def sort_stats_by_kind(stats: Dict) -> Dict:
    """
    Generate dict-of-dicts for statistics.

    Returns
    -------
    Dict[str, Dict]
        A dict-of-dicts keyed by the kind of the statistics it contains
        ('failure', 'report', 'success'); any kind other than 'failure'
        or 'success' lands in 'report'.
    """
    grouped = {'failure': {}, 'report': {}, 'success': {}}
    for name, stat in stats.items():
        bucket = stat.kind if stat.kind in ('failure', 'success') else 'report'
        grouped[bucket][name] = stat
    return grouped
def load_char_mappings(mapping_path):
    """
    Load EMNIST character mappings: each line maps a label to the byte
    value of the corresponding character.

    return: dict of int label -> int character code
    """
    with open(mapping_path) as handle:
        pairs = (line.split() for line in handle)
        return {int(label): int(code) for label, code in pairs}
def ddir(obj):
    """
    List the dir(obj) attributes of obj that don't start with an underscore.

    :param obj: Any python object
    :return: A list of attribute names
    """
    def is_public(name):
        return not name.startswith('_')
    return list(filter(is_public, dir(obj)))
def dissolve_contiguous(input_gdf):
    """Dissolve geometries into one polygon then explode to ensure no multiparts.

    Args:
        input_gdf (geopandas.GeoDataFrame): Geodataframe with polygon geometry column.

    Returns:
        geopandas.GeoDataFrame: Polygon geodataframe with cleaned internal boundaries.
    """
    # Bug fix: work on a copy.  The previous version assigned the helper
    # "diss" column directly onto the caller's dataframe, mutating it as
    # a side effect.
    gdf = input_gdf.copy()
    gdf["diss"] = 1
    gdf = gdf.dissolve(by="diss", as_index=False)
    return gdf.explode(index_parts=True)
from typing import Optional
from typing import List
def _get_password_option_args(password: Optional[str]) -> List[str]:
"""
Get password option arguments.
:param password: the password (optional).
:return: empty list if password is None, else ['--password', password].
"""
return [] if password is None else ["--password", password] | 4a887a204fd841a0aea364cbf0efd4b720de24f5 | 101,980 |
def write_alarm_msg(radar_name, param_name_unit, date_last, target, tol_abs,
                    np_trend, value_trend, tol_trend, nevents, np_last,
                    value_last, fname):
    """
    writes an alarm file

    Parameters
    ----------
    radar_name : str
        Name of the radar being controlled (omitted from the file if None)
    param_name_unit : str
        Parameter and units
    date_last : datetime object
        date of the current event
    target, tol_abs : float
        Target value and tolerance
    np_trend : int
        Total number of points in trend (<= 0 writes 'NA' trend lines)
    value_trend, tol_trend : float
        Trend value and tolerance
    nevents: int
        Number of events in trend
    np_last : int
        Number of points in the current event
    value_last : float
        Value of the current event
    fname : str
        Name of file where to store the alarm information

    Returns
    -------
    fname : str
        the name of the file where data has written
    """
    # The file is closed by the context manager; the previous explicit
    # txtfile.close() inside the with-block was redundant and was removed.
    with open(fname, 'w', newline='') as txtfile:
        txtfile.write(
            'Weather radar polarimetric parameters monitoring alarm\n')
        if radar_name is not None:
            txtfile.write(f'Radar name: {radar_name}\n')
        txtfile.write(f'Parameter [Unit]: {param_name_unit}\n')
        txtfile.write('Date : '+date_last.strftime('%Y%m%d')+'\n')
        txtfile.write(f'Target value: {target} +/- {tol_abs}\n')
        if np_trend > 0:
            txtfile.write(
                f'Number of points in trend: {np_trend} in {nevents} events\n')
            txtfile.write(
                f'Trend value: {value_trend} (tolerance: +/- {tol_trend})\n')
        else:
            txtfile.write('Number of points in trend: NA\n')
            txtfile.write('Trend value: NA\n')
        txtfile.write(f'Number of points: {np_last}\n')
        txtfile.write(f'Value: {value_last}\n')
    return fname
def get_midpoint(point_a, point_b):
    """Finds the midpoint of two points.

    Args:
        point_a: Tuple (x, y) point.
        point_b: Tuple (x, y) point.

    Returns:
        Tuple of (x, y) midpoint.
    """
    (ax, ay), (bx, by) = point_a, point_b
    return (ax + bx) / 2, (ay + by) / 2
from typing import Counter
def check_outliers(df,list):
    """
    Check for values outside the 1.5*IQR whiskers of each given column.

    Parameters
    ----------
    df : dataframe
        dataframe to be inspected
    list : list
        list of columns less id (NOTE: the parameter shadows the builtin
        ``list``; the name is kept for backward compatibility)

    Returns
    -------
    outlier_index_unique : list
        index of rows with outlier values, first-seen order, no duplicates
    """
    outlier_index=[]
    for feature in list:
        lower_limit=df[feature].quantile(0.25)
        upper_limit=df[feature].quantile(0.75)
        iqr=upper_limit-lower_limit
        iqr_step=iqr*1.5
        feature_outlier_index=df[(df[feature]<lower_limit-iqr_step)|(df[feature]>upper_limit+iqr_step)].index
        outlier_index.extend(feature_outlier_index)
    # De-duplicate while preserving first-seen order.  This replaces the
    # previous Counter-then-loop construction, which did the same thing
    # indirectly.  (A comprehension is used because the builtin ``list``
    # is shadowed by the parameter above.)
    return [idx for idx in dict.fromkeys(outlier_index)]
def nonempty_lines(text):
    """Returns non-empty lines in the given text."""
    return list(filter(None, text.split('\n')))
def remove_duplicates(names, versions, versions_dict):
    """Remove the versions that exist in versions_dict.

    Args:
        names (list): A list with names
        versions (list): A list with versions
        versions_dict (dict): A dict names as keys

    versions_dict example:

        {'name': {'version': 'foo', ...}, ...}

    Returns:
        new_names (list): A list with names
        new_versions (list): A list with versions
    """
    new_names = []
    new_versions = []
    for name, version in zip(names, versions):
        # Keep only pairs not already recorded in versions_dict.
        if name in versions_dict and version in versions_dict[name]:
            continue
        new_names.append(name)
        new_versions.append(version)
    return new_names, new_versions
def algorithm_1(array: list) -> int:
    """
    Algorithm 1 - Brute Force (maximum subarray sum)

    Goes through all possible subarrays, calculates the sum of values in
    each subarray and maintains the maximum sum.

    The variables [i] and [j] fix the first and last index of the subarray
    (both inclusive), the sum of values is accumulated in [curr], and
    [best] holds the maximum sum found during the search.  The empty
    subarray counts as sum 0, so the result is never negative.

    The time complexity of the algorithm is O(n^3), because it consists of
    three nested loops that go through the input.
    """
    best = 0
    size = len(array)
    for i in range(0, size):
        for j in range(i, size):
            curr = 0  # sum of current subarray array[i..j]
            # Bug fix: the subarray must include index j, so iterate over
            # range(i, j + 1); range(i, j) dropped the last element and
            # never considered subarrays ending at the final index.
            for k in range(i, j + 1):
                curr += array[k]
            best = max(best, curr)
    return best
def uncap_sentence(sent):
    """
    Workaround for tweets: sometimes the entire sentence is in upper case.
    If more than 90% of the words start with an upper-case letter, this
    function will convert the sentence to lower case.

    :param sent: the tweet
    :return: the sentence, lower cased if it is (almost) fully capitalised;
             empty/whitespace-only input is returned unchanged
    """
    ssent = sent.split()
    if not ssent:
        # Bug fix: an empty or whitespace-only sentence previously raised
        # ZeroDivisionError.
        return sent
    ssent_cap = [word for word in ssent if word[0].isupper()]
    if len(ssent_cap) / len(ssent) > 0.9:
        return sent.lower()
    return sent
def waseem_to_binary(label: str) -> str:
    """
    Turn Waseem labels into binary labels.

    :label: String as label.
    :returns (str): 'abuse' for sexism/racism/both (case-insensitive),
        'not-abuse' otherwise.
    """
    abusive = {'sexism', 'racism', 'both'}
    return 'abuse' if label.lower() in abusive else 'not-abuse'
def GetSubmoduleName(fullname):
    """Determines the leaf submodule name of a full module name.

    Args:
        fullname: Fully qualified module name, e.g. 'foo.bar.baz'

    Returns:
        Submodule name, e.g. 'baz'. If the supplied module has no submodule
        (e.g., 'stuff'), the returned value will just be that module name
        ('stuff').
    """
    # rpartition returns ('', '', fullname) when no dot is present, so the
    # tail is always the leaf name.
    return fullname.rpartition('.')[2]
def is_leap(year):
    """Returns true if the given year is a leap year, false otherwise."""
    # A year divisible by 4 is a leap year, unless it is a centurial year
    # (divisible by 100) that is not also divisible by 400.
    # @see https://www.cse.unsw.edu.au/~cs1511/17s2/week02/09_leapYear/
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def get_seasons(df):
    """
    Changes months into seasons.

    Parameters
    ----------
    df : dataframe
        the forest fire dataframe; must have a "month" column holding
        lower-case three-letter month abbreviations

    Returns
    -------
    list
        Season ("winter"/"spring"/"summer"/"fall") each row's month is
        associated with; "NONE" for unrecognised month values
    """
    month_to_season = {
        "dec": "winter", "jan": "winter", "feb": "winter",
        "mar": "spring", "apr": "spring", "may": "spring",
        "jun": "summer", "jul": "summer", "aug": "summer",
        "sep": "fall", "oct": "fall", "nov": "fall",
    }
    # Bug fix: iterate the column values directly instead of the previous
    # positional df.loc[x, "month"] loop, which raised KeyError for
    # dataframes with a non-default (non-RangeIndex) index.
    return [month_to_season.get(m, "NONE") for m in df["month"]]
def pandas_barplot(data, x, hue, y, x_order=None, hue_order=None, horizontal=True, stacked=True, **kwargs):
    """Create a barplot using pandas plot functionality.

    Mainly allows for stacked barplots.

    Parameters
    ----------
    data : DataFrame
        DataFrame with columns x and y
    x : str
        Discrete variable plotted on the x-axis.
    hue : str
        Variable separating bars that share the same x value.
    y : str
        Continuous variable defining the height of each bar.
    x_order : list, optional
        Order of the x axis.
    hue_order : list, optional
        Order of the colors.
    horizontal : bool, optional
        Whether to lay the bar plot out horizontally.
    stacked : bool, optional
        Whether to stack barplots.
    **kwargs
        Passed on to pandas' plot function / matplotlib's bar function.

    Returns
    -------
    matplotlib.axes.Axes
    """
    # Reshape long-form data to wide form: one column per hue level.
    wide = data.pivot(index=x, columns=hue, values=y)
    if x_order is not None:
        wide = wide.reindex(index=x_order)
    if hue_order is not None:
        wide = wide[hue_order]
    plot_fn = wide.plot.barh if horizontal else wide.plot.bar
    return plot_fn(stacked=stacked, **kwargs)
def has_fusion(args):
    """Return whether any kind of Fusion reference was supplied on *args*
    (the first truthy one of fusion / fusions / fusion_tag, or the last
    falsy value when none is set)."""
    return args.fusion or args.fusions or args.fusion_tag
def concat(ext):
    """Returns a genrule command to concat files with the extension ext.

    Args:
        ext: File extension without the leading dot, e.g. "txt".

    Returns:
        Shell command string for a Bazel genrule ($$ escapes $ for Bazel).
    """
    # Raw string fix: '\.' inside a normal literal is an invalid escape
    # sequence (DeprecationWarning); the runtime value is unchanged.
    return r"ls $(SRCS) | grep -E '\.{ext}$$' | xargs cat > $@".format(ext=ext)
def chunk(seq, count):
    """Splits given sequence to n chunks as evenly as possible.

    Args:
        seq: Sequence to split
        count: Number of chunks

    Returns:
        List of chunks (slices of ``seq``); sizes differ by at most one
        element when len(seq) is not divisible by count.
    """
    avg = len(seq) / float(count)  # target (fractional) chunk length
    res = []
    i = 0.0
    # Advance a float cursor by `avg` per step; truncating the cursor with
    # int() yields near-even integer slice boundaries.  NOTE(review): float
    # accumulation error could in principle produce an extra near-empty
    # trailing chunk for some inputs — confirm if exact chunk count matters.
    while i < len(seq):
        res.append(seq[int(i):int(i + avg)])
        i += avg
    return res
import re
def grep(source, pattern):
    """Run regex PATTERN over each line in SOURCE and return
    True if any match found (anchored at the start of each line, as with
    re.match)."""
    matcher = re.compile(pattern)
    return any(matcher.match(line) for line in source)
def add_path(tdict, path):
    """
    Create or extend an argument tree `tdict` from `path`.

    :param tdict: a dictionary representing a argument tree
    :param path: a path list
    :return: a dictionary

    Converts the items of `path` into a nested dict: the second-to-last
    item becomes the key for the final item, and every earlier item
    becomes a key of a nested dict wrapping that pair.  Existing keys in
    `tdict` are reused (and extended) rather than replaced.

    For example, for input values of:
        tdict={}
        path = ['assertion', 'subject', 'subject_confirmation',
                'method', 'urn:oasis:names:tc:SAML:2.0:cm:bearer']

    Returns an output value of:
        {'assertion': {'subject': {'subject_confirmation':
            {'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}}}}

    Another example, this time with a non-empty tdict input:
        tdict={'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'},
        path=['subject_confirmation_data', 'in_response_to', '_012345']

    Returns an output value of:
        {'subject_confirmation_data': {'in_response_to': '_012345'},
         'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}
    """
    node = tdict
    # Walk/create intermediate dicts down to the parent of the final pair.
    for key in path[:-2]:
        node = node.setdefault(key, {})
    node[path[-2]] = path[-1]
    return tdict
from typing import List
import requests
from bs4 import BeautifulSoup
import re
def get_keywords(url: str) -> List[str]:
    """
    Returns the keywords of a given url.

    Parameters:
        url (str): a website url

    Returns:
        all_keywords_list (List[str]): list with all keywords as strings.
        If the url does not have keywords a empty list is returned.

    Raises:
        requests.HTTPError: if the HTTP response has an error status
        (via raise_for_status below).
    """
    res = requests.get(url, timeout=60.05)
    res.raise_for_status()
    # Force UTF-8 decoding of the body regardless of the response headers.
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    meta_list = soup.find_all('meta')
    meta_keywords_list = []
    for meta in meta_list:
        # Accept <meta name="keyword"> as well as <meta name="keywords">.
        meta_name = meta.attrs.get('name', '')
        if re.fullmatch(r'keywords?\b', meta_name):
            # NOTE(review): if the tag has no 'content' attribute this
            # appends None, and the split() below would raise — confirm
            # whether such pages occur in practice.
            meta_keywords_list.append(meta.attrs.get('content'))
    all_keywords_list = []
    for meta_keyword in meta_keywords_list:
        # Split comma-separated keyword strings and strip whitespace.
        keywords_list = meta_keyword.split(',')
        keywords_list = [key.strip() for key in keywords_list if key]
        all_keywords_list.extend(keywords_list)
    return all_keywords_list
def prune_seg_tree(seg_tree, prob_threshold=0):
    """
    Args:
        seg_tree (list of dicts):
            In order of splits as done by SegmentationModel.
            E.g. 0-4, then 0-3, then 0-1, then 1-3, then 1-2, then 2-3, then 3-4
            Each dict contains data about that segment:
            'left': start idx, 'right': end idx, 'id', 'parent' (parent's id),
            'text', 'score' (currently P(I|S) for that segment)
        prob_threshold (float): score must be greater than prob_threshold

    Returns seg_tree (list of dicts):
        all segments that fall below prob_threshold removed, including each
        such segment's subsegments (the root is always kept)
    """
    kept = [seg_tree[0]]  # the root segment is always retained
    kept_ids = {seg_tree[0]['id']}
    for seg in seg_tree[1:]:
        # A segment survives only if it clears the threshold AND its parent
        # survived, so pruning removes entire subtrees.
        if seg['score'] > prob_threshold and seg['parent'] in kept_ids:
            kept.append(seg)
            kept_ids.add(seg['id'])
    return kept
def _tag_block(block, tag):
"""Add the open and close tags to an input block."""
return f'<{tag}\n' + block + f'\n{tag}>\n' | bdd53897ec782b37e150592838ea11c58ae1d793 | 102,048 |
def unfresh_token(user):
    """Return an unfresh JWT access token for the given user fixture.

    :param user: Pytest fixture
    :return: Unfresh JWT access token
    """
    token = user.get_access_token()
    return token
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.