content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def check_module( mymodule, indent='' ):
    """
    Build a human-readable report of a module's administrative
    metadata (name, author, version, import location, etc.).

    :Parameters:

    mymodule: Python module
        The module to be checked. It must already have been imported.
    indent: str, optional, default=''
        A string used to pad the left side of the report for indentation

    :Returns:

    moduleinfo: str
        A string describing the module.
    """
    # Attributes reported with the common "<indent> - label: value" layout.
    simple_attrs = [
        ('__name__', 'name'),
        ('__project__', 'project'),
        ('__package__', 'package'),
        ('__author__', 'author'),
        ('__version__', 'version'),
        ('__path__', 'imported from'),
        ('__file__', 'initialised from'),
    ]
    parts = ["%s%s" % (indent, repr(mymodule))]
    for attr, label in simple_attrs:
        if hasattr(mymodule, attr):
            parts.append("%s - %s: %s" % (indent, label, str(getattr(mymodule, attr))))
    if hasattr(mymodule, '__all__'):
        parts.append("%s - length of _all_ list: %d" % (indent, len(mymodule.__all__)))
    if hasattr(mymodule, '__doc__'):
        if mymodule.__doc__ is not None:
            parts.append("%s - (%d character doc string)" % (indent, len(mymodule.__doc__)))
        else:
            parts.append("%s - (null doc string)" % indent)
    else:
        parts.append("%s - (no doc string)" % indent)
    return "\n".join(parts)
from jinja2 import Environment, FileSystemLoader
import os
def load_template(template_dir: str, filename: str, message_dict: dict, lang: str) -> str:
    """
    Render a template, preferring a language-specific variant.

    If "<filename>.<lang>" exists inside ``template_dir`` it is rendered,
    otherwise the plain ``filename`` is used, with ``message_dict`` as the
    rendering context.  Raises RuntimeError when no template can be loaded.
    """
    if isinstance(template_dir, str) and os.path.isdir(template_dir):
        try:
            localized = '.'.join([filename, lang])
            if os.path.exists(os.path.join(template_dir, localized)):
                filename = localized
            env = Environment(loader=FileSystemLoader(template_dir))
            return env.get_template(filename).render(message_dict)
        except OSError:
            pass
    raise RuntimeError("template not found")
def mul(a, b):
    """Return the product of ``a`` and ``b`` (reduction step for products)."""
    product = a * b
    return product
def get_bytes_used(current_time, process_durations):
    """
    Return the number of bytes consumed by time ``current_time``.

    Each entry of ``process_durations`` is the time one process needs to
    exhaust a single byte, so a process with duration d has consumed
    ``current_time // d`` bytes.  The list is assumed sorted ascending,
    which permits an early exit.

    >>> get_bytes_used(12, [2, 3, 4])
    13
    >>> get_bytes_used(14, [2, 3, 4])
    14
    """
    total = 0
    for duration in process_durations:
        if duration > current_time:
            # Sorted input: every remaining duration is larger too.
            break
        total += current_time // duration
    return total
def to_pascal_case(value: str, ignore_pattern=None) -> str:
    """
    Convert a snake_case string to PascalCase.

    :param value: the input string
    :param ignore_pattern: optional compiled regex; a single-word value
        matching it is returned unchanged
    :return: the PascalCase string
    """
    words = value.split('_')
    if len(words) == 1:
        only = words[0]
        if ignore_pattern and ignore_pattern.match(only):
            return only
        return only.title()
    return ''.join(word.title() for word in words if not word.isspace())
def call_api(func, n):
    """
    Return ``func`` if it is truthy; for a falsy response, recurse up to
    ``n`` more times before returning it anyway.
    """
    if n == 0:
        return func
    return func or call_api(func, n - 1)
from typing import List
import heapq
def maxEvents(self, events: List[List[int]]) -> int:
    """
    Greedy: on each day attend, among the events already open, the one
    that ends earliest (min-heap of end days).

    :param events: list of [start_day, end_day] pairs (consumed in place)
    :return: maximum number of events that can be attended
    """
    # Sort descending so the soonest-starting event sits at the tail.
    events.sort(reverse=True)
    open_ends = []
    attended = 0
    day = 0
    while events or open_ends:
        if not open_ends:
            # Nothing currently attendable: jump to the next start day.
            day = events[-1][0]
        # Open every event starting today.
        while events and events[-1][0] == day:
            heapq.heappush(open_ends, events.pop()[1])
        # Attend the open event that expires first.
        heapq.heappop(open_ends)
        attended += 1
        day += 1
        # Drop events whose end day has already passed.
        while open_ends and open_ends[0] < day:
            heapq.heappop(open_ends)
    return attended
def produto_vetorial(mat_A: list, mat_B: list) -> list:
    """Return the matrix product of A and B (lists of row lists)."""
    # Inner dimensions must agree.
    assert len(mat_A[0]) == len(mat_B)
    n_inner = len(mat_A[0])
    n_cols = len(mat_B[0])
    return [
        [
            sum(mat_A[i][k] * mat_B[k][j] for k in range(n_inner))
            for j in range(n_cols)
        ]
        for i in range(len(mat_A))
    ]
def solutionClosed(n: int, p: float) -> float:
    """
    Closed form of the recurrence h[0] = 1, h[n] = (1 - 2p) h[n-1] + p.

    Iterating gives h[n] = q^n + p(q^(n-1) + ... + q^0) with q = 1 - 2p;
    summing the geometric series and simplifying yields
    h[n] = ((1 - 2p)^n + 1) / 2.
    """
    q = -2 * p + 1
    return (q ** n + 1) / 2
def has_lower_letters(password):
    """Return True if at least one character of ``password`` is lowercase."""
    for ch in password:
        if ch.islower():
            return True
    return False
import yaml
def load(file: str = "lang/en_US.yaml") -> dict:
    """Load a YAML language file into the module-global ``lang`` and return it."""
    global lang
    with open(file, encoding="utf-8") as handle:
        lang = yaml.safe_load(handle)
    return lang
async def watches(ctx, msg):
    """List the invoking user's stattrak watches.

    :param ctx: bot context exposing the database handle ``ctx.db``
    :param msg: the triggering message (guild/author identify the user)
    :return: dict with a 'reply' string listing watch names and counts
    """
    uid, _ = await ctx.db.get_user_id(msg.guild.id, msg.author.id)
    rows = await ctx.db.get_user_watches(uid)
    lines = []
    for _, _, name, _, _, _, count, _, _ in rows:
        lines.append("\n * `{}`: {:d}".format(name, count))
    return {'reply': ''.join(lines)}
import re
def GetRegexPattern(reobj):
    """
    * Get the regex pattern string from
    the regular expression.
    Inputs:
    * reobj: Regular expression object.
    Raises Exception if reobj is not a compiled pattern.
    """
    if not isinstance(reobj, type(re.compile(''))):
        raise Exception('reobj must be a regular expression object.')
    # The previous str(...).strip("re.compile()'") removed *characters*
    # from both ends, corrupting any pattern that starts or ends with one
    # of r,e,c,o,m,p,i,l,(,),' (e.g. 'abc' became 'ab').  The compiled
    # pattern object exposes the original string directly.
    return reobj.pattern
def extract_property_from_uri(uri: str) -> str:
    """
    Extract the property name from a property URI.

    :param uri: e.g. <http://www.kg.com/kg/ontoligies/ifa#createTime>
    :return: the text after the last '#', e.g. 'createTime'
    :raises ValueError: if the URI contains no '#'
    """
    _, separator, name = uri.rpartition('#')
    if not separator:
        raise ValueError
    return name
def truncated_mean(data):
    """Compute the mean after discarding values outside the 10th-90th
    percentile range.

    Args:
        data (DataFrame): Pandas dataframe (or Series).
    Returns:
        Mean of the remaining values.
    """
    upper = data.quantile(0.9)
    lower = data.quantile(0.1)
    kept = data[(data >= lower) & (data <= upper)]
    return kept.mean()
def is_supported_value_type(value):
    """
    Recursively check whether ``value`` is built only from supported types.

    Supported types:
        - strings, bytes, numbers, booleans, None
        - tuples, lists, sets of supported values
        - dicts whose keys *and* values are supported
    """
    if value is None or isinstance(value, (str, bytes, int, float, bool)):
        return True
    if isinstance(value, (tuple, list, set)):
        return all(is_supported_value_type(item) for item in value)
    if isinstance(value, dict):
        return all(
            is_supported_value_type(k) and is_supported_value_type(v)
            for k, v in value.items()
        )
    return False
import six
def _is_possible_token(token, token_length=6):
"""Determines if given value is acceptable as a token. Used when validating
tokens.
Currently allows only numeric tokens no longer than 6 chars.
:param token: token value to be checked
:type token: int or str
:param token_length: allowed length of token
:type token_length: int
:return: True if can be a candidate for token, False otherwise
:rtype: bool
>>> _is_possible_token(123456)
True
>>> _is_possible_token(b'123456')
True
>>> _is_possible_token(b'abcdef')
False
>>> _is_possible_token(b'12345678')
False
"""
if not isinstance(token, bytes):
token = six.b(str(token))
return token.isdigit() and len(token) <= token_length | 186447c08cb64178e1f99fabc5523f2a92762ca5 | 49,420 |
def add_releaseyear(dataframe):
    """Add a 'release_year' column holding the first four characters of
    each (stringified) 'release_date' value; returns the same dataframe."""
    years = dataframe['release_date'].apply(lambda value: str(value)[:4])
    dataframe['release_year'] = years
    return dataframe
def interpolate_grids(data2, interpolation_scheme):
    """ Interpolate all variables in data to a different grid.
    Possible interpolations are:
    T -> U; T -> V; U -> V; V -> U.
    Each scheme is a two- or four-point average of neighbouring cells on
    the staggered grid.
    Parameter
    ---------
    data2 : xarray.Dataset
        Dataset containing 4D ('time_counter','x','y','z') and
        3D ('time_counter','x','y','z') variables.
    interpolation_scheme: str
        Valid values are: t2u, t2v, u2v, v2u
    Returns
    -------
    xarray.Dataset
        A copy of the input with every data variable interpolated.
    """
    # Work on a copy so the caller's dataset is left untouched.
    data = data2.copy()
    if interpolation_scheme not in ["t2u", "t2v", "u2v", "v2u"]:
        raise ValueError('Interpolation scheme is unknown. Valid values are:\
            t2u, t2v, u2v, v2u')
    # T -> V: two-point average of neighbours along y.
    if interpolation_scheme == "t2v":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(y=-1)) * 0.5
    # T -> U: two-point average of neighbours along x.
    if interpolation_scheme == "t2u":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(x=-1)) * 0.5
    # U -> V: four-point average of the surrounding cells.
    if interpolation_scheme == "u2v":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(x=1) +
                       data[v].shift(y=-1) +
                       data[v].shift(x=1, y=-1)) * 0.25
    # V -> U: four-point average of the surrounding cells (opposite shifts).
    if interpolation_scheme == "v2u":
        for v in data.data_vars:
            data[v] = (data[v] + data[v].shift(x=-1) +
                       data[v].shift(y=1) +
                       data[v].shift(x=-1, y=1)) * 0.25
    return data
import unicodedata
def maketrans_remove(accents=("COMBINING ACUTE ACCENT", "COMBINING GRAVE ACCENT")):
    """Build a str.translate table that deletes the given accents.

    :param accents: Unicode character names to strip
    :return: translation table mapping each named character to deletion
    """
    targets = [unicodedata.lookup(name) for name in accents]
    return str.maketrans("", "", "".join(targets))
def config(request):
    """
    Indirectly-parametrized pytest fixture: resolves the fixture named by
    the current parameter (``request.param``) and returns its value.
    The parameters expected in request are the available additional
    configurations, e.g.: test_techsupport[acl]
    """
    return request.getfixturevalue(request.param)
import struct
def unpackbyte(b):
    """
    Return the integer value of a one-byte byte string — the single value
    produced by struct.unpack("B", b).
    """
    values = struct.unpack("B", b)
    return values[0]
import math
def slurm_format_memory(n):
    """Format a byte count for use with slurm (G/M/K suffix, 1K minimum)."""
    for power, suffix in ((3, "G"), (2, "M"), (1, "K")):
        unit = 1024 ** power
        if n >= 10 * unit:
            return "%d%s" % (math.ceil(n / unit), suffix)
    return "1K"
def clean(phrase, valid):
    """Remove all characters from `phrase` that are not in `valid`.

    :param phrase: the phrase to clean
    :param valid: the set of valid characters. A sensible default is
        `sounds.ALL_TOKENS`.
    """
    kept = [ch for ch in phrase if ch in valid]
    return ''.join(kept)
import time
from datetime import datetime, timezone
def base_record() -> dict:
    """
    Return a basic record with the audit flags we use in all records.
    Args:
        None
    Returns:
        (dict): dict with audit fields populated ("time" epoch float,
        "time_str" UTC timestamp "%Y-%m-%d %H:%M:%S").
    """
    # datetime.utcnow() is deprecated; an aware UTC datetime renders the
    # identical "%Y-%m-%d %H:%M:%S" string.
    now_utc = datetime.now(timezone.utc)
    return {"time": time.time(), "time_str": now_utc.strftime("%Y-%m-%d %H:%M:%S")}
from typing import List
def reverse(data: List[int]) -> List[int]:
    """Return a new list with the elements of ``data`` in reverse order.

    Note: the input list is consumed (left empty), matching the original
    pop-based algorithm:
        while there are numbers in data, pop the last and append it.
    """
    flipped: List[int] = []
    while data:
        flipped.append(data.pop())
    return flipped
def _get_cbend_msg(is_stress, is_mag_phase, is_sort1):
    """get the header for the CBEND result

    :param is_stress: True -> stress table title, False -> strain
    :param is_mag_phase: accepted for a uniform signature; unused here
    :param is_sort1: must be True (only the SORT1 header is implemented)
    :returns: list of header line strings (title, 'CIRC.' line, columns)
    """
    if is_stress:
        stress_strain = '                                  S T R E S S E S   I N   B E N D   E L E M E N T S        ( C B E N D )'
    else:
        stress_strain = '                                   S T R A I N S    I N   B E N D   E L E M E N T S        ( C B E N D )'
    # Only SORT1 (by element) output is supported.
    assert is_sort1 is True
    sort1 = '   ELEMENT-ID  GRID    END      ANG.     SXC           SXD           SXE           SXF           S-MAX         S-MIN        M.S.-T   M.S.-C\n'
    msg = [
        stress_strain,
        '                                 CIRC.',
        sort1,
    ]
    #'0       6901    6901      A     0    4.372282E-16 -5.960465E-15  0.0           0.0           4.372282E-16 -5.960465E-15 '
    #'                6902      B     0   -6.533992E-15  5.000001E-07 -5.000000E-13 -5.000000E-13  5.000001E-07 -5.000000E-13 -6.0E-01  6.0E+05'
    #if is_sort1:
    #msg.append('   ELEMENT-ID  GRID    END      ANG.     C             D             E             F\n')
    #else:
    #msg.append('   FREQUENCY   GRID    END      ANG.     C             D             E             F\n')
    return msg
def lowercase_first(value: str) -> str:
    """
    Lower-case the first character of ``value``.

    Parameters
    ----------
    value: :class:`str`
        The value to lower case.

    Returns
    -------
    :class:`str`
        The string with its first character lower-cased ('' stays '').
    """
    if not value:
        return value
    return value[0].lower() + value[1:]
def get_non_hidden_fields(form):
    """
    Returns all the visible fields of the form.

    Delegates to ``form.visible_fields()`` — the form object is expected
    to provide that method (e.g. a Django form).
    """
    return form.visible_fields()
import itertools
def correct_answer_combinations(correct_answer):
    """
    Determine all possible orderings of the "correct answers" to a
    multi-choice question.

    Args:
        correct_answer: sequence whose elements comprise the correct answer.
    Returns:
        list of lists, one per permutation of ``correct_answer``.
    """
    return [list(order) for order in itertools.permutations(correct_answer)]
def is_namedtuple_cls(cls):
    """Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple"""
    try:
        if not issubclass(cls, tuple):
            return False
    except TypeError:
        # Not a class at all.
        return False
    if getattr(cls, "__module__", None) == "torch.return_types":
        return True
    bases = getattr(cls, "__bases__", []) or [None]
    # collections.namedtuple classes derive directly from tuple and carry
    # the _make/_fields API.
    return bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields")
def sort_rat(name_list: list, ratings: list, reverse: bool = True):
    """
    Sort ratings associated with names. The sort is stable.

    :param name_list: names associated with the ratings, respectively
    :param ratings: the ratings to sort
    :param reverse: descending by default; False means ascending
    :return: list of (name, rating, ranking) tuples
    :raises ValueError: when the two lists differ in length
    """
    if len(name_list) != len(ratings):
        raise ValueError("# of names %d does not equal to # of ratings %d" % (len(name_list), len(ratings)))
    paired = list(zip(name_list, ratings))
    paired.sort(key=lambda pair: pair[1], reverse=reverse)
    return [(name, rating, rank) for rank, (name, rating) in enumerate(paired, 1)]
def sigmoid(x):
    """
    Sigmoid(x) = 1 / (1+e^-x)

    ``x`` must expose an ``.exp()`` method (e.g. a torch tensor).
    """
    exp_neg = (-x).exp()
    return 1 / (1 + exp_neg)
import subprocess
import os
def GSL_library_dir():
    """return directory for location of GSL binaries, useful when combining GSL and cython

    Resolution order:
      1. the first ``-L<dir>`` token of ``gsl-config --libs``,
      2. the ``LIB_GSL`` environment variable (with "/lib" appended),
      3. a hardcoded GnuWin32 path (Windows fallback).
    """
    try:
        # First token of `gsl-config --libs` is "-L<dir>"; [2:] drops "-L".
        lib_gsl_dir = subprocess.check_output('gsl-config --libs', shell=True).decode('utf-8').split()[0][2:]
    except subprocess.CalledProcessError:
        lib_gsl_dir = os.getenv('LIB_GSL')
        if lib_gsl_dir is None:
            # Environmental variable LIB_GSL not set, use hardcoded path.
            lib_gsl_dir = r"c:\Program Files\GnuWin32\lib"
        else:
            lib_gsl_dir += "/lib"
    return lib_gsl_dir
def to_the_power(number, power):
    """
    Raise ``number`` to the given ``power``.

    :param number: the base value
    :param power: the exponent
    :return: number ** power
    """
    return number ** power
def duplicate_free_list_polynomials(list_of_polys):
    """
    Build a duplicate-free list from a list of boolean functions.

    # Arguments
        list_of_polys (BooleanFunction): A list of polynomials.
    # Returns
        list: the input order preserved, equality-duplicates removed.
    """
    unique = []
    for candidate in list_of_polys:
        # `True not in [...]` is kept deliberately: `==` on symbolic
        # polynomials may return non-bool objects, and membership testing
        # against True handles that the same way the original did.
        if True not in [candidate == seen for seen in unique]:
            unique.append(candidate)
    return unique
def copy_df(df):
    """
    DESCRIPTION
    -----------
    Return a deep copy of a pandas dataframe.

    PARAMETERS
    ----------
    df : pd.DataFrame
        A pandas dataframe instance

    RETURNS
    -------
    A deep copy of the given pandas dataframe
    """
    duplicate = df.copy(deep=True)
    return duplicate
def strip_locals(data):
"""Returns a dictionary with all keys that begin with local_ removed.
If data is a dictionary, recurses through cleaning the keys of that as well.
If data is a list, any dictionaries it contains are cleaned. Any lists it
contains are recursively handled in the same way.
"""
if isinstance(data, dict):
return {key: strip_locals(value) for key, value in data.items()
if not (isinstance(key, str) and key.startswith('local_'))}
elif isinstance(data, list):
data = [strip_locals(item) for item in data]
return data | 92fb36a2f3bb618b7b706dd33a3347666d0fa07e | 49,448 |
import re
def get_error_log_compiled_pattern():
    """Create the compiled pattern for parsing error-log lines.

    :return: compiled pattern with named groups 'id' and 'rule_id'
    :rtype: compiled pattern in re library

    Example of valid pattern:
    'the request e90f9480d500cad488650afb3a73c854 blocked '
    'rule ID 1567'
    """
    pattern = r'the request (?P<id>[a-zA-Z0-9]+) .+rule ID (?P<rule_id>\d+)'
    return re.compile(pattern)
def budget_equalities(sequence:str, price_vars:list, budget_vars:list)->list:
    """
    Given a picking-sequence, create symbolic equalities determining that the
    total price of each agent's bundle equals the agent's budget.
    Currently works only for 2 agents.

    :param sequence: a string determining the picking-sequence, e.g. "ABA".
    :param price_vars: a list of symbols, e.g. symbols('p3,p2,p1')
    :param budget_vars: a list of symbols, e.g. symbols('a,b')
    :return: a list of constraints, as expressions that should be equal to 0
        (each agent's budget minus the prices of the items they picked).
    """
    if len(budget_vars) != 2:
        raise ValueError("Currently only 2 agents are supported")
    remainder_a = budget_vars[0]
    remainder_b = budget_vars[1]
    for index, picker in enumerate(sequence):
        if picker == "A":
            remainder_a -= price_vars[index]
        elif picker == "B":
            remainder_b -= price_vars[index]
        else:
            raise ValueError("Found picker {} but only two agents are supported".format(picker))
    return [remainder_a, remainder_b]
def get_rAB_2(new_solution, rAB_p1, rAB_c1):
    """
    Compute the second-step rAB_p2 (and rAB_c2) values for all species.

    For each ordered pair (A, B) of distinct species, the two-step value is
    the sum over every intermediate species M of
    rAB_1["A_M"] * rAB_1["M_B"].

    Parameters
    ----------
    new_solution: object exposing ``species_names`` (e.g. a cantera model)
    rAB_p1: dict of first-step production coefficients keyed "A_B"
    rAB_c1: dict of first-step consumption coefficients keyed "A_B"

    Returns
    -------
    (rAB_p2, rAB_c2) dictionaries keyed "A_B".
    """
    rAB_p2 = {}
    rAB_c2 = {}
    names = new_solution.species_names
    for sp_a in names:
        for sp_b in names:
            if sp_a == sp_b:
                continue
            pair_key = sp_a + "_" + sp_b
            prod_total = 0
            cons_total = 0
            # Accumulate contributions through every intermediate species.
            for sp_m in names:
                if sp_m == sp_a or sp_m == sp_b:
                    continue
                first_leg = sp_a + "_" + sp_m
                second_leg = sp_m + "_" + sp_b
                prod_total += rAB_p1[first_leg] * rAB_p1[second_leg]
                cons_total += rAB_c1[first_leg] * rAB_c1[second_leg]
            rAB_p2[pair_key] = prod_total
            rAB_c2[pair_key] = cons_total
    return rAB_p2, rAB_c2
def newtonMethod(f, df, x0, tol=0.001):
    """Find a root of ``f`` by Newton iteration starting at ``x0``,
    stopping once |f(x)| <= ``tol``.

    :param f: the function whose root is sought
    :param df: derivative of ``f``
    :param x0: initial guess
    :param tol: stopping tolerance on |f(x)|
    """
    estimate = x0
    while abs(f(estimate)) > tol:
        estimate -= float(f(estimate)) / df(estimate)
    return estimate
def get_recipes_in_node(node):
    """Gets the name of all recipes present in the run_list of a node"""
    names = []
    for entry in node.get('run_list'):
        if not entry.startswith("recipe"):
            continue
        # "recipe[mysql::server]" -> "mysql::server"
        names.append(entry.split('[')[1].split(']')[0])
    return names
import math
def fwhm_to_gamma(fwhm, beta):
    """Convert a FWHM to the gamma width parameter for the given ``beta``
    exponent (values in arcsec)."""
    half_width = fwhm / 2.
    return half_width / math.sqrt(2 ** (1. / beta) - 1)
from pathlib import Path
from typing import Tuple
from datetime import datetime
def parse_dates_from_filename(file: Path) -> Tuple[datetime, datetime]:
    """
    Extract the start and end times encoded in a file name of the form
    ``<a>__<b>__<start>__<end>[_temp]`` (times as %Y_%m_%dT%H_%M_%SZ).

    :param file: path whose stem carries '__'-separated fields
    :return: tuple of start and end time
    :raises ValueError: if the stem has fewer than four fields
    """
    fields = file.stem.split("__")
    if len(fields) < 4:
        raise ValueError(f"Cannot extract datetimes from file name: {file.name}")
    fmt = "%Y_%m_%dT%H_%M_%SZ"
    start = datetime.strptime(fields[2], fmt)
    end = datetime.strptime(fields[3].replace("_temp", ""), fmt)
    return start, end
def weekend_init(M, i, t):
    """
    Determines days to treat as weekend using midnight threshold parameters

    :param M: Model (exposes lengths, tt_length_x, midnight_thresh)
    :param i: period
    :param t: tour type
    :return: list of ints; either [1, 7] or [6, 7]
    """
    longest = max(M.lengths[k] for k in M.tt_length_x[t])
    if i + longest - 1 >= M.midnight_thresh[t]:
        days = [6]
    else:
        days = [1]
    days.append(7)
    return days
import json
def canonical_json(o):
    """
    Dumps an object as canonical JSON string.
    Canonical JSON does not contain an space (except in strings) and
    have all the keys sorted.
    Args:
        o: The object to dump.
    Return:
        The canonical JSON string.
    """
    # Default json.dumps inserts ", " and ": " separators, which violates
    # the no-whitespace requirement stated above; compact separators fix it.
    return json.dumps(o, sort_keys=True, separators=(',', ':'))
def grep(pattern, flags, files):
    """
    Search file(s) for lines matching a regular expression pattern
    and return the line number and contents of each matching line.

    Supported flags: -i (case-insensitive), -x (whole-line match),
    -v (invert matches), -l (output only names of matching files),
    -n (prefix each match with its line number).

    :param pattern string - pattern used to match lines in a file.
    :param flags string - Zero or more flags to customize the matching behavior.
    :param files list - One or more files in which to search for matching lines.
    :return string - line number and contents of each matching line
    """
    output = ""
    if flags.count("-i") > 0:
        pattern = pattern.lower()
    for file in files:
        with open(file) as f:
            line_number = 1
            for line in f:
                # regex?
                # Try to match (based on flags) the pattern in the line of a file.
                matched = False
                if flags.count("-x") > 0:
                    # Whole-line match: the read line keeps its trailing
                    # newline, so it is appended to the pattern.
                    # NOTE(review): with -i, the elif below still runs a
                    # case-sensitive comparison as a fallback — confirm
                    # this is intentional.
                    if flags.count("-i") > 0 and line.lower() == pattern + "\n":
                        matched = True
                    elif line == pattern + "\n":
                        matched = True
                elif flags.count("-i") > 0 and line.lower().count(pattern) > 0:
                    matched = True
                elif line.count(pattern) > 0:
                    matched = True
                # Invert matched and not matched (when -v is present)
                if flags.count("-v") > 0:
                    if matched:
                        matched = False
                    else:
                        matched = True
                # If it matches, append to output
                if matched:
                    # Output just the file name, and move on
                    if flags.count("-l") > 0:
                        output += file + "\n"
                        break
                    # Prefix with the file name only in multi-file searches.
                    if len(files) > 1:
                        output += file + ":"
                    if flags.count("-n") > 0:
                        output += str(line_number) + ":"
                    output += line
                line_number += 1
    return output
from bs4 import BeautifulSoup
def _parse_winrates(html):
    """
    Parse matchup win rates from a TLPD-style map page. Each matchup
    occupies four consecutive <td> cells (label, games, winrate, link):

    <td><strong>TvZ</strong>:</td>
    <td>148-139</td>
    <td>(51.6%)</td>
    <td>[ <a href="/tlpd/korean/maps/237_Fighting_Spirit/games/TvZ">Games</a> ]</td>
    <td><strong>ZvP</strong>:</td>
    <td>134-117</td>
    <td>(53.4%)</td>
    <td>[ <a href="/tlpd/korean/maps/237_Fighting_Spirit/games/ZvP">Games</a> ]</td>
    <td><strong>PvT</strong>:</td>
    <td>143-137</td>
    <td>(51.1%)</td>
    <td>[ <a href="/tlpd/korean/maps/237_Fighting_Spirit/games/TvP">Games</a> ]</td>

    Returns a dict with per-matchup "games winrate" strings plus a
    combined 'summary' string.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Take the first child of every table cell; indices 1/2, 5/6, 9/10 are
    # the games/winrate cells of the three matchups (offsets of 4 per row).
    rows = [row.contents[0] for row in soup.select(".roundcont table td")]
    tvz_games, tvz_wr = rows[1], rows[2]
    zvp_games, zvp_wr = rows[5], rows[6]
    pvt_games, pvt_wr = rows[9], rows[10]
    return {'TvZ': "{} {}".format(tvz_games, tvz_wr),
            'ZvP': "{} {}".format(zvp_games, zvp_wr),
            'PvT': "{} {}".format(pvt_games, pvt_wr),
            'summary': "TvZ: {} {}\n"
                       "ZvP: {} {}\n"
                       "PvT: {} {}".format(tvz_games, tvz_wr,
                                           zvp_games, zvp_wr,
                                           pvt_games, pvt_wr)}
def function_with_docstring(a: int, b: int) -> int:
    """Return ``a`` raised to the power ``b``.

    This function has a docstring; because of that, it shouldn't be
    generated.
    """
    result = a ** b
    return result
def _set_new_pose(pose_np, sign, axis):
"""set new pose with axis convention."""
target_sign = [-1, 1, -1]
target_axis = ['x', 'z', 'y']
pose_rearrange_axis_result = pose_np.copy()
for axis_index, axis_name in enumerate(target_axis):
src_axis_index = axis.index(axis_name)
pose_rearrange_axis_result[..., axis_index] = \
pose_np[..., src_axis_index]
for dim_index in range(pose_rearrange_axis_result.shape[-1]):
pose_rearrange_axis_result[
..., dim_index] = sign[dim_index] / target_sign[
dim_index] * pose_rearrange_axis_result[..., dim_index]
return pose_rearrange_axis_result | 5dfeac95bf97a88397e86e5ebee2ba7cae518961 | 49,463 |
def read_summary_file(summary_file):
    """
    Read a partial summary CSV and collect per-gene info.

    :param summary_file: (string) name of the partial summary file
    :return: (genes, ncase, nctrl, case_qual, ctrl_qual) where ``genes``
        maps gene name -> row fields from column 2 onward, ncase/nctrl are
        the case/control totals taken from the first data row, and
        case_qual/ctrl_qual list the qualified counts of every row.
    """
    genes = {}
    case_qual = []
    ctrl_qual = []
    with open(summary_file, 'r') as infile:
        header = infile.readline().strip().split(',')
        gene_index = header.index('Gene Name')
        qcase_index = header.index('Qualified Case')
        uqcase_index = header.index('Unqualified Case')
        qctrl_index = header.index('Qualified Ctrl')
        uqctrl_index = header.index('Unqualified Ctrl')
        # The first data row also determines the case/control totals.
        first = infile.readline().strip().split(',')
        genes[first[gene_index]] = list(first[2:])
        ncase = int(first[qcase_index]) + int(first[uqcase_index])
        nctrl = int(first[qctrl_index]) + int(first[uqctrl_index])
        case_qual.append(int(first[qcase_index]))
        ctrl_qual.append(int(first[qctrl_index]))
        for raw in infile:
            fields = raw.strip().split(',')
            genes[fields[gene_index]] = list(fields[2:])
            case_qual.append(int(fields[qcase_index]))
            ctrl_qual.append(int(fields[qctrl_index]))
    return (genes, ncase, nctrl, case_qual, ctrl_qual)
def _count_trailing_zeros(mask: int) -> int:
"""count the trailing zeros of a bit mask. Used for shifting.
Args:
mask (int): bit mask, eg 0b00111000
Returns:
int: number of trailing zeros
"""
if mask == 0:
raise ValueError("mask is all zeros")
count = 0
for i in range(mask.bit_length()):
if mask & 1:
return count
count += 1
mask >>= 1
return count | 3f8ce2c9a5a5143715e2fe52aabdeed3ae692f17 | 49,465 |
import os
def get_dirs_for_features(root, features, dirs):
    """
    Find which directories to include in the list of build dirs based upon the
    enabled port(s) and features.

    :param root: base directory relative paths are resolved against
    :param features: feature names; each may contribute a "<dir>/<feature>"
        subdirectory if it exists on disk
    :param dirs: starting list of relative directories (mutated in place)
    :return: ``dirs`` extended with every existing feature subdirectory
    """
    # NOTE(review): outdirs aliases dirs, so the caller's list is mutated,
    # and the loop iterates entries appended during iteration — feature
    # dirs of feature dirs also get scanned. Presumably intentional
    # (transitive discovery), but worth confirming.
    outdirs = dirs
    for adir in dirs:
        for feature in features:
            relpath = os.path.join(adir, feature)
            featuredir = os.path.join(root, relpath)
            # Only include paths that exist and are not already listed.
            if os.path.exists(featuredir) and not relpath in outdirs:
                outdirs.append(relpath)
    return outdirs
def subsetData(data):
    """
    Placeholder for subsetting ``data`` based on inputs.

    Currently performs no processing and returns the input unchanged.
    """
    # ### processes (not yet implemented)
    return data
import os
def ensure_file(file):
    """Check that ``file`` exists; return its tilde-expanded, normalized
    path, or raise FileNotFoundError when it is not a regular file."""
    # tilde expansion first, then path normalization
    expanded = os.path.expanduser(file)
    file_path = os.path.normpath(expanded)
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"The file {file_path} doesn't exist")
    return file_path
import logging
def prepare_data(df):
    """Split a dataframe into independent features X and the target "y".

    Args:
        df (pd.DataFrame): input data containing a "y" column
    Returns:
        tuple: (X, y) — all columns except "y", and the "y" column itself
    """
    logging.info("preparing the data for training")
    target = df["y"]
    features = df.drop("y", axis=1)
    return features, target
def imm_to_bin(immediate):
    """Render an immediate value as an 18-character, zero-padded binary
    literal, e.g. 5 -> '0b0000000000000101'."""
    return format(int(immediate), '#018b')
def relative_x(xarr, yarr, *args):
    """Shift ``xarr`` so it starts at zero (its first value is subtracted
    from each element); ``yarr`` and any extra args pass through unchanged."""
    shifted = xarr - xarr[0]
    return (shifted, yarr) + args
def update_output_div(input_value):
    """Render the user's input as a string (Dash callback output).

    Args:
        input_value: the input of dash.
    Returns:
        the value formatted with its default format spec.
    """
    return format(input_value)
def adjust_site_parameters(site):
    """Return a shallow copy of ``site`` with the fixed PV modeling
    parameters attached and the module recorded in extra_parameters.

    Note: because the copy is shallow, ``site['extra_parameters']`` is the
    same dict as the input's and gains the 'module' key in place.
    """
    adjusted = site.copy()
    adjusted['modeling_parameters'] = {
        'ac_capacity': 0.00324,  # no clipping
        'dc_capacity': 0.00324,
        'temperature_coefficient': -0.420,
        'dc_loss_factor': 0,
        'ac_loss_factor': 0,
        'surface_tilt': 35,
        'surface_azimuth': 180,
        'tracking_type': 'fixed',
    }
    adjusted['extra_parameters']['module'] = 'Suniva 270W'
    return adjusted
import random
def purchase():
    """Return a random purchase amount in cents, in [1000, 90000)."""
    return random.randrange(1000, 90000)
def get_state(person):
    """Return only the 'state' entry of a person dict."""
    state = person['state']
    return state
def kwargsGet(kwargs, key, replacement):
    """As kwargs.get, but also uses ``replacement`` when the stored value
    is None (not just when the key is missing)."""
    value = kwargs.get(key)
    return replacement if value is None else value
def countDigits(number: int):
    """Count the characters in the decimal representation of ``number``.

    Note: for a negative number the '-' sign is included in the count.

    Args:
        number (int): any number!
    Returns:
        int: number of digits of your inputted number.
    """
    return len(f"{number}")
def calc_nbases(DNA):
    """Count the 'N' bases in a DNA sequence (case-insensitive).

    Returns 'Invalid Seq' if any character is not one of A, G, C, T, N.
    """
    sequence = DNA.upper()
    if any(base not in 'AGCTN' for base in sequence):
        return 'Invalid Seq'
    return sequence.count('N')
def set_creo_version(client, version):
    """Tell creoson which Creo version is running.

    Only needs to be called once per creoson session; it is required for
    certain functions under Creo 7 because of deprecated config options:
        familytable_replace, file_assemble, file_regenerate,
        feature_delete, feature_resume, feature_suppress

    Args:
        client (obj): creopyson Client.
        version (int): Creo version.

    Returns:
        None.
    """
    payload = {"version": int(version)}
    return client._creoson_post("creo", "set_creo_version", payload)
def get_define_item(define_module, name):
    """Return the definition entry whose first element equals ``name``,
    or None when no entry matches."""
    return next(
        (entry for entry in define_module.define if entry[0] == name),
        None,
    )
def rank_items(items):
    """Attach a value-density rank (price / weight) to each item.

    Mutates each dict in *items* in place by adding a 'rank' key, and
    returns the same list for convenience.

    Args:
        items: list of dicts with numeric 'price' and 'weight' keys.

    Returns:
        The input list, with ``item['rank'] == price / weight``.
    """
    for item in items:
        # Python 3 '/' already yields a float; the old "* 1.0"
        # coercion trick is unnecessary.
        item['rank'] = item['price'] / item['weight']
    return items
from typing import List
from typing import Dict
def get_entities_fields(entity_title: str, entities: List[str]) -> List[Dict]:
    """
    Build a Slack attachment field from an entity title and entity list.
    Args:
        entity_title (str): Title of the entity.
        entities (List[str]): List of the entities, joined with newlines.
    Returns:
        (List[Dict]): Single-element list containing the field dict —
        a list because that is the format the Slack API expects.
    """
    field = {
        "title": f'{entity_title}',
        "value": '\n'.join(entities),
        "short": False,
    }
    return [field]
def ZhangJohnson(tokens, index, history):
    """
    These features were taken from Zhang and Johnson (2003). They correspond
    to the combination B+D+E+F in their paper.
    Feature 'C' was not included, since
    CRFsuite cannot handle 2nd order CRFS (which can use the i-1 and (i-2)th
    labels) or features of the form label_(i-1) & tok_i.
    Feature 'G' (part-of-speech tags), feature 'H' (chunking tags) and
    features 'I' & 'J' (dictionaries and gazetteers specific to the CONLL 2003
    shared task) are not included.
    Parameters
    ----------
    tokens : sequence of tuples
        Sentence tokens; only element [0] (the word form) of each tuple is
        read here. Presumably 3-tuples like (word, tag, chunk), matching
        the 3-element padding tuples below — TODO confirm with callers.
    index : int
        0-based position of the current token within ``tokens``.
    history : list of str
        Previously assigned labels; padded for symmetry with ``tokens``
        but not otherwise used by this feature set.
    Returns
    -------
    dict
        Feature-name -> value mapping for the token at ``index``.
    References
    ----------
    - Turian J, Rativ L, Bengio Y. Word representations: a simple and general
    method for semi-supervised learning. Proceedings of the 48th annual meeting
    of the association for computational linguistics. Association for
    Computational Linguistics, 2010.
    - Zhang, T. and Johnson D. A robust risk minimization based named entity
    recognition system. Proceedings of the seventh conference on natural
    language learning at HLT-NAACL 2003-Volume 4, Association for Computational
    Linguistics, 2003.
    """
    # Pad both ends so the +/-2 window lookups below never go out of range.
    start_pad2 = ('[START2]', '[START2]','[START2]')
    start_pad1 = ('[START1]', '[START1]','[START1]')
    end_pad2 = ('[END2]', '[END2]','[END2]')
    end_pad1 = ('[END1]', '[END1]','[END1]')
    tokens = [start_pad2, start_pad1] + list(tokens) + [end_pad1, end_pad2]
    history = ['[START2]', '[START1]'] + history
    index += 2 # Shift index to accommodate padding in front of list.
    # Feature set 'B': Tokens in a window of 2
    word = tokens[index][0]
    prevword = tokens[index - 1][0]
    prev2word = tokens[index - 2][0]
    nextword = tokens[index + 1][0]
    next2word = tokens[index + 2][0]
    # Feature set 'D': Initial capitalization of tokens in a window of 2
    # (comparing against capitalize() is True for uncased chars like digits).
    capitalized = word[0] == word.capitalize()[0]
    prevcapitalized = prevword[0] == prevword.capitalize()[0]
    prev2capitalized = prev2word[0] == prev2word.capitalize()[0]
    nextcapitalized = nextword[0] == nextword.capitalize()[0]
    next2capitalized = next2word[0] == next2word.capitalize()[0]
    # Feature set 'E': All capitalization, all digitals, or digitals containing
    # punctuation (for center word only).
    allcaps = word.isupper() #word == word.upper()
    all_digits = word.isdigit()
    all_letters = word.isalpha()
    # NOTE: Zhang and Johnson use say "digitals containing punctuations"; we
    # interpret this in the same way as Turian, Ratinov and Bengio (2010)
    # to mean "all non-letters"
    # Feature set 'F': Token prefix (lengths 3 & 4), and suffix (1 - 4)
    prefix3 = word[:3]
    prefix4 = word[:4]
    suffix1 = word[-1:]
    suffix2 = word[-2:]
    suffix3 = word[-3:]
    suffix4 = word[-4:]
    features = {
        'word': word,
        'prevword': prevword,
        'prev2word': prev2word,
        'nextword': nextword,
        'next2word': next2word,
        'capitalized': capitalized,
        'prevcapitalized': prevcapitalized,
        'prev2capitalized': prev2capitalized,
        'nextcapitalized': nextcapitalized,
        'next2capitalized': next2capitalized,
        'allcaps': allcaps,
        'all_digits': all_digits,
        'all_letters': all_letters,
        'prefix3': prefix3,
        'prefix4': prefix4,
        'suffix1': suffix1,
        'suffix2': suffix2,
        'suffix3': suffix3,
        'suffix4': suffix4,
    }
    return features | f63f22692a8198bba05091b8fdd62cd22f714159 | 49,488
import random
def get_random_colour():
    """
    Returns a random colour from the entire colour spectrum.

    :return
        colour: a tuple that represents an RGB value
    """
    # The original looped over range(0, 2), which only randomises
    # indices 0 and 1 (red and green) and left blue permanently 0.
    return tuple(random.randint(0, 255) for _ in range(3))
import json
import requests
def download_rail_route(rr_file, verbose=False):
    """
    Downloads rail route data from the railrouter.sg repo.
    Source: https://github.com/cheeaun/railrouter-sg
    params
    ------
    rr_file (str): filename in the data directory in the repo
    verbose (bool): print downloaded route and pattern info
    returns
    -------
    rr_data (dict): the core component from the raw data
    patterns (list): a list of patterns
    """
    base_url = ('https://raw.githubusercontent.com/'
                'cheeaun/railrouter-sg/master/data/v2/')
    raw = requests.get(base_url + rr_file).text
    rr_data = json.loads(raw)['routes'][0]
    patterns = rr_data['patterns']
    if verbose:
        print("[Downloaded]")
        print("Route:", rr_data['name'])
        print("Patterns:")
        lines = [(str(i) + ': ' + p['name']) for i, p in enumerate(patterns)]
        print('\n'.join(lines))
    return rr_data, patterns
import os
import sys
def load_ref(obj_reference, paths=None, base_path=None):
    """Load an object from a python module.

    Parameters
    ----------
    obj_reference: str
        Reference to a python symbol, for instance 'pkg.mdl' to
        load the python module ``pkg.mdl``, or 'pkg.mdl:symbol'
        to load ``symbol`` from the python module ``pkg.mdl``.
        A path can be provided, for instance: '/a/b/pkg.mdl:symbol'.
    paths: str or list
        Search path to be added to ``sys.path``.
    base_path: str
        Base path for relative paths.

    Returns
    -------
    object
        The loaded object.

    Raises
    ------
    ImportError
        If the object cannot be loaded.
    """
    search_paths = [] if paths is None else list(paths)
    # An optional directory may be glued onto the reference: '/a/b/pkg:sym'.
    head, sep, tail = obj_reference.rpartition("/")
    if sep:
        search_paths.append(head)
        obj_reference = tail
    # Normalise every search path to an absolute path.
    resolved = []
    for entry in search_paths:
        if os.path.isabs(entry):
            resolved.append(entry)
        elif base_path is None:
            resolved.append(os.path.abspath(entry))
        else:
            resolved.append(os.path.normpath(os.path.join(base_path, entry)))
    module_ref, colon, symbol = obj_reference.partition(":")
    # Non-empty fromlist makes __import__ return the leaf submodule.
    fromlist = module_ref.split(".")[:-1]
    saved_sys_path = sys.path
    if resolved:
        sys.path = resolved + list(saved_sys_path)
    try:
        module = __import__(module_ref, fromlist=fromlist)
    finally:
        # Always restore sys.path, even if the import blew up.
        sys.path = saved_sys_path
    if not colon:
        return module
    return getattr(module, symbol)
def insert_pattern(target_node_names, new_nodes, graph):
    """Replace the specific pattern matched from the new constructed graph with new pattern.
    Args:
        target_node_names: A string list that contains the names of nodes that will be replaced
        new_nodes: a list contains nodes with Operator class
        graph: The Graph class
    Returns:
        graph: The Graph class which some nodes inside have been replaced.
    """
    # NOTE(review): graph.get_node_id is assumed to return the positional
    # index of a node in graph.nodes — confirm against the Graph class.
    # empty node_names
    if len(target_node_names) == 0:
        return graph
    # only one node
    elif len(target_node_names) == 1:
        node_name = target_node_names[0]
        # Capture the index before removal so the new nodes land in place.
        index = graph.get_node_id(node_name)
        graph.remove_nodes([node_name])
    else:
        # check the order
        # in some conditions, not every name in the target_node_names exists in the graph,
        # the node may be removed last time. For example,
        # a--b--c---d0--e0--f0
        # ---d1--e1--f1
        # ---d2--e2--f2
        # ....
        # ---dn--en--fn
        # the [dn--en--fn] has same op_type, and the algorithm finds n results in graph,
        # but at the first replace iteration, the [a,b,c] has been removed, so the next
        # iterations missing the [a,b,c]. Need to get the real head and tail node names.
        # Collect (position-in-target-list, position-in-graph) for the names
        # that still exist, then sort by graph position to get graph order.
        exist_node_index = []
        for i in range(len(target_node_names)):
            try:
                j = graph.get_node_id(target_node_names[i])
                exist_node_index.append([i, j])
            except BaseException:
                continue
        exist_node_index = sorted(exist_node_index, key=lambda x: x[1])
        exist_node_names = [target_node_names[i[0]] for i in exist_node_index]
        head_name = exist_node_names[0]
        tail_name = exist_node_names[-1]
        head_id = graph.get_node_id(head_name)
        tail_id = graph.get_node_id(tail_name)
        # in the graph.nodes[head_id:tail_id+1], there may be some other nodes
        # have input tensors of new_node
        index = head_id
        i = 0
        # Walk the graph span, removing matched nodes and skipping over any
        # interleaved nodes that are allowed to remain (extra input tensors).
        while i < len(exist_node_names):
            if exist_node_names[i] == graph.nodes[index].name:
                # remove_nodes shifts later nodes left, so index stays put.
                graph.remove_nodes([exist_node_names[i]])
                i += 1
            else:
                # if not has extra input tensors
                if len(exist_node_names) == (tail_id - head_id + 1):
                    raise ValueError("The target nodes have node {} while graph has node {}."\
                        .format(exist_node_names[i], graph.nodes[index].name))
                # if has extra input tensors
                else:
                    index += 1
    # insert new_nodes
    graph.insert_nodes(index, new_nodes)
    return graph | 6ca1e6f92b7ddd2ac837a4112c1c4a8f99bd1c1e | 49,493
def same_ratio(img_ratio, monitor_ratio, file):
    """Report whether the image aspect ratio matches the monitor's.

    :param img_ratio: Float
    :param monitor_ratio: Float
    :param file: Str (unused here; kept for the caller's interface)
    :return: Bool — True only when the ratios match exactly
    """
    proportion = img_ratio / monitor_ratio
    diff = int(abs(proportion - 1) * 100)
    if proportion > 1:
        print("Image is " + str(diff) + "% too wide for screen. Sides must be cropped off, or top/bottom filled.")
        return False
    if proportion < 1:
        print("Image is " + str(diff) + "% too narrow for screen. Top/bottom must be cropped off, or sides filled.")
        return False
    print("Image is the same aspect ratio as the screen.")
    # Matches the original behaviour: block until the user presses enter.
    input("Press enter to exit.")
    return True
def add_to_qs(context, *pairs):
    """ Adds item(s) to query string.
    Usage:
        {% add_to_qs key_1 value_1 key_2 value2 key_n value_n %}
    """
    query = context['request']['GET'].copy()
    # Consume pairs two at a time: (key, value), (key, value), ...
    stream = iter(pairs)
    for key, value in zip(stream, stream):
        query[key] = value
    return query.urlencode()
import codecs
def data_load(data_file):
    """
    Parse a tab-separated, blank-line-delimited sequence file.

    :param data_file: path to a UTF-8 file whose records are lines of four
        tab-separated fields; a line without tabs flushes the current sample
    :return: [[sent, label, seq_fea, ..., single_fea, ...], ...]
    """
    with codecs.open(data_file, "r", encoding="utf-8") as handle:
        samples = []
        tokens, tags, en_phones, tr_phones = [], [], [], []
        line_no = 0
        for raw in handle:
            fields = raw.strip().split("\t")
            if len(fields) == 4:
                tokens.append(fields[0])
                tags.append(fields[1])
                en_phones.append(fields[2])
                tr_phones.append(int(fields[3]))
                if int(fields[3]) == -1:
                    # Flag suspicious -1 phoneme ids, as the original did.
                    print(fields, line_no)
            elif len(fields) == 1:
                # Separator line: flush the accumulated sample, if any.
                if tokens and tags and en_phones and tr_phones:
                    samples.append([tokens, tags, en_phones, tr_phones[0]])
                    tokens, tags, en_phones, tr_phones = [], [], [], []
            else:
                print("wrong", raw, line_no)
            line_no += 1
    return samples
from typing import OrderedDict
def f_state_dict_wrapper(state_dict, data_parallel=False):
    """Adjust state_dict keys for (non-)DataParallel model loading.

    f_state_dict_wrapper(state_dict, data_parallel):
      state_dict: pytorch state_dict
      data_parallel: whether DataParallel is used

    https://discuss.pytorch.org/t/solved-keyerror-unexpected-
    key-module-encoder-embedding-weight-in-state-dict/1686/3
    """
    remapped = OrderedDict()
    if data_parallel is True:
        # DataParallel expects every key to carry the 'module.' prefix.
        for key, tensor in state_dict.items():
            new_key = key if key.startswith('module') else 'module.' + key
            remapped[new_key] = tensor
    else:
        # Plain model: strip the 'module.' prefix (7 chars) where present.
        for key, tensor in state_dict.items():
            new_key = key[7:] if key.startswith('module') else key
            remapped[new_key] = tensor
    return remapped
import argparse
def get_input_args():
    """
    Retrieves and parses 8 command line arguments provided by the user when
    they run the class from a terminal window.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dt_path', type=str, default='ts.csv', help="path to your time series data")
    parser.add_argument('--value_column', type=str, default='Value', help="Value column name")
    parser.add_argument('--time_format', type=str, default='%Y-%m-%d %H:%M:%S', help="time format")
    parser.add_argument('--freq', type=str, default='H', help="time series modelling frequency")
    parser.add_argument('--transform', type=str, default='', help="time series transformation")
    parser.add_argument('--p_train', type=float, default=0.7, help="size of test data")
    parser.add_argument('--method', type=str, default='auto_arima', help="method to use")
    parser.add_argument('--n_forecast', type=int, default=5, help="number of periods to be forecasted")
    return parser.parse_args()
def _unsupported_config(ctx):
"""Checks whether we're building with an unsupported configuration.
See b/128308184 for context.
Args:
ctx: The current rule context
Returns:
An empty string if the current build is supported;
otherwise, a string with the name of the unsupported configuration.
"""
if ctx.var.get("msan_config") == "true":
return "--config=msan"
return "" | 465b42070b3d97d6420f6a1da3c42fa56cc2bfa7 | 49,499 |
import os
def _default_pptx_path():
"""
Return the path to the built-in default .pptx package.
"""
_thisdir = os.path.split(__file__)[0]
return os.path.join(_thisdir, "templates", "default.pptx") | ede3fa6ceb9cf610cea579bcb036e20edeb2299a | 49,500 |
import yaml
def calculator_swagger():
    """
    Swagger API documentation for calculator API service.

    Returns:
        The parsed YAML document from ./static/swagger.yaml.
    """
    # 'with' guarantees the handle is closed even if read/parse raises;
    # the original leaked the file handle on any exception.
    with open('./static/swagger.yaml', 'r') as f:
        response = f.read()
    return yaml.safe_load(response)
import re
def valid_uniprot_ac_pattern(uniprot_ac):
    """
    Checks whether a UniProt AC is formally correct according to
    https://www.uniprot.org/help/accession_numbers
    This is no check whether it actually exists.

    :param uniprot_ac: Accession code to be checked
    :return: True when the whole string matches the accession grammar.
    """
    ac_pat = "[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}"
    # fullmatch (rather than the unanchored match) rejects strings that
    # merely *start* with a valid accession, e.g. 'P12345junk'.
    return re.fullmatch(ac_pat, uniprot_ac) is not None
import subprocess
def grep_terms(term1, term2, filename):
    """
    Get lines from *filename* that contain term1 followed by term2
    (possibly separated by whitespace-terminated text), via grep -E.

    Returns:
        list[str]: grep's stdout split on newlines (last element is '').
    """
    # Pass arguments as a list with the default shell=False: the original
    # interpolated the terms into a shell string (command-injection risk)
    # and grepped a hard-coded placeholder instead of `filename`.
    pattern = f"{term1} ((.*\\s)*){term2}"
    result = subprocess.run(["grep", "-E", pattern, filename],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return result.stdout.decode().split("\n")
def compute_mse(theta_0, theta_1, data):
    """
    Compute the mean squared error of the line y = theta_0 + theta_1 * x.

    :param theta_0: float - intercept of the line
    :param theta_1: float - slope of the line
    :param data: np.array - dataset matrix, x in column 0, y in column 1
    :return: float - the mean squared error
    """
    residuals = theta_0 + theta_1 * data[:, 0] - data[:, 1]
    # Vectorised .mean() replaces the python-level sum()/shape[0] division
    # and the redundant "1 *" factor of the original.
    return (residuals ** 2).mean()
def AIC(params, log_likelihood_fun, data):
    """Calculates the AIC (Akaike information criterion).
    Parameters
    _________
    params : tuple
        MLE parameters for distribution
    log_likelihood_fun : function
        calculates the log likelihood for the desired distribution
    data : array
        empirical dataset to calculate log likelihood with respect to
    Returns
    _________
    output : float
        AIC value, 2k - 2*log(L)
    """
    log_likelihood = log_likelihood_fun(params, data)
    k = len(params)
    return 2 * k - 2 * log_likelihood
def depthwise_conv2d(attrs, args):
    """Check if the external ACL codegen for depthwise convolution should be used.
    Note
    ----
    Relay does not have a depthwise conv2d operator whilst ACL does. We simply
    separate the checks for depthwise for clarity.
    """
    kernel_typ = args[1].checked_type
    kernel_h = kernel_typ.shape[0]
    kernel_w = kernel_typ.shape[1]
    # Only square 3x3 or 5x5 kernels are supported.
    if kernel_h != kernel_w or kernel_h not in [3, 5]:
        return False
    # Stride must be (1, 1) or (2, 2).
    if (attrs.strides[0], attrs.strides[1]) not in [(1, 1), (2, 2)]:
        return False
    return True
import json
def get_database_access(path=None):
    """
    Put a json file at your home directory:
    "~/access_information.json"
    {
        "database_name": {
            "host": "database-db.host.net",
            "user": "user",
            "password": "1234",
            "database": "database_name",
            "port": 5432
        },
    }
    Parameters
    ----------
    :path: receives access_information.json path
    :return: dict parsed from the json file
    """
    import os
    database_file_name = '~/access_information.json' if path is None else path
    # expanduser makes the '~' default actually resolve: open() performs no
    # shell-style tilde expansion, so the original always failed for the
    # default path. (Also: compare to None with 'is', not '==').
    with open(os.path.expanduser(database_file_name), "r") as database_file:
        return json.load(database_file)
import threading
def _start_background_task(target, *args, **kwargs):
"""We are monkey patching here to start threads in ``daemon`` mode.
Original docs below:
The patch allows for clean exits out of python.
Start a background task.
This is a utility function that applications can use to start a
background task.
Args:
target: the target function to execute
*args: arguments to pass to the function
**kwargs: keyword arguments to pass to the function
Returns:
an object compatible with the ``Thread`` class in the Python standard
library. The ``start()`` method on this object is called by this
function before returning it
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs, daemon=True)
th.start()
return th | 8f79cdfab5590c87d709ea321cbf09bf1bba7aa8 | 49,514 |
def five_options_around(page):
    """ Create five page numbers around current page for discovery pagination. """
    total = page.paginator.num_pages
    current = page.number
    if current <= 3:
        # Near the start: anchor the window at page 1.
        window = range(1, min(5, total) + 1)
    elif current >= total - 2:
        # Near the end: anchor the window at the last page.
        window = range(max(total - 4, 1), total + 1)
    else:
        # Middle: centre the window on the current page.
        window = range(max(1, current - 2), min(current + 2, total) + 1)
    return list(window)
import base64
def parse_authorization_header(auth_header):
    """ Parse a Basic auth header and return (login, password).

    Splits on the first ':' only, so passwords containing colons survive
    intact (RFC 7617 allows ':' inside the password; the original
    unbounded split truncated such passwords).
    """
    encoded = auth_header.split(' ')[1]  # drop the 'Basic ' scheme prefix
    decoded = base64.b64decode(encoded).decode()
    login, _, password = decoded.partition(':')
    return login, password
import math
def calculateATan(diff_x, diff_y):
    """ Given the difference of the coordinates, return the angle in [0, 2*pi). """
    dx = diff_x + 0.0
    dy = diff_y + 0.0
    # Axis-aligned cases first: atan(dy/dx) would divide by zero.
    if dx == 0:
        return math.pi / 2 if dy > 0 else math.pi * 3 / 2
    if dy == 0:
        return 0 if dx > 0 else math.pi
    base = math.atan(dy / dx)
    if dx > 0 and dy > 0:
        # Quadrant I: atan already gives the answer.
        return base
    if dx < 0:
        # Quadrants II and III share the same +pi correction.
        return base + math.pi
    # Quadrant IV: shift the negative atan result into [0, 2*pi).
    return base + 2 * math.pi
def calc_kcorrected_properties(frequency, redshift, time):
    """
    Perform k-correction.

    :param frequency: observer frame frequency
    :param redshift: source redshift
    :param time: observer frame time
    :return: (k-corrected frequency, source frame time)
    """
    scale = 1 + redshift
    return frequency * scale, time / scale
def shift(coord, offset):
    """
    (tuple, tuple) -> tuple
    Shift a (letter, number) coordinate by (letter_offset, number_offset);
    the letter component moves through the alphabet via its ordinal.
    """
    letter, number = coord
    d_letter, d_number = offset
    return chr(ord(letter) + d_letter), number + d_number
def tokenize_options(options_from_db, option_name, option_value):
    """
    Tokenize an option string stored in the database.
    e.g. the database stores the value as below
        key1=value1, key2=value2, key3=value3, ....
    This function extracts each key and value from that string.
    Args:
        options_from_db: Options from database (or None)
        option_name: dict key under which each option key is stored
        option_value: dict key under which each option value is stored
    Returns:
        List of {option_name: key, option_value: value} dicts
        (empty when options_from_db is None).
    """
    if options_from_db is None:
        return []
    tokenized = []
    for entry in options_from_db.split(','):
        # Split on the first '=' only: values may themselves contain '='.
        key, value = entry.split('=', 1)
        tokenized.append({option_name: key, option_value: value})
    return tokenized
def check_int(num):
    """Check if the argument can be converted to an integer.

    Args:
        num: value to test (string, number, None, ...).

    Returns:
        bool: True if ``int(num)`` succeeds, False otherwise.
    """
    # TypeError is caught as well: the original only caught ValueError, so
    # non-numeric types such as lists crashed instead of returning False.
    # int(None) also raises TypeError, which covers the old None special-case.
    try:
        int(num)
    except (ValueError, TypeError):
        return False
    return True
def find_min_max(shape):
    """Finds min/max coordinates for a given shape and returns a tuple
    of the form (minx, maxx, miny, maxy).

    shape: list of (x, y) points

    Uses +/-inf sentinels instead of the original's +/-1000, which
    silently produced wrong extrema for any coordinate outside
    the [-1000, 1000] range. An empty shape yields infinite sentinels.
    """
    minx = miny = float('inf')
    maxx = maxy = float('-inf')
    for x, y in shape:
        minx = min(minx, x)
        maxx = max(maxx, x)
        miny = min(miny, y)
        maxy = max(maxy, y)
    return (minx, maxx, miny, maxy)
def all_worddict(wordset, flairs):
    """Build zeroed word-count dicts, one per flair.

    Args:
        wordset: iterable of all distinct words.
        flairs: iterable of flair names.

    Returns:
        dict: flair -> independent {word: 0} dict for every word in wordset.
    """
    return {flair: dict.fromkeys(wordset, 0) for flair in flairs}
def create_dict(breadcrumbs, value=None):
    """
    Create a nested dict out of the breadcrumbs in a recursive manner.
    Each entry in the breadcrumbs should be a valid dictionary key.
    If value is None, the last string within the breadcrumbs becomes the
    final value.
    :param breadcrumbs: list of keys, outermost first
    :param value: value stored under the innermost key (optional)
    :return: nested dict (or the bare value / last key at the recursion base)
    """
    if value is not None:
        if not breadcrumbs:
            # Explicit value supplied: it terminates the recursion.
            return value
    elif len(breadcrumbs) == 1:
        # No value supplied: the last breadcrumb itself is the value.
        return breadcrumbs[0]
    # The original nested this elif under `value is not None`, which both
    # dropped the supplied value (returning the key instead) and raised
    # IndexError on an empty list when value was None.
    return {breadcrumbs[0]: create_dict(breadcrumbs[1:], value)}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.