content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def read_data(input_file):
    """Read all lines from the input file.

    :param input_file: path to the file to read
    :return: list of lines, line endings stripped
    :raises ValueError: if the file cannot be opened or read
    """
    try:
        with open(input_file, 'r') as infile:
            return infile.read().splitlines()
    except OSError as error:
        # OSError covers FileNotFoundError, PermissionError, IsADirectoryError,
        # etc. (FileExistsError, caught before, never occurs on a read).
        # Chaining preserves the original cause for debugging.
        raise ValueError(error) from error
|
fa4f39aa0fe91bd2309ac0149969cd923cf53418
| 57,657
|
def updateGameReleaseDate(gtitle: str, new_rdate: str) -> str:
    """Return a query to update the release date of a given game.

    Single quotes in the inputs are doubled (standard SQL escaping) so a
    value containing ' cannot break out of the literal and inject SQL.
    Prefer driver-level parameterized queries where available.

    :param gtitle: title of the game to update
    :param new_rdate: new release-date value
    :return: the UPDATE statement as a string
    """
    safe_rdate = new_rdate.replace("'", "''")
    safe_title = gtitle.replace("'", "''")
    return (f"UPDATE game "
            f"SET release_date='{safe_rdate}' "
            f"WHERE title='{safe_title}';"
            )
|
26ea9ad62a99fd210a250f2739f6142b9ecdd3de
| 57,659
|
def return_name_html(info):
    """Extract the player name from a PBP position/name string.

    The input looks like 'Center - MIKE RICHARDS'. Everything after the
    FIRST hyphen is the name (last names may themselves contain hyphens,
    so only the first one acts as a separator).

    :param info: position and name
    :return: name
    """
    first_hyphen = info.index('-')
    name = info[first_hyphen + 1:]
    return name.strip(' ')
|
2b616a244e9f5d11b9b226d3ca4b2d8dddf84990
| 57,660
|
import yaml
def configMap(name, **kwargs):
    """Build a Kubernetes ConfigMap manifest and return it as YAML text.

    >>> import nuvolaris.kube as kube, nuvolaris.testutil as tu
    >>> tu.grep(kube.configMap("hello", value="world"), "kind:|name:|value:", sort=True)
    kind: ConfigMap
    name: hello
    value: world
    >>> tu.grep(kube.configMap("hello", **{"file.js":"function", "file.py": "def"}), "file.", sort=True)
    file.js: function
    file.py: def
    """
    # Build the manifest dict directly instead of parsing a YAML template;
    # yaml.dump sorts keys, so the rendered output is unchanged.
    manifest = {
        'apiVersion': 'v1',
        'kind': 'ConfigMap',
        'metadata': {'name': name},
        'data': dict(kwargs),
    }
    return yaml.dump(manifest)
|
fdae8ac67a068946cf320aeb49b3753e6c26487e
| 57,665
|
def _sequence2num(sequence):
""" Returns the input sequence as a number. """
if isinstance(sequence, str):
sequence = int(sequence[2])
return sequence
|
78b02906758bf0f8d62ab7b8cca97b67bb6522ed
| 57,667
|
def HeaderType(string):
    """Returns ArgDict type for headers.

    Splits on the FIRST ':' only, so header values that themselves contain
    colons (e.g. URLs) are preserved intact; the old unbounded split raised
    ValueError for such values.

    :param string: header in 'Name: value' form
    :return: single-entry dict mapping header name to value
    """
    header, value = string.split(':', 1)
    return {header: value.lstrip()}
|
a27d9e524f38cd198627c4f10ccca0cb902e806f
| 57,669
|
def list_plugins(module):
    """Return all objects of the module that satisfy the basic
    plugin needs: id, name and don't start with '_'
    If '__all__' is defined, only plugins in '__all__' will be loaded.
    """
    try:
        candidates = [getattr(module, name) for name in module.__all__]
    except AttributeError:
        # No __all__: fall back to every public attribute.
        candidates = [getattr(module, name) for name in vars(module)
                      if not name.startswith("_")]
    plugins = []
    for candidate in candidates:
        if not hasattr(candidate, "PLUGIN_ID"):
            continue
        if not hasattr(candidate, "PLUGIN_NAME"):
            # Default the display name to the id.
            candidate.PLUGIN_NAME = candidate.PLUGIN_ID
        plugins.append(candidate)
    return plugins
|
900b7db5f1d3f38ea380a676f6481655ed026f92
| 57,671
|
def get_check_status(error_list, warning_list):
    """
    Return check status.
    --"Passed": no error, no warning
    --"Passed With Warnings": no error, but have warnings
    --"Failed": have errors
    """
    # Errors dominate warnings.
    if error_list:
        return "Failed"
    return "Passed With Warnings" if warning_list else "Passed"
|
272ad16952656a3ce06c34390c49b08ef6bbb532
| 57,672
|
def fmt(obj):
    """fmt(obj)(form) = form % obj
    With this ordering, this is useful for formatting a fixed object
    in many different ways, for example:
    >>> list(map(fmt(10), ["0x%x", "%d"]))
    ['0xa', '10']
    If you want to format lots of objects according to the same spec,
    use format_as.
    """
    def apply_format(format_string):
        return format_string % obj
    return apply_format
|
2a1e6af6c80ebccb2eaacf344d38e965991cf4a5
| 57,675
|
def has_children(obj_json, client):
    """Checks whether an archival object has children using the tree/node endpoint.

    :param obj_json: archival object JSON; must contain 'resource.ref' and 'uri'
    :param client: HTTP client whose .get() returns a response with .json()
    :return: True if the tree node reports at least one child
    """
    resource_uri = obj_json['resource']['ref']
    tree_node = client.get('{}/tree/node?node_uri={}'.format(resource_uri, obj_json['uri'])).json()
    # 'x > 0' is already a bool; 'True if ... else False' was redundant.
    return tree_node['child_count'] > 0
|
0933dbdda99b1577ecc78d4b9dc73f50dcc72b4e
| 57,676
|
def parse_sas_token(sas_token):
    """Parse a SAS token into its components.
    :param sas_token: The SAS token.
    :type sas_token: str
    :rtype: dict[str, str]
    """
    # Drop the scheme prefix (everything up to and including the first space),
    # then split the remaining query-style string into key=value fields.
    token = sas_token.partition(' ')[2]
    return {
        key.lower(): value
        for key, value in (field.split('=', 1) for field in token.split('&'))
    }
|
957665a70fb10e9e7688a9f7ecb637bcf7f82ab6
| 57,682
|
def scale_simulation_fit(simulated_value, actual_value, number_individuals, total_individuals):
    """
    Goodness of fit for the provided values, scaled by the share of all
    individuals the metric covers:
    (1 - abs(x - y)/max(x, y)) * n/n_tot for x, y the simulated and actual
    values and n, n_tot the metric and total individual counts.
    :param simulated_value: the simulated value of the metric
    :param actual_value: the actual value of the metric
    :param number_individuals: the number of individuals this metric relates to
    :param total_individuals: the total number of individuals across all sites for this metric
    :return: the scaled fit value
    """
    goodness = 1 - abs(simulated_value - actual_value) / max(simulated_value, actual_value)
    return goodness * number_individuals / total_individuals
|
04f6364012c044387ab4007941321a655450e232
| 57,684
|
def t_STRING(t):
    """Return the parsed string value."""
    # NOTE(review): if this is a PLY lexer rule, PLY treats the function's
    # docstring as the token's regular expression -- the prose above would
    # then BE the pattern. Confirm against the lexer definition before
    # changing the docstring.
    return t
|
8a57d135da013c2bb7506990181dc280b608a200
| 57,685
|
def chi_resonant(x, amplitude, pos, width):
    """Lorentzian chi resonance.
    Parameters
    ----------
    x : np.array
        The x axis, wavenumbers of frequencies
    amplitude:
        The amplitude of the resonance
    pos:
        The position of the resonance
    width:
        The FWHM of the resonance
    """
    detuning = pos - x
    half_width = width / 2
    denominator = detuning**2 + half_width**2
    real_part = amplitude * detuning / denominator
    imag_part = amplitude * half_width / denominator
    return real_part + 1j * imag_part
|
8ab2268e0e5af65936af212f0c776336d23c2605
| 57,688
|
def find_index_list(inputlist, key):
    """Return a list of all indices at which key occurs in inputlist."""
    start = 0
    indexlist = []
    while True:
        try:
            index = inputlist.index(key, start)
        except ValueError:
            # No further occurrence of key. A bare 'except:' here would
            # also have swallowed KeyboardInterrupt and unrelated errors.
            break
        indexlist.append(index)
        start = index + 1
    return indexlist
|
453365cc45c927446f08b9d3f448fcd62c54f6da
| 57,689
|
def get_dataset_dates(df):
    """
    Input: dataset with date as one of the MultiIndex
    Output: a list of all dates in this dataset as datetime objects
    """
    # Unique dates from the first index level, sorted chronologically,
    # converted from pandas Timestamps to plain datetime objects.
    unique_dates = sorted(set(df.index.get_level_values(0)))
    return [ts.to_pydatetime() for ts in unique_dates]
|
48abdf4e5f9f5b1eda6a1d531aec63f6c61aeb2e
| 57,691
|
def validate(name, bracket, bracket_side, bfr):
    """
    Check if bracket is lowercase
    """
    # Only the buffer slice covered by the bracket matters; the other
    # parameters are part of the plugin-style call signature.
    content = bfr[bracket.begin:bracket.end]
    return content.islower()
|
94b0790245d6590391fab9dc910cecd6aa7ab0d9
| 57,693
|
def cycle_length(k: int) -> int:
    """
    Computes the repeated cycle length of the decimal expansion of 1/k.
    e.g.
    1/6 = 0.1(6) -> 1
    1/7 = 0.(142857) -> 6
    For k not a multiple of 2 or 5, 1/k has a cycle of d digits
    where d is the smallest d with 10^d == 1 (mod k).
    """
    # Factors of 2 and 5 only affect the non-repeating prefix; strip them.
    for p in (2, 5):
        while k % p == 0:
            k //= p
    if k == 1:
        # Terminating decimal: no repeating cycle.
        return 0
    # Compute the multiplicative order of 10 modulo k.
    digits = 1
    remainder = 10 % k
    while remainder != 1:
        remainder = remainder * 10 % k
        digits += 1
    return digits
|
f4516683928174fa1e730074c40892e7a56ac0e4
| 57,694
|
def calc_RC_via_bulk_HV_time(capacitance, L, sigma):
    """
    Characteristic charging and relaxation time of the electric double layer
    through the bulk electrolyte (units: s).
    Notes:
        Adjari, 2006 - "Relaxation time at high voltages"
        At high voltages the Stern/dielectric layer dominates the capacitance
        of the double layer and the relaxation time changes.
    """
    return capacitance * L / sigma
|
efa1cdbb83f4e636e171c300a63539c461112d33
| 57,695
|
def calculate(follower_count, avg_like_count):
    """Calculate the price of a post based on followers and likes."""
    cost_per_follower = 0.0025
    cost_per_like = 0.025
    return follower_count * cost_per_follower + avg_like_count * cost_per_like
|
c245cf7fa600e09d067f0ae559c7346cea891c8b
| 57,698
|
import re
def ElementToComplexType(xsd):
    """Replaces the first <element> tag with the complexType inside of it.
    The complexType tag will use the name attribute from the removed element tag.
    Args:
      xsd: str XSD string contents.
    Returns:
      str: Modified XSD.
    """
    # Collapse '<element name="X"><complexType>' into '<complexType name="X">',
    # keeping the namespace prefix (group 3) of the complexType tag.
    xsd = re.sub(r'<(\w+:)?element name="(\w+)">\s*<(\w+:)?complexType>',
                 r'<\3complexType name="\2">',
                 xsd)
    # Drop the now-dangling closing element tag, then any XML declaration.
    xsd = re.sub(r'(\s+)?</(\w+:)?element>', '', xsd)
    return re.sub(r'<\?xml.*?>', '', xsd)
|
5c78d07a8a900dc84512a69342ef82f00b92ed33
| 57,699
|
def identify_env(G):
    """
    Identify the environment of the central node (index 0) of a graph
    Input: - G Input graph
    Output: - env environment of the central node in G
    """
    neighbour_elements = []
    for edge in G.edges:
        if 0 not in edge:
            continue
        # The neighbour is whichever endpoint is not the central node.
        neighbour = edge[1] if edge[0] == 0 else edge[0]
        neighbour_elements.append(G.nodes[neighbour]["elem"])
    # Environment string with neighbours sorted alphabetically.
    return "-".join(sorted(neighbour_elements))
|
78e4f42132ce3a0445328e403064b9f969ba8f6b
| 57,700
|
import secrets
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise.

    Uses secrets.compare_digest so the comparison time does not leak how
    many leading characters match.
    """
    a = val1.encode("utf-8")
    b = val2.encode("utf-8")
    return secrets.compare_digest(a, b)
|
8600706387d3cca097fdd7a2d84360665a1a60e6
| 57,701
|
def top_hat_width(subband_df, subband_f0, DM):
    """Width of a top-hat pulse to convolve with pulses for dispersion
    broadening. Following Lorimer and Kramer, 2005 (sec 4.1.1 and A2.4).
    subband_df : subband bandwidth (MHz)
    subband_f0 : subband center frequency (MHz)
    DM : dispersion measure (pc/cm^3)
    return top_hat_width (milliseconds)
    """
    DISPERSION_CONST = 4.148808e3  # sec*MHz^2*pc^-1*cm^3
    width_sec = 2 * DISPERSION_CONST * DM * subband_df / subband_f0**3
    # Convert seconds to milliseconds.
    return width_sec * 1.0e+3
|
5e6d399a3d78952ef6ea7f30101ebc5bddd2bc8a
| 57,705
|
def float_to_mac(mac_as_float_str):
    """Convert a float string to a mac address string
    Args:
        mac_as_float_str (str): float represented as a string e.g. "123456.0"
            This float should be a whole number. (Right of the decimal == 0)
    Returns:
        MAC Address as a string. e.g. "00:00:00:01:e2:40"
    """
    whole_part = mac_as_float_str.split('.')[0]
    hex_digits = '%012x' % int(whole_part)
    # Group the 12 hex digits into 6 colon-separated octets.
    return ':'.join(hex_digits[i:i + 2] for i in range(0, 12, 2))
|
5ae561ff56710d256557be71ddbfdd47da3ccde6
| 57,710
|
def tanimoto_sim(items, x, y, cache=False):
    """
    Tanimoto-based score between vectors x and y over their non-zero
    positions: 1 - |shared| / |union|.  `items` and `cache` are accepted
    for interface compatibility and are unused here.
    """
    nonzero_x = 0
    nonzero_y = 0
    shared = 0
    for i, a in enumerate(x):
        b = y[i]
        if a != 0:
            nonzero_x += 1
        if b != 0:
            nonzero_y += 1
        if a != 0 and b != 0:
            shared += 1
    return 1.0 - (float(shared) / (nonzero_x + nonzero_y - shared))
|
2f247acbc15015a8d3d632b18d13197ce3345302
| 57,714
|
def percentOf(percent, whole):
    """Get percent of a whole.

    Builds the fraction by prefixing '0.' to the percent's digits
    (so 50 -> 0.50) and truncates the product to an int.
    """
    fraction = float(f'0.{percent}')
    return int(fraction * whole)
|
f3dece35325bc6907ce75af783f3bf298ed79676
| 57,715
|
def get_which_data_rows(model, which_data_rows):
    """
    Helper to get the data rows to plot.

    'all' or None selects every row (as slice(None)); anything else is
    returned unchanged for fancy indexing.
    """
    if which_data_rows is None or which_data_rows == 'all':
        return slice(None)
    return which_data_rows
|
20ea954255b56a0800ad25647da0a9adfd81def3
| 57,719
|
def del_intermediate_node(node):
    """Deletes a node in the middle of a linked list.

    Only the to-be-deleted node is accessible, so the successor's data is
    copied into it and the successor is unlinked instead.

    Args:
        node: Node instance to delete. Must not be missing or the tail node.
    Returns:
        True if successful, False otherwise.
    """
    # Neither a missing node nor the tail can be deleted this way.
    if not node or not node.next_node:
        return False
    successor = node.next_node
    node.data = successor.data
    node.next_node = successor.next_node
    return True
|
3180fa3f20b50944eb76e18b7b0c8b2735cf76b2
| 57,720
|
from dateutil import tz
def dt_as_utc(dt):
    """Convenience wrapper, converting the datetime object dt to UTC.

    None passes through unchanged so optional datetimes can be forwarded.
    """
    return None if dt is None else dt.astimezone(tz.tzutc())
|
c148d2dc3d8164324661ff6f4f070c49b6406ac7
| 57,721
|
def bytes2str(b):
    """Decode UTF-8 bytes (e.g. from subprocess.check_output) to str.

    :param b: bytes to decode
    :return: the decoded text
    """
    # bytes.decode already returns str; the old str() wrapper was redundant.
    return b.decode("utf-8")
|
fddbf78a011163f1403bcd4c10d068a229804ce4
| 57,722
|
import torch
def reduce_embedding_attributions(attributions: torch.Tensor) -> torch.Tensor:
    """Reduces attributions of embedding weights to token level
    Args:
        attributions (torch.Tensor): Embeddings layer attributions
            (from `LayerIntegratedGradients.attribute` method)
    Returns:
        torch.Tensor: Token-wise attributions
    """
    # Collapse the embedding dimension, drop the batch axis, L2-normalize,
    # then detach and move the result to the CPU.
    token_scores = attributions.sum(dim=2).squeeze(0)
    token_scores = token_scores / torch.norm(token_scores)
    return token_scores.cpu().detach()
|
234bb81d1a757e315d73f2b9e8db9201605a20f4
| 57,724
|
def get_doc(log, docs, index):
    """Get one document from a multi-document source."""
    max_index = len(docs) - 1
    if index > max_index:
        # Report a fatal error through the provided logger; the subsequent
        # out-of-range access is left to fail as before.
        log.critical((
            "DOCUMENT_INDEX is too high; the maximum zero-based index is {}"
            " when the document count is {}."
        ).format(max_index, len(docs)), 1)
    return docs[index]
|
543c4f88f91c853e8746fc78e4acb7ad6deb9c93
| 57,729
|
def _get_type_to_speed(cfs):
"""Given a list of charging functions, returns an object whose keys
are the CS types and values are speed rank.
Speed rank is a CS type's (0-indexed) position in the ordered list of fastest CS types.
"""
# compute max charge rates by type
result = [{
'cs_type': cf['@cs_type'],
'max_rate': (
(float(cf['breakpoint'][1]['battery_level'])-float(cf['breakpoint'][0]['battery_level'])) /
(float(cf['breakpoint'][1]['charging_time']) -
float(cf['breakpoint'][0]['charging_time']))
)} for cf in cfs]
# assign each type its speed rank (lowest = fastest --> highest = slowest)
result = sorted(result, key=lambda x: x['max_rate'], reverse=True)
for i, entry in enumerate(result):
entry.update({'speed_rank': i})
# return dict type:speed_rank
return {cf['cs_type']: cf['speed_rank'] for cf in result}
|
ca866af02fb39f31779de1bd7b3e6267c363b5b8
| 57,733
|
def cube(x, alpha=1.0):
    """
    Cubic nonlinearity.

    `alpha` is accepted for interface compatibility but not used.
    """
    return x ** 3
|
7a406bc48a4690f71e716b4c5a3e8b57b3605513
| 57,734
|
def month_year(dte):
    """
    Return a string M-YYYY for the given date; the month is not
    zero-padded, so January renders as '1-2020'.
    Argument:
    dte - the dte to use in making the string
    """
    return f"{dte.month}-{dte.year}"
|
030bd9d97d59f59875c40f8defccad410dae5ccf
| 57,738
|
def make_slack_message_field_section(fields: list) -> dict:
    """Generates an object compatible with Slack's text field section.
    Args:
        fields: A list of stringifiable objects. This should be an even
            number since Slack formats fields as two per line,
            generally consisting of key/value pairs.
    Returns:
        A text field section message for delivery via Slack.
    """
    return {
        'type': 'section',
        'fields': [{'type': 'mrkdwn', 'text': str(field)} for field in fields],
    }
|
a06fc82ae272b0ef4d6fc204343dce8657f20c68
| 57,744
|
def sanitize_phonenumber(phonenumber):
    """Coerce a phone number to str, since all numbers are stored as strings."""
    return str(phonenumber)
|
218ba87f831d484829a4998bc65be82c475c2e74
| 57,753
|
from typing import Pattern
from typing import Tuple
import re
def strip_re(regex: Pattern, s: str) -> Tuple[str, str]:
    """Strip a substring matching a regex from a string and return the stripped part
    and the remainder of the original string.
    Returns an empty string and the original string if the regex is not found
    """
    match = re.compile(regex).search(s)
    if match is None:
        return "", s
    # Head runs up to the end of the match; tail is everything after.
    split_at = match.end()
    return s[:split_at], s[split_at:]
|
6b1a9c8fe28244870208659cfb738319ec9f351d
| 57,755
|
def parseDependencyFile(path, targetSuffix):
    """Parse a .d file and return the list of dependencies.
    @param path: The path to the dependency file.
    @type path: string
    @param targetSuffix: Suffix used by targets.
    @type targetSuffix: string
    @return: A list of dependencies.
    @rtype: list of string
    """
    dependencies = []
    uniqueDeps = set()
    def addPath(path):
        # Record each dependency once, unescaping any escaped spaces.
        if path and path not in uniqueDeps:
            uniqueDeps.add(path)
            path = path.replace('\\ ', ' ')  # fix escaped spaces
            dependencies.append(path)
    # 'with' guarantees the file handle is closed even on error,
    # replacing the original try/finally.
    with open(path, 'rt') as f:
        text = f.read()
    text = text.replace('\\\n', ' ')  # join escaped lines
    text = text.replace('\n', ' ')  # join other lines
    text = text.lstrip()  # strip leading whitespace
    # Find the 'target:' rule
    i = text.find(targetSuffix + ':')
    if i != -1:
        text = text[i + len(targetSuffix) + 1:]  # strip target + ':'
        while True:
            text = text.lstrip()  # strip leading whitespace
            i = text.find(' ')
            while i != -1 and text[i - 1] == '\\':  # skip escaped spaces
                i = text.find(' ', i + 1)
            if i == -1:
                addPath(text)
                break
            else:
                addPath(text[:i])
                text = text[i:]
    return dependencies
|
14b624c1870083e154d896d77ed41133a4e5ef36
| 57,757
|
from datetime import datetime
def is_date_between_range(date: datetime, start_range: datetime,
                          end_range: datetime) -> bool:
    """Is the given date between the start and end range (inclusive).
    Args:
        date (datetime): the date to check
        start_range (datetime): the start of the range
        end_range (datetime): the end of the range
    Returns:
        bool: true if date is between range otherwise false
    """
    # Chained comparison reads as the mathematical interval test.
    return start_range <= date <= end_range
|
c6d358b298354cd90d3e97f6c4558d18a4b570ba
| 57,758
|
def build_mount(disk, path, read_only):
    """Build a Mount object for a Pipeline request.
    Args:
        disk (str): Name of disk to mount, as specified in the resources section.
        path (str): Path to mount the disk at inside the container.
        read_only (boolean): If true, disk is mounted read only in the container.
    Returns:
        An object representing a Mount.
    """
    mount = {}
    mount['disk'] = disk
    mount['path'] = path
    mount['readOnly'] = read_only
    return mount
|
b3346f477376b2239bf2977087775d984ae5abae
| 57,759
|
def normalize(values):
    """Scale a list of values so they sum to 1."""
    total = sum(values)
    scaled = []
    for value in values:
        scaled.append(1.0 * value / total)
    return scaled
|
28cd9fd49e3add28a088d38fafb83271d1b2f84a
| 57,761
|
def label_from_instance_with_pk(obj, val):
    """Associates a label with the pk of the object that generated it.
    This is needed because we cannot rely on the order elements will appear
    in select2. By doing a binding like this, we are sure to associate a pk
    with the correct label (labels are shown, pks are submitted)."""
    return f"{obj.pk}:{val}"
|
0b9810e28ed41cca528606618d658781f816a85d
| 57,764
|
from typing import Iterable
def all_elements_are_instances(iterable: Iterable, Class) -> bool:
    """Returns ``True`` if all elements of iterable are instances of Class.
    False otherwise.
    """
    # Generator instead of a materialized list lets all() short-circuit
    # on the first non-instance.
    return all(isinstance(e, Class) for e in iterable)
|
747e4b518789a7843fb46f37a4a473706ce8fd63
| 57,765
|
def _are_points_in_same_y_side(p1, p2):
    """
    Return True if the two points lie on OPPOSITE sides of the y=0 axis,
    i.e. their y coordinates have strictly opposite signs.

    NOTE(review): the function name and the original wording said "same
    side", but the implementation (p1.y * p2.y < 0) and the doctests below
    both test for OPPOSITE signs; a point with y == 0 always yields False.
    The doctests match the implementation, so only the prose is corrected
    here -- confirm whether the name itself should be fixed at call sites.

    usage examples :
    >>> p1 = Vector3(0, -5, 0)
    >>> p2 = Vector3(0, 10, 0)
    >>> _are_points_in_same_y_side(p1, p2)
    True
    >>> p3 = Vector3(20, -5, 0)
    >>> _are_points_in_same_y_side(p1, p3)
    False
    :param pygame.Vector3 p1: 1st point
    :param pygame.Vector3 p2: 2nd point
    :return: True if the points' y signs differ
    """
    return p1.y * p2.y < 0
|
cd92325fa1857dc57837ff0773aeb022f50e7750
| 57,768
|
def restrict_keys(d: dict, domain) -> dict:
    """Remove from d all items whose key is not in domain; return d.
    >>> d = {'a': 1, 'b': 2}
    >>> dr = restrict_keys(d, {'a', 'c'})
    >>> dr
    {'a': 1}
    >>> d == dr
    True
    """
    # Snapshot the offending keys first so we can delete while d is stable.
    stale = [k for k in d if k not in domain]
    for k in stale:
        del d[k]
    return d
|
ff0d87dcccb62e99b56e831d9fb2ec135c904160
| 57,769
|
import glob
def all_wave_directories(source, suffix):
    """ Get all file names for bhps waves
    Parameters
    ----------
    source : str
        `minos` where are directories.
    Returns
    -------
    directories : list
        List of `directories` to extract from.
    """
    # Glob everything starting with source+suffix, in sorted (wave) order.
    pattern = f"{source}{suffix}*"
    return sorted(glob.glob(pattern))
|
6e64c21cfcf6447be6873713cc3056a1b4c49587
| 57,770
|
def bash_quote(text):
    """Quotes a string for bash, by using single quotes.

    Returns "" for None. Embedded single quotes are emitted as '\\''
    (close quote, escaped quote, reopen quote).
    """
    # 'is None', not '== None': identity test avoids custom __eq__ surprises.
    if text is None:
        return ""
    return "'%s'" % text.replace("'", "'\\''")
|
00047fefd47ac4cb9d9d4013cbd170475bf194d5
| 57,773
|
def get_nth_last_vowel(phones, n):
    """
    Given the rhyme level n and a syllable (phone) list, count backward within
    the list to find the nth vowel; vowels are phones whose last character is
    a stress digit (ARPABET convention). Returns the (negative) index where
    it is located, or None if there are fewer than n vowels.
    """
    vowels_seen = 0
    for offset in range(1, len(phones) + 1):
        if phones[-offset][-1].isdigit():
            vowels_seen += 1
            if vowels_seen == n:
                return -offset
|
7da09cd03f8c3f9a4d6f4c442086fc2d4c84a55a
| 57,774
|
def transform_lists(values):
    """Transform the output of lists to something more manageable.
    :param list values: The list of values from `SHOW LISTS`
    :rtype: dict
    """
    result = {}
    for row in values:
        result[row['list']] = row['items']
    return result
|
6853e50758ccee66aa9f9009e3c86b1d04889194
| 57,775
|
def validate(type_, allow_none=False, default=None):
    """Create a validator for a field in an epgdb record.
    type_: Class to expect the value to be an instance of
    allow_none: Whether the value should pass validation if it's `None`
    default: Value to use if `None` is allowed and the value is `None`
    Returns a function which accepts the value and returns the value to use or
    throws `ValueError`.
    """
    def check(value):
        # None handling first: substitute the default or reject.
        if value is None:
            if not allow_none:
                raise ValueError('field is missing')
            return default
        if isinstance(value, type_):
            return value
        raise ValueError('expected: {}; got: {}'.format(type_, repr(value)))
    return check
|
c3ab2559106a2b0c649ef89250afb75329d9cd8e
| 57,776
|
def get_source_data_path(config, subdir_num):
    """
    Return the source data path for the given subdir number
    :param config: Config dictionary
    :param subdir_num: Number of subdir
    :return: Full path to given source data
    """
    # The config entry is assumed to be a list aligned with the subdirs.
    data_paths = config["source_data_paths"]
    if isinstance(data_paths, list):
        if subdir_num < len(data_paths):
            return data_paths[subdir_num]
        raise RuntimeError(
            "Unexpected, could not get source_data_paths entry %d, found %d in list"
            % (subdir_num, len(data_paths))
        )
    raise RuntimeError(
        "Unexpected, source_data_paths entry in config should be a list corresponding with subdirs"
    )
|
ec568f5656a4b216152fe3de6f3ee557cd7defaf
| 57,777
|
import torch
def _trunc(x, minval=None, maxval=None):
"""Truncate vector values to have values on range [minval, maxval]"""
x = torch.clone(x)
if minval != None:
x[x < minval] = minval
if maxval != None:
x[x > maxval] = maxval
return x
|
82aaed7082e47e68122df8b97ad8bf0732e0b3ed
| 57,778
|
def lookup_person_in_list(name, ments):
    """Lookup a person in a list of strings as a partial match."""
    # any() short-circuits on the first mention containing the name.
    return any(name in mention for mention in ments)
|
052a2a0f9f9b9accd7458dd7420a9c3a39adf3c6
| 57,782
|
def factorial(x):
    """Return x! for non-negative x; print a message and return None otherwise."""
    if x < 0:
        # print() returns None, so this reports and returns None, as before.
        return print("Factorials do not exist for negative numbers.")
    result = 1
    for factor in range(2, x + 1):
        result *= factor
    return result
|
d2d8df80fd105a7308a1d409a4ec5908a4a2f367
| 57,783
|
def compose(fun1, fun2):
    """
    Returns a new function f. f should take a single input i, and return
    fun1(fun2(i))
    :param fun1: function
    :param fun2: function
    :return: function
    """
    # The stray 'pass' and '# Fill in' template comments were dead code.
    def retfun(i):
        return fun1(fun2(i))
    return retfun
|
99e8702a24978ac05188d48e129248f043a311dc
| 57,785
|
def make_get_request(client, endpoint):
    """Issue a GET request for `endpoint` through the supplied client."""
    response = client.get(endpoint)
    return response
|
d64376e5e7b0ad42b3b093af48bfa97e3a137744
| 57,790
|
def latex_command(value, num, mapping):
    """
    Helper function to generate a color name and definition for a given value.
    Num is used to identify different colors. Mapping is the mapping generated in
    generate_table
    """
    # Alpha is intentionally discarded; \definecolor only takes rgb.
    r, g, b, a = mapping.to_rgba(value)
    colorname = 'color' + str(num)
    # Raw string: '\d' in a normal string literal is an invalid escape
    # sequence (DeprecationWarning today, a SyntaxError in the future).
    definition = r"\definecolor{{{colorname}}}{{rgb}}{{{r},{g},{b}}}".format(
        colorname=colorname, r=r, g=g, b=b)
    return colorname, definition
|
31352b6861fb5cb17d994980ee729d0770dcc227
| 57,793
|
from typing import Tuple
def find_element(atom_symbol: str) -> Tuple[int, int]:
    """Returns the indices of the element component of a SMILES atom symbol.
    That is, if atom_symbol[i:j] is the element substring of the SMILES atom,
    then (i, j) is returned. For example:
    * _find_element('b') = (0, 1).
    * _find_element('B') = (0, 1).
    * _find_element('[13C]') = (3, 4).
    * _find_element('[nH+]') = (1, 2).
    :param atom_symbol: a SMILES atom.
    :return: a tuple of the indices of the element substring of
        ``atom_symbol``.
    """
    # Bare (unbracketed) symbols are entirely the element.
    if atom_symbol[0] != '[':
        return 0, len(atom_symbol)
    # Skip '[' plus any isotope digits.
    start = 1
    while atom_symbol[start].isdigit():
        start += 1
    # Two-letter element iff the next char is a letter other than an
    # H-count hydrogen.
    following = atom_symbol[start + 1]
    if following.isalpha() and following != 'H':
        return start, start + 2
    return start, start + 1
|
2694d5de2a9ac41f25f55139eb3169c68f2f2125
| 57,794
|
def is_number(arg):
    """Return True when arg is convertible to float, False otherwise"""
    try:
        float(arg)
    except (ValueError, TypeError):
        # ValueError: unparsable string; TypeError: unsupported type.
        # A bare except would also have hidden e.g. KeyboardInterrupt.
        return False
    return True
|
965b93ff922a2238148dbd871d599f65c9628885
| 57,796
|
def list_values_of_parameter(dataframe, parameter):
    """Return a list of all distinct values of `parameter` from the dataframe,
    ordered by descending frequency (the order of `value_counts`).
    """
    # Index the column directly -- the old '{}'.format(parameter) detour only
    # worked for string column names -- and let pandas build the list.
    return dataframe[parameter].value_counts().index.tolist()
|
44e48c3033079f794028bbde1224f07e83f6c202
| 57,797
|
def _get_state_from_port(port,time_point):
    """
    Attempt to find a StateBlock-like object connected to a Port. If the
    object is indexed both in space and time, assume that the time index
    comes first. If no components are assigned to the Port, raise a
    ValueError. If the first component's parent block has no index, raise an
    AttributeError. If different variables on the port appear to be connected
    to different state blocks, raise a RuntimeError.
    Args:
        port (pyomo.network.Port): a port with variables derived from some
            single StateBlock
        time_point : point in the time domain at which to index StateBlock
            (default = 0)
    Returns:
        (StateBlock-like) : an object containing all the components contained
            in the port.
    """
    vlist = list(port.iter_vars())
    # Each variable's parent block's parent component is its StateBlock.
    states = [v.parent_block().parent_component() for v in vlist]
    if len(vlist) == 0:
        raise ValueError(
            f"No block could be retrieved from Port {port.name} "
            f"because it contains no components."
        )
    # Check the number of indices of the parent property block. If its indexed
    # both in space and time, keep the second, spatial index and throw out the
    # first, temporal index. If that ordering is changed, this method will
    # need to be changed as well.
    try:
        idx = vlist[0].parent_block().index()
    except AttributeError as err:
        raise AttributeError(
            f"No block could be retrieved from Port {port.name} "
            f"because block {vlist[0].parent_block().name} has no index."
        ) from err
    # Assuming the time index is always first and the spatial indices are all
    # the same
    if isinstance(idx,tuple):
        # NOTE(review): this builds a NESTED tuple
        # (time_point, (spatial_indices...,)) rather than a flat
        # (time_point, *spatial) index; confirm the states[0][idx] lookup
        # below really expects that shape.
        idx = (time_point,vlist[0].parent_block().index()[1:])
    else:
        idx = (time_point,)
    # This method also assumes that ports with different spatial indices won't
    # end up at the same port. Otherwise this check is insufficient.
    if all(states[0] is s for s in states):
        return states[0][idx]
    raise RuntimeError(
        f"No block could be retrieved from Port {port.name} "
        f"because components are derived from multiple blocks."
    )
|
7053e183373afe95c8d516776210fa7a6b7c7d53
| 57,799
|
import hashlib
import io
def SHA1_file(filepath, extra=b''):
    """
    Returns hex digest of SHA1 hash of file at filepath
    :param str filepath: File to hash
    :param bytes extra: Extra content added to raw read of file before taking hash
    :return: hex digest of hash
    :rtype: str
    """
    digest = hashlib.sha1()
    with io.open(filepath, 'rb') as stream:
        # Read in hash-block-sized chunks so large files are never fully loaded.
        while True:
            chunk = stream.read(digest.block_size)
            if not chunk:
                break
            digest.update(chunk)
    digest.update(extra)
    return digest.hexdigest()
|
f347b90d03d6c9e45db2e9f280582c546ba3ce34
| 57,803
|
def pool_to_HW(shape, data_frmt):
    """ Convert from NHWC|NCHW => HW
    """
    if len(shape) != 4:
        # Not NHWC|NCHW, return as is.
        return shape
    # NCHW keeps H,W at axes 2,3; anything else is treated as NHWC (1,2).
    h_axis, w_axis = (2, 3) if data_frmt == 'NCHW' else (1, 2)
    return [shape[h_axis], shape[w_axis]]
|
2c06b0995628ecd0edf4bab1551b020dbe9b549b
| 57,806
|
import math
def distance(a, b):
    """
    Return the Euclidean distance between 3D points a and b.
    """
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    dz = b[2] - a[2]
    return math.sqrt(dx**2 + dy**2 + dz**2)
|
ce0c1ea266da0b6cffb4e5d0d369c8b26f8f2328
| 57,813
|
def correct_jumps_between_arr(arr1, arr2, jump_quantum):
    """
    Shift the start value of one array to align with the end value of another.
    Parameters
    ----------
    arr1 : numpy.ndarray
        Nx0 array of values in the trajectory of some variable.
    arr2 : numpy.ndarray
        Mx0 array of values in the trajectory of some variable. The function
        will shift all values in this array such that its start aligns with the
        end of arr1.
    jump_quantum : float
        The shift added to arr2 will be an integer multiple of this
        jump_quantum size.
    Returns
    -------
    np.ndarray
        Mx0 array of shifted arr2 values.
    """
    # Discontinuity between the end of arr1 and the start of arr2.
    jump = arr2[0] - arr1[-1]
    # Number of quanta spanned by the jump, rounded to nearest integer.
    n = round(jump/jump_quantum)
    # correct the elements following the jump
    # NOTE(review): this mutates the caller's arr2 IN PLACE (and also returns
    # it); in-place subtraction of a float shift also requires arr2 to have a
    # float dtype -- confirm callers expect both.
    arr2 -= n * jump_quantum
    return arr2
|
1733c313a70c72c6eeeec83f0e1b9527234abc09
| 57,818
|
def nutrition() -> None:
    """Watermelon nutrition info.
    Contains nutrition facts of a serving of watermelon.
    Returns:
        Prints a series of strings containing the nutrition facts of a serving of watermelon.
    """
    # One print call with many arguments: the default sep=' ' inserts a space
    # after each embedded '\n', so continuation lines start with one space.
    print("Serving: About 2 cups, diced (280g)\n",
          "___________________________________\n",
          "Calories: 80\n",
          "___________________________________\n",
          "Total Fat: 0g\n",
          "Cholesterol: 0mg\n",
          "Sodium: 0mg\n",
          "Total Carbohydrate: 21g\n",
          " Dietary Fiber: 1g\n",
          " Total Sugars: 17g\n",
          " Includes 0g Added Sugars\n",
          "Protein: 2g\n",
          "___________________________________\n",
          "Vitamin D: 0mcg\n",
          "Calcium: 20mg\n",
          "Iron: 0.7mg\n",
          "Potassium: 310mg\n",
          "Vitamin A: 80mcg\n",
          "Vitamin C: 23mg\n",
          "___________________________________\n",
          "INGREDIENTS: WATERMELON, RAW\n",
          "Source: https://www.watermelon.org/assets/Nutrition/FDAWatermelonLabel.pdf")
    return None
|
6ccff20a27651fa5a1a81e2e74e7654038d7c535
| 57,821
|
from typing import Callable
import functools
def precondition(checker: Callable[..., None], what: str = "") -> Callable:
    """
    A decorator that adds checks to ensure any preconditions are met.
    Args:
        checker: The function to call to check whether the preconditions are met. It has the same signature as the wrapped
            function with the addition of the keyword argument `what`.
        what: A string that is passed in to `checker` to provide context information.
    Returns:
        Callable: A decorator that creates the wrapping.
    """
    def outer(wrapped_function):
        """Wrap `wrapped_function` so the checker runs first."""
        @functools.wraps(wrapped_function)
        def inner(*args, **kwargs):
            """Run the precondition checker, then delegate."""
            checker(*args, **kwargs, what=what)
            return wrapped_function(*args, **kwargs)
        return inner
    return outer
|
5a7f7c98e4f852213320071c97b7dae674f29547
| 57,822
|
import struct
def _decode_byte(fp):
"""Decode a byte tag
:type fp: A binary `file object`
:rtype: byte
"""
return struct.unpack('b', fp.read(1))[0]
|
345984525230e06c1a37de175cf2098e250de1bb
| 57,824
|
def _get_collection_memcache_key(collection_id, version=None):
"""Returns a memcache key for an collection."""
if version:
return 'collection-version:%s:%s' % (collection_id, version)
else:
return 'collection:%s' % collection_id
|
703035f86bb861ed163b043c6f0d6ac9fd96b475
| 57,827
|
def day_to_str_int(d):
    """Converts a day to an int form, str type, with a leading zero"""
    # Single-digit days get a leading zero; others pass through str().
    return "0{}".format(d) if d < 10 else str(d)
|
687519b748351922432a7de66ccaac613eb3cd02
| 57,832
|
from functools import reduce
def total_worker_load(worker_id, worker_hours, close_time):
"""
Calculate load in all time restaurant works.
:param worker_id: id for current worker
:param worker_hours: map {id {interval obj: working seconds}}
:param close_time: model global time to calculate total load correctly
:return: load value if value < 1 else 1
"""
period_load = worker_hours[worker_id]
sum_seconds = reduce(lambda a, b: a + b, period_load.values())
sum_keys = 0
for key in period_load:
sum_keys += (key.toInterval - key.fromInterval) if not key.last \
else close_time - key.fromInterval
return round(sum_seconds / sum_keys, 3) if round(sum_seconds / sum_keys, 3) < 1 else 1
|
ba4ad6170f21ea72bcb54db4861efb90b7c6be1c
| 57,836
|
def weighted(x, y, p1, p2):
    """Combine two values into a weighted sum.

    Args:
        x (float): first value
        y (float): second value
        p1 (float): weight applied to x
        p2 (float): weight applied to y

    Returns:
        float: p1*x + p2*y
    """
    return p1 * x + p2 * y
|
9fcac1793d3c4584ddeb2dabeddb38849cbbbd17
| 57,837
|
def checkDouble3(t=True, r=True, s=False, **kwargs):
    """Return the standard keyable double3 transform attribute names.

    Parameters
    ----------
    t : bool
        include the translate channels.
    r : bool
        include the rotate channels.
    s : bool
        include the scale channels.

    Returns
    -------
    list[ string ]
    """
    channels = []
    for enabled, names in (
        (t, ['translateX', 'translateY', 'translateZ']),
        (r, ['rotateX', 'rotateY', 'rotateZ']),
        (s, ['scaleX', 'scaleY', 'scaleZ']),
    ):
        if enabled:
            channels.extend(names)
    return channels
|
4af97fb3d5e194c8dc215a4f347ad8a49135e778
| 57,839
|
def get_course_num(course_count, iput):
    """Gets the course # from user input. Returns -1 if invalid input.

    Dots are stripped before parsing, so "3." parses as 3.

    Args:
        course_count (int): number of courses
        iput (string): raw user input string

    Returns:
        int: -1 if invalid user input. Otherwise the (zero-based) course
        number the user submitted.
    """
    # Parse once; catch only the failures bad user input can cause
    # (was a bare ``except:``, which also swallowed SystemExit etc.).
    try:
        num = int(iput.replace(".", ""))
    except (ValueError, AttributeError):
        return -1
    if 0 < num <= course_count:
        return num - 1
    return -1
|
6712b2032de62adbb477034546138c64adcfcd83
| 57,842
|
def date_year(date):
    """Return the year component of the given date."""
    year = date.year
    return year
|
7e263ada4f1fa65d304854b8180ae2d7ba468c66
| 57,844
|
from typing import Any
def _maybe_tuple_to_list(item: Any) -> Any:
"""Convert a tuple to a list. Leave as is if it's not a tuple."""
if isinstance(item, tuple):
return list(item)
return item
|
a0a3b387fda12010d88fcba741563def1adcdb08
| 57,846
|
def remove_newlines(bib):
    """Replace every newline with a space in each value of *bib*, in place.

    Returns the same (mutated) dictionary.
    """
    for key, value in bib.items():
        bib[key] = value.replace('\n', ' ')
    return bib
|
6e040a58082f94166fc415a33ed8c37dde5f88cd
| 57,850
|
from typing import Counter
def get_interval_histogram(chorale):
    """
    Arguments
        chorale: a music21 Stream object

    Returns two interval histograms (directed, undirected) as
    collections.Counter objects for the input chorale.
    """
    directed = Counter()
    undirected = Counter()
    for part in chorale.parts:
        # Drop the first entry: melodicIntervals() yields a meaningless
        # boundary result at index 0.
        for interval in part.melodicIntervals()[1:]:
            directed[interval.directedName] += 1
            undirected[interval.name] += 1
    return directed, undirected
|
7d763008497a43d1d5369eff0c6c72487fd28773
| 57,851
|
import ast
def parse_param_file(param_file_name):
    """
    Read fault parameters from a file of ``name = value`` lines.

    Values are parsed with ast.literal_eval, so they may be any Python
    literal (numbers, strings, tuples, ...).
    """
    params = {}
    with open(param_file_name) as fh:
        for raw_line in fh:
            name, raw_value = raw_line.strip().split('=')
            params[name.strip()] = ast.literal_eval(raw_value.strip())
    return params
|
b3e359fb300549c0780f89ab817987be6faf1bc0
| 57,853
|
def im_fits_path(source, band, epoch, stoke, base_path=None):
    """
    Function that returns path to im-file for given source, epoch, band and
    stokes parameter.

    :param base_path: (optional)
        Path to route of directory tree. If ``None`` then use current directory.
        (default: ``None``)
    """
    # Honor the documented default: ``None`` means the current directory.
    # (Previously ``None + source`` raised a TypeError.)
    if base_path is None:
        base_path = ''
    return (base_path + source + '/' + epoch + '/' + band.upper() + '/im/' +
            stoke.upper() + '/')
|
33e52dcb28fd98e290d3546e44208fbb9665f6a8
| 57,856
|
def check(list1, list2):
    """
    Return True when any element of list1 compares equal (==) to any
    element of list2, else False.
    """
    return any(a == b for a in list1 for b in list2)
|
5fcde83fbc2af493b589cfc447f0a6a88cad4e41
| 57,858
|
def iops_to_kiops(number: float) -> float:
    """
    Convert iops to k-iops, rounded to 3 decimal places.

    Parameters
    ----------
    number : float
        A ``float`` in iops.

    Returns
    -------
    float
        The value expressed in k-iops.
    """
    kiops = number * 1e-3
    return round(kiops, 3)
|
c3e57ef09f0f2694fd13b61fcbfcf6dea8e8719e
| 57,860
|
def SHR(x, n):
    """The right shift operation.

    x is a w-bit word and n is an integer with 0 <= n < w:
    SHR_n(x) = x >> n, i.e. floor division by 2**n.
    """
    return x // (1 << n)
|
9e910a358fbb32c60312885abc0de4d1aede073f
| 57,862
|
def get_ranked_sites(location, as_list=False):
    """
    Read site ranks from the top-1m.csv file at <location>.

    <as_list> (True) returns the rank-ordered list of urls
              (False) returns dict[url] = rank (1-based)
    """
    with open(location, 'r') as f:
        rows = f.read().strip().split('\n')
    sites = ['http://' + row.split(',')[1] for row in rows]
    if as_list:
        return sites
    return {site: rank for rank, site in enumerate(sites, start=1)}
|
b84c7699235193910ec788f926ff5571570d6621
| 57,864
|
def get_child_text(element):
    """Return the text directly in the element, not descendants."""
    parts = []
    if element.text:
        parts.append(element.text)
    # Text between/after child elements lives in the children's .tail.
    parts.extend(child.tail for child in element if child.tail)
    return ''.join(parts)
|
0c77a86ce6a15e6e1134e9b5d3b48c341e73d9d3
| 57,866
|
import time
def measureTime(method):
    """
    Decorator for measuring how long the given function took to execute.

    The elapsed time is printed to stdout; the wrapped function's arguments
    and return value are forwarded unchanged.
    Credit to: https://thenextweb.com/news/decorators-in-python-make-code-better-syndication

    Args:
        method (function): Function whose execution time is measured.

    Returns:
        function: A wrapper that times ``method`` and returns its result.
    """
    from functools import wraps  # local import keeps the snippet self-contained

    @wraps(method)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        start_time = time.perf_counter()
        # Forward arguments and keep the result (previously the wrapper took
        # no arguments and discarded the return value).
        result = method(*args, **kwargs)
        end_time = time.perf_counter()
        print(method.__name__ + ' took ' + str(round((end_time - start_time), 3)) + ' seconds.')
        return result
    return wrapper
|
d4f54954bed53fcfb50da0641d49b58b04567f6e
| 57,870
|
def getFrameSlicing(inputAxes, selectValue, selectAxis="t"):
    """
    Build a slicing tuple for a multidimensional array with axes `inputAxes`,
    selecting `selectValue` (an index, list of indices, or slice object) along
    `selectAxis` and taking everything along the remaining axes.

    Example: `myarray[getFrameSlicing('xzt', 3, 't')]`
    Example: `myarray[getFrameSlicing('xzt', [3,7,9], 't')]`
    """
    assert len(selectAxis) == 1
    assert inputAxes.count(selectAxis) == 1
    return tuple(
        selectValue if axis == selectAxis else slice(None)
        for axis in inputAxes
    )
|
0106978432a712881064d2332e2fb1a093d7d9d9
| 57,873
|
def to_tlwh(bb):
    """Convert a (left, top, right, bottom) box to (left, top, width, height)."""
    left, top, right, bottom = bb[0], bb[1], bb[2], bb[3]
    return (left, top, right - left, bottom - top)
|
c94eaaaafdd4d6ce71e4c696ef2bb4129ebb0bc3
| 57,877
|
import torch
def cis(x: torch.Tensor) -> torch.Tensor:
    """Compute the complex exponential exp(i*x) element-wise."""
    unit_magnitude = torch.ones_like(x)
    return torch.polar(unit_magnitude, x)
|
1fa1782dc857d86de2bbfa26e9c469223ba586fe
| 57,881
|
def prep_re_group(re_groupdict):
    """Extracts and prepares keyword match groups.

    Arg:
        re_groupdict: re.match.groupdict() instance.

    Returns:
        parameter keyword, historic mean range, $PxN channel name
    """
    param_key = re_groupdict.get('param')
    raw_channel = re_groupdict.get('channel', '')
    channel_name = raw_channel.strip('_').replace(' ', '').upper()
    raw_val = re_groupdict.get('val', '')
    # Default the historic mean range to 10 when no value was captured.
    mean_range = int(raw_val.strip('_')) if raw_val else 10
    return param_key, mean_range, channel_name
|
b90ab74b92e7fc372ccb4fd5da41a12864619dfe
| 57,882
|
from functools import reduce
def geom_mean(arr):
    """Geometric mean of an array of numbers."""
    values = [float(v) for v in arr]
    product = reduce(lambda acc, v: acc * v, values)
    return product ** (1.0 / len(values))
|
d2b5ac886055b75cf5891664f795e4934e06915e
| 57,883
|
import json
def load_es_query(es_query_path):
    """Load the ElasticSearch query json file.

    Args:
        es_query_path (str): The path to the ES query json file which is used for making subscription in BlueBox.

    Returns:
        query_obj (dict): A dict of loaded es query content.
    """
    with open(es_query_path, 'r') as query_file:
        return json.load(query_file)
|
0108933b10de18bfc4da97528fb527f52d5d29cb
| 57,885
|
def unblockable(function):
    """Mark a handler as exempt from nickname and hostname blocking.

    Useful for events such as JOIN that must always be recorded.
    """
    setattr(function, 'unblockable', True)
    return function
|
60f8465134324ef71d54a9fb5a152007c1668cc6
| 57,895
|
import random
def random_policy(state):
    """Ignore the state and pick a uniformly random command (1 or 2)."""
    return {'command': random.randint(1, 2)}
|
20940b8ec2dc71836c68d3e409b4f2a5879d78b8
| 57,903
|
import yaml # noqa: WPS433
def generate_yaml(config: dict, user_data: dict) -> str:
    """Render *user_data* as a YAML document.

    Note: *config* is accepted for interface compatibility but is not used.
    """
    rendered = yaml.dump(user_data)
    return rendered
|
c7fce536bff7ea0610c2b7ee610d270158179eea
| 57,906
|
def is_valid_ip_netmask(ip_netmask):
    """Validates given string as IPv4 netmask.

    Accepts only contiguous (left-packed) masks such as ``255.255.0.0``;
    both ``0.0.0.0`` and ``255.255.255.255`` pass, while a mask like
    ``255.0.255.0`` is rejected.

    Args:
        ip_netmask (str): string to validate as IPv4 netmask.

    Returns:
        bool: True if string is valid IPv4 netmask, else False.
    """
    ip_netmask_split = ip_netmask.split('.')
    # A dotted-quad mask must have exactly four octets.
    if len(ip_netmask_split) != 4:
        return False
    # The only octet values possible in a contiguous mask: 1-bits packed
    # from the left within a single octet (0, 10000000b, 11000000b, ...).
    valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255']
    for ip_netmask_octet in ip_netmask_split:
        if ip_netmask_octet not in valid_octet_values:
            return False
    # Enforce contiguity across octets: once an octet is not 255, every
    # octet to its right must be 0.
    if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
        return False
    elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
        return False
    elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0':
        return False
    return True
|
1a630e78db089381339705d2ed7bf729d7f76c13
| 57,907
|
import math
def kl_divergence(p, q):
    """Return the Kullback-Leibler divergence of two probability distributions.

    p and q should be indexable objects of the same length where p_i
    corresponds to q_i. Terms where either probability is zero are skipped.
    """
    total = 0.
    for p_i, q_i in zip(p, q):
        if p_i == 0 or q_i == 0:
            continue
        total += p_i * math.log(p_i / q_i)
    return total
|
5e8025bf05c9457a3b70c54c64d3d9da0b9d5a7d
| 57,910
|
def _same_atom_different_altloc(atom1, atom2):
"""
Determines whether atom1 and atom2 differ only by their alternate location.
Parameters
----------
atom1 : iotbx.pdb.hierarchy.atom
atom2 : iotbx.pdb.hierarchy.atom
Returns
-------
bool
"""
label1, label2 = [i.fetch_labels() for i in [atom1, atom2]]
name1, name2 = atom1.name.strip(), atom2.name.strip()
chain1, chain2 = label1.chain_id, label2.chain_id
res1, res2 = label1.resid(), label2.resid()
return name1 == name2 and chain1 == chain2 and res1 == res2
|
190127e1d1c6d5200baa21aa1936cab92014cb41
| 57,913
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.