| content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def isPandigital(n):
    """
    n: an int
    output: True if n is pandigital
    """
    if len(str(n)) > 10:
        return False
    numStrList = list(str(n))
    if len(str(n)) == 10:  # '==' rather than 'is': identity checks on ints are unreliable
        numList = list(range(10))
    else:
        numList = list(range(1, len(str(n)) + 1))
    for num in numList:
        if str(num) not in numStrList:
            return False
    return True
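# Illustrative usage sketch (added for clarity; not part of the original row):
# 2143 contains exactly the digits 1..4, so it is 4-digit pandigital.
assert isPandigital(2143)
assert not isPandigital(2144)  # digit 3 is missing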
| 5da0d260389f10f6fdf2adf65af449a3664c3e2b | 83,082 |
from typing import Tuple
def count_bases(dna: str) -> Tuple[int, int, int, int]:
    """ Count each of the bases in DNA """
    cnt_a, cnt_c, cnt_g, cnt_t = 0, 0, 0, 0
    for base in dna:
        if base == 'A':
            cnt_a += 1
        elif base == 'C':
            cnt_c += 1
        elif base == 'G':
            cnt_g += 1
        elif base == 'T':
            cnt_t += 1
    return (cnt_a, cnt_c, cnt_g, cnt_t)
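# Illustrative usage sketch (added): bases outside 'ACGT' (e.g. 'N') are simply ignored.
assert count_bases("ACGTACN") == (2, 2, 1, 1)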
| adb3bf95d0177cb3533cbdf2dcac9ba587205647 | 83,088 |
def dec_perc_convert(input_value, input_units):
    """
    Convert from decimal to percent or percent to decimal.
    Parameters
    ----------
    input_value : float
        Value to be converted.
    input_units : string
        Units of the input value.
        Enter either "percent" or "decimal"
    Returns
    -------
    float
        Returns converted value in percent or decimal.
    """
    if input_units == "percent":
        return input_value / 100
    elif input_units == "decimal":
        return input_value * 100
    else:
        # ValueError is more specific than a bare Exception for bad arguments
        raise ValueError("Enter a valid unit value: decimal or percent")
| 5035b906bf7f25de482bd66b245e85dad3adae37 | 83,089 |
import shlex
def splitLine(line):
    """
    Split lines read from files, preserving
    paths and quoted strings.
    """
    tokens = shlex.split(line)  # renamed from 'splitLine' to avoid shadowing the function
    return tokens
| 4d3956a0dc06d0d0345e07746dd6119daafde73c | 83,092 |
def v3ToStr(v3):
    """
    convert a vbase3 vector to a string like v0,v1,v2
    :param v3:
    :return:
    """
    return ','.join(str(e) for e in v3)
| fa6aa3fda7b3248a98305cc1ed0a17d343469896 | 83,096 |
import math
def get_indices(width, height, xpartition, ypartition, xinput, yinput):
    """
    Function to get the indices for grid.
    Args:
        width (float): width of the pitch.
        height (float): height of the pitch.
        xpartition (int): number of rows in a grid.
        ypartition (int): number of columns in a grid.
        xinput (float): x-coordinate location.
        yinput (float): y-coordinate location.
    Returns:
        tuple: containing indices for the grid.
    """
    ## calculate the size of each grid cell in x and y
    x_step = width / xpartition
    y_step = height / ypartition
    ## calculate x and y indices (handle border cases as well)
    x = math.ceil((xinput if xinput > 0 else 0.5) / x_step)
    y = math.ceil((yinput if yinput > 0 else 0.5) / y_step)
    return (
        ypartition - y, x - 1
    )
| a93aefb4f04106341b498844751e2bc61b1754d4 | 83,100 |
def split_off_tag(xpath):
    """
    Splits off the last part of the given xpath
    :param xpath: str of the xpath to split up
    """
    split_xpath = xpath.split('/')
    if split_xpath[-1] == '':
        return '/'.join(split_xpath[:-2]), split_xpath[-2]
    else:
        return '/'.join(split_xpath[:-1]), split_xpath[-1]
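# Illustrative usage sketch (added): trailing slashes are handled by the first
# branch, so both forms split to the same (parent, tag) pair.
assert split_off_tag('/root/child/tag') == ('/root/child', 'tag')
assert split_off_tag('/root/child/tag/') == ('/root/child', 'tag')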
| bcfc909839bae9181700534e7a57dc117605ff6c | 83,101 |
def bss_host_nid(host):
    """
    Retrieves the nid from the BSS host object
    """
    return host["NID"]
| 0767db41eda4877e3e61cf137d7f7da1275b7261 | 83,104 |
import torch
from collections import OrderedDict
def load_model_state_from_checkpoint(checkpoint_path, net=None, prefix='model.'):
    """
    Load model weights from a PyTorch Lightning trainer checkpoint.
    Parameters
    ----------
    net: nn.Module
        Instance of the PyTorch model to load weights into
    checkpoint_path: str
        Path to the PL Trainer checkpoint
    prefix: str
        Prefix used in the LightningModule for the model attribute.
    Returns
    -------
    nn.Module
    """
    if net is None:
        # creating a new model instance is not supported
        raise NotImplementedError
    trainer_checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    # chop off the prefix from the LitModule, e.g. `self.model = model`
    # (OrderedDict comes from collections; typing.OrderedDict is only for annotations)
    model_checkpoint = OrderedDict((key[len(prefix):] if key.startswith(prefix) else key, value)
                                   for key, value in trainer_checkpoint['state_dict'].items())
    net.load_state_dict(model_checkpoint)
    return net
| 60f45a8d3231eb828ab7886ee66339fefdac8873 | 83,107 |
import hmac
import hashlib
def sign(base: bytes, private_key: str) -> str:
    """Calculate callback signature"""
    return hmac.new(
        bytes(private_key, "utf-8"),
        base,
        hashlib.sha256
    ).hexdigest()
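# Illustrative usage sketch (added): the payload is raw bytes and the key a str;
# the key value below is hypothetical.
signature = sign(b'{"status":"ok"}', "my-private-key")
assert len(signature) == 64  # a SHA-256 hex digest is 64 characters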
| 1b56ca0ea319a6dbb86730c574c0536e67fce43f | 83,108 |
import math
def find_t(X1, X2, S1, S2, N1, N2):
    """X1 and X2 are means, S1 and S2 std devs, N1 and N2 are
    population sizes (and not degrees of freedom)
    """
    # with this, the result between scipy and the manual computation is the same
    N1 = N1 - 1
    N2 = N2 - 1
    above = (X1 - X2)
    below = math.sqrt(((S1**2) / N1) + ((S2**2) / N2))
    return above / below
| ce3a0d403a1ef844e52fcea7928213af8c87a994 | 83,110 |
def unzip(p, n):
    """Split a list-like object, 'p', into 'n' sub-lists by taking
    the next unused element of the main list and adding it to the
    next sub-list. A list of tuples (the sub-lists) is returned.
    Each of the sub-lists is of the same length; if len(p) % n != 0, the
    shorter sub-lists are padded with 'None'.
    Example:
    >>> unzip(['a','b','c','d','e'], 3)
    [('a', 'd'), ('b', 'e'), ('c', None)]
    """
    (mlen, lft) = divmod(len(p), n)  # find length of longest sub-list
    if lft != 0:
        mlen += 1
    lst = [[None] * mlen for i in range(n)]  # initialize list of lists
    for i in range(len(p)):
        (j, k) = divmod(i, n)
        lst[k][j] = p[i]
    return list(map(tuple, lst))  # list() so the doctest holds on Python 3
| b526d2f71303c739b68035e5ad6b5644f47c547a | 83,118 |
import copy
def rhyme_quality(p1, p2):
    """ Determine a numerical quality of the rhyme between two pronunciation lists.
    >>> rhyme_quality( ["A", "B", "C"], ["A", "B"] )
    0
    >>> rhyme_quality( ["A", "B", "C"], ["B", "C"] )
    2
    >>> rhyme_quality( ["A", "B"], ["A", "B"] )
    0
    >>> rhyme_quality( ["B", "B", "C", "D"], ["A", "B", "C", "D"] )
    3
    """
    p1 = copy.deepcopy(p1)
    p2 = copy.deepcopy(p2)
    p1.reverse()
    p2.reverse()
    if p1 == p2:
        # identical pronunciations do not count as a rhyme
        return 0
    quality = 0
    for i, p_chunk in enumerate(p1):
        try:
            if p_chunk == p2[i]:
                quality += 1
            if p_chunk != p2[i]:
                break
        except IndexError:
            break
    return quality
| bc2f8189202ce29d818d942e6ad7b16a82ffe4f1 | 83,121 |
def _get_labels(model, X):
    """Get labels from a trained sklearn-like model"""
    try:
        y = model.labels_
    except AttributeError:
        y = model.predict(X)
    return y
| 6bc2f20b18554a7aa7c0c676f520d434852e7085 | 83,125 |
import requests
import json
def get_response(url, data, headers):
    """
    Return the response from Elsevier
    :param url: <str> base_url
    :param data: <dict> data parameters
    :param headers: <dict> headers
    :return: response parsed as a Python object, or the raw text if parsing fails
    """
    response = requests.put(url, data=json.dumps(data), headers=headers)
    # response.json() handles true/false/null correctly; the original
    # replace-and-eval approach was fragile and unsafe on untrusted input.
    try:
        return response.json()
    except ValueError:
        print(response.text)
        return response.text
| 9f702ab9c5aa1cd42a6fd66307d4ca27787c4b29 | 83,129 |
def fix_sanitizer_crash_type(crash_type):
    """Ensure that Sanitizer crashes use generic formats."""
    # General normalization.
    crash_type = crash_type.lower().replace('_', '-').capitalize()
    # Use more generic types for certain Sanitizer ones.
    crash_type = crash_type.replace('Int-divide-by-zero', 'Divide-by-zero')
    return crash_type
| a2971f209e380f7b50d4234876e7421249737d44 | 83,133 |
def get_ardupilot_url(vehicle):
    """Return a formatted url to target a .xml on the ardupilot auto test server"""
    return "http://autotest.ardupilot.org/Parameters/{0}/apm.pdef.xml".format(vehicle)
| 077ab98b1130ab2ebf84369a3a0df88bf74ec6c2 | 83,137 |
def get_evens(lst):
    """ Returns a list of only even numbers from lst """
    out = []
    for val in lst:
        if val % 2 == 0:
            out.append(val)
    return out
| 75389aada6f6168266292e2ac74b99909d595d0f | 83,139 |
def zscore_normalize(image, mask):
    """
    https://github.com/jcreinhold/intensity-normalization/blob/master/intensity_normalization/normalize/zscore.py
    Normalize a target image by subtracting the mean of the vertebra
    and dividing by the standard deviation
    Args:
        image: target volume
        mask: mask for image
    Returns:
        normalized: image with zero mean and unit variance over the masked region
    """
    logical_mask = mask == 1  # force the mask to be logical type
    mean = image[logical_mask].mean()
    std = image[logical_mask].std()
    normalized = (image - mean) / std
    return normalized
| 6eddb0bf2afd355f7a63e04ff104ed4e214ae7de | 83,148 |
def is_empty_json_response(context):
    """Check if the JSON response is empty (but not None)."""
    return context.response.json() == {}
| c7b002f7695c364ce152ad9ad14291f6b0c31719 | 83,163 |
def get_signature_def_by_key(meta_graph_def, signature_def_key):
    """Utility function to get a SignatureDef protocol buffer by its key.
    Args:
        meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefMap to
            look up.
        signature_def_key: Key of the SignatureDef protocol buffer to find in the
            SignatureDefMap.
    Returns:
        A SignatureDef protocol buffer corresponding to the supplied key, if it
        exists.
    Raises:
        ValueError: If no entry corresponding to the supplied key is found in the
            SignatureDefMap of the MetaGraphDef.
    """
    if signature_def_key not in meta_graph_def.signature_def:
        raise ValueError("No SignatureDef with key '%s' found in MetaGraphDef." %
                         signature_def_key)
    return meta_graph_def.signature_def[signature_def_key]
| 961f3742cf65ef2c06cbb7dc2cbc784ae1cd892d | 83,164 |
import math
def get_num_tiles(rows, cols, row_tile_size, col_tile_size):
    """
    Obtain the number of vertical and horizontal tiles that an image can be divided into given a row tile size and
    a column tile size.
    Args:
        rows: Number of rows.
        cols: Number of columns.
        row_tile_size: Number of pixels in a tile row.
        col_tile_size: Number of pixels in a tile column.
    Returns:
        Tuple consisting of the number of vertical tiles and the number of horizontal tiles that the image can be
        divided into given the row tile size and the column tile size.
    """
    num_row_tiles = math.ceil(rows / row_tile_size)
    num_col_tiles = math.ceil(cols / col_tile_size)
    return num_row_tiles, num_col_tiles
| 9f3517aa3a0293fbd1d43e03da1dc5357e54ee5a | 83,165 |
import re
def flatMapFunc(document):
    """
    Before, we returned a list of words and used the map and reduce functions to
    determine how many times each word occurred (regardless of document ID).
    Now we want to know how many different documents contain the word.
    This function should create a list of items which we will then run various
    transformations on to eventually create a count of documents per word.
    document[0] is the document ID (distinct for each document)
    document[1] is a string of all text in that document
    You will need to modify this code.
    """
    documentID = document[0]
    words = re.findall(r"\w+", document[1])
    ret = []
    for word in words:
        ret.append((word, documentID))
    return ret
| 5f56910461f16dfe1a95527d691f3b3f626866d0 | 83,172 |
def wait(self, dtime="", **kwargs):
    """APDL Command: /WAIT
    Causes a delay before the reading of the next command.
    Parameters
    ----------
    dtime
        Time delay (in seconds). Maximum time delay is 59 seconds.
    Notes
    -----
    You should consider using ``time.sleep(dtime)``
    The command following the /WAIT will not be processed until the
    specified wait time increment has elapsed. Useful when reading from a
    prepared input file to cause a pause, for example, after a display
    command so that the display can be reviewed for a period of time.
    Another "wait" feature is available via the *ASK command.
    This command is valid in any processor.
    """
    command = f"/WAIT,{dtime}"
    return self.run(command, **kwargs)
| 394587ee3e96f1f945c99aedda8b493b7d2129e3 | 83,174 |
def miniAOD_customizeIsolatedTracksFastSim(process):
    """Switch off dE/dx hit info on fast sim, as it's not available"""
    process.isolatedTracks.saveDeDxHitInfo = False
    return process
| b552f353ef2068c0d5d076ae288865382580cf34 | 83,175 |
def push(trace, instructions, reg):
    """Push the content of *reg* onto the stack"""
    instructions.append({"trace": trace, "op": "push", "reg": reg})
    return instructions
| 14536be2f3997b4e29cb0206dd55e311dd6722a2 | 83,176 |
import pickle
import base64
def unserialize(x):
    """
    Unserialize a serialized object.
    :param x: Object to be unserialized
    :type x: string
    :return: Unserialized object
    :rtype: arbitrary (typically a dictionary)
    """
    # Note: pickle.loads must only be used on trusted input; it can execute
    # arbitrary code during unpickling.
    return pickle.loads(base64.b64decode(x.encode()))
| 2079175e8ad98863f478ca205ed28d831546f98e | 83,178 |
def convert_c_to_f(temp_c):
    """Converts temp (C) to temp (F)."""
    try:
        temp_f = (temp_c * 1.8) + 32
        temp_f = round(temp_f, 2)
    except TypeError:
        temp_f = False
    return temp_f
| 88b08f89c5d674a0220390aff91faa3db2e7e0be | 83,183 |
def pip_safe_name(s: str):
    """ Return a name that is converted to pypi safe format. """
    return s.lower().replace("-", "_").replace(" ", "_")
| c4dc9b80c7341323947ef162590adeeb3ba87ec1 | 83,188 |
import requests
import logging
def geocode(key, language, address, latitude, longitude):
    """get latitude, longitude from address using the google geocode api
    """
    try:
        response = requests.get(
            "https://maps.googleapis.com/maps/api/geocode/json",
            params={
                "address": address,
                "language": language,
                "latlng": "{},{}".format(latitude, longitude),
                "key": key
            })
        response.raise_for_status()
        data = response.json()
        location = data["results"][0]["geometry"]["location"]
        components = []
        for component in data["results"][0]["address_components"]:
            if component["types"][0] in [
                    "locality", "administrative_area_level_1"
            ]:
                components.append(component["short_name"])
        address = ",".join(components)
        return location["lat"], location["lng"], address
    except Exception as e:
        logging.error(e, exc_info=True)
        return None
| a1580289b262e62f8bdd1dc15bb0027e5cfd39bf | 83,196 |
from typing import Set
import inspect
def get_subclasses(cls: type, include_abstract: bool = False) -> Set[type]:
    """
    Collects all subclasses of the given class.
    :param cls: A base class
    :param include_abstract: Whether abstract base classes should be included
    :return: A set of all subclasses
    """
    subclasses = set()
    queue = cls.__subclasses__()
    while queue:
        cls = queue.pop()
        if include_abstract or not inspect.isabstract(cls):
            subclasses.add(cls)
        queue += cls.__subclasses__()
    return subclasses
| 41004d5a360b681aaa9a962269b9aaf91c72fabf | 83,198 |
def coords_to_index(coords, width):
    """
    Converts x,y coordinates of an image (with (0,0) in the top-left corner) to an index
    in a 1-D array of pixels
    :param coords: A tuple (x,y)
    :param width: The width of the image
    :return: The index of the corresponding pixel
    """
    return coords[1] * width + coords[0]
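# Illustrative usage sketch (added): in a 10-pixel-wide image, pixel (x=3, y=2)
# lands at flat index 2 * 10 + 3 = 23.
assert coords_to_index((3, 2), 10) == 23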
| 71c752b73c1e45287c9e7ef66c3b400f70f7e2a1 | 83,200 |
def num_to_one(x: int) -> int:
    """Returns +1 or -1, depending on whether x is positive or negative.
    Zero maps to +1."""
    if x < 0:
        return -1
    else:
        return 1
| b030af4e81b3c9fedbe5acbb02269e3ea33e95c8 | 83,203 |
import re
def extract_bibtex_entries(master_bib_file, citekeys):
    """
    Extract bibtex entries from master_bib_file that have certain citekeys.
    Return the entries sorted by citekey.
    """
    if len(citekeys) == 0:
        return []
    with open(master_bib_file, "r", encoding="utf-8") as file:
        master_bib = file.read()
    bibtex_entries = []
    for citekey in citekeys:
        match = re.search(
            r"@.*?\{" + citekey + r"[\s\S]+?\n\}\n", master_bib, re.UNICODE
        )
        if match is None:
            print(f"-> Citekey '{citekey}' was not found in {master_bib_file}")
        else:
            bibtex_entries.append((citekey, match.group(0)))
    return [entry[1] for entry in sorted(bibtex_entries)]
| fbd036fee7ad89164eb375574a7672dac70f2b15 | 83,205 |
import math
def kr_utility(x: float, mult: float = 10000) -> float:
    """ A logarithmic utility function based on Kőszegi and Rabin (2006) """
    return mult * math.log(x)
| f455f7938a0f3ff4ba920d7991a6fa67207a10e0 | 83,210 |
def _safe_read(filepath):
    """Returns the content of the file if possible, None otherwise."""
    try:
        with open(filepath, 'rb') as f:
            return f.read()
    except (IOError, OSError):
        return None
| 4c5d32d4feff3c94e2411b072d5f03e51b08ecc8 | 83,212 |
def megabytes(x, pos):
    """Formatter for Y axis; input values are in bytes"""
    if x < 1024:
        return '%d B' % (x)
    elif x < 1024 * 1024:
        return '%1.0f KiB' % (x / 1024)
    else:
        return '%1.0f MiB' % (x / (1024 * 1024))
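# Illustrative usage sketch (added): `pos` is unused here; it only exists to
# satisfy the (value, position) callback signature of matplotlib's FuncFormatter.
assert megabytes(512, None) == '512 B'
assert megabytes(2048, None) == '2 KiB'
assert megabytes(3 * 1024 * 1024, None) == '3 MiB'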
| f8103a8b1f1c12957e1561ed4aab389a3532c69b | 83,217 |
def check_evals_initialized(conn):
    """Check that the evals table has been created in a sqlite3 database
    Parameters
    ----------
    conn : `sqlite3.Connection`
        connection to a sqlite3 database
    Returns
    -------
    is_created : `bool`
        `True` if table was created and `False` otherwise
    """
    cur = conn.execute(
        '''SELECT name FROM sqlite_master WHERE type='table' AND name=?''',
        ('evals',)
    )
    return cur.fetchone() is not None
| cfcee1ebe56edc5b8d9c0736d9d97a004761735f | 83,218 |
def str_contains_all(string, items):
    """ Takes a string and a list of strings and checks that all of the list items are in the string """
    for item in items:
        if item not in string:
            return False
    return True
| 264884e314571061e7a47c19203b3142cb30b0c9 | 83,219 |
import copy
import warnings
def defCol(name: str, typ: str, *coll_sets):
    """
    Legacy utility method for simplifying trust sets.
    >>> actual = defCol("a", "INTEGER", [1], [2], [1, 2, 3])
    >>> expected = ("a", "INTEGER", {1, 2, 3})
    >>> actual == expected
    True
    >>> actual = defCol("a", "INTEGER", 1, 2, 3)
    >>> expected = ("a", "INTEGER", {1, 2, 3})
    >>> actual == expected
    True
    >>> actual = defCol("a", "INTEGER", 1)
    >>> expected = ("a", "INTEGER", {1})
    >>> actual == expected
    True
    """
    if not coll_sets:
        trust_set = set()
    else:
        first_set = coll_sets[0]
        trust_set = copy.copy({first_set} if isinstance(first_set, int) else set(first_set))
        for ts in coll_sets[1:]:
            if isinstance(ts, int):
                ts_set = {ts}
            else:
                warnings.warn("Use of lists for trust sets is deprecated")
                ts_set = set(ts)
            trust_set |= ts_set
    return name, typ, trust_set
| 3c09c134a3f8b731cc4838802c660035b65a5871 | 83,222 |
def hamming(correct, observed):
    """
    Calculates hamming distance between correct code and observed code with possible errors
    Args:
        correct: the correct code as list (binary values)
        observed: the given code as list (binary values)
    Returns:
        distance: the hamming distance between correct and observed code
    """
    distance = 0
    for i in range(len(correct)):
        if correct[i] != observed[i]:
            distance += 1
    return distance
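# Illustrative usage sketch (added): two codewords differing in one position.
assert hamming([1, 0, 1, 1], [1, 1, 1, 1]) == 1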
| b77554125e5e7f963d7f896e34dab8d70a3cd824 | 83,228 |
import csv
def read_base_maps(base_maps):
    """
    Read all base map files and create a base map
    :param base_maps: list of filenames
    :return: a list like [("/bucket", "Server 1")]
    """
    results = []
    for filename in base_maps or []:
        with open(filename, 'r') as file:
            reader = csv.reader(file)
            for row in reader:
                if len(row) == 0:
                    continue
                if len(row) != 2:
                    raise ValueError(f"Read basemap row {row} has length {len(row)} (should be 2)")
                results.append((row[0], row[1]))
    return results
| 6bca5529401ba29851b47716163f22e2e8e9dbc5 | 83,233 |
def expand(path: list, conc_var: list) -> list:
    """
    expand( path=[1, 2, 3], conc_var=[['a', 'c'], ['b', 'c']] )
    ---> [[1, 'a', 2, 'c', 3], [1, 'b', 2, 'c', 3]]
    gives the detailed path!
    Parameters
    ----------
    path : list
        The ids of formulas that form a path.
    conc_var : list
        the connected variables between two formulas.
    Returns
    -------
    list
        The detailed path.
    """
    ft = []
    cv = [list(cc) for cc in list(conc_var)]
    for c in cv:
        w = []
        p = path.copy()
        # interleave formula ids and connecting variables from the back
        while c:
            w.append(p.pop())
            w.append(c.pop())
        w.append(p.pop())  # the first formula id; must sit outside the loop
        ft.append(w[::-1])
    return ft
| 33be48c324570d50ac62db2f25004f3ab8385fef | 83,240 |
from typing import Sequence
def map_context(index: int, context_count: int) -> Sequence[float]:
    """Get a one-hot encoding for the given context."""
    context_vector = [0.0 for _ in range(context_count)]
    context_vector[index] = 1.0
    return context_vector
| f959e03fde3aa143d56754a1f3a34687cda9081b | 83,247 |
def _request_get_json(response):
    """
    Get the JSON from issuing a ``request``, or try to produce an error if the
    response was unintelligible.
    """
    try:
        return response.json()
    except ValueError as e:
        return {"error": str(e)}
| 1b6a6d823c23f036ef3c2a06ed3a421544797bc5 | 83,249 |
def get_html_msg(color, tim, log_type, filename, lineno, msg):
    """
    converts the message to html
    Parameters
    ----------
    color : str
        the HTML color
    tim : str
        the time for the message
    log_type : str
        the message type
    filename : str
        the filename the message came from
    lineno : int
        the line number the message came from
    msg : str
        the message
    Returns
    -------
    html_msg : str
        the HTML message
    """
    # log_type, filename, lineno, msg
    html_msg = r'<font color="%s"> %s %s : %s:%i</font> %s <br>' % (
        color, tim, log_type, filename, lineno, msg.replace('\n', '<br>'))
    return html_msg
| 0673b455a45ef559b6c6d5fd634e5af6b4be7e38 | 83,259 |
def upper(series):
    """Transform all text to uppercase."""
    return series.str.upper()
| dbd5a069d4251aebf94c02d994c74ff4e012e649 | 83,266 |
def is_valid_move(data, obstacles, proposed_move):
    """Validates that we can perform the given action without hitting a wall, ourselves, or another snake"""
    if proposed_move["x"] < 0 or proposed_move["y"] < 0:
        return False
    board_height = data["board"]["height"]
    board_width = data["board"]["width"]
    if proposed_move["x"] == board_width or proposed_move["y"] == board_height:
        return False
    if proposed_move in obstacles:
        return False
    return True
| 955180a0adc95cc7c54a120f34ab9cc6025986b1 | 83,270 |
from typing import Mapping
from typing import Sequence
def iterate(mapping):
    """
    Attempt to iterate over `mapping` such that key-value pairs are yielded per iteration. For
    dictionaries and other mappings, this would be the keys and values. For lists and other
    sequences, this would be the indexes and values. For other non-standard object types, some
    duck-typing will be used:
    - If `mapping` has a callable ``mapping.items()`` attribute, it will be used.
    - If `mapping` has callable ``mapping.keys()`` and ``__getitem__`` attributes, then
      ``(key, mapping[key])`` will be used.
    - Otherwise, ``iter(mapping)`` will be returned.
    """
    if isinstance(mapping, Mapping) or callable(getattr(mapping, "items", None)):
        return mapping.items()
    if isinstance(mapping, Sequence):
        return enumerate(mapping)
    if callable(getattr(mapping, "keys", None)) and hasattr(mapping, "__getitem__"):
        return ((key, mapping[key]) for key in mapping.keys())
    return iter(mapping)
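# Illustrative usage sketch (added): the same call shape works for mappings and
# for sequences.
assert dict(iterate({'a': 1})) == {'a': 1}
assert list(iterate(['x', 'y'])) == [(0, 'x'), (1, 'y')]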
| 8b651ea8b239883f0323ab8542bd912319c78012 | 83,278 |
def parse_out_ips(message):
    """Given a message, parse out the ips in the answer"""
    ips = []
    for entry in message.answer:
        for rdata in entry.items:
            ips.append(rdata.to_text())
    return ips
| a75489b1223f2a799fa0d54c80b3ce2e27494e99 | 83,283 |
def smiles(sid):
    """ SMILES string from a species ID
    """
    smi = sid.split('_')[0]
    return smi
| 8c47d163d9110027f9fa47903ab1a1abbaa54fe6 | 83,288 |
def get_common_path(paths):
    """
    Find common path.
    For example, [/api/test, /api/hello] => /api
    :param paths: list of paths
    :return: string of common path
    """
    if len(paths) == 0:
        return '/'
    mini_paths = [path.strip('/').split('/') for path in paths]
    common_mini_paths = min(mini_paths)
    common_path = []
    for common_mini_path in common_mini_paths:
        # join with '/' so multi-segment prefixes are checked correctly
        check_path = '/'.join(common_path + [common_mini_path])
        exist_flag = True
        for path in paths:
            if not path.strip('/').startswith(check_path):
                exist_flag = False
                break
        if exist_flag:
            common_path.append(common_mini_path)
        else:
            break
    return '/' + '/'.join(common_path)
| ea5005cf1b74bf1cb75d59e7cd93721564bc08d8 | 83,289 |
def unit(v):
    """Return a unit vector"""
    return v / abs(v)
| ade07b729a99a3f1016e755ee72a60b402356a83 | 83,292 |
def get_master_slaves(server, options=None):
    """Return the slaves registered for this master. In order to ease the
    navigation through the result set, a named tuple is always returned.
    Please look up `SHOW SLAVE HOSTS` in the MySQL Manual for further
    details.
    :param server: MySQL Server.
    :param options: Define how the result is formatted and retrieved.
                    See :meth:`~mysql.fabric.server.MySQLServer.exec_stmt`.
    """
    if options is None:
        options = {}
    options["columns"] = True
    options["raw"] = False
    return server.exec_stmt("SHOW SLAVE HOSTS", options)
| ab4c5fc265a373e057551940f1b929e11d3bb63c | 83,297 |
def get_conf_property(conf, name, expected_type=None):
    """
    Check that the configuration properties dictionary contains the given configuration property and return its value.
    :param conf: Dictionary.
    :param name: Configuration property string.
    :param expected_type: Check that the given value has an expected type.
    :return: Configuration property value.
    """
    if name in conf:
        if expected_type and not isinstance(conf[name], expected_type):
            raise TypeError("Expect configuration property '{}' to be set with a '{}' value but it has type '{}'".
                            format(name, str(expected_type), str(type(conf[name]))))
        return conf[name]
    else:
        return None
| a72995fa5780d960aee626979f0c6a1277ef8234 | 83,300 |
import re
def indent(code, indentation):
    """
    Indent code by 'indentation' spaces.
    For example, indent('hello\nworld\n', 2) -> '  hello\n  world\n'.
    """
    if len(code) == 0:
        return code
    if isinstance(indentation, int):
        indentation = ' ' * indentation
    elif not isinstance(indentation, str):
        raise TypeError(f'Supports only int or str, got {type(indentation).__name__}')
    # Replace every occurrence of \n with \n followed by indentation,
    # unless the \n is the last character of the string or is followed by another \n.
    # We enforce the "not followed by ..." condition using negative lookahead (?!\n|$),
    # looking for end of string ($) or another \n.
    return indentation + re.sub(r'\n(?!\n|$)', '\n' + indentation, code)
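# Illustrative usage sketch (added): the trailing newline is deliberately left
# unindented by the negative lookahead.
assert indent('hello\nworld\n', 2) == '  hello\n  world\n'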
| b9f234f83dc12035bce730ac43f49463b1d98e8f | 83,302 |
def shQuote(text):
    """quote the given text so that it is a single, safe string in sh code.
    Note that this leaves literal newlines alone (sh and bash are fine with that, but other
    tools may mess them up and need to do some special handling on the output of this function).
    """
    return "'%s'" % text.replace("'", r"'\''")
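# Illustrative usage sketch (added): an embedded single quote is closed,
# escaped, and reopened, which is the standard sh idiom.
assert shQuote("it's") == r"'it'\''s'"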
| 123c0f73157f6d37c5b6ec1900579f394773cd92 | 83,303 |
import io
def load_vsp_pairs(path: str) -> dict:
    """
    Parses a specifically formatted VSP pairs file and returns the dictionary corresponding to the VSP pairs.
    :param path: The path of the file where the dictionary is stored.
    :return: The dictionary.
    """
    vsp_pairs = dict()
    with io.open(file=path, mode="r", encoding="utf-8") as src_file:
        for line in src_file:
            content = line.split(':', 1)
            vsp_pairs[tuple(content[0].split(','))] = content[1]
    return vsp_pairs
| cfe72778b6d9efb3b7f7cc88bdd867f88fba1a20 | 83,305 |
def listify(df, axis=1):
    """
    Condense text information across columns into a single list.
    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        Dataframe (or slice of dataframe) to condense along axis.
    axis : :class:`int`
        Axis to condense along.
    """
    return df.copy(deep=True).apply(list, axis=axis)
| a8361b3c44377a8f34d99b674269577a1104ab9c | 83,307 |
import re
def strip_nbsp(s):
    """ Replace non-breaking spaces (U+00A0) with regular spaces and then strip
    (beginning and end) spaces. An interior non-breaking space becomes an
    interior regular space."""
    return re.sub("\u00a0", " ", s).strip()
| 69d61b9da69e090521800a464b2ec4b695758e2c | 83,309 |
def find_first_entry(line, elements, start=0, not_found_value=-1):
    """
    Find the index of the earliest position inside the `line` of any of the strings in `elements`, starting from `start`.
    If none are found, return `not_found_value`.
    """
    first_entry = len(line)
    for e in elements:
        pos = line.find(e, start)
        if pos >= 0:  # 'pos > 0' would wrongly ignore matches at index 0
            first_entry = min(first_entry, pos)
    if first_entry == len(line):
        return not_found_value
    return first_entry
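# Illustrative usage sketch (added): the earliest match among the candidates
# wins; absent candidates simply do not contribute.
assert find_first_entry("abcdef", ["d", "b", "z"]) == 1
assert find_first_entry("abcdef", ["a"]) == 0
assert find_first_entry("abcdef", ["z"]) == -1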
| 171f0f1cbbc9fbaa3d65faacec7e6eca673eb378 | 83,311 |
def svmPredict(model, X):
    """ returns a vector of predictions using a trained SVM model.
    model is a svm model returned from svmTrain.
    X is a (m x n) matrix where each example is a row.
    pred is a vector of predictions of {0, 1} values.
    """
    # X: shape (n_samples, n_features)
    # For kernel="precomputed",
    # the expected shape of X is [n_samples_test, n_samples_train]
    pred = model.predict(X)
    return pred
| f9fd6e28855f2a1fcb646d0eef64e047ed43e355 | 83,313 |
def convert_eq_to_dict(equationstring):
    """
    Converts an equation string to a dictionary
    convert_eq_to_dict('1*Be12Ti->10*Be+1*Be2Ti+5*Be') ->
    {'products': {'Be': 15, 'Be2Ti': 1}, 'educts': {'Be12Ti': 1}}
    """
    eq_dict = {'products': {}, 'educts': {}}
    product_dict = {}
    educt_dict = {}
    eq_split = equationstring.split('->')
    products = eq_split[1].split('+')
    educts = eq_split[0].split('+')
    for product in products:
        p_list = product.split('*')
        product_dict[p_list[-1]] = int(p_list[0]) + product_dict.get(p_list[-1], 0)
    for educt in educts:
        e_list = educt.split('*')
        educt_dict[e_list[-1]] = int(e_list[0]) + educt_dict.get(e_list[-1], 0)
    eq_dict['products'] = product_dict
    eq_dict['educts'] = educt_dict
    return eq_dict
| e467daf79a30d09a1c535b87d8e08c90fa862498 | 83,315 |
def new(sym):
    """ Return the "new" version of symbol "sym", that is, the one
    representing "sym" in the post_state.
    """
    return sym.prefix('new_')
| 9d47dc6edf92fad115d92850e78b3eeeb43834e0 | 83,319 |
def parse_binvox_header(fp):
    """
    Read the header from a binvox file.
    Spec available:
    https://www.patrickmin.com/binvox/binvox.html
    Parameters
    ------------
    fp: file-object
        File like object with binvox file
    Returns
    ----------
    shape : tuple
        Shape of binvox according to binvox spec
    translate : tuple
        Translation
    scale : float
        Scale of voxels
    Raises
    ------------
    IOError
        If invalid binvox file.
    """
    line = fp.readline().strip()
    if hasattr(line, 'decode'):
        binvox = b'#binvox'
        space = b' '
    else:
        binvox = '#binvox'
        space = ' '
    if not line.startswith(binvox):
        raise IOError('Not a binvox file')
    shape = tuple(
        int(s) for s in fp.readline().strip().split(space)[1:])
    translate = tuple(
        float(s) for s in fp.readline().strip().split(space)[1:])
    scale = float(fp.readline().strip().split(space)[1])
    fp.readline()
    return shape, translate, scale
| 8ecf24aee98bd80270904f912cedab0dda384d0c | 83,321 |
def _remove_keys(obj, keys_to_remove):
    """Remove the given fields from a JSON-like object.
    Traverses `obj` and removes the given keys at any nesting depth.
    Examples:
        _remove_keys({'a': [{'a': 0, 'b': 1}], 'b': 2}, ['a']) ==
            {'b': 2}
        _remove_keys({'a': [{'a': 0, 'b': 1}], 'b': 2}, ['b']) ==
            {'a': [{'a': 0}]}
    Args:
        obj: A JSON-like object
        keys_to_remove: A list of keys
    Returns:
        A copy of `obj` with the given fields removed at any nesting depth.
    """
    if isinstance(obj, list):
        return [_remove_keys(item, keys_to_remove) for item in obj]
    elif isinstance(obj, dict):
        return {
            key: _remove_keys(value, keys_to_remove)
            for key, value in obj.items() if key not in keys_to_remove}
    else:
        return obj
| ccfd89df496ad29045608227c0533b559a204157 | 83,324 |
from typing import Any
def coalesce(*values: Any, default=None) -> Any:
    """Get the first non-``None`` value in a list of values"""
    return next((v for v in values if v is not None), default)
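# Illustrative usage sketch (added): falsy-but-not-None values such as 0 or ''
# are kept, which is the point of coalescing on `is not None`.
assert coalesce(None, 0, 1) == 0
assert coalesce(None, None, default='fallback') == 'fallback'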
| 46afe01e33071df235447896d902647ea20959b5 | 83,325 |
def generate_filename(in_path, output_dir):
    """
    Create the filename of the new test video clip.
    Parameters
    ----------
    in_path : str
        Full path of input video.
    output_dir : str
        Directory of output video.
    Returns
    -------
    out_path : str
        Full path of output video.
    """
    if in_path.count('.') >= 2:
        raise ValueError('Filename has multiple full stops')
    output_video = in_path.split('/')[-1].replace('.', '_test.')
    out_path = output_dir + output_video
    return out_path
| 63baa6fa4a8afc4368f4a3d093b063f7f4a966c7 | 83,326 |
def _get_fwxm_boundary(data, max_val):
    """
    Returns sample position and height for the first sample whose
    amplitude is at or below the specified value.
    If no such sample can be found, returns the position and value of the
    last sample seen.
    Note:
        For FWHM we assume that we start at the maximum.
    """
    for i, d in enumerate(data):
        if d <= max_val:
            return i, d
    return len(data) - 1, data[-1]
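# Illustrative usage sketch (added): scanning from the peak, the first sample
# at or below the threshold marks the boundary.
assert _get_fwxm_boundary([10, 8, 5, 3], 5) == (2, 5)
assert _get_fwxm_boundary([10, 8], 5) == (1, 8)  # fallback: last sample seen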
| d536da3b983540b8f2424d8580b0220c553f794e | 83,327 |
import string
import secrets
def generate_token(length=30, allowed_chars=''.join([string.ascii_letters, string.digits])):
    """Generates a random CSRF token
    :param length: Length of CSRF token, defaults to 30
    :param allowed_chars: Characters to use, defaults to ASCII letters and digits
    :return: Token string
    """
    # secrets.choice instead of random.choice: CSRF tokens need a
    # cryptographically secure source of randomness.
    return ''.join(secrets.choice(allowed_chars) for _ in range(length))
| 07c4a66149f6f7eccd51217462405620cd6b86b3 | 83,329 |
def household_as_of(reference_date, returning=None, return_expectations=None):
    """
    Return information about the household to which the patient belonged as of
    the reference date. This is inferred from address data using an algorithm
    developed by TPP (to be documented soon) so the results are not 100%
    reliable but are apparently pretty good.
    Options for `returning` are:
        pseudo_id: An integer identifier for the household which has no meaning
                   other than to identify individual members of the same
                   household (0 if no household information available)
        household_size: the number of individuals in the household (0 if no
                   household information available)
    Examples:
        household_id=patients.household_as_of(
            "2020-02-01", returning="pseudo_id"
        )
        household_size=patients.household_as_of(
            "2020-02-01", returning="household_size"
        ),
    """
    return "household_as_of", locals()
| cea12e916ed3e1fddd13eadf987abdd3d8ecb791 | 83,330 |
from typing import List
from typing import Tuple
def prepare_image_type_choices() -> List[Tuple[str, str]]:
    """
    Returns image type choices for the prompt.
    :return: list of 2-len tuples
    """
    return [
        ("Distribution only", "distribution"),
        ("Application only", "application"),
        ("All available images", "all"),
    ]
| 2e376bd94befc9a1a7d5c6a251d9b3d08de3207c | 83,331 |
from typing import Dict
from typing import List
import copy
def get_neighbors(state: Dict[int, List[int]]) -> List[Dict[int, List[int]]]:
    """given a state, return its neighbors
    for each processor, move its task to another processor and add it as
    a neighbor
    :param state: the root state
    :return: list of all neighbors
    """
    neighbors = []
    for source in range(len(state)):
        for task_id in range(len(state[source])):
            for destination in range(len(state)):
                # moving a task to its source is useless
                if destination == source:
                    continue
                # creating the neighbor
                neighbors.append(copy.deepcopy(state))
                # removing the task
                task = neighbors[-1][source].pop(task_id)
                # add it to the other processor
                new_tasklist = neighbors[-1][destination][:]
                new_tasklist.append(task)
                neighbors[-1][destination] = sorted(new_tasklist)
    return neighbors
| 049e5d6fedcdbb8dd506567c2201ab8dc50bff8d | 83,333 |
import io
import json
def load(filename):
    """Load a JSON object from file."""
    with io.open(filename, mode='r', encoding='utf-8') as file:
        return json.load(file)
| 7c578fac5e4d41a1afb9f2734ffef322f704e683 | 83,334 |
import re
def block_contains(pattern, cell):
    """Returns True for a non-empty notebook code block that contains pattern"""
    if cell['cell_type'] != 'code':
        return False
    src = cell['source']
    if len(src) == 0:
        return False
    check_all = [re.search(f'^.*{pattern}.*$', line, re.IGNORECASE) for line in src]
    if all(check is None for check in check_all):
        return False
    else:
        return True
| f1c24f21c653c0d58e1da6467c19c9d0670fc29d | 83,335 |
def get_allowed_tokens(config):
    """Return a list of allowed auth tokens from the application config"""
    return [token for token in (config.get('AUTH_TOKENS') or '').split(':') if token]
| e046e369ef949501deaf91f6629a768af9416c75 | 83,336 |
def calculate_q_debye_linear(eps_fluid, lambda_d, zeta):
    """
    Calculate the charge accumulated in the Debye layer
    (Ajdari, 2006)
    units: Coulombs
    Notes:
    """
    q = -eps_fluid * zeta / lambda_d
    return q
| 1b49d1cab7154496c8b342b9f349efe08f64f015 | 83,337 |
def largest_product_horizontally(matrix):
    """
    Computes the largest product horizontally (line by line) on a given matrix
    """
    largest_product = 1
    for line in range(0, matrix.shape[0]):
        for column in range(0, matrix.shape[1] - 3):
            product = int(matrix[line, column] *
                          matrix[line, column + 1] *
                          matrix[line, column + 2] *
                          matrix[line, column + 3])
            if product > largest_product:
                largest_product = product
    return largest_product
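# Illustrative usage sketch (added): assumes a numpy 2-D array, since the
# function indexes with matrix[line, column] and reads matrix.shape.
import numpy as np
m = np.array([[1, 2, 3, 4, 5],
              [9, 9, 9, 9, 1]])
assert largest_product_horizontally(m) == 9 * 9 * 9 * 9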
| 3cd7c17483729a17ba6c9fcc131e562286eb0528 | 83,338 |
def check_num_rows_of_parameter_array(param_array, correct_num_rows, title):
    """
    Ensures that `param_array.shape[0]` has the correct magnitude. Raises a
    helpful ValueError if otherwise.
    Parameters
    ----------
    param_array : ndarray.
    correct_num_rows : int.
        The int that `param_array.shape[0]` should equal.
    title : str.
        The 'name' of the param_array whose shape is being checked.
    Returns
    -------
    None.
    """
    if param_array.shape[0] != correct_num_rows:
        msg = "{}.shape[0] should equal {}, but it does not"
        raise ValueError(msg.format(title, correct_num_rows))
    return None
| fad20c61b1b972fcb483a349bc808463478ece1a | 83,342 |
def create_collection_filename(user: str, ext: str = "csv") -> str:
    """Return a filename for a collection."""
    return f"export_collection_{user}.{ext}"
| 5313a7abcadf8ecacef4a17758dbe40d32f96cd9 | 83,349 |
def get_item(dictionary, key):
    """
    :param dictionary: the dictionary where you want to access the data from
    :param key: the key of the dictionary where you want to access the data from
    :return: the content of the dictionary corresponding to the key entry, or None if the key does not exist
    """
    return dictionary.get(key, None)
| 5a17f4c733f327a83dfa43bba659a555c4d9e2f6 | 83,350 |
import math
def _int_32_value(client, value):
    """Converts a value to a protocol buffer Int32 wrapper.
    Args:
        client: A google.ads.google_ads.client.GoogleAdsClient instance.
        value: A number to wrap, truncated to the nearest integer.
    Returns:
        The value wrapped in a google.ads.googleads_v3.types.Int32Value.
    """
    int_32_val = client.get_type('Int32Value', version='v3')
    int_32_val.value = math.trunc(value)
    return int_32_val
| 48682ff6757d9a17d89856784fd51e3337e24e74 | 83,354 |
from typing import Dict
from typing import Any
def user_data() -> Dict[str, Any]:
    """Get user data to instantiate a User."""
    return dict(
        username="viniarck",
        first_name="Vinicius",
        last_name="Arcanjo",
        email="viniarck@gmail.com",
        password="sup3rs3cr3t!",
    )
| 6c730fd969baaa3350e78babbb398da35f50f53d | 83,365 |
def compute_moments_weights_pairs_std(neighbors, weights):
    """
    Computes the expectations of the local pair-wise test statistic.
    This version assumes variables are standardized,
    and so the moments are actually the same for all
    pairs of variables.
    """
    N = neighbors.shape[0]
    K = neighbors.shape[1]
    # Calculate E[G]
    EG = 0
    # Calculate E[G^2] from the x^2*y^2 terms
    EG2 = 0
    for i in range(N):
        for k in range(K):
            wij = weights[i, k]
            EG2 += (wij**2) / 2
    return EG, EG2
| 6ada14ecc23355b24181cb5a0352a52efbfee29a | 83,368 |
def parse_none(http_response, response):
    """If the body is not empty, convert it to a python object and set as the value of
    response.body. http_response is always closed if no error occurs.
    :param http_response: the http_response object returned by HTTPConnection.getresponse()
    :type http_response: httplib.HTTPResponse
    :param response: general response object which will be returned to the caller
    :type response: baidubce.BceResponse
    :return: always true
    :rtype bool
    """
    body = http_response.read()
    if body:
        response.body = body
    http_response.close()
    return True
| ea9f50fa1f7667a247c7be20914414c810a45c78 | 83,370 |
import json
def jsonify(data):
    """
    Type safe JSON dump
    """
    return json.dumps(data, default=str)
| 2207893d5801e2e355b0c7b04e7844869ea53fc7 | 83,378 |
def shrink(line, bound=50, rep='[...]'):
    """Shrinks a string, adding an ellipsis to the middle"""
    n = len(line)
    if n < bound:
        return line
    if bound <= len(rep):
        return rep
    k = bound - len(rep)
    # floor division keeps the slice indices integral on Python 3
    return line[:k // 2] + rep + line[-(k // 2):]
| 14e70c452d8d9fdb85c61b45449ea6a0ef91ab65 | 83,380 |
import functools
from typing import Callable
def decorate_validation_errors(func: Callable) -> Callable:
    """
    Decorator that wraps the output of `func` with separators if validation
    errors are found.
    """
    @functools.wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        # All conditions were met: let's not print anything around the output
        if all(value is None for value in args[0]):
            return
        print("=" * 60)
        func(*args, **kwargs)
        print("=" * 60)
    return wrapper
| ab949959dc0e9869e220110feae043278c859c94 | 83,382 |
from datetime import datetime
def is_datetime_aware(dt: datetime) -> bool:
    """Returns whether datetime `dt` is timezone-aware"""
    return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None
| e083bcc2d7e82e289c166fdfc901f1198023879e | 83,387 |
def conv(value, fromLow=0, fromHigh=0, toLow=0, toHigh=0, func=None):
    """Re-maps a number from one range to another. That is, a value of fromLow would get
    mapped to toLow, a value of fromHigh to toHigh, values in-between to values in-between, etc.
    Does not constrain values to within the range, because out-of-range values are sometimes
    intended and useful. The constrain() function may be used either before or after this
    function, if limits to the ranges are desired.
    Note that the "lower bounds" of either range may be larger or smaller than the "upper
    bounds", so the conv() function may be used to reverse a range of numbers, for example
        y = conv(x, 1, 50, 50, 1)
    The function also handles negative numbers well, so that this example
        y = conv(x, 1, 50, 50, -100)
    is also valid and works well.
    :param value: the number to map
    :param fromLow: the lower bound of the value's current range
    :param fromHigh: the upper bound of the value's current range
    :param toLow: the lower bound of the value's target range
    :param toHigh: the upper bound of the value's target range
    :param func: function to be applied to the result
    :return: The mapped value."""
    result = (value - fromLow) * (toHigh - toLow) / (fromHigh - fromLow) + toLow
    if func is None:
        return result
    else:
        return func(result)
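# Illustrative worked example (added): mapping 25 from the range [1, 50] onto
# the reversed range [50, 1] gives (25 - 1) * (1 - 50) / (50 - 1) + 50 = 26.
assert conv(25, 1, 50, 50, 1) == 26
assert conv(0.5, 0, 1, 0, 100) == 50.0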
| b24adeb98096be0a1421a0471ac501731fe48a03 | 83,390 |
def get_app_details_hr(app_dict):
    """
    Prepare application detail dictionary for human readable in 'risksense-get-app-detail' command.
    :param app_dict: Dictionary containing application detail.
    :return: List containing application detail dictionary.
    """
    return [{
        'Address': app_dict.get('uri', ''),
        'Name': app_dict.get('name', ''),
        'Network Name': app_dict.get('network', {}).get('name', ''),
        'Network Type': app_dict.get('network', {}).get('type', ''),
        'Discovered On': app_dict.get('discoveredOn', ''),
        'Last Found On': app_dict.get('lastFoundOn', '')
    }, {}]
| 7b377ad3b98224c1ec14ad0ae481e1179ecf5a8d | 83,393 |
def matrix_index(user):
    """
    Returns the keys associated with each axis of the matrices.
    The first key is always the name of the current user, followed by the
    sorted names of all the correspondents.
    """
    other_keys = sorted([k for k in user.network.keys() if k != user.name])
    return [user.name] + other_keys
| 52e1c86de67bf4ceac432d931b5f65798b8b7c4f | 83,395 |
import torch
def cxcy_to_gcxgcy(cxcy, priors_cxcy):
    """
    Encode bounding boxes (that are in center-size form) w.r.t. the corresponding prior boxes (that are in center-size form).
    For the center coordinates, find the offset with respect to the prior box, and scale by the size of the prior box.
    For the size coordinates, scale by the size of the prior box, and convert to the log-space.
    In the model, we are predicting bounding box coordinates in this encoded form.
    :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_priors, 4)
    :param priors_cxcy: prior boxes with respect to which the encoding must be performed, a tensor of size (n_priors, 4)
    :return: encoded bounding boxes, a tensor of size (n_priors, 4)
    """
    # The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical
    # They are for some sort of numerical conditioning, for 'scaling the localization gradient'
    # See https://github.com/weiliu89/caffe/issues/155
    return torch.cat(
        [
            (cxcy[:, :2] - priors_cxcy[:, :2]) /
            (priors_cxcy[:, 2:] / 10),  # g_c_x, g_c_y
            torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5
        ],
        1)
| e2d2f12709975c6151d3b78b1a598488baea590e | 83,398 |
def multi_headers(data, column_id):
    """Get column names, except unique id."""
    headers = [column for column in data.columns if column != column_id]
    return headers
| 1c7a0e00ace233207add1d7f85a8fc16853a319b | 83,401 |
def interp_to_order(interp):
    """Convert interpolation string to order."""
    if isinstance(interp, int):
        return interp
    order_map = {None: 0, "nearest": 0, "linear": 1, "quadratic": 2, "cubic": 3}
    return order_map.get(interp, None)
| db18a038509c1283c57f6ec13a43cb27225d3887 | 83,405 |
import hashlib
def hash_document(document):
    """Returns hash of document (expects bytes)"""
    return hashlib.sha1(document).hexdigest()
| 5fa682f9d2e1e5bc2be6249c65c415ee20e868ce | 83,409 |
def _colname_to_fname(colname):
    """
    Turn column name 'colname' into a file name, replacing problematic characters that browsers
    may refuse.
    """
    return colname.replace("%", "_pcnt").replace("/", "-to-")
| dab8e174d245b908115575755b1c23b53b14fee7 | 83,411 |
def get_device_str(device_id, num_gpus):
    """Return a device string for multi-GPU setup."""
    if num_gpus == 0:
        return "/cpu:0"
    device_str_output = "/gpu:%d" % (device_id % num_gpus)
    return device_str_output
| dec2fd1b61916020c2b118714fe4997d00f753a3 | 83,417 |
def first(list_to_summarize):
    """Get the first item in a list.
    """
    return list_to_summarize[0]
| c2297cab3af4566687b8fe2b52f30fe475072196 | 83,418 |
def is_selected(context, name, value):
    """
    Return "selected" if current request URL has a matching value for the query parameter.
    Used to add the "selected" attribute to a `<select><option>` based on the URL e.g.
    <option value="member" {% is_selected "role" "member" %}>{% trans "I am a member" %}</option>
    """
    return "selected" if context["request"].GET.get(name) == value else ""
| fd0c1ed125e217df4cab9f3f4eaa58b744d6c5a5 | 83,419 |