content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def construct_inzending_sent_query(graph_uri, inzending_uri, verzonden):
    """
    Construct a SPARQL query for marking a bericht as received by the other party (and thus 'sent' by us)
    :param graph_uri: string, URI of the graph to insert into
    :param inzending_uri: URI of the inzending (bericht) we would like to mark as sent.
    :param verzonden: ISO-string representation of the datetime when the message was sent
    :returns: string containing SPARQL query
    """
    # BUG FIX: the original query used the xsd: prefix without declaring it,
    # which is invalid SPARQL on strict endpoints.
    q = """
        PREFIX meb: <http://rdf.myexperiment.org/ontologies/base/>
        PREFIX nmo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nmo#>
        PREFIX prov: <http://www.w3.org/ns/prov#>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        INSERT DATA {{
            GRAPH <{0}> {{
                <{1}> nmo:receivedDate "{2}"^^xsd:dateTime .
            }}
        }}
        """.format(graph_uri, inzending_uri, verzonden)
    return q
def load_sequences(contigFile, delimiter):
    """Load FASTA sequences into a dict.

    Parameters
    ----------
    contigFile : str
        File where the seqs are stored (FASTA format, '>' headers).
    delimiter : str
        If non-empty, sequence names are truncated at the first occurrence
        of this delimiter; if "", the whole header (minus '>') is the name.

    Returns
    -------
    dict
        Mapping of sequence name to the upper-cased concatenated sequence.
    """
    seqs = {}
    name = ""
    s = []
    # BUG FIX: use a context manager so the file handle is always closed;
    # the original iterated an anonymous open() and leaked the handle.
    with open(contigFile) as handle:
        for line in handle:
            line = line.strip()
            if ">" in line:
                # A new header: flush the previous record first.
                if name != "":
                    seqs[name] = "".join(s)
                if delimiter == "":
                    name = line.replace(">", "")
                else:
                    name = line.replace(">", "").split(delimiter)[0]
                s = []
            else:
                s.append(line.upper())
    # Flush the final record, if any sequence lines were read.
    if len(s) != 0:
        seqs[name] = "".join(s)
    return seqs
import math
def root_mean_square(x):
    """
    Root mean square (RMS) is the square root of the sum of the squares of values in a list
    divided by the length of the list. It is a mean function that measures the magnitude
    of values in the list regardless of their sign.
    Args:
        x: A list or tuple of numerical objects.
    Returns:
        A float of the root mean square of the list.
    Examples:
        >>> root_mean_square([-1, 1, -1, 1])
        1.0
        >>> root_mean_square((9, 4))
        6.96419413859206
        >>> root_mean_square(9)
        Traceback (most recent call last):
        ...
        TypeError: root_mean_square() expects a list or a tuple.
    """
    if type(x) not in [list, tuple]:
        raise TypeError('root_mean_square() expects a list or a tuple.')
    # Mean of the squared values (the original built and discarded an extra
    # list first); raises ZeroDivisionError for empty input, as before.
    mean_square = sum(pow(num, 2) for num in x) / len(x)
    return math.sqrt(mean_square)
from typing import Union
def spec_append(lst: Union[tuple, list], obj: object):
    """
    @brief Return a NEW list holding the items of ``lst`` followed by ``obj``.
    Mutating the original list with ``append`` would make two dicts built via
    ``copy()`` share one list object; returning a fresh list avoids that
    aliasing problem, which is why this helper exists.
    @param lst The list (or tuple) value.
    @param obj The object to append.
    """
    extended = list(lst)
    extended.append(obj)
    return extended
import torch
import math
def complexity(dstrfs, batch_size=8):
    """
    Measure general complexity of dSTRF function.

    Complexity is the sum of each channel's singular values (of the
    time-by-(lag*frequency) matrix) normalized by the largest one, so a
    rank-1 (linear) dSTRF has complexity 1.

    Arguments:
        dstrfs: tensor of dSTRFs with shape [time * channel * lag * frequency]
        batch_size: maximum number of channels per SVD call, to bound memory.
    Returns:
        complexity: nonlinear function complexity, tensor of shape [channel]
    """
    tdim, cdim, ldim, fdim = dstrfs.shape
    if cdim > batch_size:
        # Recurse over channel batches and concatenate per-batch results.
        return torch.cat([
            complexity(
                dstrfs[:, k*batch_size:(k+1)*batch_size],
                batch_size=batch_size
            ) for k in range(math.ceil(cdim / batch_size))
        ])
    dstrfs = dstrfs.transpose(0, 1).reshape(cdim, tdim, ldim*fdim)
    sing_vals = torch.linalg.svdvals(dstrfs.float())
    # BUG FIX: the original passed the numpy-style keyword ``keepdims`` to
    # Tensor.max; the documented PyTorch keyword is ``keepdim``.
    normalized = sing_vals / sing_vals.max(dim=1, keepdim=True)[0]
    return normalized.sum(dim=1).cpu()
def adjust_colour_brightness(rgb, factor):
    """
    Adjust the brightness of an RGB colour.

    A positive ``factor`` moves each channel toward 255 (brighter); a
    negative ``factor`` scales each channel toward 0 (darker). Channel
    values are returned as floats and are not rounded or clamped.

    Parameters
    ----------
    rgb : tuple
        (Red, Green, Blue)
    factor : float
        Fractional amount to change brightness by.

    Returns
    -------
    tuple
    """
    lift = 255 * max([0, factor])
    return tuple(channel + lift - abs(factor * channel) for channel in rgb)
def rubygems_homepage_url(name, version=None):
    """
    Return a Rubygems.org homepage URL given a ``name`` and optional
    ``version``, or None if ``name`` is empty.
    For example:
    >>> url = rubygems_homepage_url(name='mocha', version='1.7.0')
    >>> assert url == 'https://rubygems.org/gems/mocha/versions/1.7.0'
    >>> url = rubygems_homepage_url(name='mocha')
    >>> assert url == 'https://rubygems.org/gems/mocha'
    """
    if not name:
        return
    base = f'https://rubygems.org/gems/{name}'
    if not version:
        return base
    # Drop surrounding whitespace, then surrounding slashes, from version.
    cleaned = version.strip().strip('/')
    return f'{base}/versions/{cleaned}'
def check_Nbyzan(opts, P):
    """
    Check and get the number of Byzantine machines that
    we are going to simulate.

    Parameters :
    -----------
    opts : list
        Options passed by the command prompt as (flag, value) pairs;
        the first option's value carries the requested count.
    P : int
        Total number of machines (nodes or workers).
        1 coordinator and the remaining are workers.

    Return :
    -------
    n_byzantines : int (entire natural)
        Number of byzantine machines that we are going to simulate.

    Raises
    ------
    ValueError
        If the count is negative or not less than the number of workers.
    """
    # BUG FIX: the original set a default of 0 for empty opts but then
    # unconditionally indexed opts[0][1], raising IndexError; only parse
    # the option value when an option was actually supplied.
    if len(opts) == 0:
        n_byzantines = 0
    else:
        n_byzantines = int(opts[0][1])
    if n_byzantines < 0 or n_byzantines > P - 1:
        raise ValueError("Number of byzantine must be an integer "
                         "< number of workers or >= 0")
    return n_byzantines
def sig_code(p_value):
    """Create a significance code in the style of R's lm.

    Arguments
    ---------
    p_value : float on [0, 1]

    Returns
    -------
    str
    """
    assert 0 <= p_value <= 1, 'p_value must be on [0, 1]'
    # Thresholds and codes mirror R's printCoefmat legend; the first
    # threshold the p-value falls under wins.
    for threshold, code in ((0.001, '***'), (0.01, '**'),
                            (0.05, '*'), (0.1, '.')):
        if p_value < threshold:
            return code
    return ' '
import yaml
def parse_yaml(path):
    """Parse a YAML config file to a dictionary.

    :param path: path of the YAML file to read.
    :return: the parsed document (usually a dict).
    :raises yaml.YAMLError: if the file is not valid YAML.
    """
    with open(path, 'r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError:
            # Idiom fix: a bare ``raise`` re-raises the active exception with
            # its traceback intact; the original ``raise (exc)`` was redundant.
            raise
def FindShortSourceChannels(these_source_nodes,threshold_length):
    """This function gets the list of sources that are LONGER than a threshold value.

    NOTE(review): despite the function name, sources *shorter* than the
    threshold are filtered OUT and only the longer ones are returned — the
    original docstring said the opposite; confirm intent against callers.

    Args:
        these_source_nodes (dict): A dict from the FindSourceInformation module
        threshold_length (float): The threshold of chi length of the source segment
    Return:
        long_sources: A list of keys of sources with "SourceLength" above the threshold
    Author: SMM
    """
    long_sources = []
    for key in these_source_nodes:
        # Keep only sources whose chi length exceeds the threshold.
        if these_source_nodes[key]["SourceLength"] > threshold_length:
            long_sources.append(key)
    return long_sources
def split_email(s, h):
    """Given a sender email s and a HELO domain h, create a valid tuple
    (l, d) local-part and domain-part.
    Examples:
    >>> split_email('', 'wayforward.net')
    ('postmaster', 'wayforward.net')
    >>> split_email('foo.com', 'wayforward.net')
    ('postmaster', 'foo.com')
    >>> split_email('terry@wayforward.net', 'optsw.com')
    ('terry', 'wayforward.net')
    """
    if not s:
        # No sender at all: fall back to postmaster at the HELO domain.
        return 'postmaster', h
    local, sep, domain = s.partition('@')
    if not sep:
        # Bare domain (no '@'): treat the whole string as the domain.
        return 'postmaster', s
    # Empty local part ('@example.com') also falls back to postmaster.
    return (local or 'postmaster', domain)
def read_requirements_file(requirements_file: str):
    """ Read the requirements file and return requirements.

    This reads a format similar to requirements.txt and then returns the
    requirements, skipping blank lines and '#' comment lines.
    """
    with open(requirements_file) as fp:
        stripped = (raw.strip() for raw in fp)
        return [pkg for pkg in stripped if pkg and not pkg.startswith('#')]
def GenerateStringTable(symbol_names):
    """Generate the string table that corresponds to a list of symbol names.
    Args:
        symbol_names: List of input symbol names.
    Returns:
        A (string_table, symbol_offsets) tuple, where |string_table| is the
        actual string table (terminated by two '\0' chars), and |symbol_offsets|
        is a list of starting offsets for each symbol inside the table.
    """
    symbol_offsets = []
    # PERF FIX: collect pieces and join once; repeated ``str +=`` in the
    # original was quadratic in total table size.
    pieces = ["\0"]
    next_offset = 1
    for symbol in symbol_names:
        symbol_offsets.append(next_offset)
        pieces.append(symbol)
        pieces.append("\0")
        next_offset += len(symbol) + 1
    pieces.append("\0")
    return "".join(pieces), symbol_offsets
def has_single_liaisons(liaisons):
    """Checks that liaisons (a list of 1's and 0's) contains no consecutive
    liaisons (adjacent 1's).

    :param liaisons: List of possible liaisons to apply per phonological group
    :return: True if no consecutive liaisons, False otherwise
    :rtype: bool
    """
    for current, following in zip(liaisons, liaisons[1:]):
        if current == 1 and following == 1:
            return False
    return True
import collections
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> pwnlib.context._longest(data) == data
True
>>> for i in pwnlib.context._longest(data):
... print i
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True)) | 56b6efe5ae544f4026c462893d54fb626b7009b4 | 96,503 |
def load_vocab(vocab_path):
    """ Load the vocabulary used for converting text into numeric format and
    lookup embeddings.

    Each line is expected to look like ``word,index``; later duplicates of
    a word overwrite earlier entries, as in a plain dict-building loop.
    """
    with open(vocab_path, 'r') as fp:
        return {fields[0]: int(fields[1])
                for fields in (line.split(",") for line in fp)}
import base64
def b64_decode(value: bytes) -> bytes:
    """
    URL safe base 64 decoding of a value.

    Missing '=' padding is tolerated and restored before decoding.

    :param value: bytes
    :return: bytes
    """
    padding_needed = -len(value) % 4
    padded = value + b"=" * padding_needed
    return base64.urlsafe_b64decode(padded)
def map_data_values_to_header(header, line):
    """
    Maps tab-separated data values in 'line' to the column headers.

    Values are whitespace-stripped; surplus headers or values are dropped
    by zip's shortest-input behavior.
    """
    values = [field.strip() for field in line.split("\t")]
    return dict(zip(header, values))
def part2(data):
    """Return how often the sum of 3 consecutive numbers increases.

    Adjacent 3-number windows share two elements, so comparing their sums
    reduces to comparing the elements three positions apart.
    """
    numbers = [int(line) for line in data.splitlines()]
    return sum(earlier < later for earlier, later in zip(numbers, numbers[3:]))
def get_assigned_rids_from_vehplan(vehicle_plan):
    """ Return the list of assigned request ids from the vehicle plan.

    :param vehicle_plan: corresponding vehicle plan object (or None)
    :return: list of request ids that are part of the vehicle plan;
        an empty list when no plan is given
    """
    return [] if vehicle_plan is None else list(vehicle_plan.pax_info.keys())
def find_median(arr):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/find-the-median/problem

    Return the median of the array: the middle element after sorting (for
    even-length input this picks the lower of the two middle values).

    Args:
        arr: list of integers
    Returns:
        int: the value of the median in the array
    """
    ordered = sorted(arr)
    middle = (len(ordered) - 1) // 2
    return ordered[middle]
def load_vocabulary(fn):
    """Read the file fn and return a set containing all the words
    of the vocabulary (one word per line, surrounding whitespace stripped)."""
    with open(fn) as f:
        return {line.strip() for line in f}
from typing import Sequence
from typing import Callable
from typing import Any
import shutil
def requires_executable(executables: Sequence[str]) -> Callable[..., Callable]:
    """Decorator factory to control optional executables.

    Args:
        executables: Names of executables that must exist on PATH.
    Returns:
        decorator: Either 'noop_decorator' that returns the original function
        or 'error_decorator' that replaces it with a stub raising an OSError
        listing the absent executables.
    """
    # Collect the executables that cannot be found on PATH up front.
    missing = [exe for exe in executables if not shutil.which(exe)]

    def noop_decorator(function: Callable[..., Any]) -> Callable[..., Any]:
        """Returns function unchanged."""
        return function

    def error_decorator(function: Callable[..., Any]) -> Callable[..., Any]:
        """Replaces function with one that raises OSError when called."""
        def error(*args, **kwargs) -> OSError:
            message = "Required executables not found in path:"
            for exe in missing:
                message += f" {exe}"
            raise OSError(message)
        return error

    return error_decorator if missing else noop_decorator
def parse_input_descriptor(descriptors):
    """ Validate an input-descriptor dictionary.

    ``None`` is replaced by an empty dict; an actual dict passes through
    unchanged; anything else raises.

    Args:
        descriptors(dict/None): the descriptor dictionary
    Returns:
        dict: descriptor dictionary
    Raises:
        ValueError: if descriptors is neither None nor a dict.
    """
    if descriptors is None:
        return {}
    if not isinstance(descriptors, dict):
        raise ValueError('Descriptors must be dictionaries!')
    return descriptors
def get_error(intercept, slope, points):
    """Compute the mean squared error of a line over a set of points.

    :intercept: y-intercept of the line passing through a set of points
    :slope: slope of the equation represented as m
    :points: sequence of objects with .x and .y coordinates
    :returns: mean of the squared vertical distances from the line
    """
    total = sum((point.y - (slope * point.x + intercept)) ** 2
                for point in points)
    return total / float(len(points))
def MakeImportStackMessage(imported_filename_stack):
    """Make a (human-readable) message listing a chain of imports. (Returned
    string begins with a newline (if nonempty) and does not end with one.)"""
    pairs = zip(imported_filename_stack[1:], imported_filename_stack)
    lines = ["\n  %s was imported by %s" % (child, parent)
             for (child, parent) in pairs]
    return ''.join(reversed(lines))
def is_gold_user(user):
    """Return a truthy value if the user is a Gold supporter.

    For authenticated users the result is the first non-zero supporter
    count (`gold` then `goldonce`); unauthenticated users yield False.
    """
    if not user.is_authenticated():
        return False
    return user.gold.count() or user.goldonce.count()
import re
def _ngrams(string, n=3):
"""
Calculate n-grams for the input string.
"""
string = re.sub(r"[,-./]|\sBD", r"", string)
ngrams = zip(*[string[i:] for i in range(n)])
return ["".join(ngram) for ngram in ngrams] | 11d332ef62f1a186130d55c08195e90193ebb782 | 96,539 |
def resize_image(img, new_width):
    """Resize an image opened via PIL's Image.open().

    Maintains aspect ratio: the height is scaled by the same factor
    applied to the width.

    Parameters
    ----------
    img : object
        A PIL Image (anything exposing ``.size`` and ``.resize()``).
    new_width : int

    Returns
    -------
    object
        A PIL Image
    """
    original_width, original_height = img.size
    scale_factor = new_width / original_width
    scaled_height = original_height * scale_factor
    return img.resize((round(new_width), round(scaled_height)))
def updated_dict(dic1, dic2):
    """Return dic1 updated (in place) with dic2 if dic2 is not None.

    Example
    -------
    >>> my_dict = updated_dict({"size": 7, "color": "r"}, some_defaults_dict)
    """
    if dic2 is None:
        return dic1
    dic1.update(dic2)
    return dic1
def get_filenames(spec):
    """Return the list of filenames from an assignment spec's 'files' entries."""
    filenames = []
    for entry in spec['files']:
        filenames.append(entry['filename'])
    return filenames
def _verbose_printer(verbose):
"""
Creates a drop-in replacement for the print() function that only prints if the variable "verbose" is true
Args:
verbose: Whether to turn on verbose printing or not
Returns:
A function that only prints if `verbose` was set to true.
"""
if verbose:
def inner(string):
print(string)
else:
def inner(string):
pass
return inner | 43fa404d6f49fc5953e5563edb209a68e3623713 | 96,557 |
import struct
def vox_chunk( id: bytes, content: bytes, children: bytes ) -> bytes:
    """Produces the chunk structure. Every chunk has a 4-byte long identifier
    (e.g. b'SIZE'), optional content, and optional child chunks; the payload
    and child chunks are constructed by external functions, this function
    packs them together with their little-endian byte counts.

    Example
    -------
    >>> vox_chunk( b'SIZE', vox_size_content(10,10,10), b'' )
    """
    assert( len( id ) == 4 )
    # Header: 4-byte id, then content length and children length as <II>.
    header = struct.pack( '<II', len( content ), len( children ) )
    return bytes( id ) + header + bytes( content ) + bytes( children )
import re
def strip_comments(x: str):
    """Remove SQL line comments ('-- ...') from a string.

    Only comments terminated by a newline are removed; a trailing comment
    on the final line (no newline) is left untouched.
    """
    comment = re.compile(r"--.*?\n")
    return comment.sub("\n", x)
def binary_entropy(x):
    """Calculate entropy for a list of binary random variables.

    :param x: (torch tensor) the probability of each variable being 1.
    :return: (entropy, entropy.sum()) — elementwise entropy, with the
        degenerate probabilities 0 and 1 contributing exactly zero.
    """
    p = x
    q = 1 - x
    entropy = -(p * p.log2() + q * q.log2())
    # 0*log2(0) is NaN in floating point; define it as 0 at p in {0, 1}.
    entropy[p * q == 0] = 0
    return entropy, entropy.sum()
import math
def divide(string, length):
    """
    Divides a sequence into multiple slices of equal length.
    If there is not enough for the last slice to be equal,
    it will simply use the rest of the sequence.
    Can also be used for things like lists and tuples.
    :param string: sequence to be divided.
    :param length: length of each division.
    :returns: list containing the divided slices.
    Example:
        >>>> a = divide('Hello World!', 2)
        >>>> print(a)
        ['He', 'll', 'o ', 'Wo', 'rl', 'd!']
    """
    slice_count = int(math.ceil(len(string) / length))
    return [string[index * length:(index + 1) * length]
            for index in range(slice_count)]
def visw(t, p, ps):
    """Viscosity of liquid water as a function of temperature (deg C) and
    pressure and saturation pressure (Pa).

    NOTE(review): the constants look like an empirical correlation —
    confirm the source reference and the units of the result.
    """
    exponent = 247.8 / (t + 133.15)
    phi = 1.0467 * (t - 31.85)
    pressure_correction = 1.0 + phi * (p - ps) * 1.0e-11
    return 1.0e-7 * pressure_correction * 241.4 * 10.0 ** exponent
import re
def output_dictionary(filename):
""" str -> dict of {int: int}
Read from the file, and output a dictionary where
the keys are word lengths, and the values are the
number of words that correspond to that length.
"""
dictionary = {}
file = open(filename)
for line in file:
line = line.rstrip()
word_array = re.split("\W+", line)
for word in word_array:
if not word.isalpha():
continue
if len(word) not in dictionary:
dictionary[len(word)] = 0
dictionary[len(word)] = dictionary[len(word)] + 1
return dictionary | e2ad028bbcd6ff7cbaf2778ef6885156482f8bca | 96,571 |
def is_from_webpack(request) -> bool:
    """Check if the request was forwarded from webpack.

    True exactly when the X-From-Webpack header is the string "true".
    """
    return request.META.get("HTTP_X_FROM_WEBPACK", None) == "true"
def w(a, cosmo):
    """
    Effective equation of state parameter: w(a) = w0 + wa * (1 - a),
    with w0 and wa read from the ``cosmo`` dict.
    """
    w0 = cosmo['w0']
    wa = cosmo['wa']
    return w0 + wa * (1 - a)
def fresh(seed, names):
    """Generate a new name that is not in `names` starting with `seed`.

    Candidates are seed+"1", seed+"2", ...; the first unused one wins.
    """
    counter = 1
    candidate = seed + str(counter)
    while candidate in names:
        counter += 1
        candidate = seed + str(counter)
    return candidate
def format_string(string, *args):
    """
    Format & return a string.

    With no extra args the template is returned untouched; otherwise it is
    passed through str.format with the given positional args.
    """
    if args:
        return string.format(*args)
    return string
def divAdd5(a, b):
    """Divide a by b, add 5 and return the result."""
    quotient = a / b
    return quotient + 5
def parse_colon_dict(data):
    """
    Parse colon separated values into a dictionary, treating lines
    lacking a colon as continuation lines.
    Any leading lines without a colon will be associated with the key
    ``None``.
    This is the format output by ``rpm --query`` and ``dpkg --info``.
    :param data: String data to parse (note: str, not bytes — the split
        uses a text ':').
    :return: A ``dict`` containing the parsed data.
    """
    result = {}
    current_key = None
    for line in data.splitlines():
        pieces = [piece.strip() for piece in line.split(':', 1)]
        if len(pieces) == 2:
            current_key, value = pieces
            result[current_key] = value
        else:
            # Continuation line: append it to the most recent key's value.
            result[current_key] = result.get(current_key, '') + pieces[0]
    return result
def leapLanePenalty(l_, l):
    """
    Returns the penalty of leaping from lane l_ to l: zero when both are
    the same lane, otherwise proportional to the lane distance.
    """
    return 0 if l_ == l else abs(l_ - l)
def calcXa(x_ae, x_e):
    """
    Calculate the asteroid position vector as the elementwise sum of the
    two input vectors.
    Parameters
    ----------
    x_ae : `~numpy.ndarray` (3)
        Topocenter to asteroid position vector in arbitrary units.
    x_e : `~numpy.ndarray` (3)
        Topocentric position vector in same units as x_ae.
    Returns
    -------
    x_a : `~numpy.ndarray` (3)
        Asteroid position vector in units of x_ae.
    """
    x_a = x_ae + x_e
    return x_a
from typing import List
def split_evenly(number: int, count: int) -> List[int]:
    """Return mostly equal parts.

    Example: number = 11, count = 3, result = [4, 4, 3]

    The larger parts (quotient + 1) come first, then the smaller ones;
    the parts always sum to ``number``.
    """
    quotient, remainder = divmod(number, count)
    return [quotient + 1] * remainder + [quotient] * (count - remainder)
from bisect import bisect_left
from typing import List
def find_nearest_wavelength( sorted_wavelengths: List[ float ], wavelength: float ) -> float:
    """
    Finds the value of the nearest wavelength in the sorted (ascending)
    list sorted_wavelengths; exact ties go to the smaller neighbour.
    :param sorted_wavelengths: List of wavelengths to parse
    :type sorted_wavelengths: list
    :param wavelength: Value of interest
    :type wavelength: float
    :return: Nearest value to wavelength contained within sorted_wavelengths
    :rtype: float
    """
    index = bisect_left( sorted_wavelengths, wavelength )
    if index == 0:
        return sorted_wavelengths[ 0 ]
    if index == len( sorted_wavelengths ):
        return sorted_wavelengths[ -1 ]
    lower = sorted_wavelengths[ index - 1 ]
    upper = sorted_wavelengths[ index ]
    # Pick the strictly closer neighbour; ties favour ``lower``.
    return upper if upper - wavelength < wavelength - lower else lower
def log_to_linear_amp(x, arange=(-48., 0.)):
    """
    Convert a 0-1 log-scaled signal (whose 0 and 1 levels are defined by
    ``arange``) to linear amplitude.
    Parameters
    ----------
    x : np.array (or scalar)
        Input signal that ranges from 0. to 1.
    arange : tuple[float]
        The range of the input in dB
    Returns
    -------
    x_linear : np.array
        Linear-scaled x; inputs of exactly 0 map to exactly 0.
    Examples
    --------
    >>> log_to_linear_amp(np.array([1.]))
    array([ 1.])
    >>> log_to_linear_amp(0., arange=(-6., 0.))
    0.0
    """
    low, high = arange
    decibels = x * (high - low) + low
    # dB -> linear amplitude; the (x > 0) mask pins zero inputs to zero.
    return (10.0 ** (decibels / 20.)) * (x > 0.)
from paho.mqtt.matcher import MQTTMatcher
def _match_topic(subscription: str, topic: str) -> bool:
    """Test if ``topic`` matches the MQTT ``subscription`` filter."""
    # pylint: disable=import-outside-toplevel
    matcher = MQTTMatcher()
    matcher[subscription] = True
    # iter_match yields once per matching filter; an empty iterator
    # means the topic does not match.
    for _ in matcher.iter_match(topic):
        return True
    return False
import functools
import operator
def product(data, start=1):
    """product(iterable [, start]) -> product of numbers
    Return the product of ``data``, a sequence or iterator of numbers:
    >>> product([2.0, 1.5, 3.0, 0.25])
    2.25
    If optional argument ``start`` is given, the total is multiplied by it.
    If ``data`` is empty, ``start`` (defaulting to 1) is returned.
    """
    result = start
    for value in data:
        result = result * value
    return result
    # Don't be tempted to do anything clever with summation of logarithms,
    # since that ends up *much* less accurate.
import requests
import json
def post_request(url: str, headers: dict, path: str, data: dict)->dict:
    """
    Perform an HTTP POST request.
    :param url: the base url to POST to
    :param headers: a dict containing the ZT auth header
    :param path: the path to POST to (appended to url)
    :param data: a dict containing data to post (serialized as JSON)
    :return: a dict converted from the returned JSON object
    raises a requests.exceptions.HTTPError when the response code is not 200
    """
    payload = json.dumps(data)
    response = requests.post(url + path, data=payload, headers=headers)
    response.raise_for_status()
    return response.json()
from io import StringIO
import csv
def parse_asdcom_stdout(stdout_str):
    """
    Parse the CSV output of ``asdcom GetMappedUsers``.

    Mirrors the original shell pipeline ``sed '1d' | grep -v "NSS$"``:
    the header row is dropped and any row whose last field ends with
    'NSS' is skipped.

    Example output being parsed:
        UPN,ULoginName,UniqueID,NTName,SourceFile
        abaelemer@QASDEV.oi,abaeleloc,73D5FC4F-...,QASDEV\\abaelemer,/etc/opt/quest/vas/console_mappings

    :return: (err, mapped_users) where err is always None and
        mapped_users is a list of [ULoginName, NTName] pairs.
    """
    reader = csv.reader(StringIO(stdout_str))
    # Drop the header row, if present.
    next(reader, None)
    mapped_users = [[row[1], row[3]]
                    for row in reader
                    if not row[-1].endswith('NSS')]
    return None, mapped_users
def get_features(config, data, datadir):
    """
    Get features from the feature list created during data generation /
    or the list specified during init_city.

    Features named in the config but absent from ``data``'s columns are
    dropped, with a warning printed to stdout.

    Args:
        config: dict with 'cont_feat' and 'cat_feat' feature-name lists.
        data: DataFrame-like object exposing ``columns.values``.
        datadir: unused here; kept for interface compatibility.
    Returns:
        (cont_feat_found, cat_feat_found, features) where ``features`` is
        the concatenation of the first two lists.
    """
    def _found(names):
        # Keep only the features that exist as columns, warning otherwise.
        # (Extracted to remove the duplicated cont/cat filtering loops.)
        found = []
        for name in names:
            if name not in data.columns.values:
                print("Feature " + name + " not found, skipping")
            else:
                found.append(name)
        return found

    cont_feat_found = _found(config['cont_feat'])
    cat_feat_found = _found(config['cat_feat'])
    features = cont_feat_found + cat_feat_found
    return cont_feat_found, cat_feat_found, features
def PrettifyFrameInfo(frame_indices, functions):
    """Return a string to represent the frames with functions.

    Function signatures are truncated at the first '(' so only the
    bare name is shown.
    """
    descriptions = [
        'frame #%s, "%s"' % (index, function.split('(')[0])
        for index, function in zip(frame_indices, functions)
    ]
    return '; '.join(descriptions)
import re
def fix_spaces(s):
    """Remove extra spaces and convert non-breaking spaces to normal ones."""
    normalized = s.replace("\xa0", " ").strip()
    # Collapse any run of spaces to a single space.
    return re.sub(" +", " ", normalized)
def get_catalog_record_preferred_identifier(cr):
    """Get preferred identifier for a catalog record.
    Args:
        cr (dict): A catalog record.
    Returns:
        str: The preferred identifier of the dataset. If not found then ''.
    """
    research_dataset = cr.get('research_dataset', {})
    return research_dataset.get('preferred_identifier', '')
def get_domain_setting(args):
    """
    This function returns domain settings.
    Args:
        args.flipped_terminals: {True, False},
        args.flipped_actions: {True, False}
        args.load: {True, False}
        args.no_pre_training: {True, False}
        initial state distributions are chosen based on the terminal flipping
    Returns:
        domain_settings (dict)
    """
    # Flipped-action experiments only apply during (pre-)training.
    flipped_actions = args.flipped_actions
    if args.no_pre_training or args.load:
        flipped_actions = False

    # Initial state distribution depends on where the terminals are.
    if args.flipped_terminals:
        init_state_transition = [[[-0.61, -0.44], [-0.008, 0.008]]]
    else:
        init_state_transition = [[[0.4, 0.5], [0, 0.07]]]

    init_state_train = [[[-1.2, 0.5], [-0.07, 0.07]], [[-1, 0], [-0.03, 0.03]]]
    domain_settings = {
        'flipped_terminals': args.flipped_terminals,
        'flipped_actions': flipped_actions,
        'init_state_transition': init_state_transition,
        'num_tilings': 10,
        'num_x_tiles': 10,
        'num_v_tiles': 10,
        'gamma': 0.997,
        'init_state_train': init_state_train,
        'init_state_test': init_state_train,
        'init_state_eval': [[[-0.2, -0.1], [-0.01, 0.01]]],  # to find
        'reward_taskB_R': 1,
        'max_steps_per_episode': 400,
        'phase': 'train',
    }
    return domain_settings
def gather_input(input_string, datatype=int, req=[], notreq=[]):
    """
    Create an input and return the users input - it will catch for invalid inputs.
    Parameters
    -----------
    input_string: :class:`str`
        This will be passed into the builtin input() function.
    datatype: Any
        The data type to convert the input into - if it cannot be converted it will ask the user again.
    req: :class:`list`
        A list with all possible inputs and if the user input is not a match it will ask again.
        - If [], anything is allowed.
    notreq: :class:`list`
        A list with all inputs that should NOT be allowed
        - If [], nothing will happen.
    Returns
    -------
    Any
        The input that was received from the user, converted to ``datatype``.
    """
    while True:
        # BUG FIX: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit, making the prompt impossible to
        # interrupt; only conversion failures should trigger a retry.
        try:
            value = datatype(input(input_string).strip())
        except (ValueError, TypeError):
            print("Invalid input.")
            continue
        # BUG FIX: the original used ``elif``, so ``notreq`` was silently
        # ignored whenever ``req`` was non-empty; the docstring describes
        # them as independent filters.
        if req != [] and value not in req:
            print("Invalid input.")
            continue
        if notreq != [] and value in notreq:
            print("Invalid input.")
            continue
        return value
def get_tz_offset(d):
    """ Returns a Timezone (sign, hours, minutes) tuple.

    E.g.: for '2020-01-21T12:30:00+01:30' ('+', 1, 30) is returned
    """
    offset_minutes = d.utcoffset().total_seconds() / 60
    sign = "-" if offset_minutes < 0 else "+"
    hours, minutes = divmod(int(abs(offset_minutes)), 60)
    return sign, hours, minutes
def replace_values(obj, replace_map):
    """
    Deep replace of object values according to provided map.

    Lists and dicts are recursed into (and mutated in place); any other
    value found as a key in ``replace_map`` is substituted. Dict keys
    themselves are not replaced.

    :param obj: the object to have the values replaced
    :param replace_map: the map of values with their replacements
    :return: obj with replaced values
    """
    if isinstance(obj, list):
        for index in range(len(obj)):
            obj[index] = replace_values(obj[index], replace_map)
        return obj
    if isinstance(obj, dict):
        for key in obj:
            obj[key] = replace_values(obj[key], replace_map)
        return obj
    if obj in replace_map:
        return replace_map[obj]
    return obj
def parse_stats(stats_file):
    """Parse a tab-separated key/value stats file into a dict.

    Each line must contain exactly one key and one value separated by a
    single tab; surrounding whitespace is stripped from the line first.
    """
    with open(stats_file) as handle:
        pairs = (line.strip().split("\t") for line in handle)
        return {key: value for key, value in pairs}
def default_serialize_error(req, exception):
    """Serialize the given instance of HTTPError.

    This function determines which of the supported media types, if any,
    are acceptable by the client, and serializes the error to the
    preferred type.

    Currently, JSON and XML are the only supported media types. If the
    client accepts both JSON and XML with equal weight, JSON will be
    chosen. If a custom media type with a '+json' or '+xml' suffix is
    expected, the error is serialized to JSON or XML respectively; use a
    custom error serializer to override this behavior.

    Args:
        req: Instance of falcon.Request
        exception: Instance of falcon.HTTPError

    Returns:
        A tuple of the form ``(media_type, representation)``, or
        ``(None, None)`` if the client does not support any of the
        available media types.
    """
    preferred = req.client_prefers(('application/xml',
                                    'text/xml',
                                    'application/json'))
    if preferred is None:
        # Simple suffix heuristic for custom media types: fast,
        # sufficiently accurate, and it simply chooses JSON when both
        # suffixes appear.
        accept = req.accept.lower()
        if '+json' in accept:
            preferred = 'application/json'
        elif '+xml' in accept:
            preferred = 'application/xml'
    if preferred is None:
        return (None, None)
    if preferred == 'application/json':
        return (preferred, exception.to_json())
    return (preferred, exception.to_xml())
def bsa_dubois(weight: float, height: float) -> float:
    """Compute body surface area with the DuBois formula.

    Parameters
    ----------
    weight
        weight in kg
    height
        height in cm

    Returns
    -------
    float
        body surface area
    """
    # BSA = 0.007184 * H^0.725 * W^0.425  (DuBois & DuBois)
    height_term = height ** 0.725
    weight_term = weight ** 0.425
    return 0.007184 * height_term * weight_term
def get_query(event):
    """
    Get the event signup query class.

    :param event: Target event to get query class from.
    :type event: core.models.Event
    :return: Query class, or None when the event has no labour meta.
    :rtype: T <= QueryBuilder
    """
    meta = event.labour_event_meta
    if meta is None:
        return None
    return meta.signup_extra_model.get_query_class()
def write_datapoints(datapoints, writer):
    """
    Write a batch of data points and track example line boundaries.

    :param datapoints: List[Dict] a batch of data points
    :param writer: file handler of a file to write to
    :return: tuple ``(examples, start)`` — *examples* is a
        List[Tuple[int, int]] of (begin, end) indices for each example
        completed within this batch; *start* is the begin index of the
        example still open when the batch ended (the last example is
        intentionally left unclosed so batches can be chained).
    """
    examples = []
    start = 0
    current_id = None
    for idx, datapoint in enumerate(datapoints):
        if current_id is not None and datapoint['example_id'] != current_id:
            # a new example begins here; close out the previous one
            examples.append((start, idx))
            start = idx
        current_id = datapoint['example_id']
        writer.write(datapoint)
    return examples, start
def valid_reading(reading, previous_reading):
    """ Check if a reading is valid.

    The SmartThings API may return zero or the reading may go backwards,
    which confuses things. A reading is valid only when it is present,
    positive, and not less than the previous valid reading.
    """
    if reading is None or reading <= 0:
        return False
    return previous_reading is None or reading >= previous_reading
_MISSING = object()  # sentinel so that default=None is a usable default


def find(iterable, predicate, default=_MISSING):
    """Return the first item in *iterable* for which *predicate* is true.

    If no item matches and no default was supplied, a KeyError is
    raised. Otherwise the default is returned: a callable default is
    invoked and its result returned; any other default is returned
    as-is.

    Fix: the original tested ``default is None``, so an explicit
    ``default=None`` raised KeyError instead of returning None; the
    private sentinel distinguishes "no default given" from None.
    """
    for item in iterable:
        if predicate(item):
            return item
    if default is _MISSING:
        raise KeyError('no item matched the predicate')
    if callable(default):
        return default()
    return default
def get_url_and_time(line):
    """Extract the request url (8th field) and response time (last
    field) from one log line; return (None, None) for malformed lines.
    """
    try:
        fields = line.split(' ')
        return fields[7].strip(), float(fields[-1].strip())
    except Exception:
        # best-effort parse: bad lines are signalled, not raised
        return None, None
def _get_url(context, data):
"""
get url from context params or data dict,
log warning for no url found
"""
url = context.params.get("url")
if url is not None:
return url.format(**data)
url = data.get("url")
if url is None:
context.log.warning("No url found.")
return url | a52e715c84b89bc1aa0a6e6da67ecb32007d9b85 | 96,673 |
def xpath(elt, xp, ns, default=None):
    """
    Evaluate an xpath expression on *elt* and return its first result.
    When the expression matches nothing, *default* is returned instead.
    """
    matches = elt.xpath(xp, namespaces=ns)
    # len() (rather than truthiness) mirrors lxml's list results exactly
    if len(matches) == 0:
        return default
    return matches[0]
from typing import Any
import ast
def constant(value: Any) -> ast.Constant:
    """Wrap *value* in an ``ast.Constant`` expression node."""
    return ast.Constant(value=value)
import ipaddress
def is_subnet_of(a, b):
    """
    Return True when network *a* is contained within network *b*.
    """
    net_a = ipaddress.ip_network(a)
    net_b = ipaddress.ip_network(b)
    # a is a subnet of b iff widening a by the prefix gap yields b
    prefix_gap = net_a.prefixlen - net_b.prefixlen
    return prefix_gap >= 0 and net_a.supernet(prefix_gap) == net_b
def detect_cycles(data):
    """
    Detect a repeating cycle in *data*.

    Returns a tuple (first_index, period): the position where the cycle
    first appears and its length (1 for a steady state). Returns (0, 0)
    when no cycle is found.
    """
    total = len(data)
    # try the shortest period first so the minimal cycle is reported
    for period in range(1, total // 2 + 1):
        for offset in range(total):
            window = data[offset:offset + period]
            if window == data[offset + period:offset + 2 * period]:
                return offset, period
    return 0, 0
def create_motion_name(test_name, sensor_code, code_suffix=""):
"""
Builds the full name of the file
:param test_name: str, test name
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:param code_suffix: str, suffix
:return:
"""
return "%s-%s-%s" % (test_name, sensor_code, code_suffix) | cfd583ec440c8ad0e92c849c09bacc29c45b4fe3 | 96,689 |
def parse_records(records):
    """
    Parse records returned by the Springer API into plain dicts.

    Each output dict carries title, publicationName, abstract, doi and
    url keys, substituting the default text for any missing field. An
    empty input yields a single all-default placeholder entry.

    :param records: list of record dicts from the Springer API
    :return: list of parsed dicts
    """
    # NOTE: 'avaliable' typo preserved — callers may match on it
    DEFAULT_TEXT = 'Not avaliable'
    if not records:
        return [{'title': DEFAULT_TEXT, 'publicationName': DEFAULT_TEXT,
                 'abstract': DEFAULT_TEXT, 'doi': DEFAULT_TEXT,
                 'url': DEFAULT_TEXT}]
    results = []
    for record in records:
        result = {
            'title': record.get('title', DEFAULT_TEXT),
            'publicationName': record.get('publicationName', DEFAULT_TEXT),
            'abstract': record.get('abstract', DEFAULT_TEXT),
            'doi': record.get('doi', DEFAULT_TEXT),
            'url': DEFAULT_TEXT,
        }
        # Guard against an empty/None url list (the original indexed
        # record['url'][0] unconditionally and raised IndexError);
        # keep the None default of .get('value', None) for callers
        # that test for it.
        urls = record.get('url')
        if urls:
            result['url'] = urls[0].get('value', None)
        results.append(result)
    return results
def is_blocking_state(state):
    """
    Check whether the given state is blocking.

    A state (u, v) is blocking whenever its first component is
    positive: S_b = {(u,v) ∈ S | u > 0}
    """
    u = state[0]
    return u > 0
def get_orfs_in_range(all_orfs, rang):
    """
    Filter *all_orfs* down to those fully contained within *rang*.

    Each orf's coordinate pair orf[0] may appear in either orientation;
    an orf is kept when its min/max span lies inside [rang[0], rang[1]].
    """
    return [orf for orf in all_orfs
            if min(orf[0][0], orf[0][1]) >= rang[0]
            and max(orf[0][0], orf[0][1]) <= rang[1]]
import torch
def get_centroid_indices(masks):
    """
    Compute integer (x, y) centroids for a stack of per-object masks.

    Params:
        masks: Tensor[num_objects, height, width]
    Returns:
        centroids: Tensor[num_objects, (x, y)] of dtype int64
    """
    _, height, width = masks.shape
    xs = torch.arange(0, width, 1, dtype=masks.dtype, device=masks.device)   # Tensor[width]
    ys = torch.arange(0, height, 1, dtype=masks.dtype, device=masks.device)  # Tensor[height]
    # epsilon keeps the division finite for all-zero masks
    areas = masks.sum(dim=(1, 2)) + 1e-9                                     # Tensor[num_objects]
    cx = (masks.sum(dim=1) * xs[None, :]).sum(dim=1) / areas                 # Tensor[num_objects]
    cy = (masks.sum(dim=2) * ys[None, :]).sum(dim=1) / areas                 # Tensor[num_objects]
    return torch.stack((cx, cy), dim=1).to(torch.int64)                      # Tensor[num_objects, (x, y)]
def is_distributed_table(v):
    """Determine if an object is a DistributedTable (or a restored one),
    matching by class name to avoid importing the defining module."""
    cls_name = type(v).__name__
    return cls_name in ("DistributedTable", "RestoredDistributedTable")
def massic_flux_cohen(conc, part_coef, k, area):
    """
    Return the dissolution mass flux [kg/s].
    source : (Cohen et al., 1980)

    Parameters
    ----------
    conc : Component concentration in oil phase [kg/m³]
    part_coef : Partition coefficient ratio of component concentration
        (oil to water) [(g/cm³)/(g/cm³)]
    k : Water phase mass transfer coefficient (oil to water) [m/s]
    area : Area of the slick [m²]
    """
    # flux = C * K_ow * k * A
    flux = conc * part_coef
    flux = flux * k
    flux = flux * area
    return flux
import torch
def _identity_window(dists, width):
"""
An "identity window". (I.e. a "window" which when multiplied by, will not change the input).
"""
return torch.ones(dists.size()) | cd90fb1dffe644d5ad467e6815ee5aa3ecc92d1c | 96,708 |
def secret_combine(secret_function1, secret_function2):
    """
    Compose two one-argument functions, applying them in order.

    :param secret_function1: function applied first
    :param secret_function2: function applied second
    :return: callable mapping x to secret_function2(secret_function1(x))
    """
    def composed(x):
        return secret_function2(secret_function1(x))
    return composed
def sum_even_fibonaccis(limit):
    """Sum every even Fibonacci term whose value does not exceed
    *limit*."""
    a, b = 0, 1  # Fibonacci seed values
    total = 0
    while a + b <= limit:
        a, b = b, a + b
        # only even terms contribute to the sum
        if b % 2 == 0:
            total += b
    return total
def _ncells_to_area(ncells, rho, ref_density=1250):
"""Return theoretical area taken up by `ncells` cells.
Returns
-------
area : number or numpy array (dtype float)
Area in units (mm^2, μm^2, etc.)
Parameters
----------
ncells : number or numpy array (dtype int)
Number of cells
rho : number or numpy array
Cell density in dimensionless units.
ref_density : number or numpy array
The cell density at a dimensionless density of `rho = 1`
in units of inverse area. Defaults to 1250 (mm^-2).
"""
return ncells / (rho * ref_density) | 977ddb5d7e34872a0363b442376f5c2dfb9de4b7 | 96,717 |
def count_unique_value_in_column(data: list, column: int) -> int:
    """
    Count unique number of items in column. If column number is
    incorrect returns -1.

    Fix: the original guard (``column < 0 and column <= len(data)``)
    only caught negative columns and let an out-of-range column raise
    IndexError; both cases now return -1 as documented.

    :param data: list of rows (each row an indexable sequence)
    :param column: zero-based column index
    :return: Number of unique fields or -1 if column number is incorrect
    """
    if column < 0:
        return -1
    try:
        return len({row[column] for row in data})
    except IndexError:
        # some row is narrower than `column`
        return -1
def num_utterances(dataset):
    """Return the total number of utterances across every interaction
    in the dataset."""
    total = 0
    for interaction in dataset.examples:
        total += len(interaction)
    return total
def _zscore(d):
"""
Calculates z-score of data
Parameters
----------
d : array
Data of interest
Returns
-------
z : array
Z-score
"""
z = (d - d.mean(0)) / d.std(0)
return z | 8d3e7314d32d718c97ca214314d6caa64a78851d | 96,723 |
def get_card_k(k: int) -> int:
    """Return the cardinal of (Z/3^kZ)*, i.e. the count of non-multiples
    of 3 between 0 and 3^k."""
    # Euler totient of a prime power: phi(3^k) = 2 * 3^(k-1)
    phi = 2 * 3 ** (k - 1)
    return phi
import re
def cleaning(text):
    """
    Normalize raw text: lowercase it, strip urls and email addresses,
    then drop every character that is not alphanumeric or whitespace.

    Args:
        text (str): Input string to be cleaned

    Returns:
        str: cleaned text
    """
    lowered = text.lower()
    without_urls = re.sub('http\S+', '', lowered)
    without_emails = re.sub('\S*@\S+', '', without_urls)
    # keep only digits, letters and whitespace
    return re.sub(r'[^0-9A-Za-z\s]', '', without_emails)
import json
def is_stringified_list(s):
    """
    Check whether *s* is a stringified (JSON) list.

    TIP: In JSON, strings are only characterized by double quotation
    marks. I.e., "animal" is a valid string in JSON, whereas 'animal'
    is not.

    Fix: the original expression had its condition inverted
    (``... if not s else False``), so every non-empty string returned
    False and the empty string crashed json.loads. Empty/falsy input
    and invalid JSON now both return False.

    :param s: An arbitrary string.
    :return: True iff *s* decodes to a JSON list.
    """
    if not s:
        return False
    try:
        return isinstance(json.loads(s), list)
    except (ValueError, TypeError):
        # invalid JSON or a non-string input
        return False
def author_list(authors):
    """Convert a list of author DB objects to JSON-serializable dicts."""
    serialized = []
    for author in authors:
        serialized.append({"name": author.name, "email": author.email})
    return serialized
def cvss_to_severity(cvss):
    """ Map CVSS score to Carrier severity """
    # thresholds checked high-to-low; anything below 0.1 is Info
    for threshold, label in ((9.0, "Critical"), (7.0, "High"),
                             (4.0, "Medium"), (0.1, "Low")):
        if cvss >= threshold:
            return label
    return "Info"
def slugify(s):
    """
    Slugify a string for use in URLs. This mirrors ``nsot.util.slugify()``.

    :param s:
        String to slugify
    """
    # only '/' is disallowed at present; each occurrence becomes '_'
    return s.replace('/', '_')
from datetime import datetime
def checkDate(date: str) -> bool:
    """
    Check if a string is a valid ISO date (YYYY-MM-DD).

    Parameters
    ----------
    date: str
        The value to be tested

    Returns
    -------
    bool
        True/False according to the validity of the date
    """
    # isinstance instead of `type(date) == str` so str subclasses pass;
    # catch only the parse failure instead of a blanket Exception
    if not isinstance(date, str):
        return False
    try:
        datetime.strptime(date, "%Y-%m-%d")
    except ValueError:
        return False
    return True
import pickle
def unPickle(pickle_file):
    """
    Load the pickled dataset file and return its six arrays as
    (test_dataset, test_labels, train_dataset, train_labels,
    valid_dataset, valid_labels).
    """
    with open(pickle_file, 'rb') as handle:
        datasets = pickle.load(handle)
    return (datasets['test_dataset'],
            datasets['test_labels'],
            datasets['train_dataset'],
            datasets['train_labels'],
            datasets['valid_dataset'],
            datasets['valid_labels'])
def _raw_bus_voltage_ovf(bus_voltage_register):
"""The Math Overflow Flag (OVF) is set when the power or current calculations are out of range"""
return bus_voltage_register & 0x1 | 4de5cc4222632795b965cf2a96349b8140ec8838 | 96,745 |
def normalize(hex_code):
    """Convert a hex color code to six-digit lowercase '#rrggbb' form.
    """
    digits = hex_code.lstrip('#').lower()
    if len(digits) == 3:
        # expand shorthand: each digit doubles ('abc' -> 'aabbcc')
        digits = ''.join(c + c for c in digits)
    return '#' + digits
import re
def check_fqdn(name):
    """
    Check if a hostname is fully qualified: dot-separated labels of
    letters, digits and hyphens (no leading/trailing hyphen), total
    length 4-253 characters, ending in a 2-63 letter TLD.

    Fix: raw strings avoid the invalid '\\.' escape (DeprecationWarning
    on modern Python); bool() replaces the if/else on the match object.
    Pattern from StackOverflow #11809631.
    """
    pattern = (r'(?=^.{4,253}$)'
               r'(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}\.?$)')
    return bool(re.match(pattern, name))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.