content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_field(self, field_name):
    """Get the value of variables stored in Solution.

    Looks up ``field_name`` in the nodal, edge, face and volume stores,
    in that order, and returns the first non-None match.

    Parameters
    ----------
    self : Solution
        a Solution object
    field_name : str
        name of the field to return

    Returns
    -------
    field : array or None
        the stored field values, or None if the name is unknown
    """
    # Direct dict lookups instead of scanning every key (O(1) vs O(n)).
    # A stored value of None is treated as "not found", matching the
    # original fall-through behaviour.
    for store in (self.nodal, self.edge, self.face, self.volume):
        field = store.get(field_name)
        if field is not None:
            return field
    return None
|
3d106d995a98407a64a61235d9a106e35968d20c
| 697,973
|
import gzip
import pickle
def load_mnist():
    """Load the pickled MNIST dataset from ``./data/mnist.pkl.gz``.

    :return: three tuples: (training_data, validation_data, test_data)
    """
    # NOTE(review): unpickling is only safe for trusted local data.
    with gzip.open('./data/mnist.pkl.gz') as fh:
        return pickle.load(fh, encoding='latin1')
|
b688fb8f4f690d3bab83ad42da564cd26d308d0d
| 697,974
|
def bangbang_compressor(bangbang_protocol
                        ):
    """Compresses the bang bang protocol.

    Merges runs of contiguous identical chunks into tuples of duration
    (in number of chunks) and which Hamiltonian to apply.

    Args:
        bangbang_protocol: List of HamiltonianType values, determines which
            Hamiltonian should be applied at the corresponding chunk.

    Returns:
        List of Tuples containing the number of chunks and the Hamiltonian
        type to apply for that duration, in protocol order.
    """
    # Local import keeps the module dependency-free for existing callers;
    # groupby performs the run-length encoding in C.
    from itertools import groupby
    return [(sum(1 for _ in run), mode)
            for mode, run in groupby(bangbang_protocol)]
|
7e3b6e0e5678e705c54c39e31561a908900d9b08
| 697,976
|
def CountGroups(fingerprint_groups: list, group_labels: list, d: dict) -> list:
    """Count groups for fingerprint.

    For each label, counts how often each expected group appears as the
    first element of the entries stored under that label in *d*.
    """
    counts = []
    for i, label in enumerate(group_labels):
        firsts = [entry[0] for entry in d[label]]
        counts.append([firsts.count(group) for group in fingerprint_groups[i]])
    return counts
|
37e7c95abf6d69cc66c0cfb449827ba7e26db66a
| 697,977
|
import os
def read_fprime_import(import_type, root):
    """
    Read an FPrime dependency port/type. These are typically ports,
    serializables, and components.

    :param import_type: "import_port_type", "import_component_type",
        "import_serializable_type", "import_enum_type"
    :param root: root XML to parse
    :return: set of dependency library names mined from these items
    """
    # Each import's directory path, with "/" flattened to "_", names a library.
    return {
        os.path.dirname(node.text).replace("/", "_")
        for node in root.findall(import_type)
    }
|
46b4afa24cc6334c2ac138b973e2fd33bbd04e3e
| 697,978
|
def tryConvert(cell):
    """
    Convert a cell, if possible, to its supposed type (int, float, string).

    note: type of a NaN cell is float

    The original code wrote ``except ValueError as TypeError``, which only
    caught ValueError (binding it to the name TypeError) — so non-string
    inputs such as None or lists crashed instead of being returned as-is.
    """
    try:
        return int(cell)
    except (ValueError, TypeError):
        try:
            return float(cell)
        except (ValueError, TypeError):
            return cell
|
d7841bc2bf3c5a330a67cd54df90d67b4ff70966
| 697,979
|
import yaml
def load_config(filename):
    """Load a YAML config file and return its parsed contents."""
    with open(filename, 'r') as stream:
        return yaml.safe_load(stream)
|
5c981d7a5aa7846486062c14c940cdb01cb0b5af
| 697,980
|
import os
def get_top_dir():
    """Return the top dir of the midonet repo (five levels above this file)."""
    here = os.path.dirname(__file__)
    # os.path.join inserts the missing separator; the original plain string
    # concatenation produced paths like ".../dir../../.." whose first ".."
    # was fused onto the directory name, resolving one level short.
    return os.path.realpath(os.path.join(here, *(['..'] * 5)))
|
e6f73330b28de999758855bc1f0e43355c0b9cf4
| 697,981
|
def variant_position_within(coordsVar, coordsInterval):
    """
    Check whether coordsVar lies within coordsInterval.

    Returns 1 when the chromosomes match and the variant position falls in
    [start, end]; returns 0 otherwise.
    """
    same_chrom = coordsVar.CHROM == coordsInterval.seqid
    if same_chrom and coordsInterval.start <= coordsVar.POS <= coordsInterval.end:
        return 1
    return 0
|
4f0cf0b6ce0d698eba5d30c0540efbf175704ea7
| 697,982
|
def get_password_token(request, identifier='Password'):
    """Return the token that follows *identifier* in the AUTHORIZATION header.

    Returns None when the header is missing or empty.
    """
    auth_string = request.headers.get('AUTHORIZATION')
    if not auth_string:
        return None
    # Skip "<identifier> " (identifier plus one separator character).
    return auth_string[len(identifier) + 1:]
|
840146387e31b452cc7987413606adc6511d4309
| 697,983
|
def argmin_except_c(x,p,c):
    """
    argmin_except_c(x,p,c)

    Ensure that p is the argmin, i.e. the position of the minimum value
    in x, while ignoring any occurrence of the value c.

    Note:
    - If there are many minimum values then argmin_except_c(x,p,c) will find
      all these values.
    - We assume that there is at least one value != c.
    """
    # p must not point at an ignored value.
    head = [x[p] != c]
    # Every other position either holds the ignored value c or is strictly
    # larger than x[p].
    tail = [(p != i).implies((x[i] == c) | (x[p] < x[i]))
            for i in range(len(x))]
    return head + tail
|
8046dddc15113ccb5c75fb62ce9ff5434104811e
| 697,984
|
def np_size(net):
    """
    Return the total count of all network parameters (weight, bias, etc...).
    """
    return sum(prop.size
               for layer in net.layers
               for prop in layer.np.values())
|
d573d62f54eeb2beed0e8035c3635a46fdf4a9da
| 697,985
|
import os
def sub_dirs(directory):
    """
    List the names of the immediate sub-directories of a directory, sorted.

    :param directory: path to scan
    :return: sorted list of sub-directory names
    """
    entries = os.listdir(directory)
    return sorted(entry for entry in entries
                  if os.path.isdir(os.path.join(directory, entry)))
|
ea2770c3602bda477df4d01b6359874bb48c5e5f
| 697,986
|
def compute(n: int) -> float:
    """
    Compute the sum ``s = 1/(1**2) + 1/(2**2) + ... + 1/(n**2)``.

    >>> compute(0)
    0
    >>> compute(4)
    1.4236111111111112
    """
    if n == 0:
        return 0
    total = 0.0
    # Ascending order keeps the float summation identical to sum(...).
    for i in range(1, n + 1):
        total += 1 / (i ** 2)
    return total
|
974506e3523f1c0cfe646b1fdf54846ff7f96cc4
| 697,987
|
from typing import Dict
from typing import Any
async def refresh_tokens_controller_mock() -> Dict[str, Any]:
    """Mock refresh-tokens controller returning a canned token payload."""
    payload: Dict[str, Any] = {
        "access_token": "test",
        "refresh_token": "test",
        "expires_at": 0,
    }
    return payload
|
9431f1311a1c0fa45a3e538ee70f66f0ef9de477
| 697,988
|
def to_slice(k, forty_five_deg, D_dimensions_to_check):
    """
    :param k: axis idx.
    :param forty_five_deg: bool determining if to slice in 45 deg.
    :param D_dimensions_to_check: The dimensions to check by the user.
    :return: When to slice the volume (in which axis/45 deg angles).
    """
    if k in D_dimensions_to_check:
        return True
    # Axis not requested: only slice when it is axis 2 in 45-degree mode.
    return k == 2 and bool(forty_five_deg)
|
bfd4db06603fa9d1832b96c8b11edad7e2485bbb
| 697,989
|
def _instance_name_from_url(instance_url):
"""Extract instance name from url."""
return instance_url.split('/')[-1]
|
e64bc076be9480a04821e9a9a79f13e25fadad35
| 697,990
|
import socket
def get_hostname_from_address(ip_address):
    """
    Return the DNS name for an IP address.

    NOTE(review): despite the original docstring promising an empty string,
    lookup failures actually return the literal string 'Request timed out';
    callers must compare against that sentinel.
    """
    # Global side effect: this lowers the default timeout for *all* sockets
    # created afterwards in this process, not just this one lookup.
    socket.setdefaulttimeout(0.5)
    try:
        return socket.gethostbyaddr(str(ip_address))[0]
    except (socket.herror, socket.gaierror):
        return 'Request timed out'
|
7ef678eb40dbc3c8f765adc633abd7ffcd8cd644
| 697,991
|
def calculateGC(seq):
    """Return the GC percentage of *seq*, rounded to two decimals."""
    gc_count = seq.count("G") + seq.count("C")
    return round(gc_count / len(seq) * 100, 2)
|
ed0c863c31214de970d0d0efd81206084b4bfbcf
| 697,992
|
import re
def strip_html_comments(text):
    """Strip html comments from text (which doesn't need to be valid HTML).

    Uses a non-greedy ``.*?`` with DOTALL instead of the original
    ``(.|\\s|\\n)*?`` alternation, which matched the same strings but
    backtracked pathologically on long unterminated comments.
    """
    return re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL)
|
4ac4c2061520a8ecdafe77884a1bae9196bc4e21
| 697,993
|
import click
def cli_option_quiet(func):
    """
    Decorator adding a reusable ``--quiet``/``-q`` flag to a click command.

    The parsed value is also stored under ``"quiet"`` in the click context
    object so nested commands can read it.
    """
    # noinspection PyUnusedLocal
    def _callback(ctx: click.Context, param: click.Option, value: bool):
        ctx.ensure_object(dict)["quiet"] = value
        return value

    option = click.option(
        '--quiet', '-q',
        is_flag=True,
        help="Disable output of log messages to the console entirely."
             " Note, this will also suppress error and warning messages.",
        callback=_callback
    )
    return option(func)
|
4249c71b38d24693b0064157dc313d5798d1529b
| 697,995
|
import typing
import os
import pathlib
def promote_pathlike(filepath: typing.Union[os.PathLike, str, None]
                     ) -> typing.Optional[pathlib.Path]:
    """Promote a path-like object (or None) into a ``pathlib.Path``.

    See also:
        https://docs.python.org/3/glossary.html#term-path-like-object
    """
    if filepath is None:
        return None
    return pathlib.Path(filepath)
|
c310e70a05d26bfbebe3f80e96c045c6aa10bc5a
| 697,996
|
import numpy
import multiprocessing
import pandas
def parallelize_df(df, func, n_cores=4):
    """Apply *func* to chunks of a pandas dataframe in parallel and
    concatenate the non-None results.

    Source: https://towardsdatascience.com/make-your-own-super-pandas-using-multiproc-1c04f41944a1
    """
    chunks = numpy.array_split(df, n_cores)
    with multiprocessing.Pool(n_cores) as mp_pool:
        mapped = mp_pool.map(func, chunks)
    kept = [chunk for chunk in mapped if chunk is not None]
    return pandas.concat(kept, ignore_index=True)
|
99fcd50553465645bcbcb01b5f05b3c2062141f8
| 697,997
|
import re
def indent_string(s, indent=12):
    """
    Prefix every non-blank line in *s* with *indent* space characters and
    return the result.
    """
    prefix = ' ' * indent
    # (?m)^ anchors at each line start; (?!$) skips blank lines.
    return re.sub('(?m)^(?!$)', prefix, s)
|
cf9603be231749f55e4a29c92c90b9139515894c
| 697,998
|
def rename(**kwargs):
    """Rename one or more columns, leaving other columns unchanged.

    Example usage:
        diamonds >> rename(new_name=old_name)
    """
    def rename_columns(df):
        mapping = {}
        for new_name, old_col in kwargs.items():
            mapping[old_col._name] = new_name
        return df.rename(columns=mapping)
    return rename_columns
|
0f4c87795f663a9cab8867c4b856ef75e59e2102
| 697,999
|
import argparse
def process_args(args=None):
    """Process command line arguments from user.

    kwargs:
        args(list): list of args passed by user.
    """
    parser = argparse.ArgumentParser(
        description="Extract play count data from iTunes XML library data.")
    parser.add_argument("path_to_xml",
                        metavar="path_to_xml",
                        help="Path to valid iTunes library XML file.")
    parser.add_argument("-o", "--output",
                        dest="output_format",
                        metavar="output_format",
                        default=None,
                        choices=["csv"],
                        help="Desired output format.")
    return parser.parse_args(args)
|
67e0b5f9c1b10cf323cae6960c31687de2079316
| 698,000
|
import optparse
def comma_separated_callback(*, is_valid_func=lambda v: True,
                             error_msg="{invalid} is not an allowed value"):
    """ Return an optparse callback for comma-separated args.
    Default value is not processed.
    Usage::
        my_callback = comma_separated_callback(
            is_valid_func=lambda v: v in {'foo', 'bar'},
            error_msg="{invalid} is not an allowed value for --option-name")
        op.add_option("--option-name",
                      default=[],
                      action='callback',
                      type='string',
                      callback=my_callback)
    """
    def callback(option, opt, value, parser):
        """ optparse callback for comma-separated args """
        parts = value.split(',')
        # Reject on the first invalid part, matching a plain loop.
        invalid = next((p for p in parts if not is_valid_func(p)), None)
        if invalid is not None:
            raise optparse.OptionValueError(
                error_msg.format(value=value, invalid=invalid))
        setattr(parser.values, option.dest, parts)
    return callback
|
3b7c1de63a7a29f0fc2f4a6ae7445d6e05075d32
| 698,001
|
from pathlib import Path
def csv_path(csv_dir: str) -> Path:
    """Build csv Path object & mkdir"""
    target = Path.cwd() / csv_dir
    # exist_ok: reuse the directory when it was created on a previous run.
    target.mkdir(exist_ok=True)
    return target
|
52d894ca30b3dc9f81c65b09d3739d18eafee7ba
| 698,002
|
import re
def get_validated_seqids(sequences_list):
    """
    Validate that every entry's sample_name matches the Seq-ID format and
    return the validated entries.

    sequences_list: list of sequence objects whose ``sample_name`` must
        look like ``2xxx-XX-nnn(n)``; raises ValueError otherwise, or when
        the list is empty.
    """
    seqid_pattern = re.compile(r'^(2\d{3}-\w{2,10}-\d{3,4})$')
    validated = []
    for sequence in sequences_list:
        if not seqid_pattern.match(sequence.sample_name):
            raise ValueError("Invalid seq-id \"%s\"" % sequence.sample_name)
        validated.append(sequence)
    if not validated:
        raise ValueError("Invalid format for redmine request. Couldn't find any fastas or fastqs to extract")
    return validated
|
9f1e2d145245d0bc40639999cda176a0e65b42ac
| 698,003
|
import subprocess
def which(exenames):
    """Returns the full path to an executable under any of the given names.

    Example::
        >>> which(['raxml', 'raxmlHPC', 'raxmlHPC-PTHREADS'])
        '/usr/local/bin/raxmlHPC'

    Returns None when none of the names resolve.
    """
    for exe in exenames:
        try:
            path = subprocess.check_output(['which', exe])
            # check_output returns bytes; decode so callers get the str
            # shown in the docstring example.
            return path.decode().strip()
        except subprocess.CalledProcessError:
            pass
    return None
|
07e63912dbb994168fc416a3563023dc296d7a5b
| 698,004
|
def addstrp(arg1, arg2):
    """
    Like addstr() above but inserts a single space between the two parts
    when both are non-empty.

    Args:
        arg1: whatever, will be translated via str()
        arg2: whatever, will be translated via str()
    """
    left, right = str(arg1), str(arg2)
    separator = ' ' if left and right else ''
    return u'{}{}{}'.format(left, separator, right)
|
fc161017d135daaa744bd9154a9669a8c7c634b1
| 698,005
|
import os
def parse_files(*paths):
    """Read Operating System Information from `os-release`

    Builds a dictionary describing the running operating system from the
    first existing file among *paths*; the file must be formatted according
    to the rules in `os-release(5)`. Returns an empty dict when no path
    exists.
    """
    for path in paths:
        if not os.path.exists(path):
            continue
        osrelease = {}
        with open(path) as f:
            for raw in f:
                entry = raw.strip()
                # Skip blank lines and comments.
                if not entry or entry[0] == "#":
                    continue
                key, value = entry.split("=", 1)
                osrelease[key] = value.strip('"')
        return osrelease
    return {}
|
552c5af039a07c08a7fed769713bbc01f1c65a7c
| 698,006
|
import math
import random
def queryMechanism(S,delta,Ycardinality,dic,dimension):
    """
    Randomized-response-style perturbation of the selected attributes.

    For each data owner, each requested attribute is kept with probability
    p = e^(eps/100) / (e^(eps/100) + |domain| - 1) — where eps is taken
    from S[i][1][idx] — and otherwise replaced by a uniformly random
    different value from that attribute's domain.

    :param S: list of (data_entry, privacy_budget) pairs; S[i][0] is the
        owner's record, S[i][1] holds per-attribute budgets
    :param delta: unused in this implementation
    :param Ycardinality: =len(dic); unused in this implementation
    :param dic: list of keys (attribute domain values or value tuples)
    :param dimension: list of attribute indices, e.g. [0, 1]
    :return: Answer_Table, one perturbed entry list per data owner
    """
    n = len(S)
    Answer_Table = []
    attr={}
    # Build the per-attribute domain from `dic`, depending on how many
    # dimensions were requested and which ones they are.
    if len(dimension)==1:
        attr[dimension[0]]=dic
    elif len(dimension) == 3:
        for i in dimension:
            attr[i]=[x[i] for x in dic]
    elif dimension == [0, 1]:
        attr[0] = [x[0] for x in dic]
        attr[1] = [x[1] for x in dic]
    elif dimension == [0, 2]:
        # NOTE(review): dic entries here are pairs, so attribute 2's domain
        # comes from x[1] — confirm this matches the caller's layout.
        attr[0] = [x[0] for x in dic]
        attr[2] = [x[1] for x in dic]
    else:
        print("Wrong dimension.")
    # print(attr[0])
    for i in range(n): # the index of data owner
        data_entry_perturbed = []
        for idx in dimension: # the index of data entry [0, 2]
            # Keep-probability from the owner's per-attribute budget.
            p = (math.exp(S[i][1][idx] / 100.0)) / (math.exp(S[i][1][idx] / 100.0) + len(set(attr[idx])) - 1)
            if random.random() <= p:
                data_entry_perturbed.append(S[i][0][idx])
            else:
                # Replace with a random *different* domain value.
                newValue = random.choice(list(set(attr[idx])-{S[i][0][idx]}))
                data_entry_perturbed.append(newValue)
        Answer_Table.append(data_entry_perturbed)
    return Answer_Table
|
a30a10558dc5dfe822efbfdc6ea8b85eb892e5bd
| 698,007
|
def multiple(clases):
    """
    For every number i in 2..9, count how many distinct classes have at
    least one dimension that is a non-zero multiple of i.

    :param clases: list - all classes, each an iterable of dimensions
    :return: dict mapping each i in 2..9 to the number of distinct
        qualifying classes
    """
    result = {}
    for divisor in range(2, 10):
        qualifying = set()
        for clase in clases:
            # any() stops at the first qualifying dimension; the set keeps
            # the count de-duplicated by class value.
            if any(int(d) != 0 and int(d) % divisor == 0 for d in clase):
                qualifying.add(clase)
        result[divisor] = len(qualifying)
    return result
|
a2e9f8a6ed0b86ad958eec7c1d3d6b935c715362
| 698,008
|
def getBlocks(convLayers: list):
    """Group consecutive layers sharing the same output channel count."""
    convBlocks = []
    start = 0
    channels = convLayers[0].output.shape[-1]
    for idx in range(1, len(convLayers)):
        layer_channels = convLayers[idx].output.shape[-1]
        if layer_channels != channels:
            # Channel count changed: close the current block here.
            convBlocks.append(convLayers[start:idx])
            start = idx
            channels = layer_channels
    convBlocks.append(convLayers[start:])
    return convBlocks
|
b069e6e89506479245c0035372950092c1b8324e
| 698,010
|
def autofill(field, value):
    """
    Build a bcm dictionary with a command to automatically fill the
    given "field" with "value".
    """
    return dict(mode='autofill', field=field, value=value)
|
7b96ccd0e9756cdd6aab9919996b183be1478f8f
| 698,011
|
def valid_vlan_id(vlan_id, extended=True):
    """Validates a VLAN ID.

    Args:
        vlan_id (integer): VLAN ID to validate. If passed as ``str``, it
            will be cast to ``int``.
        extended (bool): If the VLAN ID range should be considered extended
            for Virtual Fabrics (upper bound 8191 instead of 4095).

    Returns:
        bool: ``True`` if it is a valid VLAN ID. ``False`` if not.

    Raises:
        None

    Examples:
        >>> import pyswitch.utilities
        >>> pyswitch.utilities.valid_vlan_id('565')
        True
        >>> pyswitch.os.base.utilities.valid_vlan_id('6789', extended=False)
        False
        >>> pyswitch.os.base.utilities.valid_vlan_id('6789')
        True
    """
    upper_bound = 8191 if extended else 4095
    return 1 <= int(vlan_id) <= upper_bound
|
c046bf6c1e558eb679c9c53fa4e091213a1b7d46
| 698,012
|
def fix_misspelled_words2(text):
    """
    Fixes the misspelled words on the specified text (uses predefined misspelled dictionary)

    :param text: The text to be fixed
    :return: the fixed text

    Note: the space-delimited entries (' ur ', ' u r ') now map to
    space-delimited replacements; the originals dropped the surrounding
    spaces and fused the neighbouring words (e.g. "is ur car" -> "isyourcar").
    """
    mispelled_dict = {
        'colour': 'color', 'centre': 'center', 'favourite': 'favorite',
        'travelling': 'traveling', 'counselling': 'counseling',
        'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor',
        'organisation': 'organization', 'wwii': 'world war 2',
        'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora',
        'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist',
        'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can',
        'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do',
        'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does',
        'mastrubation': 'masturbation', 'mastrubate': 'masturbate',
        'mastrubating': 'masturbating', 'pennis': 'penis',
        'Etherium': 'Ethereum', 'narcissit': 'narcissist',
        'bigdata': 'big data', '2k17': '2017', '2k18': '2018',
        'qouta': 'quota', 'exboyfriend': 'ex boyfriend',
        'airhostess': 'air hostess', 'whst': 'what', 'watsapp': 'whatsapp',
        'demonitisation': 'demonetization',
        'demonitization': 'demonetization',
        'demonetisation': 'demonetization',
        ' ur ': ' your ', ' u r ': ' you are '}
    for word in mispelled_dict.keys():
        text = text.replace(word, mispelled_dict[word])
    return text
|
2f4a99f924d97e25179b0a539113a7a8ccc24f06
| 698,013
|
import csv
def read_file(filename):
    """ Read file gets an input file(csv) from the same directory and generates a features and dataset.

    :param filename:
        filename - string: The filename of the csv file in the folder.
        Example: ecoli.csv
    Yields:
        feature_attribute_set: the attribute names of the dataset (the
        trailing class column is dropped from the header)
        dataset - list[list]: rows containing the attributes and classes.
        Example: [[0.23, 0.34, 0.33, 0.12, 0.45, 0.68, 'cp'], ... ]
    """
    with open(filename, 'r') as data:
        rows = list(csv.reader(data))
    header = rows[0]
    header.pop()  # drop the class-label column from the feature names
    return header, rows[1:]
|
7921a3091cfbf6eb8321a478b75d246a8c692c14
| 698,014
|
def compare_2(num1: int, num2: int) -> int:
    """Return the absolute difference between *num1* and *num2*.

    :param num1: int
    :param num2: int
    :return: int
    """
    difference = num1 - num2
    return difference if difference >= 0 else -difference
|
587445a5b496afc189d65f1207692ed9f0cbb658
| 698,015
|
import os
def data_dir():
    """Return the absolute path of this module's ``data`` directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'data')
|
5ae90fef9e006d0eeed8c2d078066bc25164e575
| 698,016
|
import asyncio
async def open_port(host, port):
    """Return True when a TCP connection to host:port succeeds within 2 s.

    Parameters
    ----------
    host : str
        Host IP address or hostname
    port : int
        Port number

    Returns
    -------
    awaitable bool
    """
    try:
        _reader, writer = await asyncio.wait_for(
            asyncio.open_connection(host, port), timeout=2
        )
        writer.close()
        await writer.wait_closed()
        return True
    except Exception:
        # Any failure (refused, timed out, DNS error, ...) counts as closed.
        return False
|
9642cf112c2c3bb83d578c522d70880be064c827
| 698,017
|
import argparse
def create_parser(argv=None):
    """
    Read in the cmd line arguments: 'data' is the data set to use and
    'method' is the solution approach.

    :param argv: optional list of argument strings; when None, argparse
        falls back to sys.argv (parameter added for testability —
        backward compatible with no-argument calls).
    :return: parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("data")
    parser.add_argument("method")
    return parser.parse_args(argv)
|
568cd5b229faaee633ac4e7ac37a8d480ec14f70
| 698,018
|
def get_attribute_value(field, attribute):
    """Recursively resolve *attribute*, unwrapping (obj, attr) tuples first.

    A tuple field is interpreted as (object, attribute_name) and resolved
    before the final getattr.
    """
    if type(field) is tuple:
        inner = get_attribute_value(field[0], field[1])
        return get_attribute_value(inner, attribute)
    return getattr(field, attribute)
|
1a37194cdb171a60cf0f7fad679ba33107e2e916
| 698,019
|
import os
import sys
def _getExecutable(settings):
"""Resolves and returns the executable for the current context.
If this is a standalone Python executable it tries to resolve the correct
prepared virtual environment for it.
Args:
settings (dict) : settings dictionary
Returns:
(string) : path to the executable
"""
context = settings['context']
context_details = settings['context_details'][context]
executable = context_details['executable']
# if this is standalone python then prefer the correct virtualenv
if context.lower().find('python') != -1:
dcc_settings = settings['dcc_settings_path']
venv_root = os.sep.join([dcc_settings, 'virtualenv_{}'.format(context)])
if os.path.exists(venv_root):
subfolder = 'bin'
if sys.platform == 'win32':
subfolder = 'Scripts'
if os.path.exists('{}/{}'.format(venv_root, subfolder)):
executable = '{}/{}/{}'.format(venv_root, subfolder, 'python')
return executable
|
85aa3b2b219b5d917fa79b55bc9864d92c406647
| 698,021
|
def y_minority_con_majority_211(by_grps):
    """
    Check the 2-1-1 minority/majority pattern.

    The two-element small group must be homogeneous, the four-element large
    group must contain exactly two of one letter and one each of the other
    two, and the small group's letter must differ from the large group's
    majority letter.

    Examples:
        PASS - [[S,S],[N,N,S,O]]
        FAIL - [[S,N],[N,N,S,O]]
        FAIL - [[S,S],[N,N,O,O]]
        FAIL - [[S,S],[S,S,N,O]]
    """
    small, big = by_grps[0], by_grps[1]
    if small[0] != small[1]:
        print("Failed y_minority_con_majority_211 -- small groups do not match")
        return False
    counts = {"S": 0, "N": 0, "O": 0}
    for item in big:
        if item in counts:
            counts[item] += 1
    big_letter = ""
    for letter in ("S", "N", "O"):
        others = [l for l in ("S", "N", "O") if l != letter]
        # Exactly two of `letter` and one of each remaining letter.
        if counts[letter] == 2 and all(counts[o] == 1 for o in others):
            # BUG FIX: the original tagged a two-'O' majority as "N",
            # letting an "O" small group slip through the final check.
            big_letter = letter
            break
    if not big_letter:
        print("Failed y_minority_con_majority_211 -- no 2-1-1 pattern")
        return False
    if small[0] == big_letter:
        print("Failed y_minority_con_majority_211 -- small group matches large group majority")
        return False
    print("Passed y_minority_con_majority_211--returning True")
    return True
|
598522211c70b74ae8717a0f37045b2541e99046
| 698,022
|
import argparse
import sys
import random
import time
def main(argv=None):
    """
    Entry point: sleep for a random duration between the given bounds.

    :param argv: List[str] Arguments to parse (default sys.argv)
    :return: int
    """
    parser = argparse.ArgumentParser(
        description='Randomized sleep script (e.g. for offseting process start times).')
    parser.add_argument('min', type=float, help='Minimum time to sleep (seconds).')
    parser.add_argument('max', type=float, help='Maximum time to sleep (seconds).')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Output wait time in seconds.')
    options = parser.parse_args((sys.argv if argv is None else argv)[1:])
    duration = random.uniform(options.min, options.max)
    if options.verbose:
        print(str(duration))
    time.sleep(duration)
    return 0
|
37aa5e9e7477ad0290d7a54462ca93fbce7b2b1c
| 698,023
|
from typing import Tuple
def is_event(item: Tuple[str, str]) -> bool:
    """Return True when the labelled folder tuple is of type "event".

    Args:
        item: (folder, label) item from labelled list of folders

    Returns:
        True if folder in tuple is event-type
    """
    return item[1] == "event"
|
2307813e7777955b11a5d84c7dbc114a8a690562
| 698,024
|
def _parse_arg_line(line):
    """
    pull out the arg names from a line of CLI help text introducing args
    >>> _parse_arg_line('  -s, --sudo   run operations with sudo (nopasswd) (deprecated, use')
    ['-s', '--sudo']
    """
    # NOTE(review): the doctest result requires the outer split to separate
    # the option names from the help text (i.e. a multi-space gap between
    # "--sudo" and "run"); verify the split separators here survived
    # copy/paste with their whitespace intact.
    return [
        part.strip().split(' ')[0].split('=')[0]
        for part in line.strip().split(' ')[0].split(',')
    ]
|
5d6bbddc43792ee974d0dfec4637046aff602052
| 698,025
|
def pyiterator_iter(this):
    """
    Identity ``__iter__``: return *this* unchanged.

    https://docs.python.org/3/library/stdtypes.html#typeiter says
    an iterator should have an __iter__ method.
    """
    return this
|
5a44e38f6219f9ab0b629be817b0e2877c464149
| 698,026
|
def checked(request, key, val):
    """Return "checked" when key=val appears in the request URL parameters.

    Use this tag to set the class, for highlighting selected filter options
    """
    params = request.GET.dict()
    # Parameter values are comma-separated lists of selected options.
    selected = params[key].split(',') if key in params else []
    return 'checked' if val in selected else ''
|
b89825ed276a7a99b630a92f63472c3b8ee50c97
| 698,027
|
import string
def checkInput(userInput, kind_):
    """
    Check user's input.

    :param userInput: str
    :return: bool (None for an unknown kind_)
    """
    if kind_ == 'approx_price':
        allowed = set(string.digits) | {'.'}
        return all(ch in allowed for ch in userInput)
    if kind_ == 'price_change':
        # NOTE: ascii_lowercase is a subset of ascii_letters, so in effect
        # any ASCII letter is accepted (mirrors the original condition).
        return all(ch in string.ascii_lowercase or ch in string.ascii_letters
                   for ch in userInput)
    return None
|
534d34ecb062ec32b1f2a58d30f20ecaba953e8f
| 698,029
|
def _process_exp(exp):
"""Use the exp name to return the names of the MPRAGE
data and the bold data (in that order).
"""
if exp == "fh":
mpragename = "mprage"
boldname = "fh"
elif exp == "butterfly":
mpragename = "mprage"
boldname = "butterfly"
elif exp == "clock":
mpragename = "mprage"
boldname = "clock"
elif exp == "polygon":
mpragename = "mprage"
boldname = "polygon"
elif exp == "redgreen":
mpragename = "mprage"
boldname = "redgreen"
elif exp == "biasbox":
mpragename = "mprage"
boldname = "biasbox"
else:
raise ValueError("exp name not understood.")
return mpragename, boldname
|
d7b144c59bc6b6a2ba8ea5e5ffd1c607e1e7a51d
| 698,030
|
import re
def unescape(url):
    """
    Strip the extra backslash that the MySQL database adds before all the
    punctuation in stored urls.

    @param url: the url string to be unescaped
    @type url: C{string}
    """
    backslash_escape = re.compile(r'\\(.)')
    return backslash_escape.sub(r'\1', url)
|
ef1d0f84a407c8d0d970a86044558066702735dd
| 698,031
|
import os
def is_exe(fpath):
    """ Return True when *fpath* refers to an existing regular file. """
    return os.path.isfile(fpath)
|
c6493ca65dc184347fd0838da2cc4cab82d6b190
| 698,032
|
def calculate_binomial_coefficient(n: int, k: int) -> int:
    """Calculate the binomial coefficient (n over k).

    Uses exact integer arithmetic: the running product is always divisible
    by (i + 1) at each step, so ``//`` never truncates. The original
    float-division-plus-round() version lost precision once the
    intermediate values exceeded 2**53.

    Raises:
        ValueError: if `n` or `k` is negative.
    """
    if n < 0:
        raise ValueError('`n` must not be negative!')
    if k < 0:
        raise ValueError('`k` must not be negative!')
    if k > n:
        return 0
    binomial_coefficient = 1
    for i in range(k):
        binomial_coefficient = binomial_coefficient * (n - i) // (1 + i)
    return binomial_coefficient
|
31942eecaa10cb7a13e9cd38dd2daed8d0618b40
| 698,033
|
def capture_info(line, lines_iter, first_line=True):
    """
    Collect *line* plus all immediately-following indented lines.

    *lines_iter* must support peek(); lines starting with a space are
    consumed and joined with newlines. When first_line is False, *line* is
    not included and the first consumed line is stripped instead.
    """
    captured = [line] if first_line else []
    while lines_iter.peek().startswith(' '):
        captured.append(next(lines_iter))
    if not first_line:
        captured[0] = captured[0].strip()
    return '\n'.join(captured)
|
14384420b58cbc97032a4675fd0aec2b9f24db30
| 698,034
|
def _prep_for_list(lb_list):
"""
Removes tenant id and changes the nodes list to 'nodeCount' set to the
number of node on the LB
"""
entries_to_keep = ('name', 'protocol', 'id', 'port', 'algorithm', 'status', 'timeout',
'created', 'virtualIps', 'updated', 'nodeCount')
filtered_lb_list = []
for each in lb_list:
filtered_lb_list.append(dict((entry, each[entry]) for entry in entries_to_keep))
return filtered_lb_list
|
e08b977fc00d654518e78466dc51a56d2669a655
| 698,035
|
import os
import subprocess
def fasttree(DIR,cleaned,seqtype):
    """Estimate a tree with fasttree from a cleaned alignment.

    Args:
        DIR: directory holding the alignment (trailing "/" optional).
        cleaned: alignment file name; must end with ".aln-cln".
        seqtype: "aa" or "dna"; selects the substitution model.

    Returns:
        Path to the ".fasttree.tre" output file. If the output already
        exists, it is returned without rerunning fasttree.
    """
    if DIR[-1] != "/": DIR += "/"
    assert cleaned.endswith(".aln-cln"),\
        "fasttree infile "+cleaned+" not ends with .aln-cln"
    assert seqtype == "aa" or seqtype == "dna","Input data type: dna or aa"
    # Output name: strip everything after the first "." and add the suffix.
    tree = cleaned.split(".")[0]+".fasttree.tre"
    # -wag for proteins, -nt -gtr for nucleotides.
    alg = ["-wag"] if seqtype == "aa" else ["-nt","-gtr"]
    if os.path.exists(DIR+tree):
        return DIR+tree
    cmd = ["fasttree"]+alg+["-quiet",DIR+cleaned]
    out = open(DIR+tree, 'w')
    p = subprocess.Popen(cmd,stdout=out)
    # Closing our handle is fine here; the child holds its own duplicate.
    out.close()
    p.communicate()
    assert p.returncode == 0,"Error fasttree"
    return DIR+tree
|
d7ded4dea87f64d2a38bb097bf630470bcc47dc6
| 698,036
|
import os
def which(file, env=os.environ):
    """Tries to find the exact path for a given filename on PATH.

    Returns None if no file was found (or *file* is None).
    """
    if file is None:
        return None
    for directory in env.get('PATH', '').split(os.pathsep):
        if not directory:
            continue
        candidate = os.path.join(directory, file)
        if os.path.exists(candidate):
            return os.path.realpath(candidate)
    return None
|
b4f24c9dc9719feb150e9043d90cad5baee900f7
| 698,037
|
def isAnalysisJob(trf):
    """ Determine whether the job is an analysis job or not.

    A job is an analysis job when its transform is fetched over http(s).
    """
    return trf.startswith(('https://', 'http://'))
|
75ccaf711dd04dc99aca266533fee7303fad3e85
| 698,038
|
def powset(S):
    """In : S (set)
    Out: List of lists representing powerset.

    Since sets/lists are unhashable, we convert the set to a list, build
    the powerset iteratively, and leave the result as a list of lists.
    """
    elements = list(S)
    subsets = [[]]
    # Processing elements right-to-left reproduces the recursive ordering:
    # all subsets without elements[0] first, then those including it.
    for element in reversed(elements):
        subsets = subsets + [[element] + rest for rest in subsets]
    return subsets
|
8d5cbc09f595d81b4a486994cfbce2a0cf97cf15
| 698,039
|
def slice_dict(original_dict, key_string):
    """
    :param original_dict: Original (and larger) dictionary with all entrances
    :param key_string: key string (desired starting string)
    :return: A new dictionary with all the entrances of the original that start with key_string
    """
    return {key: value for key, value in original_dict.items()
            if key.startswith(key_string)}
|
812b5a45115127f71cd5f2ebc02ea35c2582c81f
| 698,040
|
def noid(d):
    """ Remove the `id` key (in place) from a dict and return it, so we
    don't have to keep these things around when trying to match.
    """
    d.pop('id', None)
    return d
|
35bc1e062e5c68e362f69c0cf079bf9d509801a9
| 698,041
|
def unique_name(container, name, ext):
    """Generate a unique "name.ext" (or "name.N.ext") not yet in container."""
    candidate = '{}.{}'.format(name, ext)
    counter = 0
    # Bump the numeric infix until the name is free.
    while candidate in container:
        counter += 1
        candidate = '{}.{}.{}'.format(name, counter, ext)
    return candidate
|
f2403611d58551fff65d996d426e458efd03ec80
| 698,042
|
import six
def _is_sequence(seq):
    """Returns true if its input is a `tuple`, `list`, or `range`.

    Args:
        seq: an input sequence.

    Returns:
        True if the sequence is a `tuple`, `list`, or `range`.
    """
    sequence_types = (tuple, list, six.moves.range)
    return isinstance(seq, sequence_types)
|
e94094c314cff5bf9bd7525453d7906ca55d7261
| 698,043
|
def fib_iterative(n: int) -> int:
    """ Find the n-th Fibonacci number by iteration using dynamic programming.

    Note: n <= 2 (including 0) returns 1, mirroring the seed values.
    """
    a, b = 1, 1
    for _ in range(2, n):
        a, b = b, a + b
    return b
|
18d407bcd25a32407b42c497d6dee1dadeba421b
| 698,044
|
import ctypes
def crc16Calc(Data, StartElement, Len):
    """Calculate a 2-byte CRC over Data[StartElement : StartElement + Len].

    Table-driven CRC-16 with polynomial 0x1021 and initial value 0
    (these parameters match CRC-16/XMODEM; NOTE(review): confirm against
    the producer of these frames).

    Args:
        Data: indexable of byte values.
        StartElement: index of the first byte to include.
        Len: number of bytes to process.

    Returns:
        16-bit CRC as an int.
    """
    # 256-entry lookup table for polynomial 0x1021 (byte-at-a-time update).
    crc16Table = [0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7,\
                  0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef,\
                  0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6,\
                  0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de,\
                  0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485,\
                  0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d,\
                  0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4,\
                  0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc,\
                  0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823,\
                  0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b,\
                  0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12,\
                  0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a,\
                  0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41,\
                  0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49,\
                  0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70,\
                  0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78,\
                  0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f,\
                  0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067,\
                  0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e,\
                  0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256,\
                  0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d,\
                  0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405,\
                  0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c,\
                  0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634,\
                  0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab,\
                  0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3,\
                  0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a,\
                  0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92,\
                  0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9,\
                  0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1,\
                  0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8,\
                  0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0]
    crc16 = ctypes.c_uint16(0).value
    for x in range(StartElement, Len + StartElement):
        # Standard table update: shift in the next byte, XOR with the table
        # entry selected by the current high byte.
        crc16 = (crc16 << 8) ^ ctypes.c_uint16(crc16Table[((crc16 >> 8) ^ ctypes.c_uint8(Data[x]).value) & 0xff]).value
        # Mask back to 16 bits (the shift above can carry past bit 15).
        crc16 = ctypes.c_uint16(crc16).value
    return crc16
|
4fca47d528e441d3a0fb104b7705192163987588
| 698,045
|
def adjustSize(imageSize, targetSize):
    """Return the size (x, y) the image must be scaled to so it fits into
    the target size while keeping the aspect ratio.
    """
    img_w, img_h = imageSize
    tgt_w, tgt_h = targetSize
    # First try to fill the target width.
    scaled_w = tgt_w
    scaled_h = img_h * tgt_w / img_w
    # If that makes the image too tall, fill the target height instead.
    if scaled_h > tgt_h:
        scaled_h = tgt_h
        scaled_w = img_w * tgt_h / img_h
    return scaled_w, scaled_h
|
32e18fbb34864e630c19d1d689cfb28de28ff503
| 698,046
|
import re
def AutoscalersForMigs(migs, autoscalers):
    """Finds Autoscalers with target amongst given IGMs.

    Args:
      migs: List of triples (IGM name, scope type, location reference).
      autoscalers: A list of Autoscalers to search among.
    Returns:
      A list of all Autoscalers whose target is one of the given IGMs.
    """
    # Build one URL-suffix pattern per IGM, e.g.
    # /projects/<p>/zones/<z>/instanceGroupManagers/<name>$
    patterns = [
        '/projects/{project}/{scope_type}s/{scope_name}/'
        'instanceGroupManagers/{name}$'.format(
            project=location.project,
            scope_type=scope_type,
            scope_name=getattr(location, scope_type),
            name=name)
        for name, scope_type, location in migs
    ]
    # Combine into a single alternation so each target is matched once.
    combined = re.compile('(' + ')|('.join(patterns) + ')')
    return [a for a in autoscalers if combined.search(a.target)]
|
52f809794395ba6ed31b94bed36dc9f4de8ce919
| 698,047
|
def normalize_mode(mode):
    """
    Return a mode value, normalized to a string and containing a leading zero
    if it does not have one.
    Allow "keep" as a valid mode (used by file state/module to preserve mode
    from the Salt fileserver in file states).
    """
    if mode is None:
        return None
    text = mode if isinstance(mode, str) else str(mode)
    # Turn Python-style octal prefixes ("0o644") into plain "0644".
    text = text.replace("0o", "0")
    # Strip quotes and initial zeroes, then zero-pad up to 4 characters.
    # This ensures that something like '00644' is normalized to '0644'.
    return text.strip('"').strip("'").lstrip("0").zfill(4)
|
66d58de58eb9f0e77e2fa75253cfeef3ee1abe2c
| 698,048
|
import math
def spherical_index_lm(k):
    """Return the degree l and the order m for the linear mode index k.

    Modes are enumerated as k = l*(l+1) + m with m in [-l, l].

    Parameters
    ----------
    k : int
        Non-negative linear mode index.

    Returns
    -------
    (int, int)
        The degree ``l`` and order ``m``.
    """
    # math.isqrt is exact for arbitrarily large ints; the previous
    # math.floor(math.sqrt(k)) could be off by one for large k because
    # float sqrt rounds (e.g. k = 10**16 - 1).
    l = math.isqrt(k)
    return l, k - l * (l + 1)
|
de00ebda16ad2480b32a5d827351bb43ec858e42
| 698,049
|
import torch
def hamming_distance(input1, input2):
    """Computes hamming distance between binary feature matrices.

    Args:
        input1 (torch.Tensor): 2-D feature matrix with entries in {0, 1}.
        input2 (torch.Tensor): 2-D feature matrix with entries in {0, 1}.

    Returns:
        torch.Tensor: pairwise hamming distance matrix of shape
        (input1.shape[0], input2.shape[0]).
    """
    a = input1.to(torch.int)
    b = input2.to(torch.int)
    # Count positions where a==1 and b==0 (a @ (b-1).T contributes -1 each)
    # plus positions where a==0 and b==1 ((a-1) @ b.T contributes -1 each);
    # the absolute value of the sum is the number of mismatches.
    ones_vs_zeros = a.matmul((b - 1).T)
    zeros_vs_ones = (a - 1).matmul(b.T)
    return torch.abs(ones_vs_zeros + zeros_vs_ones)
|
7287c0f322b4632f04486430990b9fd819bddc98
| 698,050
|
def get_blocks(fname):
    """Get blocks of the diff that can be independently filtered.

    Splits the git diff stored in *fname* into a list of blocks, each a list
    of lines: a file header (the ``diff --git`` line up to the first ``@@``
    hunk marker) or a single hunk (an ``@@`` line and its body).

    Parameters
    ----------
    fname : str
        Path to a file containing ``git diff`` output.

    Returns
    -------
    list of list of str
        Header and hunk blocks in file order.
    """
    with open(fname, 'r') as fh:
        lines = iter(fh.readlines())
    parts = []
    line = next(lines)
    while True:
        if line.startswith('diff --git'):
            # File header block: collect up to (not including) the first
            # '@@' marker; the loop leaves `line` on that '@@' line so the
            # next branch handles it in the same outer iteration.
            block = [line]
            for line in lines:
                if line.startswith('@@'):
                    break
                block.append(line)
            parts.append(block)
        if line.startswith('@@'):
            # Hunk block: the '@@' line plus its body, up to the next hunk
            # marker or the next file header.
            block = [line]
            for line in lines:
                if line.startswith('@@') or line.startswith('diff --git'):
                    break
                block.append(line)
            parts.append(block)
        if line.startswith('\\ No newline'):
            # Attach the "\ No newline at end of file" marker to the block
            # it terminates.
            parts[-1].append(line)
        # NOTE(review): this next() runs unconditionally, so when a hunk's
        # inner loop stopped on an '@@' or 'diff --git' boundary line, that
        # boundary line appears to be skipped rather than reprocessed —
        # confirm against multi-hunk/multi-file diffs before relying on it.
        try:
            line = next(lines)
        except StopIteration:
            break
        # NOTE(review): an iterator object is always truthy, so this guard
        # never fires; the loop actually exits via StopIteration above.
        if not lines:
            break
    return parts
|
902d897b85794711184cb50bd8a308bd980620d6
| 698,052
|
def from_camel_case(name):
    """Convert camel case to snake case.

    Function and variable names are usually written in camel case in C++ and
    in snake case in Python. An underscore is inserted before every uppercase
    character except the first character of the string.
    """
    text = str(name)
    pieces = []
    for index, char in enumerate(text):
        if char.isupper() and index > 0:
            pieces.append("_")
        pieces.append(char)
    return "".join(pieces).lower()
|
c6e7184598252a6db1bcaee5d5375969c5c9bd39
| 698,053
|
def molecule2stateTuples(molecule):
    """
    Receives a molecule structure, returns a list of tuples detailing the
    state of the contained components: (name, 1 if bound else 0,
    active state or "" when the component has no states).
    """
    state_tuples = []
    for comp in molecule.components:
        bound_flag = 1 if comp.bonds else 0
        active = comp.activeState if comp.states else ""
        state_tuples.append((comp.name, bound_flag, active))
    return state_tuples
|
2cc19579e640c5099201e1d3da43ad56641f29d0
| 698,054
|
def PatternListToStr(pattern):
    """Return a pattern string for the given list of integers.

    Each integer is interpreted as a character code via ``chr``, so e.g.
    PatternListToStr([53, 51, 49]) -> '531' (the codes for '5', '3', '1').

    NOTE(review): the original example ``[5, 3, 1] -> '531'`` does not match
    the implementation (``chr(5)`` is a control character, not ``'5'``) —
    confirm whether the intent was ``chr(p)`` or ``str(p)`` per element.
    """
    return ''.join([chr(p) for p in pattern])
|
d42f06204f3b4c6fa3badb45d5755614554b3f9b
| 698,056
|
def crf_kernel_config(defn):
    """Creates a default kernel configuration for sampling the dish assignment
    using the "Posterior sampling in the Chinese restaurant franchise" Gibbs
    sampler from Teh et al (2005).

    Parameters
    ----------
    defn : LDA model definition (unused; kept for a uniform config API)
    """
    kernels = ['crf']
    return kernels
|
f5d874d62c9c858115cc32dc6ee193c13a7b0eb7
| 698,057
|
import copy
def exact_to_2nd_order_model_layer_avg(model, layer_to_avg=3):
    """Convert LeNet model training on exact augmented objective to model
    training on 2nd order approximation, but approximation is done at different
    layers.

    The input ``model`` is left untouched; all flags are set on a deep copy,
    which is returned.

    Parameters
    ----------
    model : model trained on the exact augmented objective.
    layer_to_avg : int, optional
        Layer at which feature averaging is performed (default 3).

    Returns
    -------
    A deep copy of ``model`` configured for the 2nd order approximation.
    """
    model_2nd = copy.deepcopy(model)
    model_2nd.approx = True
    model_2nd.feature_avg = True
    model_2nd.regularization = True
    model_2nd.layer_to_avg = layer_to_avg
    # Can't use the regularization function specialized to linear model unless
    # averaging at layer 4.
    # BUG FIX: install the general regularizer on the returned copy; the
    # previous code assigned it to the input ``model``, mutating the caller's
    # object while leaving ``model_2nd`` with the specialized regularizer.
    if layer_to_avg != 4:
        model_2nd.regularization_2nd_order = (
            model_2nd.regularization_2nd_order_general)
    return model_2nd
|
becf33033b93bac6d3e78e4f35881222ae554d75
| 698,058
|
import re
def search_by_regex(nodes: list, option: int, regex: str) -> list:
    """
    Return all :param regex matched values from :param nodes at :param option index.

    :param nodes: list of nodes.
    :param option: Index in the nodes list(check constants at the start of this file).
    :param regex: Pattern to be found.
    :return: Return list of matched values as list.
    """
    # Compile once so the pattern is not re-parsed for every node.
    matcher = re.compile(regex)
    return [node[option] for node in nodes if matcher.search(node[option])]
|
3a045c324dd333157a6de036ed7affd11a91b6a0
| 698,059
|
def get_retention_policy(interval, retention_policies):
    """Get appropriate retention policy for interval provided.

    :param interval: Interval of query in seconds
    :type interval: int
    :param retention_policies: Retention policy configuration
    :type retention_policies: dict(max time range of interval
                                   in seconds: retention policy name)
    :rtype: ``str`` or ``None``
    """
    if not retention_policies:
        return None
    # Pick the policy with the smallest threshold that still covers the
    # requested interval.
    for threshold in sorted(retention_policies):
        if interval <= threshold:
            return retention_policies[threshold]
    # Interval is beyond the configured range: fall back to the policy for
    # the largest threshold.
    return retention_policies[max(retention_policies)]
|
dc1e2358d715cadbcd5275240203fe20a50c1cc9
| 698,060
|
def analysis_get_headerdata(analysis):
    """Obtains the headerdata from an OpticStudio analysis.

    Parameters
    ----------
    analysis: Any
        An OpticStudio Analysis.

    Returns
    -------
    list
        The headerdata lines.
    """
    header = analysis.Results.HeaderData
    return [line for line in header.Lines]
|
b84f11e1503d3181edecb8ab1c4cae7fd535d0be
| 698,061
|
import warnings
def extract_cube_at_time(cubes, time, time_extract):
    """
    Extract a single cube at a given time from a cubelist.

    Args:
        cubes (iris.cube.CubeList):
            CubeList of a given diagnostic over several times.
        time (datetime.datetime object):
            Time at which forecast data is needed.
        time_extract (iris.Constraint):
            Iris constraint for the desired time.

    Returns:
        cube (iris.cube.Cube) or None:
            Cube of data at the desired time, or None (with a warning) when
            exactly one match is not found.
    """
    try:
        # Tuple-unpacking raises ValueError unless exactly one cube matches.
        (cube,) = cubes.extract(time_extract)
    except ValueError:
        msg = ('Forecast time {} not found within data cubes.'.format(
            time.strftime("%Y-%m-%d:%H:%M")))
        warnings.warn(msg)
        return None
    return cube
|
24f6019f8a01a8d4b9c8f66c18c94eaee4f3077e
| 698,062
|
def kw2re(x):
    """Convert a list of keywords to a regex alternation group (deduplicated,
    sorted for determinism)."""
    unique_keywords = sorted(set(x))
    return '({})'.format('|'.join(unique_keywords))
|
13127d8b0c6d1772ebd4be58aca0f9e3f160544a
| 698,063
|
import subprocess
def has_extra_files(dir: str) -> bool:
    """Check whether a git repository has untracked files.

    Runs ``git clean --dry-run -d`` in *dir*; any (non-whitespace) output
    means git would remove something, i.e. untracked files exist.
    """
    result = subprocess.check_output(
        ["git", "clean", "--dry-run", "-d"], cwd=dir)
    return bool(result.strip())
|
76f023c4157040654f58b6defb4270890d7a08c3
| 698,064
|
def _jsonify(vals):
"""문서 출력을 위해 파이썬 값을 JSON 형태로."""
_vals = []
for val in vals:
if type(val) is bool:
_vals.append(str(val).lower())
else:
_vals.append(val)
return _vals
|
d9240bc43f1ca9a31152b12a94fc1ba1980e2b82
| 698,065
|
import platform
import sys
import struct
def get_sys_info():
    """Returns system information as a list of (label, value) tuples.

    Best-effort: on any failure an empty (or partial) list is returned
    rather than raising.
    """
    info = []
    try:
        sysname, _nodename, release, _version, machine, processor = \
            platform.uname()
        info.extend([
            ("python", "{}.{}.{}.{}.{}".format(*sys.version_info)),
            # Pointer size in bits distinguishes 32- vs 64-bit interpreters.
            ("python-bits", struct.calcsize("P") * 8),
            ("OS", "{}".format(sysname)),
            ("OS-release", "{}".format(release)),
            ("machine", "{}".format(machine)),
            ("processor", "{}".format(processor)),
        ])
    except BaseException:
        # Deliberately best-effort: never let diagnostics collection fail.
        pass
    return info
|
bd8e316de9720f0b2d65b06216d512f69c391a4f
| 698,066
|
def get_boundary_locations(size, sector_size, stride):
    """Get a list of 1D sector boundary positions.

    Args:
        size: length of the full domain.
        sector_size: length of the sector.
        stride: how far each sector moves to the right.

    Returns:
        boundaries: a list of (left, right) sector boundary tuples. The final
        sector is clamped to end exactly at ``size`` (it may overlap the
        previous one).
    """
    spans = []
    left, right = 0, sector_size
    while left < size:
        if right > size:
            # Sector overhangs the domain: emit a final clamped sector that
            # ends flush with the boundary, then stop.
            spans.append((size - sector_size, size))
            break
        spans.append((left, right))
        if right == size:
            # Landed exactly on the boundary; avoid emitting the clamped
            # sector a second time.
            break
        left += stride
        right += stride
    return spans
|
ae6a2b6461f2c823f0129e7f691d63830834cbe7
| 698,067
|
def get_paging_start_end(page, numberMatches):
    """Return the (iStart, iEnd) slice bounds for a 1-based page of
    *numberMatches* items."""
    start = (page - 1) * numberMatches
    end = start + numberMatches
    return (start, end)
|
bb2b89fd2f9b6c18f134cfabc5e56da682688b55
| 698,068
|
import random
def random_strategy(D):
    """Return a random integer between 1 and D (inclusive), used as a
    baseline dice-choice strategy.

    Args:
        D: maximum number of dice.
    """
    return random.randint(1, D)
|
280f3deff20e0f4e96f7cbd8f3ea4d4ae671aba8
| 698,069
|
def _get_rate_via_base(rates, target):
"""
:param: rates: A set/tuple of two base Rate instances
:param: target: A string instance of the currency to convert to
Both target and source are not a base currency - actual rate could be calculated via their rates to base currency.
For example:
7.84 NOK = 1 USD = 8.37 SEK
7.84 NOK = 8.37 SEK
1 NOK = 8.37 / 7.84 SEK
"""
first, second = rates
# Instead of expecting an explicit order in the `rates` iterable, that will put the
# source currency in the first place, we decided to add an extra check here and swap
# items if they are ordered not as expected
if first.currency == target:
first, second = second, first
return second.value / first.value
|
705ae5339cbb62da315898ae4e481f8c25edc2f3
| 698,070
|
def fix_coordinate_system(value: float, min_value: float = 0.0, max_value: float = 1.0):
    """
    Clamp *value* into [min_value, max_value] to fix out-of-range values
    caused by rounding error.
    """
    return max(min_value, min(value, max_value))
|
f692d6e94ed61be5bd25207bf787267e921a2f08
| 698,071
|
def from_dero(value_in_dero):
    """Convert an amount in DERO to the smallest (atomic) unit.

    1 DERO == 10**12 atomic units; the result is truncated to an int.
    """
    atomic_units_per_dero = 10 ** 12
    return int(value_in_dero * atomic_units_per_dero)
|
6a27469721cbd9851312f73a971caf279c95ffa8
| 698,072
|
import hashlib
def sha256_hash(data):
    """Compute SHA-256 of *data* and return the hash as a hex string.

    Args:
        data: ``bytes`` or ``str`` (str is UTF-8 encoded first). Falsy
            values (None, "", b"") hash the empty byte string.

    Returns:
        str: 64-character lowercase hex digest.
    """
    data = data or b""
    hasher = hashlib.sha256()
    hasher.update(data.encode() if isinstance(data, str) else data)
    # hexdigest() always returns str in Python 3, so the previous
    # isinstance/decode fallback was dead code and has been removed.
    return hasher.hexdigest()
|
88443a9a62080b9e17c6a4eb4bf68ca7b8f62e6c
| 698,074
|
import json
def load_fixture_json(name):
    """Load and parse the fixture ``tests/fixtures/<name>.json``."""
    path = f"tests/fixtures/{name}.json"
    with open(path, encoding="UTF-8") as json_file:
        return json.load(json_file)
|
31f064da134974380ee12341199aef4d05bdba99
| 698,075
|
import re
def is_prognosis_location(word):
    """
    Check whether *word* is a prognosis location such as ``c5-c9``.

    @param word. A string.
    @return the matched object if it is a prognosis location, otherwise
            None.

    >>> is_prognosis_location('c9-c5') is not None
    True
    >>> is_prognosis_location('C5-C9') is not None
    True
    >>> is_prognosis_location('test') is not None
    False
    >>> is_prognosis_location('c-9-C5') is not None
    False
    """
    # One or more c<digits> segments joined by hyphens, case-insensitive 'c'.
    pattern = re.compile(r"^(c|C)[0-9]+(-(c|C)[0-9]+)*$")
    return pattern.search(word)
|
4237b610d83080ec8145fbd611a6f7ada30c7de5
| 698,076
|
def get_selection():
    """Read and return the user's input from a '> ' prompt.

    returns: (str) the raw line typed by the user.
    """
    prompt = '> '
    return input(prompt)
|
f091b660b412972a3f5ad4342daa4cf01eb88549
| 698,077
|
def get_neighbors(x, y):
    """Return a list with the 8 neighbor positions of (x, y).

    BUG FIX: the previous body returned malformed tuples (mixed arities,
    wrong coordinates) that did not match this docstring; it now returns
    the 8 surrounding grid cells in row-major order.
    """
    return [
        (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
        (x - 1, y),                 (x + 1, y),
        (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),
    ]
|
30098eb9a17e9fdd71651b4762ddeca6dad0a7f5
| 698,078
|
def fetch_token_mock(self,
                     token_url=None,
                     code=None,
                     authorization_response=None,
                     body='',
                     auth=None,
                     username=None,
                     password=None,
                     method='POST',
                     timeout=None,
                     headers=None,
                     verify=True,
                     proxies=None,
                     **kwargs):
    """Mock token fetching api call.

    All parameters are accepted for signature compatibility with the real
    fetch_token and ignored; a fixed token payload is returned.
    """
    return {
        "orcid": "123",
        "name": "ros",
        "access_token": "xyz",
        "refresh_token": "xyz",
        "scope": ["/activities/update", "/read/limited"],
        "expires_in": "12121"
    }
|
8c6902272140ffe498ac52c69564d351c6257b2f
| 698,079
|
import argparse
def get_args(argv=None):
    """Get CLI arguments and options.

    Args:
        argv: optional list of argument strings to parse; defaults to
            ``sys.argv[1:]`` (backward-compatible — existing callers that
            pass nothing get the old behavior). Added so the parser can be
            exercised without touching the process arguments.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
        prog='td-td_dbf2csv',
        description='small utility to convert simple *.DBF files to *.CSV'
    )
    parser.add_argument('input')
    parser.add_argument('output', nargs='?', default=None)
    parser.add_argument('-ie', '--input-encoding',
                        default='cp850',
                        help='charset of *.dbf files (default: cp850)')
    parser.add_argument('-oe', '--output-encoding',
                        default='utf8',
                        help='charset of *.csv files (default: utf8)')
    parser.add_argument('-q', '--quoting-mode',
                        choices=('minimal', 'all', 'non-numeric', 'none'),
                        default='minimal',
                        help='quoting mode for csv files (default: minimal)')
    parser.add_argument('-d', '--delimiter-char',
                        default=',',
                        help='delimiter char for csv files (default: ",")')
    parser.add_argument('-e', '--escape-char',
                        default='\\',
                        help='escape char for csv files (default: "\\")')
    parser.add_argument('-s', '--stop-at-memo-sep',
                        action='store_true',
                        help='instruct the reader to stop at the memofile separator byte (\\x1a)')
    return parser.parse_args(argv)
|
847eb902149b929ba16154cdcb3d45cc8e46bcda
| 698,080
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.