content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _rav_geterr_ ( self ) :
"""Get the error
>>> var = ...
>>> print(var.error)
"""
return self.getError() | dd61827bc7702006b6eee0969d23fed2e643ba74 | 38,790 |
import os
def process_exists(pid):
    """
    Return True if a process with the given pid exists.

    Sends signal 0, which performs existence/permission checks without
    actually delivering a signal.
    """
    try:
        os.kill(pid, 0)
        return True
    except ProcessLookupError:
        # No such process.
        return False
    except PermissionError:
        # The process exists but belongs to another user.
        return True
def cell_num_to_block_offsets(cell_num):
    """
    :param cell_num: The cell number within the block. Precondition: 0 <= cell_num < 9
    :return: (y_offset, x_offset) where y_offset is the number of cells from the top
        and x_offset is the number of cells from the left
    """
    # divmod gives (row, column) for a 3x3 block in row-major order; the
    # original `int(cell_num / 3)` relied on float division and the docstring
    # referenced a nonexistent `y` parameter and said "from the bottom".
    return divmod(cell_num, 3)
def to_node(lines, node):
    """Return the index labels of all lines incident to *node* (as node_i or node_e)."""
    touches = (lines.node_i == node) | (lines.node_e == node)
    return list(lines[touches].index)
def multi_find(s: str, f: str):
    """Find every (possibly overlapping) occurrence of substring f in s.

    Returns a list of the start index of each occurrence.
    """
    positions = []
    offset = 0
    # Resume each search one character past the previous hit so that
    # overlapping matches are reported too.
    while len(s) - offset > 0:
        hit = s.find(f, offset)
        if hit == -1:
            break
        positions.append(hit)
        offset = hit + 1
    return positions
def makeTypeMaps(types):
    """Build the two lookup tables used for (de)serialization.

    codeMap maps a prefix symbol to a streamer instance (for deserializing);
    typeMap maps a Python type to a streamer instance (for serializing).
    Streamers that declare a ``ptype`` register under that type; Hessian-specific
    streamers with no Python analog register under their own class.
    """
    codeMap = {}  # prefix code -> streamer instance
    typeMap = {}  # Python type (or streamer class) -> streamer instance
    for streamer_class in types:
        streamer = streamer_class()
        # Streamers may opt out of registration via autoRegister = False.
        if not getattr(streamer, "autoRegister", True):
            continue
        for code in streamer.codes:
            # assert not code in codeMap
            codeMap[code] = streamer
        if hasattr(streamer, "ptype"):
            assert streamer.ptype not in typeMap
            typeMap[streamer.ptype] = streamer
        else:
            typeMap[streamer.__class__] = streamer
    return codeMap, typeMap
def sub_symbols(pattern, code, symbol):
    """Substitutes symbols in CLDR number pattern."""
    # Replace the double currency marker first so the single-marker pass
    # cannot touch it.
    with_code = pattern.replace('¤¤', code)
    return with_code.replace('¤', symbol)
def get_K_loss_pp(has_pipe):
    """Linear heat-loss coefficient of the piping.

    Args:
        has_pipe(bool): whether the piping is insulated

    Returns:
        float: linear heat-loss coefficient of the piping (W/mK)
    """
    return 0.15 if has_pipe else 0.21
def recursive_factorial(num):
    """Compute num! recursively; any value below 2 yields 1."""
    if num < 2:
        return 1
    return num * recursive_factorial(num - 1)
def count_tuples(map):
    """ @brief counts non zero tuples in a map
        @param map BitArray containing the bitmap (may be None)
        @return Count of non-zero bytes in the bitmap (0 when map is None)"""
    # Fixed the non-idiomatic `map != None` comparison and replaced the
    # manual counter with a generator sum.
    if map is None:
        return 0
    return sum(1 for byte in map.tobytes() if byte != 0)
import os
def is_writeable(path, check_parent=False):
    """
    Check if a given path is writeable by the current user.

    :param path: The path to check
    :param check_parent: If the path to check does not exist, check for the
        ability to write to the parent directory instead
    :returns: True or False
    """
    # The original performed the same two os.access calls twice; one
    # existence check followed by one writability check is sufficient.
    if os.access(path, os.F_OK):
        return os.access(path, os.W_OK)
    # The path does not exist.
    if not check_parent:
        # We're not allowed to check the parent directory of the provided path
        return False
    parent_dir = os.path.dirname(path)
    if not os.access(parent_dir, os.F_OK):
        # Parent directory does not exist (original comment had a typo: "exit")
        return False
    return os.access(parent_dir, os.W_OK)
def get_sentence(root_node):
    """
    Given a complete sentence like (ROOT (S ...)) or (ROOT (SQ ...)), returns
    the nested (S ...) or (SQ ...).

    The ROOT node is assumed to have the sentence as its first (only) child.
    """
    return root_node[0]
def divide(dividend, divisor):
    """
    :param dividend: a numerical value
    :param divisor: a numerical value
    :return: dividend / divisor when the division is possible, otherwise None
    """
    # Guard clauses replace the nested ifs: a None or zero divisor yields None.
    if divisor is None or divisor == 0:
        return None
    return dividend / divisor
import struct
import binascii
def make_chunk(type, data):
    """Create a raw chunk by composing chunk's ``type`` and ``data``. It
    calculates chunk length and CRC for you.

    :arg str type: PNG chunk type.
    :arg bytes data: PNG chunk data, **excluding chunk length, type, and CRC**.
    :rtype: bytes
    """
    # Layout: 4-byte big-endian length, type+data payload, 4-byte CRC of payload.
    payload = type.encode("latin-1") + data
    length = struct.pack("!I", len(data))
    crc = struct.pack("!I", binascii.crc32(payload))
    return length + payload + crc
def strip_lower(data):
    """Lower-case *data*, trim surrounding whitespace, and remove all spaces."""
    cleaned = data.lower().strip()
    return cleaned.replace(' ', '')
def first_index_k_ones_right(qstr, k, P):
    """
    For a binary string qstr, return the first index of q with k (mod P) ones to the right.
    Return: index in [0, qstr.length]

    Raises Exception when no position qualifies.

    NOTE(review): qstr appears to be a bitstring-like object exposing a
    `.length` attribute and integer indexing (qstr[j-1] == 1) -- confirm
    against the caller.
    """
    num_ones_right = 0
    # Scan positions from the rightmost (qstr.length) down to 0; at each
    # position, num_ones_right holds the number of ones strictly to its right.
    for j in range(qstr.length, -1, -1):
        if (num_ones_right - k) % P == 0:
            return j
        if j == 0:
            raise Exception("No valid position found")
        # Account for the bit immediately left of position j before moving on.
        if qstr[j-1] == 1:
            num_ones_right += 1
def contour_coords(contour, source='scikit'):
    """Extract x, y tuple of contour positions from contour data.

    Scikit stores points as (row, col), i.e. reversed x, y; OpenCV uses an
    array of shape (npts, 1, 2).

    Parameters
    ----------
    contour: contour data
    source: 'scikit' or 'opencv'

    Output
    ------
    x, y (tuple of numpy arrays) that can be used directly on an imshow() graph
    """
    if source == 'scikit':
        return contour[:, 1], contour[:, 0]
    if source == 'opencv':
        points = contour.squeeze()  # drop the middle singleton dimension
        return points[:, 0], points[:, 1]
    raise ValueError(f'{source} not a valid source for contour data.')
def find(inp, success_fn):
    """Depth-first search through nested lists/tuples/dicts for the first
    element on which success_fn returns true; returns None if none matches
    (a bare non-container input is returned as-is)."""
    def _search(node):
        # Containers are expanded; anything else is a leaf to test.
        if isinstance(node, (list, tuple)):
            children = list(node)
        elif isinstance(node, dict):
            children = list(node.items())
        else:
            return node, success_fn(node)
        for child in children:
            found, ok = _search(child)
            if ok:
                return found, ok
        return None, False
    return _search(inp)[0]
def get_text(node):
    """Get the contents of text nodes in a parent node.

    Simply returns the node's ``text`` attribute (e.g. an ElementTree-style
    element -- confirm against callers).
    """
    return node.text
import re
def get_argument_value(input_string, argument):
    """This function gets the value of the argument from the input_string.

    Inputs:
        - input_string: Input_string containing all arguments [string]
        - argument: The argument of interest to be gathered from the input_string [string]
          NOTE(review): `argument` is passed to re.split as a regex pattern, so
          any regex metacharacters in it change the behaviour -- confirm that
          callers only pass literal text.
    Outputs:
        - argument_value: the value of the argument of interest [string]
    """
    # Split on the argument pattern, take the text after its last occurrence,
    # then take the first non-empty space-separated token of that remainder.
    argument_value = list(filter(None, re.split(' ', list(filter(None, re.split(argument, input_string)))[-1])))[0]
    return argument_value
def read_column_data_from_txt(fname):
    """
    Read data from a simple text file.

    Format should be just numbers, whitespace delimited.
    The first column is the dependent variable; the others are independent.

    Returns
    -------
    x_values : list
        List of independent-variable columns (tuples of floats).
    y_values : tuple
        The dependent-variable column.
    """
    # `with` guarantees the file handle is closed; the original leaked it.
    with open(fname) as datafile:
        datarows = [[float(token) for token in line.split()] for line in datafile]
    datacols = list(zip(*datarows))
    # First column is y; the remaining columns are x.
    return datacols[1:], datacols[0]
import pickle
def leer_desde_archivo(archivo, valor_por_defecto=None):
    """Read one pickled record from *archivo* and report end-of-file.

    *archivo* must be open in binary read mode ("rb").  Returns a tuple
    (data, fin_de_archivo): on a successful read fin_de_archivo is False;
    once the end of file is reached, data is *valor_por_defecto* (None by
    default) and fin_de_archivo is True.
    """
    try:
        return pickle.load(archivo), False
    except EOFError:
        return valor_por_defecto, True
def FeaturesToXYZ(geodataframe, attr, adjustment=0.0, mult=1.0):
    """Converts Linestring collections to points.

    Parameters:
        GeoDataFrame geodataframe
            Object to convert (assumed geopandas-like: rows expose
            .geometry.xy -- confirm against callers)
        string attr
            Attribute column providing the z value for every point of a feature
        float adjustment
            Optional bias value added to each z
        float mult
            Optional multiplier for unit conversions (applied before adjustment)
    Returns:
        List of (x,y,z) tuples representing the Linestring coordinates.
    """
    print("Converting features to points:")
    points = []
    print("...found",len(geodataframe),"features.")
    for row in range(0,len(geodataframe)):
        x, y = geodataframe.iloc[row].geometry.xy
        z = float(geodataframe[attr].iloc[row])
        # max(...,1) keeps single-point geometries from being dropped entirely.
        for p in range(0,max(len(x)-1,1)): # Ignore last (or first) coordinate pair as it is repeated
            points.append((x[p],y[p],mult*z+adjustment))
    print("...found a total of",len(points),"points.")
    return points
import os
def get_env_setting(setting, default):
    """Return the value of environment variable *setting*, or *default* when
    it is not set.

    (The original docstring claimed an exception was returned, which the code
    never did.)
    """
    return os.environ.get(setting, default)
def hash_password(password):
    """
    This function is for saving passwords in a discreet way.

    Concatenates the decimal ordinal of every character, then reverses the
    resulting digit string.

    @type password: string
    @param password: the password to be 'hashed'
    """
    digits = "".join(str(ord(char)) for char in password)
    return digits[::-1]
import random
def mutate(solution):
    """mutate(solution) Returns a mutated copy of *solution*.

    Each cell (a printable-ASCII character code) has a 1-in-20 chance of being
    shifted up or down by one; a shift that would leave the printable range
    [ord(' '), ord('~')] is reflected back instead.
    """
    low, high = ord(" "), ord("~")
    mutated = []
    for code in solution:
        if random.random() < (1 / 20):  # 1 in 20 chance to mutate this cell.
            shift = random.choice([-1, 1])  # Randomly pick an up or down shift.
            if code + shift > high:
                code -= shift  # Hit the upper bound: reflect downward.
            elif code + shift < low:
                code -= shift  # Hit the lower bound: reflect upward.
            else:
                code += shift
        mutated.append(code)
    return mutated
def _map_operation(operation):
    """Default function to map the operation to target.

    The identity mapping: returns *operation* unchanged; intended to be
    overridden/replaced with a real mapping by callers.
    """
    return operation
import subprocess
def do_subprocess_with_output(cmd):
    """ Standardize error processing

    Args:
        cmd (str | list): command to execute
    Returns:
        (bytes): output of command
    Raises:
        subprocess.CalledProcessError: if the command exits with a non-zero
            status (the original wrapped this in a try/except that only
            re-raised, leaving an unused binding -- dead code removed).
    """
    return subprocess.check_output(cmd)
import os
def _data_root_Linux():
"""
Use freedesktop.org Base Dir Specfication to determine storage
location.
"""
fallback = os.path.expanduser("~/.local/share")
root = os.environ.get("XDG_DATA_HOME", None) or fallback
return os.path.join(root, "python_keyring") | b64325692bc0b1a62d01e6dbad041c6c65d006cc | 38,830 |
def validate_item_against_schema(schema, item):
    """Returns whether or not the given item has the same format as the
    given schema.

    Args:
        schema: A dictionary mapping field name to expected value type.
        item: A dictionary mapping field name to value.
    Returns:
        A boolean representing whether or not the item matches the schema.
    Raises:
        TypeError: If either argument is not a dictionary.
    """
    if not isinstance(schema, dict):
        raise TypeError("Schema is not a dict object.")
    if not isinstance(item, dict):
        raise TypeError("Item is not a dict object.")
    # Equal sizes plus per-field checks implies the key sets match exactly.
    if len(schema) != len(item):
        return False
    return all(
        field in item and isinstance(item[field], expected_type)
        for field, expected_type in schema.items()
    )
def abband(matrix, vecin, vecout, neq, iband):
    """
    Multiplies [ banded ]{vector} = {vector}

    Accumulates the product of a banded matrix (band storage, half-bandwidth
    `iband`) with `vecin` into `vecout`, which is modified in place and also
    returned.

    NOTE(review): the band-storage indexing below (matrix[j][i - j + 1] and
    matrix[i][j] with apparently 1-based offsets) follows the original
    Fortran-style layout -- confirm against the code that assembles `matrix`.
    """
    for i in range(neq):
        # Contribution from band entries in rows above row i.
        jlim = max(0, i - iband + 1)
        for j in range(jlim, i):
            val = vecin[j]
            vecout[i] += val * matrix[j][i - j + 1]
        #
        # Contribution from the diagonal and entries to its right in row i.
        jlim = min(iband, neq - i + 1)
        for j in range(1, jlim):
            val = vecin[i + j - 1]
            vecout[i] += val * matrix[i][j]
        #
    return vecout
def getNodeIdentifiersInRow(nodes, elementsCountAround, row):
    """
    Collect the identifiers of all nodes lying in one row of the mesh.

    :param nodes: Zinc-style nodeset providing createNodeiterator().
        NOTE(review): assumed to iterate nodes in row-major order -- confirm.
    :param elementsCountAround: number of nodes in every row after the apex.
    :param row: row starting at 0 for apex (the apex row holds a single node)
    :return: list of node identifiers in row
    """
    nodeIdentifiers = []
    nodeiterator = nodes.createNodeiterator()
    node = nodeiterator.next()
    # The apex row (row 0) contains exactly one node; each subsequent row
    # contains elementsCountAround nodes.
    remainingnodesinrow = 1
    currentRow = 0
    while node.isValid():
        if currentRow == row:
            nodeIdentifiers.append(node.getIdentifier())
        elif currentRow > row:
            # Already past the requested row -- nothing more to collect.
            break
        remainingnodesinrow -= 1
        if remainingnodesinrow == 0:
            currentRow += 1
            remainingnodesinrow = elementsCountAround
        node = nodeiterator.next()
    return nodeIdentifiers
import re
def _extract_current_step(current_status_string):
"""
Attempts to extract the current step numeric identifier from the given status string.
Returns the step number or None if none.
"""
# Older format: `Step 12 :`
# Newer format: `Step 4/13 :`
step_increment = re.search(r"Step ([0-9]+)/([0-9]+) :", current_status_string)
if step_increment:
return int(step_increment.group(1))
step_increment = re.search(r"Step ([0-9]+) :", current_status_string)
if step_increment:
return int(step_increment.group(1)) | 2d1ee544c1d719ddbef1c175233d8304296ea33c | 38,834 |
def _result_str_valid(result):
"""Check the format of a result string per the SGF file format.
See http://www.red-bean.com/sgf/ for details.
"""
if result in ['0', 'Draw', 'Void', '?']:
return True
if result.startswith('B') or result.startswith('W'):
score = result[2:]
if score in ['R', 'Resign', 'T', 'Time', 'F', 'Forfeit']:
return True
try:
score = float(score)
return True
except ValueError:
return False
return False | 57fe041359dac8c97064b264881d8e192c687793 | 38,835 |
def make_class_name(pv):
    """
    Make a class name based on a given PV by title-casing it.
    """
    return str(pv.title())
def get_edges(faces):
    """
    Derive the unique undirected edge list from triangular faces.

    @faces: all faces of the model, each a triple of vertex indices
    return: list of [v0, v1] edges (v0 < v1), in first-seen order
    """
    seen = {}  # sorted vertex pair -> edge index, for de-duplication
    edges = []
    for face in faces:
        for corner in range(3):
            # Each corner pairs with the next corner (wrapping around).
            pair = tuple(sorted((face[corner], face[(corner + 1) % 3])))
            if pair not in seen:
                seen[pair] = len(edges)
                edges.append(list(pair))
    return edges
def _no_constraint(info_list):
"""
If there is no constraint set, then anything passes.
"""
try:
if len(info_list) == 0:
return True
except TypeError:
# constraint not set (weirdness in DB)
return True
return False | 083c3569ac5d1d48e18708b34cb20d4b586cd86f | 38,841 |
def binary_search_base(nums: list, target: int) -> int:
    """
    Time complexity O(log n).
    The basic binary search: nums is a sorted list.
    If target occurs (possibly multiple times) return one matching index,
    else return -1.
    """
    if not nums:
        return -1
    left, right = 0, len(nums) - 1
    # BUG FIX: the loop must also run when left == right, otherwise the
    # final candidate element is never examined (e.g. [1, 2, 3], target 3
    # -- or any single-element list -- wrongly returned -1).
    while left <= right:
        mid = left + (right - left) // 2
        if nums[mid] == target:
            return mid
        elif nums[mid] < target:
            left = mid + 1
        else:
            right = mid - 1
    return -1
from pathlib import Path
from typing import Tuple
from typing import List
def python_packages_find(package_path: Path, tracing: str = "") -> Tuple[Path, ...]:
    """Find the various python packages within a ROS2 package.

    A python package is a `<package_name>/__init__.py` directly under the
    ROS2 package directory; the sorted parents of those files are returned.
    """
    if tracing:
        print(f"{tracing}=>python_packages_find({package_path})")
    pattern = f"{package_path.name}/__init__.py"
    python_package_paths: List[Path] = sorted(
        init_file.parent for init_file in package_path.glob(pattern))
    if tracing:
        print(f"{tracing}<=python_packages_find({package_path})=>{python_package_paths}")
    return tuple(python_package_paths)
def build_array(text):
    """Return the set of integers read (one per line) from the file at *text*."""
    with open(text, 'r') as handle:
        return {int(row.strip()) for row in handle}
def _pd_df_cols_match_metadata_cols(df, table_metadata):
"""
Is the set of columns in the metadata equal to the set of columns in the dataframe?
This check is irrespective of column order, does not check for duplicates.
"""
pd_columns = set(df.columns)
md_columns = set([c["name"] for c in table_metadata["columns"]])
return pd_columns == md_columns | 5c793646dddad977d2ec61908932ac0fac096564 | 38,847 |
def process_config_str(config_str):
    """
    Takes potentially multi-line RawConfigParser-returned strings, strips
    them, and splits them by line.

    :param config_str: String parsed in by RawConfigParser (or None)
    :return: List of trimmed, non-empty lines; None when config_str is None.
    """
    if config_str is None:
        return None
    stripped = (part.strip() for part in config_str.split("\n"))
    return [line for line in stripped if line]
import os
import logging
def create_dir(dirname):
    """
    Create directory *dirname* (with parents) if it does not already exist.

    (The original docstring described a list parameter named `dirs`, which
    this function never had.)

    :param dirname: path of the directory to create
    :return: True if the directory was created, False if it already existed
    """
    try:
        if not os.path.exists(dirname):
            os.makedirs(dirname)
            return True
        logging.info("directory exists"+dirname)
        return False
    except Exception as err:
        logging.getLogger("Dirs Creator").info(
            "Creating directories error: {0}".format(err))
        # NOTE(review): terminating the whole process from a helper is
        # drastic, but preserved because callers may rely on it.
        exit(-1)
        return False
def daysinmonth(year, month):
    """
    Return days in month based on the month and year.

    Parameters
    ----------
    year : int
    month : int (1-12)

    Returns
    -------
    integer of the days in the month
    """
    # BUG FIX: the original used `year % 4 == 0` alone, which wrongly treats
    # century years such as 1900 as leap years; apply the full Gregorian rule.
    leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    daysinmonth_dict = {
        1: 31, 2: 29 if leap else 28, 3: 31, 4: 30, 5: 31, 6: 30,
        7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    return daysinmonth_dict[month]
import os
def _should_wrap_non_coroutines() -> bool:
"""Return ``True`` IFF ``FALCON_ASGI_WRAP_NON_COROUTINES`` is set in the environ.
This should only be used for Falcon's own test suite.
"""
return 'FALCON_ASGI_WRAP_NON_COROUTINES' in os.environ | fb1aecb7fcd029c92f009836ee4758d161d078db | 38,853 |
def get_pokemon_names(cur):
    """Returns a list of pokemon names in the database (as strs) sorted in
    alphabetical order.

    Args:
        cur: an open sqlite3 cursor created from a connection to the pokemon db
    """
    cur.execute('SELECT name FROM pokemon')
    # Each fetched row is a 1-tuple; unwrap before sorting.
    return sorted(row[0] for row in cur.fetchall())
def truncate_text_by_num_tokens(text, max_tokens, tok_separator=" "):
    """
    Truncate a text to keep at most *max_tokens* leading tokens.

    :param text:
    :param max_tokens:
    :param tok_separator:
    :return: The truncated text.
    """
    tokens = text.split(tok_separator)
    # Slicing clamps automatically, so min(max_tokens, len(tokens)) is implicit.
    return tok_separator.join(tokens[:max_tokens])
def loop_condition(first_count, second_count, third_count):
    """
    Custom function used to break out of while loop.

    :first_count: number of syllables in first line (must stay <= 5)
    :second_count: number of syllables in second line (must stay <= 7)
    :third_count: number of syllables in third line (must stay < 5)
    """
    return all((first_count <= 5, second_count <= 7, third_count < 5))
from glob import glob
from os import path
def contain_dicom(folder_path):
    """Check if a folder contains DICOM images.

    Args:
        folder_path: path to the folder
    Returns:
        True if any *.dcm files are found inside the folder, False otherwise
    """
    return bool(glob(path.join(folder_path, "*.dcm")))
import re
def format_submitter_id(node_name, *argv):
    """Format submitter id: join node_name and parts with underscores,
    lower-case, turn ", " into "_", replace any other invalid run with "-",
    and strip surrounding dashes."""
    submitter_id = node_name + "".join(f"_{value}" for value in argv)
    submitter_id = submitter_id.lower().replace(", ", "_")
    submitter_id = re.sub("[^a-z0-9-_]+", "-", submitter_id)
    return submitter_id.strip("-")
import os
def get_mount_point(path):
    """Return the mount point of the filesystem containing *path*."""
    # Resolve symlinks up front: a non-link is absolutized, a link whose
    # target exists is replaced by its real path (dangling links are kept).
    if not os.path.islink(path):
        path = os.path.abspath(path)
    elif os.path.islink(path) and os.path.lexists(os.readlink(path)):
        path = os.path.realpath(path)
    # Walk up the directory tree until a mount point is reached,
    # re-resolving any symlinked ancestors encountered along the way.
    while not os.path.ismount(path):
        path = os.path.dirname(path)
        if os.path.islink(path) and os.path.lexists(os.readlink(path)):
            path = os.path.realpath(path)
    return path
def form_line(ltokens=(), rtokens=()):
    """Render a statement line.

    A non-sequence ltokens is stringified and terminated with ';'.
    Otherwise the tokens are space-joined; with rtokens present the result is
    "lhs = rhs;", without it just "lhs;".

    (The original used mutable [] defaults; the empty tuples are
    behaviorally identical and safe.)
    """
    if not isinstance(ltokens, (list, tuple)):
        return str(ltokens) + ';'
    ltext = ' '.join(map(str, ltokens))
    if len(rtokens) > 0:
        rtext = ' '.join(map(str, rtokens))
        return '{ltext} = {rtext};'.format(ltext=ltext, rtext=rtext)
    return '{ltext};'.format(ltext=ltext)
from typing import Dict
import types
import pkgutil
import importlib
def import_submodules(package: str) -> Dict[str, types.ModuleType]:
    """Recursively import all submodules of a package.

    Returns a dict mapping each fully-qualified submodule name to the
    imported module object.

    NOTE(review): walk_packages() receives (package,) as its *path* argument,
    i.e. the package name string is treated as a filesystem path -- confirm
    that callers pass a value for which this works.
    """
    results = {}
    for _, name, is_pkg in pkgutil.walk_packages((package,)):
        full_name = package + '.' + name
        results[full_name] = importlib.import_module(full_name)
        if is_pkg:
            # Recurse into sub-packages to pick up their submodules too.
            results.update(import_submodules(full_name))
    return results
def transform(order_data_dict: dict):
    """
    #### Transform task

    A simple Transform task which takes in the collection of order data and
    computes the total order value.
    """
    return {"total_order_value": sum(order_data_dict.values())}
import json
def list_liveNode_to_json(nodes, is_pretty=False):
    """Serialize a list of LiveNode objects to a JSON string.

    Arguments:
        nodes {LiveNode} -- List of LiveNode
    Keyword Arguments:
        is_pretty {Boolean} -- True for indented, key-sorted JSON (default: {False})
    Returns:
        JSON -- JSON format
    """
    payload = [node.__dict__ for node in nodes]
    # `== True` (not plain truthiness) is kept deliberately to preserve the
    # original behaviour for non-bool arguments.
    if is_pretty == True:
        return json.dumps(payload, indent=4, sort_keys=True)
    return json.dumps(payload)
import os
def get_var(name, default=""):
    """
    Gets the text of an environment variable, or default if it doesn't exist.
    """
    return os.environ.get(name, default)
import numpy
def l2(pData1, pData2):
    """
    Returns the l2 (Euclidean) distance between two arrays.

    >>> x = numpy.array([1,2,3]); y = numpy.array([4,5,6])
    >>> l2(x,y)
    5.196152422706632
    """
    delta = pData1 - pData2
    return numpy.linalg.norm(delta)
def stringquote(text):
    """escapes quotes as neccessary and returns a string representing
    the text"""
    # Prefer single quotes; fall back to double quotes, doubling any
    # embedded double quotes when both kinds are present.
    if "'" not in text:
        return "'" + text + "'"
    if '"' not in text:
        return '"' + text + '"'
    return '"' + text.replace('"', '""') + '"'
def check_ports(port_dictionary):
    """
    Run through the supposedly-open ports and look for SSH (port 22, or an
    SSH banner on another port), inferring the device type from the header.

    Args:
        port_dictionary (dict) : dictionary of either one port or multiple ports of format {<port_number>:<header>}
    Return
        (False, False) : if 22 or SSH is not in the list
        (int, str) : (port number, device type) where device type is "Cisco",
            "Linux", or "Other"
    """
    if not isinstance(port_dictionary, dict):
        raise TypeError(
            f"Port dictionary passed was of type {type(port_dictionary).__name__}. It needs to be a dictionary"
        )
    for port, headers in port_dictionary.items():
        if not isinstance(port, str):
            try:
                port = str(port)
            except Exception as ex:
                print(
                    f"Port could not be made into a string. It was a type of {type(port).__name__}"
                )
                print(ex)
                raise
        if not isinstance(headers, dict):
            raise TypeError(
                f"The Port value was not a dict. It was a type of {type(headers).__name__}"
            )
        if port == "22":
            # BUG FIX: the original re-used the loop variable `key` for this
            # inner iteration, which corrupted the later port_dictionary[key]
            # lookup whenever the 22-entry held only "ERROR" headers.
            for header_name, header in headers.items():
                if header_name == "ERROR":
                    continue
                if "Cisco" in header:
                    return (22, "Cisco")
                # This can be expanded as more and more are learned
                elif "Ubuntu" in header:
                    return (22, "Linux")
                else:
                    return (22, "Other")
        # SSH may also run on a non-standard port; detect it via the banner.
        for header in headers.values():
            if "SSH" in header:
                return (int(port), "Other")
    return (False, False)
def div(dividend, divisor):
    """
    Takes two ints and returns a tuple (quotient, remainder), computed by
    repeated subtraction.
    """
    quotient = 0
    # `dividend >= divisor` is equivalent to the original `dividend - divisor >= 0`.
    while dividend >= divisor:
        dividend -= divisor
        quotient += 1
    return (quotient, dividend)
def make_rev_adict(adict):
    """
    An adict maps text answers to neuron indices. A reverse adict maps neuron
    indices to text answers.
    """
    return {index: answer for answer, index in adict.items()}
def pluralize(number, singular, plural=None):
    """Helper function for getting the appropriate singular or plural
    variant of a word or phrase.

    pluralize(0, 'awoo')         #=> "0 awoos"
    pluralize(1, 'awoo')         #=> "1 awoo"
    pluralize(2, 'awoo')         #=> "2 awoos"
    pluralize(1, 'box', 'boxen') #=> "1 box"
    pluralize(2, 'box', 'boxen') #=> "2 boxen"
    """
    if plural is None:
        plural = singular + 's'
    word = singular if number == 1 else plural
    return '{} {}'.format(number, word)
def my_get_sodar_info(_self):
    """Patched stand-in for
    cubi_tk.snappy.itransfer_common.SnappyItransferCommandBase.get_sodar_info;
    returns a fixed (uuid, irods destination) pair."""
    sodar_uuid = "466ab946-ce6a-4c78-9981-19b79e7bbe86"
    irods_dest = "/irods/dest"
    return sodar_uuid, irods_dest
from typing import Iterable
def field_lookup(obj, field_path):
    """
    Resolve a '__'-separated attribute path on *obj*, Django-lookup style.

    Objects exposing ``iterator()`` (querysets/managers) and other iterables
    are mapped lazily: a generator of per-element lookups is returned.
    A missing attribute resolves to None.

    Args:
        obj (instance): Django Model instance (or iterable of them)
        field_path (str): '__' separated field path
    """
    # Fan out over collections first; note strings are iterable too, which
    # mirrors the original behaviour.
    if hasattr(obj, 'iterator'):
        return (field_lookup(element, field_path) for element in obj.iterator())
    if isinstance(obj, Iterable):
        return (field_lookup(element, field_path) for element in iter(obj))
    head, sep, rest = field_path.partition('__')
    if not sep:
        # Leaf segment: plain attribute access with a None fallback.
        return getattr(obj, head, None)
    return field_lookup(field_lookup(obj, head), rest)
import time
def time_spent_from(start_time: float) -> float:
    """Calculate the seconds elapsed from start_time to now.

    Example:
        >>> start_time = time.time()
        >>> ...
        >>> time_spent = time_spent_from(start_time)

    :param start_time: time in seconds since the epoch
    :return: time spent from start_time to now
    """
    now = time.time()
    return now - start_time
import re
def reduce_lengthening(text):
    """
    Collapse any run of 3+ identical characters down to exactly 2.

    "yaaaay" -> "yaay", "aaaaaah"/"aaah" -> "aah", "ccciiiiiioooo" -> "cciioo".
    Useful for normalising elongated words, since no English word repeats a
    character more than twice in a row.
    Code is taken from https://rustyonrampage.github.io/text-mining/2017/11/28/spelling-correction-with-python-and-nltk.html
    """
    return re.sub(r"(.)\1{2,}", r"\1\1", text)
def get_combat_skills(lower: bool=False):
    """ Returns a list of the skills that contribute to combat level in no particular order.

    Args:
        lower: If the skills should be lowercase or titlecase. """
    skills = ["Attack", "Strength", "Defence", "Hitpoints",
              "Ranged", "Magic", "Prayer"]
    if lower:
        return [skill.lower() for skill in skills]
    return skills
def safe_characters(filename):
    """
    Converts special characters e.g. from YouTube Video titles to 'safe'
    filenames (initially Windows 10).
    """
    # Single-pass character translation; replacement strings never contain
    # characters that are themselves translated, so this matches the original
    # chain of .replace() calls.
    translation = str.maketrans(
        {"|": "_", '"': "'", ":": " -", "@": "", "#": "", "?": ""})
    return filename.translate(translation)
def parse_did(did):
    """Parses a Rucio DID and returns a tuple of (number:int, dtype:str, hash:str).

    Expected form: "<prefix>_<number>:<dtype>-<hash>".
    """
    scope, name = did.split(':')
    run_number = int(scope.split('_')[1])
    data_type, hash_part = name.split('-')
    return run_number, data_type, hash_part
def compose_subject(raw_subject: str, tag: str = "", category: str = "") -> str:
    """Compose a subject containing a tag and a category.

    If any of tag or category is missing, don't print the
    corresponding part (without whitespace issues).

    :param raw_subject: The original subject
    :param tag:
    :param category:
    :returns: The subject. Form: "[{tag}] {category}: {raw_subject}"
    """
    parts = []
    if tag:
        parts.append(f"[{tag}] ")
    if category:
        parts.append(f"{category}: ")
    parts.append(raw_subject)
    return "".join(parts)
from datetime import datetime
import click
def validate_optional_timestamp(ctx, param, value):
    """Ensure that a valid value for a timestamp is used.

    Falsy values pass through unchanged; otherwise the value must parse as
    `%Y-%m-%dT%H:%M:%SZ` and is returned with the time-of-day zeroed out.
    """
    if not value:
        return value
    try:
        parsed = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        raise click.BadParameter(
            "{} must be a valid utc timestamp formatted as `%Y-%m-%dT%H:%M:%SZ` "
            "e.g. `2020-12-31T00:00:00Z`".format(param.name),
            param=param,
        )
    return parsed.replace(hour=0, minute=0, second=0)
def _star_wrapper(arg):
"""
Internal helper function used to allow multiple arguments for functions
called by `mp_starmap`.
"""
(func, args) = arg
return func(*args) | e7f8dbd1cb50c35648b7cd9939d61024d7f0ff90 | 38,888 |
def track_variable(name, line_used, smali_file):
    """
    Tries to identify the last instance a variable was set on the file before
    the line where it was used.

    Scans the smali source backwards from ``line_used``, following register
    moves (``move-object``/``move-result``) until it finds the instruction
    that produced the value (``new-``/``const``/``invoke``) or reaches the
    enclosing method header.

    :param str name: the name of the variable (e.g. p1, v2, etc.)
    :param int line_used: the line where the variable is being used
        (1-based; lines up to but excluding this one are scanned)
    :param str smali_file: path to the file to look for the variable
    :return: a list of dicts ({"line": int, "details": str}) tracing where
        the variable has been set, or an empty list if nothing was found
    """
    # prepare variables and read content
    older_lines =[]
    track_invoke = False
    name = name.lower()
    line_used = int(line_used)
    with open(smali_file, "r") as fp:
        smali = fp.read()
    # go through the lines in reverse looking for initialization
    for i, line in enumerate(reversed(smali.split("\n")[:line_used])):
        # direct initialization (object allocation or constant load) ends
        # the search: return the trace plus this originating line
        if name in line and ("new-" in line or "const" in line):
            return older_lines + [{
                "line": line_used - i,
                "details": line.strip()
            }]
        elif "move-object" in line and name in line:
            older_lines += [{
                "line": line_used - i,
                "details": line.strip()
            }]
            # follow the move: keep tracking the source register instead
            name = line.rsplit(",", 1)[-1].strip()
        elif track_invoke and "invoke" in line:
            # this invoke produced the value captured by the earlier
            # "move-result"; it is the origin of the variable
            return older_lines + [{
                "line": line_used - i,
                "details": line.strip()
            }]
        # if "move-result" then need to check last invoke
        elif "move-result" in line and name in line:
            older_lines += [{
                "line": line_used - i,
                "details": line.strip()
            }]
            track_invoke = True
        # reached method header
        elif ".method " in line:
            # if variable is pX and no other evidence was found then it has
            # been passed over on method call so just return method header
            if name.startswith("p"):
                return older_lines + [{
                    "line": line_used - i,
                    "details": line.strip()
                }]
            # if not variable pX then nothing was found
            else:
                return []
    return []
def _transform_str_numeric_read_negocios_realizados(row):
"""Transformation fucntion from string to numeric for rows of table net_per_stock.
Args:
row (list): List of strings
Returns:
list: List of strings and numeric.
"""
row[8] = int(float(row[8]))
row[9] = float(row[9])
row[10] = float(row[10])
row[13] = float(row[13])
return row | a298ddb509908cac73d4477f3af492378ef1a3af | 38,891 |
import os
def validate_image_size(fname):
    """Check that the file at *fname* is no larger than 5 MiB.

    Returns "valid" when the size is within the limit, "invalid"
    otherwise.
    """
    limit = 5242880  # 5 MiB
    return "invalid" if os.path.getsize(fname) > limit else "valid"
import operator
def dfSubset(data, where):
    """
    Return a subset of the data given a series of conditions.

    .. versionadded:: 0.1.9

    Parameters
    ----------
    data: :py:class:`pandas.DataFrame`:
        DataFrame to view
    where: str or list or tuple
        Conditions to apply. A bare string is treated as a single
        condition. Each condition is either a string
        ``'column operand target'`` (split at spaces) or a three-valued
        iterable ``('column', 'operand', 'target')``. Operands are names
        of functions from the :py:mod:`operator` module, e.g. ``'eq'``,
        ``'ge'``, ``'le'``, ``'ne'``, ``'gt'``, ``'lt'``, ``'contains'``.

    Returns
    -------
    view: :py:class:`pandas.DataFrame`:
        View into the data frame after successive slices

    See Also
    --------
    :py:mod:`operator`
    """
    if isinstance(where, str):
        where = (where,)
    view = data
    for item in where:
        parts = item.split() if isinstance(item, str) else item
        assert len(parts) == 3, ('Conditions should have three arguments, '
                                 'not like {}'.format(item))
        column, op_name, target = parts
        compare = getattr(operator, op_name)
        view = view[compare(view[column], target)]
    return view
def _match_suffix(suffix, ffilter):
"""Return file type (textual description) for a given suffix.
Parameters
----------
suffix : str
File extension to check (must include the leading dot).
ffilter : dict
ffilter : dict
Known file types. The keys contain descriptions (names), whereas the
values contain the corresponding file extension(s).
Returns
-------
ftype : str | None
File type (None if unknown file type).
"""
for ftype, ext in ffilter.items():
if suffix in ext:
return ftype | 7fd59ba819ac0f151d5b8156b69282f97f8601a2 | 38,896 |
from typing import Iterable
from typing import Any
from typing import Tuple
def grouped(iterable: Iterable[Any], n: int) -> Iterable[Tuple[Any, ...]]:
    """Yield consecutive, non-overlapping n-tuples from *iterable*.

    s -> (s0, ..., s(n-1)), (sn, ..., s(2n-1)), ...
    A trailing partial group is dropped (standard zip truncation).
    """
    iterator = iter(iterable)
    return zip(*(iterator for _ in range(n)))
def compare_hashes(target_hash, file_hash):
    """Return True when the two file hashes are equal, False otherwise."""
    hashes_match = target_hash == file_hash
    return hashes_match
def unwrap(value):
    """Strip the first and last character (the quotes) from *value*."""
    return value[1:len(value) - 1]
import argparse
def bool_from_string(raw_value):
    """Parse a boolean command line argument value.

    Only the literal strings 'True' and 'False' are accepted.

    :raises argparse.ArgumentTypeError: for any other input.
    """
    mapping = {'True': True, 'False': False}
    if raw_value in mapping:
        return mapping[raw_value]
    raise argparse.ArgumentTypeError(
        'Value must be True or False, got %r instead.' % raw_value)
def expand(data):
    """
    Expand dot-separated match variable names into a nested dictionary.

    E.g. ``{"a.b": 1}`` becomes ``{"a": {"b": 1}}``.

    :param data: mapping of dotted key names to values
    :returns: tuple ``(expanded_dict, True)``; non-dict input is
        returned unchanged (sanity-check passthrough).
    """
    # do sanity check on data
    if not isinstance(data, dict):
        return data, True
    expanded = {}
    for dotted_key, value in data.items():
        node = expanded
        *parents, leaf = dotted_key.split(".")
        # walk/create the intermediate dictionaries
        for part in parents:
            node = node.setdefault(part, {})
        # a conflicting scalar already stored at this path means the value
        # cannot be attached; it is silently dropped (original behavior)
        if isinstance(node, dict):
            node[leaf] = value
    return expanded, True
def update_from_one(row):
    """
    Select the coordinator flag for a row: rows flagged as needing an
    update take the single-coordinator value, all others keep the
    both-coordinators value.

    Parameter
    ---------
    row: Series

    Returns
    -------
    updated: Boolean or other (None when the row is malformed)

    Examples
    --------
    >>> import pandas as pd
    >>> row = pd.Series(
    ...     {
    ...         "one_coordinator": True,
    ...         "both_coordinators": False,
    ...         "needs_updated": True
    ...     }
    ... )
    >>> update_from_one(row)
    True
    >>> row = pd.Series(
    ...     {
    ...         "one_coordinator": True,
    ...         "both_coordinators": False,
    ...         "needs_updated": False
    ...     }
    ... )
    >>> update_from_one(row)
    False
    >>> row = pd.Series(
    ...     {
    ...         "one_coordinator": False,
    ...         "both_coordinators": False,
    ...         "needs_updated": True
    ...     }
    ... )
    >>> update_from_one(row)
    False
    """
    try:
        return row.one_coordinator if row.needs_updated \
            else row.both_coordinators
    except AttributeError:
        # Narrowed from a bare `except:` which swallowed every exception
        # (including KeyboardInterrupt). A missing field is the only
        # expected failure; log it and fall through, returning None.
        print("except")
        print(row)
def is_board_full(board: list) -> bool:
    """Return True when no cell of *board* holds the empty marker " "."""
    return " " not in board
def _is_tcp_synack(tcp_flags):
"""
Passed a TCP flags object (hex) and return 1 if it
contains TCP SYN + ACK flags and no other flags
"""
if tcp_flags == 0x12:
return 1
else:
return 0 | 7537f12321bb52667118fdf4ce75985c59e6bead | 38,907 |
import re
def parse_swift_recon(recon_out):
    """Split `swift-recon` CLI output into per-section line groups.

    Sections are delimited by rows of 79 '=' characters. For every
    delimiter except the last, the lines between it and the next
    delimiter form one group. Text before the first delimiter and after
    the last one is discarded.

    Args:
        recon_out (str): CLI output from the `swift-recon` command.
    Returns:
        list: List of lists grouped by the content of the delimited blocks
    """
    delimiter = re.compile(r'^={79}')
    lines = recon_out.splitlines()
    positions = [i for i, line in enumerate(lines) if delimiter.match(line)]
    # pair each delimiter with the next one and slice the lines between
    return [lines[start + 1:stop]
            for start, stop in zip(positions, positions[1:])]
import random
def slice_df(df, num_devices, date_start, date_end):
    """Randomly sample devices from a time-window slice of *df*.

    Keeps the rows whose ``end_time`` lies in [date_start, date_end],
    picks ``num_devices`` distinct ``Device_number`` values at random,
    and returns the rows belonging to those devices.

    Example bounds: date_start = 2014-09-30 00:00:00,
    date_end = 2014-12-25 20:45:00.
    """
    in_window = df[(df['end_time'] >= date_start) & (df['end_time'] <= date_end)]
    devices = in_window['Device_number'].unique()
    # random.sample rejects non-Sequence populations (e.g. numpy arrays)
    # on Python 3.11+, so convert explicitly to a list first
    sampled_devices = random.sample(list(devices), num_devices)
    return in_window[in_window['Device_number'].isin(sampled_devices)]
def sam2fastq(line):
    """
    Build the four FASTQ lines for one SAM alignment row.

    Uses QNAME (col 0), SEQ (col 9) and QUAL (col 10) to produce
    ``@name``, sequence, ``+name``, qualities.
    """
    name, sequence, qualities = line[0], line[9], line[10]
    return ['@' + name, sequence, '+' + name, qualities]
def get_emissions_probability(label_matches, given_tag, given_word, tag_counts):
    """Compute the emission probability P(word | tag).

    emissions_probability = count(word associated with tag) / count(tag);
    returns 0 when the tag count is zero (avoids division by zero).
    """
    pair_count = label_matches.get((given_word, given_tag), 0)
    tag_count = tag_counts[given_tag]
    if not tag_count:
        return 0
    return float(pair_count) / float(tag_count)
def validate_password(password):
    """
    Ensure a password is at least 6 characters long and contains at
    least one uppercase letter, one lowercase letter, and one special
    character.

    :param password: the password of the user
    :return: True if the password is valid, False otherwise
    """
    specials = set("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~")
    if len(password) < 6:
        return False
    if not any(ch.isupper() for ch in password):
        return False
    if not any(ch.islower() for ch in password):
        return False
    return any(ch in specials for ch in password)
import collections
def get_recommendations_item_filtred(items, similarity_matrix, x, n=5):
    """
    Recommend up to *n* new items for user *x* via item-based filtering.

    Each item the user rated contributes similarity-weighted scores to
    its similar items (items the user already has are skipped); the
    weighted sums are normalised into predicted scores and returned as
    (score, item) pairs, best first.
    """
    weight_totals = collections.defaultdict(int)
    weighted_scores = collections.defaultdict(int)
    user_items = items[x]
    for rated_item, rating in user_items.items():
        for similarity, candidate in similarity_matrix[rated_item]:
            # never recommend something the user already rated
            if candidate in user_items:
                continue
            # weighted sum of scores times similarity
            weight_totals[candidate] += similarity
            weighted_scores[candidate] += similarity * rating
    # divide each total score by total weighting to get an average
    rankings = [(total / weight_totals[candidate], candidate)
                for candidate, total in weighted_scores.items()]
    return sorted(rankings, reverse=True)[:n]
from typing import ByteString
from unittest.mock import Mock
def create_mock_process(
    returncode: int = 0,
    stdout: ByteString = b'success',
    stderr: ByteString = b'',
):
    """
    Build a Mock standing in for a Popen process, for later inspection.

    Defaults describe a process that ran successfully.

    :param returncode: what the process will return
    :param stdout: what the process will write to STDOUT
    :param stderr: what the process will write to STDERR
    :return: the configured mock
    """
    process = Mock()
    # NOTE(review): `poll` is assigned the plain return code, not a
    # callable — confirm callers read it as an attribute rather than
    # invoking process.poll()
    process.poll = returncode
    process.wait = lambda: None
    process.kill = lambda: None
    process.__enter__ = Mock(return_value=process)
    process.__exit__ = Mock()
    process.returncode = returncode
    process.communicate = Mock(return_value=(stdout, stderr))
    return process
from typing import OrderedDict
def deepmerge(original, new):
    """
    Recursively merge *new* into *original* and return a new dict.

    Values from *new* win, except where both sides hold dicts for the
    same key — those are merged recursively. Neither input is mutated.
    """
    result = OrderedDict(original)
    for key, incoming in new.items():
        both_dicts = (key in original
                      and isinstance(original[key], dict)
                      and isinstance(incoming, dict))
        result[key] = deepmerge(original[key], incoming) if both_dicts else incoming
    return result
import random
def random_num():
    """Return a random float in [0, 1) rendered as a string.

    Flask view functions can only return string data, hence the
    conversion.
    """
    return str(random.random())
def filter_keys(keys):
    """Parse scenario key specifications into lists of key names.

    *keys* is a string (or a list whose first element is a string) of
    scenarios separated by semicolons; each scenario is a
    comma-separated list of keys to look for while filtering a
    dictionary. Returns a list of key lists; None yields ``[[]]``.
    """
    if keys is None:
        return [[]]
    spec = keys if isinstance(keys, str) else keys[0]
    return [scenario.split(",") for scenario in spec.split(";")]
def readOrder(filename):
    """Read an elimination order from a file.

    Orders are stored as unknown-length vectors on the last line, in the
    format "[nvar] [v0] [v1] ... [vn]". Note: the same file format may
    also be useful for MPE configurations, etc.

    :raises ValueError: if the file content is inconsistent.
    """
    with open(filename, 'r') as fp:
        last_line = fp.readlines()[-1]
    fields = last_line.strip('\n').split(' ')
    nvar = int(fields[0])
    order = [int(fields[i]) for i in range(1, nvar + 1)]
    if len(order) != nvar:
        raise ValueError("Problem with file?")
    return order
def lower_world():
    """Return a fixture pairing the input 'HELLO' with its lowercase form."""
    return {
        'input': 'HELLO',
        'output': 'hello',
    }
import numpy
def fill_with_zeros(evecs, from_res, to_res, width):
    """
    Zero-pad eigenvectors so each spans `width` residues.

    Each residue contributes 3 components; residues before *from_res*
    (1-based) and after *to_res* are filled with zeros on the left and
    right respectively.

    Returns a numpy array of the padded vectors.
    """
    left_pad = [0.] * ((from_res - 1) * 3)
    right_pad = [0.] * ((width - to_res) * 3)
    padded = [left_pad + list(evec) + right_pad for evec in evecs]
    return numpy.array(padded)
def verifyIDcardValid(ID):
    """Validate the check digit of an 18-character (Chinese) ID number.

    The first 17 digits are multiplied by the weights
    [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2] and summed;
    the sum modulo 11 indexes into '1 0 X 9 8 7 6 5 4 3 2' to give the
    expected 18th character.

    Returns the ID itself when valid, 'INVALID' when the check digit is
    wrong, and 'NOT A ID' when the input is not 18 characters long.
    """
    if len(ID.strip()) != 18:
        return "NOT A ID"
    weights = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
    check_chars = ['1', '0', 'X', '9', '8', '7', '6', '5', '4', '3', '2']
    # zip stops after the 17 weighted positions
    weighted_sum = sum(int(digit) * w for digit, w in zip(ID, weights))
    expected = check_chars[weighted_sum % 11]
    return ID if ID[17] == expected else 'INVALID'
from typing import Union
import sys
def validate_smoothing_window(arg_value: Union[str, int]) -> int:
    """
    Validate the smoothing window for the trendline. Allowed values are
    0 (no smoothing) or integers >= 2.

    Prints an error message and exits the process (status 2) on invalid
    input.

    :param arg_value: The input value
    :return: The input parsed to an int if it's valid
    """
    try:
        window = int(arg_value)
    except ValueError:
        print("Smoothing window must be an integer. {} is not.".format(arg_value))
        sys.exit(2)
    if window != 0 and window < 2:
        print("Smoothing window must be greater than 1 or 0 for no smoothing.")
        sys.exit(2)
    return window
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.