Columns: content (string, length 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
import string
import os


def resolve_env(s):
    """Resolve all environment variables in target string or :class:`~pathlib.Path`.

    This command always uses the bash syntax ``$VARIABLE`` or ``${VARIABLE}``. This also applies in
    Windows. Windows native syntax ``%VARIABLE%`` is not supported.

    Unlike in :func:`os.path.expandvars`, undefined variables raise an exception instead of being
    silently replaced by an empty string.

    :param s: string or :class:`~pathlib.Path` potentially containing environment variables
    :returns: resolved string, or :class:`~pathlib.Path` if the input is a :class:`~pathlib.Path`
    :raise EnvironmentError: in case of missing environment variable
    """
    try:
        return type(s)(string.Template(str(s)).substitute(os.environ))
    except KeyError as e:
        raise EnvironmentError(f"Environment variable {e} not found")
c362bf09ef644f5a62fcd62d219020c941db3768
20,597
from pathlib import Path
from typing import Set
from typing import IO
from typing import List


def python_imports_get(python_path: Path, tracing: str = "") -> Set[str]:
    """Scan the python files searching for imported files to keep track of."""
    # next_tracing: str = tracing + " " if tracing else ""
    if tracing:
        print(f"{tracing}=>python_imports_get({python_path})")

    imports_set: Set[str] = set()
    python_file: IO[str]
    with open(python_path) as python_file:
        python_lines: List[str] = python_file.read().split("\n")
        python_line: str
        index: int
        for python_line in python_lines:
            if python_line.startswith("import "):
                imports_set.add(python_line[7:])
            elif python_line.startswith("from "):
                dot_index: int = python_line.find('.')
                space_index: int = python_line.find(' ')
                if dot_index >= 0:
                    imports_set.add(python_line[5:dot_index])
                elif space_index >= 0:
                    imports_set.add(python_line[5:space_index])

    # This is kludge for now. What we want to do is to restrict the imports to ROS2 packages.
    imports_set -= {"os", "sys"}

    if tracing:
        print(f"{tracing}<=python_imports_get({python_path})=>{imports_set}")
    return imports_set
19446ac264e47699e9e9cfe8ade279f628a1f96d
20,598
def get_koji_build_info(build_id, session, config):
    """
    Returns build information from koji based on build id.

    :param int build_id: build id of a build in koji.
    :param koji.ClientSession session: koji connection session object
    :return: build information.
    :rtype: dict
    """
    print("Retrieving build metadata from: ", config.koji_host)
    build = session.getBuild(build_id)
    if not build:
        raise Exception("Build with id '{id}' has not been found.".format(id=build_id))
    print("Build with the ID", build_id, "found.")
    return build
4ffc925ca3ec6ede46d55f7a98f129683d2b4980
20,599
import numpy


def dreger(prop, depth):
    """
    SoCal model of Dreger and Helmberger (1991).
    prop:  'rho', 'vp', or 'vs'.
    depth: Array of depth values in meters.
    Returns array of properties (kg/m^3 or m/s)
    """
    m = {
        'z':   (5.5, 5.5, 16.0, 16.0, 35.0, 35.0),
        'rho': (2.4, 2.67, 2.67, 2.8, 2.8, 3.0),
        'vp':  (5.5, 6.3, 6.3, 6.7, 6.7, 7.8),
        'vs':  (3.18, 3.64, 3.64, 3.87, 3.87, 4.5),
    }
    depth = numpy.asarray(depth)
    z = 1000.0 * numpy.array(m['z'])
    f = 1000.0 * numpy.array(m[prop])
    f = numpy.interp(depth, z, f)
    return f
f6fb72d582dbd762b6d5d1c273a95f677da31b99
20,600
def find_combos_internal_cache(adapters, position, cache):
    """Part 2 - recursion, using dynamic programming (cache/memoization) - wrote this afterwards"""
    # successful combo - we made it to the end!
    if position == len(adapters) - 1:
        return 1, cache
    # if the value is in the cache, grab it
    elif position in cache:
        return cache[position], cache
    # if it's not in the cache, do the work
    else:
        answer = 0
        for new_position in range(position + 1, len(adapters)):
            if adapters[new_position] - adapters[position] <= 3:
                this_answer, cache = find_combos_internal_cache(adapters, new_position, cache)
                answer += this_answer
        cache[position] = answer
        return answer, cache
b44c79acb23ee1f0ed6d9bc584f4126327595ee8
20,601
def filter_rename_table(table, parsers):
    """
    Map a renamed table back to its original name.

    :param table: table name to look up
    :param parsers: parsers carrying old/new table names
    :return: the original table name if the table was renamed, otherwise the table itself
    """
    for p in parsers:
        if p.new_table == table and p.new_table != p.old_table:
            return p.old_table
    return table
8e594165e772be35fc88dc7e570d1685740d84d7
20,602
def dict_from_list(keyfunc, l):
    """
    Generate a dictionary from a list where the keys for each element
    are generated based off of keyfunc.
    """
    result = dict()
    for item in l:
        result[keyfunc(item)] = item
    return result
a676eb6cefaf99cbb6dd8d0aa61f05c31f2a2382
20,604
import os
import yaml


def parse_config(filename, env_vars_prefix='bot'):
    """
    Load a yaml configuration file and resolve any environment variables.
    """
    if not os.path.isfile(filename):
        raise ValueError('Invalid filename: %s' % filename)

    config = None
    with open(filename) as data:
        config = yaml.load(data, yaml.loader.SafeLoader)

    def update_config_with_env_vars(config, config_name, env_var_separator='_'):
        if config is None:
            return config
        if isinstance(config, dict):
            for node in config:
                config[node] = update_config_with_env_vars(
                    config[node], '{}.{}'.format(config_name, node))
            return config
        env_var_name = config_name.upper().replace('.', env_var_separator)
        return os.environ.get(env_var_name) or config

    return update_config_with_env_vars(config, env_vars_prefix)
4762287b7727543c9240945fd4ccc3ea65c4a259
20,605
import re


def add_item_offset(token, sentence):
    """Get the start and end offset of a token in a sentence"""
    s_pattern = re.compile(re.escape(token), re.I)
    token_offset_list = []
    for m in s_pattern.finditer(sentence):
        token_offset_list.append((m.group(), m.start(), m.end()))
    return token_offset_list
45c387674c84cb6ba7559acc98b69e6789040f50
20,606
import torch


def get_lastlayers_model_weights(mdl, is_mask_class_specific=False, is_fc_lastlayer=False):
    """
    Returns the weights at the head of the ROI predictor
    -> classification and bounding box regressor weights + bias

    :returns cls_weights, bbox_pred_weights, bbox_pred_bias
    :rtype: tuple
    """
    if isinstance(mdl, str):
        mdl = torch.load(mdl)['model']
    elif isinstance(mdl, dict):
        if 'model' in mdl:
            mdl = mdl['model']
    else:
        raise TypeError('Expected dict or str')

    cls_weights = mdl['roi_heads.box_predictor.cls_score.weight']
    bbox_pred_weights = mdl['roi_heads.box_predictor.bbox_pred.weight']
    bbox_pred_bias = mdl['roi_heads.box_predictor.bbox_pred.bias']

    return cls_weights, bbox_pred_weights, bbox_pred_bias
fe662b448dc514113a8692cfb06417c99ab77244
20,607
import subprocess


def types_valid_global_rules(file_name, ignorelisted):  # type: (str, bool) -> bool
    """
    Run Mypy check with global rules on the given file, return TRUE if Mypy check passes
    """
    output = subprocess.DEVNULL if ignorelisted else None
    mypy_exit_code = subprocess.call('mypy {}'.format(file_name), shell=True, stdout=output)
    return not bool(mypy_exit_code)
9b36f8804dcd08b2d5c554a4744a6e459785bec0
20,608
from datetime import datetime


def remove_outdated_incident_ids(found_incidents_ids, latest_incident_time_str):
    """
    To avoid a continuously growing context size, we must delete outdated incident IDs.
    To do that, we delete any ID that dates before the start time of the current fetch.
    """
    new_found_ids = {}
    latest_incident_time = datetime.strptime(latest_incident_time_str, '%Y-%m-%d %H:%M:%S.%f')
    for incident_id, date_str in found_incidents_ids.items():
        incident_time = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S.%f')
        if incident_time >= latest_incident_time:
            new_found_ids[incident_id] = date_str
    return new_found_ids
a8163447163cf1b418d859f2b168959f76fcbdec
20,609
def custom_rounder(input_number, p):
    """
    Round the input number to the requested precision.

    :param input_number: number that should be rounded
    :type input_number: float
    :param p: 10 powered by the number of digits to round to
    :type p: int
    :return: rounded number in float
    """
    return int(input_number * p + 0.5) / p
01cc63849c180024f83bb530aa0dfd69cbfc1495
20,610
def get_type(string_: str):
    """
    Find the type into which the provided string should be cast.

    :param string_: str -- single string to reformat
    :return: float, int, bool or str
    """
    if "." in string_ and string_.replace(".", "").isdigit():
        return float
    elif string_.replace("-", "").isdigit():
        return int
    elif string_ in ("True", "False"):
        return bool
    else:
        return str
1ca3de231a973488a77489b938eeaddb9531de1e
20,612
def checkPassing(listCars, passingPoints, listLanes, l, wEnd, k, toTransfer):
    """ Start the passing phase when needed
    listCars : List of cars
    passingPoints : List of passing points
    listLanes : List of lanes
    l : Current car
    wEnd : End of the passing path
    k : kth passing line
    toTransfer : Indicates changes between lines"""
    close = listCars.get_listCars()[l].get_curr_position()
    closeX = abs(passingPoints[0][0] - close[0])
    closeY = abs(passingPoints[0][1] - close[1])
    testReturn = False
    # Check if the car want to pass, to start the process
    if(listCars.get_listCars()[l].wPassingStart() == True and
       (closeX < 1 and closeY < 1)):
        pathX, pathY = listLanes.get_path_passing(toTransfer, k)
        listCars.get_listCars()[l].path(pathX, pathY)
        listCars.get_listCars()[l].x = 0
        listCars.get_listCars()[l].y = 0
        listCars.get_listCars()[l].lane = 3 + toTransfer
        listCars.get_listCars()[l].setwPassingStart(False)
        listCars.get_listCars()[l].set_isPassing(True)
        listCars.get_listSpeed()[l] = listCars.get_listWantSpeed()[l]
        listCars.get_listCars()[l].update_pos()
    closeX = abs(passingPoints[1][0] - close[0])
    closeY = abs(passingPoints[1][1] - close[1])
    # Once the car choose the passing line, change path
    if(listCars.get_listCars()[l].wPassingStart() == False and
       (closeX < 1 and closeY < 1) and
       listCars.get_listCars()[l].get_isPassing() == True):
        pathX, pathY = listLanes.get_path_lane(1 + toTransfer)
        listCars.get_listCars()[l].path(pathX, pathY)
        listCars.get_listCars()[l].x = wEnd
        listCars.get_listCars()[l].y = 0
        listCars.get_listCars()[l].lane = 1 + toTransfer
        listCars.get_listCars()[l].set_isPassing(False)
        listCars.get_listCars()[l].setwPassingStart(True)
        listCars.get_listCars()[l].update_pos()
        listCars.get_listCars()[l].set_goBack(True)
        listCars.get_listCars()[l].set_timeUp()
    return listCars, listLanes
f3d5beb774f3e1eb83a0b10cb30f9d47c3c5ded7
20,613
def fixture_ext_markdown(plugin):
    """Return a Markdown instance with MkdocstringsExtension.

    Parameters:
        plugin: A configured plugin instance (fixture).

    Returns:
        The plugin Markdown instance.
    """
    return plugin.md
383fdd433990c417b5af39a332148744b25840aa
20,615
import torch
import math


def elastic_deformation(img: torch.Tensor, sample_mode: str = "bilinear", alpha: int = 50,
                        sigma: int = 12) -> torch.Tensor:
    """
    Performs random elastic deformation to the given Tensor image
    :param img: (torch.Tensor) Input image
    :param sample_mode: (str) Resampling mode
    :param alpha: (int) Scale factor of the deformation
    :param sigma: (int) Standard deviation of the gaussian kernel to be applied
    """
    # Get image shape
    height, width = img.shape[-2:]
    # Get kernel size
    kernel_size = (sigma * 4) + 1
    # Get mean of gaussian kernel
    mean = (kernel_size - 1) / 2.
    # Make gaussian kernel
    # https://discuss.pytorch.org/t/is-there-anyway-to-do-gaussian-filtering-for-an-image-2d-3d-in-pytorch/12351/7
    x_cord = torch.arange(kernel_size, device=img.device)
    x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
    y_grid = x_grid.t()
    xy_grid = torch.stack([x_grid, y_grid], dim=-1)
    gaussian_kernel = (1. / (2. * math.pi * sigma ** 2)) \
                      * torch.exp(-torch.sum((xy_grid - mean) ** 2., dim=-1) / (2. * sigma ** 2))
    gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
    gaussian_kernel = gaussian_kernel.repeat(1, 1, 1, 1)
    gaussian_kernel.requires_grad = False
    # Make random deformations in the range of [-1, 1]
    dx = (torch.rand((height, width), dtype=torch.float, device=img.device) * 2. - 1.).view(1, 1, height, width)
    dy = (torch.rand((height, width), dtype=torch.float, device=img.device) * 2. - 1.).view(1, 1, height, width)
    # Apply gaussian filter to deformations
    dx, dy = torch.nn.functional.conv2d(input=torch.cat([dx, dy], dim=0), weight=gaussian_kernel, stride=1,
                                        padding=kernel_size // 2).squeeze(dim=0) * alpha
    # Add deformations to coordinate grid
    grid = torch.stack(torch.meshgrid([torch.arange(height, dtype=torch.float, device=img.device),
                                       torch.arange(width, dtype=torch.float, device=img.device)]),
                       dim=-1).unsqueeze(dim=0).flip(dims=(-1,))
    grid[..., 0] += dx
    grid[..., 1] += dy
    # Convert grid to relative sampling location in the range of [-1, 1]
    grid[..., 0] = 2 * (grid[..., 0] - (height // 2)) / height
    grid[..., 1] = 2 * (grid[..., 1] - (width // 2)) / width
    # Resample image
    img_deformed = torch.nn.functional.grid_sample(input=img[None] if img.ndimension() == 3 else img,
                                                   grid=grid, mode=sample_mode, padding_mode='border',
                                                   align_corners=False)[0]
    return img_deformed
ce8e884780a8dd54851d375f058fb836dfca0a0a
20,617
def as_list(value):
    """\
    Cast anything to a list (just copy a list or a tuple, or put an atomic item
    as a single element into a list).
    """
    if isinstance(value, (list, tuple)):
        return list(value)
    return [value]
02f8aa7194a594e0fd4bbddde77995382e011ac2
20,618
def user_register(request):
    """
    Route handler for the user registration page
    :param request: the request object
    :return: the user registration page
    """
    return {
        '__template__': 'user_register.html'
    }
72813591d14a63a5788c2c9056d1482e8d07a31b
20,620
import os


def get_data_dir():
    """
    Returns the data dir relative from this file
    """
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
    data_dir = os.path.join(project_dir, "data")
    return os.path.abspath(data_dir)
19c45200c6c6bc70fb691735d5fc21edf355873b
20,621
def modules_download_range_resolver(range_list):
    """resolves the input choice given by user ex : 1 - 13, 22 , 47 """
    comma_seperated = [item.strip() for item in range_list.split(",")]
    dash_list = []
    for item in comma_seperated:
        if "-" in item:
            tmp = [i.strip() for i in item.split("-")]
            if len(tmp) == 2:
                if int(tmp[0]) <= int(tmp[1]):
                    dash_list.extend(list(range(int(tmp[0]), int(tmp[1]) + 1)))
                else:
                    dash_list.extend(list(range(int(tmp[0]), int(tmp[1]) - 1, -1)))
        else:
            dash_list.append(int(item))
    return list(dict.fromkeys(dash_list))
f308fd2d415bc1d6efefdbb2d360eaa0d16c6698
20,623
from pathlib import Path


def dbt_artifacts_directory() -> Path:
    """
    Get the path to the dbt artifacts directory.

    Returns
    -------
    out : Path
        The dbt artifacts directory
    """
    artifacts_directory = Path(__file__).parent / "dbt/data/"
    return artifacts_directory
94b44a3418c4d307f2bed977325015ca2e78ed00
20,624
from datetime import datetime


def get_data_for_daily_statistics_table(df):
    """
    Return data which is ready to be inserted to the daily_statistics table in
    the database.

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        Pandas Dataframe containing data received from the API.

    Returns
    -------
    df_daily_statistics_data : pandas.core.frame.DataFrame
        Pandas Dataframe containing data to be inserted to the daily_statistics
        table in the database.
    """
    df_daily_statistics_data = (
        df.groupby(["district_name", "min_age_limit", "vaccine"])["available_capacity"]
        .sum()
        .reset_index()
    )
    df_daily_statistics_data["vaccine"] = df_daily_statistics_data[
        "vaccine"
    ].str.upper()
    df_daily_statistics_data["timestamp"] = datetime.utcnow().strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return df_daily_statistics_data
faf7cfb76e88838d049e79bcabfbeef2238dc304
20,625
from pathlib import Path
from datetime import datetime
import os


def _create_data_folder(path, props):
    """ Create a new directory to put dataset in & generate appropriate name & update dataset properties """
    if 'data_folder' in props:  # will this work?
        # => regenerating from existing data
        props['name'] = props['data_folder'] + '_regen'
        data_folder = props['name']
    else:
        data_folder = props['name'] + '_' + Path(props['templates']).stem
    # make unique
    data_folder += '_' + datetime.now().strftime('%y%m%d-%H-%M-%S')
    props['data_folder'] = data_folder
    path_with_dataset = path / data_folder
    os.makedirs(path_with_dataset)
    return path_with_dataset
7f044a9daf24b6fc050836a1bbe4a0f2abff5134
20,626
def new_pp(board):
    """Update piece_placement"""
    nboard = [[' '] * 8 for _ in range(8)]
    for square in board:
        nboard[square[0]][square[1]] = board[square]
    piece_placement = ''
    for row_num in range(8):
        count = 0
        for col_num in range(8):
            square = nboard[row_num][col_num]
            if square == ' ':
                count += 1
            else:
                if count != 0:
                    piece_placement += str(count)
                piece_placement += square
                count = 0
        if count != 0:
            piece_placement += str(count)
        piece_placement += '/'
    return piece_placement[:-1]
df93d432678190bd16dd38e04d382864f5406b07
20,627
import string
import random


def random_str(lmin, lmax=0, charset=None):
    """Generate a random string

    Args:
        lmin (int): Minimum number of characters
        lmax (int): Maximum number of characters
        charset (str): Characters to draw from (defaults to printable characters
            excluding most whitespace)

    Returns:
        str: Random string
    """
    if lmin < 0:
        lmin = 0
    if lmax == 0:
        lmax = lmin
        lmin = 0
    elif lmax < lmin:
        lmin, lmax = lmax, lmin
    if charset is None:
        charset = string.printable[:-5]
    return ''.join([random.choice(charset)
                    for i in range(lmin)]) \
           + ''.join([random.choice(charset)
                      for i in range(random.randint(0, lmax - lmin))])
4d2f78766fe99b4a47747244b7112802dfd3e192
20,628
def number_of_authors(publications: list[dict]) -> int:
    """Computes the number of different authors.

    Authors are differentiated by their ID.

    :param: a list of publications.
    :return: the number of authors.
    """
    authors = set()
    for publication in publications:
        authors.update(x['id'] for x in publication['authors'])
    return len(authors)
7d4c610bb2f9a8003ae440e1408998ee28733bbc
20,629
import argparse


def _create_parser() -> argparse.ArgumentParser:
    """Create the argparse parser with all the arguments

    Returns:
        the created parser
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Add subcommands
    subparsers = parser.add_subparsers(title="subcommands", dest="cmd")
    parser_create = subparsers.add_parser(
        "create",
        help="Send a CreateSessionRequest to SessionD",
    )
    parser_end = subparsers.add_parser(
        "end",
        help="Send an EndSession to SessionD",
    )

    # Add arguments
    for cmd in (parser_create, parser_end):
        cmd.add_argument("--num", default=5, help="number of requests")
        cmd.add_argument(
            '--service_name',
            default='magma.lte.LocalSessionManager',
            help='proto service name',
        )
        cmd.add_argument(
            '--import_path',
            default=None,
            help='Protobuf import path directory',
        )

    # Add function callbacks
    parser_create.set_defaults(func=parser_create)
    parser_end.set_defaults(func=parser_end)
    return parser
03cb5400fd308aa5965c48565ad054799a303b2f
20,630
def get_lowest(pair_list):
    """Gets the pairs with the lowest score.

    Assumes pair_list[0] has the lowest score and
    the score is the first element of the pair.
    """
    low_score = pair_list[0][0]
    result = []
    index = 0
    # Bounds check prevents an IndexError when every pair shares the lowest score.
    while index < len(pair_list) and pair_list[index][0] == low_score:
        result.append(pair_list[index])
        index += 1
    return result
282e324bed12f1005491600e305ed1e498b7c6d1
20,631
def get_slope(r, sy, sx):
    """
    Get the slope for a regression line having given parameters.

    Parameters
    ----------
    > `r`: regression coefficient of the line
    > `sy`: sample standard deviation of y distribution
    > `sx`: sample standard deviation of x distribution

    Returns
    -------
    The slope of the given regression line with the above parameters.
    """
    return r * (sy / sx)
ada3f2b105634635a41d973a629aa32b64f8bbaf
20,632
def clean_translation(translation):
    """Clean the translation string from unwanted substrings."""
    illegal_substrings = ['; french.languagedaily.com',
                          ';\u00a0french.languagedaily.com',
                          ' french.languagedaily.co']
    for iss in illegal_substrings:
        translation = translation.replace(iss, '')
    return translation
9e20b739ebb47900555309d3a91f58f9ef0e8f7c
20,634
def get_ids_from_nodes(node_prefix, nodes):
    """
    Take a list of nodes from G and returns a list of the ids of only the nodes with the given prefix.

    param node_prefix: prefix for the nodes to keep.
    type node_prefix: str.
    param nodes: list of nodes from G.
    type nodes: [(str, int)].
    """
    return map(lambda pair: pair[1],
               filter(lambda pair: pair[0] == node_prefix, nodes))
08c53235c0164dee66b63f7d672550763bb7e26d
20,636
def parse_cachecontrol(header):
    """Parse Cache-Control header

    https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9

    >>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None,
    ...                                                 b'max-age': b'3600'}
    True
    >>> parse_cachecontrol(b'') == {}
    True
    """
    directives = {}
    for directive in header.split(b','):
        key, sep, val = directive.strip().partition(b'=')
        if key:
            directives[key.lower()] = val if sep else None
    return directives
8ebcd161f1361c59b82dd6eda42d429dc3bf1e4b
20,637
import random
import os


def define_validation_set(f_path):
    """
    Input:
        'f_path' : location of test annotations file
    Output:
        'vald_set' : list of annotated samples chosen in validation set; we choose 1/5 of normal
                     videos and 1/4 of anomaly videos from the test set for the validation set
    Also saves this list as a txt file and returns its location
    """
    # f_path = '/content/drive/Shared drives/EECS 545 - ML Project/data/Temporal_Anomaly_Annotation_for_Testing_Videos.txt'
    f_obj = open(f_path, 'r')
    alllines = f_obj.readlines()
    normal_set = [lines for lines in alllines if lines.startswith('Normal')]
    anomaly_set = [lines for lines in alllines if lines not in normal_set]
    num_normal_test = len(normal_set)
    num_normal_vald = num_normal_test // 5
    num_anomaly_test = len(anomaly_set)
    num_anomaly_vald = num_anomaly_test // 4
    val_set_normal = random.sample(normal_set, num_normal_vald)
    val_set_anomaly = random.sample(anomaly_set, num_anomaly_vald)
    vald_set = val_set_normal + val_set_anomaly
    vald_set = random.sample(vald_set, len(vald_set))
    filename = os.path.join(os.getcwd(), '..', 'Anomaly_Detection_splits/Temporal_annotations_for_vald.txt')
    if not os.path.exists(filename):
        with open(filename, 'w') as f:
            for item in vald_set:
                f.write("%s" % item)
    return vald_set, filename
4695f03461e17dd036b5b674319300c74442c1f2
20,638
def get_prefix0_format_string(item_num):
    """Get the prefix 0 format string from item_num.

    For example, 3 items would result in {:01d}
    """
    max_digit_num = len(str(item_num))
    output_pattern = '{:0' + str(max_digit_num) + 'd}'
    return output_pattern
9b5e5597763d16577aacc2f884cc391edea4dcd4
20,639
def evaluate_condition(condition, answer_value, match_value):
    """
    :param condition: string representation of comparison operator
    :param answer_value: the left hand operand in the comparison
    :param match_value: the right hand operand in the comparison
    :return: boolean value of comparing lhs and rhs using the specified operator
    """
    answer_and_match = answer_value is not None and match_value is not None

    comparison_operators = {
        'equals': lambda answer_value, match_value: answer_value == match_value,
        'not equals': lambda answer_value, match_value: answer_value != match_value,
        'contains': lambda answer_value, match_value: isinstance(answer_value, list) and match_value in answer_value,
        'not contains': lambda answer_value, match_value: isinstance(answer_value, list) and match_value not in answer_value,
        'set': lambda answer_value, _: answer_value is not None and answer_value != [],
        'not set': lambda answer_value, _: answer_value is None or answer_value == [],
        'greater than': lambda answer_value, match_value: answer_and_match and answer_value > match_value,
        'greater than or equal to': lambda answer_value, match_value: answer_and_match and answer_value >= match_value,
        'less than': lambda answer_value, match_value: answer_and_match and answer_value < match_value,
        'less than or equal to': lambda answer_value, match_value: answer_and_match and answer_value <= match_value,
    }

    match_function = comparison_operators[condition]
    return match_function(answer_value, match_value)
9bb31ace6d183446dcc43bfd3b7fe6f2f40a2db7
20,640
from typing import Iterable


def join_comma_or(items: Iterable[str]) -> str:
    """Join strings with commas and 'or'."""
    if not items:
        raise ValueError("No items to join")
    *rest, last = items
    if not rest:
        return last
    return f"{', '.join(rest)} or {last}"
08126eb0db002943c7613f140ad8ee279a1a8515
20,642
def generate_cursor(collection, parameters):
    """Query collection and return a cursor to be used for data retrieval."""
    # We set no_cursor_timeout so that long retrievals do not cause generated
    # cursors to expire on the MongoDB server. This allows us to generate all cursors
    # up front and then pull results without worrying about a generated cursor
    # timing out on the server.
    return collection.find(
        parameters["query"], parameters["projection"], no_cursor_timeout=True
    )
7b50cff3bad2cc0907f262008ff4e2c91b794d35
20,644
def is_prod_of_two_3_digit_num(n):
    """Determine whether n is the product of two 3-digit numbers."""
    result = False
    for i in range(100, 1000):
        if n % i == 0 and n // i in range(100, 1000):
            result = True
            break
    return result
db0cb1b3ae1ecb8b15d01582f8c0599ce00ce766
20,647
def split_authority(authority):
    """
    Basic authority parser that splits authority into component parts

    >>> split_authority("user:password@host:port")
    ('user', 'password', 'host', 'port')
    """
    if '@' in authority:
        userinfo, hostport = authority.split('@', 1)
    else:
        userinfo, hostport = None, authority

    if userinfo and ':' in userinfo:
        user, passwd = userinfo.split(':', 1)
    else:
        user, passwd = userinfo, None

    if hostport and ':' in hostport:
        host, port = hostport.split(':', 1)
    else:
        host, port = hostport, None

    if not host:
        host = None

    return (user, passwd, host, port)
bb6663646cec725ecb809cccfd75e7ee48a1684e
20,648
def finite_fault_factor(magnitude, model="BT15"):
    """
    Finite fault factor for converting Rrup to an equivalent point source distance.

    Args:
        magnitude (float): Earthquake moment magnitude.
        model (str): Which model to use; currently only supports "BT15".

    Returns:
        float: Adjusted distance.
    """
    if model == "BT15":
        Mt1 = 5.744
        Mt2 = 7.744
        if magnitude < Mt1:
            c0 = 0.7497
            c1 = 0.4300
            c2 = 0.0
            Mt = Mt1
        elif magnitude < Mt2:
            c0 = 0.7497
            c1 = 0.4300
            c2 = -0.04875
            Mt = Mt1
        else:
            c0 = 1.4147
            c1 = 0.2350
            c2 = 0
            Mt = Mt2
        logH = c0 + c1 * (magnitude - Mt) + c2 * (magnitude - Mt)**2
        h = 10**(logH)
    else:
        raise ValueError("Unsupported finite fault adjustment model.")
    return h
55d74563cdfd367e7866ebc5285d6abed9c649df
20,650
def calculate_indices(n):
    """Calculates the indices for presentation assuming screen size of 600x600.

    This determines where the boulders and alien will appear on screen.

    Parameters:
        n: number of rows/columns. Range(3,7)

    Returns:
        dictionary of the screen coordinates
    """
    # calculates indices range [3,6]
    x_y_index = [[int((600 / n - 50) / 2 + 600 / n * i) for i in range(n)] for n in range(3, 7)]
    p_values = {}
    for i in range(n):
        p_values[i] = x_y_index[n - 3][i]
    return p_values
a237187fdafa1970c1c7d879df40297d66a3782d
20,651
def addattr(**kwargs):
    """
    Decorator to add attributes to a function

    The shortcut is most useful for admin representations of methods or attributes.

    Example:
        Instead of writing

        >>> def is_valid(self):
        >>>     return self.name != "foo"
        >>> is_valid.short_description = "The name for the function"
        >>> is_valid.boolean = True

        You write

        >>> @addattr(short_description="The name for the function", boolean=True)
        >>> def is_valid(self):
        >>>     return self.name != "foo"

    :param kwargs: the properties to add to a function
    """
    def decorator(func):
        for key in kwargs:
            setattr(func, key, kwargs[key])
        return func
    return decorator
bbf9ed404cc90413e6e186967621ed5d8ef858ad
20,652
def outputCoords(x: int, y: int) -> str:
    """
    Converts 2D list indexes into Human-Readable chess-board coordinates

    x and y correspond to the indexes such that it looks like this: `dataList[x][y]`
    """
    columnCheck = ["A", "B", "C"]
    column = columnCheck[y]
    row = str(x + 1)
    return column + row
37412d503f792822f2264ac59eb22aee334182e8
20,653
def replace_ch(string: str, characters: list, replacement: str) -> str:
    """Replaces all instances of characters in a given string
    with a given replacement character.
    """
    n_string = string
    for ch in characters:
        n_string = n_string.replace(ch, replacement)
    return n_string
30939885554638d4b1cf844211e687a1447cd72b
20,654
import timeit


def ten_million_addition_trial():
    """Time the addition of first ten million numbers."""
    loop = '''
x = 0
for i in range(10000000):
    x += i
'''
    return min(timeit.repeat(stmt=loop, repeat=10, number=1))
272e8971a25ee3192dd385e5b26f49f0933824c7
20,655
def get_even_lines(list_file):
    """Get all even lines == makes enumerate start at 1"""
    even_lines = []
    for index, line in enumerate(list_file, start=1):
        if index % 2 == 0:
            even_lines.append(line)
    return even_lines
4e5b83784a7ebd21735311ae06069da72562b73d
20,656
def isfloat(element):
    """
    This function checks if a string is convertible to float
    """
    try:
        float(element)
        return True
    except ValueError:
        return False
0a2c209b998a8aeea696a35f2cb3b587373739a5
20,657
def extract_topic_data(msg, t):
    """ Reads all data in a message

    This is a recursive function. Given a message, extract all of the data in the message to a dictionary. The keys
    of the dictionary are the field names within the message, and the values are the values of the fields in the
    message. Recursively call this function on a message to build up a dictionary of dictionaries representing the
    ROS message.

    Args:
        msg: A ROS message
        t: Time the message was recorded in the bag file

    Returns:
        A dictionary containing all information found in the message.
    """
    # Initialize the information found for this message
    data = {}

    # If the message has slots, we have a non-primitive type, and need to extract all of the information from this
    # message by recursively calling this function on the data in that slot. For example, we may have a message with
    # a geometry_msgs/Vector3 as a field. Call this function on that field to get the x, y, and z components
    if hasattr(msg, '__slots__'):
        # Extract all information on a non-primitive type
        for slot in msg.__slots__:
            data[slot] = extract_topic_data(getattr(msg, slot), t)
    else:
        # We encountered a primitive type, like a double. Just return it so it gets put into the output dictionary
        return msg

    # Return the dictionary representing all of the fields and their information in this message
    return data
2a80771b70aa012bd626da7fbe8d5509c444481b
20,658
def filter(case_ids, to_filter):
    """Filter cases.

    :param case_ids: Parameter list for pytest.mark.parametrize.
    :param to_filter: List of parameters to filter from case_ids.
    :return: Filtered case_ids.
    """
    return [case for case in case_ids if case not in to_filter]
fb5767cb5c5efc75d11bbcbdf75f9c8d4479c6fe
20,660
def atm2Btu_ft3(x):
    """atm -> Btu/ft^3"""
    return 2.719 * x
e9717d6990e18a50c644bd5c942b965fabff861b
20,661
def release_mod(request):
    """Modifies a release string to alternative valid values."""
    return request.param
9aef0b404b5b3238837fd829fb74748113508fdc
20,662
def binary_to_decimal(decimal_num: str):
    """
    Converts binary number to decimal number.
    @return: <int> int of the decimal number
    """
    decimal_string = str(decimal_num)
    if len(decimal_string) > 0:
        first = decimal_string[0]
        current = 2**(len(decimal_string) - 1) if first == '1' else 0
        decimal_string = decimal_string[1:]
        return current + binary_to_decimal(decimal_string)
    return 0
79c7f3b22ae609ec393b121a6c5a5548a19b7b87
20,663
def calc_angle(per, line):
    """
    Calculate the angle between two vectors. Takes a quarter circle into consideration.
    :param per: first vector
    :type per: DB.XYZ
    :param line: second vector
    :type line: DB.XYZ
    :return: Angle between [-pi, pi]
    :rtype: float
    """
    return (1 if per.Y >= 0 else -1) * per.AngleTo(line)
81290fd40dfd4d714f56478a2c41b33156ca8157
20,665
from pathlib import Path


def check_dir(dir, mkdir=False):
    """Check that a directory exists.

    Args:
        dir (str): Path of the target directory (string).
        mkdir (bool, optional): Whether to create the directory if it does not exist. Defaults to False.

    Raises:
        FileNotFoundError: Raised when the directory does not exist and mkdir=False.

    Returns:
        dir_path: Path object of the directory.
    """
    dir_path = Path(dir)
    if not dir_path.exists():
        print(f"directory not found : {dir}")
        if mkdir:
            print(f"make directory : {dir}")
            dir_path.mkdir(parents=True)
        else:
            raise FileNotFoundError(f"{dir}")
    return dir_path
1ada67ae07bdfe05c25474f3028cf4b5808d541b
20,667
import numpy as np


def _make_angle_gradient(X, Y, angle):
    """Generates index map for angle gradients."""
    Z = (((180 * np.arctan2(Y, X) / np.pi) + angle) % 360) / 360
    return Z
f30dcd0101038830f97c0111181ee28cdaa38582
20,669
def extract_column_from_array(array, col_number):
    """
    Extracts a specific column from an array and copies it to a list

    :param array: The NumPy array with the data
    :param col_number: The number of the column that should be extracted
    :return: The list with the extracted values
    """
    extracted_column = []
    for row in array:
        for number, column in enumerate(row):
            if number == col_number:
                extracted_column.append(column)
    return extracted_column
b390ee58aab8802302996488925855de4d428f1a
20,671
def spreadsheet_col_num_to_name(num):
    """Convert a column index to spreadsheet letters.

    Adapted from
    http://asyoulook.com/computers%20&%20internet/python-convert-spreadsheet-number-to-column-letter/659618
    """
    letters = ''
    num += 1
    while num:
        mod = num % 26
        if mod == 0:
            # columns 26, 52, ... map to 'Z', not chr(64)
            mod = 26
        letters += chr(mod + 64)
        num = (num - mod) // 26
    return ''.join(reversed(letters))
c39a96ed5794f582ce790a025ddecfe2cff39bf0
20,673
def create_empty_features_dict(n_feats, n_iss, n_k):
    """Create null features for different iss in a listdict-form.

    Parameters
    ----------
    n_feats: int
        the number of features.
    n_iss: int
        the number of the elements to create their features.
    n_k: int
        the number of perturbations.

    Returns
    -------
    features: list
        the null features we want to compute.
    """
    return [[{}]*n_iss]*n_k
3521bc35ea9e32cb4e1517828afb3a5f59274da7
20,674
def _SequentialProvider(path_source):
    """A provider that iterates over the output of a function that produces paths.

    _SequentialProvider takes in a path_source, which is a function that returns a
    list of all currently available paths. _SequentialProvider returns a path
    provider (see documentation for the |DirectoryWatcher| class for the semantics)
    that will return the alphabetically next path after the current one (or the
    earliest path if the current path is None).

    The provider will never return a path which is alphanumerically less than the
    current path; as such, if the path source provides a high path (e.g. "c") and
    later doubles back and provides a low path (e.g. "b"), once the current path
    was set to "c" the _SequentialProvider will ignore the "b" and never return it.

    Args:
      path_source: A function that returns an iterable of paths.

    Returns:
      A path provider for use with DirectoryWatcher.
    """
    def _Provider(current_path):
        next_paths = list(path
                          for path in path_source()
                          if current_path is None or path > current_path)
        if next_paths:
            return min(next_paths)
        else:
            return None
    return _Provider
147d6891a650f24fb3ec42f94d34c41bb06848ca
20,676
def _format_data_as_table(data):
    """Format data as a table"""
    if isinstance(data, dict):
        data = [data]

    # Get common keys
    common_keys = {key for key, val in data[0].items() if isinstance(val, str)}
    for idx in range(1, len(data)):
        common_keys = common_keys.intersection(set(data[idx].keys()))
    common_keys = sorted(common_keys)

    # Construct output as table
    column_width = {val: len(data[0][val]) for val in common_keys}
    row_format = ''.join(['{:' + str(width) + '}\t\t' for _, width in column_width.items()])
    title = row_format.format(*column_width.keys())
    separator_column_width = ['-' * width for _, width in column_width.items()]
    separator = row_format.format(*separator_column_width)
    formatted_data = title + '\n' + separator

    # Construct each row data
    for entry in data:
        row_data = [entry[key] for key in common_keys]
        formatted_data += '\n' + row_format.format(*row_data)

    return formatted_data
e1cb643d64ca207b9847c79f41caaed4f558fe48
20,677
def cycphase(self, type_="", option="", **kwargs):
    """Provides tools for determining minimum and maximum possible result
    values from frequency couplets produced in a modal cyclic symmetry
    analysis.

    APDL Command: CYCPHASE

    Parameters
    ----------
    type\_
        The type of operation requested:

        DISP - Calculate the maximum and minimum possible displacement at each node in the
        original sector model. Store the values and the phase angle at which they occurred.

        STRESS - Calculate the maximum and minimum possible stresses at each node in the
        original sector model. Store the values and the phase angle at which they occurred.

        STRAIN - Calculate the maximum and minimum possible strains at each node in the
        original sector model. Store the values and the phase angle at which they occurred.

        ALL - Calculate the maximum and minimum possible displacement, stress and strain at
        each node in the original sector model. Store the values and the phase angle at
        which they occurred.

        GET - Places the value of a MAX or MIN item into the _CYCVALUE parameter, the node
        for that value in the _CYCNODE parameter, and the phase angle for the value in the
        _CYCPHASE parameter.

        PUT - Put resulting sweep values for printing (via the PRNSOL command) or plotting
        (via the PLNSOL command).

        LIST - List the current minimum/maximum displacement, stress and strain nodal values.

        STAT - Summarize the results of the last phase sweep.

        CLEAR - Clear phase-sweep information from the database.

    option
        If TYPE = DISP, STRAIN, STRESS or ALL, controls the sweep angle
        increment to use in the search:

        Angle - The sweep angle increment in degrees, greater than 0.1 and less than 10.
        The default is 1.

    Notes
    -----
    When you expand the results of a modal cyclic symmetry analysis (via the
    /CYCEXPAND or EXPAND command), ANSYS combines the real and imaginary results
    for a given nodal diameter, assuming no phase shift between them; however, the
    modal response can occur at any phase shift.

    CYCPHASE response results are valid only for the first cyclic sector. To
    obtain the response at any part of the expanded model, ANSYS, Inc. recommends
    using cyclic symmetry results expansion at the phase angle obtained via
    CYCPHASE.

    The phase angles returned by CYCPHASE contain the minimum and maximum values
    for USUM, SEQV and other scalar principal stress and strain quantities;
    however, they do not always return the true minimum and maximum values for
    directional quantities like UX or SX unless the values fall in the first
    sector.

    CYCPHASE does not consider midside node values when evaluating maximum and
    minimum values, which may affect DISPLAY quantities but no others. (Typically,
    ANSYS ignores midside node stresses and strains during postprocessing.)
    Issuing CYCPHASE,PUT clears the result values for midside nodes on high order
    elements; therefore, this option sets element faceting (/EFACET) to 1. The
    command reports that midside nodal values are set to zero and indicates that
    element faceting is set to 1.

    If the sweep values are available after issuing a CYCPHASE,PUT command, the
    PRNSOL or PLNSOL command will print or plot (respectively) the sweep values of
    structure displacement Ux, Uy, Uz, component stress/strain X, Y, Z, XY, YZ,
    ZX, principal stress/strain 1, 2, 3 and equivalent stress/strain EQV. The
    vector sum of displacement (USUM) and stress/strain intensity (SINT) are not
    valid phase-sweep results.

    You can specify any coordinate system via the RSYS command for displaying or
    printing CYCPHASE results. However, after CYCPHASE results have been
    extracted, you cannot then transform them via the RSYS command. If you try to
    do so, ANSYS issues a warning message.

    The CYCPHASE command is valid in /POST1 and for cyclically symmetric models
    only.

    To learn more about analyzing a cyclically symmetric structure, see the
    Cyclic Symmetry Analysis Guide.
    """
    command = f"CYCPHASE,{type_},{option}"
    return self.run(command, **kwargs)
9562e8d5b67cd0dfdd753586d3e65257570cf89d
20,678
def _get_states(rev, act):
    """Determines the initial state and the final state based on boolean inputs

    Parameters
    ----------
    rev : bool
        True if the reaction is in the reverse direction.
    act : bool
        True if the transition state is the final state.

    Returns
    -------
    initial_state : str
        Initial state of the reaction. Either 'reactants', 'products' or 'transition state'
    final_state : str
        Final state of the reaction. Either 'reactants', 'products' or 'transition state'
    """
    if rev:
        initial_state = 'products'
        final_state = 'reactants'
    else:
        initial_state = 'reactants'
        final_state = 'products'
    # Overwrites the final state if necessary
    if act:
        final_state = 'transition state'
    return initial_state, final_state
4303b193a40515b4b7c52c4e2b5286dc6a9f4cd1
20,679
from numpy import dtype


def common_dtype(dtype1, dtype2):
    """The data type that can represent objects of both 'dtype1' and 'dtype2'
    without loss of precision.
    E.g. (int, float64) -> float64, ('S20', 'S26') -> 'S26'"""
    # Make sure data types are numpy data types.
    dtype1, dtype2 = dtype(dtype1), dtype(dtype2)
    if dtype1.kind == dtype2.kind:
        return dtype1 if dtype1.itemsize > dtype2.itemsize else dtype2
    if dtype1.kind == "S":
        return dtype1
    if dtype2.kind == "S":
        return dtype2
    if dtype1.kind == "f":
        return dtype1
    if dtype2.kind == "f":
        return dtype2
    if dtype1.kind == "i":
        return dtype1
    if dtype2.kind == "i":
        return dtype2
9e2edeeac8f864f2a0b1a52cb746ddbd6e2818e4
20,680
from typing import Any


def compare_any_with_str(other: Any, str_value: str) -> bool:
    """Compare any value with a string value in its str() form.

    :param other: the other value to be compared with a string.
    :param str_value: the string value to be compared.
    :return: True if the str() of the other value is equal to str_value.
    """
    return str(other) == str_value
be88d4e15a609468d3d65eb99417a4e76582ac9a
20,682
def get_temp_column_name(df) -> str:
    """Small helper to get a new column name that does not already exist"""
    temp_column_name = '__tmp__'
    while temp_column_name in df.columns:
        temp_column_name += '_'
    return temp_column_name
a4dba2fb09166b2797f8c4c6dd93f46aaebb408e
20,683
def enable() -> dict:
    """Enables tracking security state changes."""
    return {"method": "Security.enable", "params": {}}
a9c341edf37ec5ebbc0b372b4e56a81e98aff903
20,685
def sad_merge_segments(segments):
    """For SAD, segments given a single speaker label (SPEECH) and overlapping segments are merged."""
    prev_beg, prev_end = 0.0, 0.0
    smoothed_segments = []
    for seg in segments:
        beg, end = seg[0], seg[2]
        if beg > prev_beg and end < prev_end:
            continue
        elif end > prev_end:
            if beg > prev_end:
                smooth = [prev_beg, (prev_end - prev_beg), prev_end, "speech"]
                smoothed_segments.append(smooth)
                prev_beg = beg
                prev_end = end
            else:
                prev_end = end
    smooth = [prev_beg, (prev_end - prev_beg), prev_end, "speech"]
    smoothed_segments.append(smooth)
    return smoothed_segments
9492d3ee4dfa5cec6d3c9827bd72ef0704539a00
20,686
def _copier(d):
    """Recursively copy `d`, used for `self.win`."""
    if isinstance(d, list):
        return d.copy()
    assert isinstance(d, dict), d
    r = dict()
    for k, v in d.items():
        r[k] = _copier(v)
    return r
16ff1f783ef2724328f9c010030577c5bce67d0e
20,687
import math


def check_monotonically_increase(parameter_tup):
    """Check if a, b, c could let g(r) monotonically increase in [0,1]"""
    a, b, c = parameter_tup
    if c == 0:
        if a >= 0 and a + 2 * b >= 0 and not(a == 0 and b == 0):
            return True
        return False
    if c < 0:
        if b**2 > 3 * a * c:
            q_plus = (-2 * b + math.sqrt(4 * b**2 - 12 * a * c)) / (6 * c)
            q_minus = (-2 * b - math.sqrt(4 * b**2 - 12 * a * c)) / (6 * c)
            if q_plus <= 0 and q_minus >= 1:
                return True
        return False
    if c > 0:
        if b**2 < 3 * a * c:
            return True
        elif b**2 == 3 * a * c:
            if b >= 0 or 3 * c + b <= 0:
                return True
        else:
            q_plus = (-2 * b + math.sqrt(4 * b**2 - 12 * a * c)) / (6 * c)
            q_minus = (-2 * b - math.sqrt(4 * b**2 - 12 * a * c)) / (6 * c)
            if q_plus <= 0 or q_minus >= 1:
                return True
        return False
4d05589bea3ac6c51db9007c1c1056eb0aa862fe
20,689
def process_hv_plots(widgets, plots):
    """
    Temporary fix to patch HoloViews plot comms
    """
    bokeh_plots = []
    for plot in plots:
        if hasattr(plot, '_update_callbacks'):
            for subplot in plot.traverse(lambda x: x):
                subplot.comm = widgets.server_comm
                for cb in subplot.callbacks:
                    for c in cb.callbacks:
                        c.code = c.code.replace(plot.id, widgets.plot_id)
            plot = plot.state
        bokeh_plots.append(plot)
    return bokeh_plots
e9023d013caa676ab424c4a34b1c9cd51ab73c04
20,690
def _sanitize_for_filename(text):
    """
    Sanitize the given text for use in a filename.

    (particularly log and lock files under Unix. So we lowercase them.)

    :type text: str
    :rtype: str
    >>> _sanitize_for_filename('some one')
    'some-one'
    >>> _sanitize_for_filename('s@me One')
    's-me-one'
    >>> _sanitize_for_filename('LS8 BPF')
    'ls8-bpf'
    """
    return "".join([x if x.isalnum() else "-" for x in text.lower()])
7edda0859a6527c9a4cb0a464afb82c16d6df6dc
20,691
import argparse
import textwrap


def get_parser():
    """Create the parser that will be used to add arguments to the script.
    """
    parser = argparse.ArgumentParser(description=textwrap.dedent("""
        Creates conda environments for a given version of bokeh, installed
        using pip and conda and including python 2.7 and python 3.4.

        The --version ('-v') option takes an earlier version of bokeh, for
        use in creating environments where bokeh will be updated.

        Ex: ' python test_matrix.py -v 0.7.0'
    """), formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('-v', '--version', action='store', default=False,
                        help='Version of bokeh', required=True)
    # parser.add_argument('')

    return parser
9030d041ee1c13cb27526772550e0c7b3743856b
20,693
import os


def directorySize(directory):
    """
    Walks all the subdirectories and files in the given directory
    and counts the cumulative sizes of files.

    :param directory: name of the root directory
    :type directory: string
    :return: cumulative size of a given directory in kB
    :rtype: float
    """
    fs = 0.
    for path, dirs, files in os.walk(directory):
        for file in files:
            name = os.path.join(path, file)
            fs += os.path.getsize(name)
    return fs / 1024.
2e4a367c136c5bfb97eba0bf0a7fa19e77491256
20,694
import os


def files2list(path, file_extension):
    """
    Return a list of found files with that extension from the given path

    :param path: The folder to read from
    :param file_extension: The type of file to read
    """
    files = []
    for file in os.listdir(path):
        if file.endswith(file_extension):
            files.append(file)
    return files
b356ba1fc6848e61ca3348494f2aea281df0044a
20,696
def subs_str_finder(control_s, sub_str):
    """
    Finds the start and end index of the first sub_str occurrence in control_s.
    """
    sub_len = len(sub_str)
    if sub_str in control_s:
        first_index = control_s.find(sub_str)
        second_index = first_index + sub_len
        return first_index, second_index
fab2c3a6e9e9370721f36d96354b8def04598ef4
20,697
import argparse


def key_value_pair_or_value(opt: str):
    """Argument type for argparse, which accepts either a single float or a key=value pair.

    Args:
        opt: Command line parameter

    Returns:
        Single float or tuple containing key and value
    """
    # does it contain a =?
    pos = opt.find('=')
    if pos < 0:
        # single value?
        try:
            return float(opt)
        except ValueError:
            raise argparse.ArgumentTypeError('Value %s is not a float.' % opt)

    # split
    key = opt[:pos]
    value = opt[pos + 1:]

    # convert value to float
    try:
        value = float(value)
    except ValueError:
        raise argparse.ArgumentTypeError('Value %s for parameter %s is not a float.' % (value, key))

    # and return
    return key, value
7b4340752d99ca3ac2eff26dfca5108e433f1960
20,700
import os


def update_volume(adc, current_station, current_volume):
    """ Updates the volume of the player

    Arguments
    ----------
    adc : gpiozero MCO3xxx object
        Analog to Digital Converter which reads the raw volume knob position
    current_station : int
        Index of the station whose mpd socket is addressed
    current_volume : int
        Volume currently set on the player
    """
    new_volume = 100 - int(adc.value * 100)
    if current_volume == new_volume:
        return current_volume
    else:
        os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(current_station)
                  + ' volume ' + str(new_volume))
        return new_volume
493764683e159ea36a74012d9ee509ebe9025b98
20,701
def estimate_parms(W, X, Y, n):
    """Compute estimates of q_mod and q_con parameters."""
    q_mod_hat = 1 - X / Y
    q_con_hat = W / (n - 1)
    return (q_mod_hat, q_con_hat)
b1f47de984482dee9d99f8ffa33ccb570079ba93
20,702
def _is_empty(value):
    """Returns True if value is None or an empty string"""
    return value is None or value == ""
8afbcbc71ab47097520c7a7e646406967d1086f6
20,704
from html.entities import name2codepoint
import re


def extract_text(html, start, end, decode_entities=True, strip_tags=True):
    """Given *html*, a string of HTML content, and two substrings (*start* and *end*)
    present in this string, return all text between the substrings, optionally decoding
    any HTML entities and removing HTML tags.

    >>> extract_text("<body><div><b>Hello</b> <i>World</i>&trade;</div></body>",
    ...              "<div>", "</div>") == 'Hello World™'
    True
    >>> extract_text("<body><div><b>Hello</b> <i>World</i>&trade;</div></body>",
    ...              "<div>", "</div>", decode_entities=False) == 'Hello World&trade;'
    True
    >>> extract_text("<body><div><b>Hello</b> <i>World</i>&trade;</div></body>",
    ...              "<div>", "</div>", strip_tags=False) == '<b>Hello</b> <i>World</i>™'
    True
    """
    startidx = html.index(start)
    endidx = html.rindex(end)
    text = html[startidx + len(start):endidx]
    if decode_entities:
        entities = re.compile(r"&(\w+?);")
        text = entities.sub(lambda m: chr(name2codepoint[m.group(1)]), text)
    if strip_tags:
        # http://stackoverflow.com/a/1732454
        tags = re.compile(r"</?\w+>")
        text = tags.sub('', text)
    return text
1770ad49ec992df949725c9595f48e93188dc0e8
20,705
def buildmatrix(first, second, method):
    """
    Builds the matrix for the Hungarian algorithm.
    Pads with the worst to make the matrix square.
    """
    costs = [[method(f, s) for s in second] for f in first]
    if len(first) and len(second):
        horrible = [max(max(costs)) + 1]
    else:
        horrible = [1e10]
    if len(first) > len(second):
        for row in costs:
            row.extend(horrible * (len(first) - len(second)))
    elif len(first) < len(second):
        costs.extend([horrible * len(second)] * (len(second) - len(first)))
    return costs
4bf3ec4c4e8b145dc9eb3884827f7ac647b8b7c4
20,706
import subprocess


def git_is_clean():
    """ Return True if `git status` reports clean, otherwise False """
    proc = subprocess.Popen(['git', 'status', '--porcelain'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    success = proc.wait() == 0            # gets the return code, False on nonzero
    success = success and len(out) == 0   # false if any standard output
    success = success and len(err) == 0   # false if any standard error
    return success
0d66eee21c670a4def8c2ac99f38ae9d089d4984
20,707
def SetOutputAmplifier(typ):
    """
    Sets the second output amplifier

    Parameters:
        int typ: type
            Valid values:
                0 electron multiplication/Conventional(clara)
                1 conventional/Extended NIR Mode(clara)
    """
    return None
690dadbfac6bdecdd976be266df7a05b8b1203f2
20,708
def merge(intervals):
    """
    :type intervals: List[List]
    :rtype: List[List]
    """
    i = 0
    intervals = sorted(intervals, key=lambda x: x[0])
    while i < len(intervals) - 1:
        if intervals[i][1] >= intervals[i + 1][0]:
            intervals[i] = [intervals[i][0], max(intervals[i + 1][1], intervals[i][1])]
            del intervals[i + 1]
        else:
            i += 1
    return intervals
bcee2987df10a45d19c9679dd8e69cb0243982d0
20,709
def as_bool(v):
    """Convert the given string into a Boolean value.

    Args:
        v: the string that should be converted to boolean

    Returns:
        The boolean converted value for the given string
    """
    return str(v).lower() in ("true", "1")
6a6fd6053d5c7bc0a5f68e19fe47919704b87217
20,710
def serializer_is_dirty(preference_serializer):
    """
    Return True if saving the supplied (Raw)UserPreferenceSerializer would change the database.
    """
    return (
        preference_serializer.instance is None or
        preference_serializer.instance.value != preference_serializer.validated_data['value']
    )
19ce15215a13e96020e0503c28930525d6243330
20,711
def version_tuple(version):
    """Returns a tuple from version string"""
    if isinstance(version, tuple):
        return version
    return tuple(version.split("."))
9711ca388ae3d10cf59bd868e135e3902cb11dc5
20,712
import json
import requests


def adjust_led(state_irgb):
    """ Sends a request to the arduino to adjust the leds. """
    state = state_irgb.split()[0]
    irgb = state_irgb.split()[1].upper()
    url = f"http://192.168.1.188/led?state={state};irgb={irgb};"
    return json.loads(requests.get(url).text)
8330842295b0bb3dcfb99085fb9d27e18b12c8a0
20,713
import re


def isfloat(word):
    """Matches ANY number; it can be a decimal, scientific notation, integer, or what have you"""
    return re.match(r'^[-+]?[0-9]*\.?[0-9]*([eEdD][-+]?[0-9]+)?$', word)
8df3be23d1e39590c88fb0e8de275571b0ec4c57
20,715
def _unpack_data(recs):
    """Reconstruct data records from file to 2-d (or 1-d) list of values.
    """
    if len(recs) == 0:
        return None
    if len(recs) == 1:
        for rec in recs:
            fields = rec.strip('\n').split()
            return [float(v) for v in fields]
    arr = []
    for rec in recs:
        fields = rec.strip('\n').split()
        vals = [float(v) for v in fields]
        arr.append(vals)
    return arr
d271417b9f2f8e5515011293a83d4b03a619cd27
20,717
import functools
import threading


def thread_n_funcrun(number_of_threads=1):
    """ run function in multiple threads

    Examples:
    .. example_code::

        >>> from apu.mp.thread_funcrun import thread_funcrun
        >>> @thread_n_funcrun(number_of_threads=3)
        ... def test(*args, **kwargs):
        ...     pass
        Thread started for function "test"
        Thread started for function "test"
        Thread started for function "test"
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for _ in range(number_of_threads):
                # forward the call's positional and keyword arguments to each thread
                threading.Thread(target=func, args=args, kwargs=kwargs).start()
                print(f"Thread started for function {func.__name__}")
        return wrapper
    return decorator
8fa0eadc024b33867fa300dc6bf36d64ea3f8660
20,718
import os


def collect_top_level_files(package_dir):
    """Return a list of dart filenames under the package's lib directory."""
    return sorted(
        os.path.basename(p)
        for p in os.listdir(os.path.join(package_dir, 'lib'))
        if os.path.basename(p).endswith('.dart'))
480c0ecd6d614bf9601229ca037360465b8666da
20,719
import subprocess


def run_md5(input_stdin):
    """
    run_md5
    @param input_stdin:
    @return:
    """
    proc = subprocess.Popen(
        ['md5'],
        stdin=input_stdin,
        stdout=subprocess.PIPE,
    )
    return proc
c1b96421d5c2b52f5dcbcea6d62f0726951ee9d0
20,720
def _same_value(obj1, obj2):
    """ Helper function used during namespace resolution. """
    if obj1 is obj2:
        return True
    try:
        obj1 = obj1.get_value()
    except (AttributeError, TypeError):
        pass
    try:
        obj2 = obj2.get_value()
    except (AttributeError, TypeError):
        pass
    return obj1 is obj2
c409dec624b016f24f98cfc4f34056cb1c1e2545
20,722
def safestart(text: str, piece: str, lc: bool = False) -> bool:
    """
    Checks if text starts with another, safely

    :param text: the string to check
    :param piece: the start string
    :return: true if text starts with piece
    """
    check = text if lc else text.lower()
    return len(text) >= len(piece) and check.startswith(piece)
9bdefe01f97be4660b11ed4ce36b08da410680e3
20,723
def upload_status() -> str:
    """ Should use an enum """
    return "I DON'T KNOW"
8cc2563170cbd032def71f71a50035562534399c
20,724
def onecase(case):
    """Check if the binary string is all ones"""
    if case == "1" * len(case):
        return True
    else:
        return False
d21bbf34960abcf3eafe6f0b4271ea9054d3e77f
20,725
def prime(num):
    """To check whether a number is prime"""
    if num < 2:
        # 0 and 1 are not prime
        return False
    num2 = num
    while num2 > 0:
        if num == num2:
            num2 -= 1
        elif num2 == 1:
            num2 -= 1
        elif num % num2 == 0:
            return False
        num2 -= 1
    return True
f1c02878ec71bf066419dd1c460a4a694a8fe424
20,726
def dash_not_ready(item):
    """Return True if the item doesn't contain a telemDashboard status other than ready.

    Return ValueError if item doesn't return the expected data shape.
    Return RuntimeError if item's .json() method raises an error.
    """
    if not hasattr(item, "json"):
        return ValueError("dash_not_ready cannot check item without a json method")
    item_json = None
    try:
        item_json = item.json()
    except Exception as e:
        return RuntimeError(
            f"dash_not_ready encountered an error while running the item's .json() method:\n{e.__repr__()}"
        )
    if not ("status" in item_json):
        return ValueError(
            "dash_not_ready cannot check item without a status key in content"
        )
    if not ("telemDashboard" in item_json["status"]):
        return ValueError(
            "dash_not_ready cannot check item without status.telemDashboard key"
        )
    if item_json["status"]["telemDashboard"] == "READY":
        return False
    else:
        return (True, f"Status = {item_json['status']['telemDashboard']}")
3b4f1292588a3753a8e50cf74ff84307f844956f
20,727