content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def get_board_from_hwid(arduino_info, vid, pid):
    """Look up the board whose generic vid/pid entries match the given ids.

    Each board may define several numbered vid/pid pairs (``vid.0``/``pid.0``,
    ...); a board matches when one pair equals (vid, pid).

    Returns the matching board name, or '' when nothing matches.
    """
    boards_info = arduino_info.get('boards', {})
    for board_name in boards_info.get('names', []):
        generic_info = boards_info.get(board_name).get('generic', {})
        for key in generic_info:
            if not key.startswith('vid') or generic_info.get(key) != vid:
                continue
            # A suffixed vid key ('vid.3') pairs with the same-suffix pid key.
            suffix = '.' + key.split('.')[-1] if '.' in key else ''
            pid_key = 'pid' + suffix
            if pid_key in generic_info and generic_info[pid_key] == pid:
                return board_name
    return ''
19e9f683063ad662866b58bdc67b6213cbbac868
33,298
def clip_categorical(ser, MIN_LEVELS=5, MIN_FREQ=0.05, COVERAGE=0.95):
    """Collapse rare levels of a categorical series into 'Other'.

    A level survives if it occurs in at least ``MIN_FREQ`` fraction of rows,
    or if it falls within the leading levels that together cover ``COVERAGE``
    fraction of the data. Series with fewer than ``MIN_LEVELS`` distinct
    values are returned unchanged.

    Parameters
    ----------
    ser : pandas.Series
        The input categorical series.
    MIN_LEVELS : int
        Minimum number of distinct levels required before any clipping.
    MIN_FREQ : float
        Levels with at least this relative frequency always survive.
    COVERAGE : float
        Levels making up this cumulative fraction of the data survive.

    Returns
    -------
    pandas.Series
        Copy of ``ser`` with rare levels replaced by 'Other' and remaining
        labels whitespace-joined with underscores.
    """
    sr = ser.copy()
    if sr.nunique() < MIN_LEVELS:
        print("{} doesn't have more than {} levels. Returning as-is.".format(sr.name, MIN_LEVELS))
        return sr
    freqs = sr.value_counts(normalize=True)
    keep_by_freq = freqs.where(lambda f: f >= MIN_FREQ).dropna().index.tolist()
    keep_by_coverage = freqs.cumsum().where(lambda c: c <= COVERAGE).dropna().index.tolist()
    keep = set(keep_by_freq).union(keep_by_coverage)
    # BUG FIX: the original negated the boolean mask with unary minus
    # (`-sr.isin(KEEP)`), which modern pandas rejects; `~` is the correct
    # element-wise negation.
    sr[~sr.isin(keep)] = 'Other'
    sr = sr.map(lambda x: '_'.join(str(x).split()))
    print("{} now has {} Levels and {} % Coverage".format(sr.name, sr.nunique(), 100 * COVERAGE))
    return sr
29a264857acddfa76a185d4345604c9e69c5f1a6
33,299
def _parse_spec(spec, db=None):
    """Extract the command name, collection and query from a pymongo SON spec.

    Returns a ``(name, db, collection, query)`` tuple; all four elements are
    ``None`` when the spec is empty or its items cannot be listed.
    """
    try:
        items = list(spec.items())
    except TypeError:
        return None, None, None, None
    if not items:
        return None, None, None, None
    name, coll = items[0]
    query = None
    if name == 'update':
        updates = spec.get('updates')
        query = updates[0].get("q") if updates else None
    elif name == 'find':
        query = spec.get('filter') or None
    elif name == 'delete':
        deletes = spec.get('deletes')
        query = deletes[0].get("q") if deletes else None
    return name, db, coll, query
56bb1f871238d55d89d22c0157c9e50074498d37
33,300
def geojson_to_polygons(geojson):
    """Extract polygon coordinate arrays from a geojs annotation geojson.

    :param geojson: geojson record with a 'features' list.
    :returns: an array of polygons, each of which is an array of points.
    """
    return [
        feature['geometry']['coordinates']
        for feature in geojson['features']
        if feature.get('geometry', {}).get('type') == 'Polygon'
    ]
6b82742335841897511640fc9323c5e4895bf367
33,302
import torch


def check_cuda():
    """Report whether a GPU is available.

    Returns
    -------
    tuple
        ``(cuda_available, device)``: the availability flag and the matching
        ``torch.device`` ('cuda' when available, 'cpu' otherwise).
    """
    available = torch.cuda.is_available()
    chosen = 'cuda' if available else 'cpu'
    return available, torch.device(chosen)
88ea60017c012bbb92883617c8a73e1737aceffa
33,303
from typing import List


def exif_datetime_fields() -> List[List[str]]:
    """Return the EXIF tag names that can carry a capture date/time."""
    fields = [
        "EXIF DateTimeOriginal",
        "Image DateTimeOriginal",
        "EXIF DateTimeDigitized",
        "Image DateTimeDigitized",
        "EXIF DateTime",
        "Image DateTime",
        "GPS GPSDate",
        "EXIF GPS GPSDate",
        "EXIF DateTimeModified",
    ]
    return [fields]
7615ca8ccc0a6a16325a12e5f7256a87b43c0565
33,304
def int_or_none(arg):
    """Convert ``arg`` to int, mapping None or the string 'none' (any case) to None."""
    if arg is not None and str(arg).lower() != 'none':
        return int(arg)
    return None
651168240209540966ced78a5a2642a5979022fd
33,305
def pluralize(count, singular, plural):
    """Pick the grammatical form matching ``count`` (singular only for exactly 1)."""
    if count == 1:
        return singular
    return plural
752b744ea80571e2a01332dd5a050ee4919d5637
33,307
import numpy


def hskl(m, b, mu, cosq, y, z):
    """Build the 2x2 Thomson-Haskell layer matrix for a Love wave.

    Layer ``m`` of the velocity array ``b`` gets the full propagator; a
    (near-)zero-velocity layer degenerates to the identity matrix.
    """
    if b[m] > 0.01:
        return numpy.array([[cosq, y / mu],
                            [z * mu, cosq]], dtype=numpy.float64)
    return numpy.identity(2, dtype=numpy.float64)
40a819eb0398e243c2b703a50dbb9468ba7da7b2
33,308
def column_exists(df, col):
    """Check whether the column name exists in the DataFrame.

    Args:
        df (:obj:`DataFrame`): Pandas DataFrame.
        col (str): Column name.

    Returns:
        bool: True if exists, False if not exists.
    """
    if col and (col not in df.columns):
        # BUG FIX: corrected the grammar of the user-facing message
        # ("The specify column ... not found" -> "The specified column ...").
        print("The specified column `{0!s}` was not found in the input file"
              .format(col))
        return False
    # A falsy col (None/empty) is treated as "nothing to check".
    return True
18a1d5c9e1b40bdab38332e9aee17911d1fe153e
33,309
def update_console_links_markdown(region, ml_channel_name, ml_channel_id, mp_channel_names):
    """Build the text-widget markdown listing the AWS console links for the
    given MediaLive channel and its MediaPackage channels."""
    header = "MediaLive: [{0} - {1}](https://{2}.console.aws.amazon.com/medialive/home?region={2}#/" \
             "channels/{1}) MediaPackage: ".format(ml_channel_name, ml_channel_id, region)
    mp_links = [
        " [{0}](https://{1}.console.aws.amazon.com/mediapackage/home?region={1}#/channels"
        "/{0})".format(mp_name, region)
        for mp_name in mp_channel_names
    ]
    return header + " , ".join(mp_links) + " \n"
586794349f2e1cc90c48c172b59ab991eafa4d0f
33,310
def paren_int(number):
    """Parse an integer from a string like "(42)" by dropping all parentheses."""
    cleaned = number.replace('(', '')
    cleaned = cleaned.replace(')', '')
    return int(cleaned)
86b86ac0215a12e658f2d7e9de634be7e524f571
33,311
def subtract(value, arg):
    """Return ``value`` minus ``arg``, coercing both operands to int."""
    minuend = int(value)
    subtrahend = int(arg)
    return minuend - subtrahend
2a622bd5fea4acb58fae06c6f645409b2f6c512a
33,313
def sum_numbers(filename):
    """Sum the whitespace-separated integers contained in a file.

    Parameters
    ----------
    filename : str
        Path of a file containing numbers separated by whitespace.

    Returns
    -------
    int
        The sum of all numbers in the file (0 for an empty file).
    """
    # FIX: use a context manager so the handle is closed even when
    # reading or parsing raises (the original left it open on failure).
    with open(filename, 'r') as fd:
        line = fd.read()
    return sum(int(x) for x in line.split())
4b3cc8d1cdc2659e697a2798b417d9655f1d9cf0
33,314
def set_predicted_column(DataFrame, score_variable, threshold=0.5, predicted_label='Predicted', fill_missing=0):
    """Add a binary prediction column by thresholding a score column.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Input frame; modified in place (the prediction column is added to it).
    score_variable : str
        Name of the column holding the score to threshold.
    threshold : float, default 0.5
        Rows with score strictly greater than this are predicted 1.
    predicted_label : str, default 'Predicted'
        Name of the column written with the 0/1 predictions.
    fill_missing : int, default 0
        Intended fill value for missing scores. NOTE(review): the `.fillna`
        below runs *after* the cast to int8, at which point no NaNs can
        remain, so it is effectively a no-op -- confirm the intended order.

    Returns
    -------
    DataFrame : pandas.DataFrame
        The same DataFrame, with the prediction column added.
    """
    # Build a string condition like "score>0.5" and evaluate it vectorized.
    str_condition = '{}>{}'.format(score_variable, threshold)
    DataFrame[predicted_label] = DataFrame.eval(str_condition).astype('int8').fillna(fill_missing)
    #np.where(TestDataset[score_variable]>threshold,1,0)
    return DataFrame
7ca265754f8fe115bd76b2eeaed428756dabae29
33,316
def get_instance_info(inst):
    """
    Retrieves instance information from an instance object (boto-style EC2
    instance, judging by the attribute names) and returns it as a dictionary.
    """
    # Attributes assumed present on every instance object; optional ones are
    # handled with try/except fallbacks further below.
    instance_info = {'id': inst.id,
                     'ami_launch_index': inst.ami_launch_index,
                     'private_ip': inst.private_ip_address,
                     'private_dns_name': inst.private_dns_name,
                     'public_ip': inst.ip_address,
                     'dns_name': inst.dns_name,
                     'public_dns_name': inst.public_dns_name,
                     'state_code': inst.state_code,
                     'architecture': inst.architecture,
                     'image_id': inst.image_id,
                     'key_name': inst.key_name,
                     'placement': inst.placement,
                     # Region derived by dropping the last character of the
                     # placement string (e.g. 'us-east-1a' -> 'us-east-1');
                     # assumes a single-character zone suffix -- TODO confirm.
                     'region': inst.placement[:-1],
                     'kernel': inst.kernel,
                     'ramdisk': inst.ramdisk,
                     'launch_time': inst.launch_time,
                     'instance_type': inst.instance_type,
                     'root_device_type': inst.root_device_type,
                     'root_device_name': inst.root_device_name,
                     'state': inst.state,
                     'hypervisor': inst.hypervisor,
                     'tags': inst.tags,
                     'groups': dict((group.id, group.name) for group in inst.groups),
                     }
    # Optional attributes: fall back to a default when the attribute is absent.
    try:
        instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
    except AttributeError:
        instance_info['virtualization_type'] = None
    try:
        instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
    except AttributeError:
        instance_info['ebs_optimized'] = False
    # Flatten the block device mapping into plain dicts; the value False
    # signals that the mapping could not be read at all.
    try:
        bdm_dict = {}
        bdm = getattr(inst, 'block_device_mapping')
        for device_name in bdm.keys():
            bdm_dict[device_name] = {
                'status': bdm[device_name].status,
                'volume_id': bdm[device_name].volume_id,
                'delete_on_termination': bdm[device_name].delete_on_termination
            }
        instance_info['block_device_mapping'] = bdm_dict
    except AttributeError:
        instance_info['block_device_mapping'] = False
    try:
        instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
    except AttributeError:
        instance_info['tenancy'] = 'default'
    return instance_info
698f01909a67941a230d8c582fec13193150b7df
33,318
def filter_points(points, box):
    """Drop every point lying inside (or on the border of) ``box``.

    ``points`` must support fancy indexing as ``points[rows, :]`` (e.g. a
    numpy array of shape (n, 2)); ``box`` exposes xmin/xmax/ymin/ymax.
    """
    outside = [
        idx for idx, pt in enumerate(points)
        if not (box.xmin <= pt[0] <= box.xmax and box.ymin <= pt[1] <= box.ymax)
    ]
    return points[outside, :]
9b89f67a60fdf482a4586433a526d2ee055132e7
33,319
def convertTime(time):
    """Convert a duration string like '5m' or '2mth' to seconds.

    The string is an integer followed by a unit suffix: s, m, h, d, mth, y.
    Returns -1 for an unknown unit or a non-numeric value.
    """
    time_dict = {"s": 1, "m": 60, "h": 3600, "d": 3600 * 24,
                 "mth": 3600 * 24 * 30,
                 # NOTE(review): inherited value -- 'y' multiplies the month
                 # length by 365 (i.e. 30*365 days); confirm intent.
                 "y": 3600 * 24 * 30 * 365}
    # BUG FIX: the original inspected only the last character, so the
    # documented multi-character unit 'mth' could never match. Try the
    # longest suffixes first so 'mth' wins over 'h'.
    for unit in sorted(time_dict, key=len, reverse=True):
        if time.endswith(unit):
            try:
                val = int(time[:-len(unit)])
            except ValueError:
                return -1
            return val * time_dict[unit]
    return -1
97f23f7b7472b5cf64ac26a1653f70a9d57fdb89
33,320
def get_file_name(file_url):
    """Return the final path component of a file URL or path.

    Works whether or not the input contains a '/'; for a bare file name the
    name itself is returned.
    """
    # BUG FIX: the original used `if file_url.find('/')`, which is falsy for
    # a *leading* slash (index 0, returning None) and truthy when '/' is
    # absent (index -1, then crashing on the rsplit index). rsplit with
    # index -1 handles every case.
    return file_url.rsplit('/', 1)[-1]
3d62696506ad27e8d84ba8d2a8c17b1ff6706553
33,321
from pathlib import Path
import shutil


def copy_assembly(src, dst):
    """Copy the directory ``src`` to ``dst`` and return the destination path.

    Args:
        src (str): The path of the directory that is copied.
        dst (str): The path of the destination directory; must not already
            exist as a directory.

    Returns:
        The path of the destination directory (a ``pathlib.Path``).

    Raises:
        Exception: If the destination directory already exists.
    """
    destination = Path(dst)
    if destination.is_dir():
        raise Exception('destination folder already exists')
    shutil.copytree(Path(src), destination)
    return destination
66334040447907c99cbcd7e3dab884c5fd5a0da7
33,322
def _prune_dockerfile(string, comment_char="#"):
    """Strip comment lines, empty lines, and the last double-newline section
    (the serialize-to-JSON layer) from a Dockerfile string."""
    without_last_layer = '\n\n'.join(string.split('\n\n')[:-1])
    kept_lines = [
        line for line in without_last_layer.split('\n')
        if line and not line.startswith(comment_char)
    ]
    return '\n'.join(kept_lines)
484b1113dff432c56c23a36da507063561ec4a90
33,323
def eq_operator(one: object, another: object) -> bool:
    """Functional form of the equality (`__eq__`) operator.

    :param one: One value to compare
    :param another: Another value to compare
    :returns: `True` if `one == another`.
    """
    result = one == another
    return result
118ed13401b5d3b9a0446197c1f056e3e60c08c4
33,324
def need_attention(status_msg):
    """Return True when a repo status message shows divergence from remote."""
    markers = ("not staged", "behind", "ahead", "Untracked")
    return any(marker in status_msg for marker in markers)
c0a28b32313ce4c6c0b079c27a7274545bf48a80
33,326
import binascii


def formatted_print(msg):
    """Render every frame of a multipart message as a neat debug string.

    Each frame line shows the byte length, then the ASCII text with the hex
    dump in parentheses, or just the hex dump when the frame is not ASCII.
    """
    pieces = ["----------------------------------------\n"]
    for part in msg:
        hex_text = binascii.hexlify(part).decode('ascii')
        frame = "[%03d]" % len(part)
        try:
            frame += "%s" % part.decode('ascii')
            frame += "\t("
            frame += r"0x%s" % hex_text
            frame += ")"
        except UnicodeDecodeError:
            frame += r"0x%s" % hex_text
        pieces.append(frame + '\n')
    return "".join(pieces)
ae3cbbe1c9ffa139808993a9d92fe3ad588e0d14
33,328
def move_telescope_west():
    """Return the LX200-protocol command bytes for "move telescope west"."""
    return b'#:Mw#'
087ada723af757db9655e814dd0ac51bf881297a
33,329
def initMyGraph(ctor):
    """Construct and return a hard-coded sample graph.

    ``ctor`` must be a callable taking a vertex count and returning an empty
    graph with that many vertices and an ``addEdge(u, v)`` method.
    """
    g = ctor(5)
    for u, v in ((0, 1), (1, 0), (1, 1), (1, 2), (4, 0), (4, 2)):
        g.addEdge(u, v)
    return g
4fecca553d2b6d8accadd2f1ce6ccb2a1baed2d7
33,331
import threading


def synchronized(lock=None):
    """Decorator factory that serializes calls to the wrapped callable.

    Example usage:
      @synchronized()
      def operation(self, a, b):
          ...
    """
    mutex = lock if lock is not None else threading.Lock()

    def wrapper(function):
        def new_function(*args, **kwargs):
            # `with` acquires the mutex and releases it even on exception,
            # matching the original acquire/try/finally-release.
            with mutex:
                return function(*args, **kwargs)
        return new_function
    return wrapper
23241ed1046a91d8448e1258bd947e6846326edd
33,332
import functools
def limit_api_logic():
    """
    Return a dict of logic function names -> wrappers that override existing
    api calls to set new default limits and hard limits
    """
    # NOTE(review): this early return disables the whole feature -- every
    # statement below it is unreachable dead code, kept verbatim.
    return {}
    # (default, hard limit) pairs applied through the request context.
    context_limit_packages = {
        'group_show': (5, 20),
        'organization_show': (5, 20),
    }
    # (default, hard limit) pairs applied through the data_dict parameters.
    # NOTE(review): `config` and `core_get` are not defined in this excerpt
    # (presumably CKAN imports) -- confirm before re-enabling this code.
    data_dict_limit = {
        'package_search': (int(config.get('ckan.datasets_per_page', 20)), 300),
        'package_activity_list': (20, 100),
        'recently_changed_packages_activity_list': (20, 100),
        'package_activity_list_html': (20, 100),
        'dashboard_activity_list': (20, 100),
        'dashboard_activity_list_html': (20, 100),
    }
    out = {}
    for name, (default, limit) in context_limit_packages.items():
        action = getattr(core_get, name)
        @functools.wraps(action)
        def wrapper(context, data_dict, default=default, limit=limit, action=action):
            #value = int(context.get('limits', {}).get('packages', default))
            #context.setdefault('limits', {})['packages'] = min(value, limit)
            return action(context, data_dict)
        if hasattr(action, 'side_effect_free'):
            wrapper.side_effect_free = action.side_effect_free
        out[name] = wrapper
    for name, (default, limit) in data_dict_limit.items():
        action = getattr(core_get, name)
        # package_search is special... :-(
        param = 'rows' if name == 'package_search' else 'limit'
        @functools.wraps(action)
        def wrapper(context, data_dict, default=default, limit=limit, action=action, param=param):
            try:
                if int(data_dict.get('offset', '0')) > 1000:
                    return []  # no.
                value = int(data_dict.get(param, default))
            except ValueError:
                return []
            data_dict[param] = min(value, limit)
            return action(context, data_dict)
        if hasattr(action, 'side_effect_free'):
            wrapper.side_effect_free = action.side_effect_free
        out[name] = wrapper
    return out
42d9a9743312e888fb2ee2cd1ff30a6f5f4f56d8
33,333
def cast(type, object):
    """Pretend-cast a value to a type.

    Exists purely for static checking; at runtime the value is returned
    unchanged.
    """
    value = object
    return value
8a695a006f42d7f39db5938058395124390a99c6
33,335
import os
import time


def choose_dir():
    """Ask the user for a directory name, create it, and stamp it with the
    current date/time in ``time_created.txt``.

    Returns the directory name the user typed.
    """
    folder_to_save = input('Type name of directory to save all data being created\n:')
    os.mkdir(folder_to_save)
    os.chdir(folder_to_save)
    # Record when the program was run, for future reference.
    stamp = time.strftime("%Y-%m-%d %H:%M")
    with open("time_created.txt", "w") as text_file:
        text_file.write("Time this program was run: {} \n".format(stamp))
    os.chdir('..')
    return folder_to_save
bd3e9072ecbb1d2b16ecd3cea307f1e200812fc1
33,336
def doc_wrapper(squad_para, title=""):
    """Wrap a SQuAD-format paragraph set into a single document record.

    :param squad_para: paragraphs in SQuAD format.
    :param title: the title of paragraphs.
    :return: dict with 'title' and a one-element 'paragraphs' list.
    """
    return {'title': title, 'paragraphs': [squad_para]}
bcc513bbaa2ba885d242009eaae72e7c5b04aea3
33,337
def cstr(arg, arg_name, default, custom_str=False):
    """Return a config-string fragment for ``arg``, or '' when it equals
    ``default``. A truthy ``custom_str`` overrides the generated name+value."""
    if arg == default:
        return ''
    return custom_str if custom_str else f'_{arg_name}{arg}'
bfdeae45a3426b19f07e855e1f520de646ba3e98
33,339
def smart_round_format(number, precision):
    """Round a number for display, switching to scientific notation below 0.1.

    Args:
        number (float): value to format.
        precision (int): digits after the decimal point (plain form) or
            significant digits (scientific form).

    Returns:
        str: the formatted number.

    Examples:
        >>> smart_round_format(258.658, 2)
        '258.66'
        >>> smart_round_format(0.258658, 2)
        '0.26'
        >>> smart_round_format(0.0000258658, 2)
        '2.59e-05'
    """
    if number >= 0.1:
        # BUG FIX: the original hard-coded round(number, 2) here, silently
        # ignoring the `precision` argument in this branch.
        return str(round(number, precision))
    return ('{:0.' + str(precision) + 'e}').format(number)
9bc2cac03892e868a83e90a0df987692ad2d0a1e
33,340
import pickle


def get_earlier_cpds(month):
    """
    Finds all compounds which were inputted into SureChemBL prior to or
    equal to a given month

    Args:
        month (string): Month, in the form YYYY-MM

    Returns:
        pandas dataframe: dataframe containing SureChemBL patent id, month
        of first entry, and igraph index
    """
    #Read in master compound-date-index dataframe
    agave_fp = "Data/Cpd_Data/master_cpd_date_index_df.p"
    #drive_fp = "G:/Shared drives/SureChemBL_Patents/Cpd_Data/master_cpd_date_index_df.p"
    # FIX: open the pickle inside a context manager so the file handle is
    # closed (the original passed an anonymous open() and leaked it).
    with open(agave_fp, "rb") as fh:
        df = pickle.load(fh)
    # #Small dataframe analysis
    # check_indicies(df)
    # String comparison works because months are zero-padded YYYY-MM.
    return df[df["Month"] <= month]
b6f7c976d523f3c308eb647bb31851b99a8b7856
33,341
import os


def build_jar_files(dir):
    """Run `sbt clean package` inside the given directory.

    Terminates the whole process (via os._exit) with the build's exit status
    when the build fails; returns the zero exit status on success.
    """
    print(f"[info] Building Jar file for: {dir}")
    exit_status = os.system(f"cd {dir}/ && sbt clean package")
    if exit_status:
        # Abort immediately, propagating the non-zero status.
        os._exit(exit_status)
    return exit_status
f1785c823ff8f3a8d609b92f012cd94198d952f6
33,342
def function_example(point_cloud, bool_flag=False):
    """
    Returns the given point cloud, optionally validating its size.

    Parameters
    ----------
    point_cloud : (n,3) numpy array
        Array containing the x-y-z coords of a 3d point cloud.
    bool_flag : boolean, optional
        A boolean flag to toggle error checking.

    Returns
    -------
    out : (n,3) numpy array
        Array containing the x-y-z coords of a 3d point cloud.

    Raises
    ------
    ValueError
        If error checking is enabled and the point cloud exceeds 10,000 points.
    """
    # BUG FIX: the original tested the builtin `bool` (always truthy) instead
    # of the `bool_flag` parameter, so the flag could never disable the check.
    if bool_flag and len(point_cloud) > 10000:
        raise ValueError('length of point_cloud cannot exceed 10,000 points')
    out = point_cloud
    return out
d4449ea8d9ea9db61a712679236ddcb903c41adc
33,343
def make_aa_to_codon_backtable(codontable):
    """Invert a codon table, mapping each amino acid to its codons.

    Parameters
    ----------
    codontable
        dict of codons in the format `{'TTT': 'F', 'TTC': 'F', 'TTA': 'L', ...`

    Returns
    -------
    dict in the format `{'F': ['TTT', 'TTC'], 'L': ['TTA', ...`
    """
    backtable = {aa: [] for aa in set(codontable.values())}
    for codon, aa in codontable.items():
        backtable[aa].append(codon)
    return backtable
3bd8327e639a11742a1914bf7234769b3f240e6e
33,344
def fitness(bits):
    """
    Fitness of a bitstring solution: the sum of its bits.

    :param bits: A solution
    :return: A fitness value
    """
    total = 0
    for bit in bits:
        total += bit
    return total
ecc86a1e91298f6ecd1aebb3796afdfbf9ab628d
33,345
import random


def get_witness(problem, assignment, side_obfuscator=0, shift=-1):
    """Build the partial-sum witness for a partition-problem assignment.

    Given an instance of a partition problem via a list of numbers (the
    problem) and a list of (-1, 1), the assignment satisfies the problem
    when their dot product is 0 (asserted below). The witness is the running
    sum, optionally sign-flipped by ``side_obfuscator`` and offset by
    ``shift`` (both randomized when left at their defaults).

    Example problem: [4,11,8,1]
    Example assignment: [1,-1,1,-1]
    """
    if side_obfuscator == 0:
        side_obfuscator = 1 - 2 * random.randint(0, 1)
    running = 0
    largest = 0
    witness = [running]
    assert len(problem) == len(assignment)
    for num, side in zip(problem, assignment):
        assert side == 1 or side == -1
        running += side * num * side_obfuscator
        witness.append(running)
        largest = max(largest, num)
    # make sure that it is a satisfying assignment
    assert running == 0
    if shift == -1:
        shift = random.randint(0, largest)
    return [w + shift for w in witness]
118be68e1c4a602c0b8916b74be2c21b32c0d883
33,348
import numpy


def calculate_fwhm_camera(
    regions,
    rej_low: int = 1,
    rej_high: int = 3,
):
    """Calculates the FWHM from a list of detections with outlier rejection.

    Parameters
    ----------
    regions
        The pandas data frame with the list of regions. Usually an output from
        ``sep``. Must include columns ``valid``, ``a``, and ``b``.
        NOTE(review): only ``a`` and ``b`` are actually read below; ``valid``
        is never used -- confirm whether filtering on it was intended.
    rej_low
        How many of the lowest ranked FWHM measurements to remove for the
        average.
    rej_high
        How many of the highest ranked FWHM measurements to remove for the
        average.

    Returns
    -------
    fwhm,a,b,ell,nkeep
        The FWHM measured as the average of the circle that envelops the
        minor and major axis after outlier rejection, the averaged semi-major
        and semi-minor axes, the ellipticity, and the number of data points
        kept.
    """
    # Sentinel values for an empty detection list.
    if len(regions) == 0:
        return -999, -999, -999, -999, 0
    # Per-region FWHM proxy: diameter of the circle enclosing the ellipse
    # (twice the larger of the two semi-axes).
    fwhm = numpy.max([regions.a * 2, regions.b * 2], axis=0)
    fwhm_argsort = numpy.argsort(fwhm)
    # When too few points remain after trimming both ends, keep everything.
    if len(fwhm) - (rej_low + rej_high) <= 0:
        nkeep = len(fwhm)
    else:
        # Drop the rej_low smallest and rej_high largest ranked measurements.
        fwhm_argsort = fwhm_argsort.tolist()[rej_low : len(fwhm_argsort) - rej_high]
        nkeep = len(fwhm_argsort)
    fwhm = numpy.mean(fwhm[fwhm_argsort])
    a = numpy.mean(regions.a.iloc[fwhm_argsort])
    b = numpy.mean(regions.b.iloc[fwhm_argsort])
    ell = 1 - b / a
    return fwhm, a, b, ell, nkeep
7a38ad772fbb36dba2da6654fcdae78ec2ebae82
33,349
def get_disk_info(metadata):
    """Return (hdd_size_gb, ssd_size_gb) requested by a task's runtime attributes.

    Modified from:
    https://github.com/broadinstitute/dsde-pipelines/blob/develop/scripts/calculate_cost.py
    Modified to return (hdd_size, ssd_size)

    The boot disk is lumped in with the requested disk and assumed to be the
    same type. Returns (0, 0) when the disk size cannot be determined.
    """
    runtime = metadata["runtimeAttributes"] if "runtimeAttributes" in metadata else {}
    if "disks" not in runtime:
        # We can't tell the disk size in this case, so just return nothing.
        return float(0), float(0)
    boot_disk_gb = 0.0
    if "bootDiskSizeGb" in runtime:
        boot_disk_gb = float(runtime["bootDiskSizeGb"])
    # Disks string looks like "<name> <size> <type>".
    (name, disk_size, disk_type) = runtime["disks"].split()
    if disk_type == "HDD":
        return float(disk_size) + boot_disk_gb, float(0)
    if disk_type == "SSD":
        return float(0), float(disk_size) + boot_disk_gb
    return float(0), float(0)
95895954065e4d5aa609a717b491ccb34c2d3b7b
33,350
def calc_eval_metrics(aa_match_binary_list, orig_total_num_aa, pred_total_num_aa):
    """Calculate evaluation metrics using amino acid matches.

    Parameters
    ----------
    aa_match_binary_list : list of lists
        List of amino acid matches in each predicted peptide; each entry is a
        tuple whose first element is the per-AA match indicators and whose
        second element is the whole-peptide match flag.
    orig_total_num_aa : int
        Number of amino acids in the original peptide sequences.
    pred_total_num_aa : int
        Number of amino acids in the predicted peptide sequences.

    Returns
    -------
    aa_precision : float
        Correct AA predictions divided by all predicted AAs.
    aa_recall : float
        Correct AA predictions divided by all original AAs.
    pep_recall : float
        Correct peptide predictions divided by all original peptides.
    """
    eps = 1e-8  # guards against division by zero on empty inputs
    correct_aa = sum(sum(entry[0]) for entry in aa_match_binary_list)
    correct_pep = sum(entry[1] for entry in aa_match_binary_list)
    aa_precision = correct_aa / (pred_total_num_aa + eps)
    aa_recall = correct_aa / (orig_total_num_aa + eps)
    pep_recall = correct_pep / (len(aa_match_binary_list) + eps)
    return aa_precision, aa_recall, pep_recall
c0c843a3bc26587bdd6607dde4ad958e01fe38a2
33,351
import math


def find_first_divisor(N):
    """Find the first divisor of N.

    Args:
        N: an integer to be factored

    Returns:
        an integer representing the first divisor of N (N itself if prime)
    """
    limit = int(math.sqrt(N))
    candidate = 2
    while candidate <= limit:
        if N % candidate == 0:
            return candidate
        candidate += 1
    return N
8ea0aa52341ce3d11ef27e7eafd6665a93e201ca
33,352
import os
import stat


def inode(path):
    """Return the inode number of the file at ``path``."""
    stat_result = os.stat(path)
    return stat_result[stat.ST_INO]
1c87e56e9bd5cb940bf22ca10fa037fbbb4ba379
33,354
def token_value(token):
    """Return the ``value`` attribute of a token."""
    return getattr(token, 'value')
32b0697328b30df5c36e88db426b78e495be3c72
33,355
def to_dict_index(df):
    """
    Pandas dataframe to dictionary (index method)

    Parameters
    ---------------
    df
        Dataframe

    Returns
    --------------
    dict
        dict like {index -> {column -> value}}
    """
    orientation = 'index'
    return df.to_dict(orientation)
b30a84b581c2ea7392958a3b0752a3126d360c61
33,356
def __calc_nr(entered_week: int, beginning_week: int, year: int) -> int:
    """Count how many report booklets are done so far (1-indexed).

    Each completed year contributes 52 weeks; within the current year the
    weeks elapsed since ``beginning_week`` (inclusive) are added.
    """
    weeks_from_full_years = (year - 1) * 52
    weeks_this_year = entered_week - beginning_week + 1
    return weeks_from_full_years + weeks_this_year
f3108c852794173721ea9c1a7264248fdeb98f33
33,357
def proxied_attribute(local_attr, proxied_attr, doc):
    """Create a property forwarding get/set/delete of ``proxied_attr``
    through the object stored in the local attribute ``local_attr``.
    """
    def getter(self):
        target = getattr(self, local_attr)
        return getattr(target, proxied_attr)

    def setter(self, value):
        target = getattr(self, local_attr)
        setattr(target, proxied_attr, value)

    def deleter(self):
        target = getattr(self, local_attr)
        delattr(target, proxied_attr)

    return property(getter, setter, deleter, doc)
0a751a980db6de45bbafd12a24aeb743284750a3
33,358
import logging


def get_child_logger(name: str):
    """Return the logger registered under ``name``.

    The logger's configuration is expected to come from 'log.ini'.
    """
    return logging.getLogger(name)
3b78db85042a3ea318ebc3d92b0951ff4c88634f
33,359
import six
import re


def listsearch(query, item):
    """Match ``query`` against one entry of an input/output file list.

    Entries are either plain file names (strings) or ``[name, hash]`` pairs
    when hashing is enabled; the query is matched against both the name and
    the hash, transparently.

    Parameters:
        query : str
            The search query
        item : str or list containing two strings
            A file name or a list containing a file name and hash

    Returns:
        boolean
    """
    file_hash = ''
    if not isinstance(item, six.string_types):
        file_hash = item[1]
        item = item[0]
    matched = re.search(query, item) or re.search(query, file_hash)
    return bool(matched)
143debe27f3a206021aa42272da08763a8cae425
33,360
def get_type_qualname(cls):
    """Get a string uniquely identifying the supplied class."""
    if isinstance(cls, str):
        return cls
    module = cls.__module__
    if module == "__main__":
        return cls.__qualname__
    return "{}.{}".format(module, cls.__qualname__)
968346708d2f0f8be6c92d84e3d65e4050904f86
33,363
def split_indices(a: int, b: int, val_split=0.15, test_split=0.15):
    """Compute the index boundaries for a diversified train/val/test split.

    The range is halved; each half donates its trailing ``val_split`` and
    ``test_split`` fractions (computed on a half) to validation and test.

    :param a: First index.
    :param b: Last index.
    :param val_split: Float describing the fraction used for validation.
    :param test_split: Float describing the fraction used for testing.
    :return: A 7-tuple of integers (a, val1, test1, data, val2, test2, b):
        training runs from a and from data; val1/val2 start the validation
        splits; test1/test2 start the test splits; b is the last index.
    """
    half = int((b - a) / 2)
    val_len = int(half * val_split)
    test_len = int(half * test_split)
    mid = a + half
    return (
        a,
        mid - val_len - test_len,
        mid - test_len,
        mid,
        b - val_len - test_len,
        b - test_len,
        b,
    )
6054e171d67405c2ba2bd653a2f9e08e8781c4f4
33,364
import os


def is_valid_dir(aparser, arg):
    """
    Validate that the directory provided is usable with Surround.

    :param aparser: the parser being used
    :type aparser: <class 'argparse.ArgumentParser'>
    :param arg: the argument containing the directory path
    :type arg: string
    :return: the path if valid, or nothing
    :rtype: string
    """
    if not os.path.isdir(arg):
        aparser.error("Invalid directory %s" % arg)
        return None
    if not os.access(arg, os.W_OK | os.X_OK):
        aparser.error("Can't write to %s" % arg)
        return None
    return arg
bf8149e8fe62b43a14d32fa245f20d6b99c75584
33,365
from typing import Union
from typing import List


def strip_seed(seed: Union[List[int], int]) -> int:
    """Unwrap a one-element seed list to its value; pass bare ints through."""
    if not isinstance(seed, list):
        return seed
    assert len(seed) == 1, "Expected list of length 1."
    return seed[0]
c8086c4f86e3600b8a5dedd948bf0507f1fd792d
33,366
def grad_costo(x, y, w, b):
    """Compute the gradient of the least-squares cost over the data.

    Parameters
    ----------
    x : ndarray of shape [M, n] with the input data
    y : ndarray of shape [M,] with the target values
    w : ndarray of shape [n,] with the weights
    b : float, the bias

    Returns
    -------
    dw, db
        ``dw`` has the shape of ``w`` and holds the gradient of the cost
        with respect to ``w``; ``db`` is the derivative with respect to
        ``b``.
    """
    residual = y - (x @ w + b)
    n_samples = y.shape[0]
    dw = -x.T @ residual / n_samples
    db = -residual.mean()
    return dw, db
1859744020c3b6aaa06e098d29b9d77c8d89ac6d
33,367
def clean_args_references(args):
    """Strip fields that scripts only used as references, in place.

    Removes a truthy 'all_fields' entry (unless 'new_fields' is present) and
    the 'name' key of a dict-valued 'objective_field'. Returns ``args``.
    """
    if args.get("all_fields") and "new_fields" not in args:
        del args["all_fields"]
    objective = args.get("objective_field")
    if isinstance(objective, dict) and "name" in objective:
        del objective["name"]
    return args
4ab577e5697ff1798ba5867b56ebe697b30892e4
33,368
def class_balancing(X, onehot_lab, labels):
    """
    Undersample the overrepresented class (background) so that road and
    non-road patches end up in a 1:1 ratio.

    Input:
      X: train or test image array, indexed as X[i, :, :, :]
      onehot_lab: one-hot coded labels for each image in X (two classes)
      labels: per-image labels aligned with X and onehot_lab
    Output:
      X, onehot_lab, labels: data with a balanced class ratio. Will have
      less data than the original X.
    """
    # Count examples per class; column 0 of a one-hot row marks class 0.
    c0 = 0
    c1 = 0
    for i in range(len(onehot_lab)):
        if onehot_lab[i][0] == 1:
            c0 = c0 + 1
        else:
            c1 = c1 + 1
    print ('Number of data points per class: c0 = ' + str(c0) + ' c1 = ' + str(c1))
    print ('----------Balancing training data...----------')
    # Keep only the first min(c0, c1) examples of each class.
    min_c = min(c0, c1)
    idx0 = [i for i, j in enumerate(onehot_lab) if j[0] == 1]
    idx1 = [i for i, j in enumerate(onehot_lab) if j[1] == 1]
    new_indices = idx0[0:min_c] + idx1[0:min_c]
    print('Original training data size: {s:}'.format(s=X.shape))
    X = X[new_indices,:,:,:]
    onehot_lab = onehot_lab[new_indices]
    labels = labels[new_indices]
    # NOTE(review): `size` is computed but never used.
    size = onehot_lab.shape[0]
    # Recount to report the new, balanced distribution.
    c0 = 0
    c1 = 0
    for i in range(len(onehot_lab)):
        if onehot_lab[i][0] == 1:
            c0 = c0 + 1
        else:
            c1 = c1 + 1
    print('Number of data points per class: c0 = ' + str(c0) + ' c1 = ' + str(c1))
    print('New training data size: {s:}'.format(s=X.shape))
    return X, onehot_lab, labels
c5e2ad1fe59de20c9a06753e934eac86d9fc334c
33,371
import re


def clean_taxa(taxa):
    """Clean up SILVA taxonomic names, keeping the most specific clean label.

    Input:
        - taxa: list of SILVA taxa names
    """
    # Substitutions applied in order; the literal ones contain no regex
    # metacharacters, so re.sub matches the original str.replace behavior.
    substitutions = (
        (r";__", ""),
        (r";\s*D_[1-9]__metagenome", ""),
        (r"Ambiguous_taxa", ""),
        (r';\s*D_[1-9]__uncultured.*', ''),
        (r'\s*D_[0-9]__', ''),
        (r'.*;', ''),
    )
    cleaned = list(taxa)
    for pattern, repl in substitutions:
        cleaned = [re.sub(pattern, repl, t) for t in cleaned]
    return cleaned
522929f752dc93a0de6c327d77042c1e9cb3ba04
33,373
def roa(balance_df, financials_df):
    """Check whether the ROA (Return on Assets) is positive.

    Explanation of ROA: https://www.investopedia.com/terms/r/returnonassets.asp

    balance_df = Balance Sheet of the specified company
    financials_df = Financial Statement of the specified company
    """
    assets = balance_df.iloc[balance_df.index.get_loc("Total Assets"), 0]
    income = financials_df.iloc[financials_df.index.get_loc("Net Income"), 0]
    ratio = income / assets
    return True if ratio > 0 else False
7c639b140584b7147bcdd914b80e5723c9590b19
33,375
def match_target_amplitude(aChunk, target_dBFS):
    """Normalize the given audio chunk by applying the gain that brings its
    level to ``target_dBFS``."""
    gain = target_dBFS - aChunk.dBFS
    return aChunk.apply_gain(gain)
3ce98b47ec1f147aff3fb0dbb4acf78d4c42a1a2
33,377
def format_element(candidate, markup, anchor):
    """Format the anchor — the why you want to overwrite me!

    Beware of markup rest! Markup rest is called when specified, on the rest
    of the elements.
    """
    _from, _to, token, element = candidate
    element.setdefault('token', token)
    if anchor:
        the_anchor = markup.get('anchor_pattern').format(**element)
    else:
        the_anchor = token
    element[markup.get('decorate_anchor_key')] = the_anchor
    decorate_markup = markup.get('decorate')
    if not decorate_markup:
        return the_anchor
    return decorate_markup.get('decorate_pattern').format(**element)
f503a796d2c71270c063e3fb110d4a13a84af229
33,378
def boxMean(box):
    """Simple mean of the box: pass-through to ``box.box_data.mean()``."""
    return box.box_data.mean()
7cd5459840f830ebd442b69be16423be42023b2e
33,382
import os


def get_files(folder, topdown):
    """Map folder path(s) to the files they contain.

    :param folder: root directory to inspect
    :param topdown: when true, walk recursively and return a dict keyed by
        every (sub)folder; when false, list only the files directly inside
        ``folder`` (files under subfolders are excluded)
    :return: dict mapping each directory path to a list of file names
    """
    if not topdown:
        direct = [entry for entry in os.listdir(folder)
                  if os.path.isfile(os.path.join(folder, entry))]
        return {folder: direct}
    # os.walk recursively visits every subdirectory, yielding its files.
    return {root: files for root, _dirs, files in os.walk(folder)}
85fd046ce3c03ab6b96530a1e5453c45517226aa
33,383
import math


def calc_spec_smh(T, specs):
    """Calculate standard-state entropies minus enthalpies for all species.

    Evaluates the 7-coefficient thermodynamic polynomial per species, using
    the low-temperature coefficients when ``T <= sp.Trange[1]`` and the
    high-temperature coefficients otherwise.

    Parameters
    ----------
    T : float
        Temperature of gas mixture.
    specs : list of SpecInfo
        List of species.

    Returns
    -------
    spec_smh : list of float
        List of species' standard-state entropies minus enthalpies.
    """
    log_T = math.log(T)
    # Pre-scaled temperature powers shared by every species; the products
    # mirror the original operation order exactly (bitwise-identical floats).
    sq = T * T
    t1 = T / 2.0
    t2 = sq / 6.0
    t3 = sq * T / 12.0
    t4 = sq * T * T / 20.0

    def poly(c):
        # a0*(ln T - 1) + a1*T/2 + a2*T^2/6 + a3*T^3/12 + a4*T^4/20 - a5/T + a6
        return (c[0] * (log_T - 1.0) + c[1] * t1 + c[2] * t2 +
                c[3] * t3 + c[4] * t4 - (c[5] / T) + c[6])

    return [poly(sp.lo if T <= sp.Trange[1] else sp.hi) for sp in specs]
c773a0807fa5b4b199e3db775f6b455d15b643be
33,384
from typing import Dict


def construct_query_parts(parts: Dict[str, str]) -> str:
    """Build a query string from key/value pairs.

    :param parts: mapping of parameter names to string values; any non-dict
        argument is assumed to already be a query string and is returned
        unchanged
    :return: ``key=value`` pairs joined with ``&``, or ``parts`` as-is
    """
    if not isinstance(parts, dict):
        return parts
    return '&'.join(key + '=' + value for key, value in parts.items())
4693f09958f4a3bafb86a501cf4fd96278a354b2
33,385
def dummy_unit_for_node(_spinedb_importer, _node_name: str, _unit_name: str, _direction: str, _alternative='Base', **kwargs):
    """Attach a dummy unit (spill or fueling) to a node in a Spine importer.

    :param _spinedb_importer: an instance of class gdx2spinedb.import_ts.SpineDBImporter
    :param _node_name: the node that needs spill or fueling units, must be
        created in advance in the _spinedb_importer
    :param _unit_name: the dummy unit
    :param _direction: SpineOpt direction, "to_node" (for fueling) or
        "from_node" (for spill)
    :param _alternative: alternative name the parameter values are stored under
    :param kwargs: parameter name/value pairs for the unit-node relationship,
        e.g. unit_capacity, fuel_cost, vom_cost; pairs with a falsy key or
        value are skipped
    :return: the updated _spinedb_importer
    :raises ValueError: when _direction is not "from_node" or "to_node"
    """
    if _direction not in ("from_node", "to_node"):
        # Bug fix: the original executed a bare `raise` with no active
        # exception, which surfaces as "RuntimeError: No active exception
        # to re-raise". Raise an explicit, catchable error instead.
        raise ValueError("Wrong direction term, only 'from_node', 'to_node' and 'auto' are accepted.")

    _spinedb_importer.objects.append(("unit", _unit_name))
    _spinedb_importer.relationships.append((f"unit__{_direction}", (_unit_name, _node_name)))

    # Only pairs with truthy key AND value become relationship parameters.
    _spinedb_importer.relationship_parameter_values += [
        (f"unit__{_direction}", (_unit_name, _node_name), key, value, _alternative)
        for key, value in kwargs.items() if all([key, value])
    ]
    return _spinedb_importer
28ae5b0c0a1d70c6c04bb022d6ad16c84a8ef14d
33,387
import requests def get_resolution(pdb_id): """ Get the resolution for a PDB id, or None case it doesn't have. Queries the RCSB GraphQL endpoint (data.rcsb.org) for pdbx_vrpt_summary.PDB_resolution. Network I/O: raises requests errors on connectivity problems. NOTE(review): despite the docstring, a missing entry/summary yields None from GraphQL and the chained subscripting below would then raise TypeError -- confirm against live responses. """ # The (trusted) PDB id is interpolated directly into the GraphQL query. ret = requests.post( "https://data.rcsb.org/graphql", json={ "query": f""" {{ entry(entry_id: "{pdb_id}") {{ pdbx_vrpt_summary {{ PDB_resolution }} }} }} """ }, ) data = ret.json() # Unwrap the GraphQL envelope: data -> entry -> summary -> resolution. resol = data["data"]["entry"]["pdbx_vrpt_summary"]["PDB_resolution"] return resol
76249898e0b159235e8c9e5a3e6ede2c23c0e565
33,388
def calculate_deployment_wait_time(test_obj, failure_count=0):
    """Return the number of seconds to wait before the next deployment poll.

    The base wait derives from the last response time (slow responses back
    the poller off) and consecutive failures add an extra penalty. Max
    response time is 10s; as we approach that, bad things happen. This is
    the place for an exponential backoff if one is ever needed; other
    possible inputs: deploy_results.avg_response_time, outstanding
    deployments, current scale.

    :param test_obj: object exposing ``deploy_results.last_response_time``
    :param failure_count: number of consecutive failures observed
    :return: wait time in seconds
    """
    deploy_results = test_obj.deploy_results

    # Base wait from responsiveness of the last request.
    wait_time = 1
    if deploy_results.last_response_time < 1:
        wait_time = 1
    elif deploy_results.last_response_time > 8:
        wait_time = 5

    # Failure back-off. Bug fix: the original used `failure_count > 7`,
    # leaving exactly 7 failures with no extra wait at all.
    if 3 < failure_count < 7:
        wait_time += 5
    elif failure_count >= 7:
        wait_time += 10

    return wait_time
d6f195c3963686e5ad955c1770587de374893b09
33,389
import importlib


def timbral_measures(fname, measure):
    """Calculate one timbral characteristic of an audio file.

    We typically only ask for roughness to evaluate if the decoder performed
    well; other measures take extra parameters (e.g. fft window size) that
    are not exposed here.

    Args:
        fname (str): full path of the audio file
        measure (str): must be exactly one of
            Timbral_Hardness, Timbral_Depth, Timbral_Brightness,
            Timbral_Roughness, Timbral_Warmth, Timbral_Sharpness,
            Timbral_Booming

    Return:
        output (float)
    """
    # Each measure lives in its own submodule of timbral_models and exposes
    # a function named after the measure in lower case.
    module = importlib.import_module('.' + measure, package='timbral_models')
    measure_fn = getattr(module, measure.lower())
    return measure_fn(fname)
d5554906b6d11a78b8dfb23d217e812f61c2a334
33,390
def graycode_unrank(k):
    """
    Return the *graycode* in position `k` respect binary
    reflected construction.

    Of course, argument `k` should be a *non-negative* integer.

    Examples
    ========

    >>> bin(graycode_unrank(0b101100110))
    '0b111010101'
    """
    # XOR-ing k with its own right shift yields the reflected Gray code.
    return k ^ (k >> 1)
8e7b9005398d5c01f23f337310efb74e4f984c84
33,391
import gzip
import json


def load_gzip(file):
    """Read a gzip-compressed JSON file.

    :param file: path of the gzip file to read
    :return: the decoded JSON object contained in the file
    """
    with gzip.open(file, "rb") as handle:
        return json.loads(handle.read())
10b2639b30fd90a06c9182ad15a0beb5c09e8efd
33,393
import csv def _csv_sniff(f): """ Sniff using csv module whether or not a csv file (csv or gz) has a header. Arguments: f (filehandle) : filehandle of the file to be read """ sniff_size = 2**20 - 1 dialect = csv.Sniffer().sniff(f.read(sniff_size)) f.seek(0) has_header = csv.Sniffer().has_header(f.read(sniff_size)) f.seek(0) return dialect, has_header
f03f684d00fff20ac0f8c17fbbd4811c32aae9aa
33,394
def _arch_file_filter(arch_members, fname=None): """Filter func to return one archive member with name 'fname' of an archive """ return [f for f in arch_members if f.name == fname]
6a19c3b8f7d2faa410a21922b5a5f6f3e0fd380c
33,397
def prepend_dollar_sign_to_ticker(list, str):
    """Add a prefix (e.g. a dollar sign) to every ticker in the list.

    NOTE: the parameter names shadow the ``list``/``str`` builtins; they are
    kept unchanged for backward compatibility with keyword callers.
    """
    # Build the format pattern once, then apply it to every ticker.
    pattern = str + '{0}'
    return [pattern.format(ticker) for ticker in list]
d192e5450dc7d7793a7daf06a8578433fefda35e
33,398
def banner() -> None:
    """Print the project's ASCII-art banner to stdout.

    Reads the art from ``src/interface/art.txt`` (relative to the current
    working directory) and prints it wrapped in rich-style ``[red]...[/]``
    markup. Returns None (the return value of ``print``).
    Raises FileNotFoundError when the art file is missing.
    """
    with open("src/interface/art.txt", "r") as file:
        return print(f"[red]{file.read()}[/]")
5e3e0dab46e7e6e33fad90a516490151c80f3719
33,399
def linear_interpolate(a, b, v1, v2, i):
    """Linearly map ``i`` from the interval [v1, v2] onto [a, b].

    When ``v1 == v2`` the mapping is degenerate and ``a`` is returned.
    """
    if v1 == v2:
        return a
    fraction = (i - v1) / (v2 - v1)
    return a + (b - a) * fraction
dd686797f5311ff08ef5c0f7bb3642344ce8c705
33,401
def mk_crs(data_id, data):  # measurement group 7
    """
    transforms a m-crs.json form into the triples used by insertMeasurementGroup
        to store each measurement that is in the form
    :param data_id: unique id from the json form
    :param data: data array from the json form
    :return: The list of (typeid, valType, value) triples that are used by
        insertMeasurementGroup to add the measurements
    """
    triples = [(220, 2, data_id)]
    # Questions cq1..cq6 map to measurement type ids 42..47 (valType 7).
    for question, type_id in enumerate(range(42, 48), start=1):
        triples.append((type_id, 7, data['cq' + str(question)]))
    return triples
e79d4b5d8f322db63e248c2999ffaecd5ff94e00
33,402
def cat_from(strlist, v, suf):
    """
    Concatenate sublist of strings
    :param strlist: list of strings
    :param v: sublist starting position
    :param suf: glue string placed between elements
    :return: concatenated string
    """
    tail = strlist[v:]
    return suf.join(tail)
1c4e2c0a4e7e8861c477bfd75e23c36a8ec8c370
33,405
def process_stochastic_results(data, results, constellation, scenario, parameters):
    """
    Process results.

    Expands per-iteration capacity results into per-user capacity estimates
    over a sweep of population densities (0.1, then 1..5 users per km^2).

    Parameters
    ----------
    data : unused
        NOTE(review): this parameter is never read in the function body.
    results : pandas.DataFrame
        Per-iteration results with columns 'constellation',
        'number_of_satellites', 'capacity_kmsq', 'iteration',
        'satellite_coverage_area'.
    constellation : str
        Constellation name ('Starlink', 'OneWeb' or 'Kuiper').
    scenario : sequence
        Only scenario[0] is recorded in the output rows.
    parameters : dict
        Keyed by lower-cased constellation name; must provide
        'overbooking_factor'.

    Returns
    -------
    list of dict
        One output row per (density, matching result) combination.
    """
    output = []

    overbooking_factor = parameters[constellation.lower()]['overbooking_factor']

    for i in range(0, 6):
        if i == 0:
            i = 0.1  # replace the zero-density case with a small non-zero value

        for idx, result in results.iterrows():
            if constellation.lower() == result['constellation']:
                # Keep only the full-deployment runs for each constellation.
                if constellation == 'Starlink':
                    if not result['number_of_satellites'] == 5040:
                        continue
                if constellation == 'OneWeb':
                    if not result['number_of_satellites'] == 720:
                        continue
                if constellation == 'Kuiper':
                    if not result['number_of_satellites'] == 3240:
                        continue

                users_per_km2 = i

                # Only a fraction of subscribers are active at once.
                active_users_km2 = users_per_km2 / overbooking_factor

                if active_users_km2 > 0:
                    per_user_capacity = result['capacity_kmsq'] / active_users_km2
                else:
                    per_user_capacity = 0

                output.append({
                    'scenario': scenario[0],
                    'constellation': constellation,
                    'iteration': result['iteration'],
                    'number_of_satellites': result['number_of_satellites'],
                    'satellite_coverage_area': result['satellite_coverage_area'],
                    'pop_density_km2': i,
                    'users_per_km2': users_per_km2,
                    'active_users_km2': active_users_km2,
                    'per_user_capacity': per_user_capacity,
                })

    return output
003759bd494a4cc85b92de99fe6258f9add0abee
33,408
def filter_event(event, happening_before):
    """Decide whether an event should be kept.

    The 'status', 'visibility' and 'self' keys only show up when using the
    API; events fetched from the iCal, JSON or RSS feeds lack them and are
    filtered by start time alone.

    :param event: event dict; must contain 'time'
    :param happening_before: cutoff timestamp (exclusive)
    :return: True when the event is upcoming, public, not announced by us,
        and starts before the cutoff
    """
    if event.get('status', 'upcoming') != 'upcoming':
        return False
    if event.get('visibility', 'public') != 'public':
        return False
    if 'self' in event and 'announce' in event['self']['actions']:
        return False
    return event['time'] < happening_before
f22a3fb6007cbe3f9ed5dfb57d62462c632a9a78
33,409
import json import argparse def _opt_json_file(filename): """Parses a filename as JSON input if defined :param filename: The path to the file to parse :type filename: basestring :return: A decoded JSON object if a filename was specified :rtype: dict|list|NoneType """ if filename: return json.load(argparse.FileType('r')(filename))
9affa72e723822ae530bc705196df7432b6fe2fb
33,410
def p2roundup(val: int, align: int) -> int:
    """
    Round up `val` to the next `align` boundary (align is a power of two).
    """
    # Fill the low bits of (val - 1), then step to the next multiple.
    mask = align - 1
    return ((val - 1) | mask) + 1
639793ab502297ecdfbf243084ccba31fdcc2a31
33,411
import inspect


def list_functions(module):
    """
    List top-level function name & function
    :param module: the module
    :return: dict mapping each function name to the function object
    """
    return {name: func
            for name, func in inspect.getmembers(module, inspect.isfunction)}
5c0b97101de64c4c48db92209547c9e1c2675a29
33,412
def fix_taiko_big_drum(ticks, hitsounds):
    """Remove finish hitsounds (big drums) from notes followed by a note.

    A "finish" (bit value 4) on tick ``t`` is dropped whenever another note
    exists on tick ``t + 1``.

    :param ticks: list of note ticks, parallel to ``hitsounds``
    :param hitsounds: list of hitsound bitmasks; modified in place
    :return: the (mutated) ``hitsounds`` list
    """
    # Perf fix: membership tests against the list were O(n) per note,
    # making the whole pass O(n^2); a set gives O(1) lookups.
    tick_set = set(ticks)
    for i, tick in enumerate(ticks):
        if tick + 1 in tick_set and hitsounds[i] & 4:  # has finish == big drum
            hitsounds[i] -= 4
    return hitsounds
3b68a10752d37ba433e9d063f6a024d63a303f63
33,414
def GWpower(clm, harmvals):
    """
    Construct the GW power flowing into each pixel

    ``clm`` is a flat array of coefficients indexed as ``l**2 + m``;
    ``harmvals[l][m]`` holds the corresponding harmonic values.
    """
    total = 0.
    for ell, row in enumerate(harmvals):
        for m, harm in enumerate(row):
            total += clm[ell ** 2 + m] * harm
    return total
49a72b15806b78c7b294d89eb471da5ae2fb298a
33,417
def alpha_synapse_params(lpu):
    """
    Generate AlphaSynapse params for the given LPU.

    :param lpu: LPU name; lower-case spellings accepted for BU/LAL/NO/CRE
    :return: fresh dict of AlphaSynapse parameters
    :raises ValueError: for unrecognized LPU names
    """
    # Cleanup: the original defined unused locals (k, s, s1) and repeated
    # near-identical dict literals across a long if/elif chain.
    canonical = {'bu': 'BU', 'lal': 'LAL', 'no': 'NO', 'cre': 'CRE'}.get(lpu, lpu)

    # gmax kept as the original arithmetic expressions to preserve the
    # exact float values (e.g. 1e-3*1, 2e-3*10).
    table = {
        'BU': {'conductance': True, 'ad': 0.16, 'ar': 0.11,
               'gmax': 1e-3 * 1, 'reverse': -65.},
        'EB': {'conductance': True, 'ad': 0.16, 'ar': 0.11,
               'gmax': 3e-3, 'reverse': 65.},
        'FB': {'conductance': True, 'ad': 0.16, 'ar': 0.11,
               'gmax': 1e-2, 'reverse': 65.},
        'PB': {'conductance': True, 'ad': 0.19, 'ar': 0.11,
               'gmax': 2e-3 * 10, 'reverse': 65.},
        'LAL': {'conductance': True, 'ad': 0.19, 'ar': 0.11,
                'gmax': 2e-3, 'reverse': 65.},
        'NO': {'conductance': True, 'ad': 0.19, 'ar': 0.11,
               'gmax': 2e-3, 'reverse': 65.},
        'CRE': {'conductance': True, 'ad': 0.19, 'ar': 0.11,
                'gmax': 2e-3, 'reverse': 65.},
    }
    try:
        # Return a copy so callers may mutate their dict freely.
        return dict(table[canonical])
    except KeyError:
        raise ValueError('unrecognized LPU name') from None
7a3e436042e3b2e6c7311ca4414aaf4f68db191c
33,418
from typing import Tuple def _unpack_query_timestamp(packed_timestamp: bytes) -> Tuple[bytes, bool]: """Split the result of :func:`katsdptelstate.utils.pack_query_timestamp`. Returns ------- value Boundary value include Whether the boundary value is inclusive (false for ``-`` and ``+``). """ if packed_timestamp in {b'-', b'+'}: return packed_timestamp, False elif packed_timestamp[:1] == b'[': return packed_timestamp[1:], True elif packed_timestamp[:1] == b'(': return packed_timestamp[1:], False else: raise ValueError('packed_timestamp must be -, +, or start with [ or (')
7577b1bdff625e12493b16d216706455b0eed793
33,420
import re


def has_drm_match(ocr_result, drm):
    """
    Checks if a drm matches the ocr_result format.

    Args:
        ocr_result (str): OCR result string;
        drm (dict): DRM dict object whose "identifiers" entry holds regex
            patterns for parsing the OCR string.

    Returns:
        (bool): True when every identifier pattern matches the OCR result
            string (case-insensitively).
    """
    return all(re.search(pattern, ocr_result, re.IGNORECASE)
               for pattern in drm["identifiers"])
2f0f067bee08ce9a309ccacd76c07fb81681675f
33,422
import re


def regex_match(regex, string):
    """
    Returns: `True` if the string matches the regex.
    """
    return bool(re.search(regex, string))
24f0940b684cc7605b5a7dcede836f12cc82c8ee
33,424
def forward_segment(text, dic):
    """Forward maximum-matching Chinese word segmentation.

    :param text: text to segment
    :param dic: dictionary (container supporting ``in``) of known words
    :return: list of words; earlier matches in the text come first, and
        characters not forming any dictionary word are emitted singly
    """
    words = []
    pos = 0
    n = len(text)
    while pos < n:
        best = text[pos]  # fall back to the single character
        # Try every longer candidate starting at pos; keep the longest
        # that is in the dictionary (greedy forward match).
        for end in range(pos + 2, n + 1):
            candidate = text[pos:end]
            if candidate in dic and len(candidate) > len(best):
                best = candidate
        words.append(best)
        pos += len(best)
    return words
24c1158551563e82ea856de1322b65512d1aceaa
33,427
import json
import subprocess


def get_python_version(executable, as_string=False):
    """Get the version of the Python interpreter.

    :param executable: path to the Python interpreter to query
    :param as_string: when true, return "major.minor.micro" instead of a tuple
    :return: (major, minor, micro) tuple, or the dotted string
    """
    # Ask the target interpreter itself, so the answer reflects *its*
    # version rather than the current process's.
    probe = ("import sys,json;"
             "print(json.dumps(tuple(sys.version_info[:3])))")
    output = subprocess.check_output([executable, "-c", probe])
    version = tuple(json.loads(output))
    if as_string:
        return ".".join(map(str, version))
    return version
4367dbb4691ad80e016a3e2773b62d07124610b4
33,429
def wulffmaker_gamma(energy):
    """
    Returns the string to be used for the Wulffmaker default gamma values.

    Arguments
    ---------
    energy: iterable
        Any iterable that holds the surface energies

    Returns
    -------
    str
        String to be copied to wulffmaker for the surface energies.
    """
    lines = ["pickGamma[i_] :=", "Which["]
    # One (condition, value) pair per surface, 1-indexed for Mathematica.
    for index, value in enumerate(energy, start=1):
        lines.append("i=={},".format(index))
        lines.append("{:.4f},".format(value))
    lines.append("True,")
    return "\n".join(lines) + "\n1]"
756bd972cab96ef143303c3084e293d0c4e82c28
33,430
def mangle_dupe_cols(columns):
    """remove/mangle any duplicate columns (we are naming line a, a.1, a.2
    etc if duplicates)

    :param columns: list of column names; modified in place
    :return: the (mutated) list with duplicates renamed
    """
    seen = {}
    for position, name in enumerate(columns):
        occurrences = seen.get(name, 0)
        if occurrences:
            columns[position] = '%s.%d' % (name, occurrences)
            print ('mangle_dupe_col: Duplicate column name: ' + str(name))
        seen[name] = occurrences + 1
    return columns
4f1a2fa033ac902f69641af489ddc4007f4e3699
33,434
import re


def parse_handler(handler):
    """
    Parse handler to retrieve module name and function name.

    :param handler: dotted path of word characters (plus ``|`` and ``-``)
    :return: the module name portion of the handler
    :raises ValueError: when the handler string is malformed
    """
    pattern = r'^([\w|-]+(\.[\w|-]+)*)$'
    match = re.match(pattern, handler)
    if match is None:
        raise ValueError('malformed handler - {!r}'.format(handler))
    return match.group(1)
73e003ccca4dbc5050d027c5eed4d17b36d29d2c
33,435
def switch_testing(fuel_switches, service_switches, capacity_switches):
    """Collect the enduses (and their sectors) affected by switches.

    Arguments
    ---------
    fuel_switches : list
        Switches (currently unused; kept for interface compatibility)
    service_switches : list
        Switches
    capacity_switches : list
        Switches

    Returns
    -------
    dict
        Maps every enduse appearing in a service or capacity switch to a
        list of affected sectors, or None when the enduse's first switch
        had no sector.
    """
    # Cleanup: the original duplicated this accumulation loop verbatim for
    # both switch lists and built two enduse lists that were never used.
    switches_by_enduse = {}
    _collect_enduse_sectors(service_switches, switches_by_enduse)
    _collect_enduse_sectors(capacity_switches, switches_by_enduse)

    # Freeze the accumulated sector sets into lists for the caller.
    for enduse, sectors in switches_by_enduse.items():
        if sectors is not None:
            switches_by_enduse[enduse] = list(sectors)
    return switches_by_enduse


def _collect_enduse_sectors(switches, switches_by_enduse):
    """Accumulate each switch's sector into the per-enduse mapping.

    A switch without a sector marks the whole enduse (stored as None);
    sectored switches contribute to a set of sector names.
    """
    for switch in switches:
        if switch.enduse not in switches_by_enduse:
            if not switch.sector:
                switches_by_enduse[switch.enduse] = None
            else:
                switches_by_enduse[switch.enduse] = {switch.sector}
        elif switch.sector:
            # NOTE(review): if the enduse was first seen without a sector the
            # stored value is None and .add() fails — same as the original.
            switches_by_enduse[switch.enduse].add(switch.sector)
7966ffdeb76650c3d8c5e136db9c82a65470b259
33,438
def has_same_parameters(est_params, boundaries_id, labels_id, params):
    """Checks whether the parameters in params are the same as the
    estimated parameters in est_params.

    Every key of ``params`` must exist in ``est_params`` with an equal
    value, and the boundaries (and labels, when ``labels_id`` is given)
    ids must match. An empty ``params`` trivially matches.
    """
    # Condition order mirrors the original exactly (short-circuit safety:
    # est_params["labels_id"] is only touched when labels_id is not None).
    return all(
        key in est_params
        and est_params[key] == params[key]
        and est_params["boundaries_id"] == boundaries_id
        and (labels_id is None or est_params["labels_id"] == labels_id)
        for key in params
    )
b896d3f772e43f2cfd2eb91ab30636c8fa51c435
33,439
def check_repetitive(combination, df_main):
    """Check to avoid repetitive combinations

    Parameters
    ----------
    combination:
        combinations that want to be checked
    df_main: pandas.DataFrame
        source dataframe

    Returns
    -------
    boolean:
        True: it does not yet exist in df_main
        False: it already exists
    """
    merged = df_main.merge(combination, indicator=True, how='outer')
    # The '_merge' indicator is 'both' for rows present in both frames.
    return 'both' not in merged['_merge'].unique()
8d6a40e2e233c5b04c27fee0db851bde52694cfd
33,442
import torch


def embedding_fn(x, n_freq=5, keep_ori=True):
    """
    create sin embedding for 3d coordinates

    input:
        x: Px3
        n_freq: number of raised frequency (powers of two)
        keep_ori: when true, the raw coordinates form the first columns
    return:
        Px(3 + 6*n_freq) tensor (Px(6*n_freq) when keep_ori is false)
    """
    parts = [x] if keep_ori else []
    # Frequencies 2^0 .. 2^(n_freq-1); each contributes sin then cos.
    freqs = 2. ** torch.linspace(0., n_freq - 1, steps=n_freq)
    for freq in freqs:
        parts.append(torch.sin(freq * x))
        parts.append(torch.cos(freq * x))
    return torch.cat(parts, dim=1)
18485fcfe0bf90756a362963c57ddba0d4a932a6
33,443
def d8flowdir(np, input, output1, output2):
    """Build the TauDEM d8flowdir command line.

    command: d8flowdir -fel demfel.tif -p demp.tif -sd8 demsd8.tif,
    demfile: Pit filled elevation input data,
    pointfile: D8 flow directions output,
    slopefile: D8 slopes output

    NOTE: parameter names shadow common identifiers (``np``, ``input``);
    they are kept for backward compatibility.
    """
    template = "mpirun -np {} d8flowdir -fel {} -p {} -sd8 {}"
    return template.format(np, input, output1, output2)
bf437ab23cdb266100ce4405a9296e955d8c1e31
33,444
def _ExcludeScore(config_name, capture_name, render_name, echo_simulator_name, test_data_gen_name, score_name, args): """Decides whether excluding a score. A set of optional regular expressions in args is used to determine if the score should be excluded (depending on its |*_name| descriptors). Args: config_name: APM configuration name. capture_name: capture audio track name. render_name: render audio track name. echo_simulator_name: echo simulator name. test_data_gen_name: test data generator name. score_name: evaluation score name. args: parsed arguments. Returns: A boolean. """ value_regexpr_pairs = [ (config_name, args.config_names), (capture_name, args.capture_names), (render_name, args.render_names), (echo_simulator_name, args.echo_simulator_names), (test_data_gen_name, args.test_data_generators), (score_name, args.eval_scores), ] # Score accepted if each value matches the corresponding regular expression. for value, regexpr in value_regexpr_pairs: if regexpr is None: continue if not regexpr.match(value): return True return False
99f9ea3e8cbefa4cc05d7a1dc4742381eca82aff
33,445