Dataset schema: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k).
def __is_label(string: str) -> bool:
    """Return whether a given string is a label declaration.

    :param string: line of code to inspect
    :return: True if the line ends with ":" and contains no quotes
    """
    return string.endswith(":") and '"' not in string and "'" not in string
2e401f9e81122dd0888297127741cb377f15d6f9
21,009
def get_resources_to_delete(resources):
    """Return a list of (id, path) tuples for all resources to delete.

    Args:
        resources (list(dict)): resources to compare to the paths

    Returns:
        list(tuple(str, str)): (id, path) of each resource to remove
    """
    resources_to_delete = [(resource['id'], resource["path"])
                           for resource in resources
                           if resource["path"].count("/") == 1
                           and resource["path"] != "/"]
    return resources_to_delete
ece8b021821aed148c4bb41beed3da90e22f71f6
21,010
def clean_row(row):
    """Clean the data regarding a persona in `row`."""
    # remove number
    row = row[1:]
    # persona ingredients
    personas = row[1:]
    for i, persona in enumerate(personas, 1):
        persona_info = persona.split()
        # keep only the name
        if persona_info[-1] == '⚠':
            row[i] = " ".join(persona_info[:-4])
        else:
            row[i] = " ".join(persona_info[:-3])
    return row
a704b902b186e2e5cbf4db963669aa62eb9ab8ba
21,011
def bam2bw_and_track(curr_sample, curr_color, out_dir, template_dir,
                     track_fn, bigdata_path, len_fn):
    """Convert bam to bw file and create the UCSC track file."""
    # Convert bam to bw
    cmd = ""
    cmd += ("genomeCoverageBed -split -bg -ibam " + curr_sample +
            "_accepted_hits.sorted.bam" + " -g " + len_fn + " > " +
            curr_sample + ".bdg\n")
    cmd += ("LC_COLLATE=C sort -k1,1 -k2,2n " + curr_sample + ".bdg > " +
            curr_sample + ".sorted.bdg\n")
    cmd += ("bedGraphToBigWig " + curr_sample + ".sorted.bdg " + len_fn +
            " " + curr_sample + ".bw\n")
    # Create UCSC track file
    outp = open(track_fn, 'a')
    outp.write("track type=bigWig name=" + "\"" + curr_sample + "\" color=" +
               curr_color + " gridDefault=on maxHeightPixels=50 "
               "visibility=full autoScale=off viewLimits=5:100 "
               "description=\"" + curr_sample + "\" bigDataUrl=" +
               bigdata_path + curr_sample + ".bw\n")
    outp.close()
    return cmd
fbe23b1320f770dce1f3e693876db5341e12929d
21,012
def reprint_form():
    """(Unimplemented) Reprint a previous list."""
    return 'This feature is currently not implemented.'
31ad18df5533e833bbd9f3d4a22f89c4c57b5c2f
21,014
def _is_ref(schema):
    """Given a JSON Schema compatible dict, returns True when the schema
    implements `$ref`.

    NOTE: `$ref` OVERRIDES all other keys present in a schema.

    :param schema: dict to inspect
    :return: Boolean
    """
    return '$ref' in schema
1f805929a6b28cf4fbe16714d300f3beffeb6e73
21,015
def add_one(number):
    """Example of a simple function.

    Parameters
    ----------
    number: int, float, str

    Returns
    -------
    out: int, float, str
        The input value plus one.

    Raises
    ------
    TypeError
        If the input is not the expected type.
    """
    if isinstance(number, (float, int)):
        return number + 1
    elif isinstance(number, str):
        return number + '1'
    else:
        raise TypeError('Expecting an int, float or string.')
d24a5d9e1a02098d1a6638bdc8b5493bc4d732e2
21,016
import json


def load_settings(settings_fname='settings.json'):
    """Load settings from a file.

    Parameters
    ----------
    settings_fname : str
        Name of the settings file; defaults to settings.json.

    Returns
    -------
    username : str
        User name
    passw : str
        User password
    root_url : str
        URL of the API
    client_id : str
        Client identifier for API access
    client_secret : str
        Secret for API access
    auth_server : str
        URL of the authorization server
    """
    if settings_fname is None:
        settings_fname = 'settings.json'
    with open(settings_fname) as datafile:
        jd = json.load(datafile)
    return jd['username'], \
        jd['passw'], \
        jd['root_url'], \
        jd['client_id'], \
        jd['client_secret'], \
        jd['auth_server']
63067d6d351bbe799f5e9a839c9e7fba92fe54ef
21,017
def eval_cluster(labels, cluster_id, is_clean):
    """Evaluate true positives in the provided cluster.

    :param labels: (np.ndarray) array of labels
    :param cluster_id: (int) identifier of the cluster to evaluate
    :param is_clean: (array) bitmap where 1 means the point is not attacked
    :return: (int) number of identified backdoored points
    """
    cluster = labels == cluster_id
    identified = 0
    for i in range(len(is_clean)):
        if cluster[i] and is_clean[i] == 0:
            identified += 1
    return identified
1499d705bf6a250e214b5f6cac9f1daa3a22d30f
21,019
def iter_dicts_table(iter_dicts, classes=None, check=False):
    """Convert an iterable sequence of dictionaries (all of which with
    identical keys) to an HTML table.

    Args:
        iter_dicts (iter): an iterable sequence (list, tuple) of
            dictionaries, dictionaries having identical keys.
        classes (str): Is the substitute for <table class="%s">.
        check (bool): check for key consistency!

    Returns:
        str|None: HTML tabular representation of a Python iterable sequence
            of dictionaries which share identical keys. Returns None if
            table is not consistent (dictionary keys).
    """
    # check key consistency
    if check:
        first_keys = iter_dicts[0].keys()
        for d in iter_dicts:
            if d.keys() != first_keys:
                return None

    table_parts = {}
    table_parts['classes'] = ' class="%s"' % classes if classes else ''
    table_parts['thead'] = ''.join(['<th>%s</th>' % k for k in iter_dicts[0]])

    # tbody
    keys = ['<td>%(' + key + ')s</td>' for key in iter_dicts[0]]
    row = '<tr>' + ''.join(keys) + '</tr>'
    table_parts['tbody'] = '\n'.join([row % d for d in iter_dicts])

    return '''
    <table{classes}>
      <thead>
        <tr>{thead}</tr>
      </thead>
      <tbody>
        {tbody}
      </tbody>
    </table>
    '''.format(**table_parts)
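A minimal usage sketch (the rows below are hypothetical):

rows = [{'name': 'ada', 'id': 1}, {'name': 'bob', 'id': 2}]
html = iter_dicts_table(rows, classes='users', check=True)
assert '<th>name</th>' in html and '<td>ada</td>' in html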
e1be3d05180eef1a9a039011007413d4fec65301
21,020
def AxisUnitsConvertRev(UnitCode):
    """Convert axis units."""
    switcher = {
        'nm': 'nm',
        '$\mu$m': 'microns'
    }
    return switcher.get(UnitCode, 'nm')
4d8e1e2d803bd32cc0bd4d84377623d98e203bd5
21,021
def filter_post_edits(dict_elem):
    """Filter post history events that modified the body of the posts.

    :param dict_elem: dict with parsed XML attributes
    :return: boolean indicating whether element modified the body of the
        corresponding post
    """
    return int(dict_elem['PostHistoryTypeId']) in [2, 5, 8]
b8f567d6dceb0dd1deb6cffac631bfc44f0fb282
21,023
import socket


def get_celery_startup_attachment(**kwargs):
    """Create the slack message attachment for celery startup."""
    if kwargs["show_celery_hostname"]:
        message = "*Celery is starting up on {}.*".format(socket.gethostname())
    else:
        message = "*Celery is starting up.*"
    attachment = {
        "attachments": [
            {
                "fallback": message,
                "color": kwargs["slack_celery_startup_color"],
                "text": message,
                "mrkdwn_in": ["text"]
            }
        ],
        "text": ""
    }
    return attachment
1c18bec214a19b47495c922ce1ac13a6b22a2a3e
21,025
def split_str(string: str, maxsplit: int = 1) -> list:
    """Split the characters of a string into a list of fixed-size chunks.

    :param string: String to split
    :param maxsplit: Number of characters per chunk, DEFAULT = 1
    :return: List of the chunks split from the string (a trailing chunk
        shorter than `maxsplit` is dropped)

    Example:
    >>> split_str("HELLO WORLD", 2)
    ['HE', 'LL', 'O ', 'WO', 'RL']
    """
    txt = ""
    str_list = []
    for i in string:
        txt += i
        if len(txt) == maxsplit:
            str_list.append(txt)
            txt = ''
    return str_list
0d0e2a43c03009c85b2e2fcfa0df8ac4bd241787
21,026
def add_item_to_inventory(game, *args):
    """Add a newly created Item to your inventory."""
    (item, action_description, already_done_description) = args[0]
    if not game.is_in_inventory(item):
        print(action_description)
        game.add_to_inventory(item)
    else:
        print(already_done_description)
    return False
837cc7be252fce90eb082eb80b9093d92a1fd4a8
21,027
import collections


def items_to_dict(l, attrs='name', ordered=False):
    """Given a list of attr instances, return a dict using specified attrs as keys

    Parameters
    ----------
    attrs : str or list of str
        Which attributes of the items to use to group
    ordered : bool, optional
        Whether to return an ordered dictionary following the original
        order of items in the list

    Raises
    ------
    ValueError
        If there is a conflict - multiple items with the same attrs used for key

    Returns
    -------
    dict or collections.OrderedDict
    """
    many = isinstance(attrs, (list, tuple))
    out = (collections.OrderedDict if ordered else dict)()
    for i in l:
        k = tuple(getattr(i, a) for a in attrs) if many else getattr(i, attrs)
        if k in out:
            raise ValueError(
                "We already saw entry for %s: %s. Not adding %s"
                % (k, out[k], i)
            )
        out[k] = i
    return out
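A minimal usage sketch (the `Item` namedtuple is hypothetical):

from collections import namedtuple
Item = namedtuple('Item', ['name', 'size'])
d = items_to_dict([Item('a', 1), Item('b', 2)])
assert d['a'].size == 1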
0948ab89944319887661804c78c995adcf306695
21,028
import functools


def wrap_hook(func):
    """Decorator for wrapping hooks around payload functions.

    Decorate a function with @wrap_hook so that two hook points will be
    automatically wrapped around the execution. For example:

        @wrap_hook
        def foo(self):
            pass

    will register both 'before_foo' and 'after_foo', which will be invoked
    before and after the foo function is executed.
    """
    @functools.wraps(func)
    def func_with_hook(self, *args, **kwargs):
        self.execute_hook('before_{}'.format(func.__name__))
        result = func(self, *args, **kwargs)
        self.execute_hook('after_{}'.format(func.__name__))
        return result
    return func_with_hook
1e58c347c6d52125d6f52c2fe7753f36314e7698
21,030
def add_email(email, emails):
    """Add an email address to a list of email addresses."""
    if not email:
        return emails
    if isinstance(email, str):
        email = [email]
    if emails is None:
        emails = []
    elif isinstance(emails, str):
        emails = [emails]
    email = email.copy()
    email.extend(emails)
    return email
5831200f903e99fddbd57bae0320d3747d363e11
21,031
import json


def parse_raw_config(raw_config_object):
    """Parse the raw config into a crossref config dict with typed values."""
    crossref_config = {}
    boolean_values = [
        "jats_abstract",
        "face_markup",
        "crossmark",
        "elocation_id",
        "elife_style_component_doi",
    ]
    int_values = ["year_of_first_volume"]
    list_values = [
        "contrib_types",
        "archive_locations",
        "access_indicators_applies_to",
        "pub_date_types",
        "component_exclude_types",
        "crossmark_domains",
        "assertion_display_channel_types",
    ]
    for value_name in raw_config_object:
        if value_name in boolean_values:
            crossref_config[value_name] = raw_config_object.getboolean(value_name)
        elif value_name in int_values:
            crossref_config[value_name] = raw_config_object.getint(value_name)
        elif value_name in list_values:
            crossref_config[value_name] = json.loads(raw_config_object.get(value_name))
        else:
            # default
            crossref_config[value_name] = raw_config_object.get(value_name)
    return crossref_config
7b410289e1202f2d7b38a40b8e7da7f972fef353
21,032
def median(seq):
    """Returns the median of a sequence.

    Note that if the sequence is even-length, this just returns the upper of
    the two middle items, not the average of them, as is strictly correct.
    """
    seq = sorted(seq)
    return seq[len(seq)//2]
c463fa83908606472ead5ef7d56d8b72c7c34edd
21,033
import os


def getTargetLanguages():
    """Provides a list of the target languages."""
    # default language is Python
    return os.environ.get("NCBI_SWIG_LANG", "python").strip().lower().split()
024ad72a543d1cb0d17d0acf031d0d5216ab6b9b
21,034
import sys
import os


def get_etag_from_file(f):
    """Get etag from a filepath or file-like object.

    This function will flush/sync the file as much as necessary to obtain
    a correct value.
    """
    if hasattr(f, "read"):
        f.flush()  # Only this is necessary on Linux
        if sys.platform == "win32":
            os.fsync(f.fileno())  # Apparently necessary on Windows
        stat = os.fstat(f.fileno())
    else:
        stat = os.stat(f)

    mtime = getattr(stat, "st_mtime_ns", None)
    if mtime is None:
        mtime = stat.st_mtime
    return f"{mtime:.9f};{stat.st_ino}"
cc282a4d19ca2cd76a15dc8ebaf8dbc5b87a6623
21,035
def get_dict_value(value, arg, default=''):
    """Template helper for accessing a dict with a variable key."""
    if arg in value:
        return value[arg]
    else:
        return default
64c731b1367ae341e4cacaae208fa6f05917a111
21,037
def AddFieldToUpdateMask(field, patch_request):
    """Adds name of field to update mask."""
    update_mask = patch_request.updateMask
    if not update_mask:
        patch_request.updateMask = field
    elif field not in update_mask:
        patch_request.updateMask = update_mask + "," + field
    return patch_request
f8533b38e3ecf7658aa06850869727ab3f34e7de
21,038
import sys
import argparse


def _parse_arguments():
    """An internal function to parse input arguments."""
    # parse
    parser = argparse.ArgumentParser(description='''
        This program takes existing Freesurfer analysis results of one
        or more subjects and computes a set of quality metrics. These will
        be reported in a summary csv table.

        For a description of these metrics, see the gitlab/github page or
        the header section of this script.
        ''',
        add_help=False, formatter_class=argparse.RawTextHelpFormatter)

    required = parser.add_argument_group('required arguments')
    required.add_argument('--subjects_dir', dest="subjects_dir",
                          help="subjects directory with a set of Freesurfer \nprocessed individual datasets.",
                          metavar="<directory>", required=True)
    required.add_argument('--output_dir', dest="output_dir",
                          help="output directory", metavar="<directory>",
                          required=True)

    optional = parser.add_argument_group('optional arguments')
    optional.add_argument('--subjects', dest="subjects",
                          help="list of subject IDs. If omitted, all suitable sub-\ndirectories within the subjects directory will be \nused.",
                          default=None, nargs='+', metavar="SubjectID",
                          required=False)
    optional.add_argument('--subjects-file', dest="subjects_file",
                          help="filename with list of subject IDs (one per line). \nIf omitted, all suitable sub-directories within \nthe subjects directory will be used.",
                          default=None, metavar="<filename>", required=False)
    optional.add_argument('--shape', dest='shape',
                          help="run shape analysis", default=False,
                          action="store_true", required=False)
    optional.add_argument('--screenshots', dest='screenshots',
                          help="create screenshots of individual brains",
                          default=False, action="store_true", required=False)
    optional.add_argument('--screenshots-html', dest='screenshots_html',
                          help="create html summary page for screenshots",
                          default=False, action="store_true", required=False)
    # the following are currently hidden "expert" options
    optional.add_argument('--screenshots_base', dest='screenshots_base',
                          help=argparse.SUPPRESS, default="default",
                          metavar="<base image for screenshots>",
                          required=False)
    optional.add_argument('--screenshots_overlay', dest='screenshots_overlay',
                          help=argparse.SUPPRESS, default="default",
                          metavar="<overlay image for screenshots>",
                          required=False)
    optional.add_argument('--screenshots_surf', dest='screenshots_surf',
                          help=argparse.SUPPRESS, default="default", nargs="+",
                          metavar="<surface(s) for screenshots>",
                          required=False)
    optional.add_argument('--screenshots_views', dest='screenshots_views',
                          help=argparse.SUPPRESS, default="default", nargs="+",
                          metavar="<dimension=coordinate [dimension=coordinate]>",
                          required=False)
    optional.add_argument('--screenshots_layout', dest='screenshots_layout',
                          help=argparse.SUPPRESS, default=None, nargs=2,
                          metavar="<rows> <columns>", required=False)
    optional.add_argument('--fornix', dest='fornix',
                          help="check fornix segmentation", default=False,
                          action="store_true", required=False)
    optional.add_argument('--fornix-html', dest='fornix_html',
                          help="create html summary page for fornix evaluation",
                          default=False, action="store_true", required=False)
    optional.add_argument('--hypothalamus', dest='hypothalamus',
                          help="check hypothalamus segmentation",
                          default=False, action="store_true", required=False)
    optional.add_argument('--hypothalamus-html', dest='hypothalamus_html',
                          help="create html summary page for hypothalamus evaluation",
                          default=False, action="store_true", required=False)
    optional.add_argument('--outlier', dest='outlier',
                          help="run outlier detection", default=False,
                          action="store_true", required=False)
    optional.add_argument('--outlier-table', dest="outlier_table",
                          help="specify normative values", default=None,
                          metavar="<filename>", required=False)
    optional.add_argument('--fastsurfer', dest='fastsurfer',
                          help="use FastSurfer output", default=False,
                          action="store_true", required=False)

    help = parser.add_argument_group('getting help')
    help.add_argument('-h', '--help',
                      help="display this help message and exit", action='help')

    # check if there are any inputs; if not, print help and exit
    if len(sys.argv) == 1:
        args = parser.parse_args(['--help'])
    else:
        args = parser.parse_args()

    return args.subjects_dir, args.output_dir, args.subjects, \
        args.subjects_file, args.shape, args.screenshots, \
        args.screenshots_html, args.screenshots_base, \
        args.screenshots_overlay, args.screenshots_surf, \
        args.screenshots_views, args.screenshots_layout, args.fornix, \
        args.fornix_html, args.hypothalamus, \
        args.hypothalamus_html, args.outlier, args.outlier_table, args.fastsurfer
8e887fd4151feac665e66b1c06cefaf25ac6b5fd
21,039
import argparse


def arg_parse():
    """Parsing arguments"""
    parser = argparse.ArgumentParser(
        description="Script for downloading data from google drive")
    parser.add_argument('--token', required=True, type=str,
                        metavar='TOKEN', help='OAuth2 token')
    parser.add_argument('--refresh_token', required=True, type=str,
                        metavar='REFRESH_TOKEN', help='refresh token')
    parser.add_argument('--client_id', required=True, type=str,
                        metavar='CLIENT_ID', help='client id')
    parser.add_argument('--client_secret', required=True, type=str,
                        metavar='CLIENT_SECRET', help='client secret')
    parser.add_argument('--folder_id', required=True, type=str,
                        metavar='FOLDER_ID', help='Download folder id')
    parser.add_argument('--output_path', required=True, type=str,
                        metavar='OUTPUT', help='Output folder name')
    args = parser.parse_args()
    return args
b82ddc7c1a942eef1b83ff55aeb3df503289251f
21,040
import glob


def get_pacs(dirname):
    """Return a dictionary of the set of fits.gz files from a PACS
    hierarchy two levels deep.

    Unlike get_spire(), this one allows multiple fits files per entry.
    """
    fns = glob.glob(dirname + '/*/*/*.fits.gz')
    d = {}
    for fn in fns:
        fn2 = fn.split('/')
        if fn2[-3] in d:
            d[fn2[-3]].append(fn)
        else:
            d[fn2[-3]] = [fn]
    return d
67d66c5f7fce349c8fa2673c8c464c05706e7a41
21,041
def tag_pattern_option_validator(ctx, param, value):
    """The provided string must contain the **{version}** placeholder in
    order to be valid. Otherwise :class:`click.UsageError` is raised.
    """
    if not value or "{version}" not in value:
        ctx.fail("Missing {version} placeholder in tag_pattern.")
    return value
f7149cb692254e8bac8a9f1dd107cc1b355ef45d
21,043
def find_layer_name(lines):
    """Find the layer name.

    :param lines: lines of a layer definition block
    :return: (layer_name, top_name)
    """
    layer_name = None
    top_name = None
    flag_count = 0
    first_line = lines[0]
    assert first_line.split()[1] == '{', 'Something is wrong'
    brack_count = 1
    for l in lines[1:]:
        if '{' in l:
            brack_count += 1
        if '}' in l:
            brack_count -= 1
        if brack_count == 0:
            break
        if 'name' in l and brack_count == 1:
            flag_count += 1
            _, layer_name = l.split()
            layer_name = layer_name[1:-1]
        if 'top' in l and brack_count == 1:
            flag_count += 1
            _, top_name = l.split()
            top_name = top_name[1:-1]
    assert layer_name is not None, 'no name of a layer found'
    return layer_name, top_name
3c1816c5280fa8caf6169683e741ae7d5bbde60a
21,044
def form_valid(files):
    """Check the validity of the form.

    Check if fields are present; if yes, test that the filestorage exists.
    Return False if ALL filestorage objects are empty or if at least 1 field
    is missing.

    :param files: Iterable of files in the form.
    :type files: <werkzeug.datastructures.ImmutableMultiDict>
    :return: True or False according to the form validity.
    :rtype: <bool>
    """
    ids = ('ns_fic', 'rp_fic', 'fp_fic')
    if len(ids) != len(files):
        return False
    file_found = False
    for id in ids:
        # Get file in form (return None if expected id is not in fields)
        filestorage = files.get(id, None)
        if filestorage is None:
            # 1 field absent (None) = danger
            return False
        elif filestorage:
            # Detect if all files are empty
            file_found = True
    return file_found
658bee1e4aa7f2f6cb37538e8dbc58c5054c0954
21,046
def sub_from_color(color, value):
    """Subtract a value from a color, wrapping modulo 256."""
    sub = lambda v: (v - value) % 256
    if isinstance(color, int):
        return sub(color)
    return tuple(map(sub, color))
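A quick sanity check of the modular wraparound (values are illustrative):

assert sub_from_color(10, 20) == 246                        # (10 - 20) % 256
assert sub_from_color((0, 128, 255), 1) == (255, 127, 254)  # 0 wraps to 255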
71be1ef8f956ec2c9fdee0516a01833922934aa7
21,050
import re


def replace_with_null_body(udf_path):
    """For a given path to a UDF DDL file, parse the SQL and return the UDF
    with the body entirely replaced with NULL.

    :param udf_path: Path to the UDF DDL .sql file
    :return: Input UDF DDL with a NULL body
    """
    with open(udf_path) as udf_file:
        udf_sql = udf_file.read()
    udf_sql = udf_sql.replace('\n', ' ')
    pattern = re.compile(r'FUNCTION\s+(`?.+?`?.*?\).*?\s+)AS')
    match = pattern.search(udf_sql)
    if match:
        udf_signature = match[1].replace('LANGUAGE js', '')
        udf_null_body = (f'CREATE FUNCTION IF NOT EXISTS {udf_signature}'
                         f' AS (NULL)')
        return udf_null_body
    else:
        return None
7a014192b4623ed90f04b50fe52dceb696355c9c
21,052
def create_login_url(path):
    """Returns the URL of a login page that redirects to 'path' on success."""
    return "/auth/hello?redirect=%s" % path
0ba50d443d8cddbbc77ada8a0828cf5a0e534ef1
21,053
def vol2covdeg(nvol, tr):
    """Kay heuristic for selecting polynomial degree by run duration."""
    return int(round(nvol * tr / 60 / 2))
6681d5779c3c41774fe276e295d68188c50f83aa
21,054
import subprocess


def head():
    """Returns the head commit."""
    sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'])[:40]
    return sha.decode('utf-8')
21c55bbc441b63ee48ba0349d16f54423fb78e0a
21,056
def validate_eyr(expiration_year: str) -> bool:
    """eyr (Expiration Year) - four digits; at least 2020 and at most 2030."""
    return (
        len(expiration_year) == 4
        and int(expiration_year) >= 2020
        and int(expiration_year) <= 2030
    )
c1c332c01cdc58b6363ee9788fc2d52def9aa697
21,057
def load_alarm_list():
    """Load the cached status from disk, return it as a string."""
    alarm_list_path = '/var/lib/nagios/etcd-alarm-list.txt'
    with open(alarm_list_path, 'r') as alarm_list_log:
        alarm_list = alarm_list_log.read()
    return alarm_list.strip()
ed7a98fe843e0d98d286959313b8eb1a44c79e8b
21,061
def option_name_to_variable_name(option: str):
    """Convert an option name like `--ec2-user` to the Python name it gets
    mapped to, like `ec2_user`.
    """
    return option.replace('--', '', 1).replace('-', '_')
abaf2bb749eed54372233db677f9668c2d486f29
21,062
def convert_labels(labels, transl):
    """Convert between strings and numbers."""
    return [transl[l] for l in labels]
6aa73b299dc84a12dcd21acb51f07546b2d8f311
21,064
import os


def _patch_path(path):
    """Paths have a max length of api.MAX_PATH characters (260). If a target
    path is longer than that, it needs to be made absolute and prepended
    with \\?\ in order to work with API calls.
    See http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
    for details.
    """
    if path.startswith('\\\\?\\'):
        return path
    abs_path = os.path.abspath(path)
    if not abs_path[1] == ':':
        # python doesn't include the drive letter, but \\?\ requires it
        abs_path = os.getcwd()[:2] + abs_path
    return '\\\\?\\' + abs_path
17d8e9d2a5e1adc9b5cb167a53b86f8a351b02d1
21,066
import json


def get_user_from_file(user_file):
    """Get users from a file.

    :param user_file: path to a JSON file of users
    :return: a list of SystemUsers
    """
    user = {}
    try:
        with open(user_file, 'r') as file:
            user = json.load(file)
    except Exception as error:
        raise error
    return user
990a984452234ce531fa8dc2eca4ca203ad33aab
21,068
import os
import pickle


def call_with_legacy_params(func, legacy_pickle, override_args=None,
                            override_kwargs=None, verbose=False):
    """Given a file path `legacy_pickle` containing an encoded Python
    dictionary, run function `func` with the same args encoded in
    `legacy_pickle`.

    Returns a tuple with the legacy return value object and the new return
    value object, respectively.

    The dictionary encoded in `legacy_pickle` must be type dict and
    minimally contain keys args, kwargs, and result.

    Specify keyword arguments `override_args` or `override_kwargs` to
    override pickled args/kwargs. For instance, specifying both will call
    func(*override_args, **override_kwargs).
    """
    assert os.path.isfile(legacy_pickle)
    with open(legacy_pickle, 'rb') as f:
        unpickled = pickle.load(f)
    assert isinstance(unpickled, dict)
    assert "args" in unpickled
    assert "kwargs" in unpickled
    assert "result" in unpickled

    # print function name and commit sha
    if verbose:
        if "fname" in unpickled:
            print("function name (fname):", unpickled['fname'])
        if "commit" in unpickled:
            print("'{}' was pickled on commit: {}".format(
                legacy_pickle, unpickled['commit']))
        print("'{}' was run with args: {}".format(
            legacy_pickle, unpickled['args']))
        print("'{}' was run with kwargs: {}".format(
            legacy_pickle, unpickled['kwargs']))

    # add override args and kwargs if they were specified
    if override_args is None:
        args = unpickled['args']
    else:
        args = override_args
    if override_kwargs is None:
        kwargs = unpickled['kwargs']
    else:
        kwargs = override_kwargs

    # run same function using the same params
    result = func(*args, **kwargs)
    return (unpickled['result'], result)
b78caf7c4b99a7354e2faeb00d76dad56af09d0e
21,072
def normalize_map(mapping):
    """Creates a new dictionary with the frequency of each transition.

    Each state transition count is normalized by the total number of
    transitions out of a given state.

    Args:
        mapping (dict): A mapping of States to dictionaries of States
            mapped to numbers {State -> {State -> int}}

    Returns:
        dict: A dictionary of the normalized counts
    """
    normalized_dict = {}
    for word in mapping:
        normalized_dict[word] = {}
        count = sum(mapping[word].values())
        for other in mapping[word]:
            normalized_dict[word][other] = mapping[word][other] / count
    return normalized_dict
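A small illustrative run (the transition counts are hypothetical):

counts = {'a': {'b': 3, 'c': 1}}
assert normalize_map(counts) == {'a': {'b': 0.75, 'c': 0.25}}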
d59e6e4ae7f343388de094b10b7a60e34ec72e0c
21,074
def file_unload(file: str):
    """Open a text file, read its contents and unpack them into a list.

    :param file: str -- name of the config file
    :return: list -- list of lines read from the file
    """
    unpacked_file = []
    with open(file, "r") as f:
        for line in f:
            if line.startswith("#") or line == "\n":
                continue
            else:
                unpacked_file.append(line.strip("\n"))
    return unpacked_file
7bdc61b662796ec2fd7324d4d6934755dc9a2ab7
21,075
import inspect


def _get_object_to_check(python_object):
    """Check if inspect.getfile has a chance to find the source."""
    try:
        python_object = inspect.unwrap(python_object)
    except ValueError:
        pass  # can raise a ValueError when it wraps around

    if (inspect.ismodule(python_object)
            or inspect.isclass(python_object)
            or inspect.ismethod(python_object)
            or inspect.isfunction(python_object)
            or inspect.istraceback(python_object)
            or inspect.isframe(python_object)
            or inspect.iscode(python_object)):
        return python_object
    try:
        return python_object.__class__
    except AttributeError:
        raise TypeError
b3daaab6415978ebbc20dee5149533f3e81bd401
21,076
def ez_user(client, django_user_model):
    """A Django test client that has been logged in as a regular user named
    "ezuser", with password "password".
    """
    username, password = "ezuser", "password"
    django_user_model.objects.create_user(username=username, password=password)
    client.login(username=username, password=password)
    return client
20e54603e8154cbf9f6c7e0cbd06a78627beecd0
21,077
def _split_left(val, sep):
    """Split a string by a delimiter which can be escaped by \\"""
    result = []
    temp = u""
    escaped = False
    index = 0
    left = True
    for c in val:
        left = False
        temp += c
        if c == sep[index] and not escaped:
            index += 1
        else:
            index = 0
        if c == u"\\":
            escaped ^= True
        else:
            escaped = False
        if index >= len(sep):
            left = True
            index = 0
            result.append(temp[:-len(sep)])
            temp = u""
    if temp or left:
        result.append(temp)
    return result
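Illustrative behavior, assuming a separator of "=" that can be escaped with a backslash (the escape character is kept in the output):

assert _split_left(u"key=value", u"=") == [u"key", u"value"]
assert _split_left(u"a\\=b=c", u"=") == [u"a\\=b", u"c"]  # escaped "=" is not split on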
c85d8e0e07b9a99ad3f299f44d722c514a11935a
21,079
def char_month_converter(month):
    """Integer month to 3-character month."""
    months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
              'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
    return months[month - 1]
b624d6f5c52f51bf41a54fbf25e389f26562df15
21,082
def parent_directory(path):
    """Get parent directory. If root, return None.

    ""           => None
    "foo/"       => ""
    "foo/bar/"   => "foo/"
    """
    if path == '':
        return None
    prefix = '/'.join(path.split('/')[:-2])
    if prefix != '':
        prefix += '/'
    return prefix
261943945531fc348c46c49f5d3081cbd815bfdb
21,083
def FakeDataConfig(argument_parser):
    """Set CLI arguments.

    :param argument_parser: argument parser
    :type argument_parser: ```ArgumentParser```

    :return: argument_parser
    :rtype: ```ArgumentParser```
    """
    argument_parser.description = (
        "A fake dataset that returns randomly generated images and returns"
        " them as PIL images"
    )
    argument_parser.add_argument(
        "--size", type=str, help="Size of the dataset.", default="1000 images"
    )
    argument_parser.add_argument(
        "--image_size",
        type=str,
        help="Size of the returned images.",
        default="(3, 224, 224)",
    )
    argument_parser.add_argument(
        "--num_classes", type=int, help="Number of classes in the dataset.",
        default=10
    )
    argument_parser.add_argument(
        "--transform",
        type=str,
        help="""A function/transform that takes in an PIL image and returns a
transformed version. E.g, ``transforms.RandomCrop``""",
    )
    argument_parser.add_argument(
        "--target_transform",
        type=str,
        help="""A function/transform that takes in the target and transforms it.""",
    )
    argument_parser.add_argument(
        "--random_offset",
        type=int,
        help="""Offsets the index-based random seed used to generate each image.""",
        default=0,
    )
    return argument_parser
9acb95942f961404423fe89747c52468e917ceb1
21,084
from pathlib import Path
import shutil


def data_files():
    """Copy data files to Docker data volume for testing.

    Return generator of destination paths of the copied files.
    """
    src = Path('tests/unit/data')
    dest = Path('src/bokeh_server/data')
    src_files = src.glob('*')
    for file in src_files:
        shutil.copy(file, dest)
    return dest.glob('*')
3b9032110fd8d4979952f40579a176aed79963e8
21,085
import csv


def get_ycoordinates(filename):
    """Transform a CSV file of Y coordinates (newline separated) into a list."""
    with open(filename) as o:
        y = [float(item) for sublist in csv.reader(o) for item in sublist]
    return y
05e54b2cb4e7f9c7b58d3dfbe241090e1b0170ea
21,086
import io


def print_builtin():
    """print: Print an object on the standard output."""
    with io.StringIO() as fake_output:
        print("let's go to the beach", file=fake_output)
        fake_output.seek(0)
        return fake_output.read().rstrip()
7a5586e4f2dc1c7ba0878f9010c517debbe4a353
21,087
def node_weight(G, u):
    """Computes the weighted degree of a node.

    :param G: networkx.Graph
        Graph containing the node u
    :param u: node
        Node of which the degree will be computed
    :return: w: double
        Degree of u
    """
    w = 0
    for v in G[u].keys():
        w = w + G[u][v]['weight']
    return w
a6872007b3e8b60ce6e4893631c613febd59453a
21,088
def fibonacci():
    """Find the first Fibonacci term with 1000 or more digits."""
    n1 = 1
    n2 = 2
    nth = 0
    # the first number with 1000 digits is 10 ** 999
    thousands = 10 ** 999
    count = 4
    while True:
        nth = n1 + n2
        n1 = n2
        n2 = nth
        if nth > thousands:
            return count, len(str(nth)), n1
        count += 1
ea9b4f4b1e40ce7622b54781215e4148dbad3e17
21,092
def compare_balance_with_zero(balance):
    """
    :param balance: a double with the value of the balance after a year.
    :return: 0 if the balance is equal to zero or nearly equal, 1 if the
        balance is greater than zero and -1 if the balance is lower than zero.
    """
    if 0.05 >= balance >= -0.05:
        return 0
    elif balance > 0.05:
        return 1
    else:
        return -1
17b70cc282aa42495fa205ab9ab802a913d78341
21,093
def get_subseq(df, perc_start, perc_end):
    """Get a subsequence from a dataframe.

    Args:
        df (pd.DataFrame): Pandas DataFrame
        perc_start (int): Starting percentage of the subsequence
        perc_end (int): Ending percentage of the subsequence

    Returns:
        subseq (pd.DataFrame): The requested subsequence
    """
    start = int(len(df) * perc_start / 100)
    end = int(len(df) * perc_end / 100)
    df = df.iloc[start:end]
    return df
986e00834e59dc8489e790b706882e9397e6dbaf
21,095
import os
import glob


def get_icecube_includes():
    """Helper function to get include paths for IceCube headers."""
    # find all icecube packages in the source directory
    include_pattern = os.path.join(os.environ['I3_SRC'], '*/public')
    include_dirs = glob.glob(include_pattern)
    include_dirs.append(
        os.path.join(os.environ['I3_SRC'], 'cmake/tool-patches/common/'))

    # For parasitic metaprojects, the I3_SRC directory will only contain
    # packages that were added on top of the host metaproject.
    # In this case we need to scan the source directory of the host as well.
    # We can obtain the host metaproject by checking the symlinks of the
    # resources directories in the I3_BUILD directory.

    # Gather source directories
    resource_pattern = os.path.join(os.environ['I3_BUILD'], '*/resources')
    resource_dirs = glob.glob(resource_pattern)
    source_dirs = set([os.readlink(d).replace('resources', 'public')
                       for d in resource_dirs if os.path.islink(d)])
    include_dirs.extend(source_dirs)

    if 'SROOT' in os.environ:
        include_dirs.append(os.path.join(os.environ['SROOT'], 'include/'))

    return include_dirs
0ace345f3d939e1b1ba60e725c8f4fb192f65bc8
21,097
def build_property_filter_spec(client_factory, property_specs, object_specs):
    """Builds the Property Filter Spec."""
    property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    property_filter_spec.propSet = property_specs
    property_filter_spec.objectSet = object_specs
    return property_filter_spec
09e590863e90b554fd16e4af7e0089b321c9d17b
21,098
def chunker(testset, size):
    """https://stackoverflow.com/a/434328"""
    seq = [testset[i] for i in range(len(testset))]
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
6d0d6a8d249100d73228a9e5dc032d07ff76ac59
21,099
def distance_to_earth(location):
    """Calculates the distance to earth.

    :param location: The location
    :return: Distance to earth in parsec
    """
    try:
        if location.distance_module is not None:
            return 10 ** ((location.distance_module + 5) / 5)
        elif location.parallax is not None:
            return 1 / location.parallax
        else:
            raise ValueError("There is no way to find out the distance "
                             "to earth for this location.")
    except AttributeError:
        raise ValueError("There is no way to find out the distance "
                         "to earth for this location.")
e2c9d9269e47676d2daa43e66a98d91f389977e2
21,102
def rshift(val, n):
    """Python port of JavaScript's '>>>' (right shift with zero padding)."""
    return (val % 0x100000000) >> n
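An illustrative contrast with Python's signed shift (values chosen for the 32-bit case):

assert -1 >> 28 == -1          # Python's arithmetic shift keeps the sign
assert rshift(-1, 28) == 0xF   # unsigned ">>>"-style shift gives 15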
23205c84be8256bb289a8618107b73784446751b
21,103
def Transpose(axis_order, name='unnamed'):
    # language=rst
    """
    Transpose an input
    """
    order = None
    order_inverse = None
    batched_order = None
    batched_order_inverse = None

    def init_fun(key, input_shape, condition_shape):
        nonlocal order
        nonlocal batched_order
        order = [ax % len(axis_order) for ax in axis_order]
        batched_order = [0] + [o + 1 for o in order]
        assert len(order) == len(input_shape)
        assert len(set(order)) == len(order)
        params, state = (), ()
        output_shape = [input_shape[ax] for ax in order]

        nonlocal order_inverse
        nonlocal batched_order_inverse
        order_inverse = [order.index(i) for i in range(len(order))]
        batched_order_inverse = [0] + [o + 1 for o in order_inverse]

        return name, output_shape, params, state

    def forward(params, state, log_px, x, condition, **kwargs):
        if x.ndim == 2 or x.ndim == 4:
            # batched input: every axis shifts by one for the batch dimension
            z = x.transpose(batched_order)
        else:
            z = x.transpose(order)
        return log_px, z, state

    def inverse(params, state, log_pz, z, condition, **kwargs):
        if z.ndim == 2 or z.ndim == 4:
            x = z.transpose(batched_order_inverse)
        else:
            x = z.transpose(order_inverse)
        return log_pz, x, state

    return init_fun, forward, inverse
127b2458b34ca0d3c3e1765e4c040b766196af7a
21,104
def special_pythagorean_triplet(number):
    """Finds the first Pythagorean triplet for which a + b + c = n."""
    triplet = []
    for index_i in range(1, number):
        for index_ii in range(1, index_i):
            for index_iii in range(1, index_ii):
                if ((index_iii * index_ii) + (index_i * number) == (number ** 2) / 2
                        and index_i + index_ii + index_iii == number):
                    triplet.append(index_i)
                    triplet.append(index_ii)
                    triplet.append(index_iii)
    return triplet
f136d77c4e0a5d4bf610eda1ba7e2bee4baacc1a
21,105
def _to_household_ids(in_file_paths):
    """Collect the set of household IDs from CSV files with the columns:
    SERIALNO,puma_id,place_id,SYNTHETIC_HID,longitude,latitude
    """
    hid_column = 3
    result = set()
    for in_file_path in in_file_paths:
        print('reading', in_file_path)
        with open(in_file_path, 'r') as fin:
            for line in fin:
                cells = line.split(',')
                if len(cells) <= hid_column:
                    # malformed row: report it and skip
                    print(line)
                    continue
                result.add(cells[hid_column])
    return result
4b0f111a0b90ff8b75b7c8e5b7c12f979dddecbb
21,106
def slice_examples(examples_by_length, slicing, strategy_set, mixture):
    """Divide the examples between strategies to enable parallel processing.

    Parameters
    ----------
    examples_by_length : dict
        A dictionary mapping from sequence_length to the bin of examples of
        that sequence length.
    slicing : np.array
        The slicing information obtained using get_example_slicing.
    strategy_set : list[list[int]]
        The list of unique packing strategies with which the packing problem
        was solved.
    mixture : list[int] of shape [len(strategy_set)]
        States how many times each of the strategies from the strategy set
        should be repeated to cover the entire dataset.

    Returns
    -------
    example_slices : list[multi_sequence]
        Each component of the list is a list of multiple sequences, i.e.
        specifically sequences which are to be combined together to form a
        pack according to a strategy.
    strategies : list[int]
        A list containing the strategy for each slice, i.e. a strategy will
        appear multiple times if the work to fill the strategy has been
        split into multiple parts. The repetition of strategies is the main
        difference with strategy_set, which is unique.
    part_idx : int
        Used to ensure uniqueness of output filenames, i.e. if a strategy
        contains many examples the work is split into multiple parts and
        processed in parallel. Each process receives a different part_idx
        such that they write to different files.
    """
    chunk_limit = 50000
    example_slices = []
    strategies = []
    part_idx = []
    for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, mixture):
        if repeat_count == 0:
            continue
        # Slice out the sequences allocated to this strategy in increments of 50k
        num_parts = repeat_count // chunk_limit
        num_parts = num_parts + int(repeat_count != num_parts * chunk_limit)
        subcounts = (min(chunk_limit, repeat_count - chunk_limit * (i - 1))
                     for i in range(1, num_parts + 1))
        for part_id, part_count in enumerate(subcounts):
            examples = []
            for k, seq_len in enumerate(strategy):
                slice_start = int(slice_offsets[seq_len - 1])
                slice_end = slice_start + int(part_count)
                slice_offsets[seq_len - 1] = slice_end
                examples.append(examples_by_length[seq_len][slice_start:slice_end])
            example_slices.append(examples)
            strategies.append(strategy)
            part_idx.append(part_id)

    return example_slices, strategies, part_idx
c382c1dc6171f3d2db23536f15e16b3f530253e0
21,107
def _state(state):
    """Get status from instance."""
    return state['Name']
c9cddbcc80ea7ad549c433e00eca8a90f3c9e211
21,108
def get_async_or_sync_fn(fn):
    """Returns an async function for the specified fn, if it exists;
    otherwise returns fn itself.
    """
    if hasattr(fn, "asynq"):
        return fn.asynq
    if hasattr(fn, "async"):
        return getattr(fn, "async")
    return fn
8e91c176e6d50883b5aa8e58dbbff05e6606bf9e
21,109
import math


def line_angle(p1, p2):
    """Return the angle of the line that goes from p1 to p2.

    Clockwise in pygame window.
    Counter clockwise in xy-space.
    """
    angle = math.atan2((p1[1] - p2[1]), (p1[0] - p2[0])) * 180.0 / math.pi
    return (angle + 360) % 360
9bafb4e3ac4de4a30a85c04074f8ce169c116538
21,110
import os
import zipfile
import json


def save_timeline(jsonData, filePath):
    """Write the jsonData structure to a zipfile located at filePath.

    Return a message beginning with SUCCESS or ERROR.
    """
    if os.path.isfile(filePath):
        os.replace(filePath, filePath + '.bak')
        backedUp = True
    else:
        backedUp = False
    try:
        with zipfile.ZipFile(filePath, 'w',
                             compression=zipfile.ZIP_DEFLATED) as f:
            f.writestr('timeline.json', json.dumps(jsonData))
    except Exception:
        if backedUp:
            os.replace(filePath + '.bak', filePath)
        return 'ERROR: Cannot write "' + os.path.normpath(filePath) + '".'
    return 'SUCCESS: "' + os.path.normpath(filePath) + '" written.'
8b9f8dc2a5b52912ad576012105335d8f39c54d1
21,111
def create_profile_name_from_role_arn(role_arn, account_alias,
                                      profile_name_format):
    """Create a profile name for a given role ARN and account alias."""
    profile_name = role_arn.split("role/")[-1].replace("/", "-")
    if profile_name_format == "RoleName-AccountAlias":
        return f"{profile_name}-{account_alias}"
    return profile_name
3df10e12ccae591f963e25bb9df3d8a7634c7463
21,112
import subprocess


def verify_git_repo(giturl):
    """Verify that the giturl passed in can be connected to.

    This can be used as a check for the existence of the given repo and/or
    basic git remote connectivity.

    Returns True if the connection was successful, False otherwise.
    """
    if not giturl:
        return False
    gitcmd = "git ls-remote %s > /dev/null 2>&1" % (giturl)
    rc = subprocess.call(gitcmd, shell=True)
    if rc == 0:
        return True
    return False
b2dd4670ca7bf98a8e54b8ff72875db2f4958483
21,113
def GetPDBAtomNames(mol):
    """Extracts PDB atom names."""
    names = {}
    for i, atom in enumerate(mol.GetAtoms()):
        name = atom.GetPDBResidueInfo().GetName()
        names[name.strip()] = i
    return names
e983861fb6e581d5a40eba9b5e4febe77206faae
21,115
def cache_hostinfo(environ):
    """Processes the host information and stores a copy.

    This work was previously done but wasn't stored in environ, nor is it
    guaranteed to be setup in the future (Routes 2 and beyond).

    cache_hostinfo processes environ keys that may be present to determine
    the proper host, protocol, and port information to use when generating
    routes.
    """
    hostinfo = {}
    if environ.get('HTTPS') or environ.get('wsgi.url_scheme') == 'https' \
       or 'https' in environ.get('HTTP_X_FORWARDED_PROTO', "").split(', '):
        hostinfo['protocol'] = 'https'
    else:
        hostinfo['protocol'] = 'http'
    if environ.get('HTTP_X_FORWARDED_HOST'):
        hostinfo['host'] = environ['HTTP_X_FORWARDED_HOST'].split(', ', 1)[0]
    elif environ.get('HTTP_HOST'):
        hostinfo['host'] = environ['HTTP_HOST']
    else:
        hostinfo['host'] = environ['SERVER_NAME']
        if environ.get('wsgi.url_scheme') == 'https':
            if environ['SERVER_PORT'] != '443':
                hostinfo['host'] += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                hostinfo['host'] += ':' + environ['SERVER_PORT']
    environ['routes.cached_hostinfo'] = hostinfo
    return hostinfo
a00ad59410586fa6c62a9c91bc0f6f24eb8b269d
21,116
def make_list(*items):
    """Makes a list out of objects."""
    new = []
    for item in items:
        if isinstance(item, str):
            new.append(item)
        else:
            new.extend(item)
    return new
3a81a3e086f410c605f69039e6295fe424c50ed8
21,117
def initialize_cache(decoding_states, attention_keys=None, memory=None,
                     memory_bias=None):
    """Creates a cache dict for tf.while_loop.

    Args:
        decoding_states: A Tensor or a structure of Tensors for the decoding
            while loop.
        attention_keys: A Tensor. The attention keys for encoder-decoder
            attention.
        memory: A Tensor. The attention values for encoder-decoder attention.
        memory_bias: A Tensor. The attention bias for encoder-decoder
            attention.

    Returns:
        A dict.
    """
    cache = {"decoding_states": decoding_states}
    # encoder-related information (not influenced by beam search)
    if attention_keys is not None:
        cache["attention_keys"] = attention_keys
    if memory is not None:
        cache["memory"] = memory
    if memory_bias is not None:
        cache["memory_bias"] = memory_bias
    return cache
c793a921ee4bef6f6fea7c8ca3e93b9233a69e07
21,118
def splitParents(parents):
    """Splits the input string into at most 3 parts:
    - father's first name
    - mother's first name
    - mother's last name

    The input is in the format:
    "{father_first_name},{mother_first_name} {mother_last_name}"
    """
    split = parents.split(',', 1)
    if len(split) == 1:
        father = ''
        mother = parents
    else:
        father = split[0].strip()
        mother = split[1].strip()
    motherSplit = mother.rsplit(' ', 1)
    if not father:
        return motherSplit
    return [father] + motherSplit
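Illustrative inputs (the names are hypothetical):

assert splitParents("John,Jane Doe") == ["John", "Jane", "Doe"]
assert splitParents("Jane Doe") == ["Jane", "Doe"]  # no comma: father omitted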
4f1621f3ce77df544b2cdcd13985c4b1000945fc
21,119
def best_known_date_variation(date_var_1digit):
    """Best known date variation used by airlines (often not specified in
    the IATA guideline).
    """
    if date_var_1digit in ['A', 'J']:
        return -1
    elif date_var_1digit in ['1', '2']:
        return int(date_var_1digit)
    else:
        return 0
40d0b79145f1bdee0273d8aae733e1d21161a7f6
21,120
from typing import Union


def shebang(filepath: str) -> Union[None, str]:
    """Returns the shebang line given a filepath, or None if it does not
    exist.

    :param filepath: path to a file w/ a shebang line
    :return: shebang line or None

    .. doctest::python

        >>> script = 'ashellscript.sh'
        >>> with open(script, 'w') as f:
        ...     f.write('#!/bin/bash\\necho "howdy"\\n')
        25
        >>> shebang(script)
        '#!/bin/bash'
        >>> from os import remove
        >>> remove(script)
    """
    with open(filepath, "r") as f:
        first = f.readline().strip("\n")
    return first if "#!" in first[:2] else None
f4ae27100e2e132ebca589fa48e92dd4029de108
21,121
import os


def is_pathname_valid(pathname):
    """Returns whether the pathname is writable and valid.

    This method was adapted from a Stack Overflow question:
    https://stackoverflow.com/a/34102855

    Args:
        pathname (str): A string representation of a system path.

    Returns:
        `True` if the passed pathname is a valid pathname for the current
        OS; `False` otherwise.
    """
    # If this pathname is either not a string or is but is empty, this
    # pathname is invalid.
    try:
        if not isinstance(pathname, str) or not pathname:
            return False

        pathname = os.path.abspath(pathname)

        # Root directory guaranteed to exist
        root_dirname = os.path.sep
        assert os.path.isdir(root_dirname)  # ...Murphy and her ironclad Law

        # Test whether each path component split from this pathname is valid
        # or not, ignoring non-existent and non-readable path components.
        for pathname_part in pathname.split(os.path.sep):
            try:
                if len(pathname_part):
                    if root_dirname is os.path.sep:
                        root_dirname += pathname_part
                    else:
                        root_dirname = root_dirname + os.path.sep + pathname_part
                    os.lstat(root_dirname)
            except OSError:
                return False
    # If a "TypeError" exception was raised, it almost certainly has the
    # error message "embedded NUL character" indicating an invalid pathname.
    except TypeError:
        return False
    # If no exception was raised, all path components and hence this
    # pathname itself are valid. (Praise be to the curmudgeonly python.)
    else:
        return True
f56894517b0c2555c98f2782fb26aa6400412225
21,122
def get_top_industry_topics_by_social_engagement(el):
    """TOP INDUSTRY TOPICS BY SOCIAL ENGAGEMENT"""
    top_topics = el.find('div', {"id": "card_mini_topics"})
    if not top_topics:
        return None
    topics = [topic.span.text
              for topic in top_topics.find_all('div', {'class': 'Showme'})]
    stat_fields = [topic.text
                   for topic in top_topics.find_all('div', {'class': 'Third Right'})]
    average_engagement = [
        {"this_site": this_site,
         "competitor_avg": competitor_avg,
         "total_avg": total_avg}
        for this_site, competitor_avg, total_avg
        in list(zip(stat_fields, stat_fields[1:], stat_fields[2:]))[::3]
    ]
    return [{'topic': topics[index], **average_engagement[index]}
            for index, _ in enumerate(topics)]
3a96690652f5772ce3d0875941b5274b4638633b
21,123
def _merge_tables(d1, d2):
    """Merge dictionaries.

    Args:
        d1 (dict): first dict to merge
        d2 (dict): second dict to merge
    """
    for key, l in d2.items():
        if key in d1:
            for item in l:
                if item not in d1[key]:
                    d1[key].append(item)
        else:
            d1[key] = l
    return d1
2f1fabcd9ef7ce2f8f53405e267561c88002f457
21,124
def _sort_face(face):
    """A sorted face starts at the minimum id and steps up to the next
    lowest. Then it just continues and order doesn't matter.
    """
    iface = face.index(min(face))
    face2 = face[iface:] + face[:iface]
    assert len(face) == len(face2), face2

    # flip the face because:
    #   [n1, n4, n3, n2]
    # is not in simplest form, so we change it to:
    #   [n1, n2, n3, n4]
    # by keeping the first node and reversing the tail:
    #   [n1] + [n4, n3, n2][::-1]
    #
    # reverse if n2 > n4
    if face2[1] > face2[-1]:
        face2 = [face2[0]] + face2[1:][::-1]

    assert len(face2) == len(face)
    return tuple(face2)
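Illustrative: a rotated and a reversed quad both reduce to the same canonical tuple:

assert _sort_face([3, 2, 1, 4]) == (1, 2, 3, 4)
assert _sort_face([1, 4, 3, 2]) == (1, 2, 3, 4)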
5c1ea8b47fce4873895b288dc5609f173808ac88
21,125
def people_speed():
    """People Speed Distribution: integers from 0 to 100 representing a
    percentage of PEOPLE split across the "Slow", "Medium", and "Fast"
    enumeratedValueSet variables.
    """
    slow = 100
    medium = 0
    fast = 100 - medium - slow
    assert slow + medium + fast == 100
    return f"""<enumeratedValueSet variable="Slow">
      <value value="{slow}"/>
    </enumeratedValueSet>
    <enumeratedValueSet variable="Medium">
      <value value="{medium}"/>
    </enumeratedValueSet>
    <enumeratedValueSet variable="Fast">
      <value value="{fast}"/>
    </enumeratedValueSet>
    """
0fcb2200ce6e186a2458e850922ba46ac85a8295
21,126
def unpad(data, length):
    """PKCS #7-style unpadding with the given block length."""
    assert length < 256
    assert length > 0
    padlen = ord(data[-1])
    assert padlen <= length
    assert padlen > 0
    assert data[-padlen:] == padlen * chr(padlen)
    return data[:-padlen]
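A quick PKCS #7 example (block length 8, str input as in the original):

padded = "HELLO" + chr(3) * 3  # 5 chars of data + 3 chars of padding
assert unpad(padded, 8) == "HELLO"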
826969b67e7d0a8b4aa65394cc06a3dbf0844dd8
21,127
import torch


def dis_primal(
    input_view: torch.Tensor,
    param: torch.Tensor,
    n_sample: int,
) -> torch.Tensor:
    """Computes distortion penalty for the primal formulation.

    Let n be the number of samples in the view of interest and p the number
    of features. In the primal formulation, the 'param' matrix is the
    p*low_dim model parameter and input_view corresponds to the input data,
    of shape n*p. The distortion penalty can be written as

        distortion = ||input_view*input_view.T
                       - input_view*param*param.T*input_view.T||_2.

    The distortion is computed as is when n < p. However, if n > p, we
    compute the following formulation:

        distortion = torch.sqrt(Tr((I - param*param.T)*input_view.T*input_view
                                   *(I - param*param.T)*input_view.T*input_view))

    to avoid computing terms that are O(n**2) in memory or runtime.

    Arguments:
        input_view: torch.Tensor, one of the two views.
        param: torch.Tensor, model parameters.
        n_sample: int, sample size of entire dataset.

    Returns:
        distortion_value: torch.Tensor, scalar value.
    """
    n_sample, p_feature = input_view.shape
    if n_sample < p_feature:
        inner_prod = torch.matmul(input_view, input_view.t())
        tmp = torch.matmul(
            torch.matmul(torch.matmul(input_view, param), param.t()),
            input_view.t())
        tmp = (inner_prod - tmp) ** 2
        distortion_value = torch.sqrt(torch.sum(tmp))
    else:
        gram = torch.matmul(input_view.t(), input_view)
        tmp = torch.matmul(param, torch.matmul(param.t(), gram))
        prod = gram - tmp
        distortion_value = torch.sqrt(torch.trace(torch.matmul(prod, prod)))
    return distortion_value
6826994ce40799e5b83059d158bd65f50c381622
21,130
import os
import glob


def GetAapt():
    """Returns the path to aapt.

    Args:
        None

    Returns:
        the pathname of the 'aapt' executable.
    """
    sdk_home = os.path.join('third_party', 'android_tools', 'sdk')
    sdk_home = os.environ.get('SDK_HOME', sdk_home)
    aapt_exe = glob.glob(os.path.join(sdk_home, 'build-tools', '*', 'aapt'))
    if not aapt_exe:
        return None
    return sorted(aapt_exe, key=os.path.getmtime, reverse=True)[0]
32868bc309708a57f3b700d916c17a2e8020286e
21,131
def create_key_by_args(*args, **kwargs) -> str:
    """Build a cache key from a list of parameters.

    :param args: positional parameters
    :param kwargs: keyword parameters
    :return: key for caching
    """
    dict_args = [f"{key}:{value}" for key, value in kwargs.items()]
    key = "_".join([*args, *dict_args])
    return key
76745b39f689bfc0a1f0286d9ba017c0387519cb
21,132
def ensure_crypto_config(partition):
    """Ensure that the 'crypto-configuration' property on the faked
    partition is initialized.
    """
    if 'crypto-configuration' not in partition.properties or \
            partition.properties['crypto-configuration'] is None:
        partition.properties['crypto-configuration'] = {}
    crypto_config = partition.properties['crypto-configuration']

    if 'crypto-adapter-uris' not in crypto_config or \
            crypto_config['crypto-adapter-uris'] is None:
        crypto_config['crypto-adapter-uris'] = []
    adapter_uris = crypto_config['crypto-adapter-uris']

    if 'crypto-domain-configurations' not in crypto_config or \
            crypto_config['crypto-domain-configurations'] is None:
        crypto_config['crypto-domain-configurations'] = []
    domain_configs = crypto_config['crypto-domain-configurations']

    return adapter_uris, domain_configs
aa83d37d98b46600a6f80540bd0a3a4ecb476e3e
21,133
def fill_dict(feed_dict, placeholders, data):
    """Feeds a dictionary of data into a dictionary of placeholders."""
    for k in data:
        feed_dict[placeholders[k]] = data[k]
    return feed_dict
31b9554a531cc7880e92371c8b3f17364a9b59de
21,134
def configure_flask_app(graph):
    """Configure a Flask application with common conventions, bound to the
    "app" key.
    """
    graph.use(
        "audit",
        "request_context",
        "basic_auth",
        "error_handlers",
        "logger",
        "opaque",
    )
    return graph.flask
95dd1d45e3dc80fc2934ac3382bcb02b320f002c
21,136
def compare_dates(converted_creation_date, rotation_date):
    """Compares the createTime date to x (default 90) days ago.

    Args:
        converted_creation_date: The datetime-formatted creation date of
            our API key.
        rotation_date: datetime-formatted "rotation_period" days ago
            (default 90). Example: 2020-09-18 13:38:52.943663
    """
    # If the createTime value for our key is over x days old (default 90),
    # return True to the key_analysis function.
    return converted_creation_date < rotation_date
7cbf3e63b1fd8ce4d5c0db386517920940dda427
21,137
def pdb_code(template_name):
    """Retrieve the PDB code of the requested template."""
    file = open("data/metafold.list", "r")
    flag = 0
    line = "123"
    code_pdb = 0
    # keep reading until the template is found or the file ends
    while flag == 0 and line != "":
        line = file.readline()
        if template_name == line.split(" ")[0]:
            # extract the pdb code
            code_pdb = (line.split(" ")[1][:-1]).split(".")[0]
            flag = 1
    file.close()
    return code_pdb
2531e9dc03d1e078f4ae2c56b43dad8c9d14803e
21,140
import os


def is_packaged():
    """Checks if the documentation is packaged."""
    return "_package" in os.listdir('.')
3aba542d74a14b7c1a0ee445a3c2cb0b218addf1
21,141
def bytes2str(val):
    """bytes to str conversion, only for python3."""
    if isinstance(val, bytes):
        return str(val, "utf8")
    else:
        return val
6109d230fe9ea67da13c9c71bb56b21fc473c204
21,142
def _deg_ord_idx(deg, order):
    """Get the index into S_in or S_out given a degree and order."""
    # The -1 here is because we typically exclude the degree=0 term
    return deg * deg + deg + order - 1
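A short worked example: for degree 1 the orders -1, 0, +1 map to indices 0, 1, 2, and the degree-2 block starts immediately after:

assert [_deg_ord_idx(1, order) for order in (-1, 0, 1)] == [0, 1, 2]
assert _deg_ord_idx(2, -2) == 3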
9aa5159a5d92e00e6f391e2ca313eb8c342f0a8d
21,143
def linkify_phone(value):
    """Render a telephone number as a hyperlink."""
    if value is None:
        return None
    return f"tel:{value}"
3eacfe7dc80b873d0c9b7df2cd92daf66bceca84
21,145
def _convert_unit(size_string):
    """Convert given string to size in megabytes.

    :param string size_string: Size with unit
    :returns integer: Converted size from given unit
    :rtype integer:
    """
    size, unit = size_string.split(' ')
    if 'M' in unit:
        return int(float(size))
    elif 'G' in unit:
        return int(float(size)) * 1024
    elif 'T' in unit:
        return int(float(size)) * 1024 * 1024
c0581514194f95d0e5fd871edd559e09f9fc2234
21,146
def _norm(adj_mat):
    """Normalize an adjacency matrix by its column sums."""
    norm = adj_mat.sum(axis=0)
    norm[norm == 0] = 1
    return adj_mat / norm
4d1dda6e50d194a8426a56ae044b202ebe9074d7
21,147
import os
from datetime import datetime


def healthcheck():
    """Going to the doctor."""
    message = "alive and kicking"
    version = os.getenv("SHORT_SHA", "local")
    response = {
        "message": message,
        "version": version,
        "time": datetime.utcnow()
    }
    return response
e243d64ae7270eb608d5d6922a5505c33b9da53a
21,148