Columns: content — string (35 to 416k chars) · sha1 — string (40 chars) · id — int64 (0 to 710k)
def obstruction(info_dict):
    """Obstruction due to M2."""
    # The M2 factor (e.g. 1.05) enlarges the M2 diameter to take the support arms into account
    info_dict["obstruction"] = (
        info_dict["M2_factor"] * info_dict["D_M2"]
    ) ** 2.0 / info_dict["D_M1"] ** 2.0
    return info_dict
b67cc8b87d68dbd386d75fdb869f999072085607
701,632
def parse_response(response):
    """
    :param response: output of boto3 rds client describe_db_instances
    :return: an array; each element is a 3-element array with
        DBInstanceIdentifier, Engine, and Endpoint Address

    Example:
    [
        ['devdb-ldn-test1', 'mysql', 'devdb-ldn-test.cjjimtutptto.eu-west-2.rds.amazonaws.com'],
        ['devdb-ldn-test2', 'postgres', 'devdb-ldn-test.cjjimtutptto.eu-west-2.rds.amazonaws.com'],
        ...
    ]
    """
    res = []
    # json output parse
    for db in response['DBInstances']:
        res.append([db['DBInstanceIdentifier'], db['Engine'], db['Endpoint']['Address']])
    return res
edaa4abbb695adb06c43dc93d70178bc10a82445
701,633
def format_csv(factor_name, entry):
    """Format a data entry as a csv line."""
    return "%s, %s, %s" % (entry[factor_name], entry['quarter'], entry['count'])
8d3f4f794f58f6aa0c6d259fcda124340df8d4da
701,634
def _remove_quotes(values):
    """Remove any quotes from quoted values."""
    removed = []
    for value in values:
        if value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        removed.append(value)
    return removed
a75bd25198a56a28748af059647e62c26df74232
701,635
def consolidate(voice, length):
    """
    Join notes of the same pitch together and increase their duration.

    No attempt is made to handle ties between bars; they are just treated
    as separate notes.

    :param voice:
    :param length:
    :return:
    """
    out = list()
    try:
        for _ in range(0, length):
            duration = 1
            prev_n = voice.popleft()
            for i in range(1, 16):
                n = voice.popleft()
                if n == prev_n:
                    duration += 1
                else:
                    out.append('{0}{1}'.format(prev_n, 16 // duration))
                    duration = 1
                    prev_n = n
            out.append('{0}{1}'.format(prev_n, 16 // duration))
        return out
    except IndexError:
        return out
6e6a60be0a6438b6f7c5fb23a1be720d5486e693
701,637
import json


def get_dimensions(cube_id):
    """
    For this test data we will use a predefined dimension object matching
    cube-420 in the acceptance environment, but this could be read from the
    TAP service.
    """
    dims = json.loads(
        '{"axes": [{"name": "RA", "numPixels": "4096", "pixelSize": "5.5555555555560e-04", "pixelUnit": "deg"},'
        + '{"name": "DEC", "numPixels": "4096", "pixelSize": "5.5555555555560e-04", "pixelUnit": "deg"},'
        + '{"name": "STOKES", "numPixels": "1", "pixelSize": "1.0000000000000e+00", "pixelUnit": " ",'
        + '"min": "5.0000000000000e-01", "max": "1.5000000000000e+00", "centre": "1.0000000000000e+00"},'
        + '{"name": "FREQ", "numPixels": "16416", "pixelSize": "1.0000000000000e+00", "pixelUnit": "Hz",'
        + '"min": "1.2699999995000e+09", "max": "1.2700164155000e+09", "centre": "1.2700082075000e+09"}],'
        + '"corners": [{"RA": "1.8942941872444e+02", "DEC": "5.3846168509499e+01"},'
        + '{"RA": "1.8557152279432e+02", "DEC": "5.3846183833748e+01"},'
        + '{"RA": "1.8545899454910e+02", "DEC": "5.6120973603008e+01"},'
        + '{"RA": "1.8954200183991e+02", "DEC": "5.6120957384947e+01"}],'
        + '"centre": {"RA": "1.8750048428742e+02", "DEC": "5.4999722221261e+01"}}')
    return dims
aab639236510785649fd02ff80c795d7941007fd
701,638
from pathlib import Path

import yaml


def load_yaml_config(file_path: str | Path) -> dict:
    """
    Parameters
    ----------
    file_path : str or Path
        Yaml config file name. The file is assumed to be in the repo's
        config directory.

    Returns
    -------
    config : dict
        Configuration parameters stored in a dictionary.
    """
    file_path = Path(file_path)
    with open(file_path) as file:
        # CLoader requires LibYAML; swap in yaml.SafeLoader if it is unavailable
        config = yaml.load(file, Loader=yaml.CLoader)
    return config
88a137807d6d1caabd3b8f7f7a03a3be3f04bdfe
701,639
def split_in_words(text, num_letters):
    """
    Split a long text (without spaces) into words of num_letters letters.

    For instance, if text is "niijighkqj" and num_letters is 4, this will
    return the unique words among
    ["niij", "iiji", "ijig", "jigh", "ighk", "ghkq", "hkqj"]
    (the final set() call removes duplicates and does not preserve order).

    :param text: Text to split into words
    :param num_letters: Size of each word
    :return: A list of unique words representing the text
    """
    words = []
    for i in range(0, len(text)):
        if i + num_letters > len(text):
            break
        w = text[i:i + num_letters]
        words.append(w)
    return list(set(words))
334be65a1592b13056dede738fb5cabb67e79236
701,640
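A hand-checked usage sketch for split_in_words above, using the text from its docstring:

words = split_in_words("niijighkqj", 4)
# set() in the implementation drops duplicates and scrambles order, so compare sorted
assert sorted(words) == ["ghkq", "hkqj", "ighk", "iiji", "ijig", "jigh", "niij"]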
import re


def get_specified_file(file_names, *args):
    """Get file names with the specified extensions from a list of file names."""
    specified_filename = []
    if not args:  # `args is ()` is never true and warns on modern Python
        raise Exception('get_specified_file() missing at least 1 required specified argument')
    for name in file_names:
        for extension in args:
            if re.fullmatch(r'.+\.%s' % extension, name):
                specified_filename.append(name)
                break
    return specified_filename
195e6d1556afb46fd98972c6c7f5a4b897efbdc2
701,641
import re


def has_sh_placeholders(message):
    """Returns true if the message has placeholders."""
    return re.search(r'\$\{(\w+)\}', message) is not None
9bd5b4a22c89cfa1d45ea28bf7121cd4171828ee
701,642
import re


def _find_char(input_char):
    """Find runs of ASCII letters, digits and the characters = _ / . in the
    input string."""
    result = re.findall(r'[a-zA-Z=_/0-9.]+', str(input_char))
    return result
b89bc97e0b73c71ec6a1a875414b73e16c9d6036
701,643
import functools


def verifyrun(func):
    """Prints whether the decorated function ran."""
    @functools.wraps(func)
    def wrapper_verifyrun(*args, **kwargs):
        print(f'Ran {func.__name__!r} from {func.__module__}.')
        value = func(*args, **kwargs)
        return value
    return wrapper_verifyrun
5f2d1289573a9069f283e508b1ce133eccfe3529
701,644
def bold_viewed(val, viewed_pages):
    """
    Takes a scalar and returns a CSS string with the `font-weight` property
    set to bold if the value is in viewed_pages, normal otherwise.
    """
    weight = 'bold' if val in viewed_pages else 'normal'
    return 'font-weight: %s' % weight
f7cbe6b3d3736926841941dfbc8cc4595a1eda62
701,645
def get_can_reach_set(n, reach_dic, max_trip_duration=150):
    """Return the set of all nodes whose trip to node n takes less than
    "max_trip_duration" seconds.

    Arguments:
        n {int} -- target node id
        reach_dic {dict[int][dict[int][set]]} -- Stores the node ids whose
            distance to n is within max. trip duration (e.g., 30, 60, etc.)

    Keyword Arguments:
        max_trip_duration {int} -- Max. trip duration in seconds a node can
            be distant from n (default: {150})

    Returns:
        Set -- Set of nodes that can reach n in less than max_trip_duration
            seconds.
    """
    can_reach_target = set()
    for t in reach_dic[n].keys():
        if t <= max_trip_duration:
            can_reach_target.update(reach_dic[n][t])
    return can_reach_target
f422db246ac926c8d0e2144ef42c09ba06b8a488
701,646
def create_empty_gid_matrix(width, height):
    """Creates a matrix of the given size initialized with all zeroes."""
    return [[0] * width for _ in range(height)]
8f1a0adf9e45bb6fc267a5cec3079657dbace51d
701,647
def value_to_none_low_medium_high(confidence_value):
    """
    This method will transform an integer value into the None / Low / Med /
    High scale string representation.

    The scale for this confidence representation is the following:

    .. list-table:: STIX Confidence to None, Low, Med, High
        :header-rows: 1

        * - Range of Values
          - None/Low/Med/High
        * - 0
          - None
        * - 1-29
          - Low
        * - 30-69
          - Med
        * - 70-100
          - High

    Args:
        confidence_value (int): An integer value between 0 and 100.

    Returns:
        str: A string corresponding to the None / Low / Med / High scale.

    Raises:
        ValueError: If `confidence_value` is out of bounds.
    """
    if confidence_value == 0:
        return 'None'
    elif 1 <= confidence_value <= 29:
        return 'Low'
    elif 30 <= confidence_value <= 69:
        return 'Med'
    elif 70 <= confidence_value <= 100:
        return 'High'
    else:
        raise ValueError("Range of values out of bounds: %s" % confidence_value)
ac3b39ae12591408fca2f8b4e844b535e7b1aaa3
701,648
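A quick sanity sketch of the scale mapping above (values chosen from each band):

assert value_to_none_low_medium_high(0) == 'None'
assert value_to_none_low_medium_high(15) == 'Low'
assert value_to_none_low_medium_high(50) == 'Med'
assert value_to_none_low_medium_high(85) == 'High'
# anything outside 0-100 raises ValueError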
def process_detail(hvr_client, hub_name, channel, source, target):
    """Get all process details."""
    rt = {}
    jobs = hvr_client.get_hubs_jobs(hub=hub_name)
    for job_name in jobs:
        if (
            job_name == f"{channel}-activate"
            or job_name == f"{channel}-refr-{source}-{target}"
            or job_name == f"{channel}-cmp-{source}-{target}"
        ):
            if job_name == f"{channel}-activate":
                short = "activate"
            elif job_name == f"{channel}-cmp-{source}-{target}":
                short = "compare"
            else:
                short = "refresh"
            result_pattern = (
                "Table_State|Table_Start_Time|Source_Rows_Used|Subtasks_Done"
                "|Subtasks_Total|Subtasks_Busy|Rows_Only_On_Target"
                "|Rows_Only_On_Source|Rows_Which_Differ"
            )
            event_status_rp = hvr_client.get_hubs_events(
                hub=hub_name,
                job=job_name,
                fetch_results=True,
                result_pattern=result_pattern,
                max_events=1,
            )
            for ev_id in event_status_rp:  # there is one key, ev_id
                rt[short] = event_status_rp[ev_id]
        else:
            if job_name == f"{channel}-cap-{source}":
                rt["capture"] = jobs[job_name]
            elif job_name == f"{channel}-integ-{target}":
                rt["integrate"] = jobs[job_name]
    return rt
0a8e2f1bc0f45c1e4854421497bb809c7d8ef457
701,649
from typing import Iterable


def iterify(x):
    """Return an iterable form of a given value."""
    if isinstance(x, Iterable):
        return x  # note: strings are Iterable, so they are returned unchanged
    else:
        return (x,)
85373e5ac0e03caf2115096088ce92ca27b65b4a
701,650
import os


def get_subfolder(path, subfolder, init=True):
    """
    Check if subfolder already exists in given directory; if not, create one.

    :param path: Path in which subfolder should be located (String)
    :param subfolder: Name of the subfolder that must be created (String)
    :param init: Initialize folder with __init__.py file (Bool)
    :return: Path name if it exists or can be created, raise exception otherwise
    """
    if subfolder and subfolder[-1] not in ['/', '\\']:
        subfolder += '/'

    # Path exists
    if os.path.isdir(path) or path == '':
        if not os.path.isdir(path + subfolder):
            # Folder does not exist, create new one
            os.mkdir(path + subfolder)
            if init:
                with open(path + subfolder + '__init__.py', 'w') as f:
                    f.write('')
        return path + subfolder

    # Given path does not exist, raise Exception
    raise FileNotFoundError("Path '{p}' does not exist".format(p=path))
2109aa0c8e8d402f6ced8437e5399b754d9cbfa9
701,651
import binascii
import os


def choose_boundary() -> str:
    """Random boundary name."""
    return binascii.hexlify(os.urandom(16)).decode("ascii")
6335e14abc34652141e7c77989d60fcb40ec3d17
701,652
def load_ignore(fname):
    """Loads patterns signalling lines to ignore.

    Args:
        fname: File name containing patterns

    Returns:
        A list of patterns
    """
    values = []
    with open(fname, 'r') as f:
        lines = f.readlines()
    for line in lines:
        values.append(line.rstrip('\n'))
    return values
6a2b4aad3bb4f2747e91a0b1a9a58b70187d1d1e
701,653
def _parse_xtekct_file(file_path):
    """Parse an X-tek-CT file into a dictionary.

    Only = is considered a valid separator.

    Parameters
    ----------
    file_path : string
        The path to the file to be parsed

    Returns
    -------
    dict
        A dictionary containing all key-value pairs found in the X-tek-CT
        input file
    """
    myvars = {}
    with open(file_path) as myfile:
        for line in myfile:
            name, var = line.partition("=")[::2]
            try:
                if "." in var:
                    myvars[name.strip()] = float(var)
                else:
                    myvars[name.strip()] = int(var)
            except ValueError:
                myvars[name.strip()] = var
    return myvars
744f67638c50573cfa7d568238b488f12e59217b
701,654
def removeObstacle(numRows, numColumns, lot):
    """
    See shortestMazePath for more info. This is similar to shortestMazePath
    with slightly different conditions.
    1 <= numRows, numColumns <= 1000
    """
    possible_paths = {
        'left': [-1, 0],
        'right': [1, 0],
        'up': [0, 1],
        'down': [0, -1]
    }
    # Note: the parameters are overwritten with the actual lot dimensions
    numRows, numColumns, dist = len(lot), len(lot[0]), 0
    queue = [(0, 0, lot[0][0])]  # (x, y, val)
    visited = set()  # Points already explored
    while queue:
        next_level = []  # renamed from `next`, which shadowed the builtin
        for x, y, val in queue:
            if val == 9:
                return dist
            if (x, y) not in visited:
                for x1, y1 in possible_paths.values():
                    nextX, nextY = x + x1, y + y1
                    if 0 <= nextX < numRows and 0 <= nextY < numColumns:
                        next_level.append((nextX, nextY, lot[nextX][nextY]))
                visited.add((x, y))
        queue = next_level
        dist += 1
    return -1
2a1d541742b478f1132b01e018a7aed1b9e7493c
701,655
def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff):
    """ Utility called by gen_design. It reads in one or more stimulus timing
        files conforming to AFNI style, and returns a list (of size
        ``[number of runs * number of conditions]``) of dictionaries including
        onsets, durations and weights of each event.

    Parameters
    ----------
    stimtime_files: a string or a list of string.
        Each string is the name of the file storing the stimulus timing
        information of one task condition. The contents in the files should
        follow the style of AFNI stimulus timing files, refer to gen_design.
    n_C: integer, number of task conditions
    n_S: integer, number of scans
    scan_onoff: list of numbers.
        The onset of each scan after concatenating all scans, together with
        the offset of the last scan. For example, if 3 scans of duration
        100s, 150s, 120s are run, scan_onoff is [0, 100, 250, 370]

    Returns
    -------
    design_info: list of stimulus information
        The first level of the list corresponds to different scans.
        The second level of the list corresponds to different conditions.
        Each item in the list is a dictionary with keys "onset", "duration"
        and "weight". If one condition includes no event in a scan, the
        values of these keys in that scan of the condition are empty lists.

    See also
    --------
    gen_design
    """
    design_info = [[{'onset': [], 'duration': [], 'weight': []}
                    for i_c in range(n_C)] for i_s in range(n_S)]
    # Read stimulus timing files
    for i_c in range(n_C):
        with open(stimtime_files[i_c]) as f:
            text = f.readlines()
        assert len(text) == n_S, \
            'Number of lines does not match number of runs!'
        for i_s, line in enumerate(text):
            events = line.strip().split()
            if events[0] == '*':
                continue
            for event in events:
                assert event != '*'
                tmp = str.split(event, ':')
                if len(tmp) == 2:
                    duration = float(tmp[1])
                else:
                    duration = 1.0
                tmp = str.split(tmp[0], '*')
                if len(tmp) == 2:
                    weight = float(tmp[1])
                else:
                    weight = 1.0
                if (float(tmp[0]) >= 0
                        and float(tmp[0]) < scan_onoff[i_s + 1] - scan_onoff[i_s]):
                    design_info[i_s][i_c]['onset'].append(float(tmp[0]))
                    design_info[i_s][i_c]['duration'].append(duration)
                    design_info[i_s][i_c]['weight'].append(weight)
    return design_info
5fcda88f9d29606ee2e13e005d9523d58ef88e9c
701,656
def isok(num: int):
    """Return True if the given number consists only of the digits 0 and 1."""
    cnv = set(int(n) for n in str(num))
    return all(e in {0, 1} for e in cnv)
bb47608695029ef9450b29b7c69c16a00f1032f5
701,657
def heapsort(values):
    """Heapsorts a list of values in nondecreasing order."""
    length = len(values)

    def pick_child(parent):
        left = parent * 2 + 1
        if left >= length:
            return None
        right = left + 1
        if right == length or values[left] >= values[right]:
            return left
        return right

    def sift_down(parent):
        while True:
            child = pick_child(parent)
            if child is None or values[parent] >= values[child]:
                break
            values[parent], values[child] = values[child], values[parent]
            parent = child

    # Convert the list into a maxheap.
    for parent in range(length // 2, -1, -1):
        sift_down(parent)

    # Extract each element from the maxheap, placing them from right to left.
    while length > 1:
        length -= 1
        values[0], values[length] = values[length], values[0]
        sift_down(0)
74e1afaf33e474611e842e97032a60a28fe5664d
701,658
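A short in-place usage sketch for heapsort above:

values = [9, 4, 7, 1, 8, 2]
heapsort(values)  # sorts in place; returns None
assert values == [1, 2, 4, 7, 8, 9]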
def get_array_info(subs, dictofsubs):
    """
    Returns information needed to create and access members of the numpy array
    based upon the string names given to them in the model file.

    Parameters
    ----------
    subs : Array of strings of subscripts
        These should be all of the subscript names that are needed to create the array
    dictofsubs : dictionary

    Returns
    -------
    A dictionary of the dimensions associating their names with their numpy indices
        directory = {'dimension name 1': 0, 'dimension name 2': 1}

    A list of the length of each dimension. Equivalently, the shape of the array:
        shape = [5, 4]
    """
    # subscript references here are lists of array 'coordinate' names
    if isinstance(subs, list):
        element = subs[0]
    else:
        element = subs
    # we collect the references used in each dimension as a set, so we can compare contents
    position = []
    directory = {}
    dirpos = 0
    elements = element.replace('!', '').replace(' ', '').split(',')
    for element in elements:
        if element in dictofsubs.keys():
            if isinstance(dictofsubs[element], list):
                dir, pos = get_array_info(dictofsubs[element][-1], dictofsubs)
                position.append(pos[0])
                directory[dictofsubs[element][-1]] = dirpos
                dirpos += 1
            else:
                position.append(len(dictofsubs[element]))
                directory[element] = dirpos
                dirpos += 1
        else:
            for famname, value in dictofsubs.items():  # was Python 2 iteritems()
                try:
                    value[element]
                except Exception:  # element is not a member of this family
                    pass
                else:
                    position.append(len(value))
                    directory[famname] = dirpos
                    dirpos += 1
    return directory, position
ad97545278eddd12e8098dc623024beb797baebc
701,659
import click


def validate_nonempty(ctx, param, value):
    """Validate parameter is not an empty string."""
    if not value.strip():
        raise click.BadParameter('value cannot be empty')
    return value
a8dd9a81c7fc7b0d0064fe34e849d35c0ce52a04
701,660
import re


def get_genres_from_soup(soup):
    """Get the genres of a book.

    Parameters
    ----------
    soup : BeautifulSoup
        BeautifulSoup object created from a book page.

    Returns
    -------
    list
        Book genres.
    """
    genres_elements = soup.find_all('a', {'href': re.compile('/genres/')},
                                    class_='bookPageGenreLink')
    return list(map(lambda element: element.get_text(), genres_elements))
16db0fc8cb58cdcf19aa89ea8fef27078d33a390
701,661
def truncate(string, length, extra=0, add_whitespace=True):
    """
    Add whitespace to strings shorter than the length; truncate strings longer
    than the length, replacing the last character with an ellipsis.
    """
    # Strip whitespace
    base = string.strip()
    difference = length - len(base)
    if difference >= 0:  # >= so strings that fit exactly are not truncated
        whitespace = difference * " " if add_whitespace else ""
        return base + whitespace + extra * " "
    else:
        return base[0:(length - 1)] + "…" + extra * " "
228612cc4591122085ca358513eac7d025b19e77
701,662
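A small sketch of the padding/truncation behaviour above (assuming the corrected `difference >= 0` branch):

assert truncate("abc", 5) == "abc  "     # padded out to the target length
assert truncate("abcdef", 5) == "abcd…"  # truncated; ellipsis replaces the tail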
import torch


def accuracy(predictions, labels):
    """Evaluate accuracy from model predictions against ground truth labels."""
    ind = torch.argmax(predictions, 1)
    # Keep labels only for samples where a prediction is available (during
    # training, not every sample's prediction is returned, for efficiency reasons)
    labels = labels[-predictions.size()[0]:]
    accuracy = torch.sum(torch.eq(ind, labels)).item() / \
        labels.size()[0] * 100.0
    return accuracy
484ba64b2239363daddd206e747f6c1456e236c9
701,663
def inet_ntoa(i):
    """Convert an int to dotted quad."""
    return '.'.join(map(str, [(i >> (3 - j) * 8) & 0xff for j in range(4)]))
b83a6b08118bcd7858cb588f53b71daaf31d358e
701,664
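For illustration, a round trip with a hypothetical inverse (inet_aton below is not part of the original snippet):

def inet_aton(quad):
    # Hypothetical inverse of inet_ntoa above: dotted quad back to int
    parts = [int(p) for p in quad.split('.')]
    return sum(p << (3 - j) * 8 for j, p in enumerate(parts))

assert inet_ntoa(0xC0A80001) == '192.168.0.1'
assert inet_aton('192.168.0.1') == 0xC0A80001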
def cleanup_string(string):
    """
    >>> cleanup_string(', Road - ')
    'road'
    >>> cleanup_string(',Lighting - ')
    'lighting'
    >>> cleanup_string(', Length - ')
    'length'
    >>> cleanup_string(None)
    ''
    >>> cleanup_string(' LIT ..')
    'lit'
    >>> cleanup_string('poor.')
    'poor'
    """
    if string is None:
        return ''
    string = string.replace(',', '')
    string = string.replace('.', '')
    string = string.replace(' ', '')
    string = string.replace('-', '')
    string = string.lower()
    return string
5f9a369a52b798ff8c26bea56fbfe585b3612db0
701,665
def is_field_allowed(name, field_filter=None):
    """
    Check if a field name is eligible for being split.
    For example, '__str__' is not, but 'related__field' is.
    """
    if field_filter in ["year", "month", "week", "day", "hour", "minute", "second"]:
        return False
    return (isinstance(name, str) and not name.startswith('__')
            and not name.endswith('__') and '__' in name)
8be38b79bab3aeb49219155db0159cc143c38111
701,666
def merge_user_settings(settings):
    """Return the default linter settings merged with the user's settings."""
    user = settings.get('user', {})
    default = settings.get('default', {})

    if user:
        tooltip_styles = default.get('tooltip_styles', {})
        user_tooltip_styles = user.get('tooltip_styles', {})
        for field in user_tooltip_styles:
            if field in tooltip_styles:
                tooltip_styles[field] = user_tooltip_styles[field]
        default['tooltip_styles'] = tooltip_styles

        user.pop('tooltip_styles', None)
        default.update(user)

    return default
969457f907d8431c9af6ef8a1b587575cb3ba681
701,667
from typing import Optional
from typing import Dict
import os


def true_color_supported(env: Optional[Dict[str, str]] = None) -> bool:
    """Check if truecolor is supported by the current tty.

    Note: this currently only checks to see if COLORTERM contains one of the
    following enumerated case-sensitive values:

    - truecolor
    - 24bit
    """
    if env is None:
        env = dict(os.environ)
    color_term = env.get("COLORTERM", "")
    return any(check in color_term for check in ["truecolor", "24bit"])
dc69282c90b57bec6ad2eb348ca6c327aacb5426
701,669
def strip_outer_matching_chars(s, outer_char):
    """
    If a string has the same characters wrapped around it, remove them.
    Make sure the pair match.
    """
    s = s.strip()
    # length guard avoids an IndexError on empty strings
    if len(s) >= 2 and (s[0] == s[-1]) and s.startswith(outer_char):
        return s[1:-1]
    return s
b55d1f966a8b216dce2d4817117f350f639b9b83
701,671
import sys


def get_sentence(lower=True):
    """Simple function to prompt the user for input and return it without a
    newline. Frequently used in chat sessions, of course.
    """
    sys.stdout.write("Human: ")
    sys.stdout.flush()
    sentence = input()
    if lower:
        return sentence.lower()
    return sentence
8e0ae0591bace7da27dc0d044272793fd397276a
701,672
def mclag_ka_session_dep_check(ka, session_tmout):
    """Check that the MCLAG keepalive timer and session timeout values are
    multiples of each other and that keepalive is < the session timeout value.
    """
    if not session_tmout >= (3 * ka):
        return False, "MCLAG Keepalive:{} Session_timeout:{} values do not satisfy session_timeout >= (3 * KA)".format(ka, session_tmout)

    if session_tmout % ka:
        return False, "MCLAG Keepalive:{} Session_timeout:{} values do not satisfy: session_timeout should be a multiple of KA".format(ka, session_tmout)

    return True, ""
3f3fd6a12711c0c290cdb0fbd68cfd1c743ef515
701,673
def lucas(n):
    """Compute the nth Lucas number."""
    a, b = 2, 1  # notice that all I had to change from fib were these values
    if n == 0:
        return a
    for _ in range(n - 1):
        a, b = b, a + b
    return b
9d9404edf59690cafc49ba70d7dc776376d1f020
701,674
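A quick hand-checked sketch of the first few Lucas numbers from the function above:

assert [lucas(n) for n in range(7)] == [2, 1, 3, 4, 7, 11, 18]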
def _get_edge_attrs(edge_attrs, concat_qualifiers):
    """Get edge attrs; always returns a list for qualifiers."""
    attrs = dict()
    # Missing or None qualifiers become an empty list; copy so that extending
    # below never mutates the caller's list in place
    attrs["qualifiers"] = list(edge_attrs.get("qualifiers") or [])
    # add here the qualifiers afterwards from merged supernodes
    if concat_qualifiers is not None:
        attrs["qualifiers"] += concat_qualifiers
    for key in edge_attrs.keys():
        if key != "qualifiers":
            attrs[key] = edge_attrs[key]
    return attrs
8500ae7c606041071264d980155fbada98ba870e
701,675
def pair_align(a, b):
    """
    Accurate Registration.

    :param a: Point cloud for previous frame.
    :param b: Point cloud for current frame.
    :return: The matrix.
    """
    # Placeholder: the registration itself is not implemented yet
    x = 0
    return x
033515d87b900a7790c4a461409138d420aa164d
701,678
from typing import Tuple
from typing import Union


def _verify_data_shape(data, shape, path=None) -> Tuple[bool, Union[str, None]]:
    """
    _verify_data_shape(
        {'data': []},
        {'data': list}
    ) == (True, None)

    _verify_data_shape(
        {'data': ''},
        {'data': list}
    ) == (False, '.data')

    _verify_data_shape(
        {'data': '', 'empty': 10},
        {'data': list}
    ) == (False, '.data')

    This function is what handles the data shape verification. You can use
    this function, or the decorator, on uploaded data to verify its shape
    before use. You can basically write out what the shape should look like.
    This function supports nested dictionaries.

    Here, we will return a tuple of a boolean indicating success or failure,
    and an error string. If there was an error validating a given field, the
    error string will be a path to the unvalidated field. An example would be:

    _verify_data_shape(
        {'data': ''},
        {'data': list}
    ) -> (False, '.data')

    :return: success as bool, error path
    """
    if path is None:
        path = ""

    if shape is dict or shape is list:  # Free, just need a type match
        return (True, None) if isinstance(data, shape) else (False, path)

    # Verify constant data against a basic type in the shape
    for _t in [int, str, float]:
        if isinstance(data, _t):
            return (True, None) if shape == _t else (False, path)

    if isinstance(data, dict):
        # Verify dict keys
        for s_key, s_value in shape.items():
            # Verify key is included
            if s_key not in data:
                return False, path + "." + s_key

            # Free basic types ( e.g. str in the shape means "any str here" )
            if s_value in (int, str, float):
                if not isinstance(data[s_key], s_value):
                    return False, path + "." + s_key

            # Explicit constants ( e.g. '' in the shape means "a str here" )
            elif isinstance(s_value, (int, str, float)):
                if not isinstance(data[s_key], type(s_value)):
                    return False, path + "." + s_key

            # Free dict or list ( no need to verify more )
            elif s_value is dict or s_value is list:
                if not isinstance(data[s_key], s_value):
                    return (
                        False,
                        path + ".[" + s_key + "]" if s_value is list
                        else path + "." + s_key,
                    )

            # Explicit dict ( need to recurse )
            elif isinstance(s_value, dict):
                r, e = _verify_data_shape(data[s_key], s_value, path + "." + s_key)
                if r is False:
                    return r, e

            # Explicit list ( need to recurse on the elements )
            elif isinstance(s_value, list):
                # If we have a type specified in the list, we should iterate,
                # then recurse on the elements of the data. Otherwise there is
                # nothing to do.
                if len(s_value) == 1:
                    element_shape = s_value[0]
                    for item in data[s_key]:
                        r, e = _verify_data_shape(
                            item, element_shape, path + ".[" + s_key + "]"
                        )
                        if r is False:
                            return r, e
                # note: the original returned early here for free dict/list
                # values, skipping any remaining keys; we continue instead

    return True, None
3e76362938146972d96e34a22373b43dca23381b
701,680
import tempfile
import os
import tarfile


def get_tarinfo(path):
    """Gets the `TarInfo` object for the file at the specified path.

    This contains useful information such as the owner and the group.

    @param path: The path.
    @type path: str

    @return: The info for that path
    @rtype: tarfile.TarInfo
    """
    # The `tarfile` library does not let us get this directly. We need to
    # actually open a tarfile for writing and have it look at the file path.
    # So, we create a temporary file to write it to.
    fd, fn = tempfile.mkstemp()
    file_obj = os.fdopen(fd, 'wb')
    try:
        tmp_tar = tarfile.open(fn, fileobj=file_obj, mode='w:gz')
        result = tmp_tar.gettarinfo(path)
        tmp_tar.close()
        return result
    finally:
        file_obj.close()
365d978083c494bf76c518eeb0fecc8f06d32a0a
701,681
from datetime import datetime


def _prepare_transactions(response, telegram_id, mcc_codes):
    """Parse response from monobank API and return formatted transactions."""
    transactions = []
    costs_converter = 100.0
    for transaction in response:
        transactions.append((
            transaction["id"],
            telegram_id,
            transaction["amount"] / costs_converter,
            transaction["balance"] / costs_converter,
            transaction["cashbackAmount"] / costs_converter,
            transaction["mcc"] if transaction["mcc"] in mcc_codes else -1,
            datetime.fromtimestamp(transaction["time"]),
            transaction["description"],
        ))
    return transactions
0794c0761e7232bf517dd867f4d8bab252b2c708
701,684
def create_dict_playlists_playlistids(p_list, pid_list):
    """Create a dictionary of playlists and playlist ids."""
    # Pair each playlist with the id at the same index
    return dict(zip(p_list, pid_list))
173850ed85b3dc774ddea14674e22e701991c807
701,685
def required_input(message, method=input):
    """Collect input from the user and repeat until they answer."""
    result = method(message)
    while not result:
        result = method(message)
    return result
c9a21d6e63ab6bdde081db471cf0d2420f9047ea
701,686
def get_first_char(value):
    """
    Returns the first char of the given string.

    :param value:
    :return:
    """
    return value[:1]
98207e7269371f0177f45a2c85f874e1d9bbb756
701,687
import json


def load_instructions(ufilename):
    """Loads a json file that describes the titlecard.

    Expand this, maybe in the readme/docs, because it's pretty much the heart
    and soul of the program. In general, the file is pairings of json keys and
    values. There are two keys that are handled specially; the 'background'
    key is drawn first and everything else is drawn on top of it with no
    specific order. The two text-items will be for static text and ranges of
    information handled with a format string. If x or y is omitted from a
    caption, make it align center for that value.
    """
    with open(ufilename, 'r') as f:  # close the file instead of leaking the handle
        return json.load(f)
e6b0d5ed81f5bc5c3a1837455f3815b282533e80
701,688
def dfs_search_recursive(G, src):
    """Entry to recursive Depth First Search."""
    marked = {}
    node_from = {}

    def dfs(v):
        """Recursive DFS."""
        marked[v] = True
        for w in G[v]:
            if w not in marked:
                node_from[w] = v
                dfs(w)

    dfs(src)
    return node_from
b433aba6a0d397c355ce578a867cfdebcccd980d
701,689
def is_cat(filename: str) -> bool:
    """
    Returns true if filename is an image of a cat.

    In the dataset we are using this is indicated by the first letter of the
    filename; cats are labeled with uppercase letters, dogs with lowercase ones.
    """
    result = filename[0].isupper()
    # print(f"File: {filename}, initial: '{filename[0]}', result: {result}")
    return result
ad56c7c3ae28951fc31bcf70fece29bf934e4cec
701,690
def analyze_text(filename):
    """
    Calculate the number of lines and characters in a file.

    :param filename: the name of the file to analyze
    :raises IOError: if ``filename`` does not exist or can't be read
    :return: a tuple where the first element is the number of lines in the
        file and the second element is the number of characters
    """
    lines = 0
    chars = 0
    with open(filename, 'r') as f:
        for line in f:
            lines += 1
            chars += len(line)
    return lines, chars
1670d3bff0402482e9e33be401e8914eea117f6c
701,691
def auto_adapt_batch(train_size, val_size, batch_count_multiple=1, max_size=256):
    """
    Returns a suitable batch size according to the train and val dataset sizes.
    Say max_size = 128 and val_size is smaller than train_size:
    if val_size <= 128, the batch size to be returned is val_size;
    if 128 < val_size <= 256, the batch size is 1/2 of val_size, and at most
    1 validation sample cannot be used;
    if 256 < val_size <= 384, the batch size is 1/3 of val_size, and at most
    2 validation samples cannot be used;
    ...

    :param train_size: the number of training samples in the training set
    :param val_size: the number of validation samples in the validation set
    :param max_size: the maximum batch size that is allowed to be returned
    :param batch_count_multiple: force the batch count to be a multiple of
        this number, default = 1
    :return: a suitable batch size for the input
    """
    print('Auto adapting batch size...')
    numerator = min(train_size, val_size)
    denominator = 0
    while True:
        denominator += batch_count_multiple
        batch_size = numerator // denominator
        if batch_size <= max_size:
            return batch_size
d0a6fa9e6bde3d563bd7fad5e2bbcf7068f9ff65
701,692
from typing import Tuple


def pascals_triangle(rows: int) -> Tuple[Tuple[int, ...], ...]:
    """Return tuple containing Pascal's triangle up to the specified length."""
    result = []
    next_numbers = [1]
    for _ in range(0, rows):
        # move to the next row
        current_numbers = next_numbers
        next_numbers = []
        result.append(tuple(current_numbers))
        for index, number in enumerate(current_numbers):
            # check whether it is the first or last number
            if index == 0:
                next_numbers.append(number)
            if index == len(current_numbers) - 1:
                next_numbers.append(number)
            else:
                next_numbers.append(number + current_numbers[index + 1])
    return tuple(result)
80c46657d413ff67bf3fc94091fd8a81bdb5a148
701,693
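A hand-checked usage sketch of pascals_triangle above:

assert pascals_triangle(4) == ((1,), (1, 1), (1, 2, 1), (1, 3, 3, 1))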
import os


def list_results_files(path, instanceid, omittedfiles):
    """
    Lists the files associated with an instanceid, leaving out the
    omittedfiles, in ascending age.

    :param path:
    :param instanceid:
    :param omittedfiles:
    :return:
    """
    files = sorted(os.listdir(os.path.join(path, instanceid)),
                   key=lambda fn: os.path.getctime(os.path.join(path, instanceid, fn)))
    for filename in omittedfiles:
        try:
            files.remove(filename)
        except ValueError:  # filename was not in the listing
            pass
    return files
c7e1d5e62ef1e4dc87a321cf0c391397fad8bc7c
701,694
def two_fer(name="you"):
    """Returns a string in the two-fer format."""
    return "One for " + name + ", one for me."
a7f10a45b214ea1ea79a6956148b3c6677f27e21
701,695
import math


def create_pagination(page, results_per_page, total_results):
    """Create pagination to filter results to manageable amounts."""
    pagination = {}
    # For UI
    pagination['page'] = page
    pagination['total_results'] = total_results
    pagination['total_pages'] = math.ceil(total_results / results_per_page)
    # For database
    pagination['limit'] = results_per_page
    pagination['offset'] = (page - 1) * results_per_page
    return pagination
d58bf2adee3e090e88aa82a5a91560e8fb1631e0
701,697
def conv_len(a, l):
    """
    Function that converts a number into a bit string of given length.

    :param a: number to convert
    :param l: length of bit string
    :return: padded bit string
    """
    b = bin(a)[2:]
    padding = l - len(b)
    b = '0' * padding + b
    return b
b3c28e82c759e3a433ca9b52d7e7726f786e76ff
701,700
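A quick demonstration of the zero-padding done by conv_len above:

assert conv_len(5, 8) == '00000101'
assert conv_len(255, 8) == '11111111'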
import math


def a_raininess_oracle(timestep):
    """Mimics an external data source for raininess

    Arguments
    =========
    timestep : int
        Requires a year between 2010 and 2050

    Returns
    =======
    raininess : int
    """
    msg = "timestep {} is outside of the range [2010, 2050]".format(timestep)
    assert timestep in range(2010, 2051), msg
    raininess = math.floor((timestep - 2000) / 10)
    return raininess
e1b4f32f62fe19f95ac876a0acf03fe533858366
701,701
import torch


def matrix_to_homogeneous(batch: torch.Tensor) -> torch.Tensor:
    """
    Transforms a given transformation matrix to a homogeneous transformation matrix.

    Args:
        batch: the batch of matrices to convert [N, dim, dim]

    Returns:
        torch.Tensor: the converted batch of matrices
    """
    if batch.size(-1) == batch.size(-2):
        missing = batch.new_zeros(size=(*batch.shape[:-1], 1))
        batch = torch.cat([batch, missing], dim=-1)

    missing = torch.zeros(
        (batch.size(0), *[1 for tmp in batch.shape[1:-1]], batch.size(-1)),
        device=batch.device,
        dtype=batch.dtype
    )
    missing[..., -1] = 1

    return torch.cat([batch, missing], dim=-2)
ab3bf1acf1e8fab2d4a4fcdcfd062821bc891b9d
701,702
import struct


def readHeader(fd):
    """Read protocol header."""
    # Note: recv(6) may return fewer than 6 bytes on a stream socket;
    # callers should ensure the full header has arrived.
    data = fd.recv(6)
    value = struct.unpack(">bib", data)
    return value
6f1ac379f9eb1f5862754dd10fced9a817bb664a
701,703
def get_RSI(df, column='Close', time_window=14):
    """Function to compute the RSI values for a given stock dataframe."""
    # Difference between consecutive values of the column
    diff = df[column].diff(1)

    # Zero-initialised series with the same index as the differences
    up_chg = 0 * diff
    down_chg = 0 * diff

    # We consider the up change as the positive difference, otherwise keep it as zero
    up_chg[diff > 0] = diff[diff > 0]
    down_chg[diff < 0] = diff[diff < 0]

    # We set com to time_window-1 so our decay is alpha=1/time_window.
    up_chg_avg = up_chg.ewm(com=time_window - 1, min_periods=time_window).mean()
    down_chg_avg = down_chg.ewm(com=time_window - 1, min_periods=time_window).mean()

    RS = abs(up_chg_avg / down_chg_avg)
    df['RSI'] = 100 - 100 / (1 + RS)
    return df
dca6f44062b8dbc04444f033d2e3b54075ad2ca6
701,704
def find_ch_interest_dict(show_channel_dict: dict, usr_pref_dict: dict):
    """Pass in show_channel_dict {show: channel} and usr_pref_dict
    {show: rating}. Returns dictionary {channel: total rating}."""
    ch_interest_dict = {}
    for show in usr_pref_dict:
        if show in show_channel_dict:
            channel = show_channel_dict[show]
            if channel in ch_interest_dict:
                ch_interest_dict[channel] += usr_pref_dict[show]
            else:
                ch_interest_dict[channel] = usr_pref_dict[show]
    return ch_interest_dict
9928b03c0ceea3ea38c3808a5fd4053553f4e5c4
701,706
def is_valid_int(s: str) -> bool:
    """
    Return true if s can be converted into a valid integer, and false otherwise.

    :param s: value to check if can be converted into a valid integer
    :return: true if s can be converted into a valid integer, false otherwise

    >>> is_valid_int("hello")
    False
    >>> is_valid_int("506")
    True
    """
    try:
        int(s)
    except ValueError:
        return False
    else:
        return True
9d2c849839f6fdcf729a7c1503a3eac3daa5f000
701,707
def clean_software_config(config):
    """Take an individual `config` data structure (as specified by
    config_validation.SoftwareSchema) and return a 'clean' version suitable
    for internal use.

    This allows for a simplified schema to be available to users whilst
    preserving consistent internal data structures by e.g. replacing null
    values with empty lists etc.

    args:
        config (dict): A validated SoftwareSchema

    returns:
        (dict): A cleaned version of `config`
    """
    config = config.copy()  # note: a shallow copy, nested dicts are shared
    if not config["input_files"]["required"]:
        config["input_files"]["required"] = []
    if not config["input_files"]["optional"]:
        config["input_files"]["optional"] = []
    return config
c89ad5a4b61e4214d4b79ce6782e4fe5a86311bf
701,708
def miller_rabin_d(n: int) -> bool:
    """Check if n is a prime number via the deterministic Miller-Rabin test.

    Miller showed that it is possible to make the algorithm deterministic by
    only checking all bases ≤ O(lg(n)^2). Bach later gave a concrete bound:
    it is only necessary to test all bases a ≤ 2lg(n)^2.

    It turns out that for testing a 32 bit integer it is only necessary to
    check the first 4 prime bases: 2, 3, 5 and 7. The smallest composite
    number that fails this test is 3,215,031,751 = 151⋅751⋅28351. And for
    testing a 64 bit integer it is enough to check the first 12 prime bases:
    2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, and 37."""
    if n < 2:
        return False
    s, d = 0, n - 1
    while d & 1 == 0:
        s += 1
        d >>= 1
    for a in 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37:
        if n == a:
            return True
        v = pow(a, d, n)
        if v != 1 and v != n - 1:
            for j in range(1, s):
                v = pow(v, 2, n)
                if v == n - 1:
                    break
            if v != n - 1:
                return False
    return True
ac668cb55e417a6784ba52d1c5da1dc26d3693ad
701,709
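A light sanity sketch for miller_rabin_d above (hand-checked small cases, not a proof of the 64-bit claim):

small_primes = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31}
assert all(miller_rabin_d(n) == (n in small_primes) for n in range(32))
# The composite from the docstring fools bases 2, 3, 5, 7 alone,
# but the 12-base test correctly rejects it:
assert not miller_rabin_d(3215031751)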
from typing import List
from typing import Callable


def evaluate_predictions(preds: List[List[float]],
                         targets: List[List[float]],
                         metric_func: Callable) -> List[float]:
    """
    Evaluates predictions using a metric function, filtering out invalid targets.

    :param preds: A list of lists of shape (data_size, num_tasks) with model predictions.
    :param targets: A list of lists of shape (data_size, num_tasks) with targets.
    :param metric_func: Metric function which takes in a list of targets and a list of predictions.
    :return: A list with the score for each task based on `metric_func`.
    """
    data_size, num_tasks = len(preds), len(preds[0])

    # Filter out empty targets
    # valid_preds and valid_targets have shape (num_tasks, data_size)
    valid_preds = [[] for _ in range(num_tasks)]
    valid_targets = [[] for _ in range(num_tasks)]
    for i in range(num_tasks):
        for j in range(data_size):
            if targets[j][i] is not None:  # Skip those without targets
                valid_preds[i].append(preds[j][i])
                valid_targets[i].append(targets[j][i])

    # Compute metric
    results = []
    for i in range(num_tasks):
        # Skip if all targets are identical
        if all(target == 0 for target in valid_targets[i]) \
                or all(target == 1 for target in valid_targets[i]):
            continue
        results.append(metric_func(valid_targets[i], valid_preds[i]))

    return results
7b7f550a0983cbb8af90f13b214a195cdb8cbfe3
701,711
def recommend_size(args, img):
    """Recommend size (in pixels) for the populated image to reach 300 dpi."""
    current_ppi = 72.0 / img.scale
    target_ppi = args.normal_ppi
    scale = target_ppi / current_ppi
    return (int(img.image.width * scale), int(img.image.height * scale))
b32a0520586984124ba2134dc99ecc0b71cd01df
701,712
from typing import Union


def flatten(x: Union[list, tuple]) -> list:
    """
    Flattening function for nested lists and tuples.

    Args:
        x: List or tuple

    Returns:
        object (list): Flat list
    """
    # The original condition (`not isinstance(x, list) and isinstance(x, tuple)`)
    # rejected tuples and accepted everything else; the intent is clearly to
    # reject anything that is neither a list nor a tuple.
    if not isinstance(x, (list, tuple)):
        raise TypeError("input must be a list or tuple")
    out: list = []
    for item in x:
        if isinstance(item, (list, tuple)):
            out.extend(flatten(item))
        else:
            out.append(item)
    return out
36c35dfbef4214ccf0f6d355f36865996fd6d88e
701,713
from typing import Mapping


def update_nested(original_dict, update_dict):
    """Update a nested dictionary with another nested dictionary.

    Has equivalent behaviour to :obj:`dict.update(self, update_dict)`.

    Args:
        original_dict (dict): The original dictionary to update.
        update_dict (dict): The dictionary from which to extract updates.

    Returns:
        original_dict (dict): The original dictionary after updates.
    """
    for k, v in update_dict.items():
        nested_dict = v
        if isinstance(v, Mapping):  # Mapping ~= any dict-like object
            nested_dict = original_dict.get(k, {})
            if nested_dict is not None:
                nested_dict = update_nested(nested_dict, v)
        original_dict[k] = nested_dict
    return original_dict
a1a372ac4d26066c3fe32cd4ee1a49fff6972cd9
701,714
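A hand-checked sketch of the nested merge performed by update_nested above:

base = {"a": 1, "nested": {"x": 1, "y": 2}}
update_nested(base, {"nested": {"y": 3}, "b": 2})
# sibling key "x" survives; only "y" is overwritten, and "b" is added
assert base == {"a": 1, "nested": {"x": 1, "y": 3}, "b": 2}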
import argparse


def process_command_line_args():
    """
    Returns:
        str: The dot file directory passed via the -d/--directory flag
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--directory", required=True,
                    help="Path to dot file directory")
    args = vars(ap.parse_args())
    return args["directory"]
505e7fc47c06162fb2f1328882a59caa3b0539a2
701,715
def remove_comment(line):
    """Remove trailing comments from one line."""
    start = 0
    while True:
        loc = line.find('#', start)
        if loc == -1:
            return line.replace('\\#', '#')
        elif not (loc and line[loc - 1] == '\\'):
            return line[:loc].replace('\\#', '#')
        start = loc + 1
e2ab53813efd17e00240f747709330c44c875235
701,716
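Two hand-checked cases showing the escaped-hash handling in remove_comment above:

assert remove_comment("x = 1  # set x") == "x = 1  "
assert remove_comment("label = 'a\\#b'") == "label = 'a#b'"  # \# is kept, unescaped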
def augment(cls):
    """Add `time` to kwargs list."""
    class New(cls):
        @staticmethod
        def _myfun(x, *args, time=0, **kwargs):
            return super(New, New)._myfun(x)
    return New
cf08f753ba3af3ff2d2c11ca24c5796d2b0a4c12
701,717
def handle_none(func):
    """A decorator function to handle cases where partition values are `None`
    or "__HIVE_DEFAULT_PARTITION__".

    Args:
        func (Callable): A function registered to the singledispatch function
            `partition_to_py`
    """
    def wrapper(primitive_type, value_str):
        if value_str is None:
            return None
        elif value_str == "__HIVE_DEFAULT_PARTITION__":
            return None
        return func(primitive_type, value_str)
    return wrapper
23db0c46a35f2e433735c4a863d5619bf4c3cc55
701,718
from typing import List
from typing import Tuple
import os
import csv


def get_location_replacements() -> List[Tuple[str, str]]:
    """Gets a list of location replacement tuples from csv."""
    replacements = []
    with open(os.path.join(os.path.dirname(__file__),
                           '../config/location.replacements.csv'), 'r') as infile:
        reader = csv.reader(infile)
        next(reader, None)  # skip the headers
        replacements = [(rows[0], rows[1]) for rows in reader]
    return replacements
c1e2a67ed64df8e3610ffd3150b5a5dd1982f61c
701,719
def asInteger(epsg):
    """Convert EPSG code to integer."""
    return int(epsg)
18a14944f5f29ec09585757f0edc912b896a12ba
701,720
def monitor_cb(ud, msg):
    """Callback for the MonitorStates, listening to /click/start_button."""
    # Return False when you want the MonitorState to terminate
    return False
34f5065aadf8ec96bbe0fb54b791f7a4385a55b5
701,721
def mag(initial, current):
    """
    Calculates the magnification of a specified value.

    **Parameters**

        initial: *float*
            initial value (magnification of 1)
        current: *float*
            current value

    **Returns**

        magnification: *float*
            the magnification of the current value
    """
    return float(initial) / float(current)
abc8d3603f11e62f57a62c47dc372b4b9ea19b0c
701,722
import re


def capitalize(word):
    """Only capitalize the first letter of a word, even when written in camelCase.

    Args:
        word (str): Input string.

    Returns:
        str: Input string with first letter capitalized.
    """
    return re.sub('([a-zA-Z])', lambda x: x.groups()[0].upper(), word, 1)
4f254696e00c24a85a20ea74fc66a32fceb541c6
701,723
import re


def cassandra_ddl_repr(data):
    """Generate a string representation of a map suitable for use in Cassandra DDL."""
    if isinstance(data, str):
        return "'" + re.sub(r"(?<!\\)'", "\\'", data) + "'"
    elif isinstance(data, dict):
        pairs = []
        for k, v in data.items():
            if not isinstance(k, str):
                raise ValueError('DDL map keys must be strings')
            pairs.append(cassandra_ddl_repr(k) + ': ' + cassandra_ddl_repr(v))
        return '{' + ', '.join(pairs) + '}'
    # bool must be checked before int, since bool is a subclass of int;
    # the original order would have rendered True as 'True' instead of 'true'
    elif isinstance(data, bool):
        return 'true' if data else 'false'
    elif isinstance(data, int):
        return str(data)
    else:
        raise ValueError('Cannot convert data to a DDL representation')
c81ad24c0185ef10646644b82399c202c2261a1a
701,724
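A usage sketch of cassandra_ddl_repr above (hand-checked; relies on the bool-before-int fix and on dict insertion order, i.e. Python 3.7+):

ddl = cassandra_ddl_repr({'class': 'SimpleStrategy', 'replication_factor': 3})
assert ddl == "{'class': 'SimpleStrategy', 'replication_factor': 3}"
assert cassandra_ddl_repr(True) == 'true'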
import tempfile


def build_dir():
    """Test build dir for the sphinx compiled docs."""
    return tempfile.mkdtemp()
aef168b1031a9ebc15d502a3aedb89da004caffa
701,725
import csv


def read_csv_file(file_name):
    """
    Given a CSV file, read the data into a nested list.

    Input: String corresponding to comma-separated CSV file
    Output: Nested list consisting of the fields in the CSV file
    """
    with open(file_name, newline='') as csv_file:
        # don't need to explicitly close the file now
        csv_table = []
        csv_reader = csv.reader(csv_file, delimiter=',')
        for row in csv_reader:
            csv_table.append(row)
    return csv_table
65f9d2edb9ecf020d773a8d8516f31247fa680ed
701,726
def get_all_entries(df, pdbid, cdr):
    """
    Get all entries of a given PDBID and CDR.

    :param df: pandas.DataFrame
    :rtype: pandas.DataFrame
    """
    return df[(df['input_tag'].str.contains(pdbid)) & (df['CDR'] == cdr)]
414eca4481bde0ccc5cdd6e143f7d4b06216a102
701,728
import argparse


def process_create_experiment_arguments():
    """Processing command line arguments for 01_create_experiment script."""
    # defining command line arguments
    parser = argparse.ArgumentParser()

    # general arguments
    parser.add_argument("-d", "--exp_directory",
                        help="Directory where the experiment folder will be created",
                        default="test_dir")

    # dataset parameters
    parser.add_argument('--dataset_name',
                        help="Dataset to take the images from [mnist, svhn, div2k]",
                        default="svhn")

    # denoising parameters
    parser.add_argument('--denoiser',
                        help="Type of denoising approach ['', 'median_filter', 'wiener_filter', 'autoencoder']",
                        default="")
    parser.add_argument('--denoiser_type',
                        help="Where the denoiser is applied ['', 'prenetwork', 'innetwork']",
                        default="")
    parser.add_argument('--kernel_size',
                        help="Size of the median pooling or wiener filter kernel. Must be an integer; so far only square kernels are allowed",
                        default="5")
    parser.add_argument('--bottleneck',
                        help="Dimensionality of the bottleneck code for the case of denoising autoencoders",
                        default="128")

    # augmentation parameters
    parser.add_argument('--rotation',
                        help="Binary flag for applying rotation augmentation",
                        default="False")
    parser.add_argument('--translation',
                        help="Binary flag for applying translation augmentation",
                        default="False")

    # training parameters
    parser.add_argument('--num_epochs', help="Number of epochs to train for", default="100")
    parser.add_argument('--batch_size', help="Number of examples in each batch", default="12")
    parser.add_argument('--patches_per_image', help="Number of patches to sample per image", default="10")
    parser.add_argument('--optimizer', help="Optimizer used to update the weights ['ADAM']", default="ADAM")
    parser.add_argument('--loss_function', help="Loss function used ['mae', 'mse']", default="mae")
    parser.add_argument('--learning_rate', help="Learning rate", default="3e-4")
    parser.add_argument('--lr_decay',
                        help='Factor by which the learning rate will be decreased during decay',
                        default='0.1')
    parser.add_argument('--patience',
                        help='Number of epochs in which the loss does not decrease before changing lr',
                        default='8')
    parser.add_argument('--validation_size', help="Size of the validation set [0,0.5]", default="0.2")
    parser.add_argument('--save_frequency', help="Number of epochs after which we save a checkpoint", default="10")

    # model parameters
    parser.add_argument('--model_name', help="Name of the model to use ['wdsr_a', 'wdsr_b']", default="wdsr_a")
    parser.add_argument('--num_filters',
                        help="Number of filters in the conv layers of the residual blocks",
                        default="32")
    parser.add_argument('--num_res_blocks', help="Number of residual blocks", default="16")
    parser.add_argument('--num_block_features', help="Number of blocks features", default="256")
    parser.add_argument('--res_scale', help="Weight scale of the residual", default="0.1")
    parser.add_argument('--r_mean', help='Mean of R channel', default=0.5)
    parser.add_argument('--g_mean', help='Mean of G channel', default=0.5)
    parser.add_argument('--b_mean', help='Mean of B channel', default=0.5)

    # corruption parameters
    parser.add_argument('--noise',
                        help="Type of noise to be used to corrupt the images ['', 'gaussian', 'poisson', 'speckle', 'salt_pepper']",
                        default="")
    parser.add_argument('--std', help="Standard deviation of the noise", default=0)
    parser.add_argument('--downscaling',
                        help="Factor by which the images will be downscaled and then upsampled",
                        default=1)

    args = parser.parse_args()

    # formatting arguments
    args.kernel_size = int(args.kernel_size)
    args.bottleneck = int(args.bottleneck)
    args.num_epochs = int(args.num_epochs)
    args.batch_size = int(args.batch_size)
    args.patches_per_image = int(args.patches_per_image)
    args.learning_rate = float(args.learning_rate)
    args.lr_decay = float(args.lr_decay)
    args.patience = int(args.patience)
    args.validation_size = float(args.validation_size)
    args.save_frequency = int(args.save_frequency)
    args.num_filters = int(args.num_filters)
    args.num_res_blocks = int(args.num_res_blocks)
    args.num_block_features = int(args.num_block_features)
    args.res_scale = float(args.res_scale)
    args.std = float(args.std)
    args.downscaling = int(args.downscaling)

    # ensuring only known values go through
    assert args.dataset_name in ["mnist", "svhn", "div2k"]
    assert args.denoiser in ["", "median_filter", "wiener_filter", "autoencoder"]
    assert args.denoiser_type in ['', 'prenetwork', 'innetwork']
    assert args.kernel_size > 0
    assert args.bottleneck > 0
    assert args.optimizer in ["ADAM"]
    assert args.loss_function in ["mse", "mae"]
    assert args.learning_rate > 0
    assert args.batch_size > 0
    assert args.patches_per_image > 0
    assert args.validation_size > 0 and args.validation_size < 0.5
    assert args.model_name in ["wdsr_a"]
    assert args.num_filters > 0
    assert args.num_res_blocks > 0
    assert args.noise in ["", "gaussian", "poisson", "speckle", "salt_pepper"]
    assert args.downscaling >= 1

    return args
e6830a43ad215f03ad59a1b496140d4247aa124d
701,729
def merge_result(res):
    """
    Merges all items in `res` into a list.

    This command is used when sending a command to multiple nodes and the
    result from each node should be merged into a single list.
    """
    if not isinstance(res, dict):
        raise ValueError("Value should be of dict type")

    result = set()
    for _, v in res.items():
        for value in v:
            result.add(value)
    return list(result)
28d21ca00316303c0e2fc0400599921154253236
701,730
def get_file_section_name(section_key, section_label=None):
    """Build a section name as in the config file, given section key and label."""
    return section_key + (" {0}".format(section_label) if section_label else "")
01e1f46d2a949315ba2e927ddfab610064539e3b
701,731
import os


def FindSrcDirPath():
    """Returns the abs path to the src/ dir of the project."""
    src_dir = os.path.dirname(os.path.abspath(__file__))
    while os.path.basename(src_dir) != 'src':
        src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
    return src_dir
d41d225fd65b3a6e934abd42bcbbe5c25a064a31
701,732
import re
import os


def _parse_rptfiles_from_log(log_filename):
    """Parses Red stdout log and returns a list with the names of output
    files with repeat coordinates."""
    rpt_files = []
    try:
        logfile = open(log_filename)
    except OSError as error:
        print("# ERROR: cannot open/read file:", log_filename, error)
        return rpt_files

    job_done = False
    repeats_ok = True
    for line in logfile:
        repeats = re.search(r"locations to: (\S+)", line)
        if repeats:
            rpt_filename = repeats.group(1)
            if not os.path.isfile(rpt_filename):
                repeats_ok = False
                break
            else:
                rpt_files.append(rpt_filename)
        else:  # last line in log
            summary = re.search(r"Genome length: \d+", line)
            if summary:
                job_done = True
    logfile.close()  # was `logfile.close` without parentheses, a silent no-op

    if repeats_ok and job_done:
        return rpt_files
    else:
        return []
4607ba7005dfacf747f8dfe36b520737092d1747
701,733
import torch
import math


def positionalencoding2d(pos_embed_dim, height, width):
    """
    :param pos_embed_dim: dimension of the model embeddings
    :param height: height of the positions
    :param width: width of the positions
    :return: height * width * pos_embed_dim matrix
    """
    if pos_embed_dim % 4 != 0:
        raise ValueError("Cannot use sin/cos positional encoding with "
                         "odd dimension (got dim={:d})".format(pos_embed_dim))
    pe = torch.zeros(pos_embed_dim, height, width)
    # Each dimension uses half of pos_embed_dim
    pos_embed_dim = int(pos_embed_dim / 2)
    div_term = torch.exp(torch.arange(0., pos_embed_dim, 2)
                         * -(math.log(10000.0) / pos_embed_dim))
    pos_w = torch.arange(0., width).unsqueeze(1)
    pos_h = torch.arange(0., height).unsqueeze(1)
    pe[0:pos_embed_dim:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
    pe[1:pos_embed_dim:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
    pe[pos_embed_dim::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
    pe[pos_embed_dim + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
    pe = pe.permute(1, 2, 0)
    return pe
685396742b965df3b409203656707f1868e30c45
701,734
import random


def shades_of_jop():
    """Return a pretty colour."""
    c1 = random.randint(127, 255)
    c2 = random.randint(0, 127)
    c3 = random.randint(0, 255)
    return tuple(random.sample([c1, c2, c3], 3))
366bc5bada332b6de3561d6c906a1880119b02f2
701,735
def _final_frame_length(header, final_frame_bytes):
    """Calculates the length of a final ciphertext frame, given a complete
    header and the number of bytes of ciphertext in the final frame.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param int final_frame_bytes: Bytes of ciphertext in the final frame
    :rtype: int
    """
    final_frame_length = 4  # Sequence Number End
    final_frame_length += 4  # Sequence Number
    final_frame_length += header.algorithm.iv_len  # IV
    final_frame_length += 4  # Encrypted Content Length
    final_frame_length += final_frame_bytes  # Encrypted Content
    final_frame_length += header.algorithm.auth_len  # Authentication Tag
    return final_frame_length
b7029e3b705194ee7daa02b4400d124ffe6efc2a
701,736
def real2complex(input_data):
    """
    Combine a trailing real/imaginary channel pair into complex values.

    Parameters
    ----------
    input_data : row x col x 2

    Returns
    -------
    output : row x col
    """
    return input_data[..., 0] + 1j * input_data[..., 1]
9e359903f9653e8ea799a2baedcc9c274471f34f
701,737
def check_is_paired(df, subject, group):
    """
    Check if samples are paired.

    :param df: pandas dataframe with samples as rows and protein identifiers
        as columns (with additional columns 'group', 'sample' and 'subject').
    :param str subject: column with subject identifiers
    :param str group: column with group identifiers
    :return: True if paired samples.
    :rtype: bool
    """
    is_pair = False
    if subject is not None:
        count_subject_groups = df.groupby(subject)[group].count()
        is_pair = (count_subject_groups > 1).all()
    return is_pair
38f9b0722e77edb88ff44a7bc73eb24a8f1aa097
701,738
def ascii(object: object) -> str:
    """Return a repr-based string representation of the object."""
    return repr(object)
44dc1a77ebd46215aa25a2fea91f9c7c41bd4e7a
701,740
import subprocess
import sys
import json


def collect_clusteroperator_relatedobjects():
    """
    Returns a list of every namespace listed as a relatedObject by every
    clusterOperator. This captures managed namespaces that aren't defined
    in the OCP manifests.
    """
    co_namespaces = []
    try:
        # check=True is required for CalledProcessError to actually be raised
        result = subprocess.run(["oc", "get", "co", "-o", "name"],
                                capture_output=True, check=True)
    except subprocess.CalledProcessError as e:
        sys.exit(f"Failed to get list of installed cluster operators: {e}")
    for n in result.stdout.splitlines():
        r = subprocess.run(["oc", "get", n, "-o", "json"], capture_output=True)
        operator_dict = json.loads(r.stdout)
        related_namespaces = [
            ns["name"]
            for ns in operator_dict["status"]["relatedObjects"]
            if ns["resource"] == "namespaces"
        ]
        co_namespaces.extend(related_namespaces)
    return co_namespaces
94324dbca3457d76f50d80020f560dad6f97798c
701,742
def specific_heat(mat):
    """Calculate specific heat."""
    cw = 4183  # specific heat of water, J/(kg*K)
    mr = mat['m_heat']
    mw = mat['m_w']
    Tr = mat['Tr']
    Tw = mat['Tw']
    Te = mat['Te']
    return (mw * cw * (Te - Tw)) / (mr * (Tr - Te))
7d3fbe3f67b3df593c94c93ab7d8523242d17b46
701,743
def _flatten(suitable_for_isinstance):
    """
    isinstance() can accept a bunch of really annoying different types:
        * a single type
        * a tuple of types
        * an arbitrary nested tree of tuples

    Return a flattened tuple of the given argument.
    """
    types = set()
    if not isinstance(suitable_for_isinstance, tuple):
        suitable_for_isinstance = (suitable_for_isinstance,)
    for thing in suitable_for_isinstance:
        if isinstance(thing, tuple):
            types.update(_flatten(thing))
        else:
            types.add(thing)
    return tuple(types)
5ba63f39b2d22da78f5a362ce6821239714a9e6a
701,745
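A hand-checked usage sketch of _flatten above, with a nested tuple tree:

flat = _flatten((int, (str, (float, bool))))
assert set(flat) == {int, str, float, bool}  # order is unspecified (set-based)
assert isinstance(3.5, _flatten((int, (float,))))  # usable directly in isinstance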
def attach_tasks(queryset, as_field="tasks_attr"):
    """Attach tasks as a json column to each object of the queryset.

    :param queryset: A Django user stories queryset object.
    :param as_field: Attach tasks as an attribute with this name.

    :return: Queryset object with the additional `as_field` field.
    """
    model = queryset.model
    sql = """
        SELECT json_agg(row_to_json(t))
        FROM (
            SELECT tasks_task.id,
                   tasks_task.ref,
                   tasks_task.subject,
                   tasks_task.status_id,
                   tasks_task.is_blocked,
                   tasks_task.is_iocaine,
                   projects_taskstatus.is_closed
            FROM tasks_task
            INNER JOIN projects_taskstatus
                ON projects_taskstatus.id = tasks_task.status_id
            WHERE user_story_id = {tbl}.id
            ORDER BY tasks_task.us_order, tasks_task.ref
        ) t
    """
    sql = sql.format(tbl=model._meta.db_table)
    queryset = queryset.extra(select={as_field: sql})
    return queryset
02a7e189226f9fb5809d7b4d18f3055e5fbc5462
701,746
import string


def PrintableString(s):
    """For pretty-printing in tests."""
    if all(c in string.printable for c in s):
        return s
    return repr(s)
4f22a5ed8152039a21e045ea2e04b4cff3dbec85
701,747
import six


def bitcast_to_bytes(s):
    """
    Take a string and return a str (PY2) or a bytes (PY3) object.

    The returned object contains the exact same bytes as the input string.
    (latin1 <-> unicode transformation is an identity operation for the
    first 256 code points).
    """
    return s if six.PY2 else s.encode("latin1")
b902550be03f447a286490653a2a1361257ac88c
701,748