content
stringlengths
42
6.51k
def IsTelemetryCommand(command):
    """Heuristically decide whether `command` launches a telemetry run script."""
    # Match both POSIX and Windows path separators.
    markers = ('tools/perf/run_', 'tools\\perf\\run_')
    return any(marker in command for marker in markers)
def count_change(total):
    """Return the number of ways to make change for `total` using
    powers-of-two coin denominations (1, 2, 4, 8, ...).

    >>> count_change(7)
    6
    >>> count_change(10)
    14
    """
    "*** YOUR CODE HERE ***"
    def ways(remaining, exponent):
        # Count partitions of `remaining` into coins of value 2**exponent
        # and larger; recursion only (the assignment bans iteration).
        if remaining == 0:
            return 1
        coin = 2 ** exponent
        if remaining < coin:
            return 0
        # Either skip this denomination, or spend one such coin.
        return ways(remaining, exponent + 1) + ways(remaining - coin, exponent)
    return ways(total, 0)
def clean_parses(parses):
    """Strip each parse tuple (bis, buffer, route, k) down to (bis, route).

    Arguments
        parses : list of (bis, buffer, route, k)
    Returns
        list of (bis, route)
    """
    trimmed = []
    for bis, _buffer, route, _k in parses:
        trimmed.append((bis, route))
    return trimmed
def find_arg_end(field_name):
    """Return the index where the first attribute ('.') or element-index ('[')
    part of `field_name` begins, e.g. "0.name" -> 1, "lookup[3]" -> 6.

    Returns -1 when neither separator occurs.
    """
    hits = [field_name.find(sep) for sep in (".", "[")]
    found = [pos for pos in hits if pos != -1]
    if not found:
        return -1
    return min(found)
def sum_digits(n: int) -> int:
    """Return the sum of the decimal digits of the non-negative integer n.

    Parameters
    ----------
    n : int
        number to return the digit sum of

    Returns
    -------
    int
        sum of the number's digits
    """
    # Uses builtin sum(); the original accumulated into a local named
    # `sum`, shadowing the builtin.
    return sum(int(digit) for digit in str(n))
def create_approval_email(user: dict) -> str:
    """Build the registration-approval email body.

    Arguments:
        user {dict} -- User record from /accounts (must have first_n, last_n).

    Returns:
        str -- Formatted email body.
    """
    first_name = user["first_n"]
    last_name = user["last_n"]
    return '''
Dear %s %s:

Your registration for the CIMAC-CIDC DATA Portal has now been approved.

To continue, please go to: https://portal.cimac-network.org.

If you have any questions, please email us at: cidc@jimmy.harvard.edu.

Thanks,
The CIDC Project Team
''' % (first_name, last_name)
def map(inp, x_min, x_max, y_min, y_max):
    """Linearly rescale `inp` from the range [x_min, x_max] to [y_min, y_max].

    NOTE(review): shadows the builtin ``map``; name kept for existing callers.
    """
    normalized = (inp - x_min) / (x_max - x_min)
    return normalized * (y_max - y_min) + y_min
def _str_to_list(val): """If val is str, return list with single entry, else return as-is.""" l = [] if val.__class__ == str: l.append(val) return l else: return val
def param_cleaner(param_dict):
    """Helper for random_generator: drop 'empty' entries from param_dict and
    substitute default range bounds for empty from/to values.

    :param param_dict: param_dict from random_generator
    :return: cleaned param_dict
    """
    # Default range bounds per (parameter name, end of range).  Replaces the
    # original nested if/elif chain with a single lookup table.
    _DEFAULTS = {
        ('release_year', 'from'): 1900, ('release_year', 'to'): 2020,
        ('duration', 'from'): 0, ('duration', 'to'): 1000,
        ('number_of_votes', 'from'): 0, ('number_of_votes', 'to'): 10000000,
        ('rating', 'from'): 0, ('rating', 'to'): 11,
    }

    def fill_default(param, end, value):
        # Empty bounds get the table default; unknown params yield None,
        # matching the original chain's fall-through behaviour.
        if value == 0 or value == '':
            return _DEFAULTS.get((param, end))
        return value

    cleaned = {}
    for key, value in param_dict.items():
        if isinstance(value, dict):
            if value['to'] == '' and value['from'] == '':
                continue  # both bounds empty -> drop the whole entry
            cleaned[key] = {
                'from': fill_default(key, 'from', value['from']),
                'to': fill_default(key, 'to', value['to']),
            }
        elif value != '' and value != 0:
            cleaned[key] = value
    return cleaned
def unescape_glob(string):
    """Unescape glob pattern in `string`."""
    def _unescape_chunk(chunk):
        # Drop the escaping backslash before each glob metacharacter.
        for meta in '*[]!?':
            chunk = chunk.replace('\\' + meta, meta)
        return chunk
    # Double backslashes separate chunks and collapse to single backslashes.
    chunks = string.split('\\\\')
    return '\\'.join(_unescape_chunk(c) for c in chunks)
def _tgt_set(tgt): """ Return the tgt as a set of literal names """ try: # A comma-delimited string return set(tgt.split(",")) except AttributeError: # Assume tgt is already a non-string iterable. return set(tgt)
def str2floats(s):
    """Parse a single float or comma-separated floats into a tuple."""
    pieces = s.split(',')
    return tuple(map(float, pieces))
def get_product(product_db, product_id):
    """Return the product in product_db whose product_num equals product_id,
    or None when no product matches.
    """
    # Leftover debug print/commented-out code removed.
    for product in product_db:
        if product.product_num == product_id:
            return product
    return None
def parse_metric(metric, coords, coords2=None, coords3=None):
    """Convert a string metric into the corresponding enum to pass to the C code.

    Returns the (possibly adjusted) coords and the validated metric name.
    Raises ValueError for invalid or inconsistent metric/coords combinations.
    """
    # Auto-correlation when there is no second catalog.
    if coords2 is None:
        auto = True
    else:
        auto = False
    # Special Rlens doesn't care about the distance to the sources, so spherical is fine
    # for cat2, cat3 in that case.
    if metric == 'Rlens':
        if coords2 == 'spherical':
            coords2 = '3d'
        if coords3 == 'spherical':
            coords3 = '3d'
    if metric == 'Arc':
        # If all coords are 3d, then leave it 3d, but if any are spherical,
        # then convert to spherical.
        if all([c in [None, '3d'] for c in [coords, coords2, coords3]]):
            # Leave coords as '3d'
            pass
        elif any([c not in [None, 'spherical', '3d'] for c in [coords, coords2, coords3]]):
            raise ValueError("Arc metric is only valid for catalogs with spherical positions.")
        elif any([c == 'spherical' for c in [coords, coords2, coords3]]):  # pragma: no branch
            # Switch to spherical
            coords = 'spherical'
        else:  # pragma: no cover
            # This is impossible now, but here in case we add additional coordinates.
            raise ValueError("Cannot correlate catalogs with different coordinate systems.")
    else:
        # BUGFIX: guard against coords2/coords3 being None (auto-correlation);
        # the original compared coords2 != coords unconditionally, which made
        # every auto-correlation raise for non-Arc metrics.
        if ((coords2 is not None and coords2 != coords) or
                (coords3 is not None and coords3 != coords)):
            raise ValueError("Cannot correlate catalogs with different coordinate systems.")
    if coords not in ['flat', 'spherical', '3d']:
        raise ValueError("Invalid coords %s" % coords)
    if metric not in ['Euclidean', 'Rperp', 'OldRperp', 'FisherRperp', 'Rlens', 'Arc', 'Periodic']:
        raise ValueError("Invalid metric %s" % metric)
    if metric in ['Rperp', 'OldRperp', 'FisherRperp'] and coords != '3d':
        raise ValueError("%s metric is only valid for catalogs with 3d positions." % metric)
    if metric == 'Rlens' and auto:
        raise ValueError("Rlens metric is only valid for cross correlations.")
    if metric == 'Rlens' and coords != '3d':
        raise ValueError("Rlens metric is only valid for catalogs with 3d positions.")
    if metric == 'Arc' and coords not in ['spherical', '3d']:
        raise ValueError("Arc metric is only valid for catalogs with spherical positions.")
    return coords, metric
def get_license(license, settings):
    """Wrap a license text in the comment markers for a file type.

    Args:
        license (str): License text to be inserted
        settings (Dict): File type settings (headerStartLine, headerEndLine,
            headerLinePrefix, headerLineSuffix)

    Returns:
        str: Comment-wrapped license block, with trailing newline
    """
    start = settings["headerStartLine"]
    end = settings["headerEndLine"]
    prefix = settings["headerLinePrefix"]
    suffix = settings["headerLineSuffix"]

    lines = [] if start is None else [start.rstrip()]
    for raw in license.split("\n"):
        decorated = raw if prefix is None else prefix + raw
        if suffix is not None:
            decorated = decorated + suffix
        lines.append(decorated.rstrip())
    if end is not None:
        lines.append(end.rstrip())
    return "\n".join(lines) + "\n"
def _get_TZIDs(lines): """from a list of strings, get all unique strings that start with TZID""" return sorted(line for line in lines if line.startswith('TZID'))
def get_active_nodes(response_json):
    """Parse the active node list from a cluster API response.

    Returns a dict mapping nodeID -> node name for comparison.
    """
    nodes = response_json['result']['nodes']
    return {node['nodeID']: node['name'] for node in nodes}
def set_comment(s: str) -> str:
    """Prefix every line of `s` with '// ' (C++-style line comment)."""
    commented = ['// ' + line for line in s.splitlines(False)]
    return '\n'.join(commented) + '\n'
def squeeze(shape, n=0):
    """Remove all 1-sized entries among the dimensions > ``n``.

    For negative ``n`` the last ``|n|`` dimensions are squeezed.
    Preserves the container type of `shape`.
    """
    kept = list(shape[:n]) + [dim for dim in shape[n:] if dim > 1]
    return type(shape)(kept)
def lib1_cons2_neutral3(x):
    """Rearrange questions where 3 is neutral: 1 is kept, all other
    values are shifted down by 3."""
    if x == 1:
        return x
    return x - 3
def version_dicttotuple(dict_version):
    """Convert a version dictionary (major/minor/release) into an int tuple."""
    parts = ('major', 'minor', 'release')
    return tuple(int(dict_version[part]) for part in parts)
def makeDirectoryEntry(name, size, isdir=False, readonly=False):
    """Build a dictionary describing one directory entry."""
    return {
        "name": name,
        "size": size,
        "isDir": isdir,
        "readonly": readonly,
    }
def convert_to_float(value):
    """Attempt to convert a string or a number to a float.  If unsuccessful
    return the value unchanged.

    Note that this function will return True for boolean values, faux string
    boolean values (e.g., "true"), "NaN", exponential notation, etc.

    Parameters:
        value (str|int): string or number to be converted

    Returns:
        float: if value successfully converted, else value unchanged
    """
    try:
        return float(value)
    except (TypeError, ValueError):
        # Also catch TypeError (e.g. None, lists) so non-castable inputs
        # are returned unchanged as documented, instead of raising.
        return value
def isiter(x):
    """Return True if `x` implements a valid iterable interface,
    excluding str/bytes.

    Arguments:
        x (mixed): value to check.

    Returns:
        bool
    """
    if isinstance(x, (str, bytes)):
        return False
    return hasattr(x, '__iter__')
def convert_old_time(time: str, revert_12hour=False, truncate_minute=False) -> str:
    """Convert previous float-formatted times to 24-hour, full format.

    Parameters
    ----------
    time: a time from the previous CourseTable format (e.g. "13.3" or "9")
    revert_12hour: whether or not to convert back to 12-hour format
    truncate_minute: whether or not to remove the minute if it is :00

    Returns
    -------
    time: string formatted time
    """
    # Float format: digits before the '.' are the hour, after are the
    # (possibly single-digit tens-of-)minutes.
    if "." in time:
        hour = time.split(".")[0]
        minute = time.split(".")[1]
        if len(minute) == 1:
            # e.g. "13.3" means 13:30, not 13:03
            minute = minute + "0"
    else:
        hour = time
        minute = "00"
    formatted_time = f"{hour}:{minute}"
    if truncate_minute and minute == "00":
        formatted_time = formatted_time.split(":")[0]
    if revert_12hour:
        # 12-hour formatting overwrites the 24-hour string built above.
        hour_num = int(hour)
        if hour_num > 12:
            hour_num = hour_num - 12
            formatted_time = f"{str(hour_num)}:{minute}pm"
        elif hour_num == 12:
            formatted_time = f"{str(hour_num)}:{minute}pm"
        elif hour_num == 0:
            # midnight is written as 12am
            hour_num = 12
            formatted_time = f"{str(hour_num)}:{minute}am"
        else:
            formatted_time = f"{str(hour_num)}:{minute}am"
        if truncate_minute and minute == "00":
            # keep the am/pm suffix (last two chars) when dropping ":00"
            formatted_time = formatted_time.split(":")[0] + formatted_time[-2:]
    return formatted_time
def shell_quote(s):
    """Quote s for a POSIX shell, e.g. bl"a -> "bl\\"a". Returns bytes."""
    if not isinstance(s, bytes):
        s = s.encode('utf-8')
    # Characters that force double-quoting.
    specials = b' \t\n\r\x0b\x0c*$\\"\''
    if not any(ch in s for ch in specials):
        return s
    escaped = (s.replace(b'\\', b'\\\\')
                .replace(b'"', b'\\"')
                .replace(b'$', b'\\$'))
    return b'"' + escaped + b'"'
def write_encrypted_file(filename, encrypted_data):
    """Write an encrypted bytestring to `filename` + '.enc'.

    Returns the name of the file actually written.
    """
    out_name = filename + '.enc'
    with open(out_name, "wb") as handle:
        handle.write(encrypted_data)
    return out_name
def current_iteration(parent_iteration, child_iteration):
    """Return the combined "<parent>-<child>" iteration label.

    :param parent_iteration: parent iteration identifier
    :param child_iteration: child iteration identifier
    :return: combined label string
    """
    return '-'.join([str(parent_iteration), str(child_iteration)])
def dbKeys(db):
    """Fetch name, CAS, and formula from db and return them as a list of tuples.

    Parameters:
        db, nested dictionary like {'<name>': {'CAS': '', 'formula': '', ...}}

    Returns:
        nCF, list of tuples like [('name0','CAS0','formula0'), ...]

    Notes:
        extract names, CAS, and formulas to separate tuples via zip, e.g.:
        dbNames, dbCAS, dbFormula = zip(*dbKeys(db))
    """
    nCF = []
    # dict.get with a default replaces the original membership-test/else
    # dance; missing fields still yield "".
    for name, props in db.items():
        nCF.append((name, props.get('CAS', ""), props.get('formula', "")))
    return nCF
def fix_indentation(text, indent_chars):
    """Replace every tab in `text` with `indent_chars`."""
    return indent_chars.join(text.split('\t'))
def unhex(str):
    """Convert an 'ethereum' style hex value (e.g. "0x1f") to decimal."""
    # A bare "0x" denotes zero but would make int() raise, so special-case it.
    return 0 if str == "0x" else int(str, 16)
def kappa_confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None): """ Returns the confusion matrix between rater's ratings """ assert (len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(rater_a + rater_b) if max_rating is None: max_rating = max(rater_a + rater_b) num_ratings = int(max_rating - min_rating + 1) conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)] for a, b in zip(rater_a, rater_b): conf_mat[a - min_rating][b - min_rating] += 1 return conf_mat
def default(x, default):
    """Return x if x is not None, otherwise return `default`."""
    if x is None:
        return default
    return x
def job_wrapper_generic(param, user_defined_work_func, register_cleanup, touch_files_only):
    """Run the user-supplied work function with the unpacked parameters.

    `register_cleanup` and `touch_files_only` are accepted for interface
    compatibility with other job wrappers but are unused here.
    """
    # Commented-out PyInstaller (_MEIPASS) cleanup code removed.
    assert user_defined_work_func
    return user_defined_work_func(*param)
def apply_if_callable(maybe_callable, obj, **kwargs):
    """Evaluate possibly callable input using obj and kwargs if it is
    callable, otherwise return it as-is.

    Parameters
    ----------
    maybe_callable : possibly a callable
    obj : NDFrame
    **kwargs
    """
    if not callable(maybe_callable):
        return maybe_callable
    return maybe_callable(obj, **kwargs)
def ltypeOfLaueGroup(tag):
    """Return the lattice type for a Laue group tag.

    See quatOfLaueGroup.  Raises RuntimeError for non-string or
    unrecognized tags.
    """
    if not isinstance(tag, str):
        raise RuntimeError("entered flag is not a string!")
    # Lookup table replaces the elif chain and computes tag.lower() once.
    lookup = {
        'ci': 'triclinic', 's2': 'triclinic',
        'c2h': 'monoclinic',
        'd2h': 'orthorhombic', 'vh': 'orthorhombic',
        'c4h': 'tetragonal', 'd4h': 'tetragonal',
        'c3i': 'trigonal', 's6': 'trigonal', 'd3d': 'trigonal',
        'c6h': 'hexagonal', 'd6h': 'hexagonal',
        'th': 'cubic', 'oh': 'cubic',
    }
    key = tag.lower()
    if key not in lookup:
        raise RuntimeError(
            "unrecognized symmetry group.  " +
            "See ''help(quatOfLaueGroup)'' for a list of valid options.  " +
            "Oh, and have a great day ;-)"
        )
    return lookup[key]
def is_valid_unicode(text):
    """Return True when `text` (a bytes object) decodes cleanly as UTF-8.

    Used to detect whether a slice cut a multi-byte sequence on an
    invalid boundary.
    """
    try:
        text.decode("utf-8")
    except UnicodeDecodeError:
        return False
    return True
def split_list(l, n):
    """Split list l into n contiguous chunks as evenly as possible.

    :param l: (nested) list
    :param n: number of chunks
    :return: list of n lists (the first len(l) % n chunks get one extra item)
    """
    base, extra = divmod(len(l), n)
    chunks = []
    start = 0
    for i in range(n):
        length = base + (1 if i < extra else 0)
        chunks.append(l[start:start + length])
        start += length
    return chunks
def flatten(x):
    """Flatten a list of lists one level deep."""
    import itertools
    return list(itertools.chain.from_iterable(x))
def gain_mode_index_from_fractions(grp_prob):
    """Return the index of the first gain-group fraction > 0.5, else None."""
    for index, fraction in enumerate(grp_prob):
        if fraction > 0.5:
            return index
    return None
def bool_eval(token_lst):
    """Apply token_lst = [left_arg, operator, right_arg] as
    operator(left_arg, right_arg)."""
    op = token_lst[1]
    return op(token_lst[0], token_lst[2])
def print_value(value, ndigits=3):
    """Prepare a parameter value for printing (CyclingParams.__str__ helper):
    a scalar prints with `ndigits` decimals, longer sequences print as a
    size summary."""
    if len(value) != 1:
        return 'array(%d)' % len(value)
    return format(value[0], '.%df' % ndigits)
def parse_url_subdomains(url):
    """Parse site, area, and category from a Craigslist query URL."""
    host_parts = url.split("https://")[1].split(".")
    site = host_parts[0]
    path_parts = host_parts[2].split("/")
    # 5 path pieces -> query has no area; 6 -> area precedes the category.
    if len(path_parts) == 5:
        return site, "", path_parts[1]
    return site, path_parts[1], path_parts[2]
def _skip_nonwhitespace(data, pos):
    """Skip over the run of non-whitespace characters starting at ``pos``:
    return the position of the first *whitespace* character at or after
    ``pos``, or ``len(data)`` if there is none.

    (The previous docstring said "non-whitespace", which is the opposite of
    what the code does.)
    """
    for i, x in enumerate(data[pos:]):
        if x.isspace():
            return pos + i
    return len(data)
def module_level_function(param1, param2=None, *args, **kwargs):
    """Example of a module-level function with a NumPy-style docstring.

    Parameters
    ----------
    param1 : int
        The first parameter.
    param2 : :obj:`str`, optional
        The second parameter.
    *args
        Variable length argument list.
    **kwargs
        Arbitrary keyword arguments.

    Returns
    -------
    bool
        True if successful, False otherwise.

    Raises
    ------
    ValueError
        If `param2` is equal to `param1`.
    """
    if param1 == param2:
        raise ValueError('param1 may not be equal to param2')
    return True
def get_species_counts(individuals):
    """Count individuals per species.

    The species name is the suffix after the last '_' in each individual's
    name.

    :param individuals: iterable of names like "id_species"
    :return: dict mapping species name -> individual count
    """
    counts = {}
    for name in individuals:
        species = name.rsplit("_", 1)[-1]
        counts[species] = counts.get(species, 0) + 1
    return counts
def multiprocessing_func(fn_to_eval, random_var_gen, i):
    """Evaluate fn_to_eval on the i-th draw of random variables.

    Allows Monte Carlo runs to fan out across multiple CPUs.
    """
    return fn_to_eval(*random_var_gen(i))
def argument_names(args): """Give arguments alpha-numeric names. >>> names = argument_names(range(100)) >>> [names[i] for i in range(0,100,26)] [u'?a', u'?a1', u'?a2', u'?a3'] >>> [names[i] for i in range(1,100,26)] [u'?b', u'?b1', u'?b2', u'?b3'] """ # Argument naming scheme: integer -> `?[a-z]` with potentially a number if # there more than 26 arguments. name = {} for i, arg in enumerate(args): c = i // 26 if i >= 26 else '' name[arg] = '?%s%s' % (chr(97+(i % 26)), c) return name
def reverse_lookup(d, val):
    """Return a list of all keys in d that map to val (possibly empty).

    val: value to search for in the dictionary
    """
    return [key for key, value in d.items() if value == val]
def read_bytes_sync(sock, nbytes):
    """Read exactly `nbytes` from a blocking socket.

    This is not typically used; see IOStream.read_bytes instead.

    Raises OSError if the peer closes the connection before `nbytes`
    bytes arrive (the original looped forever on the empty recv()).
    """
    frames = []
    while nbytes:
        frame = sock.recv(nbytes)
        if not frame:
            # recv() returning b'' means EOF; without this check nbytes
            # never decreases and the loop spins forever.
            raise OSError("connection closed with %d bytes left to read" % nbytes)
        frames.append(frame)
        nbytes -= len(frame)
    if len(frames) == 1:
        return frames[0]
    return b''.join(frames)
def ip_received(data=None):
    """Construct a template describing an incoming IP packet event."""
    template = {'ip-event': 'received'}
    if data is None:
        return template
    template['ip-data'] = data
    return template
def quote(toquote):
    """quote('abc def') -> 'abc%20def'

    Percent-encode `toquote` for use in a URL component.  Characters outside
    the unreserved set (letters, digits, '_', '.', '-') are replaced by the
    percent-escaped bytes of their UTF-8 encoding.

    BUGFIX: the original hand-rolled the UTF-8 escaping and silently DROPPED
    every character with code point >= 0xBF (and 0x7F); encoding the actual
    UTF-8 bytes handles all characters correctly.
    """
    # fastpath
    if not toquote:
        if toquote is None:
            raise TypeError('None object cannot be quoted')
        return toquote
    always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
                   '0123456789_.-')
    quoted = []
    for char in toquote:
        if char in always_safe:
            quoted.append(char)
        else:
            # Percent-encode every UTF-8 byte of the character.
            quoted.extend('%{:02X}'.format(byte) for byte in char.encode('utf-8'))
    return ''.join(quoted)
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
    """Manually exclude certain members from the generated docs:
    anything private plus unittest's special scaffolding methods."""
    unittest_hooks = ("setUp", "setUpClass", "tearDown", "tearDownClass")
    if name.startswith("_"):
        return True
    return name in unittest_hooks
def latex_parse_eq(eq):
    """Return `eq` unchanged; LaTeX conversion is not implemented yet.

    Intended test cases once implemented:
      - v = d/t
      - 2piRC and 1/(2piRC)
      - use of sqrt, quad
      - parse lmbda, nu, exp
    """
    # NOTE(review): a sympy-based implementation was previously sketched
    # here and commented out; currently this is an identity pass-through.
    return eq
def hexRot(ch):
    """Rotate a hex digit by 8 (mod 16), returning a lowercase hex char."""
    rotated = (int(ch, base=16) + 8) % 16
    return format(rotated, 'x')
def make_divisible(v, divisor, min_value=None):
    """Round channel count v to the nearest multiple of `divisor`, never
    dropping below `min_value` (defaults to divisor) nor below 90% of v.

    Taken from the original TF MobileNet reference implementation:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py

    :param v: desired channel count
    :param divisor: required divisibility (typically 8)
    :param min_value: lower bound for the result
    :return: adjusted channel count
    """
    floor = divisor if min_value is None else min_value
    rounded = int(v + divisor / 2) // divisor * divisor
    new_v = max(floor, rounded)
    # Make sure that rounding down does not remove more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
def cryptMessage(mode, message, shiftKey):
    """Caesar-shift `message` by shiftKey; a mode starting with 'd' decrypts
    (negates the shift).  Non-alphabetic characters pass through unchanged."""
    if mode[0] == 'd':
        shiftKey = -shiftKey
    pieces = []
    for symbol in message:
        if not symbol.isalpha():
            pieces.append(symbol)
            continue
        code = ord(symbol) + shiftKey
        # Wrap around within the matching alphabet case.
        if symbol.isupper():
            if code > ord('Z'):
                code -= 26
            elif code < ord('A'):
                code += 26
        elif symbol.islower():
            if code > ord('z'):
                code -= 26
            elif code < ord('a'):
                code += 26
        pieces.append(chr(code))
    return ''.join(pieces)
def parse_ignore_patterns_from_dict(ignore_patterns_dict) -> tuple:
    """Parse a dictionary of (file_name_pattern, exclude_patterns) key/value
    pairs into the same form as the output of `parse_ignore_names_file`.

    Parameters
    ----------
    ignore_patterns_dict: Dict
        A dict where each key is a string and each value is a string or a
        nonempty list of strings.

    Returns
    -------
    Tuple
        Tuple of lists of strings, same form as `parse_ignore_names_file`.

    Notes
    -----
    To align with `parse_ignore_names_file`, values are checked to be
    strings, but not yet checked to be valid regular expressions.
    """
    def _assert_valid_key_value(k, v):
        if not isinstance(k, str):
            raise TypeError("ignore patterns in config contained non-string key {}".format(k))
        if len(k.strip()) == 0:
            raise ValueError("ignore pattern in config contained empty (file name) regex")
        # BUGFIX: the original condition `not all(...) and len(v) > 0` let an
        # empty list pass silently, contradicting the error message below.
        if len(v) == 0 or not all(isinstance(item, str) for item in v):
            raise TypeError(
                "ignore patterns for key {} contained non-string values or was empty.".format(k)
            )
        if not all(len(item.strip()) > 0 for item in v):
            raise ValueError("ignore pattern for key {} contained empty regex".format(k))

    if not isinstance(ignore_patterns_dict, dict):
        raise TypeError(
            "ignore patterns in config must have type Dict[str, Union[str, List[str]]],"
            "but was {}".format(type(ignore_patterns_dict))
        )
    result_list = []
    for key, value in ignore_patterns_dict.items():
        res = [key]
        if not isinstance(value, list):
            value = [value]
        _assert_valid_key_value(key, value)
        res += value
        result_list.append(res)
    return tuple(result_list)
def get_input_config_by_name(model_config, name):
    """Get input properties for the input named `name`.

    Parameters
    ----------
    model_config : dict
        dictionary object containing the model configuration
    name : str
        name of the input object

    Returns
    -------
    dict
        The properties dict for the named input, or None if absent.
    """
    for input_properties in model_config.get('input', []):
        if input_properties['name'] == name:
            return input_properties
    return None
def _adjust_component(r, g, b)-> tuple: """ Created by Eric Muzzo. Returns a tuple conatining the midpoint values of the quadrant in which each color lies when given an RGB value. >>>mypic = load_image(choose_file()) >>>show(posterizing(mypic)) """ mid1 = 31 mid2 = 95 mid3 = 159 mid4 = 223 colorList = [] for x in (r, g, b): if x < 64: colorList.append(mid1) elif x > 63 and x < 128: colorList.append(mid2) elif x > 127 and x < 192: colorList.append(mid3) else: colorList.append(mid4) colorTuple = tuple(colorList) return colorTuple
def library_sqrt(x):
    """Square root using the math library."""
    import math
    return math.sqrt(x)
def convertStrand(strand):
    """Normalize various strand notations into '+', '-', or '.'."""
    text = str(strand)
    if text in {"-", "0", "-1"}:
        return "-"
    if text in {"+", "1"}:
        return "+"
    return "."
def wc(file):
    """Count the number of lines in a file (like `wc -l`)."""
    count = 0
    with open(file, mode='rb') as handle:
        for _ in handle:
            count += 1
    return count
def is_dir_hidden(dir):
    """Return True if the final path component of `dir` starts with '.'
    (i.e. the directory is hidden in the ./<name> sense)."""
    # rsplit also handles paths without any '/' -- the original collected
    # slash positions and indexed past the last one, raising IndexError
    # when no slash was present.
    return dir.rsplit('/', 1)[-1].startswith('.')
def center(size, fit_size, offset):
    """Center one area within another, shifted by an offset.

    Arguments:
        size: (width, height) of the area to be centered.
        fit_size: (width, height) of the area in which to center `size`.
        offset: (x, y) coordinate offset.

    Returns the (x, y) top-left point of the centered area.
    """
    width, height = size
    fit_width, fit_height = fit_size
    off_x, off_y = offset
    centered_x = off_x + (fit_width - width) // 2
    centered_y = off_y + (fit_height - height) // 2
    return centered_x, centered_y
def build_bsub_command(command_template, lsf_args):
    """Build an LSF batch command of the form
    'bsub -o {logfile} [-<key> [<value>] ...] <command_template>'
    from the lsf_args mapping; return "" when no template is given.
    """
    if command_template is None:
        return ""
    pieces = ['bsub -o {logfile}']
    for key, value in lsf_args.items():
        pieces.append('-%s' % key)
        if value is not None:
            pieces.append('%s' % value)
    pieces.append('%s' % command_template)
    return ' '.join(pieces)
def lineincols(inlist, colsize):
    """Return a string composed of elements in inlist, each right-aligned
    in a column of (fixed) colsize.

    Usage:   lineincols(inlist, colsize)   where colsize is an integer
    """
    parts = []
    for item in inlist:
        text = item if isinstance(item, str) else str(item)
        if len(text) <= colsize:
            parts.append(text.rjust(colsize))
        else:
            # Truncate to exactly colsize characters; the original kept
            # colsize+1 (off-by-one), which broke the fixed column width.
            parts.append(text[:colsize])
    return ''.join(parts)
def _isint(string): """ returns true if string can be cast as an int, returns zero otherwise """ try: f = float(string) except: return False if round(f) - f == 0: return True return False
def explode_string(text):
    """Return a list containing each character of `text`, in order."""
    return list(text)
def column_converter(cname, gn2conv):
    """Translate one geno-dataframe column name (a gene network BXD name)
    via the gn2conv mapping.

    "marker" passes through unchanged; names missing from the mapping
    become "NOT_IN_METADATA".
    """
    if cname == "marker":
        return cname
    return gn2conv.get(cname, "NOT_IN_METADATA")
def get_baji(b, a, j, i, no):
    """Function: Search the position for baji in the spin-adapted index.

    Author(s): Takashi Tsuchimochi
    """
    bj = b * no + j
    ai = a * no + i
    # Triangular packing of the unordered pair (bj, ai).
    lo, hi = sorted((bj, ai))
    return hi * (hi + 1) // 2 + lo
def conjx(x, *args):
    """Return the conjugate of x multiplied by all arguments in *args."""
    product = x.conjugate()
    for factor in args:
        product = product * factor
    return product
def s_sort_rec(seq, i=None):
    """Selection-sort `seq` in place, recursively.

    Each step moves the largest element of seq[0..i] to position i,
    then recurses on the prefix.  Empty sequences are returned as-is.
    """
    if not seq:
        return seq
    if i is None:
        i = len(seq) - 1
    if i == 0:
        return
    # Index of the maximum within seq[0..i] (first occurrence on ties).
    largest = max(range(i + 1), key=seq.__getitem__)
    seq[largest], seq[i] = seq[i], seq[largest]
    s_sort_rec(seq, i - 1)
def parsePeakIntensity(lines):
    """Return the peak intensity of each identified molecular line.

    Args:
        lines: information on all identified molecular lines
               (list of dicts with a "peakintensity" key)

    Returns:
        list of peak intensities, in input order
    """
    # Leftover debug print(result) removed.
    return [line["peakintensity"] for line in lines]
def convertFromRavenComment(msg):
    """Convert fake <ravenTEMPcomment> nodes back into real XML comments.

    @ In, msg, converted file contents as a string (with line separators)
    @ Out, string, string contents of a file
    """
    replacements = (('<ravenTEMPcomment>', '<!--'),
                    ('</ravenTEMPcomment>', '-->'))
    for fake, real in replacements:
        msg = msg.replace(fake, real)
    return msg
def write_float_11e(val: float) -> str:
    """Format a float in Nastran 11.4E style; zero collapses to ' 0.0'."""
    text = '%11.4E' % val
    zero_forms = (' 0.0000E+00', '-0.0000E+00')
    return ' 0.0' if text in zero_forms else text
def _bounce(open, close, level, previous_touch): """ did we bounce above the given level :param open: :param close: :param level: :param previous_touch :return: """ if previous_touch == 1 and open > level and close > level: return 1 elif previous_touch == 1 and open < level and close < level: return -1 else: return 0
def wipe_out_eol(line):
    """Strip the trailing '\\n' (or '\\r\\n') end-of-line from `line`."""
    assert line[-1:] == '\n'
    cut = -2 if line[-2:-1] == '\r' else -1
    return line[:cut]
def luaquote(string):
    """Quote a Python string as a Lua single-quoted string literal."""
    # Backslashes must be escaped first so later escapes are not doubled.
    escaped = (string.replace('\\', '\\\\')
                     .replace('\'', '\\\'')
                     .replace('\n', '\\n'))
    return '\'' + escaped + '\''
def known_domain_data(known_uid, known_verbose_name, known_os_type):
    """Assemble the known domain data fields into a dict."""
    return dict(id=known_uid,
                verbose_name=known_verbose_name,
                os_type=known_os_type)
def histogram(data):
    """Return a histogram of your data.

    :param data: The data to histogram
    :type data: list[object]
    :return: The histogram (value -> occurrence count)
    :rtype: dict[object, int]
    """
    counts = {}
    for datum in data:
        counts[datum] = counts.get(datum, 0) + 1
    return counts
def triplets(a, b, c):
    """For each distinct q in b, count pairs (p, r) with p in a, r in c and
    p <= q >= r; return the total over all q.

    Inputs are deduplicated.  O(n log n) via binary search over the sorted,
    deduplicated a and c (the original used a two-pointer sweep).
    """
    import bisect
    a = sorted(set(a))
    c = sorted(set(c))
    total = 0
    for q in set(b):
        left = bisect.bisect_right(a, q)    # elements of a that are <= q
        right = bisect.bisect_right(c, q)   # elements of c that are <= q
        total += left * right
    return total
def get_json_field(json_data, field_names):
    """Retrieve the value of a (possibly nested) field from JSON data
    for a user.

    .. versionchanged:: 3.1.0
       Refactored the function to be more efficient.

    :param json_data: The JSON data from which the field value must be retrieved
    :type json_data: dict
    :param field_names: The field name along with any parent field paths
    :type field_names: str, tuple, list
    :returns: The value for the specific field in its original format
    :raises: :py:exc:`ValueError`, :py:exc:`TypeError`, :py:exc:`KeyError`
    """
    field_value = None
    if isinstance(field_names, str):
        field_value = json_data.get(field_names)
    elif isinstance(field_names, (tuple, list)) and field_names:
        # Walk the path one key at a time; generalizes the original
        # hard-coded 2-, 3- and 4-level lookups to any depth.
        field_value = json_data
        for field in field_names:
            field_value = field_value.get(field)
    return field_value
def rename_labels_to_internal(x):
    """Shorten labels ("Experience" -> "exp") and convert to lower-case."""
    shortened = x.replace("Experience", "exp")
    return shortened.lower()
def list_resources(technologies):
    """List resources: input names that no technology produces as output."""
    consumed = {item.name for tech in technologies for item in tech.inputs}
    produced = {item.name for tech in technologies for item in tech.outputs}
    return consumed - produced
def unload_fields(obj, fields):
    """The complement of load_fields().

    @param obj      Source object to read attribute values from
    @param fields   Field definition list: each entry maps attribute name ->
                    descriptor whose index 1 is the bit shift
    @return         List of packed integer field values suitable for a
                    binary pack
    """
    data = []
    for field in fields:
        value = 0
        for name, desc in field.items():
            # Idiom fix: plain truth test instead of `hasattr(...) == True`.
            if hasattr(obj, name):
                value |= getattr(obj, name) << desc[1]
        data.append(value)
    return data
def clean_int(s):
    """Parse an integer from s, ignoring surrounding junk.

    Leading spaces and any trailing run of spaces/commas are stripped before
    conversion; int() still raises ValueError for anything non-numeric that
    remains.
    """
    # str.lstrip/rstrip with explicit char sets replace the original
    # character-at-a-time while loop with the same semantics.
    return int(s.lstrip(' ').rstrip(' ,'))
def _rstrip(line, JUNK='\n \t'): """Return line stripped of trailing spaces, tabs, newlines. Note that line.rstrip() instead also strips sundry control characters, but at least one known Emacs user expects to keep junk like that, not mentioning Barry by name or anything <wink>. """ i = len(line) while i > 0 and line[i-1] in JUNK: i -= 1 return line[:i]
def sort_states(state, columns, reverse=True):
    """
    Sort the states according to the list given by prioritize_jobs.
    prioritize_jobs (or columns in this case) is list of according to which
    state should be prioritized for job submission. The position in the list
    indicates the prioritization.
    columns = ["memory", "disk"] with reverse=True means jobs with high memory
    will be submitted before jobs with lower memory requirements, followed by
    jobs with high disk vs. low disk requirement. Jobs with high memory and disk
    requirements will be submitted first then jobs with high memory and medium
    disk requirements, and so on and so forth.

    Args:
        state: List of states
        columns: List of keys in the dict by which dict is sorted
        reverse: Reverse the sorting or not. True = Bigger first, False = smaller first
    """
    # Memoizes comp_key results across rows (keys repeat for every row).
    key_cache = {}
    # Maps a column name to its sort direction: a '-' prefix in `columns`
    # means "invert this column" (stored as -1), otherwise 1.
    col_cache = dict([(c[1:],-1) if c[0] == '-' else (c,1) for c in columns])
    def comp_key(key):
        # Priority rank of a row key: higher rank = earlier in `columns`.
        if key in key_cache:
            return key_cache[key]
        if key in col_cache:
            # Look the key up under its original (possibly '-'-prefixed) name.
            ret = len(columns)-columns.index(key if col_cache[key] == 1 else '-'+key)
        else:
            # Keys not mentioned in `columns` get the lowest priority (0).
            ret = 0
        key_cache[key] = ret
        return ret
    def compare(row):
        # Build the sort key: row values ordered by column priority, with
        # '-'-prefixed columns negated so their sort direction is flipped.
        ret = []
        for k in sorted(row, key=comp_key, reverse=True):
            v = row[k]
            if k in col_cache:
                v *= col_cache[k]
            ret.append(v)
        return ret
    return sorted(state, key=compare, reverse=reverse)
def evaluateCondition(instructionLine, dict):
    """
    Evaluate the condition for a line of instruction.

    The last three whitespace-separated tokens of the line are taken as
    ``<register> <comparison> <number>`` and the comparison is applied to the
    register's current value.  An explicit operator table replaces eval() so
    arbitrary instruction text can never execute code.

    :param instructionLine: instruction text ending in e.g. "... if a > 1"
    :param dict: mapping of register names to their current numeric values
    :returns: True if the condition holds, False otherwise
    :raises KeyError: for an unknown register or comparison operator
    """
    register, sign, num = instructionLine.split()[-3:]
    comparators = {
        '>':  lambda lhs, rhs: lhs > rhs,
        '<':  lambda lhs, rhs: lhs < rhs,
        '>=': lambda lhs, rhs: lhs >= rhs,
        '<=': lambda lhs, rhs: lhs <= rhs,
        '==': lambda lhs, rhs: lhs == rhs,
        '!=': lambda lhs, rhs: lhs != rhs,
    }
    try:
        threshold = int(num)
    except ValueError:
        # eval() accepted float literals too; keep that working.
        threshold = float(num)
    return comparators[sign](dict[register], threshold)
def get_prev_page_pointer(words, line_breaks):
    """
    Finds the start position of the last word in the page immediately
    preceding the current one.

    Parameters
    ----------
    words: list
        The words in the previous page
    line_breaks:
        The words split across a line in the previous page

    Returns
    -------
    dict
        Pointer to the last word of the previous page, or an empty dict when
        the previous page has no recorded line breaks
    """
    if "end" not in line_breaks:
        return {}
    if len(line_breaks["end"]) == 0:
        return {}

    final_break = line_breaks["end"][-1]
    token = line_breaks["tokens"][final_break]
    page, line, pos = token["positions"][0]

    if pos <= 0:
        # Word starts a line: step back to the end of the previous line
        # (when one exists on this page).
        if line > 0:
            line -= 1
            pos = len(words[line]) - 1
    else:
        pos -= 1

    return {"line": line, "page": page, "pos": pos}
def bernoulli_prob(var, bias=0.5):
    """
    Returns Sympy-expression of a Bernoulli PMF for var at specified bias.

    :param var: boolean symbol to express pmf
    :param bias: bias probability (float ranging from 0 to 1).
    :return bias*var + (1-bias)*(1-var)
    """
    # Algebraically: bv + (1-b)(1-v) = 2bv - v - b + 1 = v(2b-1) + (1-b)
    slope = 2 * bias - 1
    intercept = 1 - bias
    return var * slope + intercept
def find(f, seq):
    """
    Search for item in a list.

    :param f: the value to look for
    :param seq: iterable to scan
    :returns: True if any element equals f, else False
    """
    # any() with a generator short-circuits exactly like the original loop;
    # f == item keeps the original operand order for types with custom __eq__.
    return any(f == item for item in seq)
def _filter_pairs_in (pair_list,dict_context) : """Filter the pairs based on the fact that some pronouns allow pairs where the pronoun is the first and the mention appears after""" good_pair_list=[] for pair in pair_list : pron=pair[1][0] difference=int(pair[1][-1])-int(pair[0][-1]) if dict_context[pron]["cataphora"]==0 and difference<0 : pass else : good_pair_list.append(pair) return good_pair_list
def total_cost(J_content, J_style, alpha = 10, beta = 40):
    """
    Combine the content and style costs into one weighted total.

    Arguments:
    J_content -- content cost coded above
    J_style -- style cost coded above
    alpha -- hyperparameter weighting the importance of the content cost
    beta -- hyperparameter weighting the importance of the style cost

    Returns:
    J -- total cost: alpha * J_content + beta * J_style
    """
    return alpha * J_content + beta * J_style
def decode_str(string):
    """Convert a bytestring to a string. Is a no-op for strings.

    This is necessary because python3 distinguishes between bytestrings and
    unicode strings, and uses the former for operations that read from files
    or other operations. In general programmatically we're fine with just
    using unicode, so we decode everything. This function also makes
    backwards-compatibility with python2, since this will have no effect.

    :param string: Either a string or bytes.
    :type string: ``str`` or ``bytes``
    :return: A unicode string.
    :rtype: ``str``
    """
    # Duck-typed on purpose: anything exposing .decode (bytes, bytearray,
    # memoryview) is decoded; everything else passes through untouched.
    decode = getattr(string, "decode", None)
    if decode is not None:
        return decode("utf-8")
    return string
def delete_fav_msg(bus_stop_code):
    """ Message that will be sent if user delete a bus stop code from their favourites """
    return (
        f'Bus Stop Code /{bus_stop_code} has been deleted! \n\n'
        'To add another bus stop code, type: /add_favourites [BUS STOP CODE]'
        '\n\n e.g: /add_favourites 14141'
    )
def dict_key_filter(function, dictionary):
    """
    Filter dictionary by its key.

    Args:
        function: takes key as argument and returns True if that item should be included
        dictionary: python dict to filter
    """
    # filter() over the dict iterates its keys in insertion order.
    return {key: dictionary[key] for key in filter(function, dictionary)}
def _parse_or(t): """Evaluate OR t.""" try: return t[0]["lhs"] except KeyError: try: return t[0]["rhs"] except KeyError: return False
def ij2M(ij):
    """
    Convert (i, j) indices of a symmetric 2nd-order tensor to its vector index.

    :param ij: two-character index string such as "11" or "23"
    :returns: integer vector index 0-5, or None for unrecognized input
              (matching the original if/elif chain's fall-through)
    """
    # Table lookup replaces the if/elif chain; symmetry means "ij" and "ji"
    # map to the same slot.
    voigt_like = {
        "11": 0, "22": 1, "33": 2,
        "12": 3, "21": 3,
        "23": 4, "32": 4,
        "13": 5, "31": 5,
    }
    return voigt_like.get(ij)