Columns: content (string, lengths 35 to 416k) · sha1 (string, length 40) · id (int64, range 0 to 710k)
import functools


def env_default(**options):
    """
    Use env variable, if set, otherwise execute function to compute attribute.
    """
    not_given = object()

    def decorator(func):
        name = getattr(func, "env_name", func.__name__)
        type = getattr(func, "env_type", str)
        default = getattr(func, "env_default", not_given)

        @functools.wraps(func)
        def decorated(self):
            value = self.env(name, type=type, default=default)
            if value is not_given:
                return func(self)
            return value

        return decorated

    return decorator
ee2a85f995e585d1195351b8f77320e9c1087e89
12,452
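A minimal usage sketch for the env_default decorator above; the Settings class and its env helper are hypothetical stand-ins for whatever the host class provides:

import os


class Settings:
    def env(self, name, type=str, default=None):
        # hypothetical lookup: read an env var, fall back to the given default
        raw = os.environ.get(name)
        return type(raw) if raw is not None else default

    @property
    @env_default()
    def timeout(self):
        # computed only when no "timeout" env var is set
        return 30


print(Settings().timeout)  # 30 unless os.environ["timeout"] is set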
def a_simple_function(a: str) -> str:
    """
    This is a basic module-level function.

    For a more complex example, take a look at `a_complex_function`!
    """
    return a.upper()
a511eb8577e0440d0026c4762628fdb6de9be643
12,454
def paginated_list(full_list, max_results, next_token):
    """
    Returns a tuple containing a slice of the full list starting at next_token
    and ending with at most the max_results number of elements, and the new
    next_token which can be passed back in for the next segment of the full
    list.
    """
    sorted_list = sorted(full_list)
    list_len = len(sorted_list)
    start = sorted_list.index(next_token) if next_token else 0
    end = min(start + max_results, list_len)
    new_next = None if end == list_len else sorted_list[end]
    return sorted_list[start:end], new_next
99a20e3af4ff64d3de52eccbc00aea9fda735e00
12,455
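A quick usage sketch for paginated_list above, paging through an illustrative list two items at a time:

items = ["b", "d", "a", "c", "e"]
token = None
while True:
    page, token = paginated_list(items, 2, token)
    print(page)  # ['a', 'b'], then ['c', 'd'], then ['e']
    if token is None:
        break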
def celsius_to_kelvin(deg_C):
    """Convert degree Celsius to Kelvin."""
    return deg_C + 273.15
45eb2dd781adbab2db44f5a8ce5042b2834c7c00
12,456
from os.path import expandvars, basename
from glob import glob


def all_models():
    """
    Get a list of models that are valid arguments to load_model()
    """
    basedir = expandvars('$I3_BUILD/MuonGun/resources/tables/')
    suffix = '.single_flux.fits'
    return [p[:-len(suffix)] for p in map(basename, glob(basedir + '*' + suffix))]
ed9cc1c9fb980f342022ee82dce23828ac28df4d
12,458
def get_points_2D(outputs, threshold=0.2):
    """For one detected person:

    [Pose(keypoints={
        <KeypointType.NOSE: 0>: Keypoint(point=Point(x=717.3, y=340.8), score=0.98),
        <KeypointType.LEFT_EYE: 1>: Keypoint(point=Point(x=716.2, y=312.5), score=0.6),
        <KeypointType.RIGHT_EYE: 2>: Keypoint(point=Point(x=699.6, y=312.8), score=0.98),
        <KeypointType.LEFT_EAR: 3>: Keypoint(point=Point(x=720.13306, y=314.34964)
    }, score=0.34098125)]

    xys = {0: (698, 320), 1: (698, 297), 2: (675, 295), ....}
    """
    pose = outputs[0]
    xys = {}
    for label, keypoint in pose.keypoints.items():
        if keypoint.score > threshold:
            xys[label.value] = [int(keypoint.point[0]), int(keypoint.point[1])]
    return xys
524b350c7a5cceb60e07008ff00c8ee869e0b71b
12,459
def params_linear_and_squares(factors):
    """Index tuples for the linear_and_squares production function."""
    names = factors + [f"{factor} ** 2" for factor in factors] + ["constant"]
    return names
8f7505cb2a07e08d3ad2686d52960d631cd7b458
12,461
from typing import Tuple


def metadataAppendCompatible(meta1: dict, meta2: dict) -> Tuple[bool, str]:
    """
    Verifies two metadata dictionaries match in a set of required fields. If a
    field is present in only one or neither of the two dictionaries, this is
    considered a match.

    This is primarily intended as a safety check, and does not conclusively
    determine that two images are valid to append together or are part of the
    same series.

    Args:
        meta1: First metadata dictionary
        meta2: Second metadata dictionary

    Returns:
        True if all keys that are present in both dictionaries have equivalent
        values, False otherwise.
    """
    matchFields = ["Modality", "MagneticFieldStrength", "ImagingFrequency",
                   "Manufacturer", "ManufacturersModelName", "InstitutionName",
                   "InstitutionAddress", "DeviceSerialNumber", "StationName",
                   "BodyPartExamined", "PatientPosition", "EchoTime",
                   "ProcedureStepDescription", "SoftwareVersions",
                   "MRAcquisitionType", "SeriesDescription", "ProtocolName",
                   "ScanningSequence", "SequenceVariant", "ScanOptions",
                   "SequenceName", "SpacingBetweenSlices", "SliceThickness",
                   "ImageType", "RepetitionTime", "PhaseEncodingDirection",
                   "FlipAngle", "InPlanePhaseEncodingDirectionDICOM",
                   "ImageOrientationPatientDICOM", "PartialFourier"]

    # If a particular metadata field is not defined (i.e., 'None'), then there
    # can't be a conflict in value; thus, short-circuit and skip the rest of
    # the check if a None value is found for a field.
    for field in matchFields:
        value1 = meta1.get(field, None)
        if value1 is None:
            continue

        value2 = meta2.get(field, None)
        if value2 is None:
            continue

        if value1 != value2:
            errorMsg = (f"Metadata doesn't match on field: {field} "
                        f"(value 1: {value1}, value 2: {value2})")
            return (False, errorMsg)

    return (True, "")
8a3fc01d331f2f0c4dadb148819460092619f64c
12,462
from typing import Callable
import itertools


def _word_wrap(paragraph: str, width: float, measure_func: Callable[[str], float]):
    """Break text into multiple lines."""
    result = []
    for line in paragraph.split("\n"):
        # handle empty lines
        if line == "":
            result.append("\n")
            continue
        fields = itertools.groupby(line, lambda c: c.isspace())
        fields = ["".join(g) for _, g in fields]
        if len(fields) % 2 == 1:
            fields.append("")
        x = ""
        for a, b in zip(fields[::2], fields[1::2]):
            w = measure_func(x + a)
            if w > width:
                if x == "":
                    result.append(a)
                    continue
                else:
                    result.append(x)
                    x = ""
            x += a + b
        if x != "":
            result.append(x + "\n")
    return result
721185a4788e08a7126f8755157dc2d2f9442313
12,463
def set_model_internal_data(model, original_data, modified_data, deleted_data):
    """
    Set internal data to model.
    """
    model.__original_data__ = original_data
    list(map(model._prepare_child, model.__original_data__))

    model.__modified_data__ = modified_data
    list(map(model._prepare_child, model.__modified_data__))

    model.__deleted_fields__ = deleted_data

    return model
cf17f9f04342be01706530c79f9bb83cd8c03b73
12,464
def tuple_as_atom(atom: tuple) -> str:
    """Return readable version of given atom.

    >>> tuple_as_atom(('a', (3,)))
    'a(3)'
    >>> tuple_as_atom(('bcd', ('bcd', 12)))
    'bcd(bcd,12)'
    """
    assert len(atom) == 2
    return '{}({})'.format(atom[0], ','.join(map(str, atom[1])))
5c18f34733d839865eef35509f95c2d4d198a903
12,465
import struct


def Fbytes(f):
    """
    Return bytes representation of float
    """
    return struct.pack("f", f)
117fb86216ad6983851923ac9dbd0196cc29b92d
12,466
def clamp(value, low, high):
    """Clamp the given value in the given range."""
    return max(low, min(high, value))
48303b27d78f8d532b1e74db052457ae19b9f10d
12,469
def fmt_n(large_number):
    """
    Formats a large number with thousands separator, for printing and logging.

    Param large_number (int) like 1_000_000_000

    Returns (str) like '1,000,000,000'
    """
    return f"{large_number:,.0f}"
7b23e902a1c9600f1421b45b27623abaa1930f05
12,470
from time import strftime
import os


def construct_path(pathv, project_path, folder_path, ext, **kwargs):
    """
    THINGS TO ADD:
    1. CHECK PROJECT_PATH, FOLDER_PATH IF EXISTS
    2. CHECK IF FULL PATH ALREADY EXISTS
    """
    filename = "\\"
    for i, arg in enumerate(pathv):
        if i == (len(pathv) - 1):
            filename += pathv[i]
        else:
            filename += pathv[i] + "_"
    if kwargs:
        # use .get() so a missing "time"/"date" key does not raise KeyError
        if kwargs.get("time") is True:
            filename += strftime("%H%M%S")
        if kwargs.get("date") is True:
            date = strftime("%Y%m%d")
            os.system("mkdir %s" % date)
            folder_path = folder_path + "\\" + date
    if list(ext)[0] != ".":
        ext = "." + ext
    PATH = project_path + folder_path + filename + ext
    return PATH
5322a6cbee9001ff9205a4fff16d4ea8b21031f6
12,471
def inner_product(L1, L2):
    """
    Take the inner product of the frequency maps.
    """
    result = 0.
    for word1, count1 in L1:
        for word2, count2 in L2:
            if word1 == word2:
                result += count1 * count2
    return result
65ede8eddcf86d75a1b130a76381416c2a272f61
12,472
def img_to_slices(img):
    """
    From 3D image to slices list
    """
    res = []
    for i, slice_img in enumerate(img):
        res.append(slice_img)
    return res
7776eed3c9abaf3267eb304421b58a6af190a63f
12,473
def x_point_wgt_av(df_agg, x_var):
    """
    Set the x_point to be the weighted average of x_var within the bucket,
    weighted by stat_wgt.
    """
    if not (x_var + '_wgt_av') in df_agg.columns:
        raise ValueError(
            "\n\tx_point_wgt_av: This method can only be used when"
            "\n\tthe weighted average has already been calculated."
        )
    res = df_agg.assign(
        x_point=lambda df: df[x_var + '_wgt_av']
    )
    return res
15d8515efceb67e1dc9062d7fd79250c5935b549
12,474
import hashlib


def generate_hash(bytes_, hash_algorithm=hashlib.sha256) -> str:
    """
    Return string with hash containing only hexadecimal digits
    """
    return hash_algorithm(bytes_).hexdigest()
2f4bbc9d47d9cf22f005434aea7b2749ae8594bd
12,476
from typing import Tuple
import subprocess

from bs4 import BeautifulSoup


def get_pdf_dim(pdf_file: str, page: int = 1) -> Tuple[int, int]:
    """Get the dimension of a pdf.

    :param pdf_file: path to the pdf file
    :param page: page number (starting from 1) to get a dimension for
    :return: width, height
    """
    html_content = subprocess.check_output(
        f"pdftotext -f {page} -l {page} -bbox '{pdf_file}' -", shell=True
    )
    soup = BeautifulSoup(html_content, "html.parser")
    pages = soup.find_all("page")
    page_width, page_height = (
        int(float(pages[0].get("width"))),
        int(float(pages[0].get("height"))),
    )
    return page_width, page_height
b432c41c417a90474f61846d422d7f6e9c7c22d0
12,477
from typing import Mapping
from typing import Optional


def _rename_nodes_on_tree(
    node: dict,
    name_map: Mapping[str, str],
    save_key: Optional[str] = None,
) -> dict:
    """Given a tree, a mapping of identifiers to their replacements, rename
    the nodes on the tree. If `save_key` is provided, then the original
    identifier is saved using that as the key."""
    name = node["name"]
    renamed_value = name_map.get(name, None)
    if renamed_value is not None:
        # we found the replacement value! first, save the old value if the
        # caller requested.
        if save_key is not None:
            node[save_key] = name
        node["name"] = renamed_value
    for child in node.get("children", []):
        _rename_nodes_on_tree(child, name_map, save_key)
    return node
b8d3df2f2247b27c614b767b28eda7b91e380524
12,478
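A small usage sketch for _rename_nodes_on_tree above, with an illustrative two-node tree:

tree = {"name": "A", "children": [{"name": "B"}]}
renamed = _rename_nodes_on_tree(tree, {"B": "B-renamed"}, save_key="orig")
# renamed == {"name": "A", "children": [{"name": "B-renamed", "orig": "B"}]}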
def add_edges(t, countDict, G, last_k):
    """
    :param t: Tree of the sequences available as states
    :param countDict: dictionary counting the occurrences of each sequence
    :param G: the graph containing the states (each one is a sequence)
    :param last_k: the number of recent items considered
    :return: the same graph G, with edges connecting states
    """
    # add links
    rootNode = t.get_root()
    for node in G.nodes_iter():
        # if the sequence is shorter than the states' length, the next state
        # has all the sequence as prefix
        next_state_prefix = node[1:] if len(node) == last_k else node
        p = t.find_path(rootNode, next_state_prefix)
        if t.path_is_valid(p):
            children = t.get_nodes_tag(t[p].fpointer)
            for c in children:
                # the tree may suggest a child which is not a state of the
                # graph, because it was part of a longer sequence; in that
                # case no edge has to be added
                if next_state_prefix + (c,) in G.nodes():
                    if countDict.get(node + (c,), 0) != 0:  # do not add edge if count is 0
                        G.add_edge(node, next_state_prefix + (c,),
                                   {'count': countDict.get(node + (c,), 0)})
    return G
9016808d44230503521ba098aef4a5ac157fdf1d
12,479
import os


def _get_stripped_path(bin_path):
    """Finds the stripped version of |bin_path| in the build output directory.

    Returns |bin_path| if no stripped path is found.
    """
    stripped_path = bin_path.replace('lib.unstripped/',
                                     'lib/').replace('exe.unstripped/', '')
    if os.path.exists(stripped_path):
        return stripped_path
    else:
        return bin_path
616cf9b66d9002792ab51489a25e0bbc49096f6b
12,480
def ignore(name):
    """
    Files to ignore when diffing

    These are packages that we're already diffing elsewhere, or files that we
    expect to be different for every build, or known problems.
    """
    # We're looking at the files that make the images, so no need to search them
    if name in ['IMAGES']:
        return True
    # These are packages of the recovery partition, which we're already diffing
    if name in ['SYSTEM/etc/recovery-resource.dat',
                'SYSTEM/recovery-from-boot.p']:
        return True
    # These files are just the BUILD_NUMBER, and will always be different
    if name in ['BOOT/RAMDISK/selinux_version',
                'RECOVERY/RAMDISK/selinux_version']:
        return True
    # b/26956807 .odex files are not deterministic
    if name.endswith('.odex'):
        return True
    return False
d5f64616480fa14c03b420165d2c89040a8cc768
12,481
def power(work, time):
    """
    Power is the rate at which the work is done.
    Calculates the amount of work done divided by the time it takes to do
    the work.

    Parameters
    ----------
    work : float
    time : float

    Returns
    -------
    float
    """
    return work / time
48eb476658fa19a6002b428993bfa58c81634638
12,482
def find(*patterns):
    """Decorate a function to be called for each time a pattern is found in a line.

    :param str patterns: one or more regular expression(s)

    Each argument is a regular expression which will trigger the function::

        @find('hello', 'here')
        # will trigger once on "hello you"
        # will trigger twice on "hello here"
        # will trigger once on "I'm right here!"

    This decorator can be used multiple times to add more rules::

        @find('here')
        @find('hello')
        # will trigger once on "hello you"
        # will trigger twice on "hello here"
        # will trigger once on "I'm right here!"

    If the Sopel instance is in a channel, or sent a ``PRIVMSG``, the function
    will execute for each time a received message matches an expression. Each
    match will also contain the position of the instance it found.

    Inside the regular expression, some special directives can be used.
    ``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``,
    and ``$nickname`` will be replaced with the nick of the bot::

        @find('$nickname')
        # will trigger for each time the bot's nick is in a trigger

    .. versionadded:: 7.1

    .. note::

        The regex rule will match once for each non-overlapping match, from
        left to right, and the function will execute for each of these
        matches.

        To match only once from anywhere in the line, use the :func:`search`
        decorator instead. To match only once from the start of the line,
        use the :func:`rule` decorator instead.
    """
    def add_attribute(function):
        function._sopel_callable = True
        if not hasattr(function, "find_rules"):
            function.find_rules = []
        for value in patterns:
            if value not in function.find_rules:
                function.find_rules.append(value)
        return function
    return add_attribute
884005008791baba3a9949e9d2c02aee0f985552
12,484
import subprocess


def get_installed_reqs(site_packages_dir):
    """
    Return the installed pip requirements found in `site_packages_dir` as text.
    """
    # Also include these packages in the output with --all: wheel, distribute,
    # setuptools, pip
    args = ['pip', 'freeze', '--exclude-editable', '--all',
            '--path', site_packages_dir]
    return subprocess.check_output(args, encoding='utf-8')
6201718f42b22e60ba3a1088e80be8767958feb3
12,485
def _basis2name(basis):
    """
    converts the 'basis' into the proper name.
    """
    component_name = (
        'DC' if basis == 'diffmap'
        else 'tSNE' if basis == 'tsne'
        else 'UMAP' if basis == 'umap'
        else 'PC' if basis == 'pca'
        else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis
        else basis
    )
    return component_name
d359fc8a1fe91ccf954a5e0a1dbf69077d444a44
12,486
from typing import List
from typing import cast
import ast


def get_funcs(module) -> List[ast.FunctionDef]:
    """ get function list """
    return [
        cast(ast.FunctionDef, stm)
        for stm in ast.walk(module)
        if isinstance(stm, ast.FunctionDef)
    ]
98d62ee1e13461cc13226037fa1233431533ca08
12,487
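A quick usage sketch for get_funcs above, listing the function names found in a source string:

import ast

module = ast.parse("def f(): pass\ndef g(): pass\n")
print([func.name for func in get_funcs(module)])  # ['f', 'g']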
def remove_folders(bookmarks, folders):
    """Function to remove top level folders from bookmarks."""
    # Iterate over a copy: deleting from the list while iterating it directly
    # would skip the element that follows each deletion.
    for item in list(bookmarks['Data Science']):
        if type(item) is dict:
            folder_name = list(item.keys())[0]
            if folder_name in folders:
                bookmarks['Data Science'].remove(item)
    return bookmarks
b2f6a85e9a518f475985124820ba931a442d3c38
12,488
import re


def validate_container_name(name):
    """Make sure a container name conforms to the naming convention

    https://docs.openstack.org/developer/swift/api/object_api_v1_overview.html
    https://lists.launchpad.net/openstack/msg06956.html
    > Length of container names / Maximum value 256 bytes / Cannot contain the / character.
    """
    validate_name = re.compile('^[^/]+$')
    # The documented limit is 256 *bytes*, so measure the UTF-8 encoded
    # length rather than the character count.
    return (
        len(name.encode('utf-8')) <= 256 and
        bool(validate_name.match(name))
    )
5bef8b304c004dc3169b6984b49a0d669fc9b7b3
12,490
def function(values):
    """A simple fitness function, evaluating the sum of squared parameters."""
    return sum([x ** 2 for x in values])
ee96b0948e43eec1e86ffeeca681567d2a0afa53
12,491
def create_redis_compose_node(name):
    """
    Args:
        name(str): Name of the redis node

    Returns:
        dict: The service configuration for the redis node
    """
    return {
        "container_name": name,
        "image": "redis:3.2.8",
        "command": "redis-server --appendonly yes",
        "deploy": {
            "placement": {
                "constraints": ["node.role == worker"]
            }
        },
        "volumes": ["./volumes/{:s}/:/data/".format(name)]
    }
e6d01acc8b0c5c324ad3e0f6e5f527e2a6433705
12,493
def join_germanic(iterable, capitalize=True, quoteChars="\"", concat="'"):
    """Like "".join(iterable) but with special handling, making it easier to
    just concatenate a list of words.

    Tries to join an iterable as if it were a sequence of words of a generic
    western germanic language. Inserts a space between each word. If
    capitalize=True any word following a single period character ("."), will
    be capitalized.

    quoteChars specifies a string of characters that specifies a quote. It
    tries to be smart about the quotes and keep track of when they start and
    end.

    The following conditions yield a space before the current "word":

    - Current is not "." and the previous was ".".
    - Previous is not in quoteChars or deemed a start quote.
    - Current is not in quoteChars and deemed a start quote.
    - Current is not "!?,:;".

    Any word in concat, will never have spaces around it.

    The above rules should ensure that things like [".", ".", "."] yields
    a "... ", and that quotes look reasonably okay: ["Hey", "\"", "Friend", "\""]
    yields "Hey \"Friend\"".

    The function has no concept of semantics, and is thus limited in what it
    can do. For example if quoteChars="'", it won't know whether an apostrophe
    is an apostrophe or a quote.
    """
    def mkcapital(w):
        return w.capitalize()

    def nopmkcapital(w):
        return w

    capital = mkcapital if capitalize else nopmkcapital

    quoteLevels = {c: 0 for c in quoteChars}

    # Check whether c is a quote, and handle it.
    def checkQuote(c):
        if c in quoteChars:
            ql = quoteLevels[c]
            # If we have already seen this quote, decrement, if not we
            # increment. This way we can know how many start quotes we have
            # seen
            if ql > 0:
                ql -= 1
            else:
                ql += 1
            quoteLevels[c] = ql

    s = ""
    last = ""
    for w in iterable:
        w = str(w)
        space = True if last != "" else False
        # Don't add spaces around concat-words.
        if w in concat or last in concat:
            space = False
        # "." followed by more "."
        elif last.endswith("."):
            w = capital(w)
            if w.startswith("."):
                space = False
        # Remove space after last word in a sentence or certain punctuation.
        elif w in ".!?,;:":
            space = False
        # The last two takes care of end and start quotes.
        elif w in quoteChars:
            ql = quoteLevels[w]
            if ql == 1:
                space = False
        elif last != "" and last in quoteChars:
            ql = quoteLevels[last]
            if ql == 1:
                space = False
        checkQuote(w)
        if space:
            s += " "
        s += w
        last = w
    return s
94b4117994b4d83ce47cfed7cd461741bb1c131c
12,494
def file_name_to_multiline_readable(
    file: str, two_rows_only: bool = False, net_only: bool = False
) -> str:
    """Returns a stylised file_name to be human readable across multiple
    lines, e.g. for titling a plot.

    Args:
        file: Processed results filename with or without path.
    """
    # remove path if present
    if "/" in file:
        file = file.split("/")[-1]
    intermediate = (
        file.replace("results_", "")
        .replace(".npy", "")
        .replace("NET_", "network: ")
        .replace("_SCI-CASE_", "\nscience case: ")
        .replace("..", ", ")
    )
    if net_only:
        return intermediate.split("\n")[0]
    else:
        if two_rows_only:
            return intermediate.replace("_WF_", ", waveform: ").replace(
                "_INJS-PER-ZBIN_", ", injections per bin: "
            )
        else:
            return intermediate.replace("_WF_", "\nwaveform: ").replace(
                "_INJS-PER-ZBIN_", "\ninjections per bin: "
            )
8d00ddf7764a6458a28bbd9f5bb1317b392f64cd
12,496
def sun_rot_elements_at_epoch(T, d):
    """Calculate rotational elements for Sun.

    Parameters
    ----------
    T: float
        Interval from the standard epoch, in Julian centuries i.e. 36525 days.
    d: float
        Interval in days from the standard epoch.

    Returns
    -------
    ra, dec, W: tuple (float)
        Right ascension and declination of north pole, and angle of the prime
        meridian.
    """
    ra = 286.13
    dec = 63.87
    W = 84.176 + 14.1844000 * d

    return ra, dec, W
9a74edc686869eebd851200687fe4d10d38d550a
12,497
def mejor_aerolinea(vuelos: dict) -> str:
    """The best airline

    Parameters:
        vuelos (dict): A dictionary of dictionaries with the flight
            information.

    Returns:
        str: The name of the best airline (the one with the lowest average
            delay)
    """
    aero_con_promedioretraso = {}
    # vuelos = {'codigovuelo': {'aerolinea': nombre, 'retraso': minutos}}
    for vuelo in vuelos:
        aereo = vuelos[vuelo]['aerolinea']
        retra = vuelos[vuelo]['retraso']
        if aereo not in aero_con_promedioretraso:
            retra_total = 0
            cantidad = 0
        else:
            retra_total = aero_con_promedioretraso[aereo][0]
            cantidad = aero_con_promedioretraso[aereo][1]
        aero_con_promedioretraso[aereo] = (retra_total + retra, cantidad + 1)

    menor_aero = 'aviancazaza'
    menor_promedio = -1
    for aerolinea in aero_con_promedioretraso:
        promedio_actual = (aero_con_promedioretraso[aerolinea][0]
                           / aero_con_promedioretraso[aerolinea][1])
        if promedio_actual < menor_promedio or menor_promedio == -1:
            menor_promedio = promedio_actual
            menor_aero = aerolinea
    return menor_aero
68816ae69bc3b7189e06f690c635739b6933eb17
12,498
def write_positions(positions):
    """Format float (x, y) tuples into a text string."""
    result = ""
    if positions is not None:
        for x, y in positions:
            result = "%s %s,%s" % (result, x, y)
    return result
0587272400c2e835bcf2ebd3e55f65e2daa7ee1f
12,499
def NameAndAttribute(line):
    """
    Split name and attribute.

    :param line: a line from a DOT file
    :return: name string and attribute string
    """
    split_index = line.index("[")
    name = line[:split_index]
    attr = line[split_index:]
    return name, attr
7595f51d728c5527f76f3b67a99eccd82fb9e8b7
12,500
import os


def get_game_bypath(root, path):
    """
    Search and get game-element from an ElementTree XML root.

    First matching game element will be read. Comparison is done at basename
    level of path, which means ignoring its directory part and comparing
    filenames only.

    Parameters
    ----------
    root : ElementTree.Element
        ElementTree object with gameList-root and game-sub elements. Used as
        the source xml to look for game entries.
    path : str
        Exact full path to search for. Although the function will extract the
        basename (means excluding any directory part) and compare the
        filename only.

    Returns
    -------
    ElementTree.Element or None
        game root object with all tags and sub elements if any match is
        found, None otherwise.
    """
    base_path = os.path.basename(path)
    game = None
    for element in root.getiterator('game'):
        element_path = element.findtext('path', '')
        # Get full game-element, if filenames from both path match.
        if base_path == os.path.basename(element_path):
            game = element
            break
    return game
1b1317476836c6a2049130393a9578c65873d374
12,502
def create_relationship_query(entity_map: dict, rel_name: str, rel_map: dict) -> str:
    """
    Purpose:
        Create relationship definition query
    Args:
        entity_map: Entities map
        rel_name: The name of the relationship
        rel_map: Relationship Map
    Returns:
        graql_insert_query: The query to run
    """
    graql_insert_query = "define " + rel_name + " sub relation, "

    # relates 1
    graql_insert_query += "relates " + rel_map["rel1"]["role"] + ", "

    # relates 2
    graql_insert_query += "relates " + rel_map["rel2"]["role"] + ", "

    # add our custom attr
    # graql_insert_query += 'has codex_details'

    # check if attrs
    attr_length = len(entity_map.keys())
    # if attr_length == 0:
    #     graql_insert_query += ";"
    #     return graql_insert_query

    # # check if blank attr
    # graql_insert_query += ","

    attr_counter = 1
    for attr in entity_map:
        graql_insert_query += "has " + str(attr)

        # check if last
        if attr_counter == attr_length:
            graql_insert_query += ";"
        else:
            graql_insert_query += ", "
        attr_counter += 1

    return graql_insert_query
683a0a316c132db7ec5ee0f01c302c06c2a45d77
12,503
import torch
import copy


def clones(module: torch.nn.Module, n: int):
    """
    Produce N identical copies of module in a ModuleList

    :param module: The module to be copied.
        The module itself is not part of the output module list
    :param n: Number of copies
    """
    return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(n)])
e4807cda87c90af415555606e3308f07ff9ddf49
12,504
def is_sorted(array):
    """
    Check if array of numbers is sorted

    :param array: [list] of numbers
    :return: [boolean] - True if array is sorted and False otherwise
    """
    for i in range(len(array) - 1):
        if array[i] > array[i + 1]:
            return False
    return True
8542e162aa96f7035d33f1eb7b7159031a8a41b8
12,506
def FormatDescriptorToPython(i):
    """
    Format a descriptor into a form which can be used as a python attribute

    example::

        >>> FormatDescriptorToPython('(Ljava/lang/Long; Ljava/lang/Long; Z Z)V')
        'Ljava_lang_LongLjava_lang_LongZZV'

    :param i: name to transform
    :rtype: str
    """
    i = i.replace("/", "_")
    i = i.replace(";", "")
    i = i.replace("[", "")
    i = i.replace("(", "")
    i = i.replace(")", "")
    i = i.replace(" ", "")
    i = i.replace("$", "")
    return i
8d217883603ae9e9c8f282985b456aa97494beba
12,507
def factoring_visitor(state, primes):
    """Use with multiset_partitions_taocp to enumerate the ways a number can
    be expressed as a product of factors.  For this usage, the exponents of
    the prime factors of a number are arguments to the partition enumerator,
    while the corresponding prime factors are input here.

    Examples
    ========

    To enumerate the factorings of a number we can think of the elements of
    the partition as being the prime factors and the multiplicities as being
    their exponents.

    >>> from sympy.utilities.enumerative import factoring_visitor
    >>> from sympy.utilities.enumerative import multiset_partitions_taocp
    >>> from sympy import factorint
    >>> primes, multiplicities = zip(*factorint(24).items())
    >>> primes
    (2, 3)
    >>> multiplicities
    (3, 1)
    >>> states = multiset_partitions_taocp(multiplicities)
    >>> list(factoring_visitor(state, primes) for state in states)
    [[24], [8, 3], [12, 2], [4, 6], [4, 2, 3], [6, 2, 2], [2, 2, 2, 3]]
    """
    f, lpart, pstack = state
    factoring = []
    for i in range(lpart + 1):
        factor = 1
        for ps in pstack[f[i]: f[i + 1]]:
            if ps.v > 0:
                factor *= primes[ps.c] ** ps.v
        factoring.append(factor)
    return factoring
0692d5c2260ca6c98ab874b4a44dc10f3630c57b
12,508
def layer_severity(layers, layer):
    """Return severity of layer in layers."""
    return layers[layer]['severity']
e8a7a95268ddd2d4aa6b5f7fa66d5828016517eb
12,509
from typing import List
import argparse


def parse_args_for_monitoring(
        args_list: List[str], unit_name: str) -> argparse.Namespace:
    """
    CLI useful to monitoring scripts. The beauty is in what said scripts do
    with this info.

    :param args_list: command line arguments
    :param unit_name: so far we have "node" and "miner", describing the role
        of the monitored unit.
    :return:
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=unit_name.capitalize() + """ monitoring script.

Collects information on ping and http responses as well as some basic shell
commands over SSH.

Servers file is a list of servers with these fields:
{
    "ip": "21.151.211.10",
    "creds": [
        {
            "username": "megamind",
            "password": "fumbledpass",
            "key_filename": "/home/megs/.ssh/id_rsa"
        }
    ],
    "http_target": "optional path component of URL to request.",
    "ssh_peers": "known or permitted ssh clients, separated by commas.",
    "known_ports": "known or permitted listening ports, separated by commas."
}
""")
    parser.add_argument(
        "email_addy",
        help="Email (gmail) account to use for sending error and "
             "(optional) success emails.")
    parser.add_argument(
        "password",
        help="Password for the provided email (gmail) address.")
    parser.add_argument(
        "-e", "--email_to",
        help="Don't test, just email the month's log, to this address.")
    parser.add_argument(
        "-s", "--send_on_success",
        help="Send an email upon success.",
        action="store_true")
    parser.add_argument(
        "-n", "--nodes_file",
        help="Name of json file describing the nodes to monitor.",
        default="monitored_{}s.json".format(unit_name))
    args = parser.parse_args(args_list)
    return args
87fde80171c31c6bcea4ab6a82e9834fe7f61e00
12,510
def order_verts_by_edge_connection(connected_verts, results=None):
    """
    Orders connected verts by edge connection.
    """
    # A mutable default argument ([]) would be shared across calls; use None
    # instead. The caller is expected to seed `results` with a starting
    # vertex, since results[-1] is read below.
    if results is None:
        results = []
    next_verts = list(
        set(results[-1].connectedVertices()) & set(connected_verts)
    )
    for vert in next_verts:
        if vert not in results:
            results.append(vert)
            break
    return results
9f6b8cc6dc4816c4136da234e90924d2a8d2eae5
12,511
def _parse_alt_title(html_chunk):
    """
    Parse title from alternative location if not found where it should be.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with details.

    Returns:
        str: Book's title.
    """
    title = html_chunk.find(
        "input",
        {"src": "../images_buttons/objednat_off.gif"}
    )

    assert title, "Can't find alternative title!"
    title = title[0]

    assert "title" in title.params, "Can't find alternative title source!"

    # title is stored as: Bleh bleh: Title
    title = title.params["title"].split(":", 1)[-1]

    return title.strip()
bd5df0edfc174653731256d50e0a8f84a6f84880
12,512
def prompt_worker_amount(cpu_cores: int):
    """Prompt the user for the amount of Celery workers they want to run.

    Start 2 fewer workers than the amount of CPU cores by default."""
    answer = 0
    safe_cores_suggestion = cpu_cores - 2

    def invalid_answer():
        """Restart prompt if answer invalid"""
        print(f"[red]Invalid number! Please enter a whole number.[/]")
        # Return the re-prompt's result so it isn't discarded.
        return prompt_worker_amount(cpu_cores)

    try:
        # Input doesn't like parsing colours
        print(
            f"[yellow]How many workers would you like to start?[/]\n"
            + f"Press ENTER for default: {safe_cores_suggestion}\n"
        )
        answer = int(input() or safe_cores_suggestion)
    except ValueError:
        answer = invalid_answer()

    if answer == 0:
        print(f"[yellow]Using suggested amount: {safe_cores_suggestion}[/]")
        answer = safe_cores_suggestion

    return answer
2f2e4d423eb94d0bf709feda09c5c45ca8cc481a
12,513
import os
import sys
import subprocess


def pw_convert(filename, bit=64, compression=True, gzip=True, verbose=True):
    """
    Runs msconvert.exe from ProteoWizard to convert Waters .RAW format to
    .mzXML which can then be parsed by python.

    module requirements: os, subprocess, sys

    ProteoWizard must be installed for this script to function.
    go to http://proteowizard.sourceforge.net/downloads.shtml to download

    This script assumes that the ProteoWizard is installed under either
    c:\program files\proteowizard
    or
    c:\program files (x86)\proteowizard

    If you use this python script to convert to mzML, you should cite the
    paper of the folks who wrote the program
    Chambers, M.C. Nature Biotechnology 2012, 30, 918-920
    doi 10.1038/nbt.2377
    """
    def find_all(fname, path):
        """
        Finds all files of a given name within a specified directory.
        Adapted from http://stackoverflow.com/questions/1724693/find-a-file-in-python

        Module dependencies: os
        """
        locations = []
        for root, dirs, files in os.walk(path):
            if fname in files:
                locations.append(os.path.join(root, fname))
        return locations

    if sys.platform != 'win32':
        raise OSError(
            'The function that converts to mzML is limited to Windows operating systems.\n'
            'You can manually convert to *.mzML using the proteowizard standalone package '
            'and supply that mzML file to this script')

    locs = []
    for val in ['c:\\program files\\proteowizard',
                'c:\\program files (x86)\\proteowizard']:
        # searches for msconvert.exe in expected folders
        locs.extend(find_all('msconvert.exe', val))

    if len(locs) == 0:  # if script cannot find msconvert.exe
        raise IOError(
            'The python script could not find msconvert.exe\n'
            'Please ensure that ProteoWizard is installed in either:\n'
            'c:\\program files\\proteowizard\nor\nc:\\program files (x86)\\proteowizard')

    outname = filename[:-4] + '.mzML'
    callstring = locs[-1] + ' "' + filename + '" --mzML'
    if bit in [32, 64]:
        callstring += ' --' + str(bit)
    else:
        raise ValueError(
            'ProteoWizard conversion was called with an invalid floating point precision "%s".' % str(bit))

    if compression is True:  # call for compression
        callstring += ' --zlib'

    exten = '*.mzML'
    if gzip is True:  # call to gzip entire mzml
        callstring += ' --gzip'
        outname += '.gz'
        exten += '.gz'

    print('callstring', callstring)

    if verbose is True:
        callstring += ' --verbose'
        sys.stdout.write('Generating %s file from %s' % (exten, filename))
        sys.stdout.flush()
        subprocess.call(callstring)
        sys.stdout.write(' DONE\n')
        sys.stdout.flush()
    else:
        subprocess.call(callstring)
    return outname
09973f0c12f63c485b8947c57591791b51e10752
12,514
import json


def getAll(filepath):
    """
    Get all config values
    """
    with open(filepath, "r", encoding="utf-8") as target:
        return json.load(target)
450597deb942c9fdbde33ec37c8feee9d2edd1ec
12,516
def safe_div(x: int, y: int):
    """
    Computes x / y and fails if x is not divisible by y.
    """
    assert isinstance(x, int) and isinstance(y, int)
    assert y != 0
    assert x % y == 0, f'{x} is not divisible by {y}.'
    return x // y
250678a7d6b3b5564ee734a852b17d1cea7ac536
12,517
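A quick usage sketch for safe_div above:

print(safe_div(10, 5))  # 2
# safe_div(10, 3) raises AssertionError: 10 is not divisible by 3.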
def format_entry(e):
    """
    Format an entry from the data in the original database to something that
    uses html
    """
    txt = e.midashigo1
    strlen = len(txt)
    acclen = len(e.ac)
    accent = "0" * (strlen - acclen) + e.ac

    # Get the nasal positions
    nasal = []
    if e.nasalsoundpos:
        positions = e.nasalsoundpos.split('0')
        for p in positions:
            if p:
                nasal.append(int(p))
            if not p:
                # e.g. "20" would result in ['2', '']
                nasal[-1] = nasal[-1] * 10

    # Get the no pronounce positions
    nopron = []
    if e.nopronouncepos:
        positions = e.nopronouncepos.split('0')
        for p in positions:
            if p:
                nopron.append(int(p))
            if not p:
                # e.g. "20" would result in ['2', '']
                nopron[-1] = nopron[-1] * 10

    outstr = ""
    overline = False
    for i in range(strlen):
        a = int(accent[i])
        # Start or end overline when necessary
        if not overline and a > 0:
            outstr = outstr + '<span class="overline">'
            overline = True
        if overline and a == 0:
            outstr = outstr + '</span>'
            overline = False
        if (i + 1) in nopron:
            outstr = outstr + '<span class="nopron">'
        # Add the character stuff
        outstr = outstr + txt[i]
        # Add the pronunciation stuff
        if (i + 1) in nopron:
            outstr = outstr + "</span>"
        if (i + 1) in nasal:
            outstr = outstr + '<span class="nasal">&#176;</span>'
        # If we go down in pitch, add the downfall
        if a == 2:
            outstr = outstr + '</span>&#42780;'
            overline = False
    # Close the overline if it's still open
    if overline:
        outstr = outstr + "</span>"
    return outstr
17cbc7270a70cb8bb88ec7faa2295f1f42b041af
12,519
def dropna_except(df, except_subset, inplace=False):
    """drop rows containing nans from a Pandas dataframe, but allow nans in
    the specified subset of columns"""
    subset = set(df.columns)
    for ec in except_subset:
        subset.remove(ec)
    if inplace:
        # dropna returns None when inplace=True, so return the mutated
        # frame itself rather than the call's result.
        df.dropna(inplace=True, subset=subset)
        return df
    return df.dropna(subset=subset)
708bd3e9cab520f9453cd82d6f36aa8ec8183fc0
12,520
def _timeSlice__compare(self, other):
    """Compare two time slices, returning tuple (<return code>, <slice>,
    <comment>), with the larger slice if successful. If return code is
    negative, no solution is found. If return code is 2, the slices are
    disjoint and both returned."""
    assert self._h.label == 'timeSlice', \
        'collect._timeSlice__compare attached to wrong object: %s [%s]' % (self._h.title, self._h.label)
    if self.label == other.label:
        return (0, self, 'Slices equal')
    sl = sorted([self.label, other.label])

    ## label dict allows look-up of objects by label ....
    ee = self._labelDict
    map = {('RFMIP', 'RFMIP2'): ('RFMIPunion', 'Taking union of slices'),
           ('RFMIP', 'hist55'): ('hist55plus', 'Taking ad-hoc union with extra ...'),
           ('RFMIP2', 'hist55'): ('hist55plus', 'Taking ad-hoc union with extra ...'),
           ('DAMIP20', 'DAMIP40'): ('DAMIP40', 'Taking larger containing slice')}

    ##
    ## handle awkward cases
    ##
    if self.type != other.type or self.type == 'dayList':
        if tuple(sl) in map:
            targ, msg = map[tuple(sl)]
            return (1, ee[targ], msg)
        else:
            return (-1, None, 'Multiple slice types: %s' % sorted(ee.keys()))

    # if sl in [['piControl030a','piControl200'],['piControl030', 'piControl200']]:
    #     return (1,ee['piControl200'],'Taking preferred slice (possible alignment issues)')
    # elif sl == ['piControl030', 'piControl030a']:
    #     return (1,ee['piControl30'],'Taking preferred slice (possible alignment issues)')
    # elif sl == ['RFMIP','RFMIP2']:
    #     return (1,ee['RFMIPunion', 'Taking union of slices')
    # elif sl == ['RFMIP', 'hist55']:
    #     return (1,ee['hist55plus'], 'Taking ad-hoc union with extra ...')
    # elif sl == ['RFMIP2', 'hist55']:
    #     return (1,ee['hist55'], 'Taking larger containing slice')
    # elif sl == ['DAMIP20','DAMIP40']:
    #     return (1,ee['DAMIP40'], 'Taking larger containing slice')

    if not (self.type in ['simpleRange', 'relativeRange']
            or (len(self.type) > 13 and self.type[:13] == 'branchedYears')):
        return (-2, None, 'slice type aggregation not supported')

    sa, ea = (self.start, self.end)
    sb, eb = (other.start, other.end)
    if sa <= sb and ea >= eb:
        return (1, self, 'Taking largest slice')
    if sb <= sa and eb >= ea:
        return (1, other, 'Taking largest slice')
    if ea < sb or eb < sa:
        return (2, (self, other), 'Slices are disjoint')
    return (-3, None, 'Overlapping slices')
d03b9a4f68c08e232753ae64c52225d8643380d5
12,521
def tab_delimited(list_to_write):
    """Format list of elements into a tab-delimited line"""
    list_to_write = [str(x) for x in list_to_write]
    return '\t'.join(list_to_write) + '\n'
921a3316f01aecaba1efb43c524b1ad5cca89546
12,523
def poly_relation(a, b):
    """Describe the relation between 2 polygons"""
    rels = {
        'a contains b': a.contains(b),
        'a crosses b': a.crosses(b),
        'a disjoint b': a.disjoint(b),
        'a intersects b': a.intersects(b),
        'a touches b': a.touches(b),
        'a within b': a.within(b),
        'a covers b': a.covers(b),
        'a overlaps b': a.overlaps(b),
        'b contains a': b.contains(a),
        'b crosses a': b.crosses(a),
        'b disjoint a': b.disjoint(a),
        'b intersects a': b.intersects(a),
        'b touches a': b.touches(a),
        'b within a': b.within(a),
        'b covers a': b.covers(a),
        'b overlaps a': b.overlaps(a),
    }
    return ', '.join(k for k, v in rels.items() if v)
ae008f812f0bf31e2f4ad2384a3390ad381730ad
12,525
def get_hardware_info(**output_dict):
    """
    Get the serial number/service tag of the node

    Does not directly output anything on its own, used in merge_dictionary
    function
    """
    hw_dict = {}
    response_json = output_dict['GetHardwareInfo']
    for node in response_json['result']['nodes']:
        node_id = node['nodeID']
        node_serial = node['result']['hardwareInfo']['chassisSerial']
        hw_dict[node_id] = node_serial
    return hw_dict
15969315bbf0d41d5d0066ca1c0dfe51c7fcbf19
12,527
def get_sorted_indices(some_list, reverse=False):
    """Get sorted indices of some_list

    Parameters
    ----------
    some_list : list
        Any list compatible with sorted()
    reverse : bool
        Reverse sort if True
    """
    return [
        i[0] for i in sorted(
            enumerate(some_list), key=lambda x: x[1], reverse=reverse
        )
    ]
6e207f4079cd3800fd26269725f5385250b76112
12,529
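A short usage sketch for get_sorted_indices above, with illustrative values:

print(get_sorted_indices([30, 10, 20]))                # [1, 2, 0]
print(get_sorted_indices([30, 10, 20], reverse=True))  # [0, 2, 1]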
from pathlib import Path


def _get_talairch_lta(fs_subject_dir):
    """Fetch pre-computed transform from infant_recon_all"""
    xfm = Path(fs_subject_dir) / "mri" / "transforms" / "niftyreg_affine.lta"
    if not xfm.exists():
        raise FileNotFoundError("Could not find talairach transform.")
    return str(xfm.absolute())
a83caf9f34d7aa0be8ba9b592a14ea1ade82fff9
12,530
def excel_index(column):
    """
    Takes a string and returns what column it would be in Excel.
    """
    # Not needed but will ensure the right values. There are different
    # ASCII values for uppercase and lowercase letters.
    letters = list(column.upper())
    # This works like this: LOL = 8514
    # 26^2 * 12(L) + 26^1 * 15(O) + 26^0 * 12(L) = 8514
    rev = list(reversed(letters))
    return sum([26**i * (ord(x) - 64) for i, x in enumerate(rev)])
224d7f370468e232a68a8205cf101e8dcb959829
12,531
import os


def get_test_path():
    """
    Returns the path to the ``tests`` dir that this module resides in.

    :rtype: str
    :returns: The full path to the ``tests/`` dir.
    """
    return os.path.dirname(__file__)
c712ca3d5c727391cab0ae13bd8ebfd736a8cf6f
12,533
def find_specification_label_in_feature(feature):
    """Analyse a Biopython feature to find a DnaChisel Specification in it.

    The specification should start with either "@" or "~", in the feature's
    field "label" or "note".
    """
    for labelfield in ["label", "note"]:
        if labelfield not in feature.qualifiers:
            continue
        potential_label = feature.qualifiers.get(labelfield, "_")
        if isinstance(potential_label, list):
            potential_label = potential_label[0]
        if (potential_label != "") and (potential_label[0] in "@~"):
            return potential_label
    return None
918c78ad046c9acf474e2465410b7139af9e6936
12,534
import json
import sys


def load_config():
    """
    Loads a config file in the current directory called config.json.
    """
    # Opens the config file and loads as a dictionary.
    try:
        with open('config.json', 'r') as f:
            config = json.load(f)
            print("Config file loaded.")
            return config
    except:
        message = 'Could not read config file.'
        print("Unexpected error: ", sys.exc_info()[0])
        sys.exit(message)
b6a7d81fc7e610f8ef80bd4ab9371bfeaea67c7b
12,535
def gp_tuple_to_dict(gp_tuple):
    """Convert a groupings parameters (gp) tuple into a dict suitable to pass
    to the ``grouping_parameters`` CompoundTemplate.__init__ kwarg.
    """
    params = [{'min': 1}, {'max': 1}, {'name': None},
              {'possible_types': None}, {'is_separator': False},
              {'inner_sep_type': None}]
    d = {}
    for i, param in enumerate(params):
        if i < len(gp_tuple):
            d[list(param.keys())[0]] = gp_tuple[i]
        else:
            d[list(param.keys())[0]] = list(param.values())[0]
    return d
5db98d0530dc177685e0bcfefad94a1f6718aed9
12,537
import re


def check_section_sort_order(file_path, sect_lines):
    """Check the sort order of the lines of a section"""
    # Find out lines of sub_sections and analyze them
    result = True
    filtered_lines = []
    subsection_lines = []
    for raw_line in sect_lines:
        line = raw_line.rstrip()
        if line != raw_line:
            print(f"{file_path}: spaces at the end of {repr(raw_line)}")
            result = False
        if line == '':
            if subsection_lines:
                # Add empty lines to subsection
                subsection_lines.append(line)
            # Anyway, add them to the lines in the section too
            filtered_lines.append(line)
        elif line.startswith(' '):
            # Add indented lines to the subsection, without the indent
            subsection_lines.append(line[2:])
        else:
            # non-indented lines means subsections end there
            if subsection_lines:
                if not check_section_sort_order(file_path, subsection_lines):
                    result = False
                subsection_lines = []
            filtered_lines.append(line)

    # Ends recursive structures
    if subsection_lines:
        if not check_section_sort_order(file_path, subsection_lines):
            result = False
    del subsection_lines

    if not filtered_lines:
        return result

    # If there is a dash, every line needs to start with a dash, and this is it
    if any(line.startswith('- ') for line in filtered_lines):
        if not all(not line or line.startswith('- ') for line in filtered_lines):
            print(f"{file_path}: a section with dash needs to have all with dash: {repr(filtered_lines)}")
            result = False
        return result  # Return directly, here

    # Check the sort order of lines starting with a star
    last_sortword = None
    last_sortword_orig = None
    for line in filtered_lines:
        if not line:
            continue
        if not line.startswith('*'):
            # Reset the sort order when a text appears
            if not re.match(r'^[0-9a-zA-Z]', line):
                print(f"{file_path}: unexpected non-list line: {repr(line)}")
                result = False
            last_sortword = None
            last_sortword_orig = None
            continue
        if len(line) < 3 or line[1] != ' ':
            print(f"{file_path}: missing space between */- and words in {repr(line)}")
            result = False
            continue
        # Ignore lists of URLs
        if line.startswith('* https://'):
            if last_sortword is not None:
                print(f"{file_path}: URL while looking for words: {repr(line)}")
                result = False
            continue
        # Find the equal sign
        try:
            eq_idx = line.index('=', 3)
        except ValueError:
            print(f"{file_path}: missing = in {repr(line)}")
            result = False
            continue
        # Keep an "original" unmodified version of the word, in order to display it
        new_word_orig = new_word = line[2:eq_idx].strip()
        new_word = new_word.upper()
        new_word = new_word.replace('/', '')
        new_word = new_word.replace('-', '')
        new_word = new_word.replace('²', '2')
        if last_sortword is not None and last_sortword > new_word:
            print(f"{file_path}: disorder {last_sortword} > {new_word} "
                  + f"({last_sortword_orig} needs to come after {new_word_orig})")
            result = False
        last_sortword = new_word
        last_sortword_orig = new_word_orig
    return result
afc88307e8db7f258b82e073207e3a7aa6302095
12,538
from typing import Optional


def convert_int_or_none(val: Optional[int]) -> Optional[int]:
    """Convert to an int or None."""
    return int(val) if val is not None else val
1920243d7465df6f2f3f9e6fdbd2424bbad165b4
12,539
def calculate_lux(r, g, b):
    """Calculate ambient light values"""
    # This only uses RGB ... how can we integrate clear or calculate lux
    # based exclusively on clear since this might be more reliable?
    illuminance = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
    return illuminance
00b1e26fb10cef79ae32808e355f93209190dfe5
12,540
import os


def split_full_path(full_path, base_dir):
    """
    Given a full path, return:
    - relative_dir: the part of the path that does not include the base
      directory and the basename
    - basename
    """
    fname = os.path.basename(full_path)
    relative_path = full_path.split(base_dir)[-1]
    relative_dir = relative_path.split(fname)[0]
    relative_dir = relative_dir[1:-1]  # clip slashes
    return relative_dir, fname
bfd500d7bf8ca999b2648e0ea168688170835e66
12,541
def count_path_dynamic(m, n):
    """Count number of paths with dynamic method."""
    # create 2d array to store values
    paths = [[0 for x in range(n)] for y in range(m)]
    # set num of paths on edges to one
    for i in range(m):
        paths[i][0] = 1
    for i in range(n):
        paths[0][i] = 1
    # calculate num paths; start the inner loop at 1 so the pre-filled first
    # column is not recomputed via paths[i][-1] (wrap-around indexing)
    for i in range(1, m):
        for j in range(1, n):
            paths[i][j] = paths[i - 1][j] + paths[i][j - 1]
    return paths[m - 1][n - 1]
3232de0e5ff31071b11cbda3c5899ba7649a9a5e
12,542
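A worked example for count_path_dynamic above, counting monotone lattice paths through an m x n grid:

print(count_path_dynamic(3, 3))  # 6
print(count_path_dynamic(2, 2))  # 2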
def parse_object(repo, objectish):
    """Parse a string referring to an object.

    :param repo: A `Repo` object
    :param objectish: A string referring to an object
    :return: A git object
    :raise KeyError: If the object can not be found
    """
    return repo[objectish]
6606eb58a1aab94a6071bf18221dbec63e2ef6da
12,545
import os


def _get_env_var(key: str) -> str:
    """Return the value of an environment variable accessed by the given key.

    If a value does not exist, return the key, which is too liberal, but
    useful when a tenant or username, for example, is stored in literal form
    in a config (rather than as a placeholder for an environment variable)."""
    v = os.environ.get(key)
    if v is not None:
        return v
    else:
        return key
e7e844c42b04cda872e73fb1db7a21d9bc30c896
12,547
def elision_normalize(s):
    """Turn unicode characters which look similar to U+2019 (right single
    quotation mark) into U+2019."""
    return (s.replace("\u02BC", "\u2019")
             .replace("\u1FBF", "\u2019")
             .replace("\u0027", "\u2019")
             .replace("\u1FBD", "\u2019"))
a7b0f2ba14d0fcbb2cd1cc97b8ce858051d35709
12,548
def compare(value1, value2, comparison):
    """
    Compare 2 values

    :type value1: object
    :param value1: The first value to compare.

    :type value2: object
    :param value2: The second value to compare.

    :type comparison: string
    :param comparison: The comparison to make. Can be "is", "or", "and".

    :return: If the value is, or, and of another value
    :rtype: boolean
    """
    if not isinstance(comparison, str):
        raise TypeError("Comparison argument must be a string.")
    if comparison == 'is':
        return value1 == value2
    elif comparison == 'or':
        return value1 or value2
    elif comparison == 'and':
        return value1 and value2
    raise ValueError("Invalid comparison operator specified.")
8f8869d54d55959625a89377db39220b50e1b0d3
12,549
def constrain_to_range(s, min_val, max_val):
    """
    Make sure that a value lies in the given (closed) range.

    :param s: Value to check.
    :param min_val: Lower boundary of the interval.
    :param max_val: Upper boundary of the interval.
    :return: Point closest to the input value which lies in the given range.
    :rtype: float
    """
    return max(min(s, max_val), min_val)
d2017580bab60ba444cbf40f570b16763de81969
12,550
import subprocess
import re


def supervisor_status():
    """Return a dict mapping each supervisord process name to its status and uptime."""
    cmd = '''sudo supervisorctl status'''
    stdout = subprocess.check_output(cmd, shell=True)
    stdout = stdout.decode('utf-8')
    procs = stdout.splitlines()  # each line is a process
    result = {}
    for proc in procs:
        # replace multiple spaces by one space
        proc = re.sub(r'\s+', ' ', proc).strip()
        splitted_proc = proc.split(' ')
        name = splitted_proc.pop(0)
        status = splitted_proc.pop(0)
        uptime = ' '.join(splitted_proc)
        result[name] = status + ' ' + uptime
    return result
10611fc558671a87388bee0b23d8d47ec438dec4
12,551
def lambda_context():
    """Generates a stub AWS Lambda context object."""
    class Context:
        @staticmethod
        def get_remaining_time_in_millis():
            return 200000

    return Context()
f86f0846ca3317284e11b7b8ec110805b6533e4d
12,554
def find_piece_size(total_size):
    """
    Determine the ideal piece size for a torrent based on the total size of
    the data being shared.

    :param total_size: Total torrent size
    :type total_size: int
    :return: Piece size (KB)
    :rtype: int
    """
    if total_size <= 2 ** 19:
        return 512
    elif total_size <= 2 ** 20:
        return 1024
    elif total_size <= 2 ** 21:
        return 2048
    elif total_size <= 2 ** 22:
        return 4096
    elif total_size <= 2 ** 23:
        return 8192
    elif total_size <= 2 ** 24:
        return 16384
    else:
        raise ValueError("Total size is unreasonably large")
532adc41448ce5dfb11f02699c987936e1abda0a
12,555
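A worked example for find_piece_size above: a 3,000,000-byte torrent is above 2**21 and at most 2**22, so it lands in the 4096 bucket:

print(find_piece_size(3_000_000))  # 4096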
import os
import re


def get_recipes(working_directory):
    """Returns the paths to recipes and expectation file directories.

    Args:
        working_directory (str): absolute path to the directory where the
            recipes are located.

    Returns:
        latest_recipes (str[]): paths to all unbranched recipes.
        branched_recipes (str[]): paths to all branched recipes.
        branched_expectations (str[]): paths to all expectation directories
            of branched recipes.
    """
    recipe_pattern = r'\.py$'
    branched_recipe_pattern = r'_\d+_\d+_\d+\.py$'
    expectation_pattern = r'\.expected$'
    latest_recipes = []
    branched_recipes = []
    branched_expectations = []
    for root, dirs, files in os.walk(working_directory):
        for filename in files:
            if (re.search(recipe_pattern, filename)):
                if re.search(branched_recipe_pattern, filename):
                    branched_recipes.append(os.path.join(root, filename))
                else:
                    latest_recipes.append(os.path.join(root, filename))
        for dir_name in dirs:
            if re.search(expectation_pattern, dir_name):
                branched_expectations.append(os.path.join(root, dir_name))
    return latest_recipes, branched_recipes, branched_expectations
61c4824c3f8acf10cd160c76d3970a786bcef470
12,556
def fake_file(filename, content="mock content"):
    """
    For testing I sometimes want a specific file request to return specific
    content. This is to make creation easier.
    """
    return {"filename": filename, "content": content}
5b342edf9dec65987223fbbc8b670402513ae4ed
12,557
def _convert_string(key):
    """Convert OEIS String to Integer."""
    if isinstance(key, str):
        key = int(key.strip().upper().strip("A"))
    return key
b99a93089f655d4a8941fc1939211acc5645ef80
12,558
def find_note_chapter(connection, note_id, title):
    """
    Look up a chapter record by note id and title.

    :param connection:
    :param note_id:
    :param title:
    :return: dict with the chapter's information
    """
    with connection.cursor() as cursor:
        sql = "select * from chapter where note_id=%s and title=%s "
        cursor.execute(sql, (note_id, title))
        return cursor.fetchone()
49c9c51efcd266c18d37441a536e81f7605dbb93
12,559
def flatten(sequence):
    """Given a sequence possibly containing nested lists or tuples, flatten
    the sequence to a single non-nested list of primitives.

    >>> flatten((('META.INSTRUMENT.DETECTOR', 'META.SUBARRAY.NAME'), ('META.OBSERVATION.DATE', 'META.OBSERVATION.TIME')))
    ['META.INSTRUMENT.DETECTOR', 'META.SUBARRAY.NAME', 'META.OBSERVATION.DATE', 'META.OBSERVATION.TIME']
    """
    flattened = []
    for elem in sequence:
        if isinstance(elem, (list, tuple)):
            elem = flatten(elem)
        else:
            elem = [elem]
        flattened.extend(elem)
    return flattened
6ca3fe470757dc4081c4387d917d5e285c2a3f06
12,560
def relevant_rule(rule):
    """Returns true if a given rule is relevant when generating a podspec."""
    return (
        # cc_library only (ignore cc_test, cc_binary)
        rule.type == "cc_library" and
        # ignore empty rule
        (rule.hdrs + rule.textual_hdrs + rule.srcs) and
        # ignore test-only rule
        not rule.testonly)
3e1a45d222128e0065eb585135806a0f8bb787d9
12,561
def _decrement_version(lambda_config):
    """Decrement the Lambda version, if possible.

    Args:
        lambda_config (dict): Lambda function config with 'current_version'

    Returns:
        True if the version was changed, False otherwise
    """
    current_version = lambda_config['current_version']
    if current_version == '$LATEST':
        return False

    int_version = int(current_version)
    if int_version <= 1:
        return False

    lambda_config['current_version'] = int_version - 1
    return True
a06ed14e0abaa68a809bdb49c2d4f2cc59ce6db2
12,564
import collections


def rollout(env, agent, max_steps):
    """Collects a single rollout of experience.

    Args:
        env: The environment to interact with (adheres to gym interface).
        agent: The agent acting in the environment.
        max_steps: The max number of steps to take in the environment.

    Returns:
        A dictionary of lists containing information from the trajectory.
    """
    assert max_steps > 0
    traj = collections.defaultdict(list)

    def add_step(**kwargs):
        for k, v in kwargs.items():
            traj[k].append(v)

    s = env.reset()
    num_steps = 0
    while num_steps < max_steps:
        a, a_info = agent.step(s)
        sp, r, t, _ = env.step(a)
        add_step(s=s, a=a, r=r, t=t, a_info=a_info)
        s = sp
        num_steps += 1
        if t:
            break

    # Handle certain edge cases during sampling.
    # 1. Ensure there's always a next state.
    traj["s"].append(s)
    # 2. Ensure that the agent info (importantly containing the
    #    next-state-value) always exists.
    _, a_info = agent.step(s)
    traj["a_info"].append(a_info)

    return traj
eb9a9f41b9c37e1c5f8ebdbbec13650dd7665622
12,566
def tms_mpsse(bits):
    """convert a tms bit sequence to an mpsse (len, bits) tuple"""
    n = len(bits)
    assert (n > 0) and (n <= 7)
    x = 0
    # tms is shifted lsb first
    for i in range(n - 1, -1, -1):
        x = (x << 1) + bits[i]
    # only bits 0 thru 6 are shifted on tms - tdi is set to bit 7
    # (and is left there)
    # len = n means clock out n + 1 bits
    return (n - 1, x & 127)
25d2dc3b1edd5494e82295fe50889565246c2ae5
12,567
def _aggregate(query, func, by=None):
    """
    Wrap a query in an aggregation clause.

    Use this convenience function if the aggregation parameters are coming
    from user input so that they can be validated.

    Args:
        query (str): Query string to wrap.
        func (str): Aggregation function of choice. Valid choices are
            'avg'/'mean', 'min', 'max', 'sum'.
        by (list of str): Optional list of variables by which to perform the
            aggregation.

    Returns:
        str: New query string.
    """
    if func == "mean":
        func = "avg"
    if func not in ["avg", "min", "max", "sum"]:
        raise ValueError("Unsupported aggregation function %r" % func)
    query = "{func}({query})".format(func=func, query=query)
    if by:
        query += " by({by_variables})".format(by_variables=", ".join(by))
    return query
e26aa715fadc5a58f5f87cee297fc3e6500120e1
12,568
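A quick usage sketch for _aggregate above; the metric name is illustrative:

print(_aggregate("http_requests_total", "mean", by=["job", "instance"]))
# avg(http_requests_total) by(job, instance)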
from typing import Any


def accept(message: Any) -> bool:
    """
    Prompts the user to enter "yes" or "no". Returns True if the response was
    "yes", otherwise False. Ctrl-c counts as "no".
    """
    message = f"[pretf] {message} [yes/no]: "
    response = ""
    while response not in ("yes", "no"):
        try:
            response = input(message).lower()
        except KeyboardInterrupt:
            response = "no"
            print()
    return response == "yes"
884bf321462ef37f02a69925ece012e108fad861
12,569
def format_seconds(total_seconds: int) -> str:
    """Format a count of seconds to get a [H:]M:SS string."""
    prefix = '-' if total_seconds < 0 else ''
    hours, rem = divmod(abs(round(total_seconds)), 3600)
    minutes, seconds = divmod(rem, 60)
    chunks = []
    if hours:
        chunks.append(str(hours))
        min_format = '{:02}'
    else:
        min_format = '{}'
    chunks.append(min_format.format(minutes))
    chunks.append('{:02}'.format(seconds))
    return prefix + ':'.join(chunks)
c0f79b7f45c32589537b5dbf51a95b4811c50417
12,570
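A few worked examples for format_seconds above:

print(format_seconds(3725))  # '1:02:05'
print(format_seconds(75))    # '1:15'
print(format_seconds(-75))   # '-1:15'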
import six


def rev_comp(seq, molecule='dna'):
    """ DNA|RNA seq -> reverse complement """
    if molecule == 'dna':
        nuc_dict = {"A": "T", "B": "V", "C": "G", "D": "H", "G": "C",
                    "H": "D", "K": "M", "M": "K", "N": "N", "R": "Y",
                    "S": "S", "T": "A", "V": "B", "W": "W", "Y": "R"}
    elif molecule == 'rna':
        nuc_dict = {"A": "U", "B": "V", "C": "G", "D": "H", "G": "C",
                    "H": "D", "K": "M", "M": "K", "N": "N", "R": "Y",
                    "S": "S", "U": "A", "V": "B", "W": "W", "Y": "R"}
    else:
        raise ValueError("rev_comp requires molecule to be dna or rna")

    if not isinstance(seq, six.string_types):
        raise TypeError("seq must be a string!")

    return ''.join([nuc_dict[c] for c in seq.upper()[::-1]])
2e42ccf5f37992d0fbe3a25afd70a04e6fc0c225
12,572
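A quick usage sketch for rev_comp above:

print(rev_comp("ATGC"))                  # 'GCAT'
print(rev_comp("AUGC", molecule='rna'))  # 'GCAU'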
import math


def iters_close(iter1, iter2):
    """Check that the given iterables are element-wise near-equal (within 1%)."""
    iter1, iter2 = list(iter1), list(iter2)
    if len(iter1) != len(iter2):
        return False
    for i in range(len(iter1)):
        a = iter1[i]
        b = iter2[i]
        if math.isnan(a) and (isinstance(b, complex) or math.isnan(b)):
            continue
        b1, b2 = b - abs(b / 100), b + abs(b / 100)
        if not (b1 <= a <= b2):
            return False
    return True
efaaaced7260642b0bcd14eed54f02803e61c9ad
12,573
import os


def working_directory():
    """
    Get the working directory
    """
    path = os.path.abspath(os.path.dirname(__file__))
    return path
0fa4cc34ddd18d82da14e18b21125fc24b864249
12,574
def pe57(limit=1000):
    """
    >>> pe57()
    153
    """
    n, d = 3, 2
    cnt = 0
    for a in range(limit + 1):
        # n, d = n + (d << 1), n + d
        nn = n
        n += d << 1
        d += nn
        if len(str(n)) > len(str(d)):
            cnt += 1
    return cnt
ae898b427c8061d63d401957441a5c42e78238f7
12,576
def findall_deep(node, selector, ns, depth=0, maxDepth=-1):
    """
    recursively find all nodes matching the xpath selector

    :param node: the input etree node
    :param selector: the xpath selector
    :param ns: a dict of namespaces
    :param depth: the current depth
    :param maxDepth: the maximum number of levels to navigate
    :return: a list of matching nodes
    """
    results = node.findall(selector, ns) if ns else node.findall(selector)
    if maxDepth == -1 or (depth < maxDepth):
        children = list(node)
        if children and len(children) > 0:
            for child in children:
                results = results + findall_deep(child, selector, ns,
                                                 depth + 1, maxDepth)
    return results
f93d413dd205acf5be0e5f76dd6c599f54bfac57
12,577
from typing import Union
from typing import List


def table_insert(name: str, field_names: Union[str, List[str]]) -> str:
    """Return command to add a record into a PostgreSQL database.

    :param str name: name of table to append
    :param field_names: names of fields
    :type: str or list
    :return: command to append records to a table
    :rtype: str

    Example:
        import psql

        cur = psql.connection('db', 'user', 'password')
        [cur.execute(psql.table_insert('table', 'field'), (x, )) for x in values]
    """
    if isinstance(field_names, str):
        field_names = [field_names]
    length = len(field_names)
    if length > 1:
        values = ','.join(['%s'] * length)
    else:
        values = '%s'
    return '''INSERT INTO {table_name} ({fields}) VALUES ({values});'''.format(
        table_name=name, fields=', '.join(field_names), values=values)
a50aadebe655118c255ccb81c3c0852646057ff4
12,578
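A quick check of the SQL string table_insert above produces, with illustrative table and field names:

print(table_insert('users', ['name', 'age']))
# INSERT INTO users (name, age) VALUES (%s,%s);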
def __normalize(variable):
    """
    Scale a variable to mean zero and standard deviation 1

    Parameters
    ----------
    variable : xarray.DataArray or np.ndarray

    Returns
    -------
    xarray.DataArray or np.ndarray
    """
    mean = variable.mean()
    std = variable.std()
    if std != 0:
        result = (variable - mean) / std
    else:
        result = variable - mean
    return result
7d6329ef6454deb04b041a630a7f1f084f237b57
12,580
def get_old_prim_signals(signals):
    """
    create a dict of primary signals and the secondary signals they control.

    The data is compiled from the 'secondary_signals' field on primary
    signals; this field is populated by this Python service.

    Args:
        signals (TYPE): Description

    Returns:
        TYPE: Description
    """
    signals_with_children = {}
    for signal in signals:
        knack_id = signal["id"]
        secondary_signals = []
        try:
            for secondary in signal["SECONDARY_SIGNALS"]:
                secondary_signals.append(secondary["id"])
            signals_with_children[knack_id] = secondary_signals
        except (KeyError, AttributeError):
            continue
    return signals_with_children
a37c4b2837beb2a8354be30f1247f9555319edac
12,582
def get_proj(ds):
    """
    Read projection information from the dataset.
    """
    # Use geopandas to get the proj info
    proj = {}
    maybe_crs = ds.geometry.crs
    if maybe_crs:
        maybe_epsg = maybe_crs.to_epsg()
        if maybe_epsg:
            proj["proj:epsg"] = maybe_epsg
        else:
            proj["proj:wkt2"] = maybe_crs.to_wkt()
    return proj
4ed68d8733285cdea92c1b167a3d7f59024845db
12,584