content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def decomp(n):
    """
    Retorna a decomposição em fatores primos de um inteiro em forma de
    dicionário onde as chaves são as bases e os valores os expoentes.

    (Returns the prime factorization of an integer as a dict mapping
    prime base -> exponent.)

    Ex:
    >>> decomp(12)
    {2: 2, 3: 1}
    >>> decomp(72)
    {2: 3, 3: 2}

    :param n: number
    :return: dictionary
    """
    expoente = 0
    base = 2
    decomposicao = {}
    while n > 1:
        while n % base == 0:
            expoente += 1
            # Use floor division: `n / base` is float division in Python 3,
            # which loses exactness for large integers.
            n //= base
        if expoente > 0:
            decomposicao[base] = expoente
            expoente = 0
        base += 1
    return decomposicao
999d8be04ffd197ebdd14c88b525eb8ca4379bb9
14,075
def sanitize_type_instance(index_name):
    """
    collectd limits the character set in type_instance to ascii and forbids
    the '/' character. This method does a lossy conversion to ascii and
    replaces the reserved character with '_'.

    Fix: in Python 3, ``str.encode`` returns ``bytes``, so the subsequent
    ``replace('/', '_')`` with ``str`` arguments raised TypeError. Decode
    back to ``str`` after dropping non-ascii characters.
    """
    ascii_index_name = index_name.encode('ascii', 'ignore').decode('ascii')
    # '/' is reserved, so we substitute it with '_' instead
    return ascii_index_name.replace('/', '_')
ece5008944a80ec45f20c3c8bf01575f162bb8e0
14,076
def validWorkspace(uri):
    """Return True when the workspace URI points at a geodatabase
    (file geodatabase ``.gdb`` or enterprise geodatabase ``.sde``)."""
    workspace = str(uri)
    return ".gdb" in workspace or ".sde" in workspace
073c1b992eee78dcb939b46215faed57e9bcf96c
14,077
def get_run_zip_file_slug_sets(new_run, old_run_zip_files):
    """
    Collect, for each old run zip file, the list of provider slugs it covered.

    :param new_run: run object whose ``data_provider_task_records`` relation is
        queried (Django-style ORM manager — assumed, TODO confirm).
    :param old_run_zip_files: A list of run zip files.
    :return: A list of provider-slug lists, one per old zip file, excluding any
        zip file whose slug list matches every provider of ``new_run``
        (i.e. the overall project zip).
    """
    # Records without a provider are excluded so the slug access below is safe.
    data_provider_task_records = new_run.data_provider_task_records.exclude(provider__isnull=True)
    all_run_zip_file_slugs = [
        data_provider_task_record.provider.slug for data_provider_task_record in data_provider_task_records
    ]
    run_zip_file_slug_sets = []
    for old_run_zip_file in old_run_zip_files:
        run_zip_file_slug_set = []
        for data_provider_task_record in old_run_zip_file.data_provider_task_records.all():
            run_zip_file_slug_set.append(data_provider_task_record.provider.slug)
        # Don't rerun the overall project zip file.
        # NOTE(review): this is an order-sensitive list comparison, not a set
        # comparison — presumably both sides enumerate in the same order; verify.
        if all_run_zip_file_slugs != run_zip_file_slug_set:
            run_zip_file_slug_sets.append(run_zip_file_slug_set)
    return run_zip_file_slug_sets
3dd2c472bcf3113f2544dcf7b4c13f956714429b
14,078
import requests

def sendRequest(url, type="POST", params=None, headers=None):
    """
    Send a request to a URL.

    ### Input:
    - `url` (str) | the url to send the request to
    - `type` (str) | the type of request (GET or POST)
    - `params` (dict) | parameters to be sent with the request
    - `headers` (dict) | headers to be sent with the request

    ### Output:
    - `response` (dict or str) | the JSON response of the request, or the
      raw text body when the response is not valid JSON
    """
    ## Perform a GET request
    if type == "GET":
        rawResponse = requests.get(url, params=params, headers=headers)
    ## Perform a POST request
    else:
        rawResponse = requests.post(url, params=params, headers=headers)

    ## Convert the response to a json object, if possible.
    ## Fix: every requests.Response has a .json *method*, so the original
    ## `hasattr(rawResponse, "json")` check was always True and the text
    ## fallback was unreachable; catch the decode error instead.
    try:
        response = rawResponse.json()
    except ValueError:
        ## Otherwise, get the text response
        response = rawResponse.text
    return response
7f450b8eedf6405b237730b9f7d6da5277c41e7b
14,080
def extract_milestones(github_issues: list) -> list:
    """Extracts milestone information from parsed issues.

    Fix: the original deduplicated names, descriptions and due dates in three
    *independent sets* and then zipped them back together — sets are unordered
    and independently sized, so the (name, description, due_date) pairing was
    scrambled. Deduplicate the complete tuples instead. Also use ``discard``
    so the absence of an all-None milestone no longer raises ValueError.

    :param github_issues: list of issue dicts, each with a ``milestone`` dict.
    :return: list of unique (name, description, due_date) tuples.
    """
    milestones = {
        (
            issue.get('milestone').get('name'),
            issue.get('milestone').get('description'),
            issue.get('milestone').get('due_date'),
        )
        for issue in github_issues
    }
    # Remove the placeholder produced by issues with no milestone data.
    milestones.discard((None, None, None))
    return list(milestones)
236f2f55eeff9de9e149fcb920b642684f3ee1ab
14,081
def format_number(value: float) -> str:
    """
    Format a number as a string, dropping the ".0" suffix when the number
    has no fractional part.

    :param value: The number to format.
    """
    if value % 1 == 0:
        return str(int(value))
    return str(value)
7f34c689a64753c97c664dadf61d57bdf837c8a6
14,082
def get_span_row_count(span):
    """
    Count the number of table rows a span covers.

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span.

    Returns
    -------
    int
        Number of distinct rows, assuming the pairs are listed with
        non-decreasing row indices.

    Example
    -------
    >>> get_span_row_count([[0, 1], [1, 1], [2, 1]])
    3
    """
    row_count = 1
    highest_row = span[0][0]
    for pair in span:
        if pair[0] > highest_row:
            row_count += 1
            highest_row = pair[0]
    return row_count
e226e0f78bd6711a7ddbe9c749ed43d1d2bc476c
14,083
def parse_score_qa(output, metric, digits=4):
    """Parse a metric value out of `pyserini.eval.evaluate_dpr_retrieval`
    output.

    Scans the output line by line for the first line mentioning ``metric``
    and returns its last whitespace-separated token as a rounded float;
    returns None when the metric never appears. Kept separate from the
    MS MARCO parser in case the formats diverge.
    """
    for line in output.split('\n'):
        if metric not in line:
            continue
        return round(float(line.split()[-1]), digits)
    return None
dbb2179fc1706618cc5a4ccfd88818d32086680b
14,084
def get_logbook_by_name(logbook_name, conn):
    """
    Get a logbook by name from a persistence backend connection.

    :param str logbook_name: The name of the logbook to get
    :param obj conn: A persistence backend connection
    :return obj logbook: The first logbook with the specified name
    :raises StopIteration: if no logbook with that name exists
        (unchanged from the original behavior)
    """
    # Use a generator expression instead of materializing the full filtered
    # list — stops scanning at the first match.
    return next(i for i in conn.get_logbooks() if i.name == logbook_name)
7ed83cfca3d7f0313046b39032c2b906c4bff410
14,085
def is_inside_relative_range(value, ref_value, pct):
    """
    Check whether ``value`` lies within ``pct`` (a fraction < 1) of
    ``ref_value`` on either side.

    Parameters
    ----------
    value : numeric
    ref_value : numeric
    pct : numeric
        Relative tolerance; should be smaller than 1.

    Returns
    -------
    bool
    """
    lower = ref_value * (1 - pct)
    upper = ref_value * (1 + pct)
    return lower <= value <= upper
9e60eda7b3ba979c9dc33be890d46394aa954436
14,087
from typing import List

def get_label_list(labels: List[List[int]]) -> List[int]:
    """Return a sorted list of all unique labels appearing in `labels`.

    Args:
        labels: A list of lists, each corresponding to the label-sequence
            of a text.

    Returns:
        Every label that appears anywhere in `labels`, sorted ascending.

    Example:
        Given `labels=[[0, 0, 3, 2, 5], [4, 0], [5, 2, 3]]`,
        returns `[0, 2, 3, 4, 5]`.
    """
    return sorted(set().union(*labels))
be795ff63c1eaccd221289708551a8ddd02b2cc5
14,088
def format_message(date_str, node, msg):
    """Format a log line as ``date: site: location (serial) message``,
    falling back to the node's model when it has no location."""
    where = node.location or node.model
    return f"{date_str}: {node.site_name}: {where} ({node.serial}) {msg}"
53dc7e2716f935a083c36e40ad4887cfe23c0aad
14,089
def _deep_value(*args, **kwargs): """ Drills down into tree using the keys """ node, keys = args[0], args[1:] for key in keys: node = node.get(key, {}) default = kwargs.get('default', {}) if node in ({}, [], None): node = default return node
fc821b5fe0758b582388c91be4139737970704b3
14,090
import os

def abspath(current_path, relative_path):
    """Resolve ``relative_path`` against the directory containing
    ``current_path`` and return the absolute result."""
    base_dir = os.path.dirname(current_path)
    return os.path.join(os.path.abspath(base_dir), relative_path)
19ec82d8629112bd2deb7c5bb80106426115f02a
14,091
import tempfile

def create_tmpdir():
    """Create and return a temporary directory (prefixed ``xl-``) for
    intermediate files; the caller is responsible for cleanup."""
    tmpdir = tempfile.mkdtemp(prefix='xl-')
    return tmpdir
7cac5eb979b3d7150084daf68749b30a3986e52b
14,093
import re

def get_aws_local_file(aws_creds_file):
    """
    Convert an AWS config/credentials file into a nested dictionary:
    ``{profile_name: {key: value, ...}, ...}``.

    Fixes: the file handle is now closed even when parsing raises
    (``with`` statement), regex patterns are raw strings, and values
    containing '=' are no longer truncated (``split('=', 1)``).

    :param aws_creds_file: path to the credentials/config file
    :return: dictionary
    """
    aws_config = {}
    current_profile = ''
    with open(aws_creds_file, 'r') as input_file:
        for line in input_file:
            # A "[profile]" header starts a new section.
            if re.match(r'^\[', line):
                current_profile = line.strip()[1:-1]
                aws_config[current_profile] = {}
            # A line starting with a word character is a "key = value" pair.
            if re.match(r'\w', line):
                key, val = line.split('=', 1)
                aws_config[current_profile][key.strip()] = val.strip()
    return aws_config
3c0b323f8dc61f17953d72f9e8adb74ac3e0e828
14,094
def _bowtie_args_from_config(data): """Configurable high level options for bowtie. """ config = data['config'] qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-M", 1] if multi_mappers else ["-m", 1] multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags cores = config.get("resources", {}).get("bowtie", {}).get("cores", None) num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + qual_flags + multi_flags
57e86765444657812d0bebc9b1a3c3dcb234d300
14,095
def split_unk(str_, vocab):
    """
    Split out all symbols not present in the vocabulary by surrounding them
    with spaces, while keeping in-vocabulary symbols untouched.
    (Original docstring: 将所有不在词表中的符号切分开,尽量保留在词表中的符号)

    Fixes: builds the result with ``str.join`` instead of quadratic ``+=``
    concatenation, and tests membership on the dict directly rather than
    on ``vocab.keys()``.
    """
    pieces = []
    for c in str_:
        if c in vocab:
            pieces.append(c)
        else:
            pieces.append(" " + c + " ")
    return "".join(pieces)
c54da92f9e69f32949bba538242ea91cfdb197f9
14,096
def _world2fig(ff, x, y):
    """
    Helper function to convert world to figure coordinates.

    Parameters
    ----------
    ff : `~aplpy.FITSFigure`
        `~aplpy.FITSFigure` instance.
    x : ndarray
        Array of x coordinates.
    y : ndarray
        Array of y coordinates.

    Returns
    -------
    coordsf : tuple
        Figure coordinates as tuple (xfig, yfig) of arrays.
    """
    # Convert world to pixel coordinates
    xp, yp = ff.world2pixel(x, y)

    # Pixel to Axes coordinates.
    # Fix: in Python 3 `zip` returns an iterator; matplotlib's transform
    # expects an (N, 2) array-like, so materialize it as a list.
    coordsa = ff._ax1.transData.transform(list(zip(xp, yp)))

    # Axes to figure coordinates
    coordsf = ff._figure.transFigure.inverted().transform(coordsa)
    return coordsf[:, 0], coordsf[:, 1]
99df767d948bc1c0807b676e2178de1478a1ac71
14,098
def is_palindrome(number):
    """
    Return True if `number` reads the same forwards and backwards,
    False otherwise.
    """
    digits = str(number)
    return digits == digits[::-1]
391aec57bba8366d7e7ef2c8187fda377f5a786d
14,099
from typing import Iterable

def flatten_list(li: Iterable):
    """Flatten arbitrarily nested iterables into a single flat list.

    Fix: strings (and bytes) are treated as atomic leaves. A single
    character is itself an Iterable that yields itself, so recursing into
    strings caused infinite recursion (RecursionError) in the original.
    """
    if isinstance(li, Iterable) and not isinstance(li, (str, bytes)):
        return [leaf for item in li for leaf in flatten_list(item)]
    return [li]
306536fdadf231b0a0f752bb63d7e01317819674
14,100
def improve_ensemble(energy, positions, ensemble, ensemble_energies, unchanged_iterations):
    """
    Improve an ensemble of poses by replacing its highest-energy member with
    a new pose whenever the new pose has lower energy than at least one
    current member.

    :param energy: The energy for a pose (simtk Quantity or comparable numeric).
    :param positions: Positions for the pose's particles.
    :param ensemble: A list of position arrays for the current ensemble.
    :param ensemble_energies: Energies for the current ensemble members,
        parallel to ``ensemble``.
    :param unchanged_iterations: Number of iterations the ensemble has gone
        unchanged.
    :returns: tuple ``(ensemble, ensemble_energies, unchanged_iterations)`` —
        the (possibly modified, in place) ensemble lists and the updated
        unchanged-iteration counter (reset to 0 on replacement).

    Fix: the original looked up ``index(max(ensemble_energies))`` twice —
    once before and once *after* overwriting the energy — so the positions
    were written to a different (wrong) slot than the energy.
    """
    if any(energy < e for e in ensemble_energies):
        # Locate the worst member once, then replace both fields at that slot.
        worst = ensemble_energies.index(max(ensemble_energies))
        ensemble_energies[worst] = energy
        ensemble[worst] = positions
        unchanged_iterations = 0
    else:
        unchanged_iterations = unchanged_iterations + 1
    return (ensemble, ensemble_energies, unchanged_iterations)
ac1dcf2ec104a886b61e2abe4b4e090724d47ce7
14,101
def get_static_welcome_message():
    """
    Get the static welcome page.
    """
    # Static HTML help text shown in the browser before any search runs.
    # NOTE(review): the literal below is preserved verbatim; its original
    # multi-line layout was collapsed in the source seen here.
    return """ <h3>Search Help</h3> <ul><li>The display below the line is an example of the output the browser shows you when you enter a search word. The search word was <b>green</b>.</li> <li>The search result shows for different parts of speech the <b>synsets</b> i.e. different meanings for the word.</li> <li>All underlined texts are hypertext links. There are two types of links: word links and others. Clicking a word link carries out a search for the word in the Wordnet database.</li> <li>Clicking a link of the other type opens a display section of data attached to that link. Clicking that link a second time closes the section again.</li> <li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li> <li>Clicking on a relation name opens a section that displays the associated synsets.</li> <li>Type a search word in the <b>Next Word</b> field and start the search by the <b>Enter/Return</b> key or click the <b>Search</b> button.</li> </ul> """
f630c67ab66069baf8b82b5b4c5fe111b9191a73
14,102
def platform_name(project, platform):
    """Get the untrusted platform name: ``PROJECT_PLATFORM`` (both parts
    upper-cased)."""
    return f"{project.upper()}_{platform.upper()}"
3f63ab210ef040114a536cac40efbad168631273
14,103
import numpy

def lat2g0(lat):
    """Calculate surface gravitational acceleration for latitude

    This function is stolen from atmlab:
    https://www.sat.ltu.se/trac/rt/browser/atmlab/trunk/geophysics/pt2z.m

    From the original description: Expression below taken from Wikipedia
    page "Gravity of Earth", that is stated to be: International Gravity
    Formula 1967, the 1967 Geodetic Reference System Formula, Helmert's
    equation or Clairault's formula.

        g = 9.780327 * (1 + 5.3024e-3*sin^2(lat) - 5.8e-6*sin^2(2*lat))

    Fixes relative to the original code: the second term squared the
    *argument* instead of the sine (``sin(deg2rad(2x)**2)``), and its sign
    was ``+`` where the formula requires ``-``.

    :param lat: Latitude [degrees]
    :returns: gravitational acceleration [m/s^2]
    """
    x = numpy.abs(lat)
    return 9.780327 * (1
                       + 5.3024e-3 * numpy.sin(numpy.deg2rad(x))**2
                       - 5.8e-6 * numpy.sin(numpy.deg2rad(2 * x))**2)
7c907a8016b6a579dd0a16a1662bf02358778d16
14,104
def tpearson(x, y, axis=0):
    """Tensor-based Pearson correlation of ``x`` and ``y`` along ``axis``.

    Computes cov(x, y) / (std(x) * std(y)) with population statistics
    (divisor ``n``), matching ``ndarray.std``'s default ddof=0.
    """
    n = x.shape[axis]
    x_centered = x - x.mean(axis=axis, keepdims=True)
    y_centered = y - y.mean(axis=axis, keepdims=True)
    std_product = x.std(axis=axis) * y.std(axis=axis)
    covariance = (x_centered * y_centered).sum(axis=axis) / n
    return covariance / std_product
37da10d2e21f04296a2cc0b3f8b239e0fcba8957
14,105
import re

def remove_unwanted_chars(x: str, *chars: str, to_replace: str = "") -> str:
    """Remove (or replace) unwanted characters from a string.

    Fix: each character is passed through ``re.escape`` before being placed
    in the regex character class — previously characters such as ``-``,
    ``]``, ``^`` or ``\\`` were interpolated raw and could silently create
    ranges (e.g. ``a``, ``-``, ``c`` became the class ``[a-c]``) or invalid
    patterns.
    """
    char_class = "".join(re.escape(c) for c in chars)
    return re.sub(f"[{char_class}]", to_replace, x)
4a1e25b1ad12f47f835d4f6cdbac4a6e08413077
14,106
def write_the_species_tree(annotated_species_tree, output_file):
    """
    Write an annotated species tree to a file.

    args:
        annotated_species_tree : annotated species tree string in .newick format
        output_file : destination file name

    output:
        the output file name, after the tree has been written to it
    """
    with open(output_file, "w") as handle:
        handle.write(annotated_species_tree)
    print("wrote the annotated species besttree to "+output_file)
    return output_file
7db9d5fc10cd27b1e7a5e51427e7de6991a1157a
14,107
import functools

def compose(*functions):
    """
    Functional composition helper.

    None entries are skipped; if every argument is None the result is None.

    Parameters
    ----------
    *functions
        function arguments to compose

    Returns
    -------
    function or None
        The composition f(g(...)) of the non-None functions.
    """
    if not any(functions):
        # All None (or empty): nothing to compose.
        return None

    def _pair(outer, inner):
        # Skip None on either side; compose only when both are callable.
        if outer is None:
            return inner
        if inner is None:
            return outer
        return lambda value: outer(inner(value))

    return functools.reduce(_pair, functions, lambda value: value)
8a18b1e0beef43c0cfac4439fa158675a486b560
14,108
def pretty_print_list(lst, name = 'features', repr_format=True):
    """
    Render a list compactly for display.

    Short lists (< 8 items, or falsy input) are shown whole; longer lists
    show the first three items, an ellipsis, the last item and a total
    count. ``repr_format`` controls whether bracket delimiters / repr()
    formatting are used.
    """
    if not lst or len(lst) < 8:
        return lst.__repr__() if repr_format else ', '.join(map(str, lst))

    preview = ', '.join(map(str, lst[:3]))
    open_bracket, close_bracket = ("[", "]") if repr_format else ("", "")
    return "{start}{topk}, ... {last}{end} (total {size} {name})".format(
        topk = preview, last = lst[-1], name = name, size = len(lst),
        start = open_bracket, end = close_bracket)
d199261e6b9fd226256a151601d0bd86e7458fe1
14,109
def factorial(n):
    """
    Return n! for a non-negative integer n.

    Fix: the original started the accumulator at ``n``, so factorial(0)
    returned 0 instead of 1.

    :type n: int
    :rtype: int
    """
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
17d6e211911417507e5887232f181bf774396c73
14,110
def _generate_fake_input_arg_componentsdk(arg_spec): """Generate a fake argument value for inputs of module spec Args: arg_spec (dict) : argument specification from yaml module spec Returns: object: sample fake value Raises: NotImplementedError: if arg type is not implemented """ if "AzureMLDataset" in arg_spec.type: return "/mnt/fakeinputdatasetpath" if "AnyDirectory" in arg_spec.type: return "/mnt/fakeinputdirectorypath" if "AnyFile" in arg_spec.type: return "/mnt/fakeinputfilepath/file.txt" if arg_spec.default: return arg_spec.default if "String" in arg_spec.type: return "0" if "Integer" in arg_spec.type: return arg_spec.min or arg_spec.max or "0" if "Boolean" in arg_spec.type: return False if "Float" in arg_spec.type: return arg_spec.min or arg_spec.max or "0.32" if "Enum" in arg_spec.type: return arg_spec.enum[0] raise NotImplementedError( "input type {} is not implemented in our test suite yet".format(arg_spec.type) )
62d92059ff6df74f4f08e6c2d09186f48058b617
14,112
def docker_compose_project_name():
    """Return a fixed compose project name so containers can optionally be
    reused between test runs."""
    project_name = "pytest-python-gitlab"
    return project_name
0c8ed2e63692fec969c042b4e2e8094459045c5b
14,116
def _partition(entity, sep): """Python2.4 doesn't have a partition method so we provide our own that mimics str.partition from later releases. Split the string at the first occurrence of sep, and return a 3-tuple containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return a 3-tuple containing the string itself, followed by two empty strings. """ parts = entity.split(sep, 1) if len(parts) == 2: return parts[0], sep, parts[1] else: return entity, '', ''
0172ce09bd0451eb538122bd9566a3e70d0dcc15
14,117
def get_phe_title(db, phename):
    """
    Look up the title for a phenotype description (search-bar helper).

    :param db: data-access object exposing ``list_association_sets()`` and
        ``get_phenotype_fields()`` returning pandas-like frames — assumed
        from usage, TODO confirm.
    :param phename: phenotype description string to match exactly.
    :return: the matching title string, or None when no phenotype fields exist.

    SciDBnew:
    res = str(list(phef[phef['description'] == "asthma_diagnosed_by_doctor"]['title'])[0])
    """
    # Always queries the first association set listed by the backend.
    phef = db.get_phenotype_fields(association_set=str(db.list_association_sets()['name'][0]))
    if phef.empty:
        return None
    else:
        # NOTE(review): raises IndexError when `phename` matches no row —
        # confirm callers guard against unknown descriptions.
        res = str(list(phef[phef['description'] == phename]['title'])[0])
        return res
93ef978e9b79760d0226cc0fd00d827f7b3872ff
14,118
def compatibility_g_m(gen_1, gen_2, name_1, name_2):
    """Check that every sequence of ``gen_1`` exists in ``gen_2`` with the
    same length.

    Fix: the length-mismatch message previously rendered as
    "...lengths missmatch(lengths are..." (typo plus a missing space from
    implicit string concatenation).

    :param gen_1: mapping of sequence name -> length
    :param gen_2: mapping of sequence name -> length
    :param name_1: display name for ``gen_1``
    :param name_2: display name for ``gen_2``
    :return: 0 when compatible, 2 when any sequence is missing or mismatched
    """
    print("Checking compatibility of {} with {}.".format(name_1, name_2))
    r_code = 0
    for seq in gen_1:
        if seq not in gen_2:
            print("FAIL\t{} sequence not found in {}.".format(seq, name_2))
            r_code = 2
        elif gen_1[seq] != gen_2[seq]:
            print("FAIL\t{} sequence lengths mismatch "
                  "(lengths are {} and {}).".format(seq, gen_1[seq], gen_2[seq]))
            r_code = 2
    print()
    return r_code
0ce34eb6ebe97bca179425939c95a2a6a19f6e6a
14,119
import sys
import subprocess

def _GetGitRevision(in_directory):
    """Returns the git hash tag for the given directory.

    Fix: in Python 3, ``communicate()`` returns ``bytes``; decode so the
    function returns the documented SHA1 *string* rather than bytes.

    Args:
      in_directory: The directory where git is to be run.

    Returns:
      The git SHA1 hash string.
    """
    git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
    p = subprocess.Popen(
        [git_exe, 'rev-parse', 'HEAD'],
        cwd=in_directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    (stdout, _) = p.communicate()
    return stdout.decode('utf-8', 'replace').strip()
4ee7abe93c3d63bec84e478725d7ca086599d74b
14,120
import socket

def getaddrinfo(host, port=None, family=0, socktype=0, proto=0, flags=0):
    """Thin pass-through wrapper for :func:`socket.getaddrinfo` with
    keyword-friendly defaults.

    return (family, socktype, proto, canonname, sockaddr)
    >>> socket.getaddrinfo("www.python.org", 80, 0, 0, socket.SOL_TCP)
    [(2, 1, 6, '', ('82.94.164.162', 80)),
     (10, 1, 6, '', ('2001:888:2000:d::a2', 80, 0, 0))]
    """
    # NOTE(review): results depend on the local resolver configuration.
    return socket.getaddrinfo(host, port, family, socktype, proto, flags)
5fde1bc04887e002199e119b24cde08227d14c0b
14,121
def coref_cleaning(t, new_coref_np, current_coref):
    """Assign the correct coreference tag to a token.

    Args:
        t (token): token object exposing ``coref``, ``full_pos``,
            ``sim_pos`` and ``change_coref`` — project type, shape assumed
            from usage here.
        new_coref_np (bool): whether we are inside an open (nested) coref NP.
        current_coref: the most recent coref id (str/int).

    Return:
        (new_coref_np, current_coref) after processing this token.
    """
    # Complete single-token mention: "(id)" — tag and close immediately.
    if t.coref.startswith('(') and t.coref.endswith(')'):
        current_coref = t.coref[1:-1]
        t.change_coref(current_coref)
        new_coref_np = False
    # nested coref: "(id" opens a multi-token mention.
    elif t.coref.startswith('('):
        current_coref = t.coref[1:]
        t.change_coref(current_coref)
        new_coref_np = True
        # Relative pronouns (PRELS) terminate the open mention early.
        if t.full_pos == 'PRELS':
            new_coref_np = False
    # if value is - , get last found integer (continue the open mention).
    elif new_coref_np and t.coref.startswith('-'):
        t.change_coref(current_coref)
        if t.full_pos == 'PRELS':
            new_coref_np = False
    # end of nested coref: "id)" closes the open mention.
    elif new_coref_np == True and t.coref.endswith(')'):
        current_coref = t.coref[:-1]
        # Verbs never carry the mention id — blank it out.
        if t.sim_pos=='V':
            current_coref = '_'
        t.change_coref(current_coref)
        new_coref_np = False
    # no coref entity
    elif new_coref_np == False:
        t.change_coref('_')
    return (new_coref_np, current_coref)
9e2d1db3316c6ddd2c09050b64838cc92d5dd651
14,123
def get_default_tokenizer():
    """Return a simple split-based tokenizer.

    The returned callable splits on ``delimiter`` (whitespace when None);
    an empty-string delimiter yields character-level tokens.
    """
    def _split_tokenizer(x, delimiter=None):
        return list(x) if delimiter == "" else x.split(delimiter)
    return _split_tokenizer
91cc23378d230d9cb47fbe0c47cf7a33cbce5265
14,124
import json

def load_dict_from_json(filename: str) -> dict:
    """
    Load the given JSON file as a python `dict`.

    Parameters
    ----------
    filename: `str`
        an absolute path to the json file to be loaded.

    Returns
    -------
    `dict`
        The loaded json file as a python dictionary.
    """
    with open(filename, 'r') as handle:
        return json.load(handle)
1d55e5dbcf33e7f1d21063be7b722a5ed5bc6bb3
14,125
def collect_not_null_kwargs(**kwargs) -> dict:
    """
    Collect the keyword arguments whose value is not None into a dict.

    .. versionadded:: 1.0.1
    """
    return {name: value for name, value in kwargs.items() if value is not None}
84e3438b5a4f0a48a4b558cc50b65a21e5883af9
14,127
def get_n_neighborhood_start_stop_indices_3D(volume_shape, point, n):
    """Compute per-axis start/stop indices of the n-neighborhood of a point
    inside a 3D volume.

    The stop index on each axis is *exclusive*, so for n=0 the result is
    ``x, x+1, y, y+1, z, z+1``. Ranges are clamped to the volume bounds.

    Parameters
    ----------
    volume_shape: 3-tuple of int
        The shape of the volume (e.g., whole 3D image).
    point: 1D array of length 3
        x, y, z coordinates of the query point; must lie within the volume.
    n: int >= 0
        Neighborhood radius per axis (0 -> just the point, 1 -> up to 27
        voxels, 2 -> up to 125, clipped at the borders).

    Returns
    -------
    tuple of 6 ints
        (xstart, xend, ystart, yend, zstart, zend) with inclusive starts
        and exclusive ends.

    Examples
    --------
    >>> get_n_neighborhood_start_stop_indices_3D((3, 3, 3), [2, 2, 2], 1)
    (1, 3, 1, 3, 1, 3)
    """
    bounds = []
    for axis, coord in enumerate(point):
        bounds.append(max(0, coord - n))                     # inclusive start
        bounds.append(min(coord + 1 + n, volume_shape[axis]))  # exclusive end
    return tuple(bounds)
6eb1c6f0b4ff537b3d0eb1b647b019a2d61480b7
14,128
from typing import Dict
from typing import Any

def add_to_dict_with_swapped_keys(old_dict: Dict[Any, Dict],
                                  new_dict: Dict[Any, Dict]) -> Dict[Any, Dict]:
    """
    Merge a nested dictionary into ``new_dict`` with its two key levels
    swapped.

    {'Key1': {'Key2': 42}} -> {'Key2': {'Key1': 42}}

    Args:
        old_dict: a nested dictionary whose keys are to be swapped
        new_dict: an initiated dictionary, does not have to be empty

    Returns:
        the new_dict with the addition of swapped keys of the old_dict
    """
    for outer_key, inner in old_dict.items():
        for inner_key, value in inner.items():
            new_dict.setdefault(inner_key, {})[outer_key] = value
    return new_dict
2b4d0a0d8bd2734d8c0bc7abde1fa659454f1464
14,129
import os

def splitpath(full_path):
    """
    Split a path into all of its components (vs. just head/tail).

    Fix: for absolute paths the original looped forever, because
    ``os.path.split('/')`` returns ``('/', '')`` and the head never
    becomes empty. We now stop when ``os.path.split`` makes no progress
    and keep the root component.
    """
    parts = []
    head, tail = os.path.split(full_path)
    parts.append(tail)
    while head:
        previous_head = head
        head, tail = os.path.split(head)
        parts.append(tail)
        if head == previous_head:
            # Reached the filesystem root (e.g. '/'); record it and stop.
            parts.append(head)
            break
    return [piece for piece in reversed(parts) if piece]
427a9de5772459504026d235e5e9ff59259a5c80
14,130
import typing
import functools

def build_simple_validator(func: typing.Callable, *args, **kwargs) -> typing.Callable[[typing.Any], typing.Any]:
    """Build a ready-to-use simple validator out of a function.

    Args:
        - func: callable, the function called to validate an input
        - args: positional arguments partially applied to ``func``
        - kwargs: keyword arguments partially applied to ``func``

    Returns:
        A ``functools.partial`` of ``func`` with ``args``/``kwargs``
        pre-bound and metadata copied from ``func`` via
        ``functools.update_wrapper``.
    """
    partial_func = functools.partial(func, *args, **kwargs)
    functools.update_wrapper(partial_func, func)
    return partial_func
cc8227cce228579e51aa05ceb25d9020d51eb199
14,131
import torch

def quat_conjugate(a: torch.Tensor) -> torch.Tensor:
    """Computes the conjugate of a quaternion (negated vector part,
    unchanged scalar part; layout is (x, y, z, w)).

    Args:
        a: quaternion(s) to conjugate, shape (..., 4)

    Returns:
        Conjugate of `a` with the same shape.
    """
    original_shape = a.shape
    flat = a.reshape(-1, 4)
    conjugated = torch.cat((-flat[:, :3], flat[:, -1:]), dim=-1)
    return conjugated.view(original_shape)
530eb2a8d8b9b87de2bfcaeb48729704908131cc
14,132
import struct

def discard_blanks(data):
    """Strip leading '*' and ' ' bytes from the data buffer.

    Fix: the original round-tripped every byte through
    ``struct.unpack('s', ...)`` and ``decode()`` just to compare it — a
    direct one-byte slice comparison is equivalent and avoids the per-byte
    overhead. (``import struct`` is kept because other snippets in this
    file may rely on it.)

    :param data: bytes-like buffer
    :return: ``data`` with the leading blank run removed
    """
    pos = 0
    while pos < len(data) and data[pos:pos + 1] in (b"*", b" "):
        pos += 1
    return data[pos:]
aff090ab188bffd6bca86eb3b861d25a0440dbcc
14,135
def blank_lines(logical_line, blank_lines, indent_level, line_number, previous_logical):
    """
    Separate top-level function and class definitions with two blank lines.

    Method definitions inside a class are separated by a single blank line.

    Extra blank lines may be used (sparingly) to separate groups of related
    functions. Blank lines may be omitted between a bunch of related
    one-liners (e.g. a set of dummy implementations).

    Use blank lines in functions, sparingly, to indicate logical sections.

    Returns None when the line is acceptable, or an ``(offset, message)``
    tuple reporting E301/E302/E303 — the pep8-checker result protocol.
    """
    if line_number == 1:
        return  # Don't expect blank lines before the first line
    if previous_logical.startswith('@'):
        return  # Don't expect blank lines after function decorator
    # Only definition lines (def/class/decorator) carry the 1-vs-2 blank
    # line requirements; everything else is only checked for excess blanks.
    if (logical_line.startswith('def ') or logical_line.startswith('class ') or
            logical_line.startswith('@')):
        # Indented definition -> method inside a class: exactly 1 blank line.
        if indent_level > 0 and blank_lines != 1:
            return 0, "E301 expected 1 blank line, found %d" % blank_lines
        # Top-level definition: exactly 2 blank lines.
        if indent_level == 0 and blank_lines != 2:
            return 0, "E302 expected 2 blank lines, found %d" % blank_lines
    if blank_lines > 2:
        return 0, "E303 too many blank lines (%d)" % blank_lines
0f2d89ae662ffd39170e83f8788cf2946f7cd045
14,136
def gem_cov(frags, weight_flag):
    """
    GEM coverage: one bed entry spanning from the first fragment's start to
    the last fragment's end.

    Args:
        frags (list of list): [chrom, start, end] for each fragment in a GEM
        weight_flag (binary): True to weight by fragment number (the span is
            repeated once per fragment); False for a single entry

    Returns:
        list of bed entries in format [chrom, start, end]
    """
    span = frags[0][0:2]
    span.append(frags[-1][2])
    if weight_flag == True:
        return [span] * len(frags)
    return [span]
cd515d0f358992e07d9bf441c486ce088ce7c7a3
14,137
def unnormalise_density(c, rho):
    """Reverse a density normalisation by scaling ``c`` back by ``rho``.

    As a general rule this should be done using the units read in from the
    file, with unit conversion done afterwards.
    """
    return rho * c
1ffa7b4f5e08a071d5f8e94489d363a085f31930
14,138
import random

def mutate_color(color):
    """Mutate one coordinate of an HSV color in place, replacing it with a
    fresh uniform value in [0, 1), and return the same list.

    (``random.random()`` is already < 1, so no modulo is needed.)
    """
    idx = random.randrange(3)
    color[idx] = random.random()
    return color
5fdfef8b7c29be367dee85f195458924bb8421aa
14,141
def container_with_most_water(heights):
    """
    Two-pointer solution to the "container with most water" problem.

    Parameters
    ----------
    heights : list
        A list of integers representing heights of containers

    Returns
    -------
    int
        area of container with most water

    >>> container_with_most_water([1, 8, 6, 2, 5, 4, 8, 3, 7])
    49
    >>> container_with_most_water([1, 1])
    1
    """
    best = 0
    lo, hi = 0, len(heights) - 1
    while lo < hi:
        left, right = heights[lo], heights[hi]
        best = max(best, min(left, right) * (hi - lo))
        # Advance the shorter side; advance both on a tie.
        if left < right:
            lo += 1
        elif right < left:
            hi -= 1
        else:
            lo += 1
            hi -= 1
    return best
d25c602bac1aab4484ca60680bcaa21abbb78dc7
14,143
import datetime as dt import calendar def _parse_date(row): """parse the date of a single row into datetime object.""" day_ix = 4; mon_ix = 5; year_ix = 6; date = dt.date(int(row[year_ix]), list(calendar.month_abbr).index(row[mon_ix]), int(row[day_ix])) del row[day_ix:year_ix+1] row.insert(day_ix, date) return row
51b3c86d98f2c7686b34fbae3dd1306e0f00f817
14,147
def shiny_gold_in(bag: str, rules: dict) -> bool:
    """Recursively check whether ``bag`` (directly or transitively)
    contains a shiny gold bag according to ``rules``."""
    contents = rules[bag]
    if "shiny gold" in contents:
        return True
    # An empty bag contains nothing; otherwise recurse into each inner bag.
    return any(shiny_gold_in(inner, rules) for inner in contents)
1859f499b72a938a58a78af5ca1a78e9f7221731
14,148
def get_position_type(salary_plan):
    """
    Map a salary plan code to one of the VIVO position types.

    Unknown codes and plan types with no VIVO equivalent map to None.
    """
    position_dict = {
        'CPFI': 'postdoc',
        'CTSY': 'courtesy-faculty',
        'FA09': 'faculty',
        'FA9M': 'clinical-faculty',
        'FA10': 'faculty',
        'FA12': 'faculty',
        'FACM': 'clinical-faculty',
        'FAPD': 'postdoc',
        'FASU': 'faculty',
        'FELL': None,  # Fellowship, lump sum payment only
        'FWSP': None,  # student-assistant
        'GA09': None,  # graduate-assistant
        'GA12': None,  # graduate-assistant
        'GASU': None,  # graduate-assistant
        'HOUS': 'housestaff',
        'ISCR': None,  # Scholarship, lump sum payment only
        'OF09': 'temp-faculty',
        'OF12': 'temp-faculty',
        'OFSU': 'temp-faculty',
        'OPSE': None,  # OPS
        'OPSN': None,  # OPS
        'STAS': None,  # student-assistant
        'STBW': None,  # student-assistant
        'TA09': 'non-academic',
        'TA10': 'non-academic',
        'TA12': 'non-academic',
        'TASU': 'non-academic',
        'TU1E': 'non-academic',
        'TU2E': 'non-academic',
        'TU9E': 'non-academic',
        'TUSE': 'non-academic',
        'TU1N': None,  # TEAMS Hourly
        'TU2N': None,  # TEAMS Hourly
        'TU9N': None,  # TEAMS Hourly
        'TUSN': None,  # TEAMS Hourly
        'US1N': None,  # USPS
        'US2N': None,  # USPS
        'US9N': None,  # USPS
        'USSN': None,  # USPS
        'US2E': 'non-academic',  # USPS Exempt
    }
    return position_dict.get(salary_plan, None)
eaaa64bfe35d27476eb9218d6401e0565126392d
14,149
import ast import logging def _generate_trees(filename, with_filenames=False, with_file_content=False): """ Generated trees. :param filename: filename :param with_filenames: boolean :param with_file_content: boolean :return: list of ast object """ trees = [] with open(filename, 'r', encoding='utf-8') as attempt_handler: main_file_content = attempt_handler.read() try: tree = ast.parse(main_file_content) except SyntaxError as e: logging.exception("Exception occurred.") tree = None if with_filenames: if with_file_content: trees.append((filename, main_file_content, tree)) else: trees.append((filename, tree)) else: trees.append(tree) return trees
258ccfabebb4de21d971939dfa3369a4658bace4
14,150
def str2ms(s):
    """Convert an SRT timestamp to milliseconds.

    Arguments:
        s: A time value in the format HH:MM:SS,mmm (optionally padded
           with surrounding whitespace).

    Returns:
        The timestamp as an integer number of milliseconds.
    """
    clock, millis = s.strip().split(",")
    hours, minutes, seconds = (int(part) for part in clock.split(":"))
    return ((hours * 60 + minutes) * 60 + seconds) * 1000 + int(millis)
552f9ffbd557cc0729c035901dc1a1a1bfae66d8
14,153
def parse(file):
    """Read image size and class names from an open data file.

    The first line has the form "<height>*<width>*<names-list>", where the
    names list is rendered like "['a', 'b']".

    :return: (height, width, names) -- ints plus a list of name strings
    """
    first_line = file.readlines()[0]
    str_height, str_width, str_names = first_line.split("*")
    # Strip the surrounding brackets/quotes of the repr-style list.
    names = str_names.strip("][").replace("'", "").split(", ")
    return (int(str_height), int(str_width), names)
554bc1bf07671aecbe7da2ba2cfbb0bd51aa511c
14,155
import torch


def get_device(inputs):
    """Return the device of a tensor, or of the first tensor in a sequence.

    :param inputs: a ``torch.Tensor`` or a tuple/list whose first element
        is a tensor.
    :raises TypeError: when *inputs* is neither of the above.
    """
    if isinstance(inputs, torch.Tensor):
        return inputs.device
    if isinstance(inputs, (tuple, list)):
        return inputs[0].device
    raise TypeError(f'Inputs can be a tensor or list of tensors, got {type(inputs)} instead!')
eb67cb3a5ae226c4136bc172d37225ba6a64b45f
14,157
def pythagorean_distance_equation(path1, path2):
    """Euclidean distance between two 2-D points.

    Derived from the Pythagorean theorem.

    :param path1: (x, y) of the first point
    :param path2: (x, y) of the second point
    :return: straight-line distance between the points
    """
    x1, y1 = path1[0], path1[1]
    x2, y2 = path2[0], path2[1]
    return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
a16f9d2a3f4ba824ebeb020ed2e5b1bb4068cc0e
14,160
def quick_sort(sequence: list) -> list:
    """Return the items of *sequence* sorted ascending (quicksort).

    The input list is never modified; recursion uses the last element
    as pivot.

    :param sequence: mutable ordered collection of comparable items
    :return: a new list ordered ascending (or the input itself when it
        has fewer than two elements)
    """
    if len(sequence) < 2:
        return sequence
    working = list(sequence)
    pivot = working.pop()
    lesser = [item for item in working if item <= pivot]
    greater = [item for item in working if item > pivot]
    return quick_sort(lesser) + [pivot] + quick_sort(greater)
73470224b8f149568b84c7b723097fc0c2ef353f
14,161
def _is_gradient_task(task_id, num_tasks): """Returns True if this task should update the weights.""" if num_tasks < 3: return True return 0 <= task_id < 0.6 * num_tasks
11d37b1095e40ef0be840c63a0c184e63630e945
14,162
def score_tup(t):
    """Score an ngram tuple from a database ngram table.

    A higher-scoring term is more deserving of inclusion in the
    resulting acrostic.

    :param t: (Term string, initials string, Corpus count, Used count)
    :return: Fitness score for this term, never below 1
    """
    term, inits, pop, used = t
    raw = (len(term) * pop / (10 * used ** 3)) ** len(inits)
    # Terms containing '#' are worth double.
    score = 2 * raw if "#" in term else raw
    return max(score, 1)
eea34dbf2d6a7dec37dc95cde289560a77c72f7e
14,163
import torch


def get_optimizer(parameters, lr, weight_decay):
    """Build an Adam optimizer with fixed hyperparameters.

    Args:
        parameters: filter/iterable of parameters to optimize
        lr: float, initial learning rate
        weight_decay: float, between 0.0 and 1.0
    Return:
        a configured ``torch.optim.Adam`` optimizer
    """
    optimizer = torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
    return optimizer
54b4c6a4cd02672ebfc8ff9c850f0d601fc6510f
14,165
def DoesMoleculeContainsPAINSPattern(Mol, PAINSPatternMols):
    """Check presence of any PAINS substructure pattern in the molecule.

    :param Mol: molecule to screen (RDKit-style interface)
    :param PAINSPatternMols: iterable of PAINS pattern molecules
    :return: True when any pattern matches (chirality-aware), else False
    """
    return any(
        Mol.HasSubstructMatch(PatternMol, useChirality=True)
        for PatternMol in PAINSPatternMols
    )
15b8684b69c02508f24b075db654efcb4c903188
14,166
def encrypt_decrypt():
    """Ask the user to choose between encryption and decryption.

    Re-prompts until a valid answer is given.

    Returns:
        "E" for encryption or "D" for decryption.
    """
    while True:
        choice = input("Enter E for encryption or D for decryption:")
        if choice in ("E", "D"):
            return choice
        print("Wrong input! Please try again.\n")
1361e86939cf333680ba9c640487bf246a8faf2d
14,168
def get_P_Elc_toilet_seat_heater(theta_ave_d, P_Elc_toilet_seat_heater_rtd):
    """Compute the power draw of the heated toilet seat, in W.

    Parameters
    ----------
    theta_ave_d : float
        Mean outdoor temperature on day d (11.23 regardless of region or
        season), degrees C.
    P_Elc_toilet_seat_heater_rtd : float
        Rated power of the seat heater, W.

    Returns
    ----------
    P_Elc_toilet_seat_heater : float
        Seat-heater power draw, W.
    """
    # Mean toilet-room temperature estimated from the outdoor temperature.
    theata_toilet_ave_d = 0.4984 * theta_ave_d + 13.427
    # Daily energy; the rated-power term is scaled against the 45 W reference.
    # NOTE(review): an earlier commented-out variant used -20.01 as the slope;
    # -20.1 is kept here unchanged -- confirm which coefficient is intended.
    E_Elc_toilet_seat_heater_d = (
        -20.1 * theata_toilet_ave_d
        + 922.4 * P_Elc_toilet_seat_heater_rtd / 45
    )
    # Convert daily energy to average power over 24 hours.
    return E_Elc_toilet_seat_heater_d / (24 * 1)
48ca59bbf2ca88e4e93febb4c83c80564050757b
14,169
def splunk_setup(splunk):
    """Default (no-op) session setup hook for the Splunk test fixture.

    Override this fixture in conftest.py when setup is required before the
    test session -- e.g. enabling saved searches, restarting Splunk, or
    configuring an Add-on's inputs. The *splunk* argument provides the
    connection details of the instance as a dict, for example::

        from splunklib import binding

        @pytest.fixture(scope="session")
        def splunk_setup(splunk):
            splunk_binding = binding.connect(**splunk)
            splunk_binding.post(
                f"/servicesNS/nobody/{addon_name}/saved/searches/{savedsearch}/enable",
                data='',
            )

    This default implementation just hands the details back unchanged.
    """
    return splunk
88255ac6efd493cc6cb8cf6201ca1f9528534451
14,170
import requests


def get_gist(url):
    """Fetch the contents of a raw gist URL.

    Note: *url* must serve the raw gist, not the usual gist page.

    Args:
        url (str): raw gist URL.

    Returns:
        (str): the gist body.
    """
    response = requests.get(url)
    assert response.status_code == 200, "Failed Page query code {}".format(response.status_code)
    return response.text
853623c31af246dc07e22ac8a7dd0b515a74a080
14,171
import struct


def encode_struct(fmt, value):
    """Pack a single python value into bytes using a ``struct`` format."""
    packed = struct.pack(fmt, value)
    return packed
57285e612f1f2798eceace6de7be0a71fd4520a0
14,172
import re


def is_email(addr):
    """Check whether a string is a well-formed (lower-case) e-mail address.

    :param addr: address string to validate
    :return: True when *addr* matches the expected pattern, else False
    """
    # The dot between domain labels must be escaped: the original pattern's
    # bare '.' matched any character, so e.g. "a@bcxcom" was accepted.
    re_is_email = re.compile(
        r"^[a-z0-9]+([._-]*[a-z0-9])*@([a-z0-9]+[-a-z0-9]*[a-z0-9]+\.){1,63}[a-z]+$"
    )
    return re_is_email.search(addr) is not None
c48d931af7f57420d4b5829da02aa72203275f0f
14,173
def merge_unique(a: list, b: list) -> set:
    """Merge all of the unique values of each list into a new set.

    Unlike the previous implementation (which called ``a.extend(b)``),
    neither input list is modified.

    >>> merge_unique([1, 2, 3], [3, 4, 5])
    {1, 2, 3, 4, 5}
    """
    return set(a).union(b)
6a969c6beaee46e5618d25c5714cec223ebf1686
14,174
def get_wanted_parameter(season_data, names, index_para):
    """Collect one chosen statistic for each named player.

    :param season_data: dict mapping player name -> list of statistics
    :param names: list of player names to look up
    :param index_para: index of the wanted statistic in each player's list
    :return: list with the wanted statistic for each player, in input order
    """
    return [season_data[name][index_para] for name in names]
0d278abd79f28c922dfcacf4984af7efe14a9070
14,175
import argparse


def parse_args():
    """Build and parse the command-line options for pulling AWS instances
    for appetite.

    :return: parsed argparse namespace
    """
    arg_parser = argparse.ArgumentParser(
        description='Params for pulling aws instances for appetite')
    # Instance-selection filters.
    arg_parser.add_argument("-n", "--name-query",
                            help="filter on name based on aws tag",
                            dest="name_query")
    arg_parser.add_argument("-x", "--regex",
                            help="Secondary regex used for instance filtering",
                            dest="regex_filter",
                            default="(.*?)")
    arg_parser.add_argument("-r", "--region", help="region to query",
                            default="us-west-2")
    arg_parser.add_argument("-t", "--tag", help="Tag to query", dest="tag",
                            default="Name")
    # Output formatting.
    arg_parser.add_argument("-q", "--add-quotes",
                            help="If quotes are added to output",
                            action='store_true',
                            default=False,
                            dest="add_quotes")
    arg_parser.add_argument("-i", "--just-ips",
                            help="get just the ips",
                            action='store_true',
                            default=False,
                            dest="just_ip")
    return arg_parser.parse_args()
8e5f1a67de324502ba801bfa9971e891c79fcbda
14,176
import requests


def get_FREQ(
    APIKey='AP8DA23',
    FromDateTime='2021-01-01 00:01:00',
    ToDateTime='2021-02-01 23:59:00',
    ServiceType='csv',
    endpoint='https://api.bmreports.com/BMRS/FREQ/v1'
):
    """Rolling System Frequency.

    Query the BMRS FREQ endpoint and return the raw HTTP response.
    """
    query = {
        'APIKey': APIKey,
        'FromDateTime': FromDateTime,
        'ToDateTime': ToDateTime,
        'ServiceType': ServiceType,
    }
    return requests.get(endpoint, params=query)
9e28b80f2b10c998047f10181cd6612a63392111
14,177
def cxDummy(ind1, ind2):
    """No-op crossover: hand both individuals back untouched.

    Used when chromosomes carry a single gene, so crossover could not
    change the population anyway.
    """
    return (ind1, ind2)
d5a3667cb714663348d5f8f421c167e134eed612
14,178
def comtypes_get_refcount(ptr):
    """Testing helper: return the COM reference count of a comtypes object.

    Temporarily AddRef's the pointer; the paired Release returns the count
    after its decrement, i.e. the original refcount.
    """
    ptr.AddRef()
    count = ptr.Release()
    return count
d2e7addf5a4bfa497d46597f194bfaef48b129d7
14,179
def convert_cookie_str(_str):
    """Convert a 'k=v; k2=v2' cookie header string to a dict.

    A value may itself contain '=' -- only the first '=' in each pair
    splits key from value.
    """
    return dict(
        pair.strip().split('=', 1)
        for pair in _str.split(';')
    )
de6ada63afb3490e793a62e7de98460c48f3b1c8
14,180
def DL_ignore(answers):
    """Return False if any dictionary-learning method was selected.

    Arguments
    ---------
    answers: dict
        Previous questions answers.

    Returns
    -------
    bool
        True if DL verbosity question should be ignored.
    """
    dl_methods = ('ITKrMM', 'wKSVD', 'BPFA')
    return not any(method in answers['method'] for method in dl_methods)
fda853fc0f959dd5302a90bc9d56f012a1d7da8f
14,183
def wait(status, timeout=None, *, poll_rate="DEPRECATED"):
    """(Blocking) wait for the status object to complete.

    Parameters
    ----------
    status: StatusBase
        A Status object.
    timeout: Union[Number, None], optional
        Seconds to wait; None blocks until the status completes or the
        user interrupts.
    poll_rate: "DEPRECATED"
        Ignored; kept only for backward compatibility (this function
        delegates and does not poll).

    Raises
    ------
    WaitTimeoutError
        If the status has not completed within ``timeout`` (measured from
        this call, not from the beginning of the action).
    Exception
        ``status.exception()`` when the status finished with an error;
        this may include ``TimeoutError`` raised by the action itself,
        distinct from ``WaitTimeoutError`` above.
    """
    # Everything is delegated to the status object itself.
    return status.wait(timeout)
3311714c7aee5cbbecf658bfa563826c363752e2
14,186
import re


def find_md_links(md):
    """Return the inline markdown links found in *md*.

    :param md: markdown text
    :return: list of ``(text, url)`` tuples for links written as
        ``[text](url)``

    Note: an earlier version also collected footnote-style links
    (``[text][1]`` plus ``[1]: url`` definitions) but never returned
    them; that dead code was removed without changing the return value.
    """
    # https://stackoverflow.com/a/30738268/2755116
    INLINE_LINK_RE = re.compile(r'\[([^\]]+)\]\(([^)]+)\)')
    return list(INLINE_LINK_RE.findall(md))
d630d22d571e774312516fc4005444087d23392f
14,187
def sex(return_expectations=None):
    """
    Returns M, F or empty string if unknown or other
    """
    # NOTE(review): the result is the tuple
    # ("sex", {"return_expectations": <arg>}) because locals() captures the
    # parameter by name. Presumably consumed by a study-definition DSL
    # (e.g. cohort-extractor) -- do not rename the parameter, since its name
    # becomes a dict key in the returned value. TODO confirm consumer.
    return "sex", locals()
4f461d76d96f6c0559d20693b28280436af1a594
14,188
def epoch_time(start_time, end_time):
    """Split an elapsed interval into whole minutes and seconds.

    :param start_time: start of the epoch (seconds)
    :param end_time: end of the epoch (seconds)
    :return: (minutes, seconds) as ints
    """
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
8265cb78c26a96a83a1035c09a7dccb0edf8c4d0
14,189
def fibonaccinumber(n):
    """Print the first n-1 Fibonacci numbers and return the n-th.

    Side effect: the terms 1 1 2 3 5 ... (n-1 of them) are printed,
    space-separated. Returns 0 for n == 0, 1 for n == 1, the n-th
    Fibonacci number otherwise; prints an error message and returns
    None for n < 0.
    """
    if n < 0:
        print("Incorrect input")
        return None
    if n == 0:
        return 0
    if n == 1:
        return 1
    prev, cur = 1, 1
    for _ in range(n - 1):
        print(prev, end=' ')
        prev, cur = cur, prev + cur
    return prev
c69d23d2603360cba6b5d5e600da1aa96dcfec60
14,191
def play(pet) -> bool:
    """Return False (with a farewell message) if any pet stat hit 0."""
    stats = (pet.hunger, pet.thirst, pet.energy, pet.fitness, pet.mental_health)
    if all(stats):
        return True
    print("Oh no! Take better care of", pet.name, "next time!")
    print("Thank you for playing!")
    print(" - - - ")
    print(pet)
    print(" - - - ")
    return False
482b38868d402d40687ed0c7495a122c50783fe3
14,192
def base_prob(phred_score):
    """Return the probability that a base call is incorrect.

    Derived from the Phred score definition: P = 10^(-Q/10).
    """
    exponent = -float(phred_score) / 10
    return 10.0 ** exponent
8231019213204e65d577ea86d1e2e0e7352a3f70
14,193
import re


def need_format(line: str) -> bool:
    """Return True if *line* holds a title declaration followed by content.

    Titles are prefixed e_/k_/d_/c_ (empire/kingdom/duchy/county). A
    declaration does NOT need formatting when its '=' is followed only by
    an opening brace at end of line, or by a brace plus a comment.
    """
    # The four ranks previously had four byte-identical regex triples;
    # they differ only in the prefix, so loop instead of duplicating.
    for prefix in ("e_", "k_", "d_", "c_"):
        declared = re.search(rf"\t+{prefix}\w*[-'\w]*\W*=", line)
        brace_at_eol = re.search(rf"\t+{prefix}\w*[-'\w]*\W*=\W*{{[\w\t]*\n", line)
        brace_then_comment = re.search(rf"\t+{prefix}\w*[-'\w]*\W*=\W*{{\W*#", line)
        if declared is not None and brace_at_eol is None and brace_then_comment is None:
            return True
    return False
61576aa02a745db8b0ce0c9c4c9a4d4589e8d842
14,195
import argparse


def parse():
    """Build the test-runner argument parser and return the known args.

    Unknown extra arguments are ignored (``parse_known_args``); the
    --tags value is split on commas into a list before returning.
    """
    cli = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    cli.add_argument(
        "-w", "--what",
        dest="what",
        help="REQUIRED - The absolute path to the folder containing the tests"
    )
    cli.add_argument(
        "-t", "--tags",
        dest="tags",
        default=[],
        help="Collects tags to run specific tests\n" + \
            "USAGE: -t=tag1 || -t=tag1,tag2"
    )
    cli.add_argument(
        "-e", "--extra",
        dest="extra_assignments",
        default=False,
        action="store_true",
        help="Includes tests for extra assignments"
    )
    cli.add_argument(
        "--trace",
        dest="trace_assertion_error",
        default=False,
        action="store_true",
        help="Adds a traceback option for assertion errors"
    )
    cli.add_argument(
        "--exam",
        dest="exam",
        default=False,
        action="store_true",
        help="Use when running test for an exam"
    )
    args, _unknown = cli.parse_known_args()
    if args.tags:
        args.tags = args.tags.split(",")
    return args
ef1700cc5a3692fd5cd85d84cd24f37f859df7a7
14,196
def builderFor(action, hypervisor=None, template=None, service=None):
    """Attach build metadata to a function so suitable builder methods can
    be discovered by these properties during building.

    :param action: Name of the generic step/action (see planner) that this
        function implements
    :type action: str
    :param hypervisor: Name of the hypervisor that this function can be
        used for
    :type hypervisor: str
    :param template: A list of template properties this function is
        suitable for (e.g. OS, certain software)
    :type template: [str]
    :param service: Production or type of the service that this function
        can be used for
    :type service: str
    :returns: A decorator stamping the given attribute values onto its
        target function
    :rtype: function
    """
    def decorate(func):
        func.action = action
        func.hypervisor = hypervisor
        func.template = template
        func.service = service
        return func
    return decorate
1dddfd8dc5dbc87e5a018fc4fb144bef8a8f98e3
14,197
def undeployed_value_only_calculator(repository, undeployed_value_tuple, deployed_value_tuple):
    """Use the undeployed value as-is.

    For properties that will never have a deployed value; the value sits
    at index 2 of the undeployed tuple. *repository* and
    *deployed_value_tuple* are accepted for calculator-interface
    compatibility and ignored.
    """
    value = undeployed_value_tuple[2]
    return value
58294274d37b4c7bb9cad21bb01260986cb0d6ae
14,198
import re def _remove_inner_punctuation(string): """ If two strings are separated by & or -, remove the symbol and join the strings. Example ------- >>> _remove_inner_punctuation("This is AT&T here") 'This is ATT here' >>> _remove_inner_punctuation("A T & T") 'A T T' """ string = re.sub('[\&\-]', '', string) return string
4ca3e7f91e35dba2e0a34f1dfd935e004623875e
14,199
def _scrape_main_synonym_section(left_content): """Scrapes the main/default synonym section for a word. If there is no pos listed, use the one listed for the word """ synonym_header = left_content.find('div', {'class' : 'synonyms_list'}) synonym_labels = synonym_header.find_all('p', {'class' : 'function-label'}) synonym_lists = synonym_header.find_all('p', {'class' : None}) if len(synonym_labels) != len(synonym_lists): raise ValueError('There are an uneven number of labels and lists') synonym_list = [] for label, s_list in zip(synonym_labels, synonym_lists): word_list = s_list.find_all('a') word_list_text = [word for word in word_list] pos_synonym_flag = label.getText().lower() synonym_list.append((pos_synonym_flag, word_list_text)) return synonym_list
21063c652e733e6e6f6ca32bdf0d343bdbbb80f4
14,200
import re

def generateHashtags(randomTests):
    """Generates a hashtag from a level 1+ category name using Regular Expressions and other rules.

    Arguments:
        randomTests {Dictionary} -- A test category and hashtag (level 1+) categories

    Returns:
        Dictionary -- The input dictionary with added hashtag names

    NOTE(review): mutates the input dict in place ("tests" is an alias of
    *randomTests*, not a copy); "hashtagCategories" is replaced by a list of
    {"hashtag", "category"} dicts.
    """
    tests = randomTests
    hashtags = []
    for i in range(len(tests["hashtagCategories"])):
        unmodCat = tests["hashtagCategories"][i]  # keep the unmodified name for the result's "category" field
        # NOTE(review): re.sub('([^>]+)', '', cat) strips every char except '>',
        # so for a category containing no '>' the .replace('', '') below is a
        # no-op -- it does NOT remove bracketed text like "(E.12)" as the
        # original comment claimed. Confirm intent before changing.
        tests["hashtagCategories"][i] = tests["hashtagCategories"][i].replace(re.sub('([^>]+)', '', tests["hashtagCategories"][i]), "") # remove texts in brackets, e.g. (E.12)
        tests["hashtagCategories"][i] = tests["hashtagCategories"][i].replace(" ", "") # remove spaces
        tests["hashtagCategories"][i] = tests["hashtagCategories"][i].split("&") # split words by '&' (beacause these can be considered as two separate words)
        for j in range(len(tests["hashtagCategories"][i])): # since we now have arrays of one or more words
            tests["hashtagCategories"][i][j] = re.sub(r'[^A-Za-z]', '', tests["hashtagCategories"][i][j]) # remove non alphabetic characters
            tagAndCat = {
                # hashtags are lower-cased alphabetic-only tokens
                "hashtag" : tests["hashtagCategories"][i][j].lower(),
                "category" : unmodCat
            }
            hashtags.append(tagAndCat)
    tests["hashtagCategories"] = hashtags
    return tests
96478924f04706b88cb367d5aca334225771788b
14,201
def get_longest_element(focus):
    """Return the LENGTH of the longest element in *focus*.

    Despite the name, this returns an int length, not the longest value
    itself (the previous docstring claimed the latter); callers rely on
    the length, so the behavior is kept.

    :param focus: iterable of strings (keys or field values) to scan
    :return: length of the longest element, as an int
    """
    return len(max(focus, key=len))
d00c02d3d02ebd371f16ef16fd2e7daa0ead47ac
14,202
def get_num_classes(labels):
    """Get total number of classes.

    Assumes labels are 0-based consecutive ints: every value in
    ``[0, max(labels)]`` must be present.

    Arguments:
        labels {list} -- label values.

    Raises:
        ValueError: when intermediate labels are missing, or when fewer
            than two classes are present.

    Returns:
        int -- number of classes (max label + 1).
    """
    num_classes = max(labels) + 1
    # Set membership is O(1) per lookup instead of rescanning the list
    # for every candidate label (previously O(n^2) overall).
    present = set(labels)
    missing_classes = [l for l in range(num_classes) if l not in present]
    if missing_classes:
        raise ValueError('Missing labels: {missing_classes}'.format(missing_classes=missing_classes))
    if num_classes <= 1:
        raise ValueError('Invalid number of labels: {num_classes}'.format(num_classes=num_classes))
    return num_classes
4573bbf81fab0396454167287e86492e0ee7cd48
14,203
def store_edges(edge_data):
    """Group edges by their timestamp.

    :param edge_data: 2-D array whose rows are [time, src, dst, ...]
    :return: (sorted unique times, {time: [[src, dst], ...]}, edge count)
    """
    time_edge_map = {}
    for row in edge_data:
        timestamp = row[0]
        time_edge_map.setdefault(timestamp, []).append(row[1:3].tolist())
    sorted_times = sorted(time_edge_map)
    print('Number of unique epochs: ' + str(len(sorted_times)))
    return sorted_times, time_edge_map, len(edge_data)
885bb4f4e6480904a09bb7afe7d85053643a3f1a
14,204
def filter_linker_flavour(args):
    """Remove `-flavor gnu` (the flag and its argument) from *args*."""
    kept = []
    it = iter(args)
    for arg in it:
        if arg == '-flavor':
            # Skip the flavour value that follows the flag (if any).
            next(it, None)
        else:
            kept.append(arg)
    return kept
f37aac3c7b14f25350c945c5258d851402292330
14,205
import random


def random_string(length, charset):
    """Return a random string of the given length from the given character set.

    :param int length: The length of string to return
    :param str charset: A string of characters to choose from
    :returns: A random string
    :rtype: str
    """
    # randrange (rather than random.choice) keeps the original exception
    # type for an empty charset.
    limit = len(charset)
    picked = [charset[random.randrange(limit)] for _ in range(length)]
    return ''.join(picked)
1370f86a2e696ba6030b719ec8e32631f1865e01
14,206