def pop_required_arg(arglist, previousArg):
    """
    Pop the first element off the list and return it.

    If the list is empty, raise an exception about a missing argument
    after the given previousArg.
    """
    if not len(arglist):
        raise Exception("Missing required parameter after %s" % previousArg)
    head = arglist[0]
    del arglist[0]
    return head
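
# A minimal usage sketch (illustrative values, not part of the original record):
args = ["out.txt", "--verbose"]
assert pop_required_arg(args, "--output") == "out.txt"
assert args == ["--verbose"]  # the head element has been consumed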
ed35be7859326979dceb5843a07769e5a022a30b
56,157
def to_nested_tuples(item):
    """Converts lists and nested lists to tuples and nested tuples.

    Returned value should be hashable.
    """
    if isinstance(item, list):
        return tuple([to_nested_tuples(i) for i in item])
    else:
        return item
38bffdfca7e05cc22fdc663cdf75011336a9bf93
56,159
def parse_columns(columns):
    """Extract relevant values from columns of the NCBI Assembly Report."""
    accession = columns[0]
    refseq_category = columns[4]
    # taxid = columns[5]
    # species_taxid = columns[6]  # Helps with dog, for example
    organism_name = columns[7]
    assembly_level = columns[11]
    release_type = columns[12]
    # genome_rep = columns[13]
    release_date = columns[14].replace('/', '-')
    assembly_name = columns[15]
    refseq_accession = columns[17]
    return [release_type, assembly_level, refseq_category, assembly_name,
            release_date, organism_name, accession, refseq_accession]
28712c8d2b4d16d2ff412ded2932bd55b720b78c
56,165
def infile_to_yaml(yaml_schema, infile_schema, infile_struct):
    """Transform elements in a SOG Fortran-ish infile data structure
    into those of a SOG YAML infile data structure.

    :arg yaml_schema: SOG YAML infile schema instance
    :type yaml_schema: :class:`YAML_Infile` instance

    :arg infile_schema: SOG Fortran-ish infile schema instance
    :type infile_schema: :class:`SOG_Infile` instance

    :arg infile_struct: SOG Fortran-ish infile data structure
    :type infile_struct: nested dicts

    :returns: SOG YAML infile data structure.
    :rtype: nested dicts
    """
    def get_element(node, key):
        return infile_schema.get_value(
            infile_struct, '{0.infile_key}.{1}'.format(node, key))

    def transform(node):
        result = {
            'value': get_element(node, 'value'),
            'description': str(get_element(node, 'description')),
            'variable name': node.var_name,
        }
        units = get_element(node, 'units')
        if units is not None:
            result['units'] = str(units)
        return result

    def walk_subnodes(node):
        result = {}
        if not any(child.children for child in node.children):
            return transform(node)
        else:
            for child in node.children:
                result.update({child.name: walk_subnodes(child)})
            return result

    result = {}
    for node in yaml_schema:
        result.update({node.name: walk_subnodes(node)})
    return result
b70ff33efd26ebe3bea4a9d549da211e1e40fedd
56,168
def find_first_of_filetype(content, filterfiletype, attr="name"):
    """Find the first file of the given file type."""
    filename = ""
    for _filename in content:
        if isinstance(_filename, str):
            if _filename.endswith(f".{filterfiletype}"):
                filename = _filename
                break
        else:
            if getattr(_filename, attr).endswith(f".{filterfiletype}"):
                filename = getattr(_filename, attr)
                break
    return filename
950a48be91bd2d5bd017a94a788a28501d39e3ae
56,170
def getMObjectByMObjectHandle(handle):
    """
    Retrieves an MObject from the given MObjectHandle.

    :type handle: om.MObjectHandle
    :rtype: om.MObject
    """
    return handle.object()
71deab5a83723a67700ff4bea750d5705c0f7bef
56,171
import re


def match_rule(target, rule):
    """
    Match rule to a target.

    Parameters
    ----------
    target : dict
        Dictionary containing [(key, value), ...]. Keys must be str,
        values must be str or None.
    rule : dict
        Dictionary containing [(key, match), ...], to be matched to
        *target*. Match can be str specifying a regexp that must match
        target[key], or None. None matches either None or a missing key
        in *target*. If match is not None, and the key is missing in
        *target*, the rule does not match.

    Returns
    -------
    matched : bool
        Whether the rule matched. The rule matches if all keys match.
    """
    for key, value in rule.items():
        if value is None:
            if key in target and target[key] is not None:
                return False
        elif key not in target or target[key] is None:
            return False
        else:
            w = str(target[key])
            m = re.match(str(value), w)
            if m is None or m.end() != len(w):
                return False
    # rule matched
    return True
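
# A minimal usage sketch (illustrative values). Note that the regexp must
# match the *entire* target value, since m.end() is compared to len(w).
target = {"name": "eth0", "state": "up"}
assert match_rule(target, {"name": r"eth\d+"}) is True
assert match_rule(target, {"name": r"eth"}) is False  # partial match rejected
assert match_rule(target, {"mtu": None}) is True      # missing key matches None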
4ed15a631a637eb98015ae01e6e34e4b02e7aa91
56,178
def get_novelty_smi(gen_smis, ref_smis, return_novelty=False):
    """
    Get novel generated SMILES that do not exist in the training dataset.

    :param gen_smis: generated SMILES, in list format
    :param ref_smis: training SMILES, in list format
    :param return_novelty: if True, return the novel SMILES themselves
        (in canonical SMILES format) instead of the novelty ratio;
        default False
    """
    nov_smis = [i for i in gen_smis if i not in ref_smis]
    if return_novelty:
        return nov_smis
    else:
        return len(nov_smis) / len(gen_smis)
3953ddeb8968d5562edaccf7a95aeb62866c3176
56,184
def GetManufacturerInfo(e):
    """Organize the manufacturer information of an EDID.

    Args:
        e: The edid.Edid object.

    Returns:
        A dictionary of manufacturer information.
    """
    return {
        'Manufacturer ID': e.manufacturer_id,
        'ID Product Code': e.product_code,
        'Serial number': e.serial_number,
        'Week of manufacture': e.manufacturing_week,
        'Year of manufacture': e.manufacturing_year,
        'Model year': e.model_year
    }
e2217e74db5aed0afe51650a799d8b993236bb8c
56,187
def get_people_count(group):
    """Return the number of people in a group."""
    # Separate people from the group.
    people = group.split("\n")
    if "" in people:
        people.remove("")
    # Return people count.
    return len(people)
f95f740da8118ec80b6e34f5c01cd6edac8e6c92
56,191
def gen_list_gt(lst, no):
    """Returns list with numbers greater than no."""
    # syntax: [item for item in lst if condition]
    return [item for item in lst if item > no]
2f4ffff19639f224c2e571f7af6e7abe9ce57ee2
56,192
def remove_vat(price, vat):
    """Returns price excluding VAT, and the VAT amount."""
    price_ex_vat = price / ((vat / 100) + 1)
    vat_paid = price - price_ex_vat
    return price_ex_vat, vat_paid
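
# A minimal worked example (illustrative numbers): a gross price of 120.0
# at 20% VAT decomposes into 100.0 net plus 20.0 VAT, since 120 / 1.2 = 100.
net, vat_amount = remove_vat(120.0, 20)
assert round(net, 2) == 100.0 and round(vat_amount, 2) == 20.0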
44612fdd3e169c3b76ca0bfa055709d53e15d1b6
56,194
def _ParseSelector(selector):
    """This function parses the selector flag."""
    if not selector:
        return None, None
    selectors = selector.split(',')
    selector_map = {}
    for s in selectors:
        items = s.split('=')
        if len(items) != 2:
            return None, '--selector should have the format key1=value1,key2=value2'
        selector_map[items[0]] = items[1]
    return selector_map, None
e5a199fcfe500936b70d94a32cc2e001050f75c7
56,195
import os
import tempfile


def write_temp_deploy(source_prototxt, batch_size):
    """
    Modifies an existing prototxt by adding force_backward=True and
    setting the batch size to a specific value. A modified prototxt file
    is written as a temporary file.

    Inputs:
    - source_prototxt: Path to a deploy.prototxt that will be modified
    - batch_size: Desired batch size for the network

    Returns:
    - path to the temporary file containing the modified prototxt
    """
    fd, target = tempfile.mkstemp()
    os.close(fd)  # close the OS-level handle; the path is reopened below

    with open(source_prototxt, 'r') as f:
        lines = f.readlines()

    force_back_str = 'force_backward: true\n'
    if force_back_str not in lines:
        lines.insert(1, force_back_str)

    found_batch_size_line = False
    with open(target, 'w') as f:
        for line in lines:
            if line.startswith('input_dim:') and not found_batch_size_line:
                found_batch_size_line = True
                line = 'input_dim: %d\n' % batch_size
            f.write(line)

    return target
2ff20107b026346d3cae9afb08dbb853c0562e55
56,197
def onlyNormalHours(data, normal_hours):
    """
    This function takes in the data, and only spits out the data that
    falls under normal trading hours.

    Parameters
    ----------
    data : DataFrame
        dataframe of the stock data.
    normal_hours : list
        list containing the opening hour and closing hour of the market
        in datetime.

    Returns
    -------
    data : DataFrame
        the dataframe passed in, but without after hours data.
    """
    discard_pile = []
    for r in range(data.shape[0]):
        # Rewrite the normal hours so they are current with the day of
        # the current row in the dataframe.
        normal_hours[0] = normal_hours[0].replace(year=data['Date'][r].year,
                                                  month=data['Date'][r].month,
                                                  day=data['Date'][r].day)
        normal_hours[1] = normal_hours[1].replace(year=data['Date'][r].year,
                                                  month=data['Date'][r].month,
                                                  day=data['Date'][r].day)
        # Check whether the current row is within normal trading hours;
        # if so keep it, otherwise add the index to the discard pile.
        if not (normal_hours[0] <= data['Date'][r] <= normal_hours[1]):
            discard_pile.append(r)
    # Now that the non-trading-hours rows have been recorded, drop them all.
    data = data.drop(discard_pile)
    # Reindex the dataframe so the indices are sequential again.
    data = data.reset_index(drop=True)
    return data
29c7489ab2f80a661e1be5f708b61c4ea99267b4
56,199
def dequote(token):
    """
    Return a token value stripped from quotes based on its token label.
    """
    quote_style_by_token_label = {
        'LITERAL-STRING-DOUBLE': '"',
        'LITERAL-STRING-SINGLE': "'",
    }
    qs = quote_style_by_token_label.get(token.label)
    s = token.value
    if qs and s.startswith(qs) and s.endswith(qs):
        return s[1:-1]
    return s
f214e27988fd3bfc516f8c4c67bf389c008498df
56,204
def delanguageTag(obj):
    """
    Function to take a language-tagged list of dicts and return an
    untagged string.

    :param obj: list of language-tagged dict
    :type obj: list

    :returns: string
    """
    if not isinstance(obj, list):
        return obj
    data = (obj if len(obj) else [{}])[-1]
    return data['@value'] if data.get('@value', '') else data.get('@id', '')
0bec893fe3fe02061147ff6fa4e8ed8878bd7378
56,207
def search_sorted(array, value):
    """
    Searches the given sorted array for the given value using a binary
    search which should execute in O(log N).

    Parameters
    ----------
    array : `numpy.ndarray`
        a 1D sorted numerical array
    value : float
        the numerical value to search for

    Returns
    -------
    index : int
        if `value` is between `array[0]` and `array[-1]`, then `index` is
        the integer index of `array` closest to `value`; if
        `value < array[0]` or `value > array[-1]`, a LookupError is raised
    """
    def index_to_check(rmin, rmax):
        return (rmin + rmax) // 2

    range_min = 0
    range_max_0 = len(array)
    range_max = range_max_0
    numloops = 0
    while numloops < 100:
        numloops += 1
        if (range_max - range_min) == 1:
            if (range_max == range_max_0) or (range_min == 0):
                raise LookupError(("For some reason, range_max-range_min " +
                                   "reached 1 before the element was found. " +
                                   "The element being searched for was " +
                                   "{0!s}. (min,max)=({1!s},{2!s})").format(
                                       value, range_min, range_max))
            else:
                high_index = range_max
        else:
            high_index = index_to_check(range_min, range_max)
        high_val = array[high_index]
        low_val = array[high_index - 1]
        if value < low_val:
            range_max = high_index
        elif value > high_val:
            range_min = high_index
        else:  # low_val <= value <= high_val
            if (2 * (high_val - value)) < (high_val - low_val):
                return high_index
            else:
                return high_index - 1
    raise NotImplementedError("Something went wrong! I got caught in a " +
                              "pseudo-infinite loop!")
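
# A minimal usage sketch (illustrative values), assuming numpy is available:
import numpy as np

arr = np.array([0.0, 1.0, 2.0, 3.0])
assert search_sorted(arr, 1.2) == 1  # arr[1] is the closest element to 1.2
assert search_sorted(arr, 1.8) == 2  # arr[2] is the closest element to 1.8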
6167ce64f6e1df1172a435cb9bc87150c917a8b4
56,209
def drop_duplicates(movies_df):
    """
    Drop duplicate rows based on `imdb_id`. If `imdb_id` is not in the
    columns, create it from `imdb_link`. Additionally, drop any movies
    with an `imdb_id` of 0.

    Parameters
    ----------
    movies_df : Pandas dataframe
        movie data

    Returns
    -------
    Pandas dataframe
        Data with duplicate rows dropped
    """
    # Extract `imdb_id` into a new column if not already present
    if 'imdb_id' not in movies_df.columns and 'imdb_link' in movies_df.columns:
        movies_df['imdb_id'] = movies_df['imdb_link'].str.extract(r'(tt\d{7})')
    # Drop movies with an `imdb_id` of 0
    movies_df = movies_df.query('imdb_id != "0"').copy()
    # Drop duplicate rows
    movies_df.drop_duplicates(subset=['imdb_id'], inplace=True)
    return movies_df
f498819bbc8ca5a652444220584ff2ed594f99e4
56,210
async def read_file(file) -> bytes:
    """Reads a given file."""
    await file.seek(0)
    return await file.read()
5ed5b770e5f8a79fd72472a639600bbf31a6a25d
56,211
def seir_model(prev_soln, dx, population, virus):
    """
    Solve the SEIR ODE

    :param prev_soln: previous ODE solution
    :param dx: timestep
    :param population: total population
    :param virus: Virus object
    :return: solution to ODE delta values
    """
    s, e, i, r = prev_soln
    ds = -virus.beta * s * i / population
    de = virus.beta * s * i / population - virus.sigma * e
    di = virus.sigma * e - virus.gamma * i
    dr = virus.gamma * i
    return ds, de, di, dr
e21ff1ed04f2a6118e191a3fc89c2414f2508951
56,213
def ensure3d(arr):
    """Turns 2D arrays into 3D arrays with a len(1) 3rd dimension.

    Allows functions to be written for both grayscale and color images.
    """
    if len(arr.shape) == 3:
        return arr
    elif len(arr.shape) == 2:
        return arr.reshape((arr.shape[0], arr.shape[1], 1))
ad070841bf401f8d3766441978956fdbf8acfe0a
56,215
def compare_log_to_resp(log, resp):
    """
    Search the log list for the responses in the response list.

    Search through the log list for the lines in the response list. The
    response list may contain substrings found in the log list lines.
    The response list lines must be found in the log list in the order
    they are specified in the response list (the log list may have extra
    lines which are ignored).

    Returns None if all the strings in the response list were found in
    the log list. Otherwise, returns the first missing response line.
    """
    response_line_no = 0
    response_line = resp[response_line_no].rstrip()
    for log_line in log:
        log_line = log_line.rstrip()
        if response_line in log_line:
            # Found a match, step to the next non-blank line in the
            # response list
            while True:
                response_line_no += 1
                if response_line_no >= len(resp):
                    # We ran through all of our response lines, success!
                    return None
                else:
                    response_line = resp[response_line_no].rstrip()
                    if len(response_line) > 0:
                        break
    # print("Log missing '{0:s}'".format(response_line))
    return response_line
c03ba316cd9e31b3a590574d9aee3afe12a56f74
56,220
def validation_file_name(test_name):
    """Std name for a result snapshot."""
    return test_name + '.snapshot~'
ab69f781ff9cb218b0400bb869ef61e4678d7c8d
56,225
def slug_provider(max_length):
    """
    Provides the tested slug data

    :param max_length: maximum number of characters in the slug
    :return: list of tuples where the first tuple element is string value
        and the second tuple element is whether such a value is valid
    """
    return [
        ("", False),
        ("a", True),
        ("a" * max_length, True),
        ("a" * (max_length + 1), False),
        ("ivanov", True),
        ("IVANOV", True),
        ("Ivanov", True),
        ("iVaNoV", True),
        ("ivanov123", True),
        ("ivanov-ivan", True),
        ("ivanov_ivan", True),
        ("ivanov.ivan", False),
        ("ivanov,ivan", False),
        ("(ivanov-ivan)", False),
        ("иванов-иван", False),
    ]
99c31d7f14377de4067aa1e180266ad1499b22f4
56,229
def lr_scheduler(epoch, init_lr=0.001, lr_decay_epoch=7):
    """
    Returns the current learning rate given the epoch. This decays the
    learning rate by a factor of 0.1 every lr_decay_epoch epochs.
    """
    return init_lr * (0.1 ** (epoch // lr_decay_epoch))
aa51f2b1bc8cd1c85cb0221717d3c97fa73fab48
56,231
def GetNextNodeID(nodes, dist):
    """Return the node ID with the minimum distance label from a set of
    nodes."""
    # empty nodes
    if not nodes:
        raise Exception('Empty Scan Eligible List!!')
    nodeID = nodes[0]
    min_ = dist[nodeID]
    for i in nodes:
        if dist[i] < min_:
            min_ = dist[i]
            nodeID = i
    return nodeID
46a2153a5bb86db12715825462a8351156c3a5b9
56,235
import re


def find_email(text):
    """
    Extract email from text.

    Parameters
    ----------
    text: str
        Text selected to apply transformation

    Examples:
    ---------
    ```python
    sentence = "My gmail is abc99@gmail.com"
    find_email(sentence)
    >>> 'abc99@gmail.com'
    ```
    """
    line = re.findall(r'[\w\.-]+@[\w\.-]+', str(text))
    return ",".join(line)
c1ebb0d851576aebc277fa48f7e3d2569a695d31
56,236
def import_module(callback):
    """
    Handle "magic" Flask extension imports:
    ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
    """
    def wrapper(inference_state, import_names, module_context, *args, **kwargs):
        if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):
            # New style.
            ipath = (u'flask_' + import_names[2]),
            value_set = callback(inference_state, ipath, None, *args, **kwargs)
            if value_set:
                return value_set
            value_set = callback(inference_state, (u'flaskext',), None,
                                 *args, **kwargs)
            return callback(
                inference_state, (u'flaskext', import_names[2]),
                next(iter(value_set)), *args, **kwargs
            )
        return callback(inference_state, import_names, module_context,
                        *args, **kwargs)
    return wrapper
e2250476df234f6353385c4f37f743cf4e76edde
56,238
def about_view(request):
    """Display a page about the team."""
    return {'message': 'Info about us.'}
2d6e5b98ac21aa2770a4aab7e4f9dce2b86e2cc1
56,239
def find_nth(str1, mystr, n):
    """
    Finds the n-th occurrence of a pattern in an input string and
    returns its starting index.
    """
    start = str1.find(mystr)
    while start >= 0 and n > 1:
        start = str1.find(mystr, start + len(mystr))
        n -= 1
    return start
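
# A minimal usage sketch (illustrative values): occurrences are counted
# without overlap, because the search resumes after each full match.
assert find_nth("abcabcabc", "abc", 2) == 3  # start of the 2nd occurrence
assert find_nth("abcabc", "xyz", 1) == -1    # str.find convention on a miss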
79eb499dfa2e923624302acfb19dc9f4af46768a
56,240
def order(x, count=0):
    """Returns the base 10 order of magnitude of a number"""
    if x / 10 >= 1:
        count += order(x / 10, count) + 1
    return count
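
# A minimal usage sketch (illustrative values):
assert order(1234) == 3  # 1234 is on the order of 10**3
assert order(7) == 0     # single-digit numbers have order 0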
c3eaf92d217ef86f9f2ccc55717cdbedf098613e
56,243
import torch


def get_batch_r(node_arr, device="cpu"):
    """Returns a batch of one user's ratings.

    Args:
        node_arr (2D float array): one line is [userID, vID1, vID2, rating]
        device (str): device used (cpu/gpu)

    Returns:
        (float tensor): batch of ratings
    """
    # The legacy torch.FloatTensor constructor does not accept a device
    # keyword for non-CPU devices; torch.tensor does.
    return torch.tensor(node_arr[:, 3], dtype=torch.float, device=device)
84d0a3f1bb5d3893c87141aa0b9addd99bef245c
56,245
import re


def to_camel_case(snake_case):
    """Turn `snake_case_name` or `slug-name` into `CamelCaseName`."""
    name = ""
    for word in re.split(r"[-_]", snake_case):
        name += word.title()
    return name
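
# A minimal usage sketch (illustrative values):
assert to_camel_case("snake_case_name") == "SnakeCaseName"
assert to_camel_case("slug-name") == "SlugName"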
b6ccb241052929eb0e5d7843d438bc467563c1da
56,249
def escape_shell(arg):
    """Escape a string to be a shell argument."""
    result = []
    for c in arg:
        if c in "$`\"\\":
            c = "\\" + c
        result.append(c)
    return "\"" + "".join(result) + "\""
4d74ba8e00ed6eea599852be9908f20cbbc036f5
56,252
from typing import Any
import fractions


def fractional(value: Any) -> Any:
    """Returns a human readable fractional number.

    The return can be in the form of fractions and mixed fractions.
    There will be some cases where one might not want to show ugly
    decimal places for floats and decimals.

    Pass in a string, or a number or a float, and this function returns
    a string representation of a fraction or whole number or a mixed
    fraction.

    Examples:
        fractional(0.3) will return '3/10'
        fractional(1.3) will return '1 3/10'
        fractional(float(1/3)) will return '1/3'
        fractional(1) will return '1'

    This will always return a string.

    Args:
        value: a number.

    Returns:
        Any: human readable number.
    """
    try:
        number = float(value)
    except (TypeError, ValueError):
        return value
    whole_number = int(number)
    frac = fractions.Fraction(number - whole_number).limit_denominator(1000)
    numerator = frac.numerator
    denominator = frac.denominator
    if whole_number and not numerator and denominator == 1:
        # this means that an integer was passed in
        # or variants of that integer like 1.0000
        return "%.0f" % whole_number
    if not whole_number:
        return "%.0f/%.0f" % (numerator, denominator)
    return "%.0f %.0f/%.0f" % (whole_number, numerator, denominator)
ad0e28ef519af41cfd06279e1e30efe5eba44b29
56,257
import gzip
import json


def read_json(filename):
    """
    Read a JSON file.

    Parameters
    ----------
    filename : str
        Filename. Must be of type .json or .json.gz.
    """
    if filename.endswith('json.gz'):
        with gzip.open(filename) as f:
            tree = json.load(f)
    elif filename.endswith('.json'):
        with open(filename) as f:
            tree = json.load(f)
    else:
        raise ValueError('Filename must be of type .json or .json.gz.')
    return tree
d785a8191d961204a1e0e066e0a1e38c6b7c66de
56,263
def message(msg_content, title=None, content_type=None, extras=None):
    """Inner-conn push message payload creation.

    :param msg_content: Required, string
    :param title: Optional, string
    :keyword content_type: Optional, MIME type of the body
    :keyword extras: Optional, dictionary of string values.
    """
    payload = {
        'msg_content': msg_content,
    }
    if title is not None:
        payload['title'] = title
    if content_type is not None:
        payload['content_type'] = content_type
    if extras is not None:
        payload['extras'] = extras
    return payload
74c092486f571bf7ede66fc091e333375ffc754f
56,264
import time


def exec_remote_cmd(client, cmd, t_sleep=5, verbose=False):
    """A wrapper function around paramiko.client.exec_command that helps
    with error handling and returns only once the command has been
    completed.

    Parameters
    ----------
    client : paramiko ssh client
        the client to use
    cmd : str
        the command to execute
    t_sleep : float, optional
        the amount of time to wait between querying if a job is complete
    verbose : str, optional
        print information about the command
    """
    if verbose:
        print("Remotely executing '{0}'".format(cmd))

    stdin, stdout, stderr = client.exec_command(cmd)

    while not stdout.channel.exit_status_ready():
        if verbose:
            print('Command not complete, checking again in {0} seconds.'.format(
                t_sleep))
        time.sleep(t_sleep)

    if stdout.channel.recv_exit_status() != 0:
        raise IOError('Error with command {0}: {1}'.format(
            cmd, " ".join(stderr)))

    return stdin, stdout, stderr
e6a79b58925252284c5427895d41ce3eed77f58f
56,271
def get_nodes_by_lease(db, lease_id):
    """Get node ids of hosts in a lease."""
    sql = '''\
    SELECT ch.hypervisor_hostname AS node_id
    FROM blazar.leases AS l
    JOIN blazar.reservations AS r ON l.id = r.lease_id
    JOIN blazar.computehost_allocations AS ca ON ca.reservation_id = r.id
    JOIN blazar.computehosts AS ch ON ca.compute_host_id = ch.id
    WHERE l.id = %s
    '''
    return db.query(sql, (lease_id,), limit=None)
ce074b4d8ffb32ae4c03b913e77319e5161df11d
56,272
def clamp(x, lower=0, upper=1):
    """Clamp a value to the given range (by default, 0 to 1)."""
    return max(lower, min(upper, x))
29ccb97e5fd65b1f859516cde9157affa307f7c1
56,273
def insert_leaf(csv_map, name, value, max_depth, current_depth):
    """
    Insert a leaf to its proper csv column, creating it if it does not
    exist. Note that this function will not add a new column if the
    associated value is None, because there's a chance that it may be
    revealed to be a recursive column in the future.

    :param csv_map:
    :param name:
    :param value:
    :param max_depth:
    :param current_depth:
    :return: True if the given value was added, False otherwise.
    """
    if name not in csv_map:
        if value is None:
            # If this is the first time encountering this name, but its
            # value is None, there is no way to know if it is truly a
            # leaf, so abort.
            return False
        csv_map[name] = [None for _ in range(current_depth)]
        for _ in range(max(max_depth.value - current_depth, 1)):
            csv_map[name].append(value)
    else:
        csv_map[name].append(value)
    return True
36a307c2d2dda20dc308860a5bceeb44fd154abd
56,274
def power_of_k(k, power=1):
    """
    Compute an integer power of a user-specified number k.
    """
    # initialize
    x = 1
    # multiply x by k, power times.
    for i in range(power):
        x *= k
    return x
79519dad002e36d25725f34bb087018fec2b6a64
56,275
def calc_assignment_average(assignment_list):
    """
    Function that will take in a list of assignments and return a list
    of average grades for each assignment.
    """
    return [sum(grades) / len(grades) for grades in assignment_list]
3bd061e778d46e4d6cb06672f23e944cffec7725
56,276
def selection_sort(items):
    """
    Sort items with a selection sort.
    """
    for index, item in enumerate(items):
        current_smallest = index
        for search_index in range(index + 1, len(items)):
            if items[search_index] < items[current_smallest]:
                current_smallest = search_index
        # Swap the current item with the smallest of the remainder.
        items[index], items[current_smallest] = (
            items[current_smallest], items[index])
    return items
66325c5e577961dc4f40c9ade7eb4764bdcc7898
56,277
def any_none(els):
    """Return True if any of the elements is None."""
    return any(el is None for el in els)
beea382b11f4f862b9982380504406f24f94b8ef
56,279
def load_fasta(path, separator=None, verbose=False):
    """Loads a fasta file.

    Returns a dictionary with the scaffold names as the key and the
    sequence as the value.
    """
    if verbose:
        print("Starting to load fasta!")
    try:
        with open(path, "r") as f:
            scaffolds = {}
            current_scaffold = ""
            for line in f:
                line = line.strip()
                if len(line) == 0:
                    continue
                if line[0] == ">":
                    line = line[1:]
                    if separator is not None:
                        line = line.split(separator)
                        line = line[0]
                    scaffolds[line] = []
                    current_scaffold = line
                    # if verbose: print(f"scaffold {line} loading")
                else:
                    scaffolds[current_scaffold].append(line)
        for scaffold in scaffolds.keys():
            scaffolds[scaffold] = "".join(scaffolds[scaffold])
        if verbose:
            print("Loading fasta completed")
        return scaffolds
    except Exception as exc:
        raise Exception(f"Error with opening the fasta file \"{path}\"") from exc
d644ddbb5dbdd02b5c39f45f35c1bb1a4e551449
56,283
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
    """Callback to checkpoint Module to prefix every epoch.

    Parameters
    ----------
    mod : subclass of BaseModule
        The module to checkpoint.
    prefix : str
        The file prefix for this checkpoint.
    period : int
        How many epochs to wait before checkpointing. Defaults to 1.
    save_optimizer_states : bool
        Indicates whether or not to save optimizer states for continued
        training.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to
        fit.
    """
    period = int(max(1, period))

    # pylint: disable=unused-argument
    def _callback(iter_no, sym=None, arg=None, aux=None):
        """The checkpoint function."""
        if (iter_no + 1) % period == 0:
            mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states)
    return _callback
007ab00fe2b0ce3b6acca76596ff5b68cd7cb8d6
56,285
def get_project_id_from_req(request):
    """Returns the project id from the request."""
    project_id = request.args.get("id", "").strip().lower()
    return project_id
e9b571be55abd2b60d7d0fb20f09a02ab2203fac
56,288
def replace_tmt_labels(dfp, dfm):
    """Replace default TMT labels with sample names provided by the
    metadata file.

    Parameters:
    -----------
    dfp : pandas dataframe
        protein/phosphoprotein dataset
    dfm : pandas dataframe
        metadata table that should contain at least 2 columns named
        'tmt_label' and 'sample'

    Returns:
    --------
    dfp : pandas dataframe
        protein/phosphoprotein dataset with tmt labels replaced by
        sample name
    """
    rdict = {t: s for t, s in zip(dfm['tmt_label'].tolist(),
                                  dfm['sample'].tolist())}
    dfp = dfp.rename(columns=rdict).copy()
    return dfp
ce82648cde1a6fc1f3d3128463dd5d623f6c4c2a
56,293
def get_tag2idx(df):
    """
    Returns tag maps from a given dataframe df

    Outputs:
        tag2idx: map from tag to idx
        idx2tag: map from idx to tag
    """
    tag_values = list(df["tag"].unique()) + ['PAD']
    tag2idx = {tag: idx for idx, tag in enumerate(tag_values)}
    idx2tag = {idx: tag for tag, idx in tag2idx.items()}
    return tag2idx, idx2tag
4c0d15bf567f47c82779798a0a69bb3966e65e65
56,294
def filterTests(module, testFilter, testNames):
    """
    Filters out all test names that do not belong to the given module
    and, if testFilter is given, that do not contain it as a name
    component.
    """
    filteredNames = []
    for testName in testNames:
        nameComponents = testName.split('.')
        testIsFromModule = module in nameComponents
        hasTestFilter = True
        if testFilter:
            hasTestFilter = testFilter in nameComponents
        if testIsFromModule and hasTestFilter:
            filteredNames.append(testName)
    return filteredNames
7d360bf49f3b96593a63c9977568305eccd58724
56,295
def get_batch_size(tensor, base_size):
    """Get the batch size of a tensor if it is a discrete or continuous
    tensor.

    Parameters
    ----------
    tensor: torch.tensor.
        Tensor to identify batch size
    base_size: Tuple.
        Base size of tensor.

    Returns
    -------
    batch_size: int or None
    """
    size = tensor.shape
    if len(base_size) == 0:  # Discrete
        return tuple(size)
    else:
        return tuple(size[:-len(base_size)])
cef7cf5487260f26abc0dac343d2a38690006330
56,306
def copy_word_pairs(word_pairs):
    """
    Return a copy of a list of WordPairs. The returned copy can be
    modified without affecting the original.
    """
    new_list = []
    for pair in word_pairs:
        pair_copy = pair.create_copy()
        new_list.append(pair_copy)
    return new_list
e05a3a616fef6ccd94f7e3b51e7e1f77dc7fc4b9
56,308
def filter_prediction(predictions, threshold):
    """
    Removes predictions where scores < threshold

    :param predictions: ``List[Dict[Tensor]]``
        The fields of the ``Dict`` are as follows:
            - boxes (``FloatTensor[N, 4]``)
            - labels (``Int64Tensor[N]``)
            - scores (``Tensor[N]``)
            - masks (``UInt8Tensor[N, 1, H, W]``) * optional
    :param threshold: ``float``, threshold
    :return: the same predictions without scores < threshold
    """
    samples = []
    for i, prediction in enumerate(predictions):
        # Assumes scores are sorted in descending order (as torchvision
        # detection models return them), so counting the scores above the
        # threshold gives the cut-off index.
        keep = len([x for x in predictions[i]['scores'] if x >= threshold])
        sample = {
            'boxes': prediction['boxes'][:keep].cpu(),
            'labels': prediction['labels'][:keep].cpu(),
            'scores': prediction['scores'][:keep].cpu(),
        }
        if 'masks' in prediction:
            sample['masks'] = prediction['masks'][:keep].cpu()
        samples.append(sample)
    return samples
6cec413989b05a229a9caaa479e251adc133ebc9
56,311
def time_to_index(t, sampling_rate):
    """Convert a time to an index"""
    return int(round(t * sampling_rate))
62103761e178101d283c0f0b0021b6740e57e83c
56,315
def tag_from_topic(mqtt_topic):
    """Extract the RuuviTag's name from the MQTT topic."""
    return mqtt_topic.split("/")[2]
6c70e6904703af0c88b58e0ed033a545ce48e2d1
56,317
def _verb_is_valid(verb_str):
    """Return True if verb_str is a valid French verb; else False."""
    return (len(verb_str) >= 3) and (verb_str[-2:] in ["er", "ir", "re"])
44df5cb95279de8b3096211e51a1022b256f839e
56,318
def _provider_uuids_from_iterable(objs):
    """Return the set of resource_provider.uuid from an iterable.

    :param objs: Iterable of any object with a resource_provider
                 attribute (e.g. an AllocationRequest.resource_requests
                 or an AllocationCandidates.provider_summaries).
    """
    return set(obj.resource_provider.uuid for obj in objs)
5f4fbac4be65ba982cd0e8e7176b098d7a7acd22
56,325
def _remove_duplicates(list_of_elements):
    """
    Remove duplicates from a list but maintain the order

    :param list list_of_elements: list to be deduped
    :returns: list containing a distinct list of elements
    :rtype: list
    """
    seen = set()
    return [x for x in list_of_elements if not (x in seen or seen.add(x))]
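
# A minimal usage sketch (illustrative values): unlike list(set(...)),
# the order of first occurrence is preserved.
assert _remove_duplicates([3, 1, 3, 2, 1]) == [3, 1, 2]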
871190be711883b53d46f2a8c6ea2f57c3ec695c
56,333
def remove_duplicate_field(event, field_to_keep, field_to_discard):
    """
    Remove a field when both fields are present

    :param event: A dictionary
    :param field_to_keep: The field to keep if both keys are present and
        the value is not None
    :param field_to_discard: The field to discard if the field_to_keep
        is not None
    :return: An event with a single bundle ID

    Examples:
        .. code-block:: python

            # Example #1
            event = {'A_Field': 'another_value', 'a_field': 'a_value '}
            event = remove_duplicate_field(event, field_to_keep='a_field',
                                           field_to_discard='A_Field')
            event = {'a_field': 'a_value '}
    """
    if field_to_keep in event and field_to_discard in event:
        if event[field_to_keep] is not None:
            del event[field_to_discard]
        else:
            del event[field_to_keep]
    return event
ab200bdaa76546e92f6a489021ae35e1f268b3ef
56,335
def pic_path(instance, filename):
    """Generates the path where user profile picture will be saved."""
    return "images/users/%s.%s" % (instance.user, filename.split('.')[-1])
f018833551d34fd56d4860e0611c3d1f1837f07e
56,336
def rowvectorize(X):
    """Convert a 1D numpy array to a 2D row vector of size (1, N)."""
    return X.reshape((1, X.size)) if X.ndim == 1 else X
3183e8aa0590427b2e51921d464f7bc767330a5d
56,341
def normalize_date(date_string):
    """
    Converts string to DD-MM-YYYY form

    :param date_string: string to be converted
    :return: normalized string
    """
    if date_string in ["", "-"]:
        return ""
    normalized_date = date_string.split("/")
    return "-".join(normalized_date)
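
# A minimal usage sketch (illustrative values):
assert normalize_date("01/02/2020") == "01-02-2020"
assert normalize_date("-") == ""  # placeholder dates normalize to empty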
953c476ec5d7b964788455955b8660cd6e36ad23
56,342
def _makeUserRequestContext(user):
    """
    Constructs a class to be used by settings.USERREQUESTCONTEXT_FINDER
    bound to a user, for testing purposes.
    """
    class UserRequestContextTestFinder(object):
        def get_current_user_collections(self):
            return user.user_collection.all()

        def get_current_user_active_collection(self):
            colls = self.get_current_user_collections()
            if colls:
                return colls.get(usercollections__is_default=True)

    return UserRequestContextTestFinder
77821e41ad2264a107d123fea14bc4ed8165480b
56,347
def reduce_unique(items):
    """
    Reduce a given list to its unique values, respecting the original
    order based on each value's first occurrence.

    Arguments:
        items (list): List of elements to reduce.

    Returns:
        list: List of unique values.
    """
    used = set()
    return [x for x in items if x not in used and (used.add(x) or True)]
21c01b8024d6bbaa3c44fd9a13507fbffa6ba122
56,349
import zipfile
import json


def get_pack_name(zip_fp: str) -> str:
    """Returns the pack name from the zipped contribution file's
    metadata.json file."""
    with zipfile.ZipFile(zip_fp) as zipped_contrib:
        with zipped_contrib.open('metadata.json') as metadata_file:
            metadata = json.loads(metadata_file.read())
    return metadata.get('name', 'ServerSidePackValidationDefaultName')
fe7f2a4d24db425ff71c8aeb27c2c7b650e5a446
56,351
def train_test_split(df, test_ratio):
    """
    Splitting the given dataset to train and test by preserving the
    order between the samples (it is time series data).

    Parameters
    ----------
    df: pandas' DataFrame. The dataset
    test_ratio: float. Indicate the ratio of the test after the split
    """
    train_ratio = 1 - test_ratio
    train_last_idx = int(len(df) * train_ratio)
    # Train is the earliest part of the series; test is the remainder.
    train_df = df.iloc[:train_last_idx, :].reset_index(drop=True)
    test_df = df.iloc[train_last_idx:, :].reset_index(drop=True)
    return train_df, test_df
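
# A minimal usage sketch (illustrative values), assuming pandas is available:
import pandas as pd

df = pd.DataFrame({"x": range(10)})
train, test = train_test_split(df, test_ratio=0.2)
assert len(train) == 8 and len(test) == 2
assert train["x"].iloc[-1] < test["x"].iloc[0]  # temporal order preserved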
eeb7f3cfcabbe7ca7baf827728226a51abc813cd
56,353
def check_csv(csv_name):
    """
    Checks if a CSV has three columns: response_date, user_id, nps_rating

    Args:
        csv_name (str): The name of the CSV file.

    Returns:
        Boolean: True if the CSV is valid, False otherwise
    """
    # Open csv_name as f using with open
    with open(csv_name, 'r') as f:
        first_line = f.readline()
        # Return true if the CSV has the three specified columns
        if first_line == "response_date,user_id,nps_rating\n":
            return True
        # Otherwise, return false
        else:
            return False
553b8ab08e7183b4358ff80ed4351b8e2b98f026
56,359
def _flow_tuple_reversed(f_tuple):
    """Reversed tuple for flow (dst, src, dport, sport, proto)"""
    return (f_tuple[1], f_tuple[0], f_tuple[3], f_tuple[2], f_tuple[4])
643af8917c0c800d1710d7167fd6caaa248b4767
56,360
def add_whitespace(bounding_box: list, border: int = 5) -> list:
    """
    Add white space to an existing bounding box.

    Parameters
    ----------
    bounding_box : list
        Four corner coordinates of the cropped image without whitespace.
    border : int
        The amount of whitespace you want to add on all sides.

    Returns
    -------
    Bounding box with increased whitespace.
    """
    assert len(bounding_box) == 4, "Bounding box can only have 4 corners"
    larger_box = []
    for i, corner in enumerate(bounding_box):
        if i < 2:
            larger_box.append(corner - border)
        else:
            larger_box.append(corner + border)
    return larger_box
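
# A minimal usage sketch (illustrative values): corners are assumed to be
# ordered (left, top, right, bottom), so the box grows on all sides.
assert add_whitespace([10, 10, 90, 90], border=5) == [5, 5, 95, 95]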
54d94b9b858e8ebf41275c7ab0a91c8520b06b68
56,362
def get_relevant_dates(year):
    """
    Gets the relevant dates for the given year
    """
    if year == 2003:
        return [
            "20030102",
            "20030203",
            "20030303",
            "20030401",
            "20030502",
            "20030603",
            "20030701",
            "20030801",
            "20030902",
            "20031001",
            "20031103",
            "20031201",
        ]
    # you can add additional elif-clauses here if needed, for example:
    # elif year == 2004:
    #     return ...
    else:
        raise ValueError("A list of relevant dates does not exist for the given year")
023d7d0b01b01ce59bd6d04cb98df3e13187deed
56,366
def num_transfer(df):
    """Total number of track transfers."""
    return df.extra.Type.isin(['TRANSFER']).sum()
b63861cf2b0299e7d01d946c0d5064ca02cf4792
56,367
import fnmatch


def csv_matches(list_of_files):
    """Return matches for csv files"""
    matches = fnmatch.filter(list_of_files, "*.csv")
    return matches
ee81a533731b78f77c4b4472c2e19c643d04525f
56,370
import warnings


def ignore_warnings(test_func):
    """Decorator to ignore specific warnings during unittesting.

    Returns:
        do_test: The input function 'test_func' in decorated form to
        appropriately handle resource warnings
    """
    def do_test(self, *args, **kwargs):
        with warnings.catch_warnings():
            # File I/O is known to raise ResourceWarning in unittest,
            # so ignore it.
            warnings.simplefilter("ignore", ResourceWarning)
            test_func(self, *args, **kwargs)
    return do_test
9fbe878bcb8079eb7784d4eaaa2a8062508e6a66
56,375
import pickle


def viw_pkl(path, start=0, end=10):
    """View a dict in a pickle file from start to end.

    Args:
        path (str): absolute path
        start (int, optional): start index of dict. Defaults to 0.
        end (int, optional): end index of dict. Defaults to 10.

    Returns:
        result (dict): a small dict

    Examples:
        >>> d = Dict()
        >>> d.viw_pkl("/home/directory/test.pkl", 0, 10)
        {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5,
         'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10}
    """
    with open(path, "rb") as f:
        dict_object = pickle.load(f)
    result = dict(list(dict_object.items())[start:end])
    return result
f8f9c8eb58e76447f5c9fb093ca1b838b8e733b1
56,382
def calculate_temperature_equivalent(temperatures):
    """
    Calculates the temperature equivalent from a series of average daily
    temperatures according to the formula:

        0.6 * tempDay0 + 0.3 * tempDay-1 + 0.1 * tempDay-2

    Parameters
    ----------
    temperatures : Pandas Series

    Returns
    -------
    Pandas Series
    """
    ret = (0.6 * temperatures + 0.3 * temperatures.shift(1)
           + 0.1 * temperatures.shift(2))
    ret.name = 'temp_equivalent'
    return ret
1f6f4e4062b7ce20d1579532250329d49cfc026a
56,384
def arrayFormat(arrays, format, indices, prependIndex=False):
    """
    Makes a list of formatted strings, where each string contains
    formatted values of all vars for one of ids, that is:

        ['vars[0][ids[0]] vars[1][ids[0]] ... ',
         'vars[0][ids[1]] ...',
         ... ]

    Arguments:
      - arrays: list of arrays
      - format: format string
      - indices: list of indices
      - prependIndex: if True, the index is prepended to each line. Note
        that in this case format has to contain an entry for the index.
    """
    out = []
    for ind in indices:
        # make a tuple of values for this id
        if prependIndex:
            row = [ind]
        else:
            row = []
        for ar in arrays:
            row.append(ar[ind])
        # format the current list
        out.append(format % tuple(row))
    return out
a349f1e7cbdeb8865dc9ee62362b984f2f16fff5
56,386
def _create_combiners(table_to_config_dict, table_to_features_dict):
    """Create a per feature list of combiners, ordered by table."""
    combiners = []
    for table in table_to_config_dict:
        combiner = table_to_config_dict[table].combiner or 'sum'
        combiners.extend([combiner] * len(table_to_features_dict[table]))
    return combiners
c8b6f11cccb3450068ec1054cc5978d193439e10
56,393
import torch


def metropolis_accept(
        current_energies,
        proposed_energies,
        proposal_delta_log_prob
):
    """Metropolis criterion.

    Parameters
    ----------
    current_energies : torch.Tensor
        Dimensionless energy of the current state x.
    proposed_energies : torch.Tensor
        Dimensionless energy of the proposed state x'.
    proposal_delta_log_prob : Union[torch.Tensor, float]
        The difference in log probabilities between the forward and
        backward proposal. This corresponds to log g(x'|x) - log g(x|x'),
        where g is the proposal distribution.

    Returns
    -------
    accept : torch.Tensor
        A boolean tensor that is True for accepted proposals and False
        otherwise
    """
    # log p(x') - log p(x) - (log g(x'|x) - log g(x|x'))
    log_prob = -(proposed_energies - current_energies) - proposal_delta_log_prob
    log_acceptance_ratio = torch.min(
        torch.zeros_like(proposed_energies),
        log_prob,
    )
    log_random = torch.rand_like(log_acceptance_ratio).log()
    accept = log_acceptance_ratio >= log_random
    return accept
ed968091752bbeb3624f3480237b3a37b95e0f66
56,394
import sqlite3


def robot_type_exists(cursor: sqlite3.Cursor, type: str) -> bool:
    """Determines whether or not the robot type exists in the db

    :param cursor: [description]
    :type cursor: sqlite3.Cursor
    :param type: [description]
    :type type: str
    :return: [description]
    :rtype: bool
    """
    type_exists: int = cursor.execute(
        '''SELECT COUNT(*) FROM robot_type WHERE id = :id''',
        {"id": type.upper()}
    ).fetchone().get("COUNT(*)")
    return type_exists == 1
4a7646c286ad92683e819263bed8410bff069736
56,395
import math


def siteXYZ(sites):
    """
    Computes geocentric site coordinates from parallax constants.

    Parameters
    ----------
    sites : dictionary
        Typically the value returned from read5c. Keys are obscodes and
        values must have attributes blank, rcos, rsin, and long.

    Returns
    -------
    dictionary
        Keys are the keys of the sites parameter, values are 3-tuples of
        floats, the geocentric coordinates, where blank = False and the
        3-tuple (None, None, None) where blank = True.
    """
    ObservatoryXYZ = {}
    for code, s in sites.items():
        if not s.blank:
            longitude = s.long * math.pi / 180
            x = s.rcos * math.cos(longitude)
            y = s.rcos * math.sin(longitude)
            z = s.rsin
            ObservatoryXYZ[code] = (x, y, z)
        else:
            ObservatoryXYZ[code] = (None, None, None)
    return ObservatoryXYZ
3cea4e4ee44d0efb1b72d5a4a8f02fc05f845f1d
56,397
def isgrashof(l1, l2, l3, l4):
    """
    Determine if a four-bar linkage is Grashof class.
    """
    links = [l1, l2, l3, l4]
    S = min(links)  # Shortest link
    L = max(links)  # Longest link
    idxL = links.index(L)
    idxS = links.index(S)
    # The two remaining links
    P, Q = [links[idx] for idx in range(len(links)) if idx not in (idxL, idxS)]
    # Grashof condition
    return (S + L) <= (P + Q)
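
# A minimal worked example (illustrative link lengths):
assert isgrashof(2, 4, 5, 6) is True    # S + L = 8 <= P + Q = 9
assert isgrashof(3, 4, 5, 10) is False  # S + L = 13 > P + Q = 9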
abbc703c7e0fa8736c0dc371e7e2b53570b994ef
56,401
def build_cmd(seq, out_file, query_id, args):
    """
    Builds a command to run blast.py on the command line.

    :param seq: A fasta sequence to BLAST
    :param out_file: The name of the file to store the results in
    :param query_id: The id of the query
    :param args: A dictionary of arguments needed for the BLAST and EC
        search from online databases.
    :return: The command to run blast.py on the command line.
    """
    cmd = ["py", "blast.py",
           "--fasta_sequence", seq,
           "--program", args['--program'],
           "--email", args["--email"],
           "--out_file", out_file,
           "--id", str(query_id),
           "--min_pct_idnt", str(args["--min_pct_idnt"]),
           "--min_qry_cvr", str(args["--min_qry_cvr"]),
           "--max_blast_hits", str(args["--max_blast_hits"]),
           "--max_uniprot_hits", str(args["--max_uniprot_hits"])]
    return cmd
fb5f21174652428b435ae78fafe596be7fbd0d28
56,405
def is_not_extreme_outlier(x, _min, _max):
    """Returns true if x >= _min and x <= _max."""
    return x >= _min and x <= _max
22514eedba0eeb44a3f25f2ba255ebce075c9ab6
56,412
def _constant_time_compare(first, second):
    """Return True if both string or binary inputs are equal, otherwise
    False.

    This function should take a constant amount of time regardless of how
    many characters in the strings match. This function uses an approach
    designed to prevent timing analysis by avoiding content-based short
    circuiting behaviour, making it appropriate for cryptography.
    """
    first = str(first)
    second = str(second)
    if len(first) != len(second):
        return False
    result = 0
    for x, y in zip(first, second):
        result |= ord(x) ^ ord(y)
    return result == 0
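
# A minimal usage sketch (illustrative values):
assert _constant_time_compare("secret", "secret") is True
assert _constant_time_compare("secret", "secreT") is False   # one char differs
assert _constant_time_compare("secret", "secrets") is False  # length differs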
abbbed038c3040e6251a410f8de3177598a1ee10
56,415
import binascii


def encode_hex(_bytes):
    """Encode binary string to hex string format."""
    return binascii.b2a_hex(_bytes)
8b2f09bdc2f5a7fb9526836fa8a4e22293cb430b
56,416
import requests


def edges(word: str, lang: str = "th"):
    """
    Get edges from the `ConceptNet <http://api.conceptnet.io/>`_ API.

    ConceptNet is a public semantic network, designed to help computers
    understand the meanings of words that people use. For example, the
    term "ConceptNet" is a "knowledge graph", and "knowledge graph" has
    "common sense knowledge" which is a part of "artificial intelligence".
    Also, "ConceptNet" is used for "natural language understanding" which
    is a part of "artificial intelligence".

    | "ConceptNet" --is a--> "knowledge graph" --has--> "common sense"\
      --a part of--> "artificial intelligence"
    | "ConceptNet" --used for--> "natural language understanding"\
      --a part of--> "artificial intelligence"

    With this illustration, it shows relationships (represented as *Edge*)
    between the terms (represented as *Node*).

    :param str word: word to be sent to ConceptNet API
    :param str lang: abbreviation of language (i.e. *th* for Thai, *en* for
        English, or *ja* for Japanese). By default, it is *th* (Thai).
    :return: edges of the given word according to the ConceptNet network.
    :rtype: list[dict]

    :Example:
    ::

        from pythainlp.corpus.conceptnet import edges

        edges('hello', lang='en')
        # output:
        # [{
        #    '@id': '/a/[/r/IsA/,/c/en/hello/,/c/en/greeting/]',
        #    '@type': 'Edge',
        #    'dataset': '/d/conceptnet/4/en',
        #    'end': {'@id': '/c/en/greeting',
        #            '@type': 'Node',
        #            'label': 'greeting',
        #            'language': 'en',
        #            'term': '/c/en/greeting'},
        #    'license': 'cc:by/4.0',
        #    'rel': {'@id': '/r/IsA', '@type': 'Relation', 'label': 'IsA'},
        #    'sources': [
        #      {
        #        '@id': '/and/[/s/activity/omcs/vote/,/s/contributor/omcs/bmsacr/]',
        #        '@type': 'Source',
        #        'activity': '/s/activity/omcs/vote',
        #        'contributor': '/s/contributor/omcs/bmsacr'
        #      },
        #      {
        #        '@id': '/and/[/s/activity/omcs/vote/,/s/contributor/omcs/test/]',
        #        '@type': 'Source',
        #        'activity': '/s/activity/omcs/vote',
        #        'contributor': '/s/contributor/omcs/test'}
        #    ],
        #    'start': {'@id': '/c/en/hello',
        #              '@type': 'Node',
        #              'label': 'Hello',
        #              'language': 'en',
        #              'term': '/c/en/hello'},
        #    'surfaceText': '[[Hello]] is a kind of [[greeting]]',
        #    'weight': 3.4641016151377544
        #  }, ...]

        edges('สวัสดี', lang='th')
        # output:
        # [{
        #    '@id': '/a/[/r/RelatedTo/,/c/th/สวัสดี/n/,/c/en/prosperity/]',
        #    '@type': 'Edge',
        #    'dataset': '/d/wiktionary/en',
        #    'end': {'@id': '/c/en/prosperity',
        #            '@type': 'Node',
        #            'label': 'prosperity',
        #            'language': 'en',
        #            'term': '/c/en/prosperity'},
        #    'license': 'cc:by-sa/4.0',
        #    'rel': {
        #        '@id': '/r/RelatedTo', '@type': 'Relation',
        #        'label': 'RelatedTo'},
        #    'sources': [{
        #        '@id': '/and/[/s/process/wikiparsec/2/,/s/resource/wiktionary/en/]',
        #        '@type': 'Source',
        #        'contributor': '/s/resource/wiktionary/en',
        #        'process': '/s/process/wikiparsec/2'}],
        #    'start': {'@id': '/c/th/สวัสดี/n',
        #              '@type': 'Node',
        #              'label': 'สวัสดี',
        #              'language': 'th',
        #              'sense_label': 'n',
        #              'term': '/c/th/สวัสดี'},
        #    'surfaceText': None,
        #    'weight': 1.0
        #  }, ...]
    """
    obj = requests.get(f"http://api.conceptnet.io/c/{lang}/{word}").json()
    return obj["edges"]
1cc7041a1805aa05e8318097cf88bad93962a6b0
56,420
def is_const(op):
    """Is const op."""
    return op.type in ["Const", "ConstV2"]
ea5fcfa38b8d077d4a50b3b7f01d4298e04e97cb
56,421
from pathlib import Path
from typing import MutableMapping
from typing import Any

import toml


def toml_loader(filepath: Path) -> MutableMapping[str, Any]:
    """Open and load a dict from a TOML file."""
    with filepath.open("r") as f:
        return toml.load(f)
b5a3f58434fa9c38d57fa6dc362b0f031226b38f
56,422
import json


def make_json_entry(data):
    """Return JSON entry to be added to indiacovid19.json."""
    j = [
        data.ref_date,
        data.active,
        data.cured,
        data.death,
        data.ref_date + ' ' + data.ref_time,
        'https://indiacovid19.github.io/webarchive/mohfw/{}_{}/'
        .format(data.ref_date, data.ref_time.replace(':', '')),
        ''
    ]
    return ' ' + json.dumps(j)
07affb5ad2d0bb77676411f3d0c1321bfbaafaa7
56,427
def get_node_index(node):
    """Returns the index (position) of a given node."""
    if node.parent is None:
        return 0
    return node.parent.children.index(node)
3d453ecf22d3c6a0c7acac6735e8c0ec0dd8abcd
56,429
def vector_l2norm(v):
    """
    Function that computes the L2 norm (magnitude) of a vector.

    Returns:
        float: magnitude value
    """
    return (v['x'] * v['x'] + v['y'] * v['y']) ** 0.5
e3abe58d03c9cfa5b39d791fa9a1da248d354ea8
56,430
import unicodedata


def normalize_unicode(string: str) -> str:
    """
    Normalizes a string to replace all decomposed unicode characters
    with their single character equivalents.

    :param string: Original string.
    :returns: Normalized string.
    """
    return unicodedata.normalize("NFC", string)
d7d8e77beeb524a3217b167f2e5fc71290164da8
56,433
def distinct(l):
    """
    Given an iterable will return a list of all distinct values.
    """
    return list(set(l))
02bc70f426af96dd99e7ae690754c8e61328efbe
56,434
def replace(params):
    """Replace in a string.

    This function works the same exact way a string replace operation
    would work with the ``.replace()`` method.

    **Syntax:** [replace]string[/replace]

    **Attributes**

    ``what``
        Substring to replace. (**Required**.)

    ``with``
        The replacement for every occurrence of ``what``. (**Required**)
    """
    params['string'] = params['string'].replace(
        params['var']['what'],
        params['var']['with']
    )
    return params
5522b1c8ed00b58ed0657395427322287f52f68d
56,437
def as_sequence(iterable):
    """Helper function to convert iterable arguments into sequences."""
    if isinstance(iterable, (list, tuple)):
        return iterable
    else:
        return list(iterable)
f04701a540efdee0bb262efdc65f91876fd3f441
56,448
def _contains_attribute_definition(line_str: str) -> bool:
    """Returns whether or not a line contains a dataclass field definition.

    Arguments:
        line_str {str} -- the line content

    Returns:
        bool -- True if there is an attribute definition in the line.
    """
    parts = line_str.split("#", maxsplit=1)
    before_comment = parts[0].strip()

    before_first_equal = before_comment.split("=", maxsplit=1)[0]
    parts = before_first_equal.split(":")
    if len(parts) != 2:
        # For now, I don't think it's possible to have a type annotation
        # containing ':'
        return False
    attr_name = parts[0]
    attr_type = parts[1]
    return not attr_name.isspace() and not attr_type.isspace()
a0f253c06931f1515f100bafee2f31912762e509
56,449
import re


def clean_data(text):
    """
    Accepts a single text document and performs several regex
    substitutions in order to clean the document.

    Currently not used. May potentially be used in the future to clean
    training data and tweets.

    Parameters
    ----------
    text: string or object

    Returns
    -------
    text: string or object
    """
    special_chars_regex = '[:?,\>$|!\'"]'
    white_spaces_regex = '[ ]{2,}'
    text = re.sub('[^a-zA-Z ]', "", text)
    text = re.sub(special_chars_regex, " ", text)
    text = re.sub(white_spaces_regex, " ", text)
    text = text.replace('\n', '%20')
    text = re.sub(r'http\S+', '', text)
    return text.lower()
38a4a188f55db46283c1944162fa432f20a834e4
56,451
def uniq(lst):
    """Build a list with unique consecutive elements in the argument.

    >>> uniq([1, 1, 2, 2, 2, 3])
    [1, 2, 3]
    >>> uniq([0, 1, 1, 2, 3, 3, 3])
    [0, 1, 2, 3]
    """
    assert len(lst) > 0
    uniq = [lst[0]]
    for i in range(1, len(lst)):
        if lst[i] != lst[i - 1]:
            uniq.append(lst[i])
    return uniq
cce38a2c93ef84b655c418b4b4d7a659d97ad32c
56,452