content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_cdo_remap_cmd(method):
    """Return the CDO operator name for a given interpolation method.

    Raises KeyError if the method is unknown.
    """
    # REMAPDIS - IDW using the 4 nearest neighbors
    cdo_operators = {
        'nearest_neighbors': 'remapnn',
        'idw': 'remapdis',
        'bilinear': 'remapbil',
        'bicubic': 'remapbic',
        'conservative': 'remapycon',
        'conservative_SCRIP': 'remapcon',
        'conservative2': 'remapycon2',
        'largest_area_fraction': 'remaplaf',
    }
    return cdo_operators[method]
17adfd670f4ccedffb9b9c1ac7c28d704d890da0
68,066
def ReadDevInstallPackageFile(filename):
    """Parse the dev-install package file.

    Args:
      filename (str): The full path to the dev-install package list.

    Returns:
      list[str]: The packages in the package list file.
    """
    with open(filename) as package_file:
        return [entry.strip() for entry in package_file]
0d3b7dd8932112fc2001d0034f02689915627cfc
68,067
def get_excludes(excludes):
    """
    Prepare rsync excludes as arguments

    :param excludes: iterable of exclude patterns
    :return: single string of '--exclude PATTERN ' pairs
    """
    # The result keeps a trailing space, matching the historical output.
    return ''.join(f'--exclude {pattern} ' for pattern in excludes)
02f2373abc1e3a9de0d0b55214f36d721ccc53ea
68,071
def get_termfrequency_vector(word2wid, wordlist):
    """
    Calculate the term frequency vector.

    Parameters
    ----------
    word2wid : dict
        Map a word to a unique ID (0, ..., |vocabulary|)
    wordlist : list of str

    Returns
    -------
    termfrequency vector : list of ints
        List has the same length as vocabulary
    """
    tf_vector = [0] * len(word2wid)
    oov_id = word2wid.get('<OOV>')
    for word in wordlist:
        wid = word2wid.get(word)
        if wid is not None:
            tf_vector[wid] += 1
        elif oov_id is not None:
            # Unknown word: count it in the out-of-vocabulary bucket.
            tf_vector[oov_id] += 1
    return tf_vector
f2831b800c2e47b5d28aad682f57e95abe9e475c
68,078
def check_indicators_all_false(indicators):
    """
    Input:
        indicators: (num_regions,) list
    Return:
        True if all indicators are False, False if any indicator is True
    """
    return not any(indicators)
4a190b001f140eb7a0f07f9e19fabfe62a57032b
68,084
def read_bader(in_name):
    """Read charges from a Bader program output file.

    Parameters
    ----------
    in_name : str
        Name of the file to read

    Returns
    -------
    charges : list of floats
        Charges in file

    """
    charges = []
    with open(in_name) as bader_file:
        for line in bader_file:
            fields = line.split()
            # Data rows start with the atom index. Checking `fields`
            # first fixes an IndexError the original raised on blank
            # lines (line.split()[0] on an empty line).
            if fields and fields[0].isdigit():
                charges.append(float(fields[4]))
    return charges
fa58e14760c1de9c76937122131dc9c62b03a1ce
68,086
from typing import List from typing import Dict def _sort_list_according_to_dataset( input_list: List[str], key_dict: Dict[str, int] ) -> List[str]: """ Sorts a list of radionuclides based on their order of appearence in the decay dataset. Parameters ---------- input_list : list List of radionuclide strings to be sorted. key_dict : dict Dictionary from the decay dataset with radionuclide strings as keys and their position (integers) in the decay dataset. Returns ------- list Sorted radionuclide list. Examples -------- >>> rd.inventory._sort_list_according_to_dataset(['Tc-99', 'Tc-99m'], rd.DEFAULTDATA.radionuclide_dict) ['Tc-99m', 'Tc-99'] """ return sorted(input_list, key=lambda radionuclide: key_dict[radionuclide])
3929455c7bc99ed7fe7e84157331d28079837423
68,087
async def transform_future(f, awaitable):
    """Apply a function to the result of an awaitable, return a future
    which delivers the result.
    """
    value = await awaitable
    return f(value)
e602b622d8a46c4529df9088cf0a665ce8ecef90
68,088
def _normalise_config(config: dict) -> dict: """ Removes special characters from config keys. """ normalised_config = {} for k in config: normalised_config[ k.replace("--", "").replace("<", "").replace(">", "") ] = config[k] return normalised_config
f677198a684f569e6d69c06545d7db32e85f93d1
68,094
def get_formid(spray_operator, spray_date, spray_operator_code=None):
    """
    Returns a string with 'DAY.MONTH.SPRAY_OPERATOR_CODE' from a
    spray_operator and spray_date.
    """
    # Fall back to the explicit code when no operator object is given.
    code = spray_operator.code if spray_operator else spray_operator_code
    return "%s.%s" % (spray_date.strftime("%d.%m"), code)
c2aeda25f3ebb0415cfc16f3fd992e8bb9397e94
68,095
def get_img_dev_width_mapping(column_width):
    """
    returns a img_dev_width mapping for a given column_dev_width, e.g.
    - col-md-12 -->: 1
    - col-lg-6 -->: 1/2
    """
    width_map = {
        '1': '1/6', '2': '1/5', '3': '1/4', '4': '1/3',
        '5': '1/2', '6': '1/2', '7': '2/3', '8': '2/3',
        '9': '3/4', '10': '1', '11': '1', '12': '1',
    }
    # The trailing dash-separated token is the column count.
    suffix = column_width.rsplit('-', 1)[1]
    return width_map.get(suffix)
233a927b42095b4f57ad365ca84e901682a8d53a
68,096
def split_items(item_string):
    """
    Splits a string of - separated items into its component parts.

    >>> split_items('true_values-0.1-0.2')
    ['true_values', 0.1, 0.2]
    >>> split_items('a-b-c')
    ['a', 'b', 'c']
    >>> split_items('true_value-8')
    ['true_value', 8]
    >>> split_items('elbow_joint-shoulder_joint-wrist_joint')
    ['elbow_joint', 'shoulder_joint', 'wrist_joint']
    >>> split_items('fred')
    ['fred']
    >>> split_items('None')
    [None]
    >>> split_items('alpha-0.1_gamma-0.9')
    ['alpha', '0.1_gamma', 0.9]
    """
    items = []
    # Clean up the types of the dash-separated parts.
    for part in item_string.split('-'):
        if part.isnumeric():
            items.append(int(part))
        elif part == 'None':
            items.append(None)
        else:
            try:
                items.append(float(part))
            except ValueError:
                # Was a bare `except`, which also swallowed
                # KeyboardInterrupt/SystemExit; only ValueError can occur.
                items.append(part)
    return items
9cd6d56f8b23722b286ae6738eea5e5229f1460e
68,097
import torch
from typing import Tuple


def build_param_list(
    input_dim: int,
    output_dim: int,
    device: torch.device,
    hidden_units: tuple = (64, 64),
    requires_grad: bool = True
) -> Tuple[list, list]:
    """
    Build parameter list of the neural network

    Parameters
    ----------
    input_dim: int
        dimension of the input of the neural network
    output_dim: int
        dimension of the output of the neural network
    device: torch.device
        cpu or cuda
    hidden_units: tuple
        hidden units of the neural network
    requires_grad: bool
        whether the parameters need to be trained

    Returns
    -------
    weights: a list of torch.Tensor
        weights of each layer of the neural network
    biases: a list of torch.Tensor
        biases of each layer of the neural network
    """
    weights = []
    biases = []
    # Walk consecutive (in, out) dimension pairs through all layers.
    layer_dims = [input_dim] + list(hidden_units) + [output_dim]
    for in_dim, out_dim in zip(layer_dims[:-1], layer_dims[1:]):
        weights.append(
            torch.zeros(out_dim, in_dim, requires_grad=requires_grad).to(device))
        biases.append(
            torch.zeros(out_dim, requires_grad=requires_grad).to(device))
    return weights, biases
872bd2a5095709245141168be02fc4f03f6674d8
68,098
def _indent_lines(s, indentation): """Indent lines in s with the given indentation.""" lines = [] for line in s.strip().split('\n'): stripped = line.strip() if stripped: lines.append(' ' * indentation + stripped) else: lines.append(stripped) return '\n'.join(lines)
63dc92bb43d42364b9427c9b4b5e21251ea2d8b7
68,104
def _tokenize_table(table): """Tokenize fields and values in table.""" return [(field.split(), value.split()) for field, value in table]
a79b88e9d3420183ac2f9cf6028f70eed53bf895
68,105
import audioop


def convert_stereo_to_mono(fragment, width):
    """
    Convert stereo fragment to mono.

    Parameters
    ----------
    fragment : bytes object
        Specifies the original fragment.
    width : int
        Specifies the fragment's original sampwidth.

    Returns
    -------
    bytes
        Converted audio in mono type.
    """
    # Mix left and right channels with equal 0.5 gain each, which keeps
    # the summed signal within range.
    # NOTE(review): audioop is deprecated and removed in Python 3.13 —
    # consider a replacement before upgrading; TODO confirm target version.
    new_fragment = audioop.tomono(fragment, width, 0.5, 0.5)
    return new_fragment
8e9fbc1836a3377a91501bebd390bbbcfb257d41
68,108
def get_identity_api_version(authUrl):
    """
    Returns the version of OpenStack's identity API based on the given
    authUrl. The version is returned as a string.
    :param authUrl:
    :return:
    """
    # Scan path segments from the end; the version segment starts with 'v'.
    for segment in reversed(authUrl.split('/')):
        if segment.startswith('v'):
            return segment[1:]
    raise ValueError('Could not extract API version from auth URL')
3ce45030064b52eb41684484fd3a2baceb295cb9
68,109
def DataFrame_to_JsonString(pandas_data_frame):
    """Converts a pandas.DataFrame to a Json string in the form
    {column_name:value,...}. Inverse of JsonString_to_DataFrame."""
    return pandas_data_frame.to_json(orient='records')
a5218549811a1444789ae814c1b53b8881856b0b
68,110
def get_terms_for_artist(conn, artistid):
    """
    Returns the list of terms for a given artist ID
    """
    # Parameterized query: the original concatenated artistid into the SQL
    # string, which is open to SQL injection and breaks on IDs containing
    # quotes.
    q = "SELECT term FROM artist_term WHERE artist_id=?"
    res = conn.execute(q, (artistid,))
    return map(lambda x: x[0], res.fetchall())
b0b3b09fabe90fd17373a38c6745f982fabe0729
68,114
def model_norm(model, pow=(2, 1), vidx=-1):
    """norm of a model (l2 squared by default)

    Args:
        model (float tensor): scoring model
        pow (float, float): (internal power, external power)
        vidx (int): video index if only one is computed (-1 for all)

    Returns:
        (float scalar tensor): norm of the model
    """
    inner, outer = pow
    if vidx == -1:
        # Full-model norm.
        return (model ** inner).abs().sum() ** outer
    # Single-video contribution only.
    return abs(model[vidx]) ** (inner * outer)
dbdb01b3e2fc6ee8aa6e1725a6544a733173b986
68,121
def mk_parser(header, null_values=('',)):
    """
    Return a function that will parse a record according to the given
    header.

    header: Sequence<(str, function<T>(str)->T)>
        Indexable collection of (name, func) pairs where the function
        parses a string into a value of the desired type.
    null_values: Collection<str>
        Set of unparsed values to replace with `None` instead of parsing.
    """
    # (Removed `hdr_len`, which was computed but never used.)
    def parse_record(record):
        # Zip pairs each (name, parser) with its field; names are unused here.
        return [(parse(text) if text not in null_values else None)
                for ((_, parse), text) in zip(header, record)]
    return parse_record
37f4187032e866b43011af4f2a8cb1efc67757a7
68,124
def cut(obj: str, sec: int = 40) -> list:
    """cut the string into list

    Args:
        obj (str): text need to be cut
        sec (int, optional): words count. Defaults to 40.

    Returns:
        list: list of words
    """
    # Bug fix: str.strip() returns a new string; the original called it
    # and discarded the result, so leading/trailing whitespace was kept.
    # The debug print() was also removed.
    obj = obj.strip()
    return [obj[i:i + sec] for i in range(0, len(obj), sec)]
a7aca1724009d9907d016f7d6152e6c9ab1be438
68,126
def merge_two_lists(t1, t2):
    """
    Merge two tuples of lists

    Args:
        t1: the tuple of the first lists
        t2: the tuple of the second lists

    Returns:
        a new tuple of the merged lists
    """
    first_a, second_a = t1
    first_b, second_b = t2
    return (first_a + first_b, second_a + second_b)
8ff0fa91203a254bcca0fa60bb5b760175c4815b
68,131
def copy(from_key, to_key):
    """
    Copies the value from one key to another in the data dict.
    """
    def callable_(key, data, errors, context):
        # Keys are tuples; swap the last element to address siblings.
        base = key[:-1]
        data[base + (to_key,)] = data.get(base + (from_key,))
    return callable_
0a7a0596d362eef9b712a5a22c6d6c769a97c68e
68,132
def make_palindrome(n: int, base: int = 10, odd_length: bool = False) -> int:
    """Forms a palindrome in the given base, using a positive integer seed.

    Args:
        n: A positive integer value.
        base: The base in which the resulting number will be a palindrome.
            Must be at least 2.
        odd_length: If ``True``, the resulting palindrome will contain
            ``2 * count_digits(n, base) - 1`` digits. If ``False``, the
            resulting palindrome will contain ``2 * count_digits(n, base)``
            digits.

    Returns:
        An integer which reads the same forward and backward in the given
        base, with an odd or even digit count per ``odd_length``.

    References:
        Adapted from https://projecteuler.net/overview=036.
    """
    result = n
    # For an odd-length palindrome the last digit of n is the middle digit
    # and must not be mirrored.
    remaining = n // base if odd_length else n
    while remaining:
        remaining, digit = divmod(remaining, base)
        result = result * base + digit
    return result
e6b21ea40ea226621017c2a71afddd0d4377a522
68,134
def _union_bounds(a, b): """ Union two (minx, miny, maxx, maxy) tuples of bounds, returning a tuple which covers both inputs. """ if a is None: return b elif b is None: return a else: aminx, aminy, amaxx, amaxy = a bminx, bminy, bmaxx, bmaxy = b return (min(aminx, bminx), min(aminy, bminy), max(amaxx, bmaxx), max(amaxy, bmaxy))
3c20d4480dbe54138797d6ff8e4ec24f7a64ace8
68,135
import torch
import math


def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"):
    """
    An initaliser which preserves output variance for approximately gaussian
    distributed inputs. This boils down to initialising layers using a uniform
    distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``,
    where ``dim[0]`` is equal to the input dimension of the parameter and
    the ``scale`` is a constant scaling factor which depends on the
    non-linearity used.

    See `Random Walk Initialisation for Training Very Deep Feedforward Networks
    <https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
    for more information.

    Parameters
    ----------
    tensor : ``torch.Tensor``, required.
        The tensor to initialise.
    nonlinearity : ``str``, optional (default = "linear")
        The non-linearity which is performed after the projection that this
        tensor is involved in. This must be the name of a function contained
        in the ``torch.nn.functional`` package.

    Returns
    -------
    The initialised tensor.
    """
    size = 1.
    # Estimate the input size. This won't work perfectly,
    # but it covers almost all use cases where this initialiser
    # would be expected to be useful, i.e in large linear and
    # convolutional layers, as the last dimension will almost
    # always be the output size.
    for dimension in list(tensor.size())[:-1]:
        size *= dimension

    # Bug fix: calculate_gain's second argument is an optional numeric
    # `param` (e.g. the leaky_relu slope); the original passed the tensor
    # itself, which breaks for nonlinearities that use the parameter.
    activation_scaling = torch.nn.init.calculate_gain(nonlinearity)
    max_value = math.sqrt(3 / size) * activation_scaling

    return tensor.uniform_(-max_value, max_value)
9b115264367f2188c1590a4344db57d04adde5df
68,136
import attr


def model(maybe_cls=None, name=None, ns=None, ns_map=None, order=None, **kwargs):
    """
    The decorator maps a class to an XML element. It uses a class name as a default
    element name. The default name can be altered using the decorator `name` argument.
    The `ns` argument defines a namespace of the element. By default class fields
    are serialized in in-class definition order. The order can be altered using
    `order` attribute.

    Internally the decorator adds a `dunder <https://wiki.python.org/moin/DunderAlias>`_\
    attribute.

    :param maybe_cls: decorated class if it is used as `@model` or `None` if it is used as `@model()`
    :param str name: model name. If `None` class name will be used
    :param str ns: element namespace. If `None` empty namespace will be used or if the model
                   is a nested one - namespace is inherited from the containing model
    :param dict ns_map: mapping from a namespace prefix to a full name. It is applied to the current
                        model and it's elements and all nested models
    :param tuple order: class fields serialization order. If `None` in-class definition order is used
    :param kwargs: arguments that will be passed to :py:func:`attr.ib`
    """
    def decorator(cls):
        # Make the class an attrs class; kw_only keeps field defaults
        # order-independent, and caller kwargs may override it.
        attrs_kwargs = {'kw_only': True, **kwargs}
        cls = attr.attrs(cls, **attrs_kwargs)
        if order:
            # Validate that every name in `order` is a declared attrs field.
            for element_name in order:
                if not hasattr(getattr(cls, '__attrs_attrs__'), element_name):
                    raise AssertionError("order element '{}' not declared in model".format(element_name))
        # Stash the XML mapping metadata for the serializer to read later.
        cls.__paxb_attrs__ = (name, ns, ns_map, order)
        return cls

    # maybe_cls's type depends on the usage of the decorator. It's a class
    # if it's used as `@model` but ``None`` if used as `@model()`.
    if maybe_cls is None:
        return decorator
    else:
        return decorator(maybe_cls)
3fc9e315e9ba14c26d3d3dfd116b6f154f408f2d
68,137
def confirm(message, default=None):
    """
    Ask the user a question, then wait for a yes or no answer. Returns the
    boolean result.

    An empty answer returns ``default`` when one is given.
    """
    valid = ('y', 'n', 'yes', 'no')
    result = input(message)
    if not result and default is not None:
        return default
    # Bug fix: the original used `result.lower() not in 'yn'`, a SUBSTRING
    # test, which rejected valid full words like "yes"/"no" (looping
    # forever) and wrongly accepted the two-letter answer "yn".
    while not result or result.lower() not in valid:
        result = input('Please answer yes or no: ')
    return result[0].lower() == 'y'
1cd3375ec2142c2ea1b3cc3ab09e5ab8b7ef5f72
68,138
def str_to_bytes(s):
    """
    Converts a given string into an integer representing bytes
    where G is gigabytes, M is megabytes, K is kilobytes, and B is bytes.

    Integers are returned unchanged. Raises ValueError for malformed sizes
    or unknown unit suffixes.
    """
    if isinstance(s, int):
        return s
    units = {'B': 1, 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}
    if len(s) < 2:
        raise ValueError('invalid size')
    try:
        # Last character is the unit, the rest the magnitude.
        return units[s[-1]] * int(s[:-1])
    except KeyError:
        raise ValueError('invalid units') from None
    except ValueError:
        raise ValueError('invalid size') from None
c36d5491698cfc3d2cc32854c209b20f84247b7f
68,141
def add(*items):
    """Adds one or more vectors."""
    if not items:
        return 0
    n = len(items[0])
    # All vectors must share the same dimension.
    for vec in items:
        if len(vec) != n:
            raise RuntimeError('Vector dimensions not equal')
    return [sum(vec[i] for vec in items) for i in range(n)]
837321ac712cd5a7f3c52c96e0512d0153749001
68,144
def split(sorted_data_points, attr_index, split_value):
    """Splits a list of data points sorted by a given element into two lists
    with one list containing tuples <= split_value and one list containing
    tuples > split_value.

    :param sorted_data_points: List of data points sorted by their values of
        the attribute specified by attr_index.
    :param attr_index: Index of the attribute of the tuple used to specify
        order of tuples.
    :param split_value: Value of tuple attribute where list of data points
        is split.
    :return: List containing two lists of data points as specified above."""
    # Find the first index whose attribute exceeds the split value; the
    # input is sorted, so everything after it belongs to the right half.
    cut = len(sorted_data_points)
    for position, point in enumerate(sorted_data_points):
        if point[attr_index] > split_value:
            cut = position
            break
    return [sorted_data_points[:cut], sorted_data_points[cut:]]
b6e682959c99dc7b68cfa38db562b65ada60d295
68,145
from bs4 import BeautifulSoup


def get_forms(content: str):
    """
    This function gets all the forms from a page source html.
    @param content: The page content.
    @type content: str
    @return: List of all the forms.
    @rtype: list
    """
    forms = list()
    for form in BeautifulSoup(content, "html.parser").find_all("form"):
        try:
            # Get the form action (requested URL).
            action = form.attrs.get("action", "").lower()
            # Get the form method (POST, GET, DELETE, etc).
            # If not specified, GET is the default in HTML.
            method = form.attrs.get("method", "get").lower()
            # Get all form inputs.
            inputs = []
            for input_tag in form.find_all("input"):
                input_dict = dict()
                # Get type of input form control.
                input_type = input_tag.attrs.get("type")
                # Get name attribute.
                input_name = input_tag.attrs.get("name")
                # Get the default value of that input tag.
                input_dict["value"] = input_tag.attrs.get("value", "")
                # Add all the attributes to the input dictionary.
                if input_type:
                    input_dict["type"] = input_type
                if input_name:
                    input_dict["name"] = input_name
                # Add the input dictionary object to the list of inputs.
                inputs.append(input_dict)
            # Adding the form to the list.
            forms.append({"action": action, "method": method,
                          "inputs": inputs, "form": form})
        except Exception:
            # Was a bare `except`, which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort skip for bad forms.
            continue
    return forms
b6ef97d0ca6d580df3e8d16dcf539e32e5e348e2
68,153
def result_to_tricks(result: int, level: int):
    """
    Convert a result to tricks made, e.g. +1 in a 3 level contract becomes 10
    """
    # The "book" of 6 tricks is always required before the contract level.
    book = 6
    return book + level + result
172a0a98b30b8cfa47a28c6c80bf93c89081ab29
68,154
def _compress_str(s, spaces_to_drop):
    """Remove `spaces_to_drop` spaces from `s`, alternating between left and right"""
    # Precondition: the string must contain at least as many spaces as we
    # are asked to drop, otherwise find/rfind would return -1.
    assert s.count(" ") >= spaces_to_drop
    from_left = True
    # `l` scans forward from the start, `r` scans backward from the end.
    l = 0
    r = len(s)
    # Indices of the space characters selected for removal.
    drop = set()
    remaining_spaces = spaces_to_drop
    while remaining_spaces > 0:
        if from_left:
            l = s.find(" ", l)
            drop.add(l)
            l += 1  # since `s.find` is inclusive, but we need exclusive
        else:
            # rfind's end bound is exclusive, so reusing the found index as
            # the new bound already excludes it on the next pass.
            r = s.rfind(" ", 0, r)
            drop.add(r)
        from_left = not from_left
        remaining_spaces -= 1
    # Sanity check: left and right scans must not have met and picked the
    # same index twice.
    assert len(drop) == spaces_to_drop
    # Rebuild the string without the selected indices.
    return ''.join([l for (i, l) in enumerate(s) if i not in drop])
b7cf1e1e55c319dffe6579ac49a2db18431e9dfb
68,156
def GetProp(bStep, name, default):
    """Returns a build step property or |default| if the property is not set."""
    build = bStep.build
    try:
        return build.getProperty(name)
    except KeyError:
        # Property not set on this build.
        return default
f08044cb1c9c4c97531c15f8f274b4c0f235a944
68,159
import tempfile


def create_staging_directory(parent_dir=None):
    """Creates a temporary staging directory at the specified location.

    If no `parent_dir` is specified, the platform-specific "temp"
    directory is used.
    """
    return tempfile.mkdtemp(dir=parent_dir, prefix="staging_")
6170f494bdc5a73f1cf221cf90e25086f6b092c5
68,165
def getFile(data):
    """ Returns the path to the file for the given request """
    raw_path = data[1]
    return raw_path.decode("utf-8")
b6c4c9eae5009725d38a9f71c04028f34ab24b29
68,167
def strip_edges(matrix: list) -> list:
    """return a matrix without it's edges"""
    trimmed = []
    # Drop the first/last rows, then the first/last column of each row.
    for row in matrix[1:-1]:
        trimmed.append(row[1:-1])
    return trimmed
f7e5e04d4507e7709a14f6f07015bacabfa94f37
68,169
def function_to_str(fun):
    """Convert a function to a function name."""
    name = fun.__name__
    return name
7ea0ff2ad871e585b800251555933ca5cab7bda5
68,178
def horner(x0: float, coefficients: list) -> float:
    """A function that implements the Horner's method for evaluating a
    polynomial, with coefficients in ascending-degree order, at x = x0.

    Time complexity: O(n), where n = len(coefficients)."""
    assert isinstance(coefficients, list)
    assert all(isinstance(c, (float, int)) for c in coefficients)
    assert isinstance(x0, (float, int))
    acc = 0
    # Fold from the highest-degree coefficient downward.
    for coeff in reversed(coefficients):
        acc = acc * x0 + coeff
    return acc
e3a0ca29bf8281abf22971ed8c48678b0dd44090
68,179
def modify_ref_method_str(df, param): """Subroutine for Ref_API_Query tha replaces various characters in data columns containing text, including the method name and the parameter units. Instrument Method names retrieved from the method code lookup table are specified in all upper case characters. These are converted to title cased characters (e.g., This Is Title Cased Text). While this imrpoves legibility some phrases (particularly acronyms, conjunctions, prepositions, ect.) should not be title cased. This function replaces specific phrases with the expected format. In addition, abbreviated unit names (used by AirNow) are converted to long format text to ensure consistency for reference data retreived from AQS, AirNow, and AirNowTech, and also to improve legibility. Args: df (pandas DataFrame): Dataframe resulting from API query. param (str): The evaluation parameter. Returns: df (pandas DataFrame): Modified dataframe with method and unit strings corrected. """ # Lookup dictionary of phrases that shouldn't be title cased replace = {'Method': {'Api': 'API', 'Frm': 'FRM', 'Fem': 'FEM', 'Lpm': 'LPM', ' At ': ' at ', 'Bam': 'BAM', 'Pm': 'PM', 'Vscc': 'VSCC', 'Te': 'TE', ' Or ': ' or ', 'W/': 'w/', ' And ': ' and '}, 'Unit': {'PPB': 'Parts per billion', 'PPM': 'Parts per million', 'UG/M3': 'Micrograms/cubic meter'} } for attrib in replace: for oldstr, newstr in zip(replace[attrib], replace[attrib].values()): col = param + '_' + attrib df[col] = df[col].astype(str).str.replace(oldstr, newstr) return df
eef1516eb1a5ec3345c2e7f7652374efad081d96
68,182
def _getbool_from_str(s): """Convert given string into bool value. Defaults to False. """ return (s or '').lower() in ['1', 'y', 'yes', 't', 'true']
a4a337eb67d6b72c8327233a97fe107dd7ba0a26
68,184
def exception_msg(exception):
    """Helper method to extract exception message for both py2 and py3
    exception types

    :param exception: Exception object
    :returns: string representing the exception message
    """
    # Sentinel distinguishes "no .message attribute" from ".message is None".
    _missing = object()
    message = getattr(exception, 'message', _missing)
    if message is not _missing:
        return str(message)
    return str(exception)
d777413a5031d6b5270aad7a16d90de31eb64eab
68,191
def parse_s3_key(key):
    """Parses the S3 object path into (name, ver)."""
    # Take the final path component, then split name from version at the
    # last dash.
    filename = key.rsplit("/", 1)[-1]
    return filename.rsplit("-", 1)
53386fee017103ec5e2f3a5fc66540d5ca3d61fe
68,197
def complete_multipart_upload(s3_obj, bucketname, object_key, upload_id, parts):
    """
    Completes the Multipart Upload

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        object_key (str): Unique object Identifier
        upload_id (str): Multipart Upload-ID
        parts (list): List containing the uploaded parts which includes
            ETag and part number

    Returns:
        dict : Dictionary containing the completed multipart upload details

    """
    return s3_obj.s3_client.complete_multipart_upload(
        Bucket=bucketname,
        Key=object_key,
        UploadId=upload_id,
        MultipartUpload={"Parts": parts},
    )
7a68b331df2e531ba8a58b94e1f020d91502a8b1
68,199
def postprocess(y):
    """
    Process the predicted result to eliminate values more than 1 or less
    than 0 (in-place clamp to [0, 1]).

    :param y: Raw predicted result
    :return: Post processed result (the same array, modified in place)
    """
    # The two masks are disjoint, so the order of clamping does not matter.
    y[y > 1] = 1
    y[y < 0] = 0
    return y
bf80c7e37df7784ed825767bd69325e176cb4cd7
68,205
import warnings
import functools


def warnUnless(ok, txt):
    """
    Decorator to raise warning unless condition is True

    This function must be used as a decorator

    Parameters
    ----------
    ok: bool
        Condition to raise the warning or not
    txt: str
        Text to display in the warning
    """
    def inner(fct):
        # functools.wraps preserves the decorated function's name,
        # docstring and other metadata (the original wrapper hid them).
        @functools.wraps(fct)
        def wrapper(*args, **kwargs):
            warnings.warn("%s %s" % (fct.__name__, txt))
            return fct(*args, **kwargs)
        return wrapper
    if not ok:
        return inner
    # Condition holds: return an identity decorator.
    return lambda f: f
c2375a9a685a4e5bfbb54d807a500a9c296bcbac
68,209
def downloadRange(app):
    """ Get the download range of an app. """
    count = int(app['downloads'])
    # Check the largest bucket first; fall through to smaller ones.
    if count >= 100000000:
        return '100,000,000+'
    if count >= 1000000:
        return '1,000,000-99,999,999'
    if count >= 10000:
        return '10,000-999,999'
    if count >= 100:
        return '100-9,999'
    return '0-99'
9964524de823922cb3bf10f533ad54f7ea129a1b
68,210
import re
import logging


def extract_state(resume_segments):
    """
    Find first text which matches one of the states in the string_to_search

    :param resume_segments: Dictionary containing segmented resume data
    :type resume_segments: Dictionary
    :return: state code
    :rtype: str
    """
    state_codes = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE',
                   'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY',
                   'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT',
                   'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
                   'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX',
                   'UT', 'VA', 'VT', 'WA', 'WI', 'WV', 'WY']
    pattern = re.compile(r'\b(' + '|'.join(state_codes) + r')\b')
    try:
        for text_line in resume_segments['contact_info']:
            match = pattern.search(text_line)
            if match:
                return match.group()
        return None
    except Exception as e:
        # Best-effort parser: log and report "no state found".
        logging.error('Issue parsing state:: ' + str(e))
        return None
fab0254dafaf836c4dae024bba0163466fb36fce
68,216
def get_day_of_week(date):
    """ Returns the day of the week for the date object passed. """
    # %A expands to the locale's full weekday name.
    day_name = date.strftime("%A")
    return day_name
a87b6e34c1d877abf62d30021d1946a4991f8b4d
68,218
def get_range(value):
    """
    Find the max/min ranges for a Nagios range comparison.

    Nagios handles ranges by accepting them as strings. The curent rules
    according to the API assume [@]start:end and include:

    * If start is empty, start is 0.
    * If start is ~, start is -infinity.
    * Alert if value is <= start, or >= end.
    * If prepended with @, alert if value is between start and end.

    The fact 0 and : are optional make it somewhat confusing and counter-
    intuitive; most plugin authors don't know that <= 0 should trigger
    alerts.

    :param value: Nagios-compatible range string to parse.

    :return list: A single list with three elements representing the min
        and max boundaries for the range, and whether or not to invert the
        match.
    """
    raw = value

    # If we find a '@' at the beginning of the range, we should invert
    # the match.
    invert = value.startswith('@')
    if invert:
        value = value.lstrip('@')

    # The : separates a max/min range. If it exists, there is at least
    # a minimum. We'll start our ranges at zero and infinity so we don't
    # have to worry about complex testing logic.
    bottom = 0
    top = float('infinity')

    # Bug fix: the original tested `value.find(':') > 0`, which missed
    # ranges like ':10' where the colon is the first character (start
    # should default to 0 per the Nagios spec).
    if ':' in value:
        (bottom, top) = value.split(':')
        top = float('infinity') if top == '' else float(top)
        if bottom == '':
            bottom = 0
        elif bottom == '~':
            bottom = -float('infinity')
        else:
            bottom = float(bottom)
    else:
        top = float(value)

    return (bottom, top, invert, raw)
2852c9add03e88b1f96796715fa0a800916ea638
68,220
def get_hourly_rate(target_count, cycle_minutes):
    """Get hourly rate of emails sent."""
    hours = cycle_minutes / 60
    return target_count / hours
8efcde55721880d624cb56c16b0001b7041fe6d3
68,229
def _remove_chars_py3(x_str, del_chars): """Utility to remove specified characters from string. Parameters ---------- x_str : str Generic input string. del_chars : str String containing characters we would like to remove. Returns ------- x_str : str Generic input string after removing characters in `del_chars`. """ translator = str.maketrans("", "", del_chars) x_str = x_str.translate(translator) return x_str
6069281d9958b72cdb84fa7af83786f1d05da01e
68,231
import re


def make_strike_tags(line: str) -> str:
    """Find strike marks in string and replace them with <s> tags."""
    # Verbose pattern: '--text--' not adjacent to word chars, dashes,
    # whitespace or angle brackets at the boundaries.
    strike_pattern = re.compile(
        r"""
        (?<![\w-])
        (--)
        (?![<>\s-])
        ([^<>]+?)
        (?<![<>\s-])
        \1
        (?![\w-])
        """,
        flags=re.X,
    )
    return strike_pattern.sub(r'<s>\2</s>', line)
386b0528c6706d6efec1a96738f983c2d3ef1d50
68,233
def check_fields_of_view_format(fields_of_view):
    """Confirm that the input fields of view is valid.

    Parameters
    ----------
    fields_of_view : list of int
        List of integer fields of view, or the string "all".

    Returns
    -------
    str or list of int
        Correctly formatted fields_of_view variable.

    Raises
    ------
    TypeError
        If the input is neither "all" nor a list coercible to integers.
    """
    # Guard clauses replace the original's nested if/else pyramid.
    if fields_of_view == "all":
        return fields_of_view
    if not isinstance(fields_of_view, list):
        raise TypeError(
            f"Variable of type list expected, however type {type(fields_of_view)} was passed."
        )
    if all(isinstance(x, int) for x in fields_of_view):
        return fields_of_view
    try:
        return list(map(int, fields_of_view))
    except ValueError as err:
        # Chain the cause so the original conversion error stays visible.
        raise TypeError(
            "Variables of type int expected, however some of the input fields of view are not integers."
        ) from err
f28a70f4105d333756437acb3877e78b2151f594
68,237
def disconnect_call_by_id(log, ad, call_id):
    """Disconnect call by call id.

    ``log`` is unused but kept for interface compatibility.
    Always returns True.
    """
    ad.droid.telecomCallDisconnect(call_id)
    return True
e83817eaf181e3e023f6ac9b877bec6d66682fdd
68,239
import io
import math


def build_pretty_binary_heap(heap: list, total_width=36, fill=" ") -> str:
    """Returns a string (which can be printed) representing heap as a tree.

    To increase/decrease the horizontal space between nodes, just
    increase/decrease the float number h_space.

    To increase/decrease the vertical space between nodes, just
    increase/decrease the integer number v_space.
    Note: v_space must be an integer.

    To change the length of the line under the heap, you can simply change
    the line_length variable."""
    if not isinstance(heap, list):
        raise TypeError("heap must be an list object")
    if len(heap) == 0:
        return "Nothing to print: heap is empty."
    output = io.StringIO()
    last_row = -1
    # Horizontal stretch factor and blank lines between tree levels.
    h_space = 3.0
    v_space = 2
    for i, heap_node in enumerate(heap):
        # Tree level of node i in a 1-based binary heap layout:
        # floor(log2(i + 1)); node 0 is the root at row 0.
        if i != 0:
            row = int(math.floor(math.log(i + 1, 2)))
        else:
            row = 0
        if row != last_row:
            # Starting a new level: emit the vertical spacing.
            output.write("\n" * v_space)
        # Level `row` holds 2**row nodes; divide the stretched width
        # evenly so each node is centered in its own column.
        columns = 2 ** row
        column_width = int(math.floor((total_width * h_space) / columns))
        output.write(str(heap_node).center(column_width, fill))
        last_row = row
    s = output.getvalue() + "\n"
    # Horizontal rule under the rendered tree.
    line_length = total_width + 15
    s += ('-' * line_length + "\n")
    return s
3192eb35813ee2f54231165c9cd7879bc78d5a88
68,245
def check_keys(frame, *keys):
    """
    Function to check if all keys are in frame

    Parameters
    ----------
    frame : I3Frame
        I3Frame
    keys:
        Series of keys to look for in frame

    Returns
    -------
    boolean
        Whether or not all the keys in keys are in frame
    """
    return all(key in frame for key in keys)
3fe90171dca3fe53aa6641fcec5e4f05a410808b
68,246
def gs_to_public_url(gs_url):
    """Converts a gs:// URI to a HTTP URL.

    Raises:
        ValueError: if gs_url does not start with 'gs://'.
    """
    # Input validation with ValueError instead of assert: asserts are
    # stripped when Python runs with -O, silently disabling the check.
    if not gs_url.startswith('gs://'):
        raise ValueError('expected a gs:// URL, got: %r' % (gs_url,))
    return gs_url.replace('gs://', 'https://storage.googleapis.com/', 1)
ff00fa1876f35be65b4c9b3314120ffd5cb0498a
68,247
from typing import List
from typing import Tuple


def get_ranges(nums: List[int]) -> List[Tuple[int, int]]:
    """Reduce a list of integers to tuples of local maximums and minimums.

    Consecutive runs of integers collapse to (start, end) tuples; an
    isolated value becomes (value, value).

    :param nums: List of integers.
    :return ranges: List of tuples showing local minimums and maximums
    """
    # Bug fix: the original indexed nums[1] unconditionally, crashing on
    # single-element input; it also had no empty-list handling.
    if not nums:
        return []
    nums = sorted(nums)
    ranges = []
    run_start = nums[0]
    prev = nums[0]
    for value in nums[1:]:
        if value - prev > 1:
            # Gap found: close the current run and start a new one.
            ranges.append((run_start, prev))
            run_start = value
        prev = value
    ranges.append((run_start, prev))
    return ranges
b08f02de62297073bdd6639a17e9f6572181cb2e
68,250
def read_last(file_name, n_lines=1):
    """
    Read the last *n_lines* lines of a file.

    Parameters
    ----------
    file_name : str
        Complete path of the file to read.
    n_lines : int, optional
        Number of trailing lines to return (default 1).

    Returns
    -------
    str
        The last *n_lines* lines joined with newlines. Note the API
        reports a missing file via a returned message string rather
        than raising.
    """
    try:
        with open(file_name, mode='r') as infile:
            lines = infile.readlines()
    except IOError:
        # Typo fixed: message previously read "IOEror".
        return 'IOError in read_last_line: this file does not exist.'
    try:
        last_lines = lines[-n_lines:]
        return '\n'.join(last_lines)
    except IndexError:
        return 'IndexError in read_last_line: no last line appears to exist in this file.'
49ab2af4551c4d9d28e0db911cc7d846f866d9e8
68,256
def render_group(path, prefix):
    """Renders test_groups which represents a set of CI results.

    Follows this format:
        test_group('test-group-name', 'gcs-path')
    """
    gcs_path = "{}/{}".format(prefix, path)
    return "test_group(\n '{}',\n '{}'),".format(path, gcs_path)
44ecaa47ec137edfc2a67cbee509a9f912f0fb9a
68,257
def is_between(check, start, end):
    """
    Checks whether a date falls in the inclusive range [start, end].

    :param check:
    :type check: date
    :param start:
    :type start: date
    :param end:
    :type end: date
    :return: True or False
    """
    return start <= check <= end
26530be17d34bde06a5fe68cd1ca7418cfeb1da1
68,262
def coalesce_dictionaries(data, merge_field="id"):
    """Takes a sequence of dictionaries, merges those that share the same
    merge_field, and returns a list of resulting dictionaries.

    :param data: iterable of dicts, each containing *merge_field*.
    :param merge_field: key whose value identifies which dicts to merge;
        later dicts overwrite earlier keys within the same group.
    :return: list of merged dicts, in first-seen order of merge ids.
    """
    result = {}
    for datum in data:
        merge_id = datum[merge_field]
        item = result.get(merge_id, {})
        item.update(datum)
        result[merge_id] = item
    # Fix: return an actual list as documented -- under Python 3 the bare
    # ``result.values()`` is a lazy view object, not a list.
    return list(result.values())
2b8da5bf9b92a71503d84243f08c84eb4ab11828
68,263
def generate_ticket(request):
    """Generate a JSON formatted support ticket for Zendesk given a request
    object.

    The phone field is hidden from users and hopefully captures spam
    content: if it is filled at all, treat the request as spam.

    Args:
        request (django.http.HttpRequest): A django request containing the
            user's POSTed content.

    Returns:
        dict with the ticket payload, or False when the honeypot tripped or
        required fields are missing.
    """
    path = request.path
    email = request.POST['email']
    query = request.POST['textarea']
    name = request.POST.get('name', 'Anonymous requester')
    # Honeypot: the hidden "phone" field must stay empty; bots fill it.
    is_honeypot_valid = request.POST['phone'] == ""
    if not (is_honeypot_valid and email and query):
        return False
    body = "A request was sent from {}.\n{}".format(path, query)
    return {
        "request": {
            "requester": {"name": name, "email": email},
            "subject": "Automated request from {}".format(name),
            "comment": {"body": body},
        }
    }
70e482da3b64e91e608e33b352ae9fec10a91240
68,266
def parse_refs_json(data):
    """Parse the json response from the references collection in Solr.

    Each doc contains annotation, fileName, details and a Solr-generated id.
    Returns a list of [annotation, details] pairs; 'details' arrives as a
    single-element list and is unwrapped to its string.
    """
    results = []
    for doc in data['response']['docs']:
        # Flatten: 'details' is a list holding one string.
        results.append([doc.get('annotation'), doc.get('details')[0]])
    return results
c8dadb7fa8e522788b731d535d18fb5343ddd95a
68,267
def get_mongo_key(f, *args, **kwargs):
    """
    Returns a mongo object key for the function.

    Args:
        f: the function
        args: positional arguments
        kwargs: keyword arguments (entries whose value is None are dropped)
    Returns:
        The key.
    """
    min_kwargs = [(k, v) for k, v in kwargs.items() if v is not None]
    return {
        "function": "{}.{}".format(f.__module__, f.__name__),
        "args": args,
        "ordered_kwargs": sorted(min_kwargs),
        "kwargs": dict(min_kwargs),
    }
3b33759bf7370ee768be33db9165ec17bc79c23d
68,268
def parse_header(elements) -> dict:
    """
    Parse headers from nodes TSV

    Parameters
    ----------
    elements: list
        The header record

    Returns
    -------
    dict:
        A dictionary of node header names to index
    """
    # enumerate() assigns each column its own position in O(n); the previous
    # per-column ``elements.index(col)`` lookup was O(n^2) and always
    # returned the first occurrence for duplicated column names.
    return {col: idx for idx, col in enumerate(elements)}
7e726af9a1cfe7e7f46c252e19f1eb90c0228c5f
68,269
def update_dict(ldict, rdict):
    """Return a new dict merging key/value pairs from rdict over ldict.

    Neither input is modified; rdict's entries win on key collisions.
    """
    return {**ldict, **rdict}
8500a47ad85edb311a2aead9fc2803e3481dd085
68,276
def log2chr(val):
    """
    For the log-base 2 of val, return the numeral or letter corresponding
    to val (which is < 36). Hence, 1 returns '0', 2 returns '1', 2**15
    returns 'f', 2**16 returns 'g', etc.
    """
    p = 0
    while val >= 2:
        p += 1
        # Floor division keeps everything in exact integer arithmetic: the
        # original ``val /= 2`` promoted to float, losing precision for
        # large ints and raising OverflowError for values >= 2**1024.
        val //= 2
    if p < 10:
        return chr(ord('0') + p)
    else:
        return chr(ord('a') + p - 10)
6490cd0970828dbd071e6408deefe82d26d9f9c9
68,280
from typing import Any


def get_columns(node: Any, df_cols: list) -> list:
    """Iterates over an xml element to retrieve columns

    Iterates over 'select' or 'drop' element of xml tree to create a list
    of columns that are defined under it

    Parameters
    ----------
    node: xml element
        an element of xml tree (select/drop)
    df_cols: list
        List of columns of the dataframe

    Returns
    ----------
    cols: list
        List of selected columns
    """
    cols = []
    for elem in node:
        attrib = elem.attrib
        col = attrib['name']
        if 'subcol' in attrib:
            # A sub-column is addressed as "<name>_<subcol>" and must match
            # an existing dataframe column exactly; any miss aborts the
            # whole selection with an empty result.
            col += "_" + attrib['subcol']
            if col not in df_cols:
                print(f"Invalid column: {col}")
                return []
            cols.append(col)
        elif col in df_cols:
            # Exact match against a dataframe column.
            cols.append(col)
        else:
            # No exact match: treat "<name>_" as a substring and pull in
            # every dataframe column containing it (prefix-style expansion).
            col += "_"
            col_list = [x for x in df_cols if col in x]
            cols.extend(col_list)
    # remove duplicates (dict.fromkeys preserves first-seen order)
    cols = list(dict.fromkeys(cols))
    return cols
aabe0a96b9e38275751bffb0697848f55de0b7c2
68,285
def get_lti_settings(is_regex=True):
    """Returns LTI provider settings to override settings in our tests."""
    # When regex matching is enabled the base URL ends with a UUID-prefix
    # pattern; otherwise it is a plain literal URL.
    suffix = "[0-9a-f]{8}-[0-9a-f]" if is_regex else ""
    base_url = f"http://localhost:8060/lti/videos/{suffix:s}"
    return {
        "lti_provider_test": {
            "base_url": base_url,
            "is_base_url_regex": is_regex,
            "oauth_consumer_key": "TestOauthConsumerKey",
            "shared_secret": "TestSharedSecret",
        }
    }
cb81d94e119a54d3a08fada19ea74a391cc0a77d
68,288
def iter_split(buf, delim, func):
    """
    Invoke ``func(s)`` for each `delim`-delimited chunk in the potentially
    large `buf`, avoiding intermediate lists and quadratic string
    operations. Return the trailing undelimited portion of `buf`, or any
    unprocessed portion of `buf` after ``func(s)`` returned False.

    :returns:
        ``(trailer, cont)``, where `cont` is False if the last call to
        ``func(s)`` returned False.
    """
    step = len(delim)
    pos = 0
    keep_going = True
    while keep_going:
        found = buf.find(delim, pos)
        if found == -1:
            break
        # Only an explicit False return stops processing; None (the usual
        # implicit return) keeps going.
        keep_going = func(buf[pos:found]) is not False
        pos = found + step
    return buf[pos:], keep_going
9887b4cd49c6d36e9fc98eedbc87ef29657d7418
68,290
def resolve_name_obj(name_tree_kids):
    """Resolve 'Names' objects recursively.

    If key 'Kids' exists in 'Names', the name destination is nested in a
    hierarchical structure. In this case, this recursion is used to resolve
    all the 'Kids'

    :param name_tree_kids: Name tree hierarchy containing kid needed to be
        solved
    :return: Resolved name tree list
    """
    temp_list = []
    for kid in name_tree_kids:
        if 'Kids' in kid and kid['Kids']:
            # Intermediate node: descend one level by resolving every child
            # reference. kid_kid.resolve() dereferences an indirect object
            # -- presumably a PDF-library object; confirm with callers.
            temp_list.extend([kid_kid.resolve() for kid_kid in kid['Kids']])
        elif 'Names' in kid:
            # A leaf level was reached: return the current level as-is.
            # NOTE(review): this returns on the FIRST kid carrying 'Names';
            # it assumes all kids at one level are leaves together.
            return name_tree_kids
    # Recurse into the next level of resolved kids.
    return resolve_name_obj(temp_list)
863d299516d97d0abdf9b0fdc97833da3b41eef9
68,291
def make_mem_key(device_id: str, state_type: str) -> str:
    """Build a memory key of the form ``<id>:<type>``."""
    return f"{device_id}:{state_type}"
0c095a03f11e64d70d82af772cb94fc67facc227
68,296
def _step_id(step): """Return the 'ID' of a deploy step. The ID is a string, <interface>.<step>. :param step: the step dictionary. :return: the step's ID string. """ return '.'.join([step['interface'], step['step']])
c1ff629a09758ff5817f76ea487af467fbbffc84
68,298
def smallest_integer(arr: list) -> int:
    """Return the smallest positive integer not representable as a subset sum.

    Time Complexity: O(n log n) (dominated by the sort).

    If 1 is not the smallest element, the answer is 1. Walking the values in
    ascending order, while the next element is <= the current candidate,
    every value up to candidate-1 is already reachable, so adding the
    element extends the reachable range by that amount. The first element
    larger than the candidate leaves a gap, and the candidate is the answer.

    Fix: the algorithm requires ascending order, but the original never
    sorted its input and silently returned wrong answers for unsorted
    lists. Sorting a copy preserves results for already-sorted input.
    """
    res = 1
    for element in sorted(arr):
        if element > res:
            break
        res += element
    return res
641b84b7bc0d2fb4c366c0544fdc3d0c6a04d4e1
68,301
def get_float(value):
    """
    Convert a string to a float number

    :param value: (str)
    :return: (float)

    Example:
    >>> get_float('3.0')
    3.0
    """
    try:
        return float(value)
    except ValueError:
        raise ValueError("Could not convert '%s' into a float number" % value)
30efdd2f38dea5708c07f434f7cf2557f9da3a93
68,307
def append_to_optparse(parser):
    """
    Set up OptionParser from optparse to allow for ruffus specific options:

        --verbose
        --version
        --log_file
        -t, --target_tasks
        -j, --jobs
        -n, --just_print
        --flowchart
        --key_legend_in_graph
        --draw_graph_horizontally
        --flowchart_format
        --forced_tasks

    :param parser: optparse.OptionParser instance to extend.
    :return: the same parser, for chaining.
    """
    #
    #   general options: verbosity / logging
    #
    parser.add_option("-v", "--verbose", dest="verbose",
                      action="count", default=0,
                      help="Print more verbose messages for each additional verbose level.")
    parser.add_option("-L", "--log_file", dest="log_file",
                      metavar="FILE",
                      type="string",
                      help="Name and path of log file")
    #
    #   pipeline
    #
    parser.add_option("-t", "--target_tasks", dest="target_tasks",
                      action="append",
                      default=list(),
                      metavar="JOBNAME",
                      type="string",
                      help="Target task(s) of pipeline.")
    parser.add_option("-j", "--jobs", dest="jobs",
                      default=1,
                      metavar="N",
                      type="int",
                      help="Allow N jobs (commands) to run simultaneously.")
    parser.add_option("-n", "--just_print", dest="just_print",
                      action="store_true", default=False,
                      help="Don't actually run any commands; just print the pipeline.")
    parser.add_option("--flowchart", dest="flowchart",
                      metavar="FILE",
                      type="string",
                      help="Don't actually run any commands; just print the pipeline "
                           "as a flowchart.")
    #
    #   Less common pipeline options
    #
    parser.add_option("--key_legend_in_graph", dest="key_legend_in_graph",
                      action="store_true", default=False,
                      help="Print out legend and key for dependency graph.")
    parser.add_option("--draw_graph_horizontally", dest="draw_horizontally",
                      action="store_true", default=False,
                      help="Draw horizontal dependency graph.")
    # Fix: the help text below contained a garbled (mis-encoded) character
    # where a space/newline belonged; the text has been repaired.
    parser.add_option("--flowchart_format", dest="flowchart_format",
                      metavar="FORMAT",
                      type="string",
                      default='svg',
                      help="format of dependency graph file. Can be 'ps' (PostScript), "
                           "'svg' 'svgz' (Structured Vector Graphics), "
                           "'png' 'gif' (bitmap graphics) etc ")
    parser.add_option("--forced_tasks", dest="forced_tasks",
                      action="append",
                      default=list(),
                      metavar="JOBNAME",
                      type="string",
                      help="Pipeline task(s) which will be included even if they are up to date.")
    return parser
50e200a4cbf8e721c307511df3e8c42592a4b71b
68,311
import re


def delete_matches(pattern: str, line: str) -> str:
    """Strip every substring of *line* that matches the regex *pattern*.

    Args:
        pattern (str): regex for search
        line (str): editable line

    Returns:
        str: string with all matches removed
    """
    cleaned = re.compile(pattern).sub("", line)
    return cleaned
073f4849e74161b3356913a46bd3fe8421d0d488
68,312
def mock_exhausted_fuzzer(**kwargs):
    """Stand-in fuzzer that is always exhausted.

    Every call yields ``None`` regardless of the keyword arguments supplied,
    signaling to the caller that no candidates remain.
    """
    return None
0e54819da8eabb1ab80e08542058ff8ad1f460fd
68,314
import html def _format_msg(msg: str) -> str: """Convert message to HTML suitable for rendering.""" return html.escape(msg).replace('\n', '<br />')
91389596f6584e98a1448adef3f8755d2bcd95d7
68,317
def add_saturate(a, b, upper_bound, lower_bound):
    """
    Return the saturated result of adding two values a and b.

    Parameters
    ----------
    a : Integer
        First summand
    b : Integer
        Second summand
    upper_bound : Integer
        Upper bound for the addition
    lower_bound : Integer
        Lower bound for the addition
    """
    total = int(a) + int(b)
    # Clamp: the upper bound is checked first, mirroring saturation order.
    if total > upper_bound:
        return upper_bound
    if total < lower_bound:
        return lower_bound
    return total
754c978369e2f1d0f5474c60d6ea25e9ee37b7db
68,327
def process_response_code(info):
    """Check the VirusTotal response code to ensure the hash exists in its
    database.

    Args:
        info: the full VirusTotal report in json format.

    Returns:
        True if the hash was found in the VirusTotal database, False
        otherwise (not found, still queued, or unknown code).
    """
    code = info["response_code"]
    if code == 1:
        # Found: the caller may continue processing the results.
        print("Item found in VT database, standby for results..")
        return True
    if code == 0:
        print("Item not found in VT database, exiting..")
    elif code == -2:
        print("Item currently queued for analysis, check back later..")
    else:
        print("Unknown VT response code. Response code: ", code)
    return False
d10855ecb568574860c8b5fbfd9da3d66ba06dc6
68,330
import re


def sanitize_url(url: str) -> str:
    """
    Sanitize the given url so that it can be used as a valid filename.

    Walks the path segments from right to left and returns the first one
    that is non-empty after stripping disallowed characters.

    :param url: url to create filename from
    :raise ValueError: when the given url can not be sanitized
    :return: created filename
    """
    for part in reversed(url.split('/')):
        filename = re.sub(r'[^a-zA-Z0-9_.\-]', '', part)
        if filename:
            return filename
    # Fix: the message was passed logging-style as a second positional
    # argument, so "%s" was never interpolated into the error text.
    raise ValueError('Could not create reasonable name for file from url %s' % url)
e82f86c359f9706ec4359c779e3c7ee7e5daf1e3
68,334
def calc_intersection(r1, r2):
    """
    Calculate the intersection point of two lines, each given as a pair of
    points. Maths from
    https://es.wikipedia.org/wiki/Intersecci%C3%B3n_de_dos_rectas

    :param r1: first line as ((x1, y1), (x2, y2))
    :param r2: second line as ((x3, y3), (x4, y4))
    :return: (x, y) tuple, or None when the lines are parallel
    """
    x1, y1 = r1[0][0], r1[0][1]
    x2, y2 = r1[1][0], r1[1][1]
    x3, y3 = r2[0][0], r2[0][1]
    x4, y4 = r2[1][0], r2[1][1]

    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if denom == 0:
        return None  # parallel lines never intersect

    # Cross products of each line's endpoints, shared by both coordinates.
    cross_a = x1 * y2 - y1 * x2
    cross_b = x3 * y4 - y3 * x4
    num_x = cross_a * (x3 - x4) - (x1 - x2) * cross_b
    num_y = cross_a * (y3 - y4) - (y1 - y2) * cross_b
    return (num_x / denom, num_y / denom)
4bab64bc95e2dbaef28ddb413432cf92b429e8ee
68,337
def get_senml_json_record(parsed, urn, label):
    """Return the value under *label* for the SenML record whose "n" field
    equals *urn*.

    Example SenML input::

        [{'bn': '/1/', 'n': '0/0', 'v': 123}, {'n': '0/1', 'v': 300},
         {'n': '0/2', 'v': 0}, {'n': '0/3', 'v': 0}, {'n': '0/5', 'v': 0},
         {'n': '0/6', 'vb': False}, {'n': '0/7', 'vs': 'U'}]

    Raises StopIteration when no record matches *urn*.
    """
    record = next(rec for rec in parsed if rec["n"] == urn)
    return record[label]
a3d4c2f4b0ed04b721baeb2a74efb003b5ddfbed
68,341
import re


def formatEpisodeName(names, join_with):
    """Takes a list of episode names, formats them into a string.

    If two names are supplied, such as "Pilot (1)" and "Pilot (2)", the
    returned string will be "Pilot (1-2)".

    If two different episode names are found, such as "The first" and
    "Something else", it will return "The first, Something else".
    """
    if len(names) == 1:
        return names[0]

    found_names = []
    numbers = []
    for cname in names:
        # Raw string fixes the invalid "\(" escape sequence, which emits a
        # SyntaxWarning on modern Python.
        number = re.match(r"(.*) \(([0-9]+)\)$", cname)
        if number:
            epname, epno = number.group(1), number.group(2)
            if len(found_names) > 0 and epname not in found_names:
                # Two different base names cannot be merged into a range.
                return join_with.join(names)
            found_names.append(epname)
            numbers.append(int(epno))
        else:
            # An episode didn't match the "Name (number)" pattern.
            return join_with.join(names)

    start, end = min(numbers), max(numbers)
    return "%s (%d-%d)" % (found_names[0], start, end)
d3391a9bd571da8fc02ced3d4406eccdab4e8c9d
68,343
def get_scenario_start_index(base_times, scenario_start_time):
    """
    Return the index of the closest time step that is at, or before, the
    scenario start time.

    Raises ValueError when no baseline time step falls after the scenario
    start time.
    """
    for idx, time in enumerate(base_times):
        if time > scenario_start_time:
            # First step strictly after the start: back up one (min 0).
            return max(0, idx - 1)
    raise ValueError(
        f"Scenario start time {scenario_start_time} is set after the baseline time range."
    )
c63733604e9e0736f7c86dcb8273f8ebbd349be8
68,344
def t_norm(x, y, type='product'):
    """
    Binary operation T on the interval [0,1].

        TM(x,y) = min(x,y)          (minimum or Godel t-norm)
        TP(x,y) = x*y               (product t-norm)
        TL(x,y) = max(x+y-1, 0)     (Lukasiewicz t-norm)

    x and y -- numeric inputs for the triangular norm
    """
    if type == 'minimum':
        return min(x, y)
    if type == 'Lukasiewicz':
        return max(x + y - 1, 0)
    if type == 'product':
        return x * y
    raise TypeError("Triangular Norm type invalid")
7bea02ed21a540c0ef2b16af95b301dc692f7dd9
68,349
from typing import Mapping
from typing import Any


def from_level_to_severity(logger, log_method, event_dict) -> Mapping[str, Any]:
    """A custom processor for structlog: rename the ``level`` key to
    ``severity`` in *event_dict* (mutated in place and returned)."""
    severity = event_dict.pop('level')
    event_dict['severity'] = severity
    return event_dict
4a19a0d2562bb48aac907e062279f622087b21c3
68,351
def payment(info_list):
    """
    Calculate the total pay from (hours, rate) entries.

    :param info_list: list of (hours, payment_per_hour) tuples.
    :return: int, total payment over all entries.

    Usage::

        >>> payment([(3, 50)])
        150
        >>> payment([(2,40), (3, 50)])
        230
    """
    total = 0
    for hours, rate in info_list:
        total += hours * rate
    return total
6fdda4bced92c600fa8b376bdd6d277af8a10f66
68,352
def _map_slice(slice_, mapper): """Applies `mapper` to a contiguous slice `slice_`.""" assert slice_.step is None or slice_.step == 1 new_slice = slice(mapper(slice_.start), 1 + mapper(slice_.stop -1)) assert slice_.stop - slice_.start == new_slice.stop - new_slice.start return new_slice
127cc3e7e4ad2fb28263afec01e4c812d92e342a
68,356
def robot_point_creator(dict_point):
    """Convert the robot-shape configuration dictionary into point lists.

    Args:
        dict_point (dict): robot shape configuration, mapping point names
            to dicts carrying 'x' and 'y' coordinates.

    Returns:
        list: ``[xs, ys]`` coordinate lists in the robot frame, with the
        first point repeated at the end to close the outline.
    """
    xs = []
    ys = []
    for name in dict_point:
        point = dict_point[name]
        xs.append(point['x'])
        ys.append(point['y'])
    # Close the outline by looping back to the first point.
    xs.append(xs[0])
    ys.append(ys[0])
    return [xs, ys]
33ce0607ec7c59c903c63050abeafe4f8dcd1259
68,364
def _loc(content, node): """ Find the location of a node within ``content`` Args: content (str): The file content node (ast.Node): Node to find Returns: (int, int): Start/end indices of string """ start_line, start_col = node.lineno, node.col_offset end_line, end_col = node.end_lineno, node.end_col_offset line_lengths = [len(line) for line in content.splitlines(True)] idx0 = sum(line_lengths[: start_line - 1]) + start_col idx1 = sum(line_lengths[: end_line - 1]) + end_col return (idx0, idx1)
6f7c798a300ca579ecbe037b076b7ded5d2a0054
68,365
def transDataRow(origins, pep, protDict):
    """
    Called by writeToFasta(); formats the trans origin data of a peptide
    into rows for the csv file.

    :param origins: location data of the form [[protName, startIndex,
        endIndex]..], where each sublist locates one cleavage that can be
        combined with another to form the peptide. An entry may instead be
        the literal True, marking a peptide formed only by short cleavages.
    :param pep: the peptide the origin data relates to.
    :param protDict: dictionary of input protein data, used to report the
        exact substring found in the protein (I/J are not distinguished).
    :return dataRows: list of rows, each of the form
        [protName, peptide, pepInProt, location].
    """
    dataRows = []
    for location in origins:
        if location == True:  # noqa: E712 -- flag values mixed with lists
            dataRows.append([pep, "Formed only by cleavages under max length."])
            continue
        protName = location[0]
        startRef = location[1]
        endRef = location[2] + 1
        pepInProt = protDict[protName][startRef:endRef]
        # Locations are reported 1-based for human readability.
        dataRows.append([protName, pep, pepInProt, [startRef + 1, endRef]])
    return dataRows
995db536271b338a3dac1b872d8ff275ecf58c70
68,368
def create_user(app, username="user"):
    """Create a user with password == username and email
    <username>@example.com, via the app's userbase module."""
    details = {
        'username': username,
        'password': username,
        'fullname': username,
        'email': "%s@example.com" % username,
    }
    userbase = app.module_map['userbase']
    return userbase.register(details, force=True, create_pw=False)
3b0817c3a3e18b02f3c1d78c3067e62eae710f15
68,369
def sieve(indices, dates):
    """
    Filter *indices* using *dates*, keeping only entries with values.

    :param indices: The indices values to filter
    :param dates: The dates used to filter
    :return: dict of only those dates whose indices entry is non-empty
    """
    return {date: indices[date] for date in dates if len(indices[date]) > 0}
3da85cb754b4b4dfb458d9e9ebe1417bb97b9097
68,370
import re


def strip(phone):
    """Remove all non-numeric digits in phone string.

    Returns None when *phone* is falsy (None or empty string), preserving
    the original implicit behaviour.
    """
    if phone:
        # Raw string fixes the invalid "\d" escape warning; \D is the
        # built-in complement of \d, equivalent to [^\d].
        return re.sub(r'\D', '', phone)
    return None
114f45e475432742f98d47f13db3a0dad9906d9d
68,381