content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def num_steps(n, steps=0):
    """Count the ways to climb to step ``n`` taking 1, 2 or 3 steps at a time.

    Args:
        n: target number of stairs.
        steps: current position; used internally by the original recursion,
            callers normally omit it.

    Returns:
        int: number of distinct step sequences from ``steps`` to exactly ``n``.
    """
    remaining = n - steps
    if remaining < 0:
        return 0
    # Bottom-up tribonacci DP: O(n) instead of the original O(3^n) recursion,
    # with identical results.
    ways = [0] * (remaining + 1)
    ways[0] = 1  # exactly one way to be standing at the top already
    for d in range(1, remaining + 1):
        ways[d] = (ways[d - 1]
                   + (ways[d - 2] if d >= 2 else 0)
                   + (ways[d - 3] if d >= 3 else 0))
    return ways[remaining]
3223c602c76f6a2b89eb2e37ea141addcbbb3531
120,598
def slowcomplete(mylist, prefix, k):
    """Suggest the top ``k`` words with the highest weight for a prefix.

    Params:
        mylist: list of (weight, word) tuples to search
        prefix: the prefix to match against the stored words
        k: how many recommendations to return, ordered by weight

    Return:
        List of the top ``k`` (weight, word) tuples whose word starts with
        (but is not equal to) ``prefix``, or a message string when nothing
        matches.
    """
    assert isinstance(mylist, list), "The search space should be a list."
    assert isinstance(prefix, str), "The prefix should be a string"
    assert isinstance(k, int), "The number of suggestions should be a positive integer"
    assert k > 0, "The number of suggestions should be a positive integer"
    # Entries are (weight, word); an exact match of the prefix is excluded.
    matches = [entry for entry in mylist
               if entry[1] != prefix and entry[1].startswith(prefix)]
    if not matches:
        return 'No words match with the given prefix.'
    matches.sort(key=lambda entry: entry[0], reverse=True)
    return matches[:k]
3a40279d311636e385b9e83018fd1aa92663e23a
120,599
def index_find(string, start, end):
    """Return the substring of ``string`` found between ``start`` and ``end``.

    :param string: string to search
    :param start: marker after which extraction begins
    :param end: marker at which extraction stops
    :return: text between ``start`` and ``end`` (empty string when absent)
    """
    _, _, tail = string.partition(start)
    body, _, _ = tail.partition(end)
    return body
ce7bca851abce0e37665404dda8f7935b11c75d0
120,604
def zoom_fit(screen, bounds, balanced=True):
    """Compute an affine transform that zoom-fits ``bounds`` into ``screen``.

    screen: (w, h) of the viewing region
    bounds: (x, y, w, h) of the items to fit
    balanced: when True, use one uniform scale for both axes
    returns: [translate x, translate y, scale x, scale y]
    """
    screen_w, screen_h = screen
    item_x, item_y, item_w, item_h = bounds
    scale_x = screen_w / item_w
    scale_y = screen_h / item_h
    if balanced:
        # Uniform scaling: both axes shrink to the tighter fit.
        scale_x = scale_y = min(scale_x, scale_y)
    return [-item_x * scale_x, -item_y * scale_y, scale_x, scale_y]
5bdd7d017b65426b3d7937bd866a45da3a67e929
120,607
def mode_2_prod_rotated(A, B, adjoint_a=False):
    """Mode-2 product of a 3D tensor with a matrix, with rotated output.

    For a 3D tensor A(r1, i, r2) and a matrix B(r3, i), computes
    C(r3, r1, r2) = sum_i A * B.  The r3 dimension is moved to the front
    (hence "rotated") so it can serve as a batch dimension in subsequent
    operations.
    """
    r1, mode_dim, r2 = A.shape
    # Unfold A along mode 2 into an (i, r1*r2) matrix.
    unfolded = A.permute(1, 0, 2).reshape(mode_dim, r1 * r2)
    matrix = B.transpose(-2, -1).conj() if adjoint_a else B
    product = matrix @ unfolded
    return product.reshape(-1, r1, r2)
48e6150ce2d88f0cb1fc63f03782f71f9d73cb24
120,612
import asyncio


def asyncio_run(tasks):
    """Run the given awaitables to completion and return one result.

    Helper method which abstracts differences from Python 3.7 and before
    about coroutines.

    NOTE(review): ``done.pop()`` takes an arbitrary element of the completed
    set, so with more than one task the returned result (and the exception
    re-raised, if any) comes from an unspecified task — confirm callers only
    pass a single task or don't care which result they get.
    """
    if hasattr(asyncio, "run"):
        # Python 3.7+: asyncio.run creates and tears down the loop for us.
        done , _ = asyncio.run(asyncio.wait(tasks))
    else:
        # Older Pythons: manage the event loop manually.
        loop = asyncio.new_event_loop()
        try:
            done, _ = loop.run_until_complete(asyncio.wait(tasks))
        finally:
            loop.close()
    task = done.pop()
    # Re-raise the task's exception, if it failed, instead of returning it.
    retval_exception = task.exception()
    if retval_exception is not None:
        raise retval_exception
    return task.result()
5dee64dd0716cadae57dbe5d962cd3f732d14a95
120,614
def dot(v1, v2):
    """Dot product of two vectors (delegates to ``v1.dot``)."""
    result = v1.dot(v2)
    return result
d896e79c0da713b5a3b877deb33b68df8f2c8da4
120,615
def shift_speed(speed_series, shift, dt):
    """Return the speed from ``shift`` time ago, linearly interpolated.

    ``speed_series`` is a list of speeds with constant discretization ``dt``;
    the last entry is the current speed, so ``speed_series[-k-1]`` is the
    speed ``k*dt`` ago.  If ``shift`` is not a multiple of ``dt``, linearly
    interpolate between the two nearest samples.  If ``shift`` reaches
    before the earliest measurement, return the first entry.

    Returns a speed.
    """
    ind = int(shift // dt)
    # Interpolating needs samples at ind*dt and (ind+1)*dt ago; clamp to the
    # oldest measurement when the requested time predates the series.
    if ind + 1 >= len(speed_series):
        return speed_series[0]
    remainder = shift - ind * dt
    newer = speed_series[-ind - 1]   # sample ind*dt ago
    if remainder == 0:
        return newer
    # Bug fix: the original blended with speed_series[-ind] (the *newer*
    # neighbour, wrapping to speed_series[0] when ind == 0).  A time between
    # ind*dt and (ind+1)*dt ago lies between this sample and the *older*
    # one, weighted toward the older sample as remainder grows.
    older = speed_series[-ind - 2]   # sample (ind+1)*dt ago
    return (newer * (dt - remainder) + older * remainder) / dt
2892db238d3e5d337178cc8bdf726e72d4adf475
120,616
def get_quadratic(A, b, c=0):
    """Build a callable evaluating Q(x) = 0.5 * x^T A x + b^T x + c.

    Parameters
    ----------
    A : (N, N) numpy.ndarray
        A square matrix
    b : (N) numpy.ndarray
        A vector
    c : float
        A constant

    Returns
    -------
    function :
        Callable ``f(x, return_g=True, return_H=True)`` that returns the
        quadratic's value, optionally followed by its gradient and Hessian
        (a bare value when both flags are False, a tuple otherwise).
    """
    def Quadratic(x, return_g=True, return_H=True):
        value = 0.5 * x.dot(A.dot(x)) + b.dot(x) + c
        pieces = [value]
        if return_g:
            # Gradient of the quadratic: A x + b.
            pieces.append(A.dot(x) + b)
        if return_H:
            # The Hessian is the constant matrix A.
            pieces.append(A)
        if len(pieces) == 1:
            return pieces[0]
        return tuple(pieces)

    return Quadratic
8ef49bfa248a1435fa7200a0fd6b4b51d5169448
120,619
def _connect_setns(spec, kind=None): """ Return ContextService arguments for a mitogen_setns connection. """ return { 'method': 'setns', 'kwargs': { 'container': spec.remote_addr(), 'username': spec.remote_user(), 'python_path': spec.python_path(), 'kind': kind or spec.mitogen_kind(), 'docker_path': spec.mitogen_docker_path(), 'lxc_path': spec.mitogen_lxc_path(), 'lxc_info_path': spec.mitogen_lxc_info_path(), 'machinectl_path': spec.mitogen_machinectl_path(), } }
8f0af174f3008f36152c508f19c93a9db3ef697f
120,621
def select_actions(agents, message):
    """Select an action from each agent's policy.

    :param agents: The list of agents.
    :param message: The message from the leader.
    :return: A list of selected actions, one per agent, in agent order.
    """
    # Build the list directly (PERF401) instead of append-in-a-loop.
    return [agent.select_action(message) for agent in agents]
f4db052c8caf64fd7d8ea38b11e94d61fde06d37
120,627
def _get_all_objs_of_type(type_, parent): """Get all attributes of the given type from the given object. Parameters ---------- type_ : The desired type parent : The object from which to get the attributes with type matching 'type_' Returns ------- A list (possibly empty) of attributes from 'parent' """ return set([obj for obj in parent.__dict__.values() if isinstance(obj, type_)])
acbb63d10a2521080d8ee00dc443a90770f364ff
120,631
from typing import Optional


def option_string(argument: Optional[str]) -> Optional[str]:
    """
    Check for a valid string option and return it.

    If no argument is given (None, empty, or whitespace-only), raise
    ``ValueError``.
    """
    if not (argument and argument.strip()):
        raise ValueError("Must supply string argument to option")
    return argument
6ba4a8ca985a32d3cd8eb81f2f44580eb93739ee
120,632
def fraclist(l, percent):
    """
    Return the first `percent`/100 elements from the list l.
    """
    cutoff = int(round(len(l) * percent / 100.0))
    return l[:cutoff]
73c17cb681c4ab40d7866d8e68f7360080924ac8
120,636
def compose_task_url(contest: str, contest_no: int, task: str) -> str:
    """Build the AtCoder problem-page URL for a contest and task.

    Parameters
    ----------
    contest : str
        Contest name: ABC, ARC or AGC.
    contest_no : int
        Contest number (zero-padded to three digits in the URL).
    task : str
        Task name: A, B, C or D.

    Returns
    ----------
    url : str
        URL of the corresponding problem page.
    """
    # e.g. ("ABC", 12) -> "abc012"; used both as contest slug and task prefix.
    slug = '{}{:03d}'.format(contest.lower(), contest_no)
    return 'https://beta.atcoder.jp/contests/{0}/tasks/{0}_{1}'.format(
        slug, task.lower())
cffe03dce8e826e6380e97cb3a29b1a5cb16e866
120,637
def cdf(rv, x, *args, **kwargs):
    """
    Evaluate the cumulative distribution function of `rv` at `x`.

    Delegates to ``rv.cdf``; specific distributions may override this.

    :param rv: RandomVariable
    :param x: float
    :return: float
    """
    evaluate = rv.cdf
    return evaluate(x, *args, **kwargs)
c32f4f8b75828c65bec297152b9b496afd02d81e
120,639
def get_group_id(conn, group_name):
    """Get ID of a group based on group name.

    Must be an exact match. Case sensitive.

    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object
        OMERO connection.
    group_name : str
        Name of the group for which an ID is to be returned.

    Returns
    -------
    group_id : int
        ID of the OMERO group. Returns `None` if group cannot be found.

    Examples
    --------
    >>> get_group_id(conn, "Research IT")
    304
    """
    # isinstance (rather than an exact type() check) also accepts str
    # subclasses, which behave like strings for the comparison below.
    if not isinstance(group_name, str):
        raise TypeError('OMERO group name must be a string')
    for group in conn.listGroups():
        if group.getName() == group_name:
            return group.getId()
    return None
e1b7d22511d48b9be372e91f2024e6f55082c9e0
120,646
def process(workflow_instance, **kwargs):
    """Process an instance of a workflow using the runtime parameters
    given by ``kwargs``.
    """
    workflow = workflow_instance(**kwargs)
    workflow.process()
    outputs = workflow.get_outputs()
    # Persist the workflow's state after collecting outputs.
    workflow.serialize_state()
    return outputs
4417b2d9dac61eec1c905586bf9303d041ec4ccb
120,647
def gen_hex_name(num: int) -> str: """ Convert an integer from base 10 to base 64 (string) **Parameters** num: int Number to convert, in base 10 **Returns** str 4-digit number in base 63, with leading zeros """ # All characters that OpenMDAO allows in Group names char_set = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_" base = len(char_set) string_rep = "" remainder = 0 # name will have leading zeros prefix = "_0000" terminate = False while terminate is False: remainder = num % base num -= remainder num = int(num / base) string_rep = char_set[remainder] + string_rep prefix = prefix[:-1] if num == 0: terminate = True return prefix + string_rep
6578738b4df840632e782975a1ef297dbf68e3ca
120,649
import collections def _f1_score(target, prediction): """Computes token f1 score for a single target and prediction.""" prediction_tokens = prediction.split() target_tokens = target.split() common = (collections.Counter(prediction_tokens) & collections.Counter(target_tokens)) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(target_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1
7dc5a7c48af11d69ce52c6974b55e376c1e5d3f6
120,651
def load_text(file_name):
    """
    Load lines from a plain-text file and return these as a list, with
    trailing newlines stripped.

    Arguments:
        file_name (str or unicode): file name.

    Returns:
        list of str or unicode: lines.
    """
    with open(file_name) as handle:
        content = handle.read()
    return content.splitlines()
11c4b0d57be2488800b11857b138b98fad689564
120,653
def get_queue_url(sqs_client, queue_name: str) -> str:
    """
    Name: get_queue_url
    Description: Look up the URL used to connect to an SQS queue.
    Inputs:
        :sqs_client: -> Client used to talk to SQS.
        :queue_name: type(str) -> Required. The name of the queue.
    Outputs:
        type(str) -> The queue's URL.
    """
    return sqs_client.get_queue_url(QueueName=queue_name)["QueueUrl"]
f8338fb0369d5b237712f7b694941a714f29af54
120,654
def nicesize(nbytes: int, space: str = "") -> str:
    """
    Format a byte count using IEC 1998 units, such as KiB (1024).

    nbytes: Number of bytes
    space: Separator between digits and units

    Returns: Formatted string (integer-truncated value plus unit)
    """
    scale, suffix = 1, "B"
    # Largest unit first; stop at the first one the value reaches.
    for power, unit in ((5, "PiB"), (4, "TiB"), (3, "GiB"), (2, "MiB"), (1, "KiB")):
        if nbytes >= 1024 ** power:
            scale, suffix = 1024 ** power, unit
            break
    return "{}{}{}".format(nbytes // scale, space, suffix)
9f2ae3874cf4c171732783cf6a59bdf534c95fdc
120,656
def find_card_number(patron):
    """
    Gets the card number from the patron record

    :param patron: patron record dictionary
    :return: card_number (the record's "id" value, or None when absent)
    """
    card_number = patron.get("id")
    return card_number
17f8a5d9f723243848094e69031d47b5c0b17502
120,667
def apply_message_parser(parser):
    """
    Decorator that passes the result of a message parser to a handler as
    kwargs. The parser will only be passed a `message` kwarg.

    The wrapped handler's return value is propagated to the caller (the
    original silently discarded it), and ``functools.wraps`` preserves the
    handler's name/docstring for introspection.
    """
    import functools

    def inner_decorator(handler):
        @functools.wraps(handler)
        def wrapped(client, message):
            parser_result = parser(message=message)
            # Bug fix: return the handler's result instead of dropping it.
            return handler(client=client, message=message, **parser_result)
        return wrapped
    return inner_decorator
400fa032e2ce84a8e19bec8d11216b052a0b24e1
120,670
def check_mapped_storage_exists(coriolis, destination_endpoint, storage_mappings, destination_environment=None):
    """
    Checks whether all of the mapped storage types exist on the destination.

    :param coriolis: client exposing ``endpoint_storage.list``
    :param destination_endpoint: destination endpoint object or ID
    :param storage_mappings: storage mappings dict; may contain the keys
        'default' (a single storage name/ID) plus 'backend_mappings' and
        'disk_mappings' (dicts whose values are storage names/IDs)
    :param destination_environment: optional environment options for target
    :raises: Exception if storage is not found.
    """
    # All storage types available on the destination endpoint.
    storage = coriolis.endpoint_storage.list(
        destination_endpoint, environment=destination_environment)

    def _check_stor(storage_name_or_id):
        # A mapping entry may reference storage by either id or name.
        matches = [
            stor for stor in storage
            if stor.id == storage_name_or_id or stor.name == storage_name_or_id]
        if not matches:
            raise Exception(
                "Could not find storage type '%s' in: %s" % (
                    storage_name_or_id, storage))
        return matches[0]

    # check default:
    default = storage_mappings.get('default')
    if default is not None:
        _check_stor(default)

    # check backend mappings:
    for mapped_storage in storage_mappings.get('backend_mappings', {}).values():
        _check_stor(mapped_storage)

    # check per-disk mappings:
    for mapped_storage in storage_mappings.get('disk_mappings', {}).values():
        _check_stor(mapped_storage)
480d71bb0311ebe381009b883571e84c03ccb2e1
120,672
import random
import xxhash


def gen_k_hash_functions(k):
    """
    Create ``k`` xxh32 hashers seeded with distinct random 32-bit-ish seeds.

    Currently returns xxhash.xxh32 objects with initialized seeds (maybe
    experiment with 64 bit).

    Parameters
    ----------
    k : int
        Number of random seeds for xxh32.

    Returns
    -------
    list
        xxhash.xxh32 objects, one per random seed.
    """
    # sample() guarantees the k seeds are distinct.
    seeds = random.sample(range(0xfffffff), k)
    return [xxhash.xxh32(seed=s) for s in seeds]
9f8f23c4f27d1ae31fba6813a51bb948115768b6
120,675
import json


def is_valid_json(x):
    """Returns true if x can be JSON serialized

    Args:
        x: Object to test
    """
    try:
        json.dumps(x)
    except TypeError:
        return False
    return True
cabdf1e0616ef499ed0b857122928a61c3903d8a
120,676
import re


def bump_minor(version: str) -> str:
    """Increment the minor version part, leaving the rest of the version
    string unchanged.

    A missing minor part is treated as 0 (so ``"v1"`` -> ``"v1.1"``).

    :param version: version string like ``"v1.2.3"``.
    :raises ValueError: if no major version number can be found.
    """
    m = re.fullmatch(r"""(?x)
        (?P<prefix>\D*)?      # optional prefix
        (?P<major>\d+)        # major part
        (\.(?P<minor>\d+))?   # minor part
        (?P<suffix>.*)        # unused rest
        """, version)
    if not m:
        raise ValueError("Invalid version string", version)
    prefix = m.group('prefix') or ""
    major = m.group('major')
    minor = m.group('minor') or "0"
    suffix = m.group('suffix') or ""
    minor2 = str(int(minor) + 1)
    # Bug fix: re-attach the captured suffix (patch part, pre-release tag,
    # ...) — the original captured it but dropped it from the result,
    # contradicting "leaves rest of the version string unchanged".
    return f"{prefix}{major}.{minor2}{suffix}"
1ef2ce69c8ab005f5afef338fad00cd8c84ef48e
120,680
def compute_runtime(length, buffer):
    """
    Takes a length as a time delta and a buffer in seconds
    Returns number of milliseconds equal to length - buffer
    """
    seconds = length.total_seconds() - buffer
    return seconds * 1000
4e139340ff48904a737936dcd54d087f60755bea
120,681
def format_df_summary(df):
    """
    Returns a formatted dataframe for the dataset summary statistics table.

    Converts count values to integers and percentage values to a percent
    format, then transposes the result.

    NOTE(review): the count/percent columns of the *input* dataframe are
    modified in place before transposing — pass a copy if the caller still
    needs the original values.

    Parameters:
        df (pandas DataFrame): Dataframe to format for dataset summary
            statistics table

    Returns:
        formatted_df (pandas DataFrame): formatted dataset summary
            statistics table
    """
    # Count columns: cast to int so they render without decimals.
    df[['No. of Columns', 'No. of Rows', 'Total Value Count', 'Count of NaNs',
        'Count of Duplicate Rows', 'Count of Numerical Variables',
        'Count of Categorical Variables']] = \
        df[['No. of Columns', 'No. of Rows', 'Total Value Count', 'Count of NaNs',
            'Count of Duplicate Rows', 'Count of Numerical Variables',
            'Count of Categorical Variables']].astype(int)
    # Percentage columns: render as strings with a trailing '%'.
    df[['Percent of NaNs', 'Percent of Duplicate Rows']] = \
        df[['Percent of NaNs', 'Percent of Duplicate Rows']].astype(str) + '%'
    # Transpose so statistics become rows of the summary table.
    formatted_df = df.transpose()
    return formatted_df
6c6bb1adb9e135b65a076c20f558572833e6d269
120,684
def problem_10_5(square1, square2):
    """
    Given two squares on a two dimensional plane, find a line that would
    cut these two squares in half.

    Solution: any line through the center of a square bisects it, so
    compute the equation of the line passing through the two centers.

    Args:
        square1: tuple of dicts, format ({x, y}, {x, y}) - opposite corners
        square2: tuple of dicts, format ({x, y}, {x, y}) - opposite corners

    Returns:
        tuple, format (slope, intercept)
    """
    (p1, p2) = square1
    (p3, p4) = square2

    # Centers are the midpoints of the given opposite corners.
    c1 = {'x': float(p1['x'] + p2['x']) / 2, 'y': float(p1['y'] + p2['y']) / 2}
    c2 = {'x': float(p3['x'] + p4['x']) / 2, 'y': float(p3['y'] + p4['y']) / 2}

    slope = float(c2['y'] - c1['y']) / (c2['x'] - c1['x'])
    # Bug fix: the intercept must come from the center line (b = y - m*x at
    # c1), not from the line through square1's own corners as before.
    intercept = c1['y'] - slope * c1['x']
    return (slope, intercept)
832ce077e6179cd6214f47d0f442243e34406b37
120,685
from typing import Any

import torch


def is_edge_index(x: Any) -> bool:
    """Check if the input :obj:`x` is a PyG-like :obj:`edge_index` with
    shape [2, M], where M is the number of edges.

    Example
    -------
    >>> import torch
    >>> edges = torch.LongTensor([[1, 2], [3, 4]])
    >>> is_edge_index(edges)
    True
    >>> is_edge_index(edges.t())
    False
    """
    # Bug fix: test ndim before size(0) — calling size(0) on a 0-dim tensor
    # raises instead of returning False.
    return (torch.is_tensor(x) and x.ndim == 2 and x.size(0) == 2
            and x.dtype == torch.long)
25ff62a36acd0211df638403438df65daeeb476d
120,688
def all(x):
    """The all() function from Python 2.5, for backwards compatibility
    with previous versions of Python."""
    # Short-circuit on the first falsy element.
    for item in x:
        if not item:
            return False
    return True
a2f41ebb16ee6cb42432f719f9bce34a7cfb04fa
120,690
import torch


def compute_q_vals(Q, states, actions):
    """
    This method returns Q values for given state action pairs.

    Args:
        Q: Q-net
        states: a tensor of states. Shape: batch_size x obs_dim
        actions: a tensor of actions. Shape: batch_size x 1
    Returns:
        A torch tensor filled with Q values. Shape: batch_size x 1.
    """
    q_all = Q(states)
    # For each row, pick the Q value at the column indexed by the action.
    return q_all.gather(1, actions)
0930a91062dfef135f3377d836b65b74b69d5663
120,692
def create_accdata2(packer, enabled, frame_step, fcwhud_1, fcwhud_2, fcwhud_3, hud_intensity, flash_rate):
    """Creates a CAN message for ACCDATA_2"""
    values = {
        "CmbbBrkDecel_No_Cs": 0,  # checksum placeholder, always zero here
        "CmbbBrkDecel_No_Cnt": frame_step,
        "HudBlk1_B_Rq": fcwhud_1,
        "HudBlk2_B_Rq": fcwhud_2,
        "HudBlk3_B_Rq": fcwhud_3,
        "HudDsplyIntns_No_Actl": hud_intensity,
        "HudFlashRate_D_Actl": flash_rate,
    }
    return packer.make_can_msg("ACCDATA_2", 0, values)
cc727c2e05172c726835a1da4857d359da3498fe
120,693
def _priority_function(random_route:list, mean_routes:dict, std:dict, mean_weight:float, std_weight:float)-> dict: """ Give us the order of priority routes to select to look for connections Parameters ----------- random_route: list The list of all points to connect mean_routes: dict A dict with the mean distance/cost of all points one each other std_routes: dict A dict with the standar deviation distance/cost of all points one each other mean_weight: float The ponderation of the mean for all points, higher number make points with higher mean (more distance from others) have large values and priorice their connections std_weight: float The ponderation of the standar deviation for all points, higher number make points with higher deviation (more diferents of distance between points) have larger values and priorice their connections Return ------- A Dict of oredered by priority """ priorities = {} for id_route in random_route: priorities[id_route] = (mean_routes[id_route]**mean_weight) * (std[id_route]**std_weight) ordered_prior_route = dict(sorted(priorities.items(),reverse=True, key=lambda x: x[1])) return ordered_prior_route
5ad7033fefcf6ecf22fa7da3782611c07e5742f2
120,696
def findExtremeDivisor(n1, n2):
    """Assumes that n1 and n2 are positive ints
    Return a tuple containing the smallest common divisor > 1 and the
    largest common divisor of n1 and n2"""
    minVal, maxVal = None, None
    for candidate in range(2, min(n1, n2) + 1):
        if n1 % candidate != 0 or n2 % candidate != 0:
            continue
        if minVal is None:
            minVal = candidate          # first hit is the smallest
        maxVal = candidate              # last hit is the largest
    return (minVal, maxVal)
7a26da5c2ace6542c4919c569197b64521f3726c
120,697
def sabs(self, key="", **kwargs):
    """Specifies absolute values for element table operations.

    APDL Command: SABS

    Parameters
    ----------
    key
        Absolute value key:

        0 - Use algebraic values in operations.

        1 - Use absolute values in operations.

    Notes
    -----
    Causes absolute values to be used in the SADD, SMULT, SMAX, SMIN,
    and SSUM operations.
    """
    return self.run(f"SABS,{key}", **kwargs)
9670d244e7a66fa281bb07b3930bdf8c3cfef0d3
120,698
def set_response_headers(response):
    """
    Rewrite the "server" header so we don't give away that information
    """
    # Flask/werkzeug won't let us delete this header outright, so overwrite
    # it with a meaningless value instead.
    response.headers["server"] = "asdf"
    return response
42be760de7bd7a9cb0947ff866eaaf246ecd822f
120,700
def _any(itr): """Similar to Python's any, but returns the first value that matches.""" for val in itr: if val: return val return False
a71eb2643af93d76031290d8083a6595b3560084
120,701
def create_dialogue_json(dg):
    """Creates a JSON describing a dialogue for purposes of Crowdflower.

    Arguments:
        dg -- a Django dialogue object
    """
    template = '{{"cid":"{cid}","code":"{code}","code_gold":"{gold}"}}'
    return template.format(cid=dg.cid, code=dg.code, gold=dg.get_code_gold())
62a1e9508a8fb42ff7af4d14fb4b16cb1a609373
120,705
import asyncio


async def readuntil(self, separator=b'\n'):  # pragma: no cover
    """Read data from the stream until ``separator`` is found.

    On success, the data and separator will be removed from the internal
    buffer (consumed). Returned data will include the separator at the end.

    Configured stream limit is used to check result. Limit sets the
    maximal length of data that can be returned, not counting the
    separator.

    If an EOF occurs and the complete separator is still not found, an
    IncompleteReadError exception will be raised, and the internal buffer
    will be reset. The IncompleteReadError.partial attribute may contain
    the separator partially.

    If the data cannot be read because of over limit, a LimitOverrunError
    exception will be raised, and the data will be left in the internal
    buffer, so it can be read again.

    .. versionadded:: 0.1
    """
    seplen = len(separator)
    if seplen == 0:
        raise ValueError('Separator should be at least one-byte string')

    if self._exception is not None:
        raise self._exception

    # Consume whole buffer except last bytes, which length is
    # one less than seplen. Let's check corner cases with
    # separator='SEPARATOR':
    # * we have received almost complete separator (without last
    #   byte). i.e buffer='some textSEPARATO'. In this case we
    #   can safely consume len(separator) - 1 bytes.
    # * last byte of buffer is first byte of separator, i.e.
    #   buffer='abcdefghijklmnopqrS'. We may safely consume
    #   everything except that last byte, but this require to
    #   analyze bytes of buffer that match partial separator.
    #   This is slow and/or require FSM. For this case our
    #   implementation is not optimal, since require rescanning
    #   of data that is known to not belong to separator. In
    #   real world, separator will not be so long to notice
    #   performance problems. Even when reading MIME-encoded
    #   messages :)

    # `offset` is the number of bytes from the beginning of the buffer
    # where there is no occurrence of `separator`.
    offset = 0

    # Loop until we find `separator` in the buffer, exceed the buffer size,
    # or an EOF has happened.
    while True:
        buflen = len(self._buffer)

        # Check if we now have enough data in the buffer for `separator` to
        # fit.
        if buflen - offset >= seplen:
            isep = self._buffer.find(separator, offset)

            if isep != -1:
                # `separator` is in the buffer. `isep` will be used later
                # to retrieve the data.
                break

            # see upper comment for explanation.
            offset = buflen + 1 - seplen
            if offset > self._limit:
                raise asyncio.LimitOverrunError(
                    'Separator is not found, and chunk exceed the limit',
                    offset)

        # Complete message (with full separator) may be present in buffer
        # even when EOF flag is set. This may happen when the last chunk
        # adds data which makes separator be found. That's why we check for
        # EOF *after* inspecting the buffer.
        if self._eof:
            chunk = bytes(self._buffer)
            self._buffer.clear()
            raise asyncio.IncompleteReadError(chunk, None)

        # _wait_for_data() will resume reading if stream was paused.
        await self._wait_for_data('readuntil')

    if isep > self._limit:
        raise asyncio.LimitOverrunError(
            'Separator is found, but chunk is longer than limit', isep)

    chunk = self._buffer[:isep + seplen]
    del self._buffer[:isep + seplen]
    self._maybe_resume_transport()
    return bytes(chunk)
bcdb41a61a562f46dd1f1a1811a0e7b1ea783458
120,706
def distance_great_circle(pointA, pointB):
    """
    Compute the distance between two points.

    NOTE(review): despite the name and the original description ("Great
    Circle Distance"), this computes the planar *Euclidean* distance
    between the two coordinate pairs — there is no spherical math here.
    Confirm which behaviour callers expect before renaming or replacing
    the formula.

    Parameters
    ----------
    pointA : Tuple
        Point A.
    pointB : Tuple
        Point B.

    Returns
    -------
    distance : Float
        Euclidean distance between A and B.
    """
    return ((pointA[0] - pointB[0])**2 + (pointA[1] - pointB[1])**2)**0.5
3114adc04dc75577f0c9eb02fb46e93830528b61
120,710
import re


def strip_ansi_sequences(text):
    """Return a string cleaned of ANSI sequences"""
    # Pattern by Martijn Pieters:
    # https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
    pattern = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
    return pattern.sub('', text)
89699eb64ae6053c6b440844810b7b56721c39de
120,712
def list_range(x):
    """
    Returns the range of a list.
    """
    lo, hi = min(x), max(x)
    return hi - lo
43afbd92537f6d1e044c6ecafabcb82b0a823056
120,713
import random


def move_and_drink(organism, population_dict, world, position_hash_table=None):
    """
    Move and drink. Each occurs with probability 1/2.
    """
    # randrange(0, 8): values 0-3 -> drink, 4-7 -> move (each p = 1/2).
    return 'drink' if random.randrange(0, 8) < 4 else 'move'
9c105403c19c6c3ed62b84753ce124d242f89f33
120,714
def exec_time(pl, segment_info):
    """Return execution time of the last command

    Highlight group used: ``exec_time``
    """
    args = segment_info.get("args", None)
    execution_time = getattr(args, "execution_time", 0)
    if not execution_time:
        # No (or zero) execution time recorded: render nothing.
        return None
    return [{"contents": f"{execution_time:.2f}s", "highlight_groups": ["exec_time"]}]
931a0ce5a1c5e2afa945a594b4480f99830c6e04
120,718
def _filter_domain_id_from_parents(domain_id, tree): """Removes the domain_id from the tree if present""" new_tree = None if tree: parent, children = next(iter(tree.items())) # Don't add the domain id to the parents hierarchy if parent != domain_id: new_tree = {parent: _filter_domain_id_from_parents(domain_id, children)} return new_tree
2daf7a200efebc67e1a963582d89507ecb01344b
120,721
def _return_gene_body_bounds(df): """ Get start/end locations of a gene Parameters: ----------- df df that is subset from reading a GTF file using the GTFparse library. Returns: -------- start, end Notes: ------ (1) This function could be extended to other features though would require an additioanl level of identifying the max/min of feature sets that have multiple rows in a GTF (as opposed to the "gene" feature, which has only a single row / set of values. """ gene_body = df.loc[df["feature"] == "gene"] return gene_body.start.values[0], gene_body.end.values[0]
fd4ba665a609111e9253f20ef30059bf628bed39
120,723
def progress_reporting_sum(numbers, progress):
    """
    Sum a list of numbers, reporting progress at each step.
    """
    count = len(numbers)
    total = 0
    for index, value in enumerate(numbers):
        progress((index, count))
        total += value
    # Final report: everything has been processed.
    progress((count, count))
    return total
3c759f2951bc30583de1a61459dc591a71004088
120,724
def _transform_summarized_rep(summarized_rep):
    """change around the keys of the summarized_representation dictionary

    This makes them more readable

    This function makes strong assumptions about what the keys look like
    (see PooledVentralStreams.summarize_representation for more info on
    this): a single string or tuples of the form `(a, b)`, `(a, b, c)`, or
    `((a, b, c), d)`, where all of `a,b,c,d` are strings or ints. Single
    strings get an `error_` prefix; pairs become `error_scale_a_band_b`.

    NOTE(review): for a nested key `((a, b, c), d)` the code passes four
    values to a three-placeholder format string, producing
    `error_scale_a_band_b_c` — `d` is silently ignored by str.format. For
    a flat tuple longer than 2, `'_'.join(k)` requires all-string parts
    (int parts would raise) and does not insert "scale"/"band". Confirm
    the intended formats against summarize_representation before relying
    on this docstring's table.

    Parameters
    ----------
    summarized_rep : dict
        the dictionary whose keys we want to remap.

    Returns
    -------
    summarized_rep : dict
        dictionary with keys remapped
    """
    new_summarized_rep = {}
    for k, v in summarized_rep.items():
        if not isinstance(k, tuple):
            # Plain string key: just add the error_ prefix.
            new_summarized_rep["error_" + k] = v
        elif isinstance(k[0], tuple):
            # Nested key ((a, b, c), d): a/b/c fill the three placeholders,
            # d is an extra positional argument that str.format drops.
            new_summarized_rep["error_scale_{}_band_{}_{}".format(*k[0], k[1])] = v
        else:
            if len(k) == 2:
                # (scale, band) pair.
                new_summarized_rep["error_scale_{}_band_{}".format(*k)] = v
            else:
                # Any other flat tuple: join all (string) parts.
                new_summarized_rep['error_' + '_'.join(k)] = v
    return new_summarized_rep
acaaf9f4753ef8911e9354c2f7bd8ae27cc9b0f1
120,726
def valueForKeyPath(dict, keypath, default = None):
    """
    Get the keypath value of the specified dictionary.

    ``keypath`` is a dot-separated chain of keys; ``default`` is returned
    as soon as any key along the path is missing.
    """
    node = dict
    for key in keypath.split('.'):
        if key not in node:
            return default
        node = node[key]
    return node
197b4aa12011a6166761d6751cb5449dd79cbe6b
120,727
from pathlib import Path


def check_file(file: str):
    """Check file exists."""
    path = Path(file)
    # Only these extensions are accepted.
    if not file.endswith((".cfg", ".txt", ".log")):
        raise ValueError(f"{path} must end with '.cfg', '.txt' or '.log'")
    if not path.exists():
        raise SystemExit(f"{path} doesn't exist.")
    return file
9c599e0f9f6f5db575ef598e445efb00af11dd13
120,728
def join_list(delimeter):
    """Join a list into a string using the delimeter.

    This is just a wrapper for string.join.

    Args:
        delimeter: The delimiter to use when joining the string.

    Returns:
        Method which joins the list into a string with the delimeter.
    """
    return lambda value: delimeter.join(value)
1c06cb4dc92604c1d858105d1f22a03cb75c8ed9
120,734
def is_same_class_or_subclass(target, main_class):
    """
    checks if target is the same class or a subclass of main_class

    :param target: instance to test
    :param main_class: class to test against
    :return: bool
    """
    if isinstance(target, main_class):
        return True
    return issubclass(target.__class__, main_class)
26c7249422d27ab5cb735e4cacd8f61a0b5203bb
120,736
import math


def get_confidence_interval(mean, std_dev, sample_size, ci=1.96):
    """
    Computes the confidence interval assuming a normal distribution of the data

    :param mean: The sample mean
    :param std_dev: The standard deviation (ideally population - otherwise sample std.dev)
    :param sample_size: The number of samples used to estimate mean and std_dev
    :param ci: The z-* multiplier for the desired confidence interval (e.g. 1.96 = 95% CI)
    :return: [lower_bound, upper_bound] of the specified confidence interval
    """
    standard_error = std_dev / math.sqrt(sample_size)
    margin = ci * standard_error
    return [mean - margin, mean + margin]
292740550b3e1c33333daf89a3dc339b4ba6922c
120,737
def account_configs(config):
    """Return the map from account id to their configuration.

    :param config: mapping of bank -> {account name -> conf dict}; each
        conf dict must contain an "account_id" key.
    :return: dict mapping account_id -> conf dict with "account_id" removed.
    """
    # The bank and account-name keys were never used, so iterate values
    # directly and build the result as one dict comprehension.
    return {
        conf["account_id"]: {key: value for key, value in conf.items()
                             if key != "account_id"}
        for accounts in config.values()
        for conf in accounts.values()
    }
962fb53bb054df28ab827a4bf8af84ffc4719fe8
120,740
def get_task_error_message(task_result):
    """
    Parse error message from task result.

    Prefers ``result['detail']`` then ``result['message']``; falls back to
    the string form of whatever payload is available.
    """
    try:
        payload = task_result['result']
    except Exception:
        # Not subscriptable / no 'result' key: use the value as-is.
        payload = task_result
    for field in ('detail', 'message'):
        try:
            return payload[field]
        except Exception:
            pass
    return str(payload)
e761e98c96446c15d8c2bf465f32c663febcfd8b
120,741
def row_year_is_in_bounds(years_search_string, year_row):
    """Returns true if the year in the passed row is in the passed year bound."""
    parts = years_search_string.split("-")
    low, high = parts[0], parts[1]
    # The bound may be written in either order ("1990-2000" or "2000-1990").
    return low <= year_row <= high or high <= year_row <= low
25fc026ec679b1a73a89ce02b2a8130547db3901
120,742
def november_days_left(d):
    """d is an integer representing the date in November for Thanksgiving

    returns the remaining days in November (which has 30 days)"""
    remaining = 30 - d
    return remaining
1e2a6458c2c9a510241afdae3f3aa49bb11bb02c
120,745
def point_touches_rectangle(point, rect):
    """
    Return 1 if the point is inside the rectangle or touches its
    boundary, else 0.

    point_touches_rectangle(point, rect) -> int (0 or 1)

    Parameters
    ----------
    point : Point or Tuple
        indexable (x, y) pair
    rect : Rectangle
        object exposing left/right/lower/upper bounds
    """
    within_x = rect.left <= point[0] <= rect.right
    within_y = rect.lower <= point[1] <= rect.upper
    return 1 if (within_x and within_y) else 0
aeee8515f041a8e1bd6a45f3da375805495224c3
120,746
import math


def as_minutes(s):
    """Format a duration of ``s`` seconds as an 'Xh Ym Zs' string."""
    hours = math.floor(s / 3600)
    total_minutes = math.floor(s / 60)
    minutes = total_minutes - hours * 60
    seconds = s - total_minutes * 60
    return '%dh %dm %ds' % (hours, minutes, seconds)
f10daf5c05a63f6e01810346a839522137214571
120,747
import time


def trainNetwork(trainer, runs, verbose):
    """
    Train the network ``runs`` times and return timing statistics.

    Bug fixes: the loop previously ran ``range(1, runs)`` and therefore
    trained only ``runs - 1`` times; it also divided by zero when fewer
    than one epoch was recorded.

    :param trainer: neural network trainer exposing ``train()``
    :param runs: number of training runs to perform
    :param verbose: print progress percentage when True
    :return totalTime: total wall-clock time spent training
    :return averageTimePerEpoch: mean time per run (0.0 when runs == 0)
    :return trainerErrorValues: raw error value from each ``train()`` call
    :return epochTimes: duration of each individual run
    """
    epochTimes = []
    trainerErrorValues = []
    globalStart = time.time()
    # range(runs), not range(1, runs): train exactly `runs` times
    for i in range(runs):
        if verbose:
            print(str((i / (runs * 1.0)) * 100) + '% complete')
        localStart = time.time()
        trainerErrorValues.append(trainer.train())
        localEnd = time.time()
        epochTimes.append(localEnd - localStart)
    globalEnd = time.time()
    totalTime = globalEnd - globalStart
    # guard the mean against an empty epoch list (runs == 0)
    averageTimePerEpoch = sum(epochTimes) / len(epochTimes) if epochTimes else 0.0
    return totalTime, averageTimePerEpoch, trainerErrorValues, epochTimes
0ff68f30726030071a2129b7ab5ff47ba76f0800
120,751
def replace(f):
    """Method decorator flagging ``f`` as a replacement for the method of
    the same name in the full class."""
    setattr(f, '__replace', True)
    return f
dd16efdbc0730a0c745860bc5b4a5bed1b5cd073
120,753
def flatten_documents_to_strings(docs):
    """Collapse nested documents into one string per document.

    Input shape is [document][sentence][word]; each document becomes a
    single string with all its words joined by single spaces, e.g.::

        [[['w1', 'w2'], ['w3']]]  ->  ['w1 w2 w3']
    """
    return [' '.join(' '.join(sentence) for sentence in document)
            for document in docs]
a58b5b6d8db0e488efa51cda4c1d6bab22fa9a67
120,758
from operator import add


def add64(a, b):
    """Pairwise-add two values, each represented as two 32-bit words.

    NOTE(review): the two halves are summed independently — no carry is
    propagated from the low word into the high word. TODO confirm that
    callers handle overflow themselves.
    """
    return [add(a[i], b[i]) for i in (0, 1)]
f774c2a9e04771c5ab8c920c5107a26947c111ba
120,759
def str2tuple(string, sep=","):
    """Parse a separator-delimited string of numbers into a float tuple.

    Example: "1.0,2" -> (1.0, 2.0)
    """
    return tuple(float(token) for token in string.split(sep))
7514cc0dcff2c2aa320a96ee16909b16f95ba5cb
120,761
def bboxCenter(obj, radius=False):
    """Get the world-space bounding box center of a mesh object.

    Arguments:
        obj (dagNode): mesh object
        radius (bool): if True, also return half the bounding box X extent

    Returns:
        list of float: the bounding box center in world space, or a
        (center, radius) pair when ``radius`` is True
    """
    bbox = obj.getBoundingBox(invisible=True, space='world')
    low, high = bbox[0], bbox[1]
    center = [(low[axis] + high[axis]) / 2.0 for axis in range(3)]
    if radius:
        # radius is derived from the X extent only
        return center, abs((low[0] - high[0]) / 2)
    return center
19f90cc6235cf93860ced8101ba4f733d84560f4
120,766
def afm_atoms_creator(in_data: list, custom_atom='Po') -> list:
    """Insert one "fake" atom species into POSCAR-style rows.

    Args:
        in_data (list): rows of a POSCAR-type file.
        custom_atom (str): symbol of the placeholder atom to prepend.

    The extra species lets spin-up and spin-down sites be treated as
    distinct atoms, so positive and negative contributions to the total
    energy can be estimated separately.
    """
    out_data = in_data.copy()
    out_data[6] = 'direct\n'
    # first row looks like "<stoichiometry> str #:<id>"; keep the prefix
    stech_str = in_data[0].split(' str #:')[0]
    # drop stoichiometric digits, keeping element symbols only
    symbols_only = ''.join(ch for ch in stech_str if not ch.isnumeric())
    out_data[5] = f'{custom_atom} ' + symbols_only + '\n' + in_data[5]
    return out_data
da7bef3a1799b8bbef71ac2750585192b0c1e6f2
120,768
def compute_gradient(y, y_predicted, tx, N=1, regularization=0):
    """Gradient of the (optionally regularized) squared loss for a linear
    regression model.

    Parameters
    ----------
    y : np.ndarray
        Class labels
    y_predicted : np.ndarray
        Labels predicted by the model
    tx : np.ndarray
        Data matrix
    N : int
        Dataset size used to average the gradient
    regularization : int or np.ndarray
        Additive regularization term

    Returns
    -------
    np.ndarray
        Gradient of the mean square loss for the linear model
    """
    residual = y - y_predicted
    return regularization - tx.T.dot(residual) / N
6b5b69ca3726f1d05010a0e73e12da9e84e7d50b
120,769
import decimal


def decimal_default(obj):
    """Provide a json-encodable conversion for decimal.Decimal values.

    ``json`` cannot serialize ``decimal.Decimal`` natively; pass this as
    the ``default=`` hook so such values are emitted as floats.

    :param obj: the object json failed to serialize
    :return: float equivalent when ``obj`` is a Decimal
    :raises TypeError: for any other type, matching json's contract
    """
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    # informative message instead of the previous bare `raise TypeError`
    raise TypeError(
        f"Object of type {type(obj).__name__} is not JSON serializable")
13e7f5c7dd12185e2e16a1ee462f35ea0b88c472
120,777
from typing import List


def remove_short_sents(sents: List[str], min_num_words: int = 4) -> List[str]:
    """Drop sentences with fewer than ``min_num_words`` whitespace-separated
    words. Default threshold is 4."""
    def long_enough(sentence: str) -> bool:
        return len(sentence.split()) >= min_num_words

    return list(filter(long_enough, sents))
5e7699b9c469dca6ee4b7412c3c1ba91ddb9f047
120,779
def _unbound_tae_starter(tae, *args, **kwargs): """ Unbound function to be used by joblibs Parallel, since directly passing the TAE results in pickling-problems. Parameters ---------- tae: ExecuteTARun tae to be used *args, **kwargs: various arguments to the tae Returns ------- tae_results: tuple return from tae.start """ return tae.start(*args, **kwargs)
f8256c0a23f49b6c8e0fadab1af7575c512df37f
120,780
def cmp_schema(schema1, schema2):
    """
    Compare two Schemas for equivalence.

    Args:
        schema1 (Schema): Schema instance to compare
        schema2 (Schema): Schema instance to compare against

    Returns:
        bool: True when both the schema string and schema type match
    """
    same_str = schema1.schema_str == schema2.schema_str
    same_type = schema1.schema_type == schema2.schema_type
    return same_str and same_type
d1fc6a7d4657a70f91ffffd00d052012c2b08ee3
120,787
def sample_population(trips_df, sample_perc, attributes_df=None, weight_col='freq'):
    """
    Return the trips of a random sample of the travel population.
    We merge the trips and attribute datasets to enable probability weights based on population demographics.

    :params DataFrame trips_df: Trips dataset
    :params DataFrame attributes_df: Population attributes dataset.
    :params float sample_perc: Sampling percentage
    :params string weight_col: The field to use for probability weighting

    :return: Pandas DataFrame, a sampled version of the trips_df dataframe
    """
    # NOTE(review): sampling is random (non-deterministic unless a pandas
    # seed is set upstream). trips_df is assumed to carry 'pid' and 'freq'
    # columns, and attributes_df is assumed to be indexed by pid — TODO
    # confirm against callers.
    if attributes_df is not None:
        # Aggregate trip frequency per person, attach demographics via a
        # left join, then sample people with probability proportional to
        # `weight_col`.
        sample_pids = trips_df.groupby('pid')[['freq']].sum().join(
            attributes_df, how='left'
        ).sample(
            frac=sample_perc,
            weights=weight_col
        ).index
    else:
        # No demographics available: weight by aggregated trip frequency
        # only (weight_col defaults to 'freq', the summed column).
        sample_pids = trips_df.groupby('pid')[['freq']].sum().sample(
            frac=sample_perc,
            weights=weight_col
        ).index

    # Keep every trip belonging to a sampled person.
    return trips_df[trips_df.pid.isin(sample_pids)]
58ff3afb9276e9c6091564626f6d44d2033bfb90
120,792
from typing import Callable


def discrete_binary_search(is_ok: Callable[[int], bool], n: int) -> int:
    """Binary search over a boolean function's domain.

    Parameters
    ----------
    is_ok : bool
        Boolean monotone function defined on range(n)
    n : int
        length of the search scope

    Returns
    -------
    i : int
        first index i such that is_ok(i), or n when is_ok is all False

    :complexity: O(log(n))
    """
    lo, hi = 0, n
    while lo < hi:
        mid = (lo + hi) // 2
        if mid < n and not is_ok(mid):
            lo = mid + 1
        else:
            hi = mid
    return lo
7bea770dc12b10241bdac74f6423fe7968d154b5
120,799
def get_lomb_frequency_ratio(lomb_model, i):
    """
    Ratio of the i-th fitted frequency to the first (fundamental)
    frequency of a fitted Lomb-Scargle model.
    """
    fits = lomb_model['freq_fits']
    return fits[i - 1]['freq'] / fits[0]['freq']
9ea8027f0a58df5dfcd08ede271d8c6bd4ab8e58
120,801
def map_samples_to_indices(c):
    """Return a dict mapping sample names (key) to sample indices in the
    numpy genotype arrays (value).

    Database sample ids are 1-based while the genotype arrays are
    0-based, hence the ``- 1``.
    """
    c.execute("select sample_id, name from samples")
    mapping = {}
    for row in c:
        mapping[row['name']] = row['sample_id'] - 1
    return mapping
0d2d22daad51b6dd54a2b12621324e51e1f5d61d
120,808
def interpolate(val, min, max):
    """
    Map ``val`` from [0, 1] onto the range [min, max].

    Used primarily to scale values produced by random.random() to a
    target range. (The min/max parameter names shadow the builtins but
    are kept — they are part of the public interface.)
    """
    span = max - min
    return min + val * span
f5cf4be43afc59d4dafdca5b7d53e42eb9525a4e
120,809
import calendar


def nearest_dom(year, month, day):
    """
    Clamp ``day`` to the number of days actually available in year/month.

    For example: nearest_dom(2017, 2, 30) #=> 28
    """
    days_in_month = calendar.monthrange(year, month)[1]
    return day if day <= days_in_month else days_in_month
e918c3cb735be749925f14ab646e762a83c75570
120,811
def get_job_id_from_url(url_path):
    """Given a smrtlink job url (e.g.,
    https://smrtlink-alpha.nanofluidics.com:8243/sl/#/analysis/job/13695),
    return the trailing job id ('13695')."""
    return url_path.rpartition('/')[2]
87cdc7b8487856ec7e0dfcc1e6962b1d218d6bde
120,813
import re


def strip_html_tags(text):
    """
    Strip HTML tags from text generated from Markdown script.

    Handles both bare tags (``<p>``) and tags carrying attributes
    (``<a href="...">``), which the previous pattern ``<\\w+>|</\\w+>``
    missed.

    :param text: text in HTML
    :return: text without HTML tags
    """
    # `</?` matches optional closing slash; `[^>]*` allows attributes
    html_pattern = re.compile(r'</?\w+[^>]*>')
    return html_pattern.sub('', text)
a93fdaefa4d20dfcc647f5a3cd8fa801709668a4
120,821
def split_semicolon_filter(s):
    """
    Filter converting semicolon-delimited text into a list.

    Returns None when the input is None.
    """
    if s is None:
        return None
    return s.strip().split(';')
4805f7519daed31fd82fcfc78b6ccdbc0fc132fe
120,824
def normalize_vector(vector: float) -> float:
    """
    Normalize the vector angle from the limited trigonometry range that
    the robot can use.

    :param vector: vector value between 0.25 and 0.75
    :return: normalized vector value between 0.0 and 1.0
    """
    shifted = vector - 0.25
    return shifted / 0.5
cdc19db9f64f8585edd053f0dee69b9ef6fb26dd
120,832
def count_stair_ways(n):
    """Count the number of ways to walk up a flight of stairs with 'n'
    steps while taking a maximum of 2 steps at a time.

    Computed iteratively (Fibonacci-style recurrence) instead of the
    naive double recursion, which was exponential in n.

    >>> count_stair_ways(2)
    2
    >>> count_stair_ways(3)
    3
    >>> count_stair_ways(4)
    5
    >>> count_stair_ways(5)
    8
    """
    if n < 0:
        return 0
    # prev/curr track ways(k-1)/ways(k); ways(0) == 1 by convention
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return curr
de6a72eb722df431b77f95dffb7f48c3cefc5a18
120,839
def make_oracle_from_labels(class_labels):
    """Build an oracle that answers whether two indexed items share a class.

    Parameters
    ----------
    class_labels : array_like
        Class labels the oracle consults when queried.

    Returns
    -------
    function
        ``oracle(a, b)`` -> True iff ``class_labels[a] == class_labels[b]``.
    """
    def oracle(a, b):
        return class_labels[a] == class_labels[b]

    return oracle
ed727a352321aeb2c39ed4585b7ecfb94c8327fb
120,840
def strftime_all_years(dt, fmt):
    """Exactly like datetime.strftime but supports year<1900.

    Pre-1900 years are rejected by some strftime implementations, so we
    format a 1900 stand-in and splice the real year back in afterwards.
    """
    assert '%%Y' not in fmt  # unlikely but just in case
    if dt.year >= 1900:
        return dt.strftime(fmt)
    # Escape %Y, render with a safe year, then substitute the real year.
    escaped = fmt.replace('%Y', '%%Y')
    rendered = dt.replace(year=1900).strftime(escaped)
    return rendered.replace('%Y', str(dt.year))
a41843508efba68c72a90759cc3033ffe7dc2fd1
120,844
def _invert_footprint(footprint): """Change the order of the values in `footprint`. This is a patch for the *weird* footprint inversion in `ndi.grey_morphology` [1]_. Parameters ---------- footprint : array The input footprint. Returns ------- inverted : array, same shape and type as `footprint` The footprint, in opposite order. Examples -------- >>> footprint = cp.asarray([[0, 0, 0], [0, 1, 1], [0, 1, 1]], cp.uint8) >>> _invert_footprint(footprint) array([[1, 1, 0], [1, 1, 0], [0, 0, 0]], dtype=uint8) References ---------- .. [1] https://github.com/scipy/scipy/blob/ec20ababa400e39ac3ffc9148c01ef86d5349332/scipy/ndimage/morphology.py#L1285 # noqa """ inverted = footprint[(slice(None, None, -1),) * footprint.ndim] return inverted
d23f80886a03d5fa723559c234a933766ec05612
120,848
def install_conda_target(conda_target, conda_context, skip_environment=False):
    """
    Install the specified target into its own environment (or directly
    into the base environment when ``skip_environment`` is set).

    Return the process exit code (i.e. 0 in case of success).
    """
    if skip_environment:
        return conda_context.exec_install([conda_target.package_specifier])
    # dedicated environment named after the target
    create_args = [
        "--name", conda_target.install_environment,
        conda_target.package_specifier,
    ]
    return conda_context.exec_create(create_args)
3daf3dc55fab83c4da9b749bd2dcce537b091b12
120,854
import time


def sleep_secs(max_sleep, end_time=999999999999999.9):
    """
    Calculate time left to sleep in seconds.

    The result is clamped to [0, max_sleep] and never exceeds the number
    of seconds remaining until ``end_time``. Useful both for periodic
    wake-ups and for enforcing a hard deadline.

    :param max_sleep: max sleep time, in seconds
    :type max_sleep: float
    :param end_time: hard deadline in seconds since epoch (defaults to a
        value far enough in the future to be effectively unbounded)
    :type end_time: float
    :return: sleep time such that 0 <= ret <= max_sleep and
        ret <= (end_time - time.time())
    :rtype: float
    """
    remaining = end_time - time.time()
    candidate = min(remaining, max_sleep)
    return max(0.0, candidate)
68a6a9b56b05315af29dcf04c6e37ca97b22962c
120,857
def time_print(tdiff):
    """Convert a duration in seconds to a sensible (value, units) pair.

    Args:
        tdiff (float): time in seconds

    Returns:
        tuple: (float time, string units)
    """
    units = 'seconds'
    # successively promote to larger units while the value overflows them
    for factor, name in ((60, 'minutes'), (60, 'hours'), (24, 'days')):
        if tdiff <= factor:
            break
        tdiff /= factor
        units = name
    return tdiff, units
68506bc98c7de222a0d6e0b56207f75d537a162d
120,858
def to_rgba(x):
    """Return the first four channels (RGBA) of an image array."""
    rgba_channels = slice(None, 4)
    return x[..., rgba_channels]
581ae79260a47a6cab75d9a6a9403a38ed84b11b
120,861
def fix_udp_checksum_fast(packet: bytes) -> bytes:
    r"""
    Overwrite the UDP checksum of an EthernetII/IPv4 frame with
    ``b"\x00\x00"`` (UDP over IPv4 treats a zero checksum as "none").

    The docstring is now raw: the original non-raw version embedded real
    NUL characters via the ``\x00`` escapes.

    :param packet: Raw bytes of EthernetII frame
    :return: Raw bytes with the checksum zeroed, or the input unchanged
        when it is not a UDP packet
    """
    # 42 = 14 (Ethernet) + 20 (IPv4) + 8 (UDP header); byte 23 is the
    # IPv4 protocol field (0x11 == UDP).
    if len(packet) < 42 or packet[23] != 0x11:  # invalid UDP packet
        return packet
    # checksum sits at bytes 40-41 (offset 6 within the UDP header)
    return packet[:40] + b"\x00\x00" + packet[42:]
49a364ed8332d384469301f8762feb94b7287eac
120,867
import string
import random


def random_string(length=-1, charset=string.ascii_letters):
    """
    Return a random string of ``length`` characters.

    When no length is given, the result is between 6 and 15 characters
    long. ``charset`` selects the alphabet, defaulting to ASCII letters.
    """
    if length == -1:
        length = random.randrange(6, 16)
    # result variable renamed so it no longer shadows the function name
    return ''.join(random.choice(charset) for _ in range(length))
1ff5eb4ffa449114479cf8c2101325b96a2700f5
120,872
def mesos_masters_quorum_size(mesos_masters):
    """Calculate the required quorum size from a list of mesos masters.

    Uses floor division so the result is an integer under Python 3; the
    previous ``/`` produced a float (e.g. 2.5 for a 3-master ensemble).
    """
    return (len(mesos_masters) // 2) + 1
00243edce9bcda076fff37e9a9daac0e4ae66b6a
120,875
def keep_most(terms_d, terms_amout):
    """
    Given a dictionary of Terms->Frequencies and a number of terms to
    keep, return a dict containing the ``terms_amout`` terms with the
    highest frequency.

    Fixed for Python 3: ``dict.iteritems`` no longer exists, so
    ``items()`` is used instead.
    """
    # sort (frequency, term) pairs descending by frequency (ties broken
    # by term, matching the original sort-then-reverse behavior)
    terms_l = sorted(((v, k) for (k, v) in terms_d.items()), reverse=True)
    most_terms_l = terms_l[:terms_amout]
    return {k: v for (v, k) in most_terms_l}
4178270b62b0f44808acf3fc930365e75d43a0e6
120,878
from pathlib import Path


def get_setting_paths() -> list:
    """
    Return a list of absolute paths to the server settings and plugin
    settings.

    Collects every non-.jar file in the directory above this module, plus
    each plugin directory (under ``plugins/``) that contains a
    ``config.yml``.
    """
    settings_paths = []
    server_dir = Path(__file__).resolve().parents[1]
    for p in server_dir.iterdir():
        if p.is_file() and p.suffix != ".jar":
            settings_paths.append(p.absolute())
    plugin_dir = server_dir / "plugins"
    if plugin_dir.is_dir():
        for p in plugin_dir.iterdir():
            # bug fix: `p.is_dir` (missing parentheses) was a method
            # object, i.e. always truthy — now actually called
            if p.is_dir() and (p / "config.yml").is_file():
                settings_paths.append(p.absolute())
    return settings_paths
83c54c59554dff6d9498036a96e52750b4810a53
120,884
def str_lower_first(value):
    """
    Lower-case the first character of ``value``, leaving the rest
    unchanged. Safe for the empty string.
    """
    if not value:
        return value
    return value[0].lower() + value[1:]
baf48ad92bba12bed15a618d4305538a4bfb61aa
120,888
def preprocess_arguments(argsets, converters):
    """Convert and collect arguments in order of priority.

    Parameters
    ----------
    argsets : [{argname: argval}]
        a list of argument sets, earlier entries taking priority
    converters : {argname: function}
        conversion function for each recognized argument

    Returns
    -------
    result : {argname: argval}
        processed arguments, one entry per converter
    """
    result = {}
    for argset in argsets:
        for argname, argval in argset.items():
            # reject arguments no converter knows about
            if argname not in converters:
                raise ValueError("Unrecognized argument: {0}".format(argname))
            # first non-None occurrence wins (higher-priority set)
            if argname not in result and argval is not None:
                result[argname] = converters[argname](argval)
    # every converter must have received a value
    missing = set(converters) - set(result)
    if missing:
        s = "The following arguments are missing: {0}".format(list(missing))
        raise ValueError(s)
    return result
1894571a227bc7cd95020e8828b67717b7ca4d4d
120,894