content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def _check_token(token, names):
    """Return True if *token* matches any regex pattern in *names*.

    Some versions of the regex engine only support about 100 groups per
    pattern, so *names* is processed in chunks of at most 99 patterns.
    Best case the token matches in the first chunk; worst case every
    chunk is scanned, just like a plain linear search.
    """
    chunk_size = 99
    for start in range(0, len(names), chunk_size):
        chunk = names[start:start + chunk_size]
        pattern = "(" + ")|(".join(chunk) + ")"
        # re.match anchors at the start only, as the original did.
        if re.match(pattern, token) is not None:
            return True
    return False
af6e8d5a75584610034c1f77fbd2c1db4cf3ad43
108,155
def eval_poly(coeff, x):
    """Evaluate a polynomial, given as integer coefficients, at x.

    Uses Horner's method, which is linear in the number of
    coefficients. ``coeff[i]`` is the coefficient of ``x**i``.

    Test f(x)=4x^3+3x^2+2x+1, f(1)=10
    >>> abs(eval_poly([1,2,3,4],1.0)-10) < 1e-7
    True

    Test f(x)=4x^3+2x, f(2)=36
    >>> abs(eval_poly([0,2,0,4],2.0)-36) < 1e-7
    True
    """
    # Renamed the accumulator: the original used `sum`, shadowing the builtin.
    result = coeff[-1]
    for a in reversed(coeff[:-1]):
        result = result * x + a
    return result
7a91be18485bea4668ad17c536e0aa572cd144e6
108,167
def calc_mod(scores, ability, prof_mod):
    """Compute the total modifier for an ability roll.

    scores: mapping of ability name -> ability score.
    ability: the ability used for the roll.
    prof_mod: proficiency bonus, always 0 or more.

    Returns ``(pos_neg, mod)`` where ``mod`` is the total modifier and
    ``pos_neg`` is ``"+"`` when the modifier is zero or greater, else
    the empty string (a negative number renders its own "-"), matching
    typical 5e notation.
    """
    # Standard 5e modifier equation. int() truncates toward zero,
    # exactly as the original implementation did.
    total = int((scores[ability] - 10) / 2 + prof_mod)
    sign = "+" if total >= 0 else ""
    return sign, total
b656cc2115d69e7d16f1d602fbc2a2cbf17b3e00
108,179
def filter_point(point: int) -> int:
    """Skew a pixel value toward black or white without thresholding.

    Values below 160 are darkened (divided by 1.2); values at or above
    160 are brightened (multiplied by 2). A crude but effective way of
    pushing an image toward black-and-white.
    """
    return round(point / 1.2) if point < 160 else round(point * 2)
e9f78b40220515488e6a9b2115b5773a06b9bfd1
108,181
def sum(self, key_selector=None):
    """Compute the sum of the values in the source sequence.

    If *key_selector* is given, it is applied to every element first and
    the transformed values are summed; otherwise the raw items are
    summed via a reduce with seed 0.

    Example:
        res = source.sum()
        res = source.sum(lambda x: x.value)

    key_selector -- {Function} [Optional] transform applied to each element.

    Returns an observable {Observable} sequence containing a single
    element: the sum of the values in the source sequence.
    """
    if not key_selector:
        return self.reduce(seed=0, accumulator=lambda acc, item: acc + item)
    return self.map(key_selector).sum()
6ef239cfef93099e3f1efbec5af7db372930fa75
108,189
def coerce(string):
    """Coerce *string* into a more specific type when possible.

    "True"/"False" become booleans; otherwise an int is attempted, then
    a float; if neither parses, the input is returned unchanged.
    """
    if string == "True":
        return True
    if string == "False":
        return False
    # int()/float() raise ValueError on unparseable strings and
    # TypeError on non-string input; the original caught bare
    # Exception, which could hide unrelated bugs.
    try:
        return int(string)
    except (TypeError, ValueError):
        pass
    try:
        return float(string)
    except (TypeError, ValueError):
        return string
bff5179d2fdb3a01d19aebd85f10d44284c7b313
108,190
def bordaScores(instance):
    """Compute the total Borda scores of all alternatives of the instance.

    Within an indifference class, all alternatives are assigned the
    smallest score one alternative from the class would have gotten,
    had the order been strict. For instance, for the order
    ({a1}, {a2, a3}, {a4}), a1 gets score 3, a2 and a3 score 1 and a4
    score 0.

    :param instance: The instance.
    :type instance: preflibtools.instance.preflibinstance.PreflibInstance

    :return: A dictionary mapping every alternative to its Borda score.
    :rtype: dict
    """
    # BUG FIX: the original tested membership in the single string
    # "toc, soc" (a substring check, so e.g. "oc, s" slipped through),
    # not in the tuple ("toc", "soc").
    if instance.dataType not in ("toc", "soc"):
        raise TypeError("You are trying to compute the Borda scores of an instance of type " +
                        str(instance.dataType) + ", this is not possible.")
    res = {}
    for order in instance.orders:
        # `remaining` counts the alternatives ranked strictly below the
        # current indifference class.
        remaining = instance.nbAlternatives
        for indifClass in order:
            remaining -= len(indifClass)
            for alt in indifClass:
                res[alt] = res.get(alt, 0) + remaining
    return res
43a8d3ca0ca10615e894979b263621d32934e17b
108,191
import random
import string


def generate_random_string() -> str:
    """Generate a random ASCII-letter string of length 3 to 10."""
    length = random.randint(3, 10)
    return "".join(random.choices(string.ascii_letters, k=length))
4b3fe738e3e9a52bef75babc8a963feb1b90fb74
108,192
def args_to_dict(args):
    """Convert request arguments into a plain dictionary.

    A parameter that appears once maps to its single value; a parameter
    that appears several times maps to the list of all of its values.

    :param args: flask's request args object
    :type args: request.args
    :return: a simple dict built from args
    :rtype: dict
    """
    result = {}
    for name, value in args.items(multi=True):
        if name not in result:
            # First occurrence: store the bare value.
            result[name] = value
        else:
            existing = result[name]
            if isinstance(existing, list):
                existing.append(value)
            else:
                # Second occurrence: promote to a list of values.
                result[name] = [existing, value]
    return result
94b81191cfde5fba11e9297254417fa0e5b6be12
108,198
def somethingMatch(regexs, string):
    """Return True if at least one compiled regex matches *string*.

    :param regexs: iterable of compiled regular expressions
    :param string: string to match against
    :returns: True if any regex matched (at the start of) the string.
    """
    for regex in regexs:
        if regex.match(string):
            return True
    return False
d344c3191eedee55dc4972aee4f44338c51fbc8b
108,199
def identity(steps, context):
    """The identity fuzzer: return *steps* unchanged, ignoring *context*."""
    return steps
0b4ac8e1189a7c84da0331d84d2f1d3b18f34b7e
108,205
def actions(self):
    """Return all the action objects, whether enabled or disabled.

    NOTE(review): this hands back the internal collection itself, not a
    copy, so callers share state with this object -- confirm whether a
    defensive copy is intended.
    """
    return self.__actions
9eec5e6df3788259230cceb63f9baa0faa868cc8
108,208
def unpack_trace(data, tr=False):
    """Unpack an (N, M) array into wavelength, time and intensity trace.

    Parameters:
        data: (N, M) 2D array; row 0 holds the time values (from
            column 1 on), column 0 holds the wavelength values (from
            row 1 on), and the remaining block is the intensity data.
        tr: Boolean, if True return only the trace. Default is False.

    Returns:
        t: 1D array of length M-1 giving time values
        wl: 1D array of length N-1 giving wavelength values
        trace: (N-1, M-1) 2D array of intensity data
        (only ``trace`` is returned when ``tr`` is True)
    """
    times = data[0, 1:]
    wavelengths = data[1:, 0]
    intensity = data[1:, 1:]
    if tr == True:  # noqa: E712 -- preserve the original loose comparison
        return intensity
    return times, wavelengths, intensity
ac354e6e2e2d47c4e3f9af1fccf990c9742bfa9f
108,210
def _find_largest_common_prefix(values):
    """Find the longest prefix shared by every value in *values*.

    Useful for optimizing more costly searches. Accepts a list of
    strings, or a list of tuples whose first element is the string to
    compare on.

    :param values: list of strings (or tuples with a string first)
    :return: the longest string prefix common to all values
    """
    first = values[0]
    # Seed the candidate prefix from the first entry.
    prefix = first[0] if isinstance(first, tuple) else first
    for value in values:
        text = value[0] if isinstance(value, tuple) else value
        # Shrink the candidate until it is a real prefix of this value.
        while prefix and text[:len(prefix)] != prefix:
            prefix = prefix[:-1]
    return prefix
c0ece16d9aa7ad6c424170526f71f0536e4eb158
108,212
def prop_has_many_entries(prop_ent):
    """Check if a Wikidata entry has multiple values for a given property.

    The entry counts as multi-valued when index 1 exists, i.e. it holds
    at least two values.
    """
    try:
        _ = prop_ent[1]
    except IndexError:
        return False
    return True
0e701d14e3f7732ed303b22cd052f3b789f5a6d7
108,219
def sort_by_hierarchy(tids, taxdump): """Sort a sequence of taxIds by hierarchy from low to high. Parameters ---------- tids : list of str taxIds to sort taxdump : dict taxonomy database Returns ------- list of str sorted taxIds """ # start with any taxId from pool seq = [tids[0]] pool = [x for x in tids[1:]] # loop until pool is drained while pool: found = False for i, tid in enumerate(pool): # add to end of sequence if taxdump[seq[-1]]['parent'] == tid: seq.append(tid) found = True # add to beginning of sequence elif taxdump[tid]['parent'] == seq[0]: seq.insert(0, tid) found = True # remove from pool if found: pool.pop(i) break # if none can be added, then they are not sortable (i.e., not a # sequence in the taxonomic hierarchy) if not found: raise ValueError('Cannot sort taxIds by hierarchy.') return seq
6e3a1ed782891f8e126e04dab7de7f4c33a247c4
108,220
def flush(hand):
    """Return True if all the cards in *hand* have the same suit.

    Each card is a (rank, suit) pair. An empty hand is not a flush.
    """
    return len({suit for _, suit in hand}) == 1
493048bdf79ecd0aae2a6d49fe0309321d1d2c1a
108,226
def discover_article(word):
    """Return the indefinite article ('a' or 'an') that precedes *word*.

    Chooses 'an' when the first letter is a vowel, 'a' otherwise.
    Note this is a spelling heuristic: words like "hour" or "unicorn"
    are not special-cased.
    """
    # Removed the commented-out "first attempt" loop that used to
    # follow this return -- it was dead code.
    return 'an' if word[0].lower() in 'aeiou' else 'a'
1b1bd2bfe58c2c06db63b87e990110f759f5d884
108,227
def get_overlaps(first_intervals, second_intervals):
    """Return the regions covered by both interval lists.

    Intervals are (start, end) pairs. ``second_intervals`` must be
    sorted by start time: the early-exit search below relies on it.

    >>> get_overlaps([(1, 2), (3, 4), (8, 9)], [(1, 4), (7, 8.5)])
    [(1, 2), (3, 4), (8, 8.5)]
    >>> get_overlaps([(1, 4), (7, 8.5)], [(1, 2), (3, 4), (8, 9)])
    [(1, 2), (3, 4), (8, 8.5)]
    >>> get_overlaps([(1, 8), (9, 10)], [(2, 3), (5, 6), (7, 9.5)])
    [(2, 3), (5, 6), (7, 8), (9, 9.5)]
    >>> get_overlaps([(2, 3), (5, 6), (7, 9.5)], [(1, 8), (9, 10)])
    [(2, 3), (5, 6), (7, 8), (9, 9.5)]
    >>> get_overlaps([(1, 10)], [(0, 5)])
    [(1, 5)]
    >>> get_overlaps([(0, 5)], [(1, 10)])
    [(1, 5)]
    >>> get_overlaps([(1, 6), (7, 9)], [(5.5, 7.5)])
    [(5.5, 6), (7, 7.5)]
    >>> get_overlaps([(5.5, 7.5)], [(1, 6), (7, 9)])
    [(5.5, 6), (7, 7.5)]
    """
    overlaps = []
    for first_interval in first_intervals:
        # Find the index of the first interval in the second list starting
        # after this interval ends. We do not need to search beyond it.
        end_index = None
        for index, second_interval in enumerate(second_intervals):
            if second_interval[0] >= first_interval[1]:
                end_index = index
                break
        # Go through all of these intervals and compute the overlaps.
        for second_interval in second_intervals[:end_index]:
            if second_interval[1] > first_interval[0]:
                # Clip the second interval to the bounds of the first.
                uncovered_region = (max(first_interval[0], second_interval[0]),
                                    min(first_interval[1], second_interval[1]))
                overlaps.append(uncovered_region)
    return overlaps
72b7310c30b77bf9465b3e7ef682f81aba0a28ff
108,232
def request_playlist_videos(youtube, playlist_id, num_results, page_token=None):
    """Request the videos of a playlist via YouTube's API.

    :param youtube: authorized YouTube API client
    :param playlist_id: ID of the playlist to read
    :param num_results: maximum number of items to return
    :param page_token: optional pagination token from a prior response
    :return: the decoded API response
    """
    params = {
        "part": "snippet",
        "maxResults": num_results,
        "playlistId": playlist_id,
    }
    # Only include the token when continuing an earlier page.
    if page_token:
        params["pageToken"] = page_token
    request = youtube.playlistItems().list(**params)
    return request.execute()
55792e921ef6427ea2ba973fd22869a97fe25e60
108,233
def get_schema_obj(obj_id, schema):
    """Search the schema's ``@graph`` for the object with the given ID.

    Returns the object when exactly one node carries ``@id == obj_id``,
    otherwise None (no match, or ambiguous duplicates).
    """
    found = [node for node in schema['@graph'] if node['@id'] == obj_id]
    if len(found) == 1:
        return found[0]
    return None
ae2f48109a2575139b44017ba843586e30f2120d
108,236
import operator


def len_or_approx(tree):
    """Return a cheap size estimate for *tree*.

    Prefers ``operator.length_hint`` (an approximate length may be
    cheaper to obtain), falling back to ``len`` when no hint is
    available.
    """
    try:
        hint = operator.length_hint(tree)
    except TypeError:
        return len(tree)
    return hint
e03af353a39a98357baddc43abcfd32186f51314
108,242
def getListTasksRedirect(entity, params):
    """Build the redirect URL for the List-of-tasks page of an Org.

    :param entity: Org entity whose key id/name goes into the URL
    :param params: Org View params dict containing 'url_name'
    :return: the redirect path string
    """
    url_name = params['url_name']
    key_part = entity.key().id_or_name()
    return '/%s/list_org_tasks/%s' % (url_name, key_part)
ee8204b475b08631867fbef1c09a88b34d53b6f1
108,244
def get_peer_and_channel(peers, scid):
    """Look for the channel identified by *scid* in our list of *peers*.

    Returns the first (peer, channel) pair whose channel carries the
    matching short_channel_id, or (None, None) when nothing matches.
    """
    matches = (
        (peer, channel)
        for peer in peers
        for channel in peer["channels"]
        if channel.get("short_channel_id") == scid
    )
    return next(matches, (None, None))
77d40a8fe89817d6a8bcfee242f12a7aaeff1bba
108,246
def same_species_lists(list1, list2, check_identical=False, only_check_label=False,
                       generate_initial_map=False, strict=True):
    """
    This method compares whether two lists of species or molecules are the same,
    given the comparison options provided. It is used for the `is_same` method of
    :class:`Reaction`, but may also be useful in other situations.

    Args:
        list1 (list): list of :class:`Species` or :class:`Molecule` objects
        list2 (list): list of :class:`Species` or :class:`Molecule` objects
        check_identical (bool, optional): if ``True``, use is_identical comparison and compare atom IDs
        only_check_label (bool, optional): if ``True``, only compare the label attribute of each species
        generate_initial_map (bool, optional): if ``True``, initialize map by pairing atoms with same labels
        strict (bool, optional): if ``False``, perform isomorphism ignoring electrons

    Returns:
        ``True`` if the lists are the same and ``False`` otherwise

    Raises:
        NotImplementedError: for equal-length lists with more than 3
        entries (note: also reached when both lists are empty).
    """
    # Pairwise comparator honoring the chosen comparison mode; defaults
    # capture the outer flags so they need not be threaded through.
    def same(object1, object2, _check_identical=check_identical,
             _only_check_label=only_check_label,
             _generate_initial_map=generate_initial_map, _strict=strict):
        if _only_check_label:
            return str(object1) == str(object2)
        elif _check_identical:
            return object1.is_identical(object2, strict=_strict)
        else:
            return object1.is_isomorphic(object2, generate_initial_map=_generate_initial_map,
                                         strict=_strict)

    # Lists of length 1-3 are matched by explicitly testing every
    # permutation of list2 against list1.
    if len(list1) == len(list2) == 1:
        if same(list1[0], list2[0]):
            return True
    elif len(list1) == len(list2) == 2:
        if same(list1[0], list2[0]) and same(list1[1], list2[1]):
            return True
        elif same(list1[0], list2[1]) and same(list1[1], list2[0]):
            return True
    elif len(list1) == len(list2) == 3:
        if same(list1[0], list2[0]):
            if same(list1[1], list2[1]):
                if same(list1[2], list2[2]):
                    return True
            elif same(list1[1], list2[2]):
                if same(list1[2], list2[1]):
                    return True
        elif same(list1[0], list2[1]):
            if same(list1[1], list2[0]):
                if same(list1[2], list2[2]):
                    return True
            elif same(list1[1], list2[2]):
                if same(list1[2], list2[0]):
                    return True
        elif same(list1[0], list2[2]):
            if same(list1[1], list2[0]):
                if same(list1[2], list2[1]):
                    return True
            elif same(list1[1], list2[1]):
                if same(list1[2], list2[0]):
                    return True
    elif len(list1) == len(list2):
        raise NotImplementedError("Can't check isomorphism of lists with {0} species/molecules".format(len(list1)))
    # nothing found
    return False
e3ac3db8a917a402269a5c56bf52a59ec900c279
108,256
def sanitize_empty(value):
    """Built-in sanitizer replacing the original value with "".

    None is passed through unchanged; every other value becomes the
    empty string.
    """
    if value is None:
        return None
    return ""
da32a119959434d2b134a47e4f363ea3d6c25ff2
108,259
def __isLeft(p0, p1, p2):
    """Tell on which side of the line through p0 and p1 the point p2 lies.

    p0, p1, p2: tuples representing point coordinates (x, y)

    return: float
        > 0 if p2 is left of the line
        < 0 if p2 is right of the line
        = 0 if p2 is on the line
    """
    x0, y0 = p0[0], p0[1]
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    # 2D cross product of (p1 - p0) and (p2 - p0).
    return (x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0)
83720b18d97c42afd2d1e410b8a1970eb39291bc
108,264
def contiguous_chunks(intlist):
    """Calculate a list of contiguous areas in a possibly unsorted list.

    Each run of consecutive integers is reported as a (start, length)
    tuple.

    >>> contiguous_chunks([2, 9, 3, 5, 8, 1])
    [(1, 3), (5, 1), (8, 2)]
    """
    if not intlist:
        return []
    chunks = []
    for value in sorted(intlist):
        # Extend the current run when the value continues it, else start anew.
        if chunks and value == chunks[-1][0] + chunks[-1][1]:
            chunks[-1][1] += 1
        else:
            chunks.append([value, 1])
    return [tuple(chunk) for chunk in chunks]
e39879ba41f1a6c84163ce867bf20622c9a6a2ec
108,265
def ctext(text, colour="green"):
    """Colour some terminal output with ANSI escape codes.

    *colour* may be a full name ("red", "green", ...), its short form
    ("r", "g", ...), or "off". Raises KeyError for unknown names.
    """
    # High-intensity foreground colours, addressable by name or shorthand.
    palette = {}
    for names, code in (
        (("off",), "\033[0m"),
        (("black", "bl"), "\033[0;90m"),
        (("red", "r"), "\033[0;91m"),
        (("green", "g"), "\033[0;92m"),
        (("yellow", "y"), "\033[0;93m"),
        (("blue", "b"), "\033[0;94m"),
        (("purple", "p"), "\033[0;95m"),
        (("cyan", "c"), "\033[0;96m"),
        (("white", "w"), "\033[0;97m"),
    ):
        for name in names:
            palette[name] = code
    return f"{palette[colour]}{text}{palette['off']}"
c472022647b5097039a7a2b7e6db2257f7ba63e0
108,272
import re

# Collapses runs of adjacent wildcards into one; compiled once at import
# time instead of on every call, and written as a raw string (the
# original escaped '<' and '>' unnecessarily in a non-raw literal).
_WILDCARD_RUN = re.compile(r'(?:<\*>)+')


def check_numeric(token):
    """Replace the numeric parts of *token* with the wildcard ``<*>``.

    Each numeric character is replaced individually, then consecutive
    wildcards are merged, so "abc123" becomes "abc<*>".

    :param token: token to be checked for numerical characters
    :return: the token with numeric runs replaced by a single wildcard
    """
    masked = "".join('<*>' if ch.isnumeric() else ch for ch in token)
    return _WILDCARD_RUN.sub('<*>', masked)
6d05aa76a90505ecc979eecdd84daab7b3ca3be5
108,279
def get_no_of_projects_each_year(org_name, DATA):
    """Count an organisation's projects per year.

    Returns a dictionary mapping each year in which the organisation
    appears to its number of projects, together with the total number
    of projects across all years.
    """
    per_year = {}
    for year, orgs in DATA.items():
        projects = orgs.get(org_name)
        if projects:
            per_year[year] = len(projects)
    return per_year, sum(per_year.values())
6cc10db63e89a128b071f9545e0823c408c8b141
108,280
import math


def herdan(n_terms, n_words):
    """Herdan's C lexical-diversity measure (Herdan 1960, 1964).

    Computed as log(t)/log(w), where t is the number of unique
    terms/vocab and w is the total number of words. Returns 0 for
    degenerate inputs (w <= 1), avoiding division by log(1) = 0.
    """
    return math.log(n_terms) / math.log(n_words) if n_words > 1 else 0
3da9812f5612289f02cd1ef944de001fa188218e
108,281
def member_to_index(m_name, members):
    """Given a member name, return its index in the members list.

    @param m_name The name of the data member to search for
    @param members Iterable of member dicts, each with a "name" key
    @return Index if found, -1 if not found

    Note we could generate an index when processing the original input.
    """
    # enumerate() replaces the original's manually maintained counter.
    for index, member in enumerate(members):
        if member["name"] == m_name:
            return index
    return -1
44c4608588aa12d26262c6767b54dd5756d22c81
108,283
def pkcs5_unpad(data):
    """Remove PKCS#5 padding from *data*.

    Works on both ``str`` (as in the original Python 2 code) and
    ``bytes``: indexing bytes yields an ``int``, so the original's
    ``ord(data[-1])`` and ``data[-1] * count`` both broke on Python 3.

    Raises AssertionError if the trailing bytes are not valid padding.
    """
    last = data[-1]
    # bytes/bytearray index straight to int; str still needs ord().
    count = last if isinstance(last, int) else ord(last)
    # All `count` trailing elements must equal the pad byte/char;
    # data[-1:] keeps the sequence type so the repetition works for both.
    assert data[-count:] == data[-1:] * count
    return data[:-count]
b500fdf03230f9af1a28b4af492d2a58caa83bcc
108,286
from typing import List


def _combine_int(digits: List[int], base: int) -> int:
    """Fold a digit sequence (most-significant first) into one big integer."""
    value = 0
    for digit in digits:
        value = value * base + digit
    return value
f21e3013b944d7e87c892f99a82255d443a09870
108,287
def check_symlink_capability(path, target):
    """Test whether a symlink can be created at *path* pointing to *target*.

    Helper similar to datalad.tests.utils.has_symlink_capability, but
    meant for use in a datalad command context: it avoids importing the
    test machinery and checks a *specific* location rather than just
    "somewhere". The probe files are created and removed as part of the
    check; suitable paths are chosen by higher-level code.

    Parameters
    ----------
    path: Path
    target: Path

    Returns
    -------
    bool
    """
    try:
        target.touch()
        path.symlink_to(target)
    except Exception:
        # Any failure (permissions, filesystem without symlink
        # support, ...) simply means "no symlink capability here".
        return False
    else:
        return True
    finally:
        # Best-effort cleanup of whichever probe files were created.
        if path.exists():
            path.unlink()
        if target.exists():
            target.unlink()
e61a036bb95602e503357b6d8e7d85048eb684ec
108,288
import functools


def with_window(f):
    """Decorator that runs *f* only if its buffer is in a selected window.

    The wrapped function must take a buffer as its first parameter; the
    buffer's window is inserted as the second positional argument, so a
    call ``f(buffer, ...)`` becomes ``f(buffer, window, ...)``. When the
    buffer has no window, the call is silently skipped.
    """
    @functools.wraps(f)
    def wrapper(buf, *args, **kwargs):
        window = buf.window()
        if window:
            f(buf, window, *args, **kwargs)
    return wrapper
94cffb664343db12e6cce5c621fd213635b094ab
108,290
def by_value(dc: dict, val):
    """Return the first key in *dc* whose value equals *val*.

    Raises ValueError when no key maps to *val* -- the same exception
    the original ``list.index`` call raised. Single pass instead of
    materialising both the key and value lists; the original's
    ``-> list`` annotation was wrong (a key is returned) and is dropped.
    """
    for key, value in dc.items():
        if value == val:
            return key
    raise ValueError(f"{val!r} is not in dict values")
acab6b9ceef7ab4ed5fcc3975d68fe941a56f932
108,293
def _memoize_cache_key(args, kwargs):
    """Turn an args tuple and kwargs dict into a hashable cache key.

    Expects every argument to a memoized function to be hashable or
    uniquely identified by type(arg) and repr(arg). Lists are
    unhashable, so they are converted to tuples before being added.
    """
    def _freeze(value):
        # Exact-type check on purpose: only plain lists are converted.
        return tuple(value) if type(value) is list else value

    key = [_freeze(arg) for arg in args]
    key.extend((name, _freeze(value)) for name, value in sorted(kwargs.items()))
    return tuple(key)
8f6df038e7ef60338ce287fb72a8f7acb56a4d64
108,296
import re


def extract_request(message, slack_id):
    """Strip the bot mention from *message* and normalise whitespace.

    Keyword arguments:
    message -- the user's raw message
    slack_id -- the bot's slack id (removed together with one trailing space)
    """
    without_mention = message.replace(f"{slack_id} ", "").strip()
    return re.sub(" +", " ", without_mention)
e7f0b22dd45f32d33094f80b6e47a6b1ce60ab65
108,300
def getPhenotype(chromosome, items):
    """Decode a chromosome into the list of items placed in the bag.

    :param chromosome: sequence of 0/1 genes, parallel to *items*
    :param items: candidate items
    :return: list of items whose gene equals 1
    """
    selected = []
    for index, item in enumerate(items):
        if chromosome[index] == 1:
            selected.append(item)
    return selected
19b7bc47cba3fdf652dd84d4c5c1932afde6cbde
108,302
def op_at_code_loc(code, loc, opc):
    """Name the instruction at ``code[loc]``.

    `code` is instruction bytecode, `loc` an offset (integer), and
    `opc` an opcode module from `xdis` used to look the name up.
    Returns the string 'got IndexError' when ``code[loc]`` is invalid.
    """
    try:
        opcode = code[loc]
    except IndexError:
        return "got IndexError"
    return opc.opname[opcode]
3621eee7d46b37aaa84c140cfb62760334bdb7b3
108,303
def is_ugly(num, factors=(2, 3, 5)):
    """Check whether a given number is an ugly number.

    :param num: given number
    :type num: int
    :param factors: prime factors allowed for an ugly number
    :type factors: list[int] or tuple[int]
    :return: whether the number is ugly
    :rtype: bool
    """
    if num <= 0:
        return False
    # Strip out every allowed prime factor; an ugly number reduces to 1.
    for factor in factors:
        while num % factor == 0:
            num //= factor
    return num == 1
8f343ef6b9d382cbea778acc7d55c95ffcbdd4c1
108,304
def cmp_iso(a, b):
    """Compares two iso8601 format dates.

    returns [0 : a = b], [-1 : a < b], [1 : a > b]
    """
    # Field slices: year, month, day, hour, minute, second.
    for lo, hi in ((0, 4), (5, 7), (8, 10), (11, 13), (14, 16), (17, 19)):
        left, right = int(a[lo:hi]), int(b[lo:hi])
        if left != right:
            return -1 if left < right else 1
    return 0
d95b335f61917f989f6d65af8222b220a88e34b0
108,317
def check_coord_type(cube, coord):
    """Classify *coord* as scalar or auxiliary on *cube*.

    The decision is made by looking for the coordinate's name inside
    the "Auxiliary" section of the cube's text summary.

    Args:
        cube (iris.cube.Cube): cube whose summary is inspected
        coord: coordinate name to check (note: the ``in`` test below
            requires a string, although the original docstring said
            iris.coords.Coord)

    Returns:
        (coord_scalar, coord_aux): (False, True) when the coordinate
        appears in the auxiliary section, (True, False) otherwise.
    """
    summary = cube.summary()
    # str.find returns -1 when "Auxiliary" is absent, in which case the
    # slice degenerates to the last character -- same as the original.
    auxiliary_section = summary[summary.find("Auxiliary"):]
    if coord in auxiliary_section:
        return False, True
    return True, False
d7e672b5826277e097b76272cac0f995dbd0d544
108,322
def response(status, **kwargs):
    """Build a generic response dictionary.

    :param status: status content, stored under the 'status' key
    :param kwargs: any other content to include
    :return: dictionary with 'status' plus all extra entries
    """
    # Dict unpacking replaces the original's manual copy loop.
    return {'status': status, **kwargs}
c6c92692db435f7d55a1559e54a460e8c18338a3
108,333
from typing import Callable
from typing import List


def ListURLParameter(delim: str) -> Callable[[str], List[str]]:  # pylint: disable=C0103
    """
    Arg:
        delim => delimiter

    Description:
        Build a parser for a `delim`-separated list of items: the
        returned function splits its argument on `delim`, strips each
        piece, and drops pieces that were empty *before* stripping.

    Preconditions:
        N/A

    Raises:
        N/A
    """
    def parse(text: str) -> List[str]:  # pylint: disable=C0103
        pieces = text.strip().split(delim)
        # Filter on the raw piece (pre-strip), as the original did.
        return [piece.strip() for piece in pieces if piece]

    return parse
596a44ff81692e135c6e36540f709bf316849922
108,334
def binary_string_to_value(binary_string):
    """Parse a string of binary digits as an unsigned integer.

    Args:
        binary_string (str): the string to convert, e.g. "001010010"

    Returns:
        (int): the value the binary digits represent
    """
    return int(binary_string, base=2)
075a4d1958ef9a1c409cfda7a7e71d533434f42e
108,335
import random


def random_range(value, variance=0.0):
    """Return a random number drawn from [value * (1 - variance), value]."""
    lower = value * (1.0 - variance)
    return random.uniform(lower, value)
36cd95b83e09ce2a2b0767b3f0348ac8d8724a07
108,338
def get_total_seconds(td):
    """Return the total seconds represented by a datetime.timedelta.

    Equivalent to ``td.total_seconds()``; kept for environments that
    predate that method.
    """
    whole_seconds = td.seconds + td.days * 24 * 3600
    return (td.microseconds + whole_seconds * 1e6) / 1e6
fc8c024b4d866c77650027ca2ac4b6e69666143a
108,342
def convert_str_to_key_value(string, separators=(':', '=')):
    """
    :param string: in 'foo:bar' or 'foo=bar' format
    :param separators: candidate separator characters; when several are
        present in the string, the last one listed wins (matching the
        original scan order)
    :return: (key, value) | (None, None) when no separator is found
    """
    chosen = None
    for candidate in separators:
        if candidate in string:
            chosen = candidate
    if chosen is None:
        return None, None
    # Extra fields beyond the second are silently dropped, as before.
    parts = [part.strip(' ') for part in string.split(chosen)]
    return parts[0], parts[1]
dd589df7b085cc566850ee297f2996f0abc4fb05
108,343
def circuit_hamiltonien(chemin):
    """Extract a Hamiltonian circuit from an Eulerian circuit.

    Keeps the first occurrence of every vertex of *chemin*, so each
    vertex is visited exactly once, in order of first appearance.
    Vertices are assumed to be non-negative integers.
    """
    vertex_count = max(chemin) + 1
    seen = [False] * vertex_count
    circuit = []
    for vertex in chemin:
        if not seen[vertex]:
            seen[vertex] = True
            circuit.append(vertex)
    return circuit
9b930678ae90fb1be67d38a3b73237c9d28b3818
108,344
def create_line_item_config(name, order_id, placement_ids, ad_unit_ids, cpm_micro_amount, sizes, hb_bidder_key_id, hb_pb_key_id, hb_bidder_value_id, hb_pb_value_id, currency_code='USD'): """ Creates a line item config object. Args: name (str): the name of the line item order_id (int): the ID of the order in DFP placement_ids (arr): an array of DFP placement IDs to target ad_unit_ids (arr): an array of DFP ad unit IDs to target cpm_micro_amount (int): the currency value (in micro amounts) of the line item sizes (arr): an array of objects, each containing 'width' and 'height' keys, to set the creative sizes this line item will serve hb_bidder_key_id (int): the DFP ID of the `hb_bidder` targeting key hb_pb_key_id (int): the DFP ID of the `hb_pb` targeting key currency_code (str): the currency code (e.g. 'USD' or 'EUR') Returns: an object: the line item config """ # Set up sizes. creative_placeholders = [] for size in sizes: creative_placeholders.append({ 'size': size }) # Create key/value targeting for Prebid. 
# https://github.com/googleads/googleads-python-lib/blob/master/examples/dfp/v201802/line_item_service/target_custom_criteria.py # create custom criterias hb_bidder_criteria = { 'xsi_type': 'CustomCriteria', 'keyId': hb_bidder_key_id, 'valueIds': [hb_bidder_value_id], 'operator': 'IS' } hb_pb_criteria = { 'xsi_type': 'CustomCriteria', 'keyId': hb_pb_key_id, 'valueIds': [hb_pb_value_id], 'operator': 'IS' } # The custom criteria will resemble: # (hb_bidder_criteria.key == hb_bidder_criteria.value AND # hb_pb_criteria.key == hb_pb_criteria.value) top_set = { 'xsi_type': 'CustomCriteriaSet', 'logicalOperator': 'AND', 'children': [hb_bidder_criteria, hb_pb_criteria] } # https://developers.google.com/doubleclick-publishers/docs/reference/v201802/LineItemService.LineItem line_item_config = { 'name': name, 'orderId': order_id, # https://developers.google.com/doubleclick-publishers/docs/reference/v201802/LineItemService.Targeting 'targeting': { 'inventoryTargeting': {}, 'customTargeting': top_set, }, 'startDateTimeType': 'IMMEDIATELY', 'unlimitedEndDateTime': True, 'lineItemType': 'PRICE_PRIORITY', 'costType': 'CPM', 'costPerUnit': { 'currencyCode': currency_code, 'microAmount': cpm_micro_amount }, 'creativeRotationType': 'EVEN', 'primaryGoal': { 'goalType': 'NONE' }, 'creativePlaceholders': creative_placeholders, } if placement_ids is not None: line_item_config['targeting']['inventoryTargeting']['targetedPlacementIds'] = placement_ids if ad_unit_ids is not None: line_item_config['targeting']['inventoryTargeting']['targetedAdUnits'] = [{'adUnitId': id} for id in ad_unit_ids] return line_item_config
ab26c245fa513bbb7d3db18b3042fbfd07682e76
108,345
import re


def add_white_spaces(expression):
    """
    Adds white spaces in between <, >, =, <=, and >= so the expression
    can be easily parsed.

    Example: "0.2>=p*q >=0.1" ---> "0.2 >= p*q >= 0.1"
    """
    # A lone '=' with no adjacent '<' or '>' on either side.
    just_equal = r"[^\s<>]=[^<>]|[^<>]=[^\s<>]"
    match = re.findall(just_equal, expression)
    # print(match)
    if match:
        # NOTE(review): the result of str.replace() is discarded, so this
        # branch is a no-op -- almost certainly a bug. A naive fix
        # (assigning the result) would also corrupt any '>=' / '<='
        # elsewhere in the string, so a proper fix needs a targeted
        # re.sub; confirm intended behavior before changing.
        expression.replace("=", " = ")
    # Comparison operators containing '=' ('>=', '<=', plus the reversed
    # spellings '=>', '=<') written without a surrounding space.
    with_equal = r"[^\s]>=|[^\s]<=|[^\s]=>|[^\s]=<|>=[^\s]|<=[^\s]|=>[^\s]|=<[^\s]"
    match = re.findall(with_equal, expression)
    # print(match)
    if match:
        # Each flag ensures the corresponding global replace runs at
        # most once, even when findall reports several hits.
        greater_eq_check = True
        smaller_eq_check = True
        eq_greater_check = True
        eq_smaller_check = True
        for item in match:
            if ">=" in item and greater_eq_check:
                expression = expression.replace(">=", " >= ")
                greater_eq_check = False
            if "<=" in item and smaller_eq_check:
                expression = expression.replace("<=", " <= ")
                smaller_eq_check = False
            if "=>" in item and eq_greater_check:
                # '=>' is normalised to '>='.
                # NOTE(review): this clears greater_eq_check instead of
                # eq_greater_check (and the '=<' branch mirrors it) --
                # looks unintended; verify.
                expression = expression.replace("=>", " >= ")
                greater_eq_check = False
            if "=<" in item and eq_smaller_check:
                expression = expression.replace("=<", " <= ")
                smaller_eq_check = False
    # Bare '<' or '>' that is not part of '<=' / '>=' and lacks a space.
    without_equal = r"<[^\s=]|>[^\s=]|[^\s=]<|[^\s=]>"
    match = re.findall(without_equal, expression)
    # print(match)
    if match:
        greater_check = True
        smaller_check = True
        for item in match:
            if ">" in item and greater_check:
                expression = expression.replace(">", " > ")
                greater_check = False
            if "<" in item and smaller_check:
                expression = expression.replace("<", " < ")
                smaller_check = False
    # Collapse runs of spaces introduced above and trim the ends.
    expression = re.sub(r' +', ' ', expression).strip()
    return expression
0a6c48cc98ba2e8dd0f8ff1bd029cc8f448d794f
108,347
def cross_p(u, v):
    """Return the cross product u x v, orthogonal to both u and v."""
    ux, uy, uz = u[0], u[1], u[2]
    vx, vy, vz = v[0], v[1], v[2]
    return (uy * vz - uz * vy,
            uz * vx - ux * vz,
            ux * vy - uy * vx)
61419eeed62284e3501974409843a0dca174f323
108,350
from typing import List


def _create_component_schema(component_name: str, field_names: List):
    """Build a schema for a given component and its field list.

    The first field is treated as the timestamp (DATETIME); every other
    field is assumed to hold float values (DOUBLE).

    PARAMS
    ======
    component_name: string
        Name of the component to build a schema for
    field_names: list of strings
        Names of all the fields included in this component

    RETURNS
    =======
    component_schema: dict
        A dictionary containing the detailed schema for the component
    """
    columns = [
        {'Name': field_name,
         'Type': 'DATETIME' if position == 0 else 'DOUBLE'}
        for position, field_name in enumerate(field_names)
    ]
    return {'ComponentName': component_name, 'Columns': columns}
35108d0bb581961e7839a78d9addfff50721bf4f
108,352
def gather_reporter_statistics(reporter_monitors, metric_name):
    """
    Extracts a per-reporter map of a specific metric.

    :param reporter_monitors: Mapping of reporter name -> info mapping.
    :param metric_name: Metric of interest.
    :return: Dict mapping each reporter name to its value for the metric.
    """
    # .iteritems() was Python 2 only and raises AttributeError on
    # Python 3; .items() behaves the same on both.
    return {reporter_name: reporter_info[metric_name]
            for reporter_name, reporter_info in reporter_monitors.items()}
da0575b5aa9dabdea09506f5089f98fde541123e
108,353
def combine_arguments(args, additional):
    """Combine two dictionaries representing command-line arguments.

    Any duplicate keys will be merged according to the following procedure:

    1. If the value in both dictionaries is a list, the two lists are
       concatenated.
    2. Otherwise, the value in the first dictionary is OVERWRITTEN.

    Neither input dictionary (nor any list it contains) is modified:
    the original used ``+=`` on a shallow copy, which mutated the list
    objects still referenced by *args*.

    :param args: original command-line arguments
    :type args: dictionary
    :param additional: additional command-line arguments
    :type additional: dictionary
    :return: combined command-line arguments
    :rtype: dictionary
    """
    new_args = args.copy()
    for key, value in additional.items():
        if (key in new_args and isinstance(value, list)
                and isinstance(new_args[key], list)):
            # Build a new list instead of extending the shared one in place.
            new_args[key] = new_args[key] + value
        else:
            new_args[key] = value
    return new_args
835d03bec91a9689589227528f9bbc0028f6a1ed
108,355
import uuid


def generate_dtids(registry: str, number: int) -> list:
    """
    Generate a list of DTIDs.

    Args:
        registry: Base URL of the DTID registry.
        number: The number of generated DTIDs.

    Returns:
        List of DTIDs, each the registry URL followed by a fresh UUID4.
    """
    return [registry + str(uuid.uuid4()) for _ in range(number)]
3efcc7dd8ab4bd6edd4b3256c7e973df8f15e7fc
108,357
def parse_mask(s: str) -> int:
    """Parse an integer, possibly prefixed with 0b/0o/0x, from a string.

    Examples:
        >>> parse_mask("10")
        10
        >>> parse_mask("0x10")
        16
        >>> parse_mask("0o10")
        8
        >>> parse_mask("0b10")
        2
        >>> parse_mask("")
        Traceback (most recent call last):
        ...
        ValueError: invalid literal for int() with base 10: ''
    """
    s = s.strip()
    # Map a recognized prefix to its base; anything else is decimal.
    bases = {"0x": 16, "0o": 8, "0b": 2}
    base = bases.get(s[:2], 10)
    if base != 10:
        s = s[2:]
    return int(s, base=base)
2692043920fc8a07ccea35b1708d4e3e8d0bdd5c
108,361
def check_validity(card_number):
    """Check the validity of a credit card number with the Luhn algorithm.

    Arguments:
        card_number {str} -- the number of the card to be checked as a string

    Returns:
        Boolean -- True if the card number is valid
    """
    # The Luhn algorithm doubles every second digit counting from the RIGHT
    # (the check digit is never doubled). The previous implementation
    # doubled even indices from the left, which is only correct for
    # even-length numbers and failed on odd-length ones.
    digits = [int(ch) for ch in reversed(card_number)]
    doubled = [2 * d if idx % 2 else d for idx, d in enumerate(digits)]
    # Digits > 9 contribute their digit sum (equal to n % 10 + 1 for 10..18).
    reduced = [d % 10 + 1 if d > 9 else d for d in doubled]
    # Valid numbers have a checksum divisible by 10.
    return sum(reduced) % 10 == 0
f6850a54d70d481b2511bd5c8bbca89b74bd51ed
108,362
def _just_gimme_an_ascii_string(s): """Converts encoded/decoded string to a platform-appropriate ASCII string. On Python 2 this encodes Unicode strings to normal ASCII strings, while normal strings are left unchanged. On Python 3 this decodes bytes to Unicode strings via the ASCII encoding, while Unicode strings are left unchanged (and might still contain non-ASCII characters!). Raises ------ UnicodeEncodeError, UnicodeDecodeError If the conversion fails due to the presence of non-ASCII characters """ if isinstance(s, bytes) and not isinstance(s, str): # Only encoded bytes on Python 3 will end up here return str(s, encoding='ascii') else: return str(s)
18bf2818e03927f3862301d6b90bf3265147921c
108,366
def totalAvailableSamples(packet, nPoints, scale):
    """
    Compute the total number of available plots for a given packet as well
    as the number of data points in a single plot.

    Args:
        packet (np array): Complete set of demodulated data.
        nPoints (int): Number of complete sets of data points to plot,
            usually set by the global 'nPoints'.
        scale (int): 1 if plotting data points with no interpolation, or 8
            if using 8 point interpolation.

    Returns:
        tuple: Tuple containing:

            * Number of points for each sample (int).
            * Total number of plots that can be plotted for this packet (int)
    """
    # Each plot consumes (nPoints * 2) * scale samples plus `scale` extras.
    nPointsSampleNum = ((nPoints * 2) * scale) + scale
    # (Removed the unused local `packetSize`; packet.size is used directly.)
    return nPointsSampleNum, int(packet.size / nPointsSampleNum)
e5f7b89315ce8cd949957530dbc9f84222b6ffd8
108,368
def check_collisions(veh_dict, t, collision_tolerance_x, collision_tolerance_y):
    """
    Project every vehicle forward by t seconds at constant velocity from the
    positions in veh_dict and report whether any comes within the given
    collision tolerances of the ego vehicle.

    Arguments
        veh_dict: A dictionary of all of the vehicles in the scene, with
            their information (rel_x, rel_y, rel_vx, rel_vy attributes).
        t: Float, the time (seconds) ahead at which to check for collisions.
        collision_tolerance_x: Float, lateral collision tolerance in meters.
        collision_tolerance_y: Float, longitudinal collision tolerance in
            meters.

    Returns (collision, msg):
        collision: Boolean, whether or not a collision occurred.
        msg: String, the vehicle id or a message that no collision occurred.
    """
    # Ego is the lateral origin; for now assume no lateral ego motion.
    ego_x = 0
    ego_y = veh_dict["ego"].rel_vy * t
    for veh_id, veh in veh_dict.items():
        if veh_id == "ego":
            continue
        # Constant-velocity projection of the other vehicle.
        future_x = veh.rel_x + veh.rel_vx * t
        future_y = veh.rel_y + veh.rel_vy * t
        within_x = abs(future_x - ego_x) <= collision_tolerance_x
        within_y = abs(future_y - ego_y) <= collision_tolerance_y
        if within_x and within_y:
            return True, veh_id
    return False, "No collisions detected."
9b4233e803e106b5d8bd2e768eb658abee0cb1ff
108,369
def get_checksum(state):
    """Get checksum from the state.

    Compares pairs of characters, contributing '1' to the checksum if the
    two members of a pair are equal and '0' otherwise. The reduction
    repeats until the checksum length is odd. (Iterative form of the
    original recursive pairwise collapse.)
    """
    digest = list(state)
    while True:
        digest = ['1' if a == b else '0'
                  for a, b in zip(digest[::2], digest[1::2])]
        if len(digest) % 2:
            return ''.join(digest)
36d8465a8665cc879481494a7b4745dab1fb2ae0
108,372
def exclude_bad(data, columns, values):
    """Exclude rows of data with poor quality control flags.

    eg. data_new = exclude_bad(data, ['Practical Salinity Corrected QC Flag '],
                               [0, 4, 9])

    :arg data: the data set
    :type data: pandas DataFrame
    :arg columns: Column names with quality control information
    :type columns: list of strings
    :arg values: the quality control values to be excluded
    :type values: list of integers
    :returns: data_return - new data frame with bad data excluded.
    """
    data_return = data
    for column in columns:
        # Keep only rows whose flag matches none of the excluded values.
        data_return = data_return[~data_return[column].isin(values)]
    return data_return
22e1c73ea66e0da6d93b1899456d13f1046ca2c7
108,377
def set_to_provider_client(unparsed_set):
    """Take an OAI set and convert it into (provider_id, client_id).

    A set of the form ``PROVIDER.CLIENT~extra`` yields
    ``('provider', 'provider.client')``; a set without a '.' yields only a
    provider id. Empty/None input yields ``(None, None)``.
    """
    client_id = None
    provider_id = None
    if unparsed_set:
        # Strip any additional query. partition() tolerates zero or many
        # '~' characters; the previous `a, _ = s.split("~")` raised
        # ValueError whenever more than one '~' was present.
        unparsed_set = unparsed_set.partition("~")[0]
        if unparsed_set:
            # DataCite API deals in lowercase
            unparsed_set = unparsed_set.lower()
            if "." in unparsed_set:
                # Likewise, take everything before the FIRST '.' so ids
                # containing extra dots do not crash the unpacking.
                provider_id = unparsed_set.partition(".")[0]
                client_id = unparsed_set
            else:
                provider_id = unparsed_set
    return provider_id, client_id
1321f49fa79a1e424be870730c0ca3dbe828fc39
108,387
def translate_facet_field(fcs):
    """
    Translates solr facet_fields results into something easier to manipulate.

    A solr facet field looks like this:
        [field1, count1, field2, count2, ..., fieldN, countN]
    We translate this to a dict {f1: c1, ..., fn: cn}.

    This has slightly higher overhead for sending over the wire, but is
    easier to use.
    """
    if 'facet_fields' not in fcs:
        return {}
    translated = {}
    for facet, flat in fcs['facet_fields'].items():
        # Pair even-index names with odd-index counts.
        translated[facet] = dict(zip(flat[::2], flat[1::2]))
    return translated
dbb563420f3aba4034162fff0c25a540ac59c722
108,388
def cumsum(series):
    """
    Return the running total of values. Equivalent to `series.cumsum()`.

    Args:
        series: column to compute cumulative sum for.
    """
    return series.cumsum()
20f127653ac49e3e52d91f2df0b6f8131c89b0fb
108,392
def unquote(s):
    """Turn a string of the form =AB into the ASCII character 0xAB."""
    hex_digits = s[1:3]
    code_point = int(hex_digits, 16)
    return chr(code_point)
c53cb891d7831c528b27f31c60eca30e7c4dc93e
108,394
import torch


def to_cartesian(x: torch.Tensor) -> torch.Tensor:
    """
    Transform a complex-valued tensor from polar to cartesian coordinates.

    Args:
        x: Tensor whose last dimension contains the magnitude and phase.

    Returns:
        x in cartesian coordinates. x[...,0] contains the real part and
        x[...,1] contains the imaginary part.
    """
    magnitude = x[..., 0]
    phase = x[..., 1]
    # real = r*cos(phi), imag = r*sin(phi)
    cartesian = torch.stack((magnitude * torch.cos(phase),
                             magnitude * torch.sin(phase)), dim=-1)
    return cartesian
e4fc995e0230df8c5842efb4b3e80201df33ac00
108,399
import yaml
import re


def read_yaml(file_path):
    """
    By Adam O'Hern for Mechanical Color

    Returns a Python object (list or dict, as appropriate) from a given
    YAML file path. We use YAML because it's easier and more human readable
    than JSON. It's harder to mess up, easier to learn, and--imagine!--it
    supports commenting.

    Note: YAML does not support hard tabs (\t), so this script replaces
    those with four spaces ('    ').
    """
    # Context manager guarantees the handle is closed even if reading or
    # parsing raises (the previous open/close pair leaked on error).
    with open(file_path, 'r') as yaml_file:
        return yaml.safe_load(re.sub('\\t', '    ', yaml_file.read()))
5fcfe366af8503eef51bee9d5f5bfcb5636e5376
108,401
def get_r_dict(r_chain_dict, lig_r_atom_touch_mcs):
    """
    This will take the r_chain_dict and the dict of all the atoms which
    touch the core and return a dict of R-groups as keys and their nodes
    as values.

    Inputs:
    :param dict r_chain_dict: dict of all the atom isolabels for in an
        R-group. keys are R-groups; items are iso-labels of atoms in the
        R-group. ie) {'1R1': [3, 4, 5, 6, 7, 8, 9, 10, 11, 10000]}
    :param dict lig_r_atom_touch_mcs: dict of all the atoms which directly
        touch the core and what anchor they touch. keys are atom isolabels
        of atoms touching an anchor; items are iso-labels of anchor atoms.
        ie) {3: [10000]}

    Returns:
    :returns: dict r_s_dict: dictionary of R-groups and anchor atoms they
        are connected to. keys are R-groups. items are isolabel of anchor
        atoms. ie) {'1R1': [10000]}
    """
    r_s_dict = {}
    for r_group, atom_isolabels in r_chain_dict.items():
        node_list = []
        for atom in atom_isolabels:
            # Direct O(1) membership test replaces the previous linear scan
            # over every key; also drops the unused `temp_r_list` local.
            if atom in lig_r_atom_touch_mcs:
                node_list.extend(lig_r_atom_touch_mcs[atom])
        r_s_dict[r_group] = node_list
    return r_s_dict
1b62c2bc012fdc432366fd0137a8fa43368a29f0
108,402
def get_inheritance_map(classes):
    """Return a dict where values are strict subclasses of the key.

    Each class in *classes* maps to the list of OTHER classes in the same
    collection for which ``candidate.issubclass(parent)`` holds.
    """
    inheritance = {}
    for parent in classes:
        subclasses = []
        for candidate in classes:
            if candidate != parent and candidate.issubclass(parent):
                subclasses.append(candidate)
        inheritance[parent] = subclasses
    return inheritance
d75b90c958fdc199ca177f062c23f79e900a9423
108,404
def find_middle_node(doubly):
    """Solution to exercise R-7.8.

    Find the middle node of a doubly linked list with header and trailer
    sentinels using link hopping only. For an even number of nodes the
    node slightly left of center is reported.

    The list's stored size determines the hop count ((n-1)//2 + 1 hops
    from the header), so no counter over the data is needed. Running time
    is O(n).
    """
    if doubly.is_empty():
        return None
    hops_remaining = (doubly._size - 1) // 2 + 1
    node = doubly._header
    while hops_remaining:
        node = node._next
        hops_remaining -= 1
    return node
c03ba496a3630f286addf6ff812e67586fb11a64
108,409
def scale_x(las_file, point):
    """Calculate the scaled (real-world) x value for a point record.

    Applies the LAS header's x scale factor and offset to the raw
    integer X stored on the point.
    """
    x_scale = las_file.header.scale[0]
    x_offset = las_file.header.offset[0]
    return point.X * x_scale + x_offset
b8ca605cdffcda16a1270546e0e067249b4c1ec5
108,415
from pathlib import Path


def read_energy_grid(filename: Path) -> list[list[int]]:
    """Read an energy grid from a file.

    Each line of the file is a string of digits; every digit becomes one
    cell of the corresponding row.

    Args:
        filename (Path): path to the input file.

    Returns:
        list[list[int]]: the energy field, one list per input line.
    """
    with filename.open("r") as handle:
        return [[int(digit) for digit in row.strip()] for row in handle]
157c01a1729080ff04a03522b8312f35bceab32e
108,417
def short_description(description, **kwargs):
    """
    Decorator that adds the django short_description attribute to the given
    function, plus every keyword argument as an extra attribute.

    :param description: the value for the short description attribute
    :type description: str or unicode
    :return: the decorator function
    :rtype: function
    """
    def _wrapper(func):
        """Attach the attributes directly to the function object."""
        func.short_description = description
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return _wrapper
4c75cda3bd4a1f5674a527a02a6f27754bed6177
108,418
def _getCompartmentPosition(df, id): """ Get the position of a compartment with its certain compartment id. Args: df: tuple-(df_CompartmentData, df_NodeData, df_ReactionData, df_ArbitraryTextData, df_ArbitraryShapeData). id: str-the id of the compartment. Returns: position_list: list of position. position: list-1*2 matrix-top left-hand corner of the compartment [position_x, position_y]. """ idx_list = df[0].index[df[0]["id"] == id].tolist() position_list =[] for i in range(len(idx_list)): position_list.append(df[0].iloc[idx_list[i]]["position"]) return position_list
a57a1c7280fa87af856d559763700885fb20f823
108,419
def eval_poly_vs_fct(poly, function, test_values):
    """
    Compute the maximum absolute difference between <poly> and <function>
    evaluated at each point in the <test_values> vector.
    """
    differences = [abs(poly(v) - function(v)) for v in test_values]
    return max(differences)
263af4f4263ba8454235064baa5c0e0f4ee9914e
108,421
import torch


def dot(x, y):
    """Dot product of two tensors, accumulated in double precision."""
    elementwise = x * y
    return elementwise.sum(dtype=torch.double)
2e02c26149c6d65a714638e551505731b3c359c6
108,425
def _sanitize_index_element(ind): """Sanitize a one-element index.""" if ind is None: return None return int(ind)
7f1f57394c624f8b7a271fa55267489ad0686ea6
108,427
def load_posts(conn, value):
    """Return all the posts from the posts table for the given subreddit.

    :param conn: open DB-API connection (sqlite3-style, '?' paramstyle).
    :param value: subreddit name to filter on.
    :return: cursor over the matching rows.
    """
    with conn:
        # Parameterized query: the previous string-formatted SQL allowed
        # injection through `value`.
        return conn.execute(
            "SELECT * FROM posts WHERE subreddit=?", (value,))
391dcedf29dbf5a523873e92463c71c0654fb902
108,435
import re def _can_be_regex(obj): """Return True if obj can be turned into a regular expression.""" return isinstance(obj, (str, type(re.compile(""))))
50aa8e67b87d97be59696aa1d33c4ab25412d8af
108,437
def count_clues(puzzle_grid):
    """Counts clues in a puzzle_grid, which can be a list of lists or string.

    In list form, a clue is any truthy cell; in string form, any character
    other than '.'.
    """
    if isinstance(puzzle_grid, list):
        return sum(1 for row in puzzle_grid for cell in row if cell)
    return sum(1 for ch in puzzle_grid if ch != ".")
005f7e099618e16cf31b194bd2898baf412d374b
108,438
import math
import random


def rand_cluster(n, c, r):
    """Return n random points in the disk of radius r centered at c."""
    cx, cy = c

    def _random_point():
        # Draw an angle first, then a radius, matching the original
        # order of random.random() calls.
        angle = 2 * math.pi * random.random()
        dist = r * random.random()
        return (cx + dist * math.cos(angle), cy + dist * math.sin(angle))

    return [_random_point() for _ in range(n)]
26b078791d8cbfbfedcc74f745023b3b65c7fb29
108,440
def unique_tuples(df, columns):
    """
    Return the set of unique tuples from a dataframe for the specified
    columns.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe with the columns of data to be considered.
    columns : list-like
        A list of column names in the dataframe from which to construct
        the tuples.
    """
    selected = (df[name] for name in columns)
    return set(zip(*selected))
9e6c31e957a1a7a5bf58fd8362ad8ae8106ac253
108,445
def apply_consecutive_elements(l, fn):
    """Apply `fn` taking as arguments consecutive elements of `l`."""
    return [fn(left, right) for left, right in zip(l, l[1:])]
c1669687730d60c3c9204b18048d4279a98192e1
108,450
def plato2sg(plato):
    """Convert degrees Plato to specific gravity."""
    degrees = float(plato)
    return 259.0 / (259.0 - degrees)
588aef0eeff674f1aad3984ee29f9fd478262ae2
108,452
from typing import Callable
import functools


def show_shapes(func: Callable) -> Callable:
    """
    Decorator to log dataframe shape before and after applying a function.

    Args:
        func: Function that takes a dataframe as argument

    Returns:
        function
    """
    @functools.wraps(func)
    def wrapped(df):
        print(f"Shape before {func.__name__}", df.shape)
        result = func(df)
        print(f"Shape after {func.__name__}", result.shape)
        return result
    return wrapped
6ca9a5c862d22864933873e46ed19c36f0fd90e7
108,459
import re


def search_terms_to_list(search_string):
    """
    Takes a search query string and returns a list of terms to search on,
    extracting terms surrounded by double quotes as separate items.
    """
    search_terms = []
    exact_searches = re.findall(r'"([^"]+)"', search_string)
    for phrase in exact_searches:
        # Plain string replace: phrases may contain regex metacharacters
        # (e.g. '+', '.', '('), which broke (or mis-matched in) the
        # previous re.sub pattern built from the unescaped phrase.
        search_string = search_string.replace(f'"{phrase}"', '')
    search_terms.extend(exact_searches)
    search_terms.extend(search_string.split())
    return search_terms
73830f1ac24b1a5ff2416c086464b7d218feb8c8
108,463
def labels_name(labels):
    """Return a comma-separated list of label names.

    Args:
        labels: list of label objects, or a single label object.

    Returns:
        String with comma-separated label names (or the single label's
        name).
    """
    # isinstance is the idiomatic type check; a generator expression
    # avoids building an intermediate list just for join().
    if isinstance(labels, list):
        return ','.join(label['name'] for label in labels)
    return labels['name']
35ad15d47056eea6343cc9267a50ff673680f9a6
108,467
import random


def get_network_demands(network_topology, num):
    """
    Generates a list of network demands on the provided network topology.

    :param network_topology: type tuple
        Tuple of networkx.Graphs that describe the communication resources
        and connectivity of quantum network
    :param num: type int
        The number of demands desired
    :return: type list
        List of tuples (source, destination, fidelity, rate) describing
        the demand
    """
    _, node_graph = network_topology
    end_nodes = [node for node in node_graph.nodes
                 if node_graph.nodes[node]["end_node"] is True]
    demands = []
    for _ in range(num):
        source, destination = random.sample(end_nodes, 2)
        # Fidelity: 0.6 + uniform(0, 0.3), rounded to 3 decimals
        fidelity = round(0.6 + random.random() * (3 / 10), 3)
        # Rate: 10 / 2**k for a random k in 7..10
        rate = 10 / (2 ** random.choice(list(range(7, 11))))
        demands.append((source, destination, fidelity, rate))
    return demands
13a6548647779f82bdd891bff3b3b4611fa6c3e2
108,472
def evaluate_apartment(area: float, distance_to_underground: int) -> float:
    """Estimate the price of an apartment from its area and its distance
    to the underground."""
    area_value = 200000 * area
    location_penalty = 1000 * distance_to_underground
    return area_value - location_penalty
c212a0ddffdfa2fd9f14c9410c7cd88418cc505f
108,473
def CalculateNewBackgroundPositionX(m):
    """Fixes percent based background-position-x.

    This function should be used as an argument to re.sub since it needs to
    perform replacement specific calculations.

    Args:
        m: A match object; group(1) is the separator after the property
           name and group(2) is the percentage value.

    Returns:
        A string with the background-position-x percentage fixed
        (mirrored: the new value is the offset from 100%).

        BG_HORIZONTAL_PERCENTAGE_X_RE.sub(CalculateNewBackgroundPosition,
        'background-position-x: 75%') will return
        'background-position-x: 25%'.
    """
    flipped = 100 - int(m.group(2))
    return 'background-position-x%s%s%%' % (m.group(1), str(flipped))
0034b01a822f3a76222612ff4cf51bce2f189852
108,478
def get_tags(misp_attr: dict):
    """
    Tags are attached as a list of objects to MISP attributes.
    Returns a list of all tag names (entries without a truthy name are
    skipped).

    @param The MISP attribute to get the tag names from
    @return A list of tag names
    """
    names = []
    for tag in misp_attr.get("Tag", []):
        if tag and tag.get("name", None):
            names.append(tag.get("name", None))
    return names
e590b941e6b3a05a4f6948e8d485aa5f539aefd7
108,479
from typing import List from typing import Optional from typing import Dict def _get_non_image_dict(list_channels: List[str], numerical_cols: List[str], categorical_cols: Optional[List[str]] = None) -> Dict[str, List[str]]: """ Converts list of channels to dictionary of features to channels. Mimics the behavior of convert_non_image_features_channels_to_dict in ScalarConfig. """ res = {} columns = numerical_cols.copy() if categorical_cols: columns += categorical_cols for col in columns: res[col] = list_channels return res
82ddf1ac3919930af6e6695c276e8e44e424edd8
108,480
import math


def my_round(i):
    """Helper function for rounding half-up.

    Keyword:
        (float) i : number to be inputted
    Returns:
        (int) number rounded up or down based on mathematical rounding
        rules
    """
    floor_value = math.floor(i)
    if i - floor_value < 0.5:
        return floor_value
    return floor_value + 1
57419d3e0a7d55be0a640f561efef609a42632f7
108,483
def compute_counts(corpus):
    """Compute the word counts and probs for a given corpus.

    corpus: list of sentences
    returns: dict of words, each mapping to {'count': int, 'prob': float}
    """
    words = {}
    total_tokens = 0
    for sentence in corpus:
        for token in sentence.split():
            entry = words.setdefault(token, {'count': 0})
            entry['count'] += 1
            total_tokens += 1
    # Probabilities need the final corpus size, so compute them in a
    # second pass.
    for entry in words.values():
        entry['prob'] = entry['count'] / total_tokens
    return words
81ed7162f638f4b6949f7bc10b0f6beac6fadac3
108,484
from pathlib import Path
import re


def get_number_from_path_stem(path):
    """
    Extract the tail number from a path's stem (the last run of digits).

    Examples
    --------
    >>> get_number_from_path_stem('src/file_1.pdb')
    >>> 1

    >>> get_number_from_path_stem('src/file11')
    >>> 11

    >>> get_number_from_path_stem('src/file_1234_1.pdb')
    >>> 1

    Parameters
    ----------
    path : str or Path obj
        The path to evaluate.

    Returns
    -------
    int
        The tail integer of the path.
    """
    stem = Path(path).stem
    digit_runs = re.findall(r'\d+', stem)
    return int(digit_runs[-1])
f81925d4f431c6972eee237cffc14d159265b083
108,496