content: string, lengths 39 to 14.9k
sha1: string, length 40
id: int64, 0 to 710k
def sign(x: float) -> float:
    """
    Sign function: returns 1.0 if x is greater than or equal to zero,
    -1.0 otherwise.

    Args:
        x (float): value to be compared with zero.

    Returns:
        float: 1.0 or -1.0, depending on the value of x.
    """
    if x >= 0.0:
        return 1.0
    else:
        return -1.0
7f38d062344660f0d5acd2defe76e0f974f20ffa
48,082
def get_version(opts, app):
    """Get version of a specific app

    Args:
        opts (dict): Nephos options dict.
        app (str): Helm application name.

    Returns:
        str: Desired version of Helm app, if specified. Defaults to None.
    """
    if "versions" in opts and app in opts["versions"]:
        return opts["versions"][app]
    else:
        return None
bf3517644904f26482dac56c598326c2bbcf5b44
48,087
def tagged_struct_columns(typegraph, struct_id):
    """
    Return a dict mapping the struct column names to versions tagged with the id.
    """
    struct_id = str(struct_id)
    typedef = 'Struct' + struct_id
    colheader = ('struct_id', *typegraph.fields[typedef])
    return {header: "t{:s}_{:s}".format(struct_id, header)
            for header in colheader}
022a33a32dfc9bbc964fd2294f1393a796597469
48,091
def maybe_tuple(value):
    """Return `value` as a tuple.

    If it is already a tuple, return it unchanged. Otherwise return a
    1-element tuple containing `value`."""
    if isinstance(value, tuple):
        return value
    return (value,)
6279c9a96d5362b7cdaf4b022ac3f41a40a26e28
48,093
def config_identifier(converter, model_name):
    """Create identifier of configuration based on data `converter` and `model_name`"""
    return model_name.lower().replace('-', '_') + '_' + converter
50633c10a240823e4cb7e664d480cfe459fdf8a8
48,094
def _get_id_field_from_input_field_name(input_field_name: str) -> str:
    """
    Map plural input fields like children to the appropriate field,
    child_ids in this case.
    """
    if input_field_name == "children":
        return "child_ids"
    return input_field_name.rstrip("s") + "_ids"
7ce6368f6d9462718d0acf6b46099c8ae80e0e54
48,098
def valid_benefit_data(promotion):
    """ Valid JSON data for creating a new Benefit object """
    return {
        'promotion': promotion.pk
    }
8e6ec6adb25930623ecc45bf04cadac14ad8087c
48,106
def convert_string_to_pymath(strin):
    """ Convert a string to a 'math' format """
    if strin.strip() != '':
        return '$' + strin.replace(' ', '\\ ') + '$'
    else:
        return strin
74f2d99e5bcb324c3284c0e06d4af343c9c320ac
48,109
def conj(pary):
    """
    Returns the conjugation of 2D PitchArray

    Same as PitchArray.conj(), but creates a new copy

    Parameter:
    ----------
    pary: PitchArray
    """
    return pary.conj(inplace=False)
36f12094dba0c4e0cf28ffb8d27e2b35c87d85b2
48,114
def to_iso639_part1(language_code):
    """ Convert codes like "en-us" to "en" """
    return language_code.split('-', 1)[0]
53192b1a7b0263cca5bb9c714fc01be4b4b3b1ae
48,116
import types


def good_decorator_accepting_args(decorator):
    """This decorator makes decorators behave well with respect to the
    decorated functions' names, doc, etc. Differently from good_decorator,
    this accepts decorators possibly receiving arguments and keyword
    arguments. This decorator can be used indifferently with class methods
    and functions."""
    def new_decorator(*f, **k):
        g = decorator(*f, **k)
        if 1 == len(f) and isinstance(f[0], types.FunctionType):
            g.__name__ = f[0].__name__
            g.__doc__ = f[0].__doc__
            g.__dict__.update(f[0].__dict__)
        return g

    new_decorator.__name__ = decorator.__name__
    new_decorator.__doc__ = decorator.__doc__
    new_decorator.__dict__.update(decorator.__dict__)
    # Required for Sphinx' automodule.
    new_decorator.__module__ = decorator.__module__
    return new_decorator
898d8bdcb71d47d8a083841d57122e9e16436905
48,125
def is_step(cur, prev):
    """
    Checks whether pairs cur (current) and prev (previous) are consecutive tracks.
    Works if disc_num or track_num is a single letter

    :param cur: [disc_num, track_num]
    :param prev: [disc_num, track_num]
    :return: bool. True if cur comes after prev, False otherwise
    """
    c = cur[:]
    c = [c[0] if len(c[0]) > 0 else '0', c[1] if len(c[1]) > 0 else '0']
    c = [ord(c[0]) - 64 if not c[0].isdigit() else int(c[0]),
         ord(c[1]) - 64 if not c[1].isdigit() else int(c[1])]
    p = prev[:]
    p = [p[0] if len(p[0]) > 0 else '0', p[1] if len(p[1]) > 0 else '0']
    p = [ord(p[0]) - 64 if not p[0].isdigit() else int(p[0]),
         ord(p[1]) - 64 if not p[1].isdigit() else int(p[1])]

    if c[0] - p[0] == 0:
        # same disc, must be next track
        return c[1] - p[1] == 1
    elif c[0] - p[0] == 1:
        # next disc, must start a new track
        return c[1] < 2
    else:
        # cannot be valid
        return False
4c5238fb21faa18c44fca2ea457da29f42dbfba3
48,126
import six


def check(pkt, pktType, keyMatches=None, **valMatches):
    """This function takes an object that was expected to come from a packet
    (after it has been JSONized) and compares it against the arg requirements
    so you don't have to have 10 if() statements to look for keys in a dict,
    etc..

    Args:
        @pkt        : object to look at
        @pktType    : object type expected (dict, list, etc..)
        @keyMatches : a list of minimum keys found in parent level of dict,
                      expected to be an array
        @valMatches : a dict of key:value pairs expected to be found in the
                      parent level of dict; the value can be data (like 5) OR
                      a type (like this value must be a @list@).

    Returns:
        None if everything matches, otherwise it returns a string as to why
        it failed."""
    # First check that the pkt type is equal to the input type
    if(type(pkt) is not pktType):
        return 'expected %s' % str(pktType)

    if(keyMatches):
        # Convert the keys to a set
        keyMatches = set(keyMatches)
        # The keyMatches is expected to be an array of the minimum keys we
        # want to see in the pkt if the type is dict
        if(type(pkt) is dict):
            if(not keyMatches.issubset(pkt.keys())):
                return 'missing, "%s"' % ', '.join(list(keyMatches - set(pkt.keys())))
        else:
            return None

    # Finally for anything in the valMatches find those values
    if(valMatches):
        # Pull out the dict object from the "valMatches" key
        if('valMatches' in valMatches.keys()):
            matchObj = valMatches['valMatches']
        else:
            matchObj = valMatches

        for k, v in six.iteritems(matchObj):
            # Check for the key
            if(k not in pkt.keys()):
                return 'key missing "%s"' % k
            # See how we should be comparing it:
            if(type(v) is type):
                if(type(pkt[k]) is not v):
                    return 'key "%s", bad value type, "%s", expected "%s"' % (k, type(pkt[k]), v)
            else:
                # If key exists check value
                if(v != pkt[k]):
                    return 'key "%s", bad value data, "%s", expected "%s"' % (k, pkt[k], v)
    return None
a2e9819cf25ed2d919da74e2ff3141537303cb0e
48,127
def calc_kl_div(a, b):
    """
    Calculates the KL-divergence with ref tensor a, comparing to a new tensor b.

    source of formula:
    https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
    """
    return (a * (a / b).log()).sum()
9c2c545a15d863561604304661d1071f5b88de90
48,128
def _get_instance_list(mig, field='name', filter_list=None):
    """
    Helper to grab field from instances response.

    :param mig: Managed Instance Group Object from libcloud.
    :type mig: :class: `GCEInstanceGroupManager`

    :param field: Field name in list_managed_instances response. Defaults
                  to 'name'.
    :type field: ``str``

    :param filter_list: list of 'currentAction' strings to filter on. Only
                        items that match a currentAction in this list will
                        be returned. Default is "['NONE']".
    :type filter_list: ``list`` of ``str``

    :return: List of strings from list_managed_instances response.
    :rtype: ``list``
    """
    filter_list = ['NONE'] if filter_list is None else filter_list

    return [x[field] for x in mig.list_managed_instances()
            if x['currentAction'] in filter_list]
efcb7c948583e7433ff30030cd934903f9953632
48,133
def comp_nthoctave_axis(noct, freqmin, freqmax):
    """Computes the frequency vector between freqmin and freqmax for the 1/n octave

    Parameters
    ----------
    noct: int
        kind of octave band (1/3, etc)
    freqmin: float
        minimum frequency
    freqmax: float
        maximum frequency

    Returns
    -------
    Frequency vector
    """
    if noct == 3:
        table = [
            10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80,
            100, 125, 160, 200, 250, 315, 400, 500, 630, 800,
            1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000,
            10000, 12500, 16000, 20000,
        ]
        f_oct = [f for f in table if (f >= freqmin and f <= freqmax)]
    else:
        f0 = 1000
        f_oct = [f0]
        i = 1
        while f_oct[-1] <= freqmax:
            f_oct.append(f0 * 2.0 ** (i / noct))
            i = i + 1
        f_oct = f_oct[:-2]
        i = -1
        while f_oct[0] > freqmin:
            f_oct.insert(0, f0 * 2.0 ** (i / noct))
            i = i - 1
        f_oct = f_oct[1:]

    return f_oct
ab7ef798021f8ee2872e5b2e0040860c80639647
48,134
def get_clauses(id_dict, db):
    """Get a list of clauses to be passed to a db query.

    Note that an empty condition will be returned if id_dict has no ids in it
    (either the dict is empty or all the lists within the dict are empty),
    which will in general have the unexpected effect of selecting everything,
    rather than nothing.

    Parameters
    ----------
    id_dict : dict {id_type: [int or str]}
        A dictionary indexed by the type of id, containing lists of ids of
        the respective type. If all the lists are empty, or the dict is
        empty, returns an empty condition. Note that id types of 'trid' and
        'tcid' will be mapped to text ref ids and text content ids,
        respectively.
    db : indra.db.DatabaseManager instance
        This instance is only used for forming the query, and will not be
        accessed or queried.

    Returns
    -------
    clause_list : list [sqlalchemy clauses]
        A list of sqlalchemy clauses to be used in query in the form:
        `db.filter_query(<table>, <other clauses>, *clause_list)`.
        If the id_dict has no ids, an effectively empty condition is returned.
    """
    # Handle all id types but text ref ids (trid) and text content ids (tcid).
    id_condition_list = [getattr(db.TextRef, id_type).in_(id_list)
                         for id_type, id_list in id_dict.items()
                         if len(id_list) and id_type not in ['tcid', 'trid']]

    # Handle the special id types trid and tcid.
    for id_type, table in [('trid', db.TextRef), ('tcid', db.TextContent)]:
        if id_type in id_dict.keys() and len(id_dict[id_type]):
            int_id_list = [int(i) for i in id_dict[id_type]]
            id_condition_list.append(table.id.in_(int_id_list))

    return id_condition_list
86effb5e1f035f3ba30bd7e305a2c71a933a3c33
48,136
def frontiers_from_bar_to_time(seq, bars):
    """
    Converts the frontiers (or a sequence of integers) from bar indexes to
    absolute times of the bars. The frontier is considered as the end of
    the bar.

    Parameters
    ----------
    seq : list of integers
        The frontiers, in bar indexes.
    bars : list of tuple of floats
        The bars, as (start time, end time) tuples.

    Returns
    -------
    to_return : list of float
        The frontiers, converted in time (from bar indexes).
    """
    to_return = []
    for frontier in seq:
        bar_frontier = bars[frontier][1]
        if bar_frontier not in to_return:
            to_return.append(bar_frontier)
    return to_return
ea86d14725a6761ba90ba4b62b7f78d687e6a269
48,145
def bin2string(arr):
    """ Converts the binary number array 'arr' to string format """
    bin_string = ''
    for bits in arr:
        bin_string += str(int(bits))
    return bin_string
1d0ae72bb93732562c51a1380bb519ccfdc1db31
48,148
def is_app_code(code: int) -> bool:
    """
    Checks whether a code is part of the app range.

    :param code: Code to check.
    :return: Boolean indicating whether `code` is within the app range.
    """
    return 0 < code < 0x10
d0bd6485875500418b5ddd60ae5fb3c9b965b71f
48,154
def get_group_lists(self, sym_grouping):
    """Gets the index list for left and right groups."""
    left_idx = [k for k in range(sym_grouping[0])]
    right_list = [k + sym_grouping[0] for k in range(sym_grouping[1])]
    return left_idx, right_list
b3adf2e12c7fa2cb4ac5ecdd47c8f9b18a77c49b
48,160
def actions(board):
    """
    Returns set of all possible actions (i, j) available on the board.
    """
    available_moves = set()
    for x, row in enumerate(board):
        for y, column in enumerate(row):
            if column is None:
                available_moves.add((x, y))
    return available_moves
b6d4057e3c5c369a8cadd4c10d7175e5003824bd
48,163
def chromatic_induction_factors(n):
    """
    Returns the chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.

    Parameters
    ----------
    n : numeric
        Function of the luminance factor of the background :math:`n`.

    Returns
    -------
    tuple
        Chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.

    Examples
    --------
    >>> chromatic_induction_factors(0.2)  # doctest: +ELLIPSIS
    (1.0003040..., 1.0003040...)
    """
    N_bb = N_cb = 0.725 * (1 / n) ** 0.2
    return N_bb, N_cb
d05e9243d3ee4a7255f59435bb02131638142f9f
48,164
import torch


def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    if max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1])
        y1 = y1.clamp(min=0, max=max_shape[0])
        x2 = x2.clamp(min=0, max=max_shape[1])
        y2 = y2.clamp(min=0, max=max_shape[0])
    return torch.stack([x1, y1, x2, y2], -1)
1a7021e32398625b87a847fc82f7316137dbe5cf
48,165
def can_self_enroll_in_course(course_key):
    """
    Returns True if the user can enroll themselves in a course.

    Note: an example of a course that a user cannot enroll in directly
    is a CCX course. For such courses, a user can only be enrolled by
    a CCX coach.
    """
    if hasattr(course_key, 'ccx'):
        return False
    return True
e5b9b66bb2885a17afbd947093cfbb4088095e7d
48,166
def depth_to_col_name(depth):
    """
    Derives the proper name of the column for locations given a depth.
    """
    if depth == 0:
        return "location"
    else:
        return "sub_" * depth + "lctn"
e6ae7a4e3563bf762dfe0a82d514314a849da0a6
48,176
def _determine_sentence_id(sentence, new_id, id_name, old_id):
    """
    Determine the appropriate id for this sentence. Ids here means doc id or par id.

    Args:
        sentence: The sentence whose ids to check.
        new_id: The key that the id can come up as without the id key word.
        id_name: The id in the comments to modify. One of 'newpar id', or 'newdoc id'.
        old_id: The id of the previous sentence.

    Returns:
        The value of the id of the sentence.
    """
    if sentence.meta_present(id_name):
        return sentence.meta_value(id_name)
    elif sentence.meta_present(new_id):
        return None
    else:
        return old_id
95a5f1987a9037df1fb7e73b2b47c8da2b265e1c
48,179
def string_slice(strvar, slicevar):
    """ slice a string with |string_slice:'[first]:[last]' """
    first, last = slicevar.partition(':')[::2]
    if first == '':
        return strvar[:int(last)]
    elif last == '':
        return strvar[int(first):]
    else:
        return strvar[int(first):int(last)]
1a52d6a7d71a6c1c116dcd9c1eef6d5d5962cd07
48,180
def get_cumulative_rewards(rewards, gamma=0.99):
    """
    rewards: rewards at each step
    gamma: discount for reward
    """
    discounted_rewards = []
    for rew in range(len(rewards)):
        Gt = 0
        pw = 0
        for r in rewards[rew:]:
            Gt = Gt + gamma ** pw * r
            pw = pw + 1
        discounted_rewards.append(Gt)
    return discounted_rewards
bd8716f6cfb1261048ebcd99dfeac023eef78e49
48,182
from typing import Any


def is_number(val: Any) -> bool:
    """ Check if a value is a number. """
    return isinstance(val, (int, float, complex))
37a62e0a846a22ee58c9bdc96cf63bfeb34bc539
48,191
def increment_ipv4_segments(segments):
    """
    Increment an ip address given in quad segments based on ipv4 rules

    :param segments: IPv4 segments to increment.
    :type segments: ``list`` or ``tuple``

    :return: Incremented segments.
    :rtype: ``list``
    """
    segments = [int(segment) for segment in segments]

    segments[3] += 1

    if segments[3] == 256:
        segments[3] = 0
        segments[2] += 1

        if segments[2] == 256:
            segments[2] = 0
            segments[1] += 1

            if segments[1] == 256:
                segments[1] = 0
                segments[0] += 1

    return segments
fd2f9ea2c74fa3546815b53e20407b63cee6b1a4
48,192
def _calculate_num_runs_failures(list_of_results):
    """Calculate number of runs and failures for a particular test.

    Args:
        list_of_results: (List) of JobResult object.
    Returns:
        A tuple of total number of runs and failures.
    """
    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
    num_failures = 0
    for jobresult in list_of_results:
        if jobresult.retries > 0:
            num_runs += jobresult.retries
        if jobresult.num_failures > 0:
            num_failures += jobresult.num_failures
    return num_runs, num_failures
f67a6b1fa6a5aefc19bfe7e99ed77473801e6b83
48,193
def test_request_response(connection, receiver):
    """ Test request/response messaging pattern. """
    def endpoint_callback(message):
        return message.payload + "-pong"

    connection.register_async_endpoint(endpoint_callback, "test.request")
    connection.call_async(receiver.create_cbf(), "test.request", "ping")

    assert ["ping-pong"] == receiver.wait_for_messages()
200dd1c9dcf752130b7748323ea9d269a9d53eab
48,196
from datetime import datetime
import logging


def get_now_time(format="%Y-%m-%d %H:%M:%S", show=False):
    """ Gets current time. """
    now = datetime.now()
    now = now.strftime(format)
    if show:
        logging.info("Current time: {}".format(now))
    return now
1cb1cc85c102601890319a7fc057484b3c79360a
48,197
def warning_formatter(msg, category, filename, lineno, line=None):
    """Format warning to only print filename, linenumber and message.

    Parameters
    ----------
    msg
        warning message
    category
        warning category
    filename
        filename of file where warning was raised
    lineno
        linenumber where warning was raised
    line
        line containing warning

    Returns
    -------
    str
        formatted warning message
    """
    return f"{filename}:L{lineno}: {msg}\n"
8b0c35077ca5c1872eefa00616e36ea67d40c93d
48,198
def parse_etraveler_response(rsp, validate):
    """ Convert the response from an eTraveler clientAPI query to a
    key,value pair

    Parameters
    ----------
    rsp : return type from
        eTraveler.clientAPI.connection.Connection.getHardwareHierarchy
        which is an array of dicts information about the 'children' of a
        particular hardware element.
    validate : dict
        A validation dictionary, which contains the expected values for some
        parts of the rsp. This is here for sanity checking, for example
        requiring that the parent element matches the input element to the
        request.

    Returns
    ----------
    slot_name, child_esn:
    slot_name : str
        A string given to the particular 'slot' for each child
    child_esn : str
        The sensor id of the child, e.g., E2V-CCD250-104
    """
    for key, val in validate.items():
        try:
            rsp_val = rsp[key]
            if isinstance(val, list):
                if rsp_val not in val:
                    errmsg = "eTraveler response does not match expectation for key %s: " % (key)
                    errmsg += "%s not in %s" % (rsp_val, val)
                    raise ValueError(errmsg)
            else:
                if rsp_val != val:
                    errmsg = "eTraveler response does not match expectation for key %s: " % (key)
                    errmsg += "%s != %s" % (rsp_val, val)
                    raise ValueError(errmsg)
        except KeyError:
            raise KeyError("eTraveler response does not include expected key %s" % (key))

    child_esn = rsp['child_experimentSN']
    slot_name = rsp['slotName']
    return slot_name, child_esn
b8f8d0cb395889dd2266e926abbf323c6ea7ae84
48,201
def hex_dist(x1, y1, z1, x2, y2, z2):
    """Returns how many steps one hex is from another"""
    return (abs(x1 - x2) + abs(y1 - y2) + abs(z1 - z2)) // 2
a9a49fb956292ff91cfb582be0c0a43b227d6285
48,205
def _get_repo_path(repo_url):
    """
    Extracts the username/reponame from the given github URL

    :param repo_url: (str) Full https path to the github repository
    :return: (list) The path components, i.e. [<username>, <reponame>]
    """
    position = repo_url.find("github.com")
    name = ""
    if position >= 0:
        name = repo_url[position + 11:]
        if name.endswith("/"):
            # Strip trailing slash
            name = name[:-1]
    else:
        if repo_url.endswith("/"):
            name = repo_url[:-1]
    return name.split("/")
108b395c596364a66675c217965bebe57df506e9
48,211
def nombre_joueurs_annee(annee, liste):
    """
    Function that takes a year (int) and the list of players, given as a
    list of dictionaries, and returns the number of players who played in
    the given year
    """
    compteur = 0
    for enreg in liste:
        if int(enreg['Année']) == annee:
            compteur += 1
    return compteur
75ae36a2cac32bd6b02a4a3ade1cfab82af8f575
48,220
def longest_line_length(code):
    """Return length of longest line."""
    if len(code) == 0:
        return 0
    return max(len(line) for line in code.splitlines())
cdf83015969293e5b6a0645bc9a99878e230696e
48,227
def count_valid(passports: list[dict[str, str]], fields: list[str]) -> int:
    """Counts the passports that have the right fields"""
    return sum(
        all(field in passport for field in fields)
        for passport in passports
    )
aa17d08fb77bc0d1b5346db45e8f72c8e4ad9094
48,228
def top_sentences(query, sentences, idfs, n):
    """
    Given a `query` (a set of words), `sentences` (a dictionary mapping
    sentences to a list of their words), and `idfs` (a dictionary mapping
    words to their IDF values), return a list of the `n` top sentences that
    match the query, ranked according to idf. If there are ties, preference
    should be given to sentences that have a higher query term density.
    """
    def qtd(sentence, query):
        """
        Calculates the query term density, i.e. the proportion of terms in
        the sentence that are also in the query.
        """
        query = set(query)
        count = sum([1 if word in query else 0 for word in sentence.split()])
        return count / len(sentence.split())

    ranked = {}
    for sentence, wordlist in sentences.items():
        ranked[sentence] = 0
        for word in query:
            if word in wordlist:
                ranked[sentence] += idfs.get(word, 0)

    ranked = dict(sorted(ranked.items(), key=lambda x: x[1], reverse=True))
    ranked = list(ranked.items())[:n]

    # Tie breaker using query term density
    for index in range(n - 1):
        if ranked[index][1] == ranked[index + 1][1]:
            left = qtd(ranked[index][0], query)
            right = qtd(ranked[index + 1][0], query)  # density of the next sentence
            if right > left:
                ranked[index], ranked[index + 1] = ranked[index + 1], ranked[index]

    ranked = [item[0] for item in ranked]
    return ranked
0440d2bec742f88728c294e9e359e18d38a106a7
48,232
def clean_up_feature_sets(*feature_sets, earliest_date: dict, last_date: dict) -> list:
    """Leave only features from inside the observation window."""
    results = []
    for feats in feature_sets:
        results.append(feats[(feats.DATE < feats.SUBJECT_ID.map(last_date)) &
                             (feats.DATE >= feats.SUBJECT_ID.map(earliest_date))])
    return results
06e4ac3713dce63a4237694fa6b6b0ed850216f6
48,233
from datetime import datetime


def str2timestamp(str_timestamp: str, date_format: str = '%d.%m.%Y %H:%M:%S') -> datetime:
    """
    convert a string into a datetime object

    :param str_timestamp:
    :param date_format:
    :return:
    """
    return datetime.strptime(str_timestamp, date_format)
fc66d55fdd7004d8ff1299cdc6adf39412701bd7
48,236
def str2bool(stuff):
    """Converts a string to a Boolean as a human would expect."""
    return stuff.lower() in ("yes", "true", "y", "1")
f48b8c5062ecf072c06903bfd1f9638926606811
48,237
def _check_if_any_list_value_in_string(listx, line):
    """
    Internal method to test if any of the list values is present in a line.
    """
    flag = False
    for value in listx:
        if value in line:
            flag = True
            break
    return flag
c5461fa063c0148e4017a94c2f3540dbc3bd2b7e
48,248
def flatten_lists_to_csv(data):
    """Converts the passed in data to csv.

    Assuming:

        x = [
            ["v1", 98, 23],
            ["v2", 0.25, 0.56],
        ]

    then flatten_lists_to_csv(data) will return the following string:

        v1,v2
        98,0.25
        23,0.56

    :param list data: A list of lists holding a flat view of the data to
        convert to csv.

    :return: A string representing the csv view of the passed in data.
    """
    rows = []
    i = 0
    while True:
        try:
            row = []
            for j in range(len(data)):
                row.append(str(data[j][i]))
            rows.append(",".join(row))
            i += 1
        except IndexError:
            break
    return "\n".join(rows)
d02297c9829dac0fda4629e69db09cceae402a7a
48,249
import re


def re_search(pattern, text, plural=False):
    """Regex helper to find strings in a body of text"""
    match = [m.group(1) for m in re.finditer(pattern, text)]
    if plural:
        return match
    else:
        if match:
            return match[0]
66998eb3b29978260eb60603cf440f95a16eb532
48,252
import copy


def merge_rows(row1, row2):
    """Merge two rows of the table of CVE data"""
    output = copy.deepcopy(row1)
    for key in row2:
        if key not in output:
            output[key] = row2[key]
        elif output[key] == row2[key]:
            continue
        elif key == 'References':
            output['References'].update(row2['References'])
        elif key == 'Severity':
            if output['Severity'] == 'Critical' or row2['Severity'] == 'Critical':
                output['Severity'] = 'Critical'
            else:
                output[key] = '{old}, {new}'.format(old=output[key], new=row2[key])
        else:
            output[key] = '{old}, {new}'.format(old=output[key], new=row2[key])
    return output
62e206636f4775efc2173ea6a835ec7a7c1c3d1f
48,254
def _year_to_decade(yr):
    """
    A simple function so I don't mess this up later, this constructs the
    *redistricting* decade of a district. This is offset from the regular
    decade a year is in by two.
    """
    return (yr - 2) - (yr - 2) % 10
69c971f422801d260bdd5151ce47165785f9a46b
48,255
def positive(number: int) -> int:
    """
    :return: Number, or 1 if number is negative or 0
    """
    return max(1, number)
82380388554c5f42096e33509424c5f67167c463
48,256
import click


def _cb_key_val(ctx, param, value):
    """
    from: https://github.com/mapbox/rasterio/blob/69305c72b58b15a96330d371ad90ef31c209e981/rasterio/rio/options.py

    click callback to validate `--opt KEY1=VAL1 --opt KEY2=VAL2` and collect
    in a dictionary like the one below, which is what the CLI function
    receives. If no value or `None` is received then an empty dictionary is
    returned.

        {
            'KEY1': 'VAL1',
            'KEY2': 'VAL2'
        }

    Note: `==VAL` breaks this as `str.split('=', 1)` is used.
    """
    if not value:
        return {}
    else:
        out = {}
        for pair in value:
            if "=" not in pair:
                raise click.BadParameter(
                    "Invalid syntax for KEY=VAL arg: {}".format(pair)
                )
            else:
                k, v = pair.split("=", 1)
                # cast numbers
                for func in (int, float):
                    try:
                        v = func(v)
                    except Exception:
                        pass
                # cast bools and None
                if isinstance(v, str):
                    if v.lower() in ["true", "yes"]:
                        v = True
                    elif v.lower() in ["false", "no"]:
                        v = False
                    elif v.lower() in ["none", "null", "nil", "nada"]:
                        v = None
                out[k.lower()] = v
        return out
ed06b49232921fb63f2e99269ea1d32fb1bdaa84
48,260
def get_path(obj, path):
    """Fetch data from an object along a nested path, or return None. Avoids raising."""
    for each in path:
        try:
            obj = obj[each]
        except Exception:
            return None
    return obj
799c3a68ecdc6662ad5d2747501ff05ae93fe39b
48,266
def convert_collection_to_path_param(collection):
    """
    Convert a list of elements to a valid path parameter by concatenating
    with ",". This is used when calling some endpoints that require a list
    of instance ids. If the collection parameter is not of type ``list``,
    ``tuple``, ``str`` or ``int``, ``None`` is returned.

    :param collection: Some object that may be a collection
    :return: Either a comma-separated list of the elements or None if invalid type
    """
    if isinstance(collection, list) or isinstance(collection, tuple):
        # str() each element so non-string ids (e.g. ints) can be joined
        return ','.join(str(element) for element in collection)
    elif isinstance(collection, str) or isinstance(collection, int):
        return str(collection)
    else:
        return None
3fa52bbc39c32c5e088338b6e8f1f84ec39a3630
48,269
def find_letter(letters, l, index):
    """
    Find the first occurrence of a letter in a word after a given index.

    Searches forward in the word after index-th letter. If no matching
    letter is found, search backwards for the latest occurrence before the
    index-th letter.

    :return: index of the found occurrence, otherwise -1
    """
    try:
        index_offset = letters.index(l, index + 1)
    except ValueError:
        letters.reverse()
        try:
            index_offset = len(letters) - letters.index(l, len(letters) - index) - 1
        except ValueError:
            index_offset = -1
    return index_offset
5e681a2734996055eb6d117af14ff8b1f11e9ab3
48,275
def seat(bpass):
    """Returns the seat ID for the given boarding pass (`bpass`)."""
    row = sum(2**(6-n) for n, s in enumerate(bpass[0:7]) if s == 'B')
    col = sum(2**(2-n) for n, s in enumerate(bpass[7:]) if s == 'R')
    return (row * 8) + col
29b6e929798ccbd6a2c3aaf916940df75844d176
48,279
from typing import List


def _break_line_in_two(line: str, max_chars_per_line: int) -> List[str]:
    """Breaks a line into the first line lower than max_char_per_line and
    the remaining string"""
    if len(line) <= max_chars_per_line:
        return [line]
    position = 0
    while position < max_chars_per_line:
        new_position = line.find(" ", position)
        if new_position == -1 or new_position >= max_chars_per_line - 1:
            return [line[:position-1], line[position:]]
        position = new_position + 1
    return [line]
d7997ee8e5033ebf5b109c126181b9978a622a0e
48,281
def has_access(user, workflow):
    """Calculate if user has access to workflow.

    :param user: User object
    :param workflow: Workflow object

    :return: True if it is owner or in the shared list
    """
    return workflow.user == user or user in workflow.shared.all()
d42ec8b9f7ca8dc7842b8a9950c1374ae7afcfbd
48,290
def abs_value_equal(x, y):
    """Return whether or not the absolute value of both numbers is the same.
    Please refrain from using libraries (abs)

    >>> abs_value_equal(-2, -2)
    True
    >>> abs_value_equal(-3, 3)
    True
    >>> abs_value_equal(1, 2)
    False
    >>> abs_value_equal(3, 3)
    True
    >>> abs_value_equal(-6, -6)
    True
    >>> abs_value_equal(-1, -5)
    False
    >>> abs_value_equal(5, -6)
    False
    """
    return (x == y) or (x + y == 0)
5047caa23e0e1fd78e8696f706c5aa8ad43b0e22
48,291
def ListVersions(client, messages, pkg, version_view):
    """Lists all versions under a package."""
    list_vers_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsListRequest(
        parent=pkg, view=version_view)
    list_vers_res = client.projects_locations_repositories_packages_versions.List(
        list_vers_req)
    return list_vers_res.versions
ea7b3c320817094df3bc23b05cf9cb9f18c2a7d1
48,292
import typing


def decode_byte_array(as_bytes: typing.List[int]) -> bytes:
    """Decodes a byte array."""
    return bytes(as_bytes)
a029272fc8ab76e169d1cc15ae7b248fac9ea719
48,297
import struct


def hex_to_32bit_IEEE754_float(hex_str):
    """Transform a string containing a hexadecimal representation of a
    32-bits IEEE754-formatted float value to a float
    """
    # Pack as an unsigned 32-bit int so values with the sign bit set
    # (negative floats) do not overflow the signed "i" format.
    return (struct.unpack("f", struct.pack("I", int(hex_str, 16))))[0]
961b87e7774268bcb3e55459097397d7cfd22b4c
48,299
def subsample(y, limit=256, factor=2):
    """
    If a given Series is longer than `limit`, returns subsampled sequence by
    the specified integer factor
    """
    if len(y) > limit:
        return y[::factor].reset_index(drop=True)
    return y
dda6aa9ec00276e94be1db7354cbe3ad830f728b
48,301
from typing import Union

import torch


def AvgPoolNd(
    dim: int,
    kernel: int,
    stride: Union[None, int] = None,
    dtype=None,
):
    """Average pooling layer.

    Args:
        dim (int): Dimensionality.
        kernel (int): Kernel size.
        stride (int, optional): Stride.
        dtype (dtype): Data type.

    Returns:
        object: Average pooling layer.
    """
    return getattr(torch.nn, f"AvgPool{dim}d")(
        kernel_size=kernel,
        stride=stride,
        padding=0,
    )
5488f87b9f371bba9bd5de4f29a2e04e799825db
48,309
def logout_user(_):
    """Log user out."""
    return {}, 200
edb75ddc32f905c62600ef7706ccd060d02f466c
48,310
from typing import Any


def default_serialize(value: Any) -> str:
    """Default value serializer.

    ``None`` -> ``""``
    ``value: Union[bool, int, float, str]`` -> ``str(value)``
    ``value: Any`` -> ``repr(value)``
    """
    if isinstance(value, str):
        return value
    if isinstance(value, (bool, int, float)):
        return str(value)
    if value is None:
        return ""
    return repr(value)
fdb96224b765cd2eef8bb33bd04b8bc6f9a6510e
48,315
def tuple_(*args):
    """:yaql:tuple

    Returns tuple of args.

    :signature: tuple([args])
    :arg [args]: chain of values for tuple
    :argType [args]: chain of any types
    :returnType: tuple

    .. code::

        yaql> tuple(0, [], "a")
        [0, [], "a"]
    """
    return args
54474e0230e3821903d60f441c698cb73beec596
48,316
import re


def get_web_element_attribute_names(web_element):
    """Get all attribute names of a web element"""
    # get element html
    html = web_element.get_attribute("outerHTML")
    # find all with regex
    pattern = """([a-z]+-?[a-z]+_?)='?"?"""
    return re.findall(pattern, html)
33aa9d0ab94525e6c68581aa4ce45c457e269bc4
48,320
import torch


def sort_rows(m, n_rows):
    """sort N*M matrix by row

    Args:
        m (torch.Tensor): N*M matrix to be sorted
        n_rows (int): no of rows to be sorted

    Returns:
        sorted (torch.Tensor): N*M matrix with sorted row
    """
    m_T = m.transpose(1, 0)
    sorted_m = torch.topk(m_T, k=n_rows)[0]
    return sorted_m.transpose(1, 0)
c8c27cc05302d55750e54600bbebe1726939dd83
48,321
from typing import Any


def _isstr(value: Any) -> bool:
    """
    Check to see if this is a stringlike or a (nested) iterable of stringlikes
    """
    if isinstance(value, (str, bytes)):
        return True
    if hasattr(value, "__iter__"):
        return all(_isstr(v) for v in value)
    return False
a1232c628f3a174297e4db03f239673c8b11bd16
48,322
def c_to_f(temperature):
    """Converts temperature from celsius to fahrenheit

    Args:
        temperature: floating point representing the temperature in celsius

    Returns:
        temperature in fahrenheit
    """
    if temperature is None:
        return None
    return (temperature * 9 / 5) + 32
1606e0f64fb5ebe8146c63a3a78d3b47fbaf9871
48,323
import torch


def adversarial_loss(prob, label):
    """Compute adversarial losses in GAN networks.

    Note:
        As a reminder, prob is either D(x) or D(G(z)), and label is either
        0 (fake) or 1 (real). With BCELoss(), this means that

        - l(D(x), 1) = -log(D(x)): "Real" discriminator loss
        - l(D(x), 0) = -log(1 - D(x)): "Fake" discriminator loss
        - l(D(G(z)), 1) = -log(D(G(z))): Non-saturating generator loss

    Args:
        prob: Discriminator output, in interval [0, 1]
        label: Data label, with fake = 0 and real = 1
    """
    return torch.nn.BCEWithLogitsLoss()(prob, label)
1eb7eb8e5d82a354e4272fa5de57d67f30ceca92
48,328
def create_vocab(docs):
    """Create a vocabulary for a given set of documents"""
    words = set()
    for d in docs:
        for w in d:
            words.add(w)
    vocab = {}
    for i, w in enumerate(list(words)):
        vocab[w] = i
    return vocab
e02a5b6d1c8eb18ef6fbd279b3ecd7f0b62d1107
48,335
def partition(length, parts):
    """Partitions 'length' into (approximately) equal 'parts'."""
    sublengths = [length // parts] * parts
    for i in range(length % parts):  # treatment of remainder
        sublengths[i] += 1
    return sublengths
d398b104ed434d076244b00ad4dc8876821e3e75
48,339
import re


def parse_checksum_row(row):
    """
    Args:
        row: a line of text from pt-table-checksum

    Returns:
        An array of elements, if the regex matches
        [ts, errors, diffs, rows, chunks, chunks_skipped, elapsed_time, db, tbl]
        Ex:
        ['08-30T06:25:33', '0', '0', '28598', '60', '0',
         '0.547', 'pbdata04159', 'userstats']

        If the regex doesn't match, return nothing.
    """
    p = re.compile(''.join(r"^(\d+-\d+T\d+:\d+:\d+)\s+(\d+)\s+(\d+)\s+"
                           r"(\d+)\s+(\d+)\s+(\d+)\s+(\d+\.\d+)\s+"
                           r"(.+?)\.(.+)$"))
    m = p.match(row)
    if m:
        return m.groups()
4f239ba582c07a7135d00e7078ec578dcd13de83
48,342
def get_list_of_multiple_or_one_or_empty_from_dict(input, name, vtype=None):
    """
    Extracts objects by 'name' from the 'input' and returns as a list.
    Tries both plural and singular names from the input.

    If vtype is specified, tries to convert each of the elements in the
    result to this type.

    :param input:   - dict - Input dictionary. Event of Lambda for example.
    :param name:    - str - Name of attribute (in plural form).
    :param vtype:   - type - Type to be converted to. Must be callable.
                      Tested types: str, int, float

    :return:        - list - List of vtypes, or list of whatever was in
                      input, or empty list.

    :raises ValueError: If the input arguments are of the wrong type.
    """
    if not isinstance(input, dict):
        raise ValueError("'input' attribute must be a dict. Received: {}".format(type(input)))
    if not isinstance(name, str):
        raise ValueError("'name' attribute must be a str. Received: {}".format(type(name)))

    def convert(obj, t):
        return obj if not t else t(obj)

    results = input.get(name) or input.get(name.rstrip('s'))
    if not results:
        return []

    # Wrap to list if not there yet.
    if not isinstance(results, (list, tuple, set)):
        results = [results]
    else:
        results = list(results)

    # Apply vtype conversion if required.
    return [convert(x, vtype) for x in results]
b038bd1c2ff390d32bd058344f48175ca8071b27
48,345
def lower_keys(obj: dict) -> dict:
    """
    Lowercase first level keys of a dictionary

    :param obj: object to be lowercased
    :return: same object with lowercase keys in first level
    """
    nu_obj = dict()
    for key in obj:
        nu_obj[key.lower()] = obj[key]
    return nu_obj
e3043d3b850128bb94b9c9388ab59e279a87bc40
48,351
def CSVWriter(iterable, outLoc, header=""):
    """
    Writes an iterable to a CSV file.

    :param iterable: List of list
    :param outLoc: file location. Where to place it.
    :param header: header of the CSV file
    :return: 1 on success, 0 if there was nothing to write
    """
    if not iterable:
        print("nothing to write")
        return 0

    out = open(outLoc, 'w')
    if header:
        out.write(header + '\n')

    # Only works if iterable is a nested list
    for member in iterable:
        for item in member:
            out.write(str(item) + ',')
        out.write('\n')

    out.close()
    print("write to " + outLoc + " successful.")
    return 1
25a800d84b5ad4733fa6a4205f49823191a8523f
48,353
def fibonacci(n):
    """
    Returns a list of fibonacci numbers of length n

    Parameters
    ----------
    n : int
        Number in fibonacci suite desired

    Returns
    -------
    fib_list : list[ints]
        List of integers
    """
    memo = [0, 1]
    for i in range(2, n):
        memo += [memo[i - 2] + memo[i - 1]]
    return memo[:n]
5c2e29732c383b92a7a56fd618418d4301bbcd3c
48,362
import math


def get_number_elements_per_split(total_elements, num_splits):
    # type: (int, int) -> int
    """Compute the number of elements in each split"""
    return math.ceil(total_elements / float(num_splits))
9f80c0db1042291afe7fa9513464ff186946feac
48,365
def fit_in_range(value, min_value, max_value):
    """Make the given value fit into the range min_value..max_value"""
    if value < min_value:
        value = min_value
    elif value > max_value:
        value = max_value
    return value
e12de739061ce1c6b8ca72502a15cafd58b56555
48,369
def transpose_list(list_of_dicts):
    """Transpose a list of dicts to a dict of lists.

    :param list_of_dicts: to transpose, as in the output from a parse call
    :return: Dict of lists
    """
    res = {}
    for d in list_of_dicts:
        for k, v in d.items():
            if k in res:
                res[k].append(v)
            else:
                res[k] = [v]
    return res
48c0213992f1c0a3d542f5631bad1522a096ab14
48,373
def get_cond_latents_at_level(cond_latents, level, hparams):
    """Returns a single or list of conditional latents at level 'level'."""
    if cond_latents:
        if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]:
            return [cond_latent[level] for cond_latent in cond_latents]
        elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]:
            return cond_latents[level]
6a2184d059753b8a946ec24bf4e1bbff6efe9910
48,375
def prepare_fetch_incidents_query(fetch_timestamp: str,
                                  fetch_severity: list,
                                  fetch_table: str,
                                  fetch_subtype: list,
                                  fetch_fields: str,
                                  fetch_limit: str) -> str:
    """
    Prepares the SQL query for fetch incidents command

    Args:
        fetch_limit: Indicates how many incidents should be queried
        fetch_timestamp: The date from which threat logs should be queried
        fetch_severity: Severity associated with the incident.
        fetch_subtype: Identifies the log subtype.
        fetch_table: Identifies the fetch type.
        fetch_fields: Fields to fetch from the table.

    Returns:
        SQL query that matches the arguments
    """
    query = f'SELECT {fetch_fields} FROM `{fetch_table}` '  # guardrails-disable-line
    query += f'WHERE time_generated Between TIMESTAMP("{fetch_timestamp}") ' \
             f'AND CURRENT_TIMESTAMP'
    if fetch_subtype and 'all' not in fetch_subtype:
        sub_types = [f'sub_type.value = "{sub_type}"' for sub_type in fetch_subtype]
        query += f' AND ({" OR ".join(sub_types)})'
    if fetch_severity and 'all' not in fetch_severity:
        severities = [f'vendor_severity.value = "{severity}"' for severity in fetch_severity]
        query += f' AND ({" OR ".join(severities)})'
    query += f' ORDER BY time_generated ASC LIMIT {fetch_limit}'
    return query
a68e0c9bac3d6ef451d772bffbc0fd43e23b7360
48,376
def fancy_retrieve(broker, num_messages=1):
    """
    Fancy wrapper that combines several operations to retrieve a message
    from the message broker.

    :param broker: The Broker instance
    :param num_messages: The number of messages to retrieve from the queue

    :rtype: list
    :return: The list of messages kept in the internal message buffer
    """
    broker.connect()
    broker.subscribe()
    messages = broker.pop_messages(num_messages)
    broker.disconnect()
    return messages
24668471841a3bb71691d2dcd3d715bd97626e53
48,377
def HasLib(parsed, name, version=None):
    """Check if the parsed yaml has specified the given library.

    Args:
        parsed: parsed from yaml to python object
        name: str, Name of the library
        version: str, If specified, also matches against the version of the library.

    Returns:
        True if library with optionally the given version is present.
    """
    libs = parsed.libraries or []
    if version:
        return any(lib.name == name and lib.version == version for lib in libs)
    else:
        return any(lib.name == name for lib in libs)
67b1fbf0823eeaaaf666fe6f08af817f622e19b4
48,381
def find_biggest_value_per_day(day_data):
    """
    Take pressure data per day and find the biggest value.
    If two systolic values are equal, compare by diastolic.
    """
    values = [(data[2], data[3]) for data in day_data]
    systolic, diastolic = max(values)
    return systolic, diastolic
f46a866809278b95851c8583a1682de58749a64f
48,384
def db_row_count(cursor, schema_table):
    """
    :param cursor: open cursor on a Postgres database connection
    :type cursor: psycopg2.cursor

    :param schema_table: schema.table
    :type schema_table: str|unicode

    :return: number of rows in the specified schema.table
    :rtype: int
    """
    query = """SELECT COUNT(1) FROM %s;""" % schema_table
    cursor.execute(query)
    result = cursor.fetchall()
    return int(result[0][0])
7c8547940233fb9b6bde894badf8096938ed7892
48,385
def datasetsListIntoTree(dList):
    """Transforms list of datasets into a nested dict where keys are year and quartal"""
    d = {}
    for rec in dList:
        if rec.quartal:
            if rec.year not in d:
                d[rec.year] = [None] * 4
            d[rec.year][rec.quartal - 1] = rec
        else:
            d[rec.year] = rec
    return d
37a9ea05d919d02ff5bcf3844ee9df78ea2ba040
48,387
import hashlib


def sha256_hex(hash_input):
    """
    Return 64-character hex representation of SHA256 of input.

    Args:
        hash_input (obj): a python object having a string representation.

    Returns:
        length-64 hexadecimal string representing result of applying SHA256
        to utf-8 encoding of string representation of input.

    Example:
        >>> sha256_hex("abc")
        'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'
    """
    return hashlib.sha256(str(hash_input).encode('utf-8')).hexdigest()
1261bbe0b9a208653ab7b069b4d6a64c24c9b57c
48,390
import shutil


def tpl_repo(request, tmp_path, tpl_path):
    """
    Creates a temporary folder containing a set of test templates.

    The templates to copy can be passed in by using the tpl_repo_contents
    mark. Used for testing installation of template directories.
    """
    repo_dir = tmp_path / "template_repository"
    repo_dir.mkdir()

    marker = request.node.get_closest_marker("tpl_repo_contents")
    templates = ["hello_world", "test"]
    if marker is not None:
        templates = [a for a in marker.args]

    for tpl in templates:
        tpl_dir = tpl_path / tpl
        if tpl_dir.is_dir():
            shutil.copytree(tpl_dir, repo_dir / tpl)

    return repo_dir
7299c86555595a063d9d043fcf1c5ebf7c5c4c6f
48,392
def get_base_name(full_name: str) -> str:
    """Extract the base name of a package.

    Args:
        full_name: Full name of the package of interest, e.g., pandas.testing

    Returns:
        Base name of the provided package, e.g., pandas
    """
    return full_name.split('.')[0]
5a9fb854021fd878afe729dd938a4415b4ad39b5
48,395
def project_directory(packaged_scene, package_root, source_scene):
    """
    Project directory to use in scene settings

    Returns:
        Project directory str
    """
    return " project_directory \"[python nuke.script_directory()]\"\n"
21e6ea43cacadd9adaa0ef760b34384be770562a
48,397
def color_str_yellow(s):
    """Color string YELLOW for writing to STDOUT."""
    return "\033[93m{}\033[00m".format(s)
4c02d3a111da7213dd887bb168f7f77b0b733f41
48,399
def cyclic_fetch_elements_in_array(array, start_index, searchable_size):
    """
    Fetch elements without worrying about reaching the end of the array

    Args:
        array (list of Any): anything in the form of array, can be an array of ADT
        start_index (int): the starting index to slice from
        searchable_size (int): the number of elements included from start_index

    Returns:
        list of Any
    """
    array_length = len(array)

    # Determine if start_index + searchable_size will cause an overflow,
    # and if so, calculate how many elements will exceed.
    overflow_n = start_index + searchable_size - array_length

    # If it is larger than 0, that means we have an overflow
    if overflow_n > 0:
        # We need to return 2 concatenated arrays:
        # 1. Elements from the current index to the maximum length of the array
        # 2. Elements from the start to the overflow_n
        return array[start_index:array_length] + array[0:overflow_n]
    else:
        # Else, return elements as usual using slicing
        return array[start_index:(start_index + searchable_size)]
663b2fbbd576d926853e805715a7164b015e6c05
48,405
def value_for_key(data, keypath, default=None, exception_on_miss=False):
    """Returns the value at the given *keypath* within :attr:`values`.

    A key path is a list of components delimited by dots (periods). The
    components are interpreted as dictionary keys within the structure. For
    example, the key path ``'a.b'`` would yield ``'c'`` with the following
    :attr:`values` dictionary: ::

        {'a': {'b': 'c'}}

    If the key path does not exist *default* will be returned.
    """
    v = data
    for component in keypath.split('.'):
        # Descend only into dict-like containers (those exposing keys())
        # that actually hold the component.
        if v is not None and hasattr(v, 'keys') and component in v:
            v = v[component]
        else:
            if exception_on_miss:
                raise KeyError("Could not locate required tag: '%s'" % component)
            v = default
    return v
f563fd845d6138d8651caf8780a1089b2b2d99f9
48,407
def is_json(input_file):
    """
    Check if the file is in JSON format.
    The function reads the first character of the file, and if it is "{"
    then returns True.

    :param input_file: file name (string)
    :return: Boolean.
    """
    with open(input_file) as unknown_file:
        c = unknown_file.read(1)
        if c == '{':
            return True
    return False
9213688ec0fc6d0b659360ba37dc5c740886e571
48,408
from pathlib import Path
from datetime import datetime


def detect_new_day(lock_file: Path = Path('.daylock')):
    """
    Return true when executed on a different day than the last time.
    It uses a file (lock_file) to keep track of the last day
    """
    day_of_the_year = datetime.now().timetuple().tm_yday
    new_day = True

    if lock_file.exists():
        with open(lock_file, 'r') as file:
            lock = int(file.read())
        new_day = lock != day_of_the_year
        if new_day:
            with open(lock_file, 'w') as file:
                file.write(str(day_of_the_year) + '\n')
    else:
        with open(lock_file, 'w') as file:
            file.write(str(day_of_the_year) + '\n')

    return new_day
a56ec8cca97ee423b163d0587d0cb1d90371b17a
48,414
from typing import Iterable, Sequence
import pathlib


def get_modified_files(
    files: Iterable[pathlib.Path], *, since: float
) -> Sequence[pathlib.Path]:
    """Return the files that were modified since the given timestamp."""
    return [f for f in files if f.stat().st_mtime > since]
1576da300654d6f4aa9dbc331fc23740bc25dd7c
48,416
def share_percentage(df, columns):
    """
    This function shows the balance in each column of a dataframe.
    For each column, it returns the list of unique values together with the
    percentage of each value in the dataset.
    """
    shares_cols = dict()
    nrowss = len(df)
    for col in columns:
        unq_vals = df[col].unique()
        shares = dict()
        for val in unq_vals:
            shares[val] = round(len(df[df[col] == val]) / nrowss * 100.0, 3)
        shares_cols[col] = shares
    return shares_cols
b95ace6ae03d29b5bfaf0958719586013f435b2d
48,423