Dataset schema:
  content : string (lengths 39 to 14.9k)
  sha1    : string (length 40)
  id      : int64 (0 to 710k)
def create_index(transaction_executor, table_name, index_attribute):
    """
    Create an index for a particular table.

    :type transaction_executor: :py:class:`pyqldb.execution.executor.Executor`
    :param transaction_executor: An Executor object allowing for execution of statements within a transaction.
    :type table_name: str
    :param table_name: Name of the table to add indexes for.
    :type index_attribute: str
    :param index_attribute: Index to create on a single attribute.

    :rtype: int
    :return: The number of changes to the database.
    """
    print("Creating index on '{}'...".format(index_attribute))
    statement = 'CREATE INDEX on {} ({})'.format(table_name, index_attribute)
    # print(statement)
    cursor = transaction_executor.execute_statement(statement)
    return len(list(cursor))
6f128ea042076fc1e35a5cea816069bd90258ef3
86,924
def get_lines_number(filename):
    """Get number of lines in text file."""
    number = 0
    with open(filename) as f:
        for _ in f:
            number += 1
    return number
a1eaf72c4d184fe29e090143be1a100517cb01b2
86,925
def _isNamedTuple(obj):
    """Heuristic check if an object is a namedtuple."""
    return hasattr(obj, "_fields") and hasattr(obj, "_asdict") and callable(obj._asdict)
ec383341f8168b9568105c027f10d71a2192680b
86,929
def flux_at_edge(X, min_value=0):
    """Determine if an edge of the input has flux above min_value

    Parameters
    ----------
    X: tensor or array
        2D matrix to evaluate
    min_value: float
        Minimum value to trigger a positive result.

    Returns
    -------
    result: bool
        Whether or not any edge pixels are above the minimum_value
    """
    return bool(max(X[:, 0].max(), X[:, -1].max(), X[0].max(), X[-1].max()) > min_value)
3cb29f64e6b9cdce0cb987d27af95b828e10475a
86,930
def main_float_input(message):
    """
    Takes a message to print when asking for input, then converts the input to float.
    Repeats user input until it can be converted to float without error.
    Returns that float once done.
    """
    try_input = input(message)
    while True:
        try:
            x = float(try_input)
            return x
        except ValueError:
            try_input = input("Not a valid input, try again: ")
8bc3aa4f78069dbdc115a90751254ab4652dacdc
86,932
def filter_keras_submodules(kwargs):
    """Selects only arguments that define keras_application submodules."""
    submodule_keys = kwargs.keys() & {'backend', 'layers', 'models', 'utils'}
    return {key: kwargs[key] for key in submodule_keys}
4f1ed71131b27dfe9e6c424d2dbbebd1a372d920
86,933
import re

def is_listener(struct):
    """
    Returns whether the struct name either starts with `on` or ends with `listener`.
    This function works with both snake_case and CamelCase patterns.
    """
    matches = re.match(r'(?:^[oO]n[_A-Z].*|.*(?:_l|L)istener$)', struct.name)
    return matches is not None
41bdb47e637806383d5f4fd5d801f9c26e64494d
86,934
def bool2int(bool):
    """Convert a bool to an int."""
    if bool:
        return 1
    else:
        return 0
80823d633e9ee6fd3f51ac8b834f301fa2d32107
86,935
def to_increment_str(val):
    """Return passed value with a preceding '+' if it is a positive number"""
    try:
        float(val)
    except:
        return val
    if float(val) > 0:
        return f"+{int(float(val)):,}"
    else:
        return f"{int(float(val)):,}"
69a77d708187fcbe98f4733c588e7e2821a50bcf
86,938
def read_tpu_file(tpu_file):
    """Reads the file that contains the TPU creation command and returns that command."""
    with open(tpu_file) as inf:
        tpu_cmd = inf.readline().strip()
    return tpu_cmd
ae390c1c8df78bd42fa09f002de250b7ebd6a236
86,939
def _flatten_nested_keys(dictionary):
    """
    Flatten the nested values of a dictionary into tuple keys
    E.g. {"a": {"b": [1], "c": [2]}} becomes {("a", "b"): [1], ("a", "c"): [2]}
    """
    # Find the parameters that are nested dictionaries
    nested_keys = {k for k, v in dictionary.items() if type(v) is dict}
    # Flatten them into tuples
    flattened_nested_keys = {(nk, k): dictionary[nk][k] for nk in nested_keys for k in dictionary[nk]}
    # Get original dictionary without the nested keys
    dictionary_without_nested_keys = {k: v for k, v in dictionary.items() if k not in nested_keys}
    # Return merged dicts
    return {**dictionary_without_nested_keys, **flattened_nested_keys}
30b60b1a6b0866d2407f18f70cde53677601b039
86,945
def get_line_from_two_points(p1, p2):
    """
    Returns a function which takes an x-coordinate and returns the
    corresponding y-coordinate on the line defined by the points p1, p2
    """
    slope = p2[1] - p1[1]
    slope /= p2[0] - p1[0]
    return lambda x: (slope * (x - p1[0])) + p1[1]
81acd90af283b3b09e57a3e8d10a28df44a88af3
86,950
import io

def parse_test_file(filename, nl_format):
    """Check for any special arguments and return them as a list."""
    # Disable "universal newlines" mode; we can't directly use `nl_format` as
    # the `newline` argument, because the "bizarro" test uses something Python
    # considers invalid.
    with io.open(filename, newline='') as f:
        for l in f.read().split(nl_format):
            if 'glcpp-args:' in l:
                return l.split('glcpp-args:')[1].strip().split()
    return []
fe9b8f77452a0e049b50ccafc82cfc171842b5a0
86,951
def doctemplate(*args):
    """Return a decorator putting ``args`` into the docstring of the decorated ``func``.

    >>> @doctemplate('spam', 'spam')
    ... def spam():
    ...     '''Returns %s, lovely %s.'''
    ...     return 'Spam'

    >>> spam.__doc__
    'Returns spam, lovely spam.'
    """
    def decorator(func):
        func.__doc__ = func.__doc__ % tuple(args)
        return func
    return decorator
d293cff69b047f29eaa6170db383f80dd7a96295
86,954
import math

def is_prime(n):
    """Return True if n is a prime number, False otherwise."""
    if n < 2:
        return False
    return not any(n % i == 0 for i in range(2, math.floor(math.sqrt(n)) + 1))
f0e335e2c45a24c0ab0d8e1e69ffc5a6feb42866
86,957
import ast

def compile_single_node(node, filename):
    """Compile a 'single' ast.node (expression or statement)."""
    mode = 'eval' if isinstance(node, ast.Expr) else 'exec'
    if mode == 'eval':
        root = ast.Expression(node.value)
    else:
        root = ast.Module([node])
    return (compile(root, filename, mode), mode)
bed8c809d11df486aa5f86addb1b9e4563baf17f
86,961
def x_aver_top_mass(xp_mass, xpf_mass):
    """
    Calculates the average mass concentration at the top of the column.

    Parameters
    ----------
    xp_mass : float
        The mass concentration of distillate, [kg/kg]
    xpf_mass : float
        The mass concentration at the point of feed, [kg/kg]

    Returns
    -------
    x_aver_top_mass : float
        The average mass concentration at the top of the column, [kg/kg]

    References
    ----------
    Dytnersky, p. 230, eq. 6.8
    """
    return (xp_mass + xpf_mass) / 2
a6cf2405f71701f247895f1935a4aaa8e88c7865
86,963
def resolve_user(user_name, user_resolutions):
    """Resolve user names."""
    user = user_name
    if user in user_resolutions:
        user = user_resolutions[user]
    else:
        user = user_name.split(".")[1]
    return "[@{}](/p/{})".format(user, user)
a02453731ca48f35afbcbd079787abfc3cffe423
86,965
import torch

def calculate_predictive_loss(data, predictions):
    """
    Prediction loss calculation.
    :param data: Hash with label.
    :param predictions: Predicted label.
    :return target: Target tensor.
    :return prediction_loss: Loss on sample.
    """
    target = [data["target"]]
    target = torch.tensor(target)
    prediction_loss = torch.nn.functional.nll_loss(predictions, target)
    return target, prediction_loss
dec076f4888edbb335a19811c499064faf4d99fe
86,966
def dfs(dependencies, dependents, key=lambda x: x):
    """Depth First Search of dask graph

    This traverses from root/output nodes down to leaf/input nodes in a
    depth first manner. At each node it traverses down its immediate children
    by the order determined by maximizing the key function.

    As inputs it takes dependencies and dependents as can be computed from
    ``get_deps(dsk)``.

    Examples
    --------
    >>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
    >>> dependencies, dependents = get_deps(dsk)
    >>> sorted(dfs(dependencies, dependents).items())
    [('a', 2), ('b', 3), ('c', 1), ('d', 0)]
    """
    result = dict()
    i = 0
    roots = [k for k, v in dependents.items() if not v]
    stack = sorted(roots, key=key)
    seen = set()
    while stack:
        item = stack.pop()
        if item in seen:
            continue
        seen.add(item)
        result[item] = i
        deps = dependencies[item]
        if deps:
            deps = deps - seen
            deps = sorted(deps, key=key)
            stack.extend(deps)
        i += 1
    return result
4c326910e68709d9bb84f39203bc05e6976d5a93
86,968
import ipaddress

def is_ip_on_lan(ip: str) -> bool:
    """Return true when the given IP is in a IANA IPv4 private range, otherwise false

    Args:
        ip: An IPv4 address in dotted-quad notation.

    Returns:
        true or false depending on the value of ip
    """
    return ipaddress.IPv4Address(ip).is_private
6e268bffebb707ef21416b41df585495b7bf0bac
86,971
def create_person(name, age) -> dict:
    """Creates a dictionary representation of a person"""
    return {'name': name, 'age': age, 'friends': []}
5751da5831a1a1a3bd369508287c6614785cf8eb
86,972
def get_avg_price(ph, pl):
    """Daily average prices calculated as an average of highest and lowest prices"""
    return (ph + pl) / 2
dc0f9649be02f8edc2f5169c2fffe92f65336920
86,973
def read_text(filename):
    """Reads the text from a text file."""
    with open(filename, "rb") as f:
        text = f.read()
    return text
a21353955b75dd626b741cc719e2af4684b39a46
86,974
def cut(text, length=40, trailing=" (...)"):
    """Cuts text to a predefined length and appends a trailing ellipsis for longer sources."""
    if not text:
        return text
    if len(text) <= length:
        trailing = ""
    return text[:length] + trailing
b92706af60912ba8bf2402ae70397fca45b07e91
86,976
def map_format(sequence, format):
    """
    Apply format string on elements in sequence.

    :param format: format string. can use one positional format argument,
        i.e. '{}' or '{0}', which will map to elements in the sequence.
    """
    return [format.format(item) for item in sequence]
1f58111e7f3c67a4aecf29fe88d43eb49bef9e55
86,977
def choose_weapon(decision, weapons):
    """Chooses a weapon from a given list based on the decision."""
    choice = []
    for i in range(len(weapons)):
        if i < decision:
            choice = weapons[i]
    return choice
86e6272022a45603525e06a732748cf7275337b8
86,978
def s2(n, k):
    """Calculates the Stirling number of the second kind."""
    if n == 0 or n != 0 and n == k:
        return 1
    if k == 0 or n < k:
        return 0
    return k * s2(n-1, k) + s2(n-1, k-1)
05127f75f33a781ec11e55b21f89f0ad42734f1c
86,984
import requests

def get_from_url(url):
    """
    :param url: url to download from
    :return: return the content at the url
    """
    return requests.get(url).content
8f60f5b00ba6e9205597c51738de62d63c5c2a85
86,985
def findFirstNonWhitespace(text, pos):
    """Find first thing after pos in text that is not whitespace
    (whitespace = " " for now)

    Returns: position of thing and thing
    if nothing is after pos, return (-1, None)
    """
    while pos > -1 and pos < len(text) and text[pos] == " ":
        pos = pos + 1
    if pos == -1 or pos >= len(text):
        return -1, None
    else:
        return pos, text[pos]
77f0d9a296cfb9f8119d275ba6dd15796e7ec2f4
86,986
def deg_to_dms(deg):
    """Convert decimal degrees to (deg, arcmin, arcsec)"""
    d = int(deg)
    deg -= d
    m = int(deg * 60.)
    s = (deg * 60. - m) * 60.
    return d, m, s
c5db284ace27822d9090e5e4ecfd99e97f497198
86,987
def nextLn(Ln):
    """Return Gray code Ln+1, given Ln."""
    Ln0 = ['0' + codeword for codeword in Ln]
    Ln1 = ['1' + codeword for codeword in Ln]
    Ln1.reverse()
    return Ln0 + Ln1
b2996eb318cbf1b740e1cf8c7c0a46816ad3b85e
86,988
def max_val(t):
    """
    t, tuple or list
    Each element of t is either an int, a tuple, or a list
    No tuple or list is empty
    Returns the maximum int in t or (recursively) in an element of t
    """
    maxVal = False

    def helper(obj):
        nonlocal maxVal
        for el in obj:
            if isinstance(el, int):
                # use `is False` so a legitimate maximum of 0 is not mistaken for "unset"
                if maxVal is False or maxVal < el:
                    maxVal = el
            else:
                helper(el)

    helper(t)
    return maxVal
159531e895bb1c5b51dd8e8d1c308b554f44050f
86,989
def _remove_duplicates_in_list(list):
    """
    Removes duplicate elements in a list.

    :param list: A list with possibly duplicate elements.
    :type list: list
    :return: A list which elements are not duplicate.
    :rtype: list
    """
    shrinked_list = []
    for element in list:
        if element not in shrinked_list:
            shrinked_list.append(element)
    return shrinked_list
35a23e381c9816854442b37df27ee27633f9aa83
86,993
def mem_binary_search(arr, time, return_ind=False):
    """
    Performs binary search on array, but assumes that the array is filled with
    tuples, where the first element is the value and the second is the time.
    We're sorting utilizing the time field and returning the value at the
    specific time.

    Args:
        arr: sorted array with (val, start_time)
        time: the time that we are looking for
        return_ind: If True, return the index corresponding to time in arr

    Returns:
        the value that took place at the given time
    """
    # if len(arr) == 0 or time < arr[0][1]:
    #     return None
    start, end = 0, len(arr)
    while (end > start + 1):
        mid = (end + start) // 2
        if time >= arr[mid][1]:
            start = mid
        else:
            end = mid
    if return_ind:
        return start
    else:
        return arr[start][0]
6b3a717ec8bcb97621a50721394952592befb9c9
86,997
def encode_topic_name(topic_names, to_byte=True):
    """Create topic name. Mainly used for creating a topic name for publisher.

    # Arguments
        topic_names: list
            a list of strings

    # Returns
        topic_name: byte string
            the topic name separated by "/"
    """
    topic_name = "/".join(topic_names)
    if to_byte is True:
        return topic_name.encode("utf-8")
    else:
        return topic_name
35ecf647cef20bf192c0d9fd5d56b5312b1b8aea
87,002
import time

def elapsed_time(start):
    """Compute time since provided start time.

    Parameters
    ----------
    start: float
        Output of time.time().

    Returns
    -------
    Elapsed hours, minutes and seconds (as tuple of int).
    """
    time_elapsed = time.time() - start
    hrs = time_elapsed // 3600
    secs = time_elapsed % 3600
    mins = secs // 60
    secs = secs % 60
    return int(hrs), int(mins), int(secs)
d0f6be5dd8890b42e92ff6bd1198d8fa08e73d77
87,006
from typing import Callable
from typing import Any
import functools
import inspect

def call_only_once(func: Callable[..., Any]) -> Callable[..., Any]:
    """
    Decorate a method or property of a class, so that this method can only
    be called once for every instance.
    Calling it more than once will result in exception.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):  # type: ignore
        self = args[0]
        assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
        if not hasattr(self, "_CALL_ONLY_ONCE_CACHE"):
            cache = self._CALL_ONLY_ONCE_CACHE = set()
        else:
            cache = self._CALL_ONLY_ONCE_CACHE  # pylint: disable=W0212
        cls = type(self)
        # cannot use ismethod(), because decorated method becomes a function
        is_method = inspect.isfunction(getattr(cls, func.__name__))
        assert func not in cache, (
            f"{'Method' if is_method else 'Property'} {cls.__name__}.{func.__name__} "
            f"can only be called once per object!"
        )
        cache.add(func)
        return func(*args, **kwargs)

    return wrapper
69850248c942a2bb12feff510680bc46067c6f90
87,008
def encode(value):
    """
    Encode UTF8.

    :param str value: a decoded content
    :return: the encoded content
    :rtype: bytes
    """
    return value.encode('utf-8')
da1066c3a166fe16095dd849c6d14bc91c5c05ae
87,010
def _get_headers(advisories):
    """Return list of unique headers."""
    return list(set(key for adv in advisories for key in adv.keys()))
78a1364bf278b0cafa7eb2a645bd435e019275dc
87,011
def _sh_negate(sh, order):
    """Get the negative spherical harmonic from a positive one."""
    assert order >= 0
    return sh.conj() * (-1. if order % 2 else 1.)
506a3fa59111edfcbdb6b76af09e65a75fa55d0a
87,014
import collections

def top_k(signal, k):
    """Computes the top_k cut of a {'string': frequency} dict."""
    counter = collections.Counter(signal)
    ranks = collections.defaultdict(list)
    for key, value in counter.most_common():
        ranks[value].append(key)
    results = {}
    counter = 0
    for freq, values in ranks.items():
        for v in values:
            results[v] = freq
            counter += 1
            if counter >= k:
                break
    return results
db5743f174f90c1e8dcc561b16085c2389d2c38f
87,016
import torch

def min(dat, dim=None):
    """Minimum element (across an axis) for tensors and arrays"""
    if torch.is_tensor(dat):
        return dat.min() if dim is None else dat.min(dim=dim).values
    else:
        return dat.min(axis=dim)
867ec2d624644794d828141e502276aed8b4f16a
87,020
from collections.abc import Mapping

def merge_dict(*args, add_keys=True):
    """
    Safely merge two dictionaries

    Args:
        dct0...n: dicts to merge
        add_keys: merge dict keys (default: True)

    Returns:
        merged dict
    """
    if len(args) < 1:
        return None
    dct = args[0].copy()
    for merged in args[1:]:
        if not add_keys:
            merged = {k: merged[k] for k in set(dct).intersection(set(merged))}
        for k, v in merged.items():
            if isinstance(dct.get(k), dict) and isinstance(v, Mapping):
                dct[k] = merge_dict(dct[k], v, add_keys=add_keys)
            else:
                if v is None:
                    if not k in dct:
                        dct[k] = None
                else:
                    dct[k] = v
    return dct
806a8871294b719b915fc7906ab152e83d949796
87,022
import requests
import json

def get_json_data(url):
    """Return a dict parsing a remote json file"""
    r = requests.get(url)
    try:
        return r.json()
    except json.JSONDecodeError:
        # Catch the Unexpected UTF-8 BOM error
        r.encoding = 'utf-8-sig'
        return r.json()
3589bfb2bb9112023c5a5b148dcb59a6817a4fd3
87,037
def get_domain_term(cl, domain):
    """
    create the codelist term values for a given domain code
    :param cl: the codelist for domains
    :param domain: the 2 letter domain abbreviation to use to find the codelist term details to return
    :return: term dictionary with the details of the domain codelist term
    """
    term = {}
    for domain_term in cl["terms"]:
        if domain_term["submissionValue"] == domain:
            term["Term"] = domain_term["submissionValue"]
            term["NCI Term Code"] = domain_term["conceptId"]
            term["Decoded Value"] = domain_term["preferredTerm"]
            term["Comment"] = ""
            term["IsNonStandard"] = ""
            term["StandardOID"] = "STD.3"
            return term
    term["Term"] = domain
    term["NCI Term Code"] = ""
    term["Decoded Value"] = ""
    term["Comment"] = ""
    term["IsNonStandard"] = "Yes"
    term["StandardOID"] = ""
    return term
da269c45348dda31c4989a6fac6447176a7823cd
87,039
def rotate(pattern):
    """Rotate pattern by 90 degrees to the right."""
    rotated = []
    for row_no, row in enumerate(pattern):
        rotated.append('')
        for col_no, char in enumerate(row):
            rotated[row_no] += pattern[len(pattern) - 1 - col_no][row_no]
    return rotated
359429df171ff04c98ebf9ef5057dd317008361e
87,046
def strip_name_amount(arg: str):
    """
    Strip the name and the last position integer

    Args:
        arg: string

    Returns:
        string and integer with the default value 1
    """
    strings = arg.split()
    try:
        first = ' '.join(strings[:-1])
        second = int(strings[-1])
    except (ValueError, IndexError):
        first = ' '.join(strings)
        second = 1
    return first, second
034ca8c780b9e837f6c3c5f2a1bf0d58dbc77e9b
87,050
def mclag_session_timeout_valid(session_tmout):
    """Check if the MCLAG session timeout is in valid range (between 3 and 3600)"""
    if session_tmout < 3 or session_tmout > 3600:
        return False, "Session timeout %s not in valid range[3-3600]" % session_tmout
    return True, ""
4f200dfcaddf15a967f3274f94648630d47bfd54
87,051
def select(printer, ast):
    """Prints a select statement "name : type"."""
    name_str = ast["name"]
    type_str = printer.ast_to_string(ast["type"])
    return f'{name_str} : {type_str}'
56c1613a14057f38978794b7763fd3896a567088
87,058
def simple_log(msg, err=False):
    """Print a log message to stdout and then return it."""
    lmsg = ""
    if not err:
        lmsg = "\n[*] %s" % str(msg)
    else:
        lmsg = "\n[x] %s" % str(msg)
    print(lmsg, flush=True)
    return lmsg
7799c11993b4b9f4f7c22d5af81a15bc3649b4e8
87,059
from typing import Any

def get_value_from_ndpi_comments(
    comments: str,
    value_name: str,
    value_type: Any
) -> Any:
    """Read value from ndpi comment string."""
    for line in comments.split("\n"):
        if value_name in line:
            value_string = line.split('=')[1]
            return(value_type(value_string))
7a90518cc76d674de2d6fc5f794b576ddbc5dd2d
87,061
def write_key(key, value):
    """Write a `key = value` line in an LTA file.

    Parameters
    ----------
    key : str
    value : int or float or str

    Returns
    -------
    str
    """
    if isinstance(value, (str, int, float)):
        return f'{key} = {value}'
    else:
        return f'{key} = ' + ' '.join([str(v) for v in value])
27ead1043d868aec4d0488212217513aef37bed6
87,062
def gen_checkbox_labels(batch_size, num_leg_objs, antnames):
    """Auto-generating Check box labels

    Parameters
    ----------
    batch_size : :obj:`int`
        Number of items in a single batch
    num_leg_objs : :obj:`int`
        Number of legend objects / Number of batches

    Returns
    ------
    labels : :obj:`list`
        Batch labels for the batch selection check box group
    """
    nants = len(antnames)
    labels = []
    s = 0
    e = batch_size - 1
    for i in range(num_leg_objs):
        if e < nants:
            labels.append("{} - {}".format(antnames[s], antnames[e]))
        else:
            labels.append("{} - {}".format(antnames[s], antnames[nants - 1]))
        # after each append, move start number to current+batchsize
        s = s + batch_size
        e = e + batch_size
    return labels
70ec137b45479b6b12ac0f86defb5be57c89a114
87,063
def data() -> str:
    """
    Test data for day_01.

    Returns
    -------
    data: str
    """
    return '1721\n979\n366\n299\n675\n1456'
d7b73881ceb86e72bd28d0c4c9343dac6d5bf832
87,064
def format_timespan_digits(ts):
    """Format a timespan namedtuple as a string resembling a digital display."""
    if ts.days:
        day_or_days = "days" if ts.days > 1 else "day"
        return (
            f"{ts.days} {day_or_days}, "
            f"{ts.hours:02d}:{ts.minutes:02d}:{ts.seconds:02d}"
        )
    if ts.seconds:
        return f"{ts.hours:02d}:{ts.minutes:02d}:{ts.seconds:02d}"
    return f"00:00:00.{ts.total_microseconds}"
8dbc152c1c93839007b3e74c6458c53ebda7d0c0
87,070
def colourfulness_correlate(C, B_rw):
    """
    Returns the correlate of *colourfulness* :math:`M`.

    Parameters
    ----------
    C : numeric
        Correlate of *chroma* :math:`C`.
    B_rw : numeric
        Ideal white *brightness* correlate :math:`B_{rw}`.

    Returns
    -------
    numeric
        Correlate of *colourfulness* :math:`M`.

    Examples
    --------
    >>> C = 0.013355007871688761
    >>> B_rw = 125.24353925846037
    >>> colourfulness_correlate(C, B_rw)  # doctest: +ELLIPSIS
    0.0167262...
    """
    M = C * B_rw / 100
    return M
3c392973d69de71299def78e267d290535d6e603
87,072
def graphql_field(
        field_name, field_type, arguments={}, context_args=[],
        description=None, is_deprecated=False, deprecation_reason=None):
    """Decorator that annotates a method as corresponding to a GraphQL field.

    The field appears in the GraphQL types for any of the containing class's
    subclasses that are annotated with graphql_object or graphql_interface.
    The value of the field for a particular object is equal to the method's
    return value.

    To be precise, we do not necessarily call the decorated method on a given
    object; rather, we call the method with the same name as the decorated
    method. For example, if we decorate a method Foo.foo() using
    graphql_field, and Bar overrides foo(), then for an object of type Bar,
    we obtain the field's value by calling Bar's implementation of foo(), not
    Foo's implementation. In other words, we respect ordinary method
    overriding semantics.

    At present, we do not support annotating a method with multiple
    graphql_field annotations.

    basestring field_name - The name of the field.
    basestring field_type - The GraphQL type of the field's value.
    dict<basestring, basestring> arguments - A map from the names of the
        arguments to the field in GraphQL to their GraphQL types. We pass the
        arguments to the method as keyword arguments, after changing the
        arguments' names from camelCase to snake_case. Any arguments whose
        values are not supplied are omitted from the keyword arguments. As
        such, we will use any default values for such arguments defined in
        Python.
    list<basestring> context_args - A list of the context arguments to
        include in the keyword arguments. See GraphQlContext.context_arg.
    basestring description - A description of the field, or None. GraphQL
        favors the Markdown format.
    bool is_deprecated - Whether the field is deprecated.
    basestring deprecation_reason - An indication of why the field is
        deprecated, or None. This is None if is_deprecated is False.
    """
    def decorator(func):
        func._graphql_field_name = field_name
        func._graphql_field_type = field_type
        func._graphql_field_args = arguments
        func._graphql_field_context_args = context_args
        func._graphql_field_description = description
        func._graphql_field_is_deprecated = is_deprecated
        func._graphql_field_deprecation_reason = deprecation_reason
        return func
    return decorator
d944803b50ae9d461635bcbbd4d2ccfcf95dc2b0
87,073
def process_content(df, config, dataframes):
    """
    Applies a function to each value of the column

    "process_content": [
        {
            "function": lambda x: str(x),
            "columns": ["column1", "column2"]
        }
    ]
    """
    for function_dict in config.process_content:
        for column in function_dict['columns']:
            df[column] = df[column].map(function_dict['function'])
    return df
41040b0eafe0f97526b645fdecf363d653c1cf85
87,077
def intseq(words, w2i, unk='.unk'):
    """
    Convert a word sequence to an integer sequence based on the given codebook.
    :param words:
    :param w2i:
    :param unk:
    :return:
    """
    res = [None] * len(words)
    for j, word in enumerate(words):
        if word in w2i:
            res[j] = w2i[word]
        else:
            res[j] = w2i[unk]
    return res
9716cbc3fad228802ba954e50df4d2f109eff3e6
87,079
def update(model, optimizer, train_x, train_y, ret_loss=False):
    """Perform a single model update

    Apply model on the given data train_x, train_y. Compute the prediction
    loss and make parameter updates using the provided optimizer.

    Parameters
    ----------
    model : nn.Module
        Pytorch neural network module. Must have a criterion attribute!!
    optimizer : torch.optim
        Optimizer that updates the model
    train_x : torch.Tensor
        Input tensor x
    train_y : torch.Tensor
        Ground-truth output tensor y
    ret_loss : bool, optional
        Whether to return the observed loss (default=False)

    Returns
    -------
    ret_loss (optional)
        Only if ret_loss=True
    """
    model.zero_grad()
    out = model(train_x)
    loss = model.criterion(out, train_y)
    loss.backward()
    optimizer.step()
    if ret_loss:
        return loss.item()
c45e77135444c24f3a7d7095894a5100ec3d9219
87,080
def _flash_encryption_tweak_range(flash_crypt_config=0xF):
    """Return a list of the bit indexes that the "key tweak" applies to,
    as determined by the FLASH_CRYPT_CONFIG 4 bit efuse value.
    """
    tweak_range = []
    if (flash_crypt_config & 1) != 0:
        tweak_range += range(67)
    if (flash_crypt_config & 2) != 0:
        tweak_range += range(67, 132)
    if (flash_crypt_config & 4) != 0:
        tweak_range += range(132, 195)
    if (flash_crypt_config & 8) != 0:
        tweak_range += range(195, 256)
    return tweak_range
b02d2f128ee466cfbc1051ca769e902fe70ed872
87,083
def list_detectors(gd_client, aws_region):
    """
    Lists the detectors in a given Region
    Used to detect if a detector exists already
    :param gd_client: GuardDuty client
    :param aws_region: AWS Region
    :return: Dictionary of AWS_Region: DetectorId
    """
    detector_dict = gd_client.list_detectors()
    if detector_dict['DetectorIds'] == []:
        pass
    else:
        return detector_dict
dd4419404b638b5d0975a254917d5365c10299a4
87,091
def bytes_fixlen(byteseq: bytes, length: int) -> bytes:
    """
    Fix the length of a byte sequence.

    :param byteseq: The byte sequence to fix the length of.
    :param length: The length of the sequence.
    :return: The byte sequence with fixed length.
    """
    if len(byteseq) > length:
        return byteseq[:length]
    else:
        return (b'\x00' * (length - len(byteseq))) + byteseq
c3c3a5186c5e3c007bc80d138f3ef88ea80bef5a
87,095
def add_automl_args(argparser):
    """
    Helper function which defines command-line arguments specific to AMC.

    Arguments:
        argparser (argparse.ArgumentParser): Existing parser to which to add the arguments
    """
    group = argparser.add_argument_group('AutoML Compression Arguments')
    group.add_argument('--amc-cfg', dest='amc_cfg_file', type=str, action='store',
                       help='AMC configuration file')
    group.add_argument('--amc-protocol', choices=["mac-constrained",
                                                  #"param-constrained",
                                                  "accuracy-guaranteed",
                                                  "mac-constrained-experimental",
                                                  "punish-agent"],
                       default="mac-constrained", help='Compression-policy search protocol')
    group.add_argument('--amc-ft-epochs', type=int, default=1,
                       help='The number of epochs to fine-tune each discovered network')
    group.add_argument('--amc-save-chkpts', action='store_true', default=False,
                       help='Save checkpoints of all discovered networks')
    group.add_argument('--amc-action-range', type=float, nargs=2, default=[0.0, 0.80],
                       help='Density action range (a_min, a_max)')
    group.add_argument('--amc-heatup-episodes', type=int, default=100,
                       help='The number of episodes for heatup/exploration')
    group.add_argument('--amc-training-episodes', type=int, default=700,
                       help='The number of episodes for training/exploitation')
    group.add_argument('--amc-reward-frequency', type=int, default=None,
                       help='Reward computation frequency (measured in agent steps)')
    group.add_argument('--amc-target-density', type=float,
                       help='Target density of the network we are seeking')
    group.add_argument('--amc-agent-algo', choices=["ClippedPPO-continuous",
                                                    "ClippedPPO-discrete",
                                                    "TD3",
                                                    "DDPG",
                                                    "Random-policy"],
                       default="ClippedPPO-continuous",
                       help="The agent algorithm to use")
    group.add_argument('--amc-ft-frequency', type=int, default=None,
                       help='How many action-steps between fine-tuning.\n'
                            'By default there is no fine-tuning between steps.')
    group.add_argument('--amc-prune-pattern', choices=["filters", "channels"],
                       default="filters", help="The pruning pattern")
    group.add_argument('--amc-prune-method', choices=["l1-rank", "stochastic-l1-rank",
                                                      "fm-reconstruction"],
                       default="l1-rank", help="The pruning method")
    group.add_argument('--amc-rllib', choices=["coach", "spinningup", "hanlab", "random"],
                       default=None, help="Choose which RL library to use")
    group.add_argument('--amc-group-size', type=int, default=1,
                       help="Number of filters/channels to group")
    group.add_argument('--amc-reconstruct-pts', dest="amc_fm_reconstruction_n_pts", type=int, default=10,
                       help="Number of filters/channels to group")
    group.add_argument('--amc-ranking-noise', type=float, default=0.,
                       help='Structure ranking noise')
    return argparser
ac234d2e395f8cc0d14d978d992e2340b40d697a
87,097
def docker_network_response_parser(port_mapping_list):
    """Parse list to source - dest port.

    Input:
    [
        {
            '8080/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '80'}],
            '90/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '9090'}]
        }
    ]
    Output:
    ["HostIp(0.0.0.0):HostPort(80):GuestPort(8080)"]
    """
    result = []
    port_map = port_mapping_list[0]
    for pm in port_map.keys():
        result.append(
            "HostIp(" + port_map[pm][0]['HostIp'] +
            "):HostPort(" + port_map[pm][0]['HostPort'] +
            "):GuestPort(" + pm + ")"
        )
    return result
85d3556b9398885df6578199b145916364e8453a
87,100
from pathlib import Path
import json

def read_config() -> dict:
    """Read the configuration file and return its contents."""
    with open(Path("../config.json")) as f:
        return json.load(f)
f98ed693fe21b3241d3675f13d35e23086dd0ee5
87,102
def lookup(d, key):
    """Filter to enable dictionary lookups by key in templates."""
    return d[key]
ee5b3765f2cc159e1a0dd3098824056633ff24fa
87,103
def to_list(obj, split_strings=True):
    """Converts a obj, an iterable or a single item to a list.

    Args:
        obj (mixed): Object to convert item or wrap.
        split_strings (bool, optional): Whether to split strings into single
            chars. Defaults to ``True``.

    Returns:
        list: Converted obj or wrapped item.

    Example:

        >>> results = to_list({'a': 1, 'b': 2, 'c': 3})
        >>> assert set(results) == set([1, 2, 3])
        >>> to_list((1, 2, 3, 4))
        [1, 2, 3, 4]
        >>> to_list(1)
        [1]
        >>> to_list([1])
        [1]
        >>> to_list(a for a in [1, 2, 3])
        [1, 2, 3]
        >>> to_list('cat')
        ['c', 'a', 't']
        >>> to_list('cat', split_strings=False)
        ['cat']

    .. versionadded:: 1.0.0

    .. versionchanged:: 4.3.0
        - Wrap non-iterable items in a list.
        - Convert other iterables to list.
        - Byte objects are returned as single character strings in Python 3.
    """
    if isinstance(obj, list):
        return obj[:]
    elif isinstance(obj, dict):
        return obj.values()
    elif not split_strings and isinstance(obj, (str, bytes)):
        return [obj]
    elif split_strings and isinstance(obj, bytes):
        # in python3 iterating over bytes gives integers instead of strings
        return list(chr(c) if isinstance(c, int) else c for c in obj)
    else:
        try:
            return list(obj)
        except TypeError:
            return [obj]
c963a5f3309ed62b26dd81117bbd7812eebb566d
87,106
def clean_tweet_text(original_tweet_text):
    """
    Remove all URL and hashtag entities in the tweet_text
    param original_tweet_text: the original tweet_text field
    return: new tweet_text field with out any URL and hashtag
    """
    tweet_text_words = original_tweet_text.split()
    filtered_tweet_text_words = [tweet_text_word for tweet_text_word in tweet_text_words
                                 if not (tweet_text_word.startswith('http') or tweet_text_word.startswith('#'))]
    return ' '.join(filtered_tweet_text_words)
df628573a73d68bac03ceac76f7d19ca7adf816b
87,107
def gaussian_pdf(x: str = 'x', mean: str = r'\mu', variance: str = r'\sigma^2') -> str:
    """
    Returns a string representing the probability density function for a Gaussian distribution.

    **Parameters**

    - `x`: str
        The random variable.
    - `mean`: str, optional
        The mean of the random variable.
    - `variance`: str, optional
        The variance of the random variable.

    **Returns**

    - `out`: str
        TeX compatible string.
    """
    return r'\frac{1}{\sqrt{2\pi ' + variance + r'}}e^{-\frac{(' + x + '-' + mean + r')^2}{2' + variance + r'}}'
f5da3be1ee4676fadb32e9da989ff83d1b8114ef
87,110
from typing import Dict

def varchar_func(x: str, type_info: Dict) -> bool:
    """Tests if the string is a string less than 65536 bytes"""
    row_len = len(str(x).encode("utf-8"))
    type_info["suffix"] = max(row_len, type_info["suffix"] or 1)
    return row_len < 65536
c51dc041d781c22e80c5b5c9fe85a8bc60421a01
87,111
def _GetMuteConfigIdFromFullResourceName(mute_config):
    """Gets muteConfig id from the full resource name."""
    mute_config_components = mute_config.split("/")
    return mute_config_components[len(mute_config_components) - 1]
33b5c5598a156768dc4c87ddf5cc77d08cb6766e
87,112
def default_entry_point(name):
    """Generate a default entry point for package `name`."""
    return "{name}.__main__:main".format(name=name)
d58387ede8f38cd8bc1ab6d5f63062f1a56fcef7
87,117
def area_of_triangle(vertices):
    """Compute the area of the triangle with given vertices"""
    p, q, r = vertices
    pq = q - p
    pr = r - p
    v = pq.cross(pr)
    return 0.5 * abs(v.Length)
a6ac013540088472c187529b9dd33668a93935b8
87,119
import requests

def link_reporter(url, display=False, redirect_log=True):
    """Attempt to resolve a URL and report on how it was resolved."""
    if display:
        print(f"Checking {url}...")
    # Make request and follow redirects
    try:
        r = requests.head(url, allow_redirects=True)
    except:
        r = None
    if r is None:
        return [(False, url, None, "Error resolving URL")]
    # Optionally create a report including each step of redirection/resolution
    steps = r.history + [r] if redirect_log else [r]
    step_reports = []
    for step in steps:
        step_report = (step.ok, step.url, step.status_code, step.reason)
        step_reports.append(step_report)
        if display:
            txt_report = f'\tok={step.ok} :: {step.url} :: {step.status_code} :: {step.reason}\n'
            print(txt_report)
    return step_reports
5df2bbe5c76881b60deb5c6aaa90d619cc4b1a37
87,129
import pickle

def load_pickle(filepath):
    """Loads an object from pickle."""
    try:
        with open(filepath, "rb") as f:
            return pickle.load(f)
    except FileNotFoundError as e:
        raise ValueError(f"File '{filepath}' does not exist.") from e
6b151946ab13f486a49b2f959269872d7ce459c2
87,134
def git_exec_path_parameters(request, mock_subprocess):
    """Fixture for returning subprocess parameters for `git --exec-path`"""
    return request.param
7bfc9ea0a8ddc58ba735e4dd37296224f601ae95
87,147
def get_timestamp_format_by_ttl_seconds(ttl_value: int) -> str:
    """
    Calculates the precision of the timestamp format required based on the TTL
    For example:
        if TTL is 3600 seconds (1hr) then return "%Y-%m-%d-%H0000"
        if TTL is 600 seconds (10 mins) then return "%Y-%m-%d-%H%M00"
        if TTL is 35 seconds (35 secs) then return "%Y-%m-%d-%H%M%S"
    """
    if ttl_value >= 86400:
        # Greater than one day, return a day timestamp
        return "%Y-%m-%d-000000"
    elif ttl_value >= 3600:
        # Greater than one hour, return an hour-based timestamp
        return "%Y-%m-%d-%H0000"
    elif ttl_value >= 60:
        # Greater than a minute, return a minute-based timestamp
        return "%Y-%m-%d-%H%M00"
    else:
        # Return a second-based timestamp
        return "%Y-%m-%d-%H%M%S"
28af0bf1cacf64e111311aa64ac3862310637a8b
87,149
def _get_rounded_intersection_area(bbox_1, bbox_2):
    """Compute the intersection area between two bboxes rounded to 8 digits."""
    # The rounding allows sorting areas without floating point issues.
    bbox = bbox_1.intersection(bbox_1, bbox_2)
    return round(bbox.width * bbox.height, 8) if bbox else 0
1f414ced822b384817760e6d79ca1826889cc01e
87,156
def _instantiate(obj, typ):
    """Returns obj if obj is not None, else returns new instance of typ

    obj : an object
        An object (most likely one that a user passed into a function) that,
        if ``None``, should be initiated as an empty object of some other type.
    typ : an object type
        Expected values are list, dict, int, bool, etc.
    """
    return typ() if obj is None else obj
27b5943b26451059a359345686ee7d93af827e70
87,158
def parseVersion(stringVersion):
    """Parses a version string like "6.1.0.3" and returns a python list of ints like [ 6,1,0,3 ]"""
    m = "parseVersion:"
    # sop(m,"Entry. stringVersion=%s" % ( stringVersion ))
    listVersion = []
    parts = stringVersion.split('.')
    for part in parts:
        # sop(m,"Adding part=%s" % part)
        listVersion.append(int(part))
    # sop(m,"Exit. Returning listVersion=%s" % ( listVersion ))
    return listVersion
6510006c314dfbaed9397dc5b3e19e6bc13787a0
87,162
def clear_dpid(dpid):
    """
    clear_dpid removes any non useful info from DPID. Some examples of DPIDs:
        "dpid:11:11:11:11:11:11"
        "dp:11:11:11:11:11:11"
        "11:11:11:11:11:11"
        "111111111111"
    The goal is to return the last one: "111111111111"

    Args:
        dpid: dpid to be fixed
    Returns:
        dpid fixed

    >>> clear_dpid(b'of:11:11:11:11:11:11')
    '111111111111'
    >>> clear_dpid("of:11:11:11:11:11:11")
    '111111111111'
    >>> clear_dpid("dpid:11:11:11:11:11:11")
    '111111111111'
    >>> clear_dpid("dp:11:11:11:11:11:11")
    '111111111111'
    >>> clear_dpid("11:11:11:11:11:11")
    '111111111111'
    >>> clear_dpid("111111111111")
    '111111111111'
    """
    dpid_names = ["dpid:", "dp:", "of"]
    if isinstance(dpid, bytes):
        dpid = dpid.decode("utf-8")
    for dpid_name in dpid_names:
        pos = dpid.find(dpid_name)
        if pos != -1:
            # substring found
            dpid = dpid[pos+len(dpid_name):]
    if len(dpid.split(":")) == 2:
        return dpid.split(":")[1]
    elif len(dpid.split(":")) > 2:
        return dpid.replace(":", "")
    return dpid
372b512b3a71e5a6fd0c5f0aa82e8001d9018b6a
87,163
def accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    pred_t = preds.argmax(dim=1, keepdim=True)
    return pred_t.eq(y.view_as(pred_t)).float().mean()
7c6a4765d2efe2c418912d756fa3812e72e4ab1c
87,165
def common_members(list_a, list_b):
    """
    list_c = common_members(list_a, list_b)
    Return a list (sorted) of common members in list_a, list_b

    Parameters:
        list_a: list
        list_b: list
    Returns:
        list_c: a list of common members in list_a and list_b
    """
    list_c = list(set(list_a).intersection(list_b))
    list_c.sort()
    return list_c
ff8810d48650badf58f5a1a6cdaf4dcac90c5939
87,166
def process_weather_station_data(df, station_name):
    """
    Rename weather station data so each station's weather data can be distinguished.
    Selects only weather features to include in the dataframe.
    """
    df = df.iloc[:, 3:].copy()
    new_names = [(i, f"{station_name}_{i}") for i in df.iloc[:, 1:].columns.values]
    return df.rename(columns=dict(new_names))
0dd86a888d251d231c9f9b394614d5081dbc7bbc
87,170
def calcAverage(theList):
    """Calculates the average of a list of values.

    :param theList: The list of which the average is to be found.
    :type theList: list
    :return: The average of the values of theList
    :rtype: float
    """
    sum = 0
    numValues = 0
    for value in theList:
        sum += value
        numValues += 1
    return sum / numValues
b3a716ee81b25760ad8ff1619ac08ff90bb4aa64
87,171
def split_path(path):
    """
    Splits the text and build a nice import statement from it.

    Note: only well defined import paths are supported.
    Not something invalid like '..foo.bar..'.

    >>> split_path('foo.bar.Batz')
    ('foo.bar', 'Batz')

    >>> split_path('..lel')
    ('..', 'lel')

    >>> split_path('.lul')
    ('.', 'lul')

    >>> split_path('lol')
    ('', 'lol')

    >>> split_path('...na.na.na.na.na.na.Batman')
    ('...na.na.na.na.na.na', 'Batman')

    >>> split_path('...........yolo.swagger')
    ('...........yolo', 'swagger')

    :param path: The path to split.
    :type path: str

    :return: The import text, like `from x import y` or `import z`
    :rtype: tuple(str)|Tuple[str, str]
    """
    last_dot_position = path.rfind('.')
    if last_dot_position == -1:
        # no dot found.
        import_path = ''
        import_name = path
    else:
        import_path = path[:last_dot_position + 1]
        import_name = path[last_dot_position + 1:]
        # handle 'foo.bar.Baz' not resulting in 'foo.bar.', i.e. remove the dot at the end.
        if import_path.rstrip('.') != '':  # e.g. not '....'
            import_path = import_path.rstrip('.')
        # end if
    return import_path, import_name
07b5ce86fb69bb3ec6b1cd48ff890dbfad31e715
87,177
def merge(numbs1, numbs2):
    """
    Go through the two sorted arrays simultaneously from left to right.
    Find the smallest element between the two in the two arrays.
    Append the smallest element to a third array and increase the index from
    which the element was taken by one.
    If the two elements are the same, then add one of them to the third array
    and increase both indexes by one.
    The process is repeated until the end of both arrays.
    If one of the two arrays is bigger, then add the remaining elements of the
    biggest array at the end of the iterations.

    :param numbs1: The first array to be merged
    :param numbs2: The second array to be merged
    :return: The sorted array
    """
    sorted_numbs = []
    i = j = 0
    while i < len(numbs1) and j < len(numbs2):
        if numbs1[i] <= numbs2[j]:
            sorted_numbs.append(numbs1[i])
            i += 1
        elif numbs2[j] <= numbs1[i]:
            sorted_numbs.append(numbs2[j])
            j += 1
        else:
            sorted_numbs.append(numbs1[i])
            i += 1
            j += 1
    if i < len(numbs1):
        for k in range(i, len(numbs1)):
            sorted_numbs.append(numbs1[k])
    if j < len(numbs2):
        for l in range(j, len(numbs2)):
            sorted_numbs.append(numbs2[l])
    return sorted_numbs
2595f5bea09004461cf6e3e984455d260adf87bf
87,178
def get_recipes_for_day(request, day):
    """
    Filters the choices of the user by date and returns the recipes chosen for that day
    :param day: datetime instance
    :return: list of recipes
    """
    user = request.user
    choices_for_day = user.choice_set.filter(date=day)
    recipes = []
    if choices_for_day:
        recipes = [ch.recipe for ch in choices_for_day]
    return recipes
8a86e5da35d90b2d37b5cd972875ac1c99f20d1f
87,180
def get_post_details(post) -> dict:
    """Parse a given post and return essential info in a Dict"""
    dict_ = {
        'name_author': post.author.name,
        'timestamp_created': post.created,
        'is_OC': post.is_original_content,
        'num_upvotes': post.ups,
        'num_downvotes': post.downs,
        'num_comments': post.num_comments,
        'is_gilded': post.gilded,
        'post_title': post.title,
        'post_url': post.url,
        'post_text': post.selftext
    }
    return dict_
5a7b51a8fb4effdd9f42baa670ab3d39ac93bf54
87,186
def filter_fridge(recipes, fridge):
    """
    This function removes recipes that don't contain ingredients available in the fridge.

    :param recipes: a data frame of recipes to filter
    :param fridge: the list of ingredients from the user's fridge
    :return: data frame of recipes containing at least an ingredient from the fridge
    """
    fridge_str = "|".join(fridge)
    possible_recipes = recipes[recipes["Ingredients"].str.contains(fridge_str)]
    return recipes[recipes["RecipeID"].isin(possible_recipes["RecipeID"])]
a5b81d1b7c4e2fdedc3d08739df904e3eacc350f
87,188
from typing import Counter

def count_classes(y):
    """
    Returns a dictionary with class balances for each class (absolute and relative)
    :param y: gold labels as array of int
    """
    count_dict = Counter([l for l in y])
    return {l: str(c) + " ({0:.2f}".format((c / len(y)) * 100) + "%)" for l, c in count_dict.items()}
edcf536d9e64f32c64dfaf738c769ae0ea74be79
87,197
def compute_unigram(input, unigram_count, total_word):
    """
    compute_unigram - function to compute unigram probability

    Inputs:
        input : str
            Word
        unigram_count : defaultdict(int)
            Occurrence hashmap of words/tokens
        total_word : int
            Total number of words/tokens

    Outputs:
        output : float
            unigram probability
    """
    return unigram_count[input] / total_word
35a203b1b523e9598b5a81ac4dba76601e263ec1
87,200
def invert_scores(input_ranking):
    """
    Function that given a ranking list, it corrects its relevance in preparation for dcg.
    For example,
        [1,1,2,3] --> [3,3,2,1]
        [1,2] --> [2,1]
        [1] --> [1]
    :param input_ranking: ordered list with the ranking
    """
    max_value = max(input_ranking)
    relevance = []
    for i in input_ranking:
        if i == 0:
            relevance.append(i)
        else:
            relevance.append(max_value - i + 1)
    return relevance
c732cd8f82d23da3f33b44523633139ead552581
87,204
def len_ignore_leading_ansi(s: str) -> int:
    """Returns the length of the string or 0 if it starts with `\033[`"""
    return 0 if s.startswith("\033[") else len(s)
9a01323a3bd5a840760c076f6beddb435d373791
87,205
import re

def check_match(regex: str, tests: list[str]) -> bool:
    """Check if the regex matches any string in `tests`."""
    return any(re.match(regex, test) for test in tests)
4e2a6c43eed5b17029aa3aa4efc80da5975f7a11
87,208
def match_bert_span_to_text(pred, bertid_2_goldid, question_len, context_tokens):
    """
    Match the predicted bert span to the text in gold context.

    Args:
        pred (:obj:`dict`): The prediction dictionary.
        bertid_2_goldid (:obj:`list`): The list mapping bert token ids to gold token ids.
        question_len (:obj:`int`): The length of the question in bert tokens.
        context_tokens (:obj:`list`): The list of gold context tokens.
    """
    answer_start, answer_end = pred["span"]
    # null prediction
    if (answer_start, answer_end) == (0, 0):
        return {'span': None,
                'answer': None,
                'answer_tokens': None,
                'confidence': pred["confidence"],
                "start_logit": pred["start_logit"],
                "end_logit": pred["end_logit"],
                }
    # prediction is not in context
    if (answer_start < question_len or answer_end < question_len):
        return None
    bert_span = (answer_start - question_len, answer_end - question_len)  # span in bert tokens
    gold_span = (bertid_2_goldid[bert_span[0]], bertid_2_goldid[bert_span[1]] + 1)  # span in gold tokens
    # span contains invalid tokens
    if (gold_span[0] < 0 or gold_span[1] < 0):
        return None
    answer_tokens = context_tokens[gold_span[0]:gold_span[1]]
    answer = ' '.join(answer_tokens)
    return {'span': gold_span,
            'answer': answer,
            'answer_tokens': answer_tokens,
            'confidence': pred["confidence"],
            "start_logit": pred["start_logit"],
            "end_logit": pred["end_logit"],
            }
8acaaa0cdd82487b49c2f257a5772836e361d685
87,210
def timedelta_to_matsim_time(td):
    """Convert datetime timedelta object to matsim string format (00:00:00)"""
    hours, remainder = divmod(td.total_seconds(), 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{int(hours):02}:{int(minutes):02}:{int(seconds):02}"
40e73d58d2ef09ba9613e253629e01987ffe55df
87,211
def _omega_sunset(lat, delta):
    """Sunset hour angle (Eq. 59)

    Parameters
    ----------
    lat : ee.Image or ee.Number
        Latitude [radians].
    delta : ee.Image or ee.Number
        Earth declination [radians].

    Returns
    -------
    ee.Image or ee.Number
        Sunset hour angle [radians].

    Notes
    -----
    acos(-tan(lat) * tan(delta))
    """
    return lat.tan().multiply(-1).multiply(delta.tan()).acos()
e21a27da6bcea7c7c9508775371f13745a552da6
87,212