Dataset columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def argmin(pairs):
    """
    Given an iterable of pairs (key, value), return the key corresponding
    to the smallest value. Raises `ValueError` on an empty sequence.

    >>> argmin(zip(range(20), range(20, 0, -1)))
    19
    """
    return min(pairs, key=lambda x: x[1])[0]
39f9c00f95ee9ba4257b3533cd15f5a6a263156d
52,262
def _convert_exception(exception):
    """Convert an Exception into a failure document for publishing."""
    return {'errmsg': str(exception),
            'errtype': exception.__class__.__name__}
3b3f1a93ae18e05df1f02a54df874dc5fb51b643
52,264
def example_function(myinput: int) -> int:
    """
    Returns the given parameter without change.

    This function is just for demo purposes and should be removed when the
    template is used for an actual project.

    Args:
        myinput: The parameter to return.

    Returns:
        The given parameter without change.
    """
    return myinput
9c599f24075b837b64facbcef0eadf07462f1454
52,269
import torch


def bbox_cxcyah_to_xyxy(bboxes):
    """Convert bbox coordinates from (cx, cy, ratio, h) to (x1, y1, x2, y2).

    Args:
        bboxes (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    cx, cy, ratio, h = bboxes.split((1, 1, 1, 1), dim=-1)
    w = ratio * h
    x1y1x2y2 = [cx - w / 2.0, cy - h / 2.0, cx + w / 2.0, cy + h / 2.0]
    return torch.cat(x1y1x2y2, dim=-1)
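A quick sanity check of the conversion above (hypothetical values; assumes PyTorch is available):

boxes = torch.tensor([[10.0, 20.0, 0.5, 8.0]])  # cx=10, cy=20, ratio=w/h=0.5, h=8 -> w=4
print(bbox_cxcyah_to_xyxy(boxes))  # tensor([[ 8., 16., 12., 24.]])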
d0f2e04a6089d0fd1d1feed559285d27d4665864
52,270
def square(n):
    """
    (int) -> int
    Return a number squared.

    >>> square(7)
    49
    >>> square(12)
    144
    """
    return n * n
abd1322034261cdc7281caaec14978ed7aec2760
52,273
import gzip
import pickle


def load_sim(filename):
    """
    Utility function that loads a system together with simulation results.

    Parameters
    ----------
    filename: string
        Name of input file

    Returns
    -------
    system: Builder object
        A discretized system.
    result: dictionary
        Dictionary containing 1D arrays of electron and hole quasi-Fermi
        levels and the electrostatic potential across the system. Keys are
        'efn', 'efp', and/or 'v'.
    """
    with gzip.GzipFile(filename, 'rb') as f:
        data = f.read()
    sys, result = pickle.loads(data)
    return sys, result
ff135fa8521cc2ead823544583aa8d54b45a9e15
52,279
def const(a):
    """
    The constant function. A function that returns a constant value.

    a -> b -> a
    """
    return lambda b: a
1dc9fe5b12ac20fbb68cffb743b4cd4dce77c017
52,283
def maxTuple(tuples):
    """Return the tuple whose first element is largest."""
    maxTuple = None
    maxValue = None  # was 0.0, which silently returned None for all-negative inputs
    for tup in tuples:  # renamed from `tuple` to avoid shadowing the builtin
        if maxValue is None or tup[0] > maxValue:
            maxValue = tup[0]
            maxTuple = tup
    return maxTuple
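With the fix above the function also handles negative first elements (hypothetical data):

print(maxTuple([(-3.0, 'a'), (-1.0, 'b'), (-2.0, 'c')]))  # (-1.0, 'b'); the original returned None here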
1ad6c1b31008493efac751d4ea21c16ec20d041f
52,289
import base64


def featB64encode(feat):
    """Base64 encode feature.

    :param feat: feature
    :type feat: :class:`numpy.ndarray`
    :return: bytes (note: `base64.b64encode` returns bytes, not str)
    """
    return base64.b64encode(feat)
af509e6f3c67739f374c2762735d25b4ab80185c
52,291
def name_relative_to(parent_abspath, child_abspath):
    """
    Determine the relative name of a child path with respect to a parent system.

    Args
    ----
    parent_abspath : str
        Absolute path of the parent.

    child_abspath : str
        Absolute path of the child.

    Returns
    -------
    str
        Name of the child relative to the parent.
    """
    start = len(parent_abspath) + 1 if parent_abspath else 0
    return child_abspath[start:].split('.', 1)[0]
49e58743cec5822859eb4913fb64888201ec317e
52,293
def append(l: list, obj: object) -> list:
    """Extend the list in place if `obj` is itself a list, otherwise append it."""
    if isinstance(obj, list):
        l.extend(obj)
    else:
        l.append(obj)
    return l
21996e2f36baf323a44459b8d9eb9602f87df761
52,295
def tensor_extend_new_dim(x, dim, n):
    """Extend the tensor along a new dimension with n replicas.

    Args:
        x (Tensor): tensor to be extended
        dim (int): the position of the newly inserted dimension
        n (int): the number of replicas along dim

    Returns:
        Tensor: the extended tensor. Its shape is
            ``(*x.shape[0:dim], n, *x.shape[dim:])``
    """
    return x.unsqueeze(dim).expand(*x.shape[0:dim], n, *x.shape[dim:])
06296948f98a57e74caadba97eed392c62599201
52,299
def read_timestamp(metric):
    """Read the timestamp from the given metric and convert it from
    milliseconds to the seconds-based format used by time.time()."""
    return metric.timestamp / 1000.0
b065a3977df6dc6c78a803aad003d6ff170fa989
52,311
import re


def _expand_h(string):
    """Convert Huawei-style port ranges into a list of ports."""
    result = []
    # raw strings avoid the invalid-escape-sequence warning on \d
    for element in re.split(r'(?<!to) (?!to)', string):
        m = re.match(r'(\d+) to (\d+)', element)
        if m:
            for num in range(int(m.group(1)), int(m.group(2)) + 1):
                result.append(str(num))
        else:
            result.append(element)
    return result
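A quick illustration of the splitting logic (hypothetical input): spaces adjacent to "to" are kept, so "1 to 3" stays one range element while "5" is split off.

print(_expand_h('1 to 3 5'))  # ['1', '2', '3', '5']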
37334477e7bf830a8c430040b26187bfe7c13e5d
52,312
def df_column_to_strings(person_df, column_name):
    """
    Take a particular column from a dataframe and convert the values in said
    column into a string that can be used for a query (e.g. only searching
    amongst a specific set of persons).

    Parameters
    ----------
    person_df (df): dataframe that contains a column that can be converted
        into a longer string for use in future queries

    column_name (str): name of the column to be converted into a longer string

    Returns
    -------
    column_string (str): the string that contains all of the values previously
        contained within the dataframe's column
    """
    column_results = person_df[column_name].tolist()

    column_string = "("
    column_length = len(column_results)

    for idx, val in enumerate(column_results):
        column_string += str(val)
        if idx < column_length - 1:
            column_string += ", "
        else:
            column_string += ")"

    return column_string
2df20f4fa373f81d7b551df2bfc22571b8d3173d
52,313
def fts_pattern(pattern):
    """Convert a pattern to an FTS (full-text search) prefix-match representation."""
    fts = [f'{patt}*' for patt in pattern.split(' ') if patt]
    return ' '.join(fts)
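For illustration, each whitespace-separated term gets a trailing wildcard and empty tokens are dropped:

print(fts_pattern('foo  bar'))  # 'foo* bar*'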
615e8fdf64e3ea213d3d05002ef1d34be589145e
52,317
import torch


def generate_sorted_element_mask(previous_actions, input_length: int) -> torch.Tensor:
    """
    Generates a mask that prevents actions from attending to elements of the
    unordered set that have already been placed into the ordered sequence.

    Args:
        previous_actions: List of previous actions (in order) that we need to mask
        input_length: Number of elements in the unordered sequence

    Returns:
        Memory mask of shape (nb of previous actions + 1, input sequence length)
        suitable for use in transformer
    """
    # Generate lower triangular matrix (creates columns for masked input elements).
    # The i-th column of masked_cols is equal to the {a_i}'th column of the mask.
    masked_cols = torch.tril(
        torch.ones(len(previous_actions) + 1, len(previous_actions)) * float('-inf'),
        diagonal=-1)
    # Create empty mask
    mask = torch.zeros(len(previous_actions) + 1, input_length)
    # For each previous action, prevent further actions from attending to its
    # corresponding input element
    mask[:, previous_actions] = masked_cols
    return mask
c7b0bf85e2b1cad0737eba23847009116ac9ed9b
52,321
def getframemodname(frame):
    """Given a frame, return the module name."""
    globals_ = frame.f_globals
    if '__name__' in globals_:  # dict.has_key() was removed in Python 3
        return globals_['__name__']
    return None
98d0735f1bc1a492ddfd432626d73bd284fa2eab
52,322
def field_provides(field, ifaces):
    """Does field provide at least one of the interfaces specified?"""
    _pb = lambda iface: iface.providedBy(field)
    return any(map(_pb, ifaces))
91aa226118d00f69e7e2e56e2aabc8c9424e5797
52,323
from typing import List


async def standard_tweet_info(
    as_json: dict,
) -> tuple[str, List[str], List[str], List[str]]:
    """
    Returns the text, tickers, images, and hashtags of a tweet.

    Parameters
    ----------
    as_json : dict
        The json object of the tweet.

    Returns
    -------
    tuple[str, List[str], List[str], List[str]]
        str
            The text of the tweet.
        List[str]
            The tickers in the tweet.
        List[str]
            The images in the tweet.
        List[str]
            The hashtags in the tweet.
    """
    images = []

    # If the full text is available, use that
    if "extended_tweet" in as_json:
        text = as_json["extended_tweet"]["full_text"]
        ticker_list = as_json["extended_tweet"]["entities"]

        if "urls" in as_json["extended_tweet"]["entities"]:
            for url in as_json["extended_tweet"]["entities"]["urls"]:
                text = text.replace(url["url"], url["expanded_url"])

        # Add the media, check extended entities first
        if "extended_entities" in as_json["extended_tweet"]:
            if "media" in as_json["extended_tweet"]["extended_entities"]:
                for media in as_json["extended_tweet"]["extended_entities"]["media"]:
                    images.append(media["media_url"])
                    text = text.replace(media["url"], "")

    # Not an extended tweet
    else:
        text = as_json["text"]
        ticker_list = as_json["entities"]

        if "urls" in as_json["entities"]:
            for url in as_json["entities"]["urls"]:
                text = text.replace(url["url"], url["expanded_url"])

        if "media" in as_json["entities"]:
            for media in as_json["entities"]["media"]:
                images.append(media["media_url"])
                text = text.replace(media["url"], "")

    tickers = []
    hashtags = []

    # Process hashtags and tickers
    if "symbols" in ticker_list:
        for symbol in ticker_list["symbols"]:
            tickers.append(f"{symbol['text'].upper()}")

        # Also check the hashtags
        for symbol in ticker_list["hashtags"]:
            hashtags.append(f"{symbol['text'].upper()}")

    return text, tickers, images, hashtags
65e91d3ea01d7b1c3ed5e6cfb2370faa4bd9d107
52,324
import json


def artifact_file(tmpdir, artifact_content, artifact_filename, artifact_file_created):
    """
    This fixture creates a tempdir and writes the artifact_filename file with
    the artifact_content passed in via the fixture. Then it returns the path
    of the file. If artifact_content is a dict it will do a json dump,
    otherwise if it is just a string we'll write that directly.
    """
    f = tmpdir.join(artifact_filename)
    if artifact_file_created:
        # Only write the artifact if we want to actually create it.
        if isinstance(artifact_content, dict):
            artifact_content = json.dumps(artifact_content, indent=4)
        f.write(artifact_content)
    return f.strpath
fb00f64c01e08613fc6f7d7a5b2a75663019f491
52,328
def _make_safe(decorator, original):
    """Copy the function metadata from the original function to the decorator."""
    decorator.__name__ = original.__name__
    decorator.__dict__ = original.__dict__
    decorator.__doc__ = original.__doc__
    return decorator
75f765865082576716833d9d16dbcb87037a0997
52,330
import math


def _neq_per_residue(row_freq):
    """
    Compute the Neq of a vector of frequencies corresponding to one residue.

    The vector is a row of the frequency matrix. This function is intended to
    be called from the `numpy.apply_along_axis` function.

    Parameters
    ----------
    row_freq : 1D numpy array
        vector of frequencies

    Returns
    -------
    The value of the Neq
    """
    H = sum([math.log(freq) * freq for freq in row_freq if freq != 0])
    return math.exp(-H)
7b34d676bfc60df378ba20b2634e204b23e336cd
52,333
from collections import OrderedDict  # the concrete class, not the deprecated typing alias


def get_star_ids_from_upload_file(fullpath):
    """
    Get and return all star ids from a given WebObs upload text file.

    :param fullpath: upload text file name [string].
    :return: list of all star IDs, no duplicates, preserving order found in
        file [list of strings].
    """
    try:
        with open(fullpath) as f:
            lines = f.readlines()
    except FileNotFoundError:
        return list()

    # Ensure file declares itself to be either Extended or Visual format:
    type_line_found = False
    for line in lines:
        if line.upper().startswith('#TYPE='):
            type_line_found = True
            type_string = line.split('=')[1].strip()
            if type_string.upper() not in ['EXTENDED', 'VISUAL']:
                return list()
    if not type_line_found:
        return list()

    # Get delimiter, comma as default:
    delimiter = ','  # default if #DELIM line not found.
    for line in lines:
        if line.upper().startswith('#DELIM='):
            delimiter = line.split('=')[1].strip()
            if delimiter.lower() == 'comma':
                delimiter = ','
            break

    # Extract and return star_ids:
    lines = [line for line in lines if not line.startswith('#')]  # keep only observation text lines.
    star_ids_as_found = [line.split(delimiter)[0].strip() for line in lines]  # may contain duplicates.
    star_ids = list(OrderedDict.fromkeys(star_ids_as_found))  # no duplicates, order preserved.
    return star_ids
82f172a1455e02b728d7337894805e68798abf04
52,334
def good_mac(mac):
    """
    Convert a dash-separated MAC to a colon-separated, uppercase MAC.
    """
    return mac.upper().replace('-', ':')
1df09a84dac7c379bedf97846842c20b62a33206
52,337
import builtins
import re


def safer_eval(statement, locals):
    """A safer eval function. Does not allow `__`, lambda, or try statements;
    only includes certain 'safe' builtins."""
    allowed_builtins = ['True', 'False', 'str', 'bytes', 'int', 'float', 'len',
                        'any', 'all', 'sorted']
    for name in allowed_builtins:
        locals[name] = getattr(builtins, name)
    if re.search(r'__|try\s*:|lambda', statement):
        raise ValueError('`__`, lambda or try blocks not allowed in if statements.')
    return eval(statement, {'__builtins__': None}, locals)
b56d35919d1dbf75acf8509ee816e74fa2ab8192
52,344
def get_scop_labels_from_string(scop_label):
    """
    Split a SCOP label into its class, fold, superfamily, and family labels.

    >>> get_scop_labels_from_string('a.1.1.1')
    ('a', 'a.1', 'a.1.1', 'a.1.1.1')
    """
    class_, fold, superfam, fam = scop_label.split('.')
    fold = '.'.join([class_, fold])
    superfam = '.'.join([fold, superfam])
    fam = '.'.join([superfam, fam])
    return class_, fold, superfam, fam
59e869748a673b9ec210a719b9f887fad3a0f6b3
52,346
def get_min_max(nums):
    """
    Return a tuple (min, max) out of a list of unsorted integers.

    Args:
        nums(list): list of integers containing one or more integers
    """
    if len(nums) == 0:
        raise ValueError('nums must not be empty')
    if len(nums) == 1:
        return (nums[0], nums[0])

    min_, max_ = nums[0], nums[0]  # initialize
    for num in nums[1:]:  # one-pass traversal: O(n) in time, O(1) in space
        if num < min_:
            min_ = num
        elif num > max_:
            max_ = num
    return (min_, max_)
6f1beeaf5e165283106c85d79973b2fd6fbc33e4
52,348
def optimize(model):
    """
    Carry out reduce optimization

    Parameters
    ----------
    model
        mmtbx model object that contains H atoms;
        H atoms should be at appropriate distances

    Returns
    -------
    model
        mmtbx model object with optimized H atoms
    """
    # hierarchy object --> has hierarchy of structure
    pdb_hierarchy = model.get_hierarchy()
    # geometry restraints manager --> info about ideal bonds, angles; what atoms are bonded, etc.
    grm = model.get_restraints_manager()

    print("Reduce optimization happens here")

    return model
5ffc8cef6af9e2f8051caabdc57b0bece5a291fa
52,350
import difflib


def diff(before, after):
    """Diff two strings"""
    lines = difflib.Differ().compare(before.splitlines(), after.splitlines())
    return "\n".join(lines)
785e71526d8f36a7d7c405aadc6e07024c3d9d1f
52,354
def contains_duplicate(nums: list[int]) -> bool:
    """
    Args:
        nums: array of possibly non-distinct integers

    Returns:
        True if `nums` contains duplicate elements, False otherwise

    Examples:
        >>> contains_duplicate([1, 2, 3, 1])
        True
        >>> contains_duplicate([1, 2, 3, 4])
        False
        >>> contains_duplicate([1, 1, 1, 3, 3, 4, 3, 2, 4, 2])
        True
    """
    hash_table = {}
    for num in nums:
        if hash_table.get(num) is None:
            hash_table[num] = True
        else:
            return True
    return False
b09b64a4f4904b597d974abbf57d9e675efb4611
52,362
def validate_multiple_lists_length(*lists) -> bool:
    """Validates that all given lists are of the same length.

    Args:
        lists: the lists to be checked

    Returns:
        bool: True if all lists are of the same length, False otherwise.
    """
    list_len = -1
    for l in lists:
        try:
            iter(l)
        except BaseException:
            return False
        if list_len == -1:  # first list
            list_len = len(l)
        elif list_len != len(l):
            return False
    return True
7874614f1e500c5d312abd4636c03df100fe6b7b
52,366
def type_check(*args):
    """
    Checks if all types in args are correct.

    :param args: List[Tuple[object, type]] = list of objects and their expected types
    :return: bool = True, or raises a TypeError
    """
    for arg in args:
        if type(arg[0]) != arg[1]:
            raise TypeError(f"Type of '{type(arg[0])}' is wrong! Try {arg[1]} instead")
    return True
be618f2cafb963a74949e0d17fea7bc414d5d86b
52,367
def read_weather_inputs(ClockStruct, weather_df):
    """
    Clip weather data to the start and end simulation dates.

    *Arguments:*\n
    `ClockStruct` : `ClockStructClass` : time parameters

    `weather_df` : `pd.DataFrame` : weather data

    *Returns:*

    `weather_df` : `pd.DataFrame` : clipped weather data
    """
    # get the start and end dates of the simulation
    start_date = ClockStruct.SimulationStartDate
    end_date = ClockStruct.SimulationEndDate

    assert weather_df.Date.iloc[0] <= start_date
    assert weather_df.Date.iloc[-1] >= end_date

    # remove weather data outside of simulation dates
    weather_df = weather_df[weather_df.Date >= start_date]
    weather_df = weather_df[weather_df.Date <= end_date]

    return weather_df
d434b034c39222f80444a79c5dede04c6052977a
52,369
def _get_species_name_from_line(htmlline):
    """Inside an htmlline, return the genus and species name of a specimen.

    Args:
        htmlline (str): A string of html, should be from AntWeb.

    Returns:
        tuple(str, str): Genus and species.
    """
    a = "?genus="
    b = "&species="
    genus = htmlline.split(a)[-1].split(b)[0]

    c = "species="
    d = "&rank="
    species = htmlline.split(c)[-1].split(d)[0]

    return genus, species
443d7a305700b9b43838ac11a6ff968e752659ae
52,370
def _find_aerosols(obs, is_falling, is_liquid):
    """Estimates aerosols from lidar backscattering.

    Aerosols are lidar signals that are: a) not falling, b) not liquid droplets.

    Args:
        obs (ClassData): The :class:`ClassData` instance.
        is_falling (ndarray): 2-D boolean array of falling hydrometeors.
        is_liquid (ndarray): 2-D boolean array of liquid droplets.

    Returns:
        ndarray: 2-D boolean array containing aerosols.
    """
    is_beta = ~obs.beta.mask
    return is_beta & ~is_falling & ~is_liquid
7df68593dfcded48a20614e5714e3c6f12317cbf
52,378
import typing


def get_restriction_time(string: str) -> typing.Optional[int]:
    """
    Get user restriction time in seconds

    :param string: string to check for multiplier. The last symbol should be
        one of: "m" for minutes, "h" for hours and "d" for days
    :return: number of seconds to restrict or None if error
    """
    if len(string) < 2:
        return None
    letter = string[-1]
    try:
        number = int(string[:-1])
    except ValueError:  # int() raises ValueError on a bad number, not TypeError
        return None
    else:
        if letter == "m":
            return 60 * number
        elif letter == "h":
            return 3600 * number
        elif letter == "d":
            return 86400 * number
        else:
            return None
0581bf779a8b44eff970f38985b9af968471d0de
52,381
def each_class_max(W):
    """Determine if a unique walk matrix demonstrates each class max.

    Each class max is defined as: for each class C[i] in W there exists a
    walk length L[x] such that C[i] has a larger number of walks of length
    L[x] than all other classes.

    Parameters
    ----------
    W : Numpy Matrix
        Unique walk matrix as returned by `walk_classes`

    Returns
    -------
    boolean
        True if each-class-max holds, false if not
    """
    # Work with an ndarray representation of W
    w = W.getA()

    # Get the number of rows and columns in W
    num_rows, num_cols = w.shape

    # Possible classes
    classes = [i for i in range(0, num_rows)]

    # Possible walks
    walks = [i for i in range(0, num_cols)]

    # Check all classes
    for cls in classes:
        # Whether class has a max
        has_max = False

        # Check all walks
        for walk in walks:
            # Find maximum of other classes for same walk
            maximum = max([w[cls2][walk] for cls2 in classes if cls2 != cls])

            # Set has_max to true if number of walks is greater
            if w[cls][walk] > maximum:
                has_max = True
                break

        # If not max for some length return false
        if not has_max:
            return False

    # Return true if property is not violated
    return True
cf4b5822fa5080cb2c343a20331e352ff4eccfaa
52,386
def supply_space(max_length, target_str):
    """
    Left-pad the given string with spaces up to a specific width; if the
    string already exceeds that width, it is returned unchanged.

    Args:
        max_length: target width
        target_str: the string to be padded

    Returns:
        target_str: the adjusted string
    """
    if max_length > len(target_str):
        target_str = " " * (max_length - len(target_str)) + target_str
    return target_str
cf866c68dc77c8f938b413d1bb04273ac1ec4be0
52,387
def call_with_context(ctx, func, *args, _context_callable=False, **kwargs):
    """
    Returns the value of func(*args, **kwargs) within the context.

    Set '_context_callable=True' if your contextmanager needs to be called first.
    """
    if _context_callable:
        ctx = ctx()
    with ctx:
        return func(*args, **kwargs)
0ded4d39d983af4d69c5bb3c5b271b4015327cd2
52,388
def removesuffix(s: str, suffix: str) -> str:
    """Removes the suffix from s."""
    if suffix and s.endswith(suffix):  # guard against empty suffix: s[:-0] would return ''
        return s[:-len(suffix)]
    return s
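Two hypothetical calls showing the behavior, including the empty-suffix edge case the added guard protects against:

print(removesuffix('archive.tar.gz', '.gz'))  # 'archive.tar'
print(removesuffix('archive', ''))            # 'archive' (without the guard this returned '')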
538ca2ef7021b01f09a7c9db56069639a9389d95
52,390
def get_padding(shape1, shape2):
    """
    Return the padding needed to convert shape2 to shape1.
    """
    assert len(shape1) == len(shape2)
    h_diff, w_diff = shape1[1] - shape2[1], shape1[2] - shape2[2]

    padding_left = w_diff // 2
    padding_right = w_diff - padding_left
    padding_top = h_diff // 2
    padding_bottom = h_diff - padding_top

    return (padding_left, padding_right, padding_top, padding_bottom)
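A hypothetical call, reading the shapes as (channels, height, width):

print(get_padding((3, 10, 12), (3, 8, 9)))  # (1, 2, 1, 1)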
7092cb8aa11111800ecfba108b9ea7cf00e14ade
52,391
def checkAnswer(guess, a_followers, b_followers):
    """Take the user's guess and the follower counts and return True/False."""
    if a_followers > b_followers:
        return guess == "a"
    else:
        return guess == "b"
f1e19c81795d53c80165b0b2a1d6c1bc4e21a27f
52,392
import hashlib
import time


def compute_file_checksum(path, read_chunksize=65536, algorithm='sha256'):
    """Compute checksum of a file's contents.

    :param path: Path to the file
    :param read_chunksize: Maximum number of bytes to be read from the file
        at once. Default is 65536 bytes or 64KB
    :param algorithm: The hash algorithm name to use. For example, 'md5',
        'sha256', 'sha512' and so on. Default is 'sha256'. Refer to
        hashlib.algorithms_available for available algorithms
    :return: Hex digest string of the checksum

    .. versionadded:: 3.31.0
    """
    checksum = hashlib.new(algorithm)  # Raises appropriate exceptions.
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(read_chunksize), b''):
            checksum.update(chunk)

            # Release greenthread, if greenthreads are not used it is a noop.
            time.sleep(0)
    return checksum.hexdigest()
a0b86ee70f4349809aed62195efdb2969faaacd4
52,394
def gen_wtml(base_dir, depth, **kwargs):
    """
    Create a minimal WTML record for a pyramid generated by toasty

    Parameters
    ----------
    base_dir : str
        The base path to a toast pyramid, as you wish for it to appear in
        the WTML file (i.e., this should be a path visible to a server)
    depth : int
        The maximum depth of the pyramid

    Optional Keywords
    -----------------
    FolderName
    BandPass
    Name
    Credits
    CreditsUrl
    ThumbnailUrl

    Returns
    -------
    wtml : str
        A WTML record
    """
    kwargs.setdefault('FolderName', 'Toasty')
    kwargs.setdefault('BandPass', 'Visible')
    kwargs.setdefault('Name', 'Toasty map')
    kwargs.setdefault('Credits', 'Toasty')
    kwargs.setdefault('CreditsUrl', 'http://github.com/ChrisBeaumont/toasty')
    kwargs.setdefault('ThumbnailUrl', '')
    kwargs['url'] = base_dir
    kwargs['depth'] = depth

    template = ('<Folder Name="{FolderName}">\n'
                '<ImageSet Generic="False" DataSetType="Sky" '
                'BandPass="{BandPass}" Name="{Name}" '
                'Url="{url}/{{1}}/{{3}}/{{3}}_{{2}}.png" BaseTileLevel="0" '
                'TileLevels="{depth}" BaseDegreesPerTile="180" '
                'FileType=".png" BottomsUp="False" Projection="Toast" '
                'QuadTreeMap="" CenterX="0" CenterY="0" OffsetX="0" '
                'OffsetY="0" Rotation="0" Sparse="False" '
                'ElevationModel="False">\n'
                '<Credits> {Credits} </Credits>\n'
                '<CreditsUrl>{CreditsUrl}</CreditsUrl>\n'
                '<ThumbnailUrl>{ThumbnailUrl}</ThumbnailUrl>\n'
                '<Description/>\n</ImageSet>\n</Folder>')

    return template.format(**kwargs)
e0a98299b21f7e8403a56e06c03f5736f94806d8
52,395
import binascii
import zlib


def unpack_chunk(chunk_data: str) -> bytes:
    """
    Unpacks previously packed chunk data back into the original bytes
    representation.

    :param chunk_data: The compressed, base64-encoded string to convert back
        to the source bytes object.
    """
    if not chunk_data:
        return b''
    chunk_compressed = binascii.a2b_base64(chunk_data.encode('utf-8'))
    return zlib.decompress(chunk_compressed)
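A plausible round trip, assuming the (not shown) packing side zlib-compresses and base64-encodes; the packing step below is only illustrative:

packed = binascii.b2a_base64(zlib.compress(b'hello world')).decode('utf-8')
print(unpack_chunk(packed))  # b'hello world'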
5978ede0f699de1b9954670e355989d626dae9ed
52,396
def insert_manifest(cursor, workspace_id, spec):
    """Register manifest information.

    Args:
        cursor (mysql.connector.cursor): cursor
        workspace_id (int): workspace ID
        spec (Dict): manifest information in JSON form

    Returns:
        int: manifest_id
    """
    # Execute the insert
    cursor.execute(
        'INSERT INTO manifest ( workspace_id, file_name, file_text )'
        ' VALUES ( %(workspace_id)s, %(file_name)s, %(file_text)s )',
        {
            'workspace_id': workspace_id,
            'file_name': spec['file_name'],
            'file_text': spec['file_text'],
        }
    )
    # Return the ID of the newly inserted manifest row
    return cursor.lastrowid
15f5e2ae29a9169583d6c30bc46c3a56a4357883
52,404
def overlap(span, spans):
    """Determine whether span overlaps with anything in spans."""
    return any(
        start <= span[0] < end or start < span[1] <= end
        for start, end in spans
    )
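Two hypothetical spans to illustrate the boundary conditions:

print(overlap((1, 5), [(4, 8)]))  # True  (4 < 5 <= 8)
print(overlap((1, 4), [(4, 8)]))  # False (the spans only touch)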
35c593c57517303744cd9fd5c0858cbef15ffff9
52,405
from typing import Dict


def preprocess_sd(sd: Dict):
    """
    Removes the 'module.' prefix (added by wrappers such as nn.DataParallel)
    from state dict keys.

    Args:
        sd: input state dict

    Returns:
        preprocessed state dict
    """
    preprocessed = {}
    for key in sd.keys():
        preprocessed[key[7:]] = sd[key]  # len('module.') == 7
    return preprocessed
db0e1b30cc2b5702f88205fb6027d6e4df64b4ed
52,408
def pre_shared_key_id_getter(self):
    """Return the ID field / endpoint_name field.

    :param self: Instance of the entity for which this is a custom method.
    :type self: mbed_cloud.foundation.PreSharedKey
    :return: Entity ID (which shadows the endpoint name)
    :rtype: str
    """
    # The underlying value of endpoint name will be set by the REST API, so
    # use this in preference to the id
    return self._endpoint_name.value
9d8b12d31cbd6e951c956294cce55342d20efff7
52,409
def _format_comment(comment, comm_char):
    """Format a multiline comment."""
    return '\n'.join(f'{comm_char} {_line}' for _line in comment.splitlines())
ecbf797e32f0ba0d4e779bcbe043e487d3d65b88
52,410
from typing import List


def Unique(ls: List, **sort_args):
    """Return a sorted list of unique elements.

    Args:
        ls (List): The input list (or other iterable).
        sort_args: Keyword arguments passed through to `sorted` (e.g. `key`,
            `reverse`).

    Returns:
        List: The sorted list of unique elements.
    """
    return sorted(set(ls), **sort_args)
691277a847010957d085ad19648ac2b8db4c3706
52,416
import re


def add_subgroup_columns_from_text(
    df, text_column, subgroups, expect_spaces_around_words=True
):
    """Adds a boolean column for each subgroup to the data frame.

    New column contains True if the text contains that subgroup term.

    Args:
        df: Pandas dataframe to process.
        text_column: Column in df containing the text.
        subgroups: List of subgroups to search text_column for.
        expect_spaces_around_words: Whether to expect subgroup to be surrounded
            by spaces in the text_column. Set to False for languages which do
            not use spaces.
    """
    ndf = df.copy()
    for term in subgroups:
        if expect_spaces_around_words:
            # pylint: disable=cell-var-from-loop
            ndf[term] = ndf[text_column].apply(
                lambda x: bool(
                    re.search("\\b" + term + "\\b", x, flags=re.UNICODE | re.IGNORECASE)
                )
            )
        else:
            ndf[term] = ndf[text_column].str.contains(term, case=False)
    return ndf
c82a2d85f7df5c0cfb69055c472f00a44e32cd18
52,419
def clamp(value, min_v, max_v):
    """return rectified value (i.e. the closest point in [`min_v`, `max_v`])"""
    return max(min_v, min(value, max_v))
f4beb1ecfc57a59840a428e3443937744c3a8f83
52,425
def transform_gcp_vpcs(vpc_res):
    """Transform the VPC response object for Neo4j ingestion.

    :param vpc_res: The return data
    :return: List of VPCs ready for ingestion to Neo4j
    """
    vpc_list = []

    # prefix has the form `projects/{project ID}/global/networks`
    prefix = vpc_res['id']
    projectid = prefix.split('/')[1]
    for v in vpc_res.get('items', []):
        vpc = {}
        partial_uri = f"{prefix}/{v['name']}"

        vpc['partial_uri'] = partial_uri
        vpc['name'] = v['name']
        vpc['self_link'] = v['selfLink']
        vpc['project_id'] = projectid
        vpc['auto_create_subnetworks'] = v.get('autoCreateSubnetworks', None)
        vpc['description'] = v.get('description', None)
        vpc['routing_config_routing_mode'] = v.get('routingConfig', {}).get('routingMode', None)

        vpc_list.append(vpc)

    return vpc_list
46a997c9bc51f5be96f5ae95c47290c97b887a34
52,434
def isfloat(value):
    """Returns whether a given value can be converted to a float or not.

    Args:
        value: Input value of any type

    Returns:
        bool: True if the input value can be parsed to a float
    """
    try:
        float(value)
        return True
    except (TypeError, ValueError):  # a bare except would also swallow e.g. KeyboardInterrupt
        return False
3a0acb0d06d149761844ff3e01c30839129351c5
52,438
def I_box(m, w, l):
    """Moment of inertia of a box with mass m, width w and length l."""
    return m * (w**2 + l**2) / 12
60287d37634c185b2e2632e5ce295d1127f3dd21
52,439
import math


def grade(zscore) -> str:
    """
    Assigns a color based on a 3-point gradient of red-white-green.

    Returns a 6-digit hexadecimal string based on zscore.
    """
    def clamp(x):
        """Clamps an rgb value to (0, 255)."""
        return max(0, min(x, 255))

    red = (255, 0, 0)
    white = (255, 255, 255)
    green = (0, 255, 0)

    if math.isnan(zscore):
        return '#%02x%02x%02x' % white

    start, end = white, red if zscore < 0 else green
    x = abs(zscore)
    r, g, b = (int(x * end[i] + (1 - x) * start[i]) for i in range(3))
    hex = '#%02x%02x%02x' % (clamp(r), clamp(g), clamp(b))
    return hex
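A few hypothetical z-scores spanning the gradient:

print(grade(-1.0))  # '#ff0000' (red)
print(grade(0.0))   # '#ffffff' (white)
print(grade(1.0))   # '#00ff00' (green)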
570c50e27866e5c438f0b407550517ca69e05927
52,444
def process_sample(G, sample):
    """Interpret the DQM solution in terms of the partitioning problem."""
    # Display results to user
    group_1 = []
    group_2 = []
    sep_group = []
    for key, val in sample.items():
        if val == 0:
            group_1.append(key)
        elif val == 1:
            group_2.append(key)
        else:
            sep_group.append(key)

    # Display best result
    print("\nPartition Found:")
    print("\tGroup 1: \tSize", len(group_1))
    print("\tGroup 2: \tSize", len(group_2))
    print("\tSeparator: \tSize", len(sep_group))

    print("\nSeparator Fraction: \t", len(sep_group) / len(G.nodes()))

    # Determines if there are any edges directly between the large groups
    illegal_edges = [(u, v) for u, v in G.edges
                     if ({sample[u], sample[v]} == {0, 1})]
    print("\nNumber of illegal edges:\t", len(illegal_edges))

    return group_1, group_2, sep_group, illegal_edges
5217f01a97a10ceba9ce9a8f24eacc31ddae09ca
52,446
def file_read(path, encoding='utf-8', errors='strict'):
    """Return a file's contents as a string."""
    with open(path, 'r', encoding=encoding, errors=errors) as f:
        return f.read()
3e93b41f9ed3323fbfc3c39ce069570000ba08f7
52,453
def reverse(v):
    """
    Reverses any sequence that supports slicing (str, list, tuple, ...).
    """
    return v[::-1]
e652b4046e3e1b473bcea4408dea33cc0ba1cee6
52,455
def round_thousands_to_1dp(value: float) -> float:
    """
    Rounds values to 1dp in steps of 1000.

    Rounds values >= 1B to the nearest 100M.
    Rounds values < 1B and >= 1M to the nearest 100k.
    Rounds values < 1M and >= 1k to the nearest 100.
    Rounds values less than 1000 to 1dp.
    If value is not a number, returns the original value.

    Args:
        value (float): Value to round

    Returns:
        float: Rounded value
    """
    try:
        if value >= 1_000_000_000:
            return round(value, -8)
        if value >= 1_000_000:
            return round(value, -5)
        if value >= 1000:
            return round(value, -2)
        return round(value, 1)
    except TypeError:
        return value
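A few hypothetical values to show the tiering:

print(round_thousands_to_1dp(1_234_567))  # 1200000 (nearest 100k)
print(round_thousands_to_1dp(12_345))     # 12300   (nearest 100)
print(round_thousands_to_1dp(3.14159))    # 3.1     (1dp)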
2478bf43e09daef37b8d274ce5ea5c6bae4f7ddf
52,456
def string_to_intlist(s):
    """Takes a normal string and returns a list of numerical values (codepoints)."""
    return [ord(c) for c in s]
6b54ea37ac3c02ed732cabbd349c102cc72fd96d
52,458
from pathlib import Path
import csv


def get_csv(path: Path) -> list:
    """
    Read CSV with lead column like HEADER (1), COMMENT (0..N), DATA (1..N).

    :param path: Path of the csv-file (;-separated)
    :return: list(dict) with keys like in HEADER row.
    """
    def map_line(line: list, fields: dict) -> dict:
        d = {}
        for k in fields:
            d[fields[k]] = line[k]
        return d

    with open(path) as fp:
        reader = csv.reader(fp, delimiter=';', quotechar='"')
        header = next(reader)
        assert header[0].endswith("HEADER")
        fields = {}
        for i in range(1, len(header)):
            if header[i]:
                fields[i] = header[i]
        data = []
        for row in reader:
            if row[0].endswith("COMMENT"):
                continue
            data.append(map_line(row, fields))
        return data
c2f43099b5acd5f21366e402b2eee6dd839b8e0a
52,465
import collections


def find_container(intersections):
    """
    Returns the container node.

    Parameters
    ----------
    intersections: List[Intersection]
        Full list of intersections of ray with a scene.

    Returns
    -------
    Node
        The container node

    Example
    -------
    >>> intersections = scene.intersections(ray.position, ray.directions)
    >>> container = find_container(intersections)
    """
    if len(intersections) == 1:
        return intersections[0].hit
    count = collections.Counter([x.hit for x in intersections]).most_common()
    candidates = [x[0] for x in count if x[1] == 1]
    pairs = []
    for intersection in intersections:
        node = intersection.hit
        if node in candidates:
            pairs.append((node, intersection.distance))
    # [(node, dist), (node, dist), ...]
    pairs = sorted(pairs, key=lambda tup: tup[1])
    containers, _ = zip(*pairs)
    container = containers[0]
    return container
e1f380c30db5d8e5f13ded00dd01f126ca2b7472
52,466
import pathlib


def file_get_extension(filepath, with_extension_dot=False, only_last_extension=False):
    """Returns the extension of a file from a provided filepath.

    with_extension_dot (default: False): if True, includes the dot, e.g. '.txt'
    only_last_extension (default: False): if True, returns only the last
        extension, e.g. 'gz' instead of 'tar.gz'
    """
    if only_last_extension:
        return pathlib.Path(filepath).suffix[(0 if with_extension_dot else 1):]
    else:
        return "".join(
            pathlib.Path(filepath).suffixes)[(0 if with_extension_dot else 1):]
762b47905ae9598aa607a66323fefc5b507cc385
52,468
import json


def get_metrics_collections(file_paths):
    """
    Reads the json files to be averaged and returns the json contents.

    :param file_paths: A list of file paths
    :return: A list of dictionaries, which represent the metrics obtained
        from one run of Collector
    """
    metrics_collections = []
    for file_path in file_paths:
        with open(file_path) as f:
            metrics_collections.append(json.load(f))
    return metrics_collections
4db2fbf3b0e63629a76a64bc4e4107fff8680e3f
52,469
def get_monitor_group(subparser):
    """
    Retrieve the monitor group for the argument parser.

    Since the monitor group is shared between commands, we provide a common
    function to generate the group for it. The user can pass the subparser,
    and the group is added, and returned.
    """
    # Monitoring via https://github.com/spack/spack-monitor
    monitor_group = subparser.add_argument_group()
    monitor_group.add_argument(
        '--monitor', action='store_true', dest='use_monitor', default=False,
        help="interact with a monitor server during builds.")
    monitor_group.add_argument(
        '--monitor-no-auth', action='store_true', dest='monitor_disable_auth',
        default=False, help="the monitoring server does not require auth.")
    monitor_group.add_argument(
        '--monitor-tags', dest='monitor_tags', default=None,
        help="One or more (comma separated) tags for a build.")
    monitor_group.add_argument(
        '--monitor-keep-going', action='store_true', dest='monitor_keep_going',
        default=False, help="continue the build if a request to monitor fails.")
    monitor_group.add_argument(
        '--monitor-host', dest='monitor_host', default="http://127.0.0.1",
        help="If using a monitor, customize the host.")
    monitor_group.add_argument(
        '--monitor-prefix', dest='monitor_prefix', default="ms1",
        help="The API prefix for the monitor service.")
    return monitor_group
92ef507213c62cf5e6c9518a9b6866bcb9f0c3b7
52,472
def _normalize_names(name):
    """Normalize column names by stripping surrounding whitespace and asterisks."""
    name = name.strip()
    name = name.strip("*")
    return name
435e6ec6ddc06bd73131fd2594a388d3175f42a5
52,474
def subs_potentials(A, B, tol):
    """Difference between two sets of data of the same length.

    Args:
        A/B: the arrays (2D)
        tol: the tolerance of the difference

    Returns:
        C: a new array (2D)
    """
    C = A.copy()  # copy so the input array A is not mutated in place

    for i in range(len(A)):
        C[i, 0] = A[i, 0]
        if abs(A[i, 1] - B[i, 1]) <= tol:
            C[i, 1] = 0
        else:
            C[i, 1] = A[i, 1] - B[i, 1]

    return C
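A small numpy illustration (hypothetical data; with the copy fix, A is left untouched):

import numpy as np
A = np.array([[0.0, 1.0], [1.0, 2.0]])
B = np.array([[0.0, 1.05], [1.0, 0.5]])
print(subs_potentials(A, B, tol=0.1))  # [[0., 0.], [1., 1.5]]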
e2ad4999bf9fccf3532f93d66df70f17ba684ca5
52,476
def sanitize(value):
    """Makes sure filenames are valid by replacing illegal characters.

    :param value: string
    """
    return value.replace('/', '_')
94f72e2bf1d1cf08dde9ae6c192ed1b68a875900
52,478
import requests


def fetch_local_weather(lat, lon, API_key=""):
    """
    Retrieve current weather from api.openweathermap.org.

    WARNING: Free API allows a maximum of 60 requests/minute

    Documentation: http://openweathermap.org/current#geo

    Returns a dict decoded from the API's JSON response.
    Returns 'None' if ANY errors occur or the service is unavailable.
    """
    API_URL = 'http://api.openweathermap.org/data/2.5/weather'
    # Honor a caller-supplied key; fall back to the module's built-in key.
    # (The original ignored the API_key parameter entirely.)
    API_KEY = API_key or 'e5591c0f23cf2f87b7854d06192b36af'
    local_params = dict(lat=lat, lon=lon, APPID=API_KEY)
    response = requests.get(API_URL, params=local_params)
    if response:
        local_weather = response.json()
        if local_weather['cod'] == 200:
            return local_weather
    return None
e18913665c29cbf946f10b5ffb729291c3d56909
52,485
def second_tensor_invariant(A):
    """
    Calculates the second tensor invariant of a symmetric 3x3 matrix.
    Returns a scalar.
    """
    I2 = (
        (A[1, 1] * A[2, 2]) + (A[2, 2] * A[0, 0]) + (A[0, 0] * A[1, 1])
        - A[1, 2]**2 - A[2, 0]**2 - A[0, 1]**2
    )
    return I2
ff3eda7db0334371033199cbfde7631e3d3f574f
52,486
def greedy(graph):
    """Calculates and returns shortest tour using greedy approach.

    Runs in O(n^2). Provides approximate solution.

    Args:
        graph: instance of a Graph.

    Returns:
        list: sequence of nodes constituting shortest tour.
    """
    tour = []
    available_nodes = set(graph.nodes())

    # pick random starting node, add it to tour path
    starting_node = available_nodes.pop()
    tour.append(starting_node)

    while available_nodes:
        # continue from previously added node
        prev_node = tour[-1]

        # pick next closest node out of available ones
        next_node = min(
            (candidate for candidate in available_nodes),
            key=lambda x: graph.distance(prev_node, x)
        )

        tour.append(next_node)
        available_nodes.remove(next_node)

    return tour
b2145adb162e99532cf14574acc7b08ec058f480
52,489
import math


def solve_tilted_rectangle(xend, yend, length, width, angle,
                           padding=0.0, pad_upper=True):
    """
    Given a rectangle of a certain length, width and orientation, knowing the
    coordinates of the centre of one end of the rectangle, return the
    coordinates of the corners.

    :Parameters:

    xend: float
        X coordinate of the centre of the upper edge (at the extremity of
        the length) of the rectangle.
    yend: float
        Y coordinate of the centre of the same edge of the rectangle.
    length: float
        Length of the rectangle
    width: float
        Width of the rectangle
    angle: float
        Angle of the rectangle (radians).
    padding: float (optional)
        An optional padding to be applied to the edges of the rectangle,
        increasing the length and the width by 2 * padding. This parameter
        can be used to determine the corners of a new rectangle which avoids
        the edges of the original rectangle by at least this amount.
        By default the padding is zero and the corners of the original
        rectangle are returned.
    pad_upper: boolean (optional)
        Set True (the default) to pad the upper edge of the rectangle.
        Setting this to False allows one end of the rectangle to have a much
        smaller padding.

    :Returns:

    quad: tuple of ((x1,y1),(x2,y2),(x3,y3),(x4,y4))
        The corner coordinates of the rectangle
    """
    assert float(length) > 0.0
    assert float(width) > 0.0

    # The coordinates of the other edge of the rectangle can be calculated
    # from the length and orientation.
    xlength = length * math.cos(angle)
    ylength = length * math.sin(angle)
    xother = xend - xlength
    yother = yend - ylength

    # The X and Y increments of the corners from these ends depend on
    # the width and orientation
    xwidth2 = width * math.sin(angle) / 2.0
    ywidth2 = width * math.cos(angle) / 2.0

    x1 = xother + xwidth2
    y1 = yother - ywidth2
    x2 = xother - xwidth2
    y2 = yother + ywidth2
    x3 = xend - xwidth2
    y3 = yend + ywidth2
    x4 = xend + xwidth2
    y4 = yend - ywidth2

    # If required, apply a padding to the corner coordinates.
    if padding > 0.0:
        xlength_pad = padding * math.cos(angle)
        ylength_pad = padding * math.sin(angle)
        xwidth_pad = padding * math.sin(angle)
        ywidth_pad = padding * math.cos(angle)

        x1 = x1 - xlength_pad + xwidth_pad
        y1 = y1 - ylength_pad - ywidth_pad
        x2 = x2 - xlength_pad - xwidth_pad
        y2 = y2 - ylength_pad + ywidth_pad
        if pad_upper:
            x3 = x3 + xlength_pad - xwidth_pad
            y3 = y3 + ylength_pad + ywidth_pad
            x4 = x4 + xlength_pad + xwidth_pad
            y4 = y4 + ylength_pad - ywidth_pad
        else:
            # Only pad the width at the upper end of the rectangle
            x3 = x3 - xwidth_pad
            y3 = y3 + ywidth_pad
            x4 = x4 + xwidth_pad
            y4 = y4 - ywidth_pad

    quad = [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
    return quad
7208f27aa51aaf1abe81bd9f5180a63fede7f1f8
52,491
def hypothesis(theta0, theta1, x):
    """Return our hypothesis, or guess, given the parameters and input value."""
    return theta0 + (theta1 * x)
676745e28959d7b7bf99018236175b5b3ae84d90
52,494
import json


def load_dict(d):
    """Function that loads json dictionaries."""
    with open(d, 'r', encoding='utf-8') as in_f:
        # load reference dict (based on training data) to settle disputes based on frequency
        dic = json.load(in_f)  # the redundant explicit close() inside `with` was removed
    return dic
55d79180d48ed9b4bb1701ce1cf89a65951076b1
52,497
def stones(n, a, b):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/manasa-and-stones/problem

    Manasa is out on a hike with friends. She finds a trail of stones with
    numbers on them. She starts following the trail and notices that any two
    consecutive stones' numbers differ by one of two values. Legend has it
    that there is a treasure trove at the end of the trail. If Manasa can
    guess the value of the last stone, the treasure will be hers.

    For example, assume she finds 2 stones and their differences are a = 2 or
    b = 3. We know she starts with a 0 stone not included in her count. The
    permutations of differences for the two stones would be [2, 2], [2, 3],
    [3, 2] or [3, 3]. Looking at each scenario, stones might have [2, 4],
    [2, 5], [3, 5] or [3, 6] on them. The last stone might have any of 4, 5,
    or 6 on its face.

    Compute all possible numbers that might occur on the last stone given a
    starting stone with a 0 on it, a number of additional stones found, and
    the possible differences between consecutive stones. Order the list
    ascending.

    Args:
        n (int): the number of non-zero stones found
        a (int): the first possible difference
        b (int): the other possible difference

    Returns:
        list: list of the possible values of the last stone
    """
    results = []
    for i in range(0, n):
        stone = (n - i - 1) * a + i * b
        results.append(stone)
    return sorted(set(results))
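Reproducing the docstring's scenario; note the function applies n - 1 differences, so n = 3 here corresponds to the trail of "2 found stones" plus the starting 0 stone:

print(stones(3, 2, 3))  # [4, 5, 6]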
6181d26e3b90d5da15bcaabde6624a595a70a77e
52,498
def position_order(open: list, close: list):
    """
    Pair an open position order with a close position order.

    :param open: a list of orders that may contain an open position order
    :param close: a list of orders that may contain a close position order
    :return: a tuple pairing an open order with a close position order, or
        None if no pair is found
    """
    for o in open:
        for c in close:
            if o.position == "OPEN" and c.position == "CLOSE":
                open.remove(o)
                close.remove(c)
                return (o, c)
    return None
b70a0740c59168c29226c2e083a6268ec14ad0a6
52,503
def GreedyMatching(edges, max_count: int):
    """Find a matching greedily.

    Edges are picked sequentially from the top of the input, so this is also
    useful for greedy matching with weights (pre-sort the edges by weight).
    """
    M = set()
    matched_nodes = set()

    for u, v in edges:
        if u not in matched_nodes and v not in matched_nodes:
            matched_nodes.add(u)
            matched_nodes.add(v)
            M.add((u, v))
        if len(M) == max_count:
            return M

    return M
a45b1ca1a4664ab9a282dfde8b7e0c59692caf98
52,504
import statistics


def getMergeOperation(datatype):
    """
    FIO log files with a numjobs larger than 1 generate a separate file for
    each job thread. So if numjobs is 8, there will be eight files. We need
    to merge the data from all those job files into one result.

    Depending on the type of data, we must sum or average the data.
    This function returns the appropriate function/operation based on the type.
    """
    operationMapping = {'iops': sum,
                        'lat': statistics.mean,
                        'clat': statistics.mean,
                        'slat': statistics.mean,
                        'bw': sum,
                        'timestamp': statistics.mean}

    opfunc = operationMapping[datatype]
    return opfunc
12e34b3f6c35c3b0fbdd44bb49e7db814185eccb
52,506
def GenericInteractHandler(widget, new_value, column_name, index):
    """Generic callback for interactive value updates."""
    widget.update_dataframe(new_value, column_name, index)
    return new_value
f0e09a1a761dc6916291eca415944e5c5b583d71
52,510
def should_be_dark_extensions(time_current: int, time_dark: int):
    """Determines if dark mode should be active, like the extensions do."""
    return time_dark <= time_current
2a8db4acbfc686cd99d050b822bfe443a5099186
52,511
import struct


def bytes_2_double(bytes_double_string, is_little_endian=False):
    """
    Convert an 8-byte bytes string into a float.

    :param bytes_double_string: the 8-byte input
    :param is_little_endian: whether the input is little-endian
    :return: the decoded float
    """
    # little-endian data
    if is_little_endian:
        return struct.unpack('<d', bytes_double_string)[0]
    # big-endian data
    return struct.unpack('>d', bytes_double_string)[0]
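A round trip via struct.pack to check both byte orders:

print(bytes_2_double(struct.pack('<d', 3.14), is_little_endian=True))  # 3.14
print(bytes_2_double(struct.pack('>d', 3.14)))                         # 3.14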
cf0ff4a933ce745543b920e70c6846e4cab9f6f6
52,518
import random


def rnd(words):
    """
    Picks a random word from the list.

    Parameters
    ----------
    words : list of str
        list of valid words.

    Returns
    -------
    string
        the guess word.
    """
    return random.sample(words, 1)[0] if words else ""
52636d5ffb130fefe3bc4ec79e394bf7efd4e608
52,519
import re


def s3_remove_root_path(key):
    """Remove root element from path"""
    file_link_path = re.sub(r"^[^/]+\/", "", key)
    return file_link_path
15b4f446d902df58b5beb8e3b09d0d98016c0e76
52,522
def from_min_to_day(time, organisation=None):
    """
    Convert a str time with minutes as a unit (i.e. a task duration) into
    another str rounded time with an 8-hour day as a unit. If an organisation
    is given as parameter, we use its field `hours_by_day` to know how long a
    working day lasts.
    """
    if organisation is not None:
        hours_per_day = organisation["hours_by_day"] or 8
    else:
        hours_per_day = 8
    day_estimation = int(time) / 60 / hours_per_day
    return str(round(day_estimation, 1))
808d82ca8a6c0dc3124092ae7c5c9a99410ad7f6
52,523
def filter_by_datasets(data, datasets):
    """Given a list of datasets, limit the data frame to reads coming from
    only those datasets."""
    # Validate dataset names first
    permitted_datasets = set(list(data.dataset))
    for d in datasets:
        if d not in permitted_datasets:
            raise ValueError("Invalid dataset name: %s" % (d))

    data = data[data['dataset'].isin(datasets)]
    return data
e4753015a54e6adeb9d942a997c9441bc460fe14
52,525
import json


def parse_inputs(path_to_arguments):
    """
    Function to parse the file with the hyper-parameters of the network.

    Parameters
    ----------
    path_to_arguments : string
        Contains the path to a '.json' file that describes the
        hyper-parameters of the network

    Returns
    -------
    input_parameters : dictionary with some of the hyper-parameters
        "model_name": Name of the model (will be used to save the information),
        "batch_size": Batch size,
        "num_epochs": Number of epochs of training,
        "learning_rate": Learning Rate,
        "weight_decay": Weight decay for L2 Regularization,
        "num_classes": Number of different categories that the data is separated into,
        "num_channels": Number of channels used,
        "dataset_proportion": ,
        "device": CPU ('cpu') or GPU ('cuda'). By default, it will check if
            there is GPU available and use it; if not available, will use CPU.
    """
    with open(path_to_arguments, 'r') as j:
        return json.loads(j.read())
c31ed092a1894e9221554522be15a90ea48da938
52,526
def preprocess_sents(sentences_list):
    """Clean up sentences predicted by TRAM."""
    preprocessed_sents = []
    for s in sentences_list:
        # Replace any new lines separating parts of the sentence
        s = s.replace('\n', ' ')
        # Replace any double spaces which might result from the previous step
        # with a single space
        s = s.replace('  ', ' ')
        # Do a length check to skip empty strings and random punctuation
        if len(s) < 3:
            continue
        preprocessed_sents.append(s)
    return preprocessed_sents
4cabaf784b080f8fbf1a84d44719e934749c577d
52,531
def divide(a, b):
    """Divide two numbers.

    Parameters
    ----------
    a : int, float
        The dividend
    b : int, float
        The divisor

    Returns
    -------
    int, float: the quotient
    """
    return a / b
732b5f6ce0ea5840f45e5158ca5a49748bb25e31
52,534
import struct


def encode_nibbles(val, val2):
    """Encode two values as two nibbles in a byte.

    Specs:

    * **Nibble**:  MSN  LSN
    * **Byte**:    0b0000 0000
    * **Indexes**: 7654 3210
    * **Values**:  val2 val

    Requirement:

    * Only values (0-15) allowed
    """
    assert any([v > 2**4 - 1 for v in [val, val2]]) is False, \
        'Nibble value too big, only values (0-15) allowed'
    # shift val2 4 bits to the left into the most significant nibble
    fullbyte = (val2 << 4) + val
    return struct.pack('B', fullbyte)
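A hypothetical call packing 0x5 into the low nibble and 0xA into the high nibble:

print(encode_nibbles(0x5, 0xA))  # b'\xa5'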
736574176071c4a522a03f5c04b2aa5714e772aa
52,537
import re


def GetBIOSStrAndIntAttributeHandles(attr_type, attr_val_table_data):
    """
    From pldmtool GetBIOSTable of type AttributeValueTable, get the dict of
    attribute handle and its values based on the attribute type.

    Description of argument(s):
    attr_type                "BIOSInteger" or "BIOSString".
    attr_val_table_data      pldmtool output from GetBIOSTable table type AttributeValueTable.

    @return Dict of BIOS attribute and its value based on attribute type.
    """
    attr_val_int_dict = {}
    attr_val_str_dict = {}

    for item in attr_val_table_data:
        value_dict = {}
        attr_handle = re.findall(r'\(.*?\)', item["AttributeNameHandle"])

        # Example:
        # {'vmi_if0_ipv4_prefix_length': {'UpperBound': 32, 'LowerBound': 0}}
        if (item["AttributeType"] == "BIOSInteger"):
            value_dict["LowerBound"] = item["LowerBound"]
            value_dict["UpperBound"] = item["UpperBound"]
            attr_val_int_dict[attr_handle[0][1:-1]] = value_dict

        # Example:
        # {'vmi_if1_ipv4_ipaddr': {'MaximumStringLength': 15, 'MinimumStringLength': 7}}
        elif (item["AttributeType"] == "BIOSString"):
            value_dict["MinimumStringLength"] = item["MinimumStringLength"]
            value_dict["MaximumStringLength"] = item["MaximumStringLength"]
            attr_val_str_dict[attr_handle[0][1:-1]] = value_dict

    if (attr_type == "BIOSInteger"):
        return attr_val_int_dict
    elif (attr_type == "BIOSString"):
        return attr_val_str_dict
8201b5dc07cc1c3ed906ea23205fcf787711abd0
52,544
def av_good_mock(url, request):
    """Mock for availability, best case."""
    return {'status_code': 301, 'text': 'Light side'}
f66006908d9155d02d934c0438639b2797a9ddfd
52,549
import jinja2


def sif(variable, val):
    """"string if": `val` if `variable` is defined and truthy, else ''"""
    if not jinja2.is_undefined(variable) and variable:
        return val
    else:
        return ""
dc64cb59595dcbcf1e050da422b11eb8b28fa799
52,550
def is_model_field_changed(instance, field_name: str) -> bool:
    """
    Compares a model instance's field value to the value stored in the DB.
    If the object has not been stored in the DB (yet), the field is
    considered unchanged.

    :param instance: Model instance
    :param field_name: Model attribute name
    :return: True if the field value has been changed compared to the value
        stored in the DB.
    """
    assert hasattr(instance, field_name)
    if not hasattr(instance, "pk") or instance.pk is None:
        return False
    qs = instance.__class__.objects.all()
    params = {"pk": instance.pk, field_name: getattr(instance, field_name)}
    return not qs.filter(**params).exists()
148138ae5833598b938979f0d1f4e407d0ea8fce
52,551
def get_child_node_frequency(df):
    """
    Returns the frequency of each childQID.

    :param df: Dataframe to analyze
    :return freq: Dictionary of childQIDs and their frequencies
    """
    freq = df['childQID'].value_counts().to_dict()
    return freq
ae58534e53e984f91ef3b4ff1baaf79186b6c380
52,553
def _compare_first_n(series_1, series_2, n):
    """
    Utility function that checks whether the first n rows of two Pandas
    series are the same.
    """
    for i in range(n):
        if series_1.iloc[i] != series_2.iloc[i]:
            return False
    return True
a06059cebd758a12953fc31eb4b06867ea3851b0
52,562
def jaccard_index_calc(TP, TOP, P):
    """
    Calculate Jaccard index for each class.

    :param TP: true positive
    :type TP: int
    :param TOP: test outcome positive
    :type TOP: int
    :param P: condition positive
    :type P: int
    :return: Jaccard index as float
    """
    try:
        return TP / (TOP + P - TP)
    except Exception:
        return "None"
13f5a53ca114db2b3af928db0ab9fac885ad2923
52,565
def cli(ctx, invocation_id):
    """Get a detailed summary of an invocation, listing all jobs with their
    job IDs and current states.

    Output:

        The invocation step jobs summary. For example::

            [{'id': 'e85a3be143d5905b',
              'model': 'Job',
              'populated_state': 'ok',
              'states': {'ok': 1}},
             {'id': 'c9468fdb6dc5c5f1',
              'model': 'Job',
              'populated_state': 'ok',
              'states': {'running': 1}},
             {'id': '2a56795cad3c7db3',
              'model': 'Job',
              'populated_state': 'ok',
              'states': {'new': 1}}]
    """
    return ctx.gi.invocations.get_invocation_step_jobs_summary(invocation_id)
d5e83ecdc061e8161a5f7ca7ac9bcda864293b7d
52,567