Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def _stored_data_paths(wf, name, serializer):
    """Return list of paths created when storing data"""
    metadata = wf.datafile(".{}.alfred-workflow".format(name))
    datapath = wf.datafile(name + "." + serializer)
    return [metadata, datapath]
5f01d804db9f1848cc13e701a56e51c06dccdb31
2,302
def filter_word_counts(counts, min_length=1):
    """
    Given a list of (word, count) tuples, create a new list with only
    those tuples whose word is >= min_length.
    """
    stripped = []
    for (word, count) in counts:
        if len(word) >= min_length:
            stripped.append((word, count))
    return stripped
dbc3b6cf319663d34f4bcccb5109d0a2d6bb4f99
600,058
def read_vocabulary(vocab_file, threshold):
    """Read vocabulary file produced by get_vocab.py, and filter
    according to frequency threshold.
    """
    vocabulary = set()
    for line in vocab_file:
        word, freq = line.strip('\r\n ').split(' ')
        freq = int(freq)
        if threshold is None or freq >= threshold:
            vocabulary.add(word)
    return vocabulary
a83e69b32744f9948a785cb8e06a678704811f4b
551,413
def convert_retention_to_seconds(desired_retention, retention_unit):
    """Convert desired retention to seconds.

    :param desired_retention: The desired retention for snapshot schedule
    :param retention_unit: The retention unit for snapshot schedule;
        'hours', or anything else, which is treated as days
    :return: The integer value in seconds
    """
    duration_in_sec = None
    if desired_retention:
        if retention_unit == 'hours':
            duration_in_sec = desired_retention * 60 * 60
        else:
            duration_in_sec = desired_retention * 24 * 60 * 60
    return duration_in_sec
40a6284faf85d005b96ae5aa55429259eafc6324
671,737
def readFileContent(filename, mode):
    """A generic function to read the content of a file.

    Args:
        filename: The full filename
        mode: Python's file opening mode

    Return:
        The content
    """
    # Use a context manager so the file is closed even if read() raises.
    with open(filename, mode) as file:
        content = file.read()
    return content
520de28ada55de30d82cef52069d8226fd5194f5
348,322
def trunc_text(input_text: list, length: list) -> list:
    """
    Truncate each row of the input text to the corresponding input length.

    :param input_text: The input text to be truncated.
    :param length: The lengths used to truncate the text, one per row.
    :return: The truncated text.
    """
    return [row[:length[idx]] for idx, row in enumerate(input_text)]
3e2a9430aee4776c3a83c3571624ad79a75d9ce8
515,748
def dms_to_deg(deg, min, sec):
    """Convert a (deg, arcmin, arcsec) tuple to decimal degrees"""
    return deg + min/60. + sec/3600.
9eaf74e10d80cf66990836da950b2908af6677b1
670,636
from typing import Optional
from typing import List


def float_list_type(value: str) -> Optional[List[float]]:
    """
    Converts a string with comma separated float values into a list of floats.

    Parameters
    ----------
    value : str
        The string value to be converted

    Returns
    -------
    Optional[List[float]]
        If the string value is a comma separated list of floats, a list of
        floats is returned. Otherwise None is returned.
    """
    try:
        return [float(x) for x in value.split(',')]
    except ValueError:
        return None
4e6ce82664ab2af07f9585c975a08202da51f546
291,692
import random


def rand(ctx, plan, candidates):
    """Randomly choose one candidate."""
    return random.choice(candidates)
d5bb5a85da0d3478cda6a63226be9e3a0039f071
576,654
from typing import List
from typing import Callable


def add_to_partition(elem: str, partitions: List[List[str]],
                     is_equiv: Callable[[str, str], bool]):
    """Helper function to add an element to an appropriate equivalence class

    Adds the element to an existing class if one is available or creates a
    new class by adding a partition if necessary.

    Args:
        elem: The element to add
        partitions: The list of equivalence classes to add ``elem`` to
        is_equiv: A function that accepts two elements and returns whether
            those elements should be in the same equivalence class. For
            proper functioning, should implement an equivalence relation.

    Returns:
        The equivalence classes provided but with ``elem`` added.
    """
    for partition in partitions:
        if is_equiv(elem, partition[0]):
            partition.append(elem)
            return partitions
    partitions.append([elem])
    return partitions
c6c7771794d9fdf90a72aa525195cab536d175e3
605,349
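A brief usage sketch for add_to_partition (illustrative values, not part of the dataset row):

# Group words into equivalence classes by length.
partitions = []
for word in ["cat", "dog", "mouse", "horse", "ox"]:
    add_to_partition(word, partitions, lambda a, b: len(a) == len(b))
print(partitions)  # [['cat', 'dog'], ['mouse', 'horse'], ['ox']]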
import requests
import re
from bs4 import BeautifulSoup


def get_soup(fbref_url, name=None):
    """
    Get soup of FBRef URL

    Parameters
    ----------
    fbref_url : str
        Url of FBRef. Can either be the search url or an exact match url
    name : str
        Name to search for; must be used with the search url.
    """
    if name:
        fbref_url += "=" + name
    response = requests.get(fbref_url)
    # Strip HTML comment markers so tables hidden inside comments are parsed.
    comm = re.compile("<!--|-->")
    soup = BeautifulSoup(comm.sub("", response.text), "lxml")
    return soup
4550c898dcc6bf34c38fd36e073f3e1e48a6a25a
367,860
from typing import Union
from typing import List
from typing import Dict
from collections import OrderedDict


def sort_callbacks_by_order(
    callbacks: Union[List, Dict, OrderedDict]
) -> OrderedDict:
    """Creates a sequence of callbacks and sorts them.

    Args:
        callbacks: either a list of callbacks or an ordered dict

    Returns:
        sequence of callbacks sorted by ``callback order``

    Raises:
        TypeError: if `callbacks` is not one of `None`, `dict`,
            `OrderedDict`, `list`
    """
    if callbacks is None:
        output = OrderedDict()
    elif isinstance(callbacks, (dict, OrderedDict)):
        output = [(k, v) for k, v in callbacks.items()]
        output = sorted(output, key=lambda x: x[1].order)
        output = OrderedDict(output)
    elif isinstance(callbacks, list):
        output = sorted(callbacks, key=lambda x: x.order)
        output = OrderedDict([(i, value) for i, value in enumerate(output)])
    else:
        raise TypeError(
            f"Callbacks must be either Dict/OrderedDict or list, "
            f"got {type(callbacks)}"
        )
    return output
bba43ab6292e1132f8447e79403d9d730831e3de
43,649
def test_for_single_dict(source: str, calc_results: dict) -> bool:
    """
    Returns True if 'source' is a str representing a variable name
    within 'calc_results' whose value itself is a single-level
    dictionary of keyword values.
    """
    gotten = calc_results.get(source, "")
    return isinstance(gotten, dict)
345a604e050ddafadd00225e6acbc8e2cece5ff6
586,865
def get_common_xs(entries):
    """Return a mask of where there are Xs in all routing table entries.

    For example ``01XX`` and ``XX1X`` have common Xs in the LSB only, for
    this input this method would return ``0b0001``::

        >>> from rig.routing_table import RoutingTableEntry
        >>> entries = [
        ...     RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100),  # 01XX
        ...     RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010),  # XX1X
        ... ]
        >>> print("{:#06b}".format(get_common_xs(entries)))
        0b0001
    """
    # Determine where there are never 1s in the key and mask
    key = 0x00000000
    mask = 0x00000000
    for entry in entries:
        key |= entry.key
        mask |= entry.mask

    # Where there are never 1s in the key or the mask there are Xs which
    # are common to all entries.
    return (~(key | mask)) & 0xffffffff
f60e8c4dd0cf5b6b719115f72228f0d91e5c65c0
611,864
from typing import Pattern
import re


def compile_regex(pattern: str) -> Pattern[str]:
    """Compile a regex."""
    return re.compile(pattern)
1ddb1f2cdc8975e30904b89ab7b1104f854940a7
576,662
def list_samples(cursor):
    """
    A convenience function for accessing the list of sampleIds from the
    Samples table.

    :param cursor: a client exposing ``sscan_iter`` (a Redis-style cursor)
    :return: an iterator over the members of the "samples" set
    """
    return cursor.sscan_iter("samples", count=5000)
463db8aba1d7e6f680cd7faa43ecd970eef0d65f
365,174
def _ms_tuple(z_tup):
    """Stringify a tuple into a Mathematica-like argument list."""
    return '[' + ','.join(map(str, z_tup)) + ']'
61aff1d5036f11c1fe062718979418b9404dc0d5
538,080
def normalize(D, value=1):
    """normalize.

    Normalize the coefficients to a maximum magnitude.

    Parameters
    ----------
    D : dict or subclass of dict.
    value : float (optional, defaults to 1).
        Every coefficient value will be normalized such that the
        coefficient with the maximum magnitude will be +/- value.

    Return
    ------
    res : same as type(D).
        ``D`` but with coefficients that are normalized to be within
        +/- value.

    Examples
    --------
    >>> from qubovert.utils import DictArithmetic, normalize
    >>> d = {(0, 1): 1, (1, 2, 'x'): 4}
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): 1}

    >>> from qubovert.utils import DictArithmetic, normalize
    >>> d = {(0, 1): 1, (1, 2, 'x'): -4}
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): -1}

    >>> from qubovert import PUBO
    >>> d = PUBO({(0, 1): 1, (1, 2, 'x'): 4})
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): 1}

    >>> from qubovert import PUBO
    >>> d = PUBO({(0, 1): 1, (1, 2, 'x'): -4})
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): -1}
    """
    res = type(D)()
    mult = value / max(abs(v) for v in D.values())
    for k, v in D.items():
        res[k] = mult * v
    return res
d8dcce6a35254790e82f948608a3f848f1a80286
43,527
def navigate_to_history_entry(entryId: int) -> dict:
    """Navigates current page to the given history entry.

    Parameters
    ----------
    entryId: int
        Unique id of the entry to navigate to.
    """
    return {"method": "Page.navigateToHistoryEntry",
            "params": {"entryId": entryId}}
4e64a316ebee88ec266818d1813fd1073ceef7f2
176,201
def segmentize_geometry(geometry, segment=0.5):
    """
    Segmentizes the lines of a geometry.

    Parameters
    ----------
    geometry : OGRGeometry
        geometry object
    segment : float, optional
        for precision: maximum distance, in units of the input spatial
        reference, of the longest segment of the geometry polygon

    Returns
    -------
    OGRGeometry
        a congruent geometry realised by more vertices along its shape
    """
    geometry_out = geometry.Clone()
    geometry_out.Segmentize(segment)
    geometry = None
    return geometry_out
15d7a925b361ce6339d3b16b2d7a25af16516d48
179,207
def normalize_date(date):
    """Round datetime down to midnight."""
    return date.replace(hour=0, minute=0, second=0, microsecond=0)
d4021f6e984045d9bd0172fd1c7d060acc8054e0
679,817
def to_interval(points: list):
    """
    Transforms a set of points into a set of intervals - the orthogonal hull.

    Args:
        points (list of tuples): the points

    Example:
        [(0, 2), (1, 3)] --> [[0, 1], [2, 3]]

    Example 2:
        [(0, 2), (1, 5), (4, 3)] --> [[0, 4], [2, 5]]

    Example 3:
        [(0, 2, 9), (1, 5, 0), (4, 3, 6)] --> [[0, 4], [2, 5], [0, 9]]
    """
    intervals = []
    for dimension in range(len(points[0])):
        interval = [points[0][dimension], points[0][dimension]]
        for point in range(len(points)):
            if interval[0] > points[point][dimension]:
                interval[0] = points[point][dimension]
            if interval[1] < points[point][dimension]:
                interval[1] = points[point][dimension]
        intervals.append(interval)
    return intervals
782a6f41b536091a9aceff3ea216c3aa100e7aff
692,018
def yes_or_no(question):
    """Yes or no question, from gist
    https://gist.github.com/garrettdreyfus/8153571

    Args:
        question (str): Question to ask

    Returns:
        bool: Returns True or False to question
    """
    reply = str(input(question + ' (y/n): ')).lower().strip()
    # Just look at first char
    if len(reply):
        if reply[0] == 'y':
            return True
        if reply[0] == 'n':
            return False
        else:
            return yes_or_no("Uhhhh... please enter ")
    # Invalid input
    else:
        return yes_or_no("Uhhhh... please enter ")
17ab395b0b39a2ac6169ae179489827e953d4011
478,352
def _thunkByte(c, mask=0xff, shift=0):
    """Extract an integer from a byte applying a mask and a bit shift.

    @c      character byte
    @mask   the AND mask to get the desired bits
    @shift  negative to shift right, positive to shift left, zero for no shift
    """
    val = c & mask
    if shift < 0:
        val = val >> abs(shift)
    elif shift > 0:
        val = val << shift
    return val
e24c912c17dc0e9bc01522c88903e2cef78e45cc
354,139
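A brief usage sketch for _thunkByte, extracting the high nibble of a byte:

print(hex(_thunkByte(0xAB, mask=0xF0, shift=-4)))  # 0xa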
from typing import Dict
import re


def apply_list_replace(input_str: str, replacements: Dict[str, str]) -> str:
    """
    Apply a series of replacements on the input.

    :param input_str: the string to be modified
    :param replacements: a Dict regex -> replacement. Each item will be
        passed to re.sub()
    :return: the modified string
    """
    temp = input_str
    if isinstance(replacements, dict):
        for pattern, replacement in replacements.items():
            temp = re.sub(pattern, replacement, temp)
    return temp
287e1a7763e7f56719adf566358c62156bcf668c
12,920
import torch


def generate_original_PE(length: int, d_model: int) -> torch.Tensor:
    """Generate positional encoding as described in the original paper.

    Parameters
    ----------
    length:
        Time window length, i.e. K.
    d_model:
        Dimension of the model vector.

    Returns
    -------
    :class:`torch.Tensor` of shape (K, d_model).
    """
    PE = torch.zeros((length, d_model))
    pos = torch.arange(length).unsqueeze(1)
    # Note: the original Transformer paper uses 10000 as the base of the
    # frequency term; this implementation uses 1000.
    PE[:, 0::2] = torch.sin(
        pos / torch.pow(1000, torch.arange(0, d_model, 2, dtype=torch.float32) / d_model))
    PE[:, 1::2] = torch.cos(
        pos / torch.pow(1000, torch.arange(1, d_model, 2, dtype=torch.float32) / d_model))
    return PE
c04f4ee9c09d61ce370c92485517d22c3012233f
563,532
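A brief usage sketch for generate_original_PE, assuming PyTorch is installed:

pe = generate_original_PE(8, 4)
print(pe.shape)  # torch.Size([8, 4])
print(pe[0])     # position 0: sin terms are 0, cos terms are 1 -> tensor([0., 1., 0., 1.])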
import logging


def scope_logger(cls):
    """
    Class decorator for adding a class-local logger

    Example:
    >>> @scope_logger
    >>> class Test:
    >>>     def __init__(self):
    >>>         self.log.info("class instantiated")
    >>> t = Test()
    """
    cls.log = logging.getLogger('{0}.{1}'.format(cls.__module__, cls.__name__))
    return cls
84e00f8f668accd362d4fc29016fd2ec95f0bcef
38,102
def parse_time(time_string):
    """
    Parses the given time string to extract hour and minute.
    E.g. 11:34 -> 11, 34 and 11 -> 11, 0

    :param time_string: time in "HH:MM" or "HH" format
    :return: Tuple of hour and minute
    """
    if ":" in time_string:
        # Convert both to int from string
        hour, minute = tuple(map(int, time_string.split(":")))
    else:
        hour, minute = int(time_string), 0
    return hour, minute
dee9ef0a4e136939086d2889d610b172711626cb
438,063
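A brief usage sketch for parse_time:

print(parse_time("11:34"))  # (11, 34)
print(parse_time("7"))      # (7, 0)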
def _FormatAsEnvironmentBlock(envvar_dict):
    """Format as an 'environment block' directly suitable for CreateProcess.

    Briefly this is a list of key=value\0, terminated by an additional \0.
    See CreateProcess documentation for more details."""
    block = ''
    nul = '\0'
    for key, value in envvar_dict.items():
        block += key + '=' + value + nul
    block += nul
    return block
ceb08bd3857c4d1c64b38f833522f91efdffc7ef
265,249
import math


def fib(n):
    """Calculates the n-th Fibonacci number iteratively"""
    if n < 1:
        raise ValueError("expected integer")
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    a, b = 1, 1
    for i in range(2, n + 1):
        a, b = b, a + b
    return a
4fb6db5a59767d0e7256e467bc454e75f11fe5cc
92,337
from typing import Sequence


def divide_sequence_into_chunks(seq: Sequence, chunk_size: int) -> Sequence[Sequence]:
    """
    Returns a sequence of sub-sequences of the input sequence, each of
    length chunk_size. If the length of the input sequence is not
    divisible by chunk_size, the last element of the return value
    contains fewer than chunk_size elements.

    >>> divide_sequence_into_chunks([0, 1, 2, 3], chunk_size=2)
    [[0, 1], [2, 3]]
    >>> divide_sequence_into_chunks([0, 1, 2, 3, 4], chunk_size=2)
    [[0, 1], [2, 3], [4]]

    :param seq:
    :param chunk_size:
    :return:
    """
    return [seq[x:x + chunk_size] for x in range(0, len(seq), chunk_size)]
be3c6c62139a546e30807804ba6ebe7f73d6403d
490,363
def __unique_trace_events__(trace_events):
    """
    Finds unique trace events.

    :param trace_events: Events found in the trace (filtered by family).
    :return: Unique trace events
    """
    unique_events = {}
    for line in trace_events:
        if line[0] not in unique_events:
            unique_events[line[0]] = [line[1]]
        elif line[1] not in unique_events[line[0]]:
            unique_events[line[0]].append(line[1])
    return unique_events
906e931805fc1396209d99e6aa26184c023b99aa
239,900
import re


def parse_url_to_topic(method, route):
    """
    Transforms a URL to a topic.

    `GET /bar/{id}` -> `get.bar.*`
    `POST /bar/{id}` -> `post.bar.*`
    `GET /foo/bar/{id}/baz` -> `get.foo.bar.*.baz`

    Possible gotchas:
    `GET /foo/{id}` -> `get.foo.*`
    `GET /foo/{id}:action` -> `get.foo.*`

    However, once it hits the service the router will be able to
    distinguish the two requests.
    """
    route = route.replace('.', '?')
    route = route.replace('/', '.').strip('.')
    topic = f'{method.value.lower()}.{route}'
    # need to replace `{id}` and `{id}:some_method` with just `*`
    return re.sub(r"\.\{[^\}]*\}[:\w\d_-]*", ".*", topic)
15cfacdb8071cfea694cbfd8d38f2cb12fd65ab5
595,232
def pageHeader(headline="", tagline=""):
    """
    *Generate a pageHeader - TBS style*

    **Key Arguments:**
        - ``headline`` -- the headline text
        - ``tagline`` -- the tagline text for below the headline

    **Return:**
        - ``pageHeader`` -- the pageHeader
    """
    pageHeader = """
        <div class="page-header" id=" ">
            <h1>%(headline)s<br><small>%(tagline)s</small></h1>
        </div>""" % locals()
    return pageHeader
7d9e91df8af2fff92b0b7096cd1a13198d899e15
709,034
def rename_columns(df):
    """
    Renames the columns of the data frame

    Parameters
    ----------
    df : pandas data frame
        The building benchmarking data for a city

    Returns
    ----------
    df
        The modified data frame
    """
    df.rename({'zip': 'ZIPCode',
               'utility_name': 'UtilityCompany',
               'state': 'State',
               'service_type': 'ServiceType',
               'ownership': 'Ownership',
               'commercial_rate': 'CommercialRate',
               'industrial_rate': 'IndustrialRate',
               'residential_rate': 'ResidentialRate'},
              axis=1, inplace=True)
    return df
8e13b98c6be445fbe7a5ecd87560f81650c76e9c
430,697
async def transform_future(f, awaitable):
    """Apply a function to the result of an awaitable; return a future
    which delivers the result.
    """
    return f(await awaitable)
e602b622d8a46c4529df9088cf0a665ce8ecef90
68,088
from typing import Dict


def get_config_pipeline(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
    """
    Get the pipeline configuration

    :param user_cfg: user configuration
    :type user_cfg: dict
    :return: cfg: partial configuration
    :rtype: cfg: dict
    """
    cfg = {}
    if "pipeline" in user_cfg:
        cfg["pipeline"] = user_cfg["pipeline"]
    return cfg
b75d081a873b4c7f625f0bd831587013d47fe452
304,669
def _l1_regularization(l1, model):
    """Computes the L1 regularization for the given model

    Args:
        l1 (float): L1 parameter
        model (:obj:`torch.nn.Module`): Model to use

    Returns:
        float: L1 loss (i.e. l1 * l1_norm(params))
    """
    l1_loss = sum(param.norm(1) for param in model.parameters())
    return l1 * l1_loss
32826672a7de00f8a0412e2496e6ebfea213b502
704,427
def slice_ends(word, count=1):
    """Slice letters off each side, in a symmetric fashion.

    The idea is to find interesting substring word combinations.

    :param word (string): the word to modify.
    :param count (int, optional): The number of letters to chop off each end.
    :rtype string: The modified string.

    >>> slice_ends('potatoes', count=2)
    'tato'
    """
    if not count:
        return word
    return word[count:len(word) - count]
296305e917fb2e08ff1cc6edbc1a7d3ee4ee568c
171,153
import math


def Ar(h, r):
    """Surface area of a spherical cap of height 'h' on a sphere of radius 'r'."""
    return 2. * math.pi * r * h
3a1e83b7e372cdc5be8612393c38c01e5863d62e
351,624
def slice2enlist(s):
    """Convert a slice object into a list of (new, old) tuples."""
    if isinstance(s, (list, tuple)):
        return enumerate(s)
    step = 1 if s.step is None else s.step
    start = 0 if s.start is None else s.start
    return enumerate(range(start, s.stop, step))
9414afdb247e2f7eadc35a77edbcc78ec7353d1e
640,211
def find_all_indexes(text, pattern):
    """
    Return a list of starting indexes of all occurrences of pattern in
    text, or an empty list if not found.

    Runtime: O(n * m), where n is the text length and m the pattern length
    Condition: Iterates over the text to find the pattern
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)

    # Empty list to store the indexes of the pattern
    pattern_indexes = []

    # An empty pattern matches at every index of the text
    if pattern == '':
        for index in range(0, len(text)):
            pattern_indexes.append(index)
        return pattern_indexes

    # Iterates through and appends the index wherever the pattern matches
    for index, _ in enumerate(text):
        if pattern == text[index:(index + len(pattern))]:
            pattern_indexes.append(index)
    return pattern_indexes
ba2aade70604f6544b1518980a8c7d52072acf52
304,729
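A brief usage sketch for find_all_indexes; note that overlapping occurrences are all reported:

print(find_all_indexes('abababa', 'aba'))  # [0, 2, 4]
print(find_all_indexes('abc', 'z'))        # []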
import base64


def to_base64_creds(user, password):
    """
    Create a BasicAuth (user:pwd) string encoded in Base64.

    :param user: to be encoded
    :param password: to be encoded
    :return: a str that represents 'user:pwd' encoded in Base64
    """
    message_bytes = "{}:{}".format(user, password).encode('ascii')
    base64_bytes = base64.b64encode(message_bytes)
    return base64_bytes.decode('ascii')
1caea46aba91064c5814349388b0b9dcfbca4a62
559,953
def get_fastq_stats(fastq_filehandle):
    """Return some basic statistics for a fastq file."""
    read_count, gc_count, total_base_count = 0, 0, 0
    for i, line in enumerate(fastq_filehandle):
        if i % 4 != 1:
            # only process the sequence lines from the fastq
            # (each record is four lines; the sequence is the second)
            continue
        read_count += 1
        for base in line.strip().upper():
            total_base_count += 1
            if base in ['G', 'C']:
                gc_count += 1
    gc_fraction = gc_count / total_base_count
    return read_count, gc_fraction
b4fc34668d7d2842617e035a059798888d3d8640
677,517
import yaml


def load_config_file(config_file):
    """
    Load config file.

    Parameters
    ----------
    config_file : str
        Path of the yaml file with all the parameters for the detector.

    Returns
    -------
    config : dict
        Parsed parameters.
    """
    # Load the config file; the context manager ensures the handle is closed.
    with open(config_file) as yaml_file:
        config = yaml.load(yaml_file, Loader=yaml.FullLoader)
    return config
bc18194c21ab03e85e34e1ac72d593a028f92770
403,403
def crop_face(frame, box):
    """
    This function takes an image and a bounding box and crops the image.
    """
    (x, y, w, h) = box
    return frame[y: y + h, x: x + w]
7d885406ed5a141aa52c42f2f5eab713a07182b4
198,404
from datetime import datetime


def convert_date(_x):
    """Extract and reformat a date string.

    :param str _x: Date string to convert, in '%Y-%m-%d' format
    :return: The parsed date
    :rtype: datetime.date
    """
    date = datetime.strptime(_x, '%Y-%m-%d').date()
    return date
7e32bad1c767ef1814ad28b581b61f2cdf40d297
408,303
def parse_languages(languages: str) -> list:
    """
    Turn the languages argument into a proper list of hreflang
    codes/languages.

    :param languages: Argument for languages "<language 1>,<language 2>,..."
    :return: A list of str that contains the different hreflang codes or
        languages
    """
    # Remove the " symbol from the str
    languages = languages.replace("\"", "")
    # Split the str by , to get a list of languages
    return languages.split(",")
abce0f5b9c485b9c7f13359e170f8396e79a7509
405,581
import gzip
import pickle


def load_gzipped_pickle(filename: str) -> dict:
    """
    The function loads gzipped and pickled items / sessions object.

    Parameters
    ----------
    filename : str

    Returns
    -------
    pickled_object : dict
    """
    with gzip.open(filename, 'rb') as fstream:
        pickled_object = pickle.load(fstream)
    return pickled_object
4601083db8484904e91abf6c636cace981f5a713
595,996
def colorbar_extension(colour_min, colour_max, data_min, data_max):
    """
    For the range specified by `colour_min' to `colour_max', return whether
    the data range specified by `data_min' and `data_max' is inside, outside
    or partially overlapping. This allows you to automatically set the
    `extend' keyword on a `matplotlib.pyplot.colorbar' call.

    Parameters
    ----------
    colour_min, colour_max : float
        Minimum and maximum value of the current colour bar limits.
    data_min, data_max : float
        Minimum and maximum value of the data limits.

    Returns
    -------
    extension : str
        Will be 'neither', 'both', 'min' or 'max' for the case when the
        colour_min and colour_max values are: equal to the data; inside the
        data range; only larger or only smaller, respectively.
    """
    if data_min < colour_min and data_max > colour_max:
        extension = 'both'
    elif data_min < colour_min and data_max <= colour_max:
        extension = 'min'
    elif data_min >= colour_min and data_max > colour_max:
        extension = 'max'
    else:
        extension = 'neither'
    return extension
517c71e7d0cd1792e48d87e2275a82f129d51fca
680,918
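A brief usage sketch for colorbar_extension (illustrative values):

# Data exceeds the colour limits on the low side only, so extend the
# colorbar at its minimum.
print(colorbar_extension(0.0, 1.0, -0.5, 0.9))  # 'min'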
def _tf_tensor_name_to_tflite_name(tensor_tf_name: str) -> str:
    """Convert a TF tensor name to the format used by TFLiteConverter.

    Args:
        tensor_tf_name: Tensor name to convert.

    Returns:
        Converted tensor name.
    """
    # See get_tensor_name() in //third_party/tensorflow/lite/python/util.py
    return tensor_tf_name.split(':')[0]
b5e776d2a43a4d3c91a4f7e06eb7d2d402e00966
215,439
import math


def prime_divisors(n):
    """
    Returns all distinct prime divisors of a number

    Parameters
    ----------
    n : int
        denotes the positive integer whose prime divisors need to be
        found out

    return : array
        returns an array of integers denoting the distinct prime
        divisors of n
    """
    arr = []
    if n < 2:
        return arr
    # Use integer division to avoid float precision issues for large n
    while n % 2 == 0:
        arr.append(2)
        n //= 2
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            arr.append(int(i))
            n //= i
    if n > 2:
        arr.append(int(n))
    if len(arr) == 1:
        return arr
    # Deduplicate consecutive repeated factors
    temp_arr = [arr[0]]
    for i in range(1, len(arr)):
        if arr[i] != arr[i - 1]:
            temp_arr.append(arr[i])
    return temp_arr
3ebc2e88cd974cac2f9f4cfebd4751a448660c49
87,861
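A brief usage sketch for prime_divisors (360 = 2^3 * 3^2 * 5):

print(prime_divisors(360))  # [2, 3, 5]
print(prime_divisors(97))   # [97] (97 is prime)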
def trace_ensembles_for_replica(replica, steps):
    """
    List of which ensemble a given replica was in at each MC step.

    Parameters
    ----------
    replica : replica ID
    steps : iterable of :class:`.MCStep`
        input data

    Returns
    -------
    list
        list of ensembles
    """
    return [s.active[replica].ensemble for s in steps]
8fe800a6c4a1d7b8acf109ceced3920978699085
599,574
def _ibp_sub(lhs, rhs):
    """Propagation of IBP bounds through a subtraction.

    Args:
        lhs: Left-hand side of the subtraction.
        rhs: Right-hand side of the subtraction.

    Returns:
        out_bounds: IntervalBound.
    """
    return lhs - rhs
45ed06feea14275ddd64e1ec60727123db52a5cd
706,262
def standardize(dataframe):
    """Scales numerical columns using their means and standard deviation to
    get z-scores: the mean of each numerical column becomes 0, and the
    standard deviation becomes 1. This can help the model converge during
    training.

    Args:
        dataframe: Pandas dataframe

    Returns:
        Input dataframe with the numerical columns scaled to z-scores
    """
    dtypes = list(zip(dataframe.dtypes.index, map(str, dataframe.dtypes)))
    # Normalize numeric columns.
    for column, dtype in dtypes:
        if dtype == "float32":
            dataframe[column] -= dataframe[column].mean()
            dataframe[column] /= dataframe[column].std()
    return dataframe
3016edf510920d8a674608a2b595b9216ad9bde4
278,248
def ppdistance(a, b):
    """Returns the squared distance between the two 2D points.

    a - [float, float], b - [float, float]
    return - (float)
    """
    return (b[0] - a[0])**2 + (b[1] - a[1])**2
348bf3816baef86f88f3ede2a4f176d658c64e1d
450,011
def parse_field(field):
    """
    Parse a field dictionary and return a properly formatted string.

    Args:
        field (dict): A dictionary of model field arguments

    Returns:
        str: A formatted string of the model field
    """
    quote_fields = ['db_column', 'db_tablespace', 'help_text',
                    'unique_for_date', 'unique_for_month',
                    'unique_for_year', 'upload_to', 'verbose_name']
    default_str_fields = ['CharField', 'EmailField', 'FileField',
                          'FilePathField', 'ImageField',
                          'GenericIPAddressField', 'SlugField',
                          'TextField', 'URLField']
    entry = ""
    field_clean = {}
    field_name = field.pop('field_name', None)
    field_type = field.pop('field_type', None)
    if field_name == '':
        return ''
    for f in field:
        if field[f] != '':
            if f in quote_fields:
                field[f] = "'%s'" % field[f]
            if field_type in default_str_fields and f == 'default':
                field[f] = "'%s'" % field[f]
            field_clean[f] = field[f]
    try:
        choices = field_clean['choices'].lstrip("'").rstrip("'")
        choice_name = '%s_choices' % field_name
        choice_name = choice_name.upper()
        field_clean['choices'] = choice_name
        entry += ' %s = %s\n' % (choice_name, choices)
    except KeyError:
        pass
    kwargs = ', '.join(['%s=%s' % (f, field_clean[f]) for f in field_clean])
    kwargs = kwargs.replace('fk_model=', '')
    entry += ' %s = models.%s(%s)\n' % (field_name, field_type, kwargs)
    return entry
cdb40f3a47654ef10315fdceba65e22d399a080e
357,891
def get_cell_coords(row_coords, col_coords):
    """Get top-left and bottom-right coordinates for each cell using row
    and column coordinates"""
    cell_coords = {}
    for i, row_coord in enumerate(row_coords):
        cell_coords.setdefault(i, {})
        for j, col_coord in enumerate(col_coords):
            x1 = col_coord[0]
            y1 = row_coord[1]
            x2 = col_coord[2]
            y2 = row_coord[3]
            cell_coords[i][j] = (x1, y1, x2, y2)
    return cell_coords
be27fb640209b338f507eafc850e1c98ae8bbaf4
355,979
def sum_ap(N, d):
    """Return the sum of the arithmetic sequence of multiples of d that
    are <= N (i.e. d + 2d + ... + nd where n = N // d)."""
    n = N // d
    return (n * (2 * d + (n - 1) * d)) // 2
0e2ba16540a3a14ed73d5d8a80f7ef998c805336
388,866
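A brief worked example for sum_ap: the multiples of 3 up to 999 give n = 333 terms, so the sum is 3 * 333 * 334 / 2 = 166833.

print(sum_ap(999, 3))  # 166833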
import re


def invalid_year_format(command_line_arguments):
    """Checks the input year format; returns True if it is invalid."""
    year_input_string = command_line_arguments.year
    # Expected format: "YYYY-YYYY" ranges separated by ", "
    if year_input_string and re.search(
            r"^\d+(-)\d+((,)(\s)\d+(-)\d+)*$", year_input_string) is None:
        return True
    return False
e2c1cd948795462fc68ebf5c482ccac3bfaf4c93
577,266
from typing import List
from typing import Union


def ssum(x: List) -> Union[int, float]:
    """
    Sum of array items

    Parameters
    ----------
    x : list
        Array

    Returns
    -------
    Union[int, float]
        Result of sum

    Examples
    -------
    >>> ssum([1, 2])
    3
    """
    return sum(x)
3e9a164c80b12f38528aeb93be6318ad50d00ab3
294,748
def len_iter(iterable):
    """An efficient implementation for finding the length of an iterable
    without needing to retain its contents in memory."""
    return sum(1 for _ in iterable)
58a0288b79f5d76cf3c573b5f110f57a4e406082
378,503
from collections import OrderedDict


def linear_set_generator(random, args):
    """
    Generates a list of continuous values of the size of a representation.
    This function requires that a bounder is defined on the
    EvolutionaryAlgorithm.

    See Also
    --------
    inspyred.ec

    Parameters
    ----------
    random : Random
        Expected to behave like a NumPy RandomState (``uniform`` takes a
        size argument).
    args : dict
        representation: set containing the possible values
        max_size: int, default: 9
        variable_size: bool, default: True

    Returns
    -------
    list
        A list containing tuples - sample of the elements and linear value.
        If variable_size is True the list size is up to max_size,
        otherwise the candidate size equals max_size.
    """
    bounder = args.get("_ec").bounder
    representation = args.get('representation')
    max_size = args.get('max_size', 9)
    variable_size = args.get('variable_size', True)
    if variable_size:
        size = random.randint(1, max_size)
    else:
        size = max_size

    indices = random.sample(range(len(representation)), size)
    values = random.uniform(next(bounder.lower_bound),
                            next(bounder.upper_bound), len(indices))
    return OrderedDict({i: v for i, v in zip(indices, values)})
39fef7e79d83d6c281e290e4387829d6f3343410
33,280
def flatten(x, out=None, prefix='', sep='.'):
    """Flatten a nested dict/list structure into a flat dict keyed by path."""
    out = out if out is not None else {}
    if isinstance(x, dict):
        for k in x:
            flatten(x[k], out=out, prefix=f"{prefix}{sep if prefix else ''}{k}", sep=sep)
    elif isinstance(x, (list, tuple)):
        for k, v in enumerate(x):
            # recurse on the value, not the index
            flatten(v, out=out, prefix=f"{prefix}{sep if prefix else ''}{k}", sep=sep)
    else:
        out[prefix] = x
    return out
8a76a3ee959365ba4596c8f186347352ca57f0b7
121,190
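A brief usage sketch for flatten (with the list branch fixed to recurse on the value rather than the index):

print(flatten({"a": {"b": 1, "c": [2, 3]}}))
# {'a.b': 1, 'a.c.0': 2, 'a.c.1': 3}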
def process_fields(tag):
    """
    Process the 'fields' element of a tag dictionary.

    Process the fields string - a tab-separated string of "key:value"
    pairs - by generating key-value pairs and appending them to the tag
    dictionary. Also append a list of keys for said pairs.

    :param tag: dict containing a tag
    :returns: dict containing the key-value pairs from the fields element,
        plus a list of keys for said pairs
    """
    fields = tag.get('fields')
    if not fields:
        # do nothing
        return {}
    # split the fields string into a dictionary of key-value pairs
    result = dict(f.split(':', 1) for f in fields.split('\t'))
    # append all keys to the dictionary
    result['field_keys'] = sorted(result.keys())
    return result
41e7e958fe350135559673ae7877ffb1f254caee
50,897
def get_loopbacks(yaml):
    """Return a list of all loopbacks."""
    ret = []
    if "loopbacks" in yaml:
        for ifname, _iface in yaml["loopbacks"].items():
            ret.append(ifname)
    return ret
9651f264f80a0bfee66b012c8998c467eedecb0b
631,172
def child_query_url(finder, obj):
    """Return Ajax query URL for children of given object."""
    return finder.child_query_url(obj)
7fc2b1993a7ef5fb8f70cd81250cd9a1c7547d27
567,955
def get_connection_types(itfs, dst, src, bidir, dominant_type=None):
    """
    Gets the types of a connection depending on the source and destination
    types. Determines the overlapping "drives" and "receives" types for the
    destination and source respectively. If the connection is bidirectional
    then the "drives" and "receives" types of both destination and source
    are taken into account.

    >>> itfs = {}
    >>> itfs['et1'] = { 'receives': ['type_a'], 'drives': 'type_b' }
    >>> itfs['ap1'] = { 'receives': ['type_b', 'type_c'], 'drives': 'type_a' }
    >>> itfs['ap2'] = { 'drives': ['type_b', 'type_c'] }
    >>> get_connection_types(itfs, 'ap1', 'et1', bidir=False)
    ('type_a',)
    >>> get_connection_types(itfs, 'et1', 'ap1', bidir=False)
    ('type_b',)
    >>> get_connection_types(itfs, 'et1', 'ap1', bidir=True)
    ('type_a', 'type_b')
    >>> get_connection_types(itfs, 'ap2', 'ap1', bidir=False)
    ('type_b', 'type_c')
    >>> get_connection_types(itfs, 'ap2', 'ap1', bidir=False, dominant_type='type_c')
    ('type_c',)
    """
    def get(itf, direction_types):
        if direction_types not in itf:
            return set()
        if isinstance(itf[direction_types], str):
            return set([itf[direction_types]])
        return set(itf[direction_types])

    driving_types = get(itfs[dst], "drives")
    receiving_types = get(itfs[src], "receives")
    if bidir:
        # If the connection is bidirectional we also take all the types
        # in the opposite direction into account
        driving_types.update(get(itfs[dst], "receives"))
        receiving_types.update(get(itfs[src], "drives"))
    if dominant_type in driving_types or dominant_type in receiving_types:
        # A dominant type will override any other types for the given
        # connection
        return (dominant_type,)
    return tuple(sorted(driving_types & receiving_types))
4e96ec4ea44a1eca2f5c1d91d40c2de23930ec37
517,817
def _findParam(name, node):
    """Searches an XML node in search of a particular param name.

    :param name: str indicating the name of the attribute
    :param node: xml element/node to be searched
    :return: None, or a parameter child node
    """
    for attr in node:
        if attr.get('name') == name:
            return attr
2bcd1e8006a0ac1a748ea510df71269d9b6101e3
355,144
def min_edit_distance(original, target, delete_cost=1, insert_cost=1,
                      subst_cost=None):
    """
    The minimum edit distance algorithm computes the minimum number of
    insertions, deletions and modifications required to transform an
    original sequence (of letters, words) into the target sequence.
    M.E.D. is often used in measuring a segmentation algorithm's accuracy.

    `original` - Original sequence, usually the output of a segmentation
        algorithm.
    `target` - The sequence we're comparing our segmented output against.
        It's usually a hand-segmented list of words.
    `delete_cost` - Cost of deleting an element from original.
    `insert_cost` - Cost of inserting an element into original.
    `subst_cost` - Cost function giving the cost of substituting one
        element with another.
    """
    # We use a constant substitution cost of 1 when the function is not
    # specified.
    subst_cost = subst_cost or (lambda a, b: 1 if a != b else 0)

    # We use dynamic programming to compute the minimal edit distance
    # between two lists. If m = |original| and n = |target|, the runtime
    # complexity is O(m * n). Keeping only two rows cuts memory usage
    # down to O(n).
    M, N = len(original), len(target)
    dp = [0] * (N + 1)
    for i in range(M + 1):
        dp2 = [0] * (N + 1)
        for j in range(N + 1):
            # dp2[j] = m.e.d. between original[:i] and target[:j]
            dp2[j] = i * delete_cost + j * insert_cost
            if i == 0 or j == 0:
                continue
            # Delete original[i].
            dp2[j] = delete_cost + dp[j]
            # Insert target[j] after original[i] in original.
            dp2[j] = min(dp2[j], insert_cost + dp2[j - 1])
            # Modify original[i] into target[j].
            dp2[j] = min(dp2[j],
                         subst_cost(original[i - 1], target[j - 1]) + dp[j - 1])
        dp = dp2
    return dp[N]
ad4003d5c88bf135743c074128625856f9038867
525,952
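A brief usage sketch for min_edit_distance; the classic 'kitten' -> 'sitting' example needs three unit-cost edits:

print(min_edit_distance(list("kitten"), list("sitting")))  # 3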
def calculate_bmi(weight, height):
    """
    Calculates BMI given the weight and height as float, using the
    imperial formula (weight in pounds, height in inches; factor 703)
    """
    bmi = (weight / (height * height)) * 703.0
    return bmi
8495b11598e50516dca80965d5063df92aa78f40
702,974
def broadcast_shapes(*shapes):
    """
    Broadcast any number of shapes against each other.

    Parameters
    ----------
    *shapes : tuples
        The shapes to broadcast

    Example
    -------
    >>> broadcast_shapes((1, 5), (3, 2, 1))
    (3, 2, 5)
    """
    if any(not isinstance(s, tuple) for s in shapes):
        raise TypeError('The input shapes are not tuples.')
    ndim = max(len(s) for s in shapes)
    # Left-pad shorter shapes with 1s so all shapes have the same rank.
    shapes_ = [(ndim - len(s)) * [1] + list(s) for s in shapes]
    outshape = []
    for idim, dims in enumerate(zip(*shapes_)):
        dims = [dim for dim in dims if dim != 1]
        if len(dims) == 0:
            d = 1
        elif any(dim != dims[0] for dim in dims):
            raise ValueError(
                'The shapes could not be broadcast together {}'.format(
                    ' '.join(str(s) for s in shapes)))
        else:
            d = dims[0]
        outshape.append(d)
    return tuple(outshape)
06a2cebee284836b28654a21ad70b59842366032
619,861
def _parse_conll_identifier(
    value: str, line: int, field: str, *, non_zero=False
) -> int:
    """Parse a CoNLL token identifier, raising the appropriate exception if
    it is invalid.

    Just propagate the exception if `value` does not parse to an integer.
    If `non_zero` is truthy, raise an exception if `value` is zero.
    `field` and `line` are only used for the error message."""
    res = int(value)
    if res < 0:
        raise ValueError(
            "At line {line}, the `{field}` field must be a non-negative integer, got {value!r}".format(
                line=line, field=field, value=value
            )
        )
    elif non_zero and res == 0:
        raise ValueError(
            "At line {line}, the `{field}` field must be a positive integer, got {value!r}".format(
                line=line, field=field, value=value
            )
        )
    return res
c5f1d186cbc07de931a412e314de22e4dfb46fe6
658,624
from typing import Dict
from typing import Any


def make_resampling_test(
    freq: str = "D", resample: bool = True, agg: str = "Mean"
) -> Dict[Any, Any]:
    """Creates a resampling dictionary with specifications defined by the
    arguments, for testing purposes.

    Parameters
    ----------
    freq : str
        Value for the 'freq' key of dictionary.
    resample : bool
        Value for the 'resample' key of dictionary.
    agg : str
        Value for the 'agg' key of dictionary.

    Returns
    -------
    dict
        Resampling dictionary that will be used for unit tests.
    """
    return {"freq": freq, "resample": resample, "agg": agg}
2f6402a9c7929200b1cdd5b076b1e2e2133fb0c2
589,022
import logging


def _compare_relative_difference(value: float, expected_value: float,
                                 threshold: float) -> bool:
    """Compares relative difference between value and expected_value
    against threshold.

    Args:
        value: a float value to be compared to expected value.
        expected_value: a float value that is expected.
        threshold: a float between 0 and 1.

    Returns:
        a boolean indicating whether the relative difference is within the
        threshold.
    """
    if value != expected_value:
        # Guard against division by zero: if expected_value is 0 and the
        # values differ, the relative difference is undefined, so fail.
        # (The original referenced relative_diff here before assignment.)
        if not expected_value:
            logging.warning('Expected value is 0 but got %f', value)
            return False
        relative_diff = abs(value - expected_value) / abs(expected_value)
        if relative_diff > threshold:
            logging.warning('Relative difference %f exceeded threshold %f',
                            relative_diff, threshold)
            return False
    return True
7692cd3260a0c5aa2a3ec686fec7c5b06214167d
249,461
def fibonacci(length, a, b):
    """Return the beginning of the fibonacci chain with elements a and b.

    Parameters
    ----------
    length : int
        The length of the chain requested.
    a : object
        The a element of the chain.
    b : object
        The b element of the chain.

    Returns
    -------
    chain : tuple
        A tuple of the requested length, with objects a and b alternating
        in the aperiodic fibonacci pattern.
    """
    if length == 1:
        return (a,)
    if length == 2:
        return (a, b)
    first = (a,)
    second = (a, b)
    while True:
        nxt = second + first
        if len(nxt) >= length:
            return nxt[0:length]
        first = second
        second = nxt
1341fdb84c1470527025609927273c5458c24004
245,762
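A brief usage sketch for fibonacci; the chain is the start of the aperiodic fibonacci word over the two elements:

print(''.join(fibonacci(8, 'a', 'b')))  # abaababa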
def list_inventory(inventory):
    """
    :param inventory: dict - an inventory dictionary.
    :return: list of tuples - the (key, value) pairs from the inventory
        dictionary whose quantity is positive.
    """
    result = []
    for element, quantity in inventory.items():
        if quantity > 0:
            result.append((element, quantity))
    return result
264f8cde11879be8ace938c777f546974383122c
3,509
def _find_union(lcs_list):
    """Finds union LCS given a list of LCS."""
    return sorted(list(set().union(*lcs_list)))
f6c2df0092afc0c427fa1de70c032764f4692394
431,221
def strip_suffix(string, suffix):
    """Remove a suffix from a string if it exists."""
    # Guard against the empty suffix: string[:-0] would return ''.
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
0ca354328ce8579fcce4f16f4f0dfdeac4708391
41,843
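A brief usage sketch for strip_suffix, including the empty-suffix edge case guarded above:

print(strip_suffix("report.txt", ".txt"))  # report
print(strip_suffix("report", ""))          # report (empty suffix is a no-op)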
def round_up(val, mult_of):
    """Rounds up the value to the given multiple"""
    return val if val % mult_of == 0 else val + mult_of - val % mult_of
324b03387e873d07fc88ee304a9cdb0556e7b50e
103,938
def interpolate(x0, y0, x1, y1, x):
    """Linear interpolation between two values

    Parameters
    ----------
    x0: int
        Lower x-value
    y0: int
        Lower y-value
    x1: int
        Upper x-value
    y1: int
        Upper y-value
    x: int
        Requested x-value

    Returns
    -------
    int, float
        Interpolated y-value
    """
    y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)
    return y
082cc92c4c170dbba479e396731326e450b5d765
35,868
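A brief usage sketch for interpolate, evaluating the midpoint of the segment from (0, 10) to (10, 20):

print(interpolate(0, 10, 10, 20, 5))  # 15.0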
from datetime import datetime


def str2date(string):
    """Convert string to datetime object"""
    return datetime.strptime(string, '%Y-%m-%d')
601be43078dc23555b177291397e65104f0322a4
340,307
import re


def simple_tokenize(document):
    """
    Clean up a document and split it into a list of words.

    Converts document (a string) to lowercase and strips out everything
    which is not a lowercase letter or digit.
    """
    document = document.lower()
    document = re.sub('[^a-z0-9]', ' ', document)
    return document.strip().split()
28b793c02988d8049a5f35d864350af00dc1d0db
104,652
def trace_up(all_bags, bag):
    """
    For the input bag "bag", trace the path upwards along all of its
    parents, which are the bags that can hold it.
    """
    parents = []
    if len(bag.held_by) == 0:
        return parents
    for bag_type in bag.held_by:
        parents.append(bag_type)
        held_bag = all_bags[bag_type]
        for parent in trace_up(all_bags, held_bag):
            parents.append(parent)
    return list(set(parents))
44950e8059c664ac97fc536622ddd9cd3224ce8c
600,349
from typing import List


def _get_stress_table_types() -> List[str]:  # pragma: no cover
    """
    Gets the list of Nastran stress objects that the GUI supports.
    """
    table_types = [
        # OES - tCode=5 thermal=0 s_code=0,1 (stress/strain)
        # OES - CELAS1/CELAS2/CELAS3/CELAS4 stress
        'celas1_stress', 'celas2_stress', 'celas3_stress', 'celas4_stress',
        # OES - CELAS1/CELAS2/CELAS3/CELAS4 strain
        'celas1_strain', 'celas2_strain', 'celas3_strain', 'celas4_strain',
        # OES - isotropic CROD/CONROD/CTUBE stress
        'crod_stress', 'conrod_stress', 'ctube_stress',
        # OES - isotropic CROD/CONROD/CTUBE strain
        'crod_strain', 'conrod_strain', 'ctube_strain',
        # OES - isotropic CBAR stress
        'cbar_stress',
        # OES - isotropic CBAR strain
        'cbar_strain',
        # OES - isotropic CBEAM stress
        'cbeam_stress',
        # OES - isotropic CBEAM strain
        'cbeam_strain',
        # OES - isotropic CTRIA3/CQUAD4 stress
        'ctria3_stress', 'cquad4_stress',
        # OES - isotropic CTRIA3/CQUAD4 strain
        'ctria3_strain', 'cquad4_strain',
        # OES - isotropic CTETRA/CHEXA/CPENTA stress
        'ctetra_stress', 'chexa_stress', 'cpenta_stress',
        # OES - isotropic CTETRA/CHEXA/CPENTA strain
        'ctetra_strain', 'chexa_strain', 'cpenta_strain',
        # OES - CSHEAR stress
        'cshear_stress',
        # OES - CSHEAR strain
        'cshear_strain',
        # OES - CELAS1 224, CELAS3 225
        'nonlinear_spring_stress',
        # OES - GAPNL 86
        'nonlinear_cgap_stress',
        # OES - CBUSH 226
        'nolinear_cbush_stress',
    ]
    table_types += [
        # OES - CTRIAX6
        'ctriax_stress', 'ctriax_strain',
        'cbush_stress', 'cbush_strain',
        'cbush1d_stress_strain',
        # OES - nonlinear CROD/CONROD/CTUBE stress
        'nonlinear_rod_stress', 'nonlinear_rod_strain',
        # OESNLXR - CTRIA3/CQUAD4 stress
        'nonlinear_plate_stress', 'nonlinear_plate_strain',
        #'hyperelastic_plate_stress', 'hyperelastic_cquad4_strain',
        # OES - composite CTRIA3/CQUAD4 stress
        'cquad4_composite_stress', 'cquad8_composite_stress',
        'ctria3_composite_stress', 'ctria6_composite_stress',
        'cquad4_composite_strain', 'cquad8_composite_strain',
        'ctria3_composite_strain', 'ctria6_composite_strain',
        # OGS1 - grid point stresses
        'grid_point_surface_stresses',  # tCode=26
        'grid_point_volume_stresses',   # tCode=27
    ]
    return table_types
5707eed6f3f83247950384efdd0aac34a3f7e9db
165,701
def permute_unique_set(nums: list[int]) -> list[list[int]]:
    """Compute all the *unique* permutations of the elements in a given input array

    Args:
        nums: array of possibly non-distinct elements

    Returns:
        all *unique* permutations of elements in `nums`

    Examples:
        >>> sorted(permute_unique_set([1,1,2]))
        [[1, 1, 2], [1, 2, 1], [2, 1, 1]]
        >>> sorted(permute_unique_set([1,2,1,1]))
        [[1, 1, 1, 2], [1, 1, 2, 1], [1, 2, 1, 1], [2, 1, 1, 1]]
        >>> sorted(permute_unique_set([1,2,3]))
        [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """
    uniq_perms = {tuple()}  # Initialized with the empty permutation
    # On each iteration the size of the current permutations grows by 1,
    # so at the end each permutation has length len(nums).
    for curr_num in nums:
        # Insert `curr_num` at every possible position of every partial
        # permutation; the set deduplicates equal results.
        uniq_perms = {
            # tuples for hashability
            (*perm[:insertion_idx], curr_num, *perm[insertion_idx:])
            for perm in uniq_perms
            for insertion_idx in range(len(perm) + 1)
        }
    return [list(permutation) for permutation in uniq_perms]
21b722730a703d11fa5634f06b42349d6756dcb7
290,855
def pyimpl_shape(array):
    """Implement `shape`."""
    return array.shape
20098aa4dc5a330df9083b667e244227fc790973
231,165
import re


def get_years_from_fname(fname):
    """
    Find the YYYY-YYYY pattern in the file name; Y0 and Y1 are returned
    as integers.
    """
    match = re.findall('[12][90][789012][0-9]-[12][90][789012][0-9]', fname)
    if len(match) >= 1:
        t0, t1 = [int(x) for x in match[0].split('-')]
        return t0, t1
    else:
        return None
e88ce3db1bbc01c9f81e0e84c0fbeeb22ab96bb8
147,132
def ec_key(path, *merge_by):
    """Returns the context key and merge logic for the given context path
    and ID field name(s)."""
    if len(merge_by) == 0:
        return path

    js_condition = ''
    for key in merge_by:
        if js_condition:
            js_condition += ' && '
        js_condition += 'val.{0} && val.{0} === obj.{0}'.format(key)
    return '{}({})'.format(path, js_condition)
f6d367ea2c353373e2ab075d0773e440e069f00d
505,568
def get_field(DataModelClass, field_name, base_name=None):
    """
    Returns a SQLAlchemy Field from a field name such as 'name' or
    'parent.name'. Returns None if no field exists by that field name.
    """
    # Handle hierarchical field names such as 'parent.name'
    if base_name:
        if base_name in DataModelClass.__tablename__:
            return getattr(DataModelClass, field_name, None)
        else:
            return None

    # Handle flat field names such as 'name'
    return getattr(DataModelClass, field_name, None)
5da760a8018ab3a43b15030d7b2c7b8151d82106
305,047
import six


def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
    """
    Backward pass: prepare diffs and run the net backward.

    Parameters
    ----------
    diffs : list of diffs to return in addition to bottom diffs.
    kwargs : Keys are output blob names and values are diff ndarrays.
        If None, top diffs are taken from forward loss.
    start : optional name of layer at which to begin the backward pass
    end : optional name of layer at which to finish the backward pass
        (inclusive)

    Returns
    -------
    outs: {blob name: diff ndarray} dict.
    """
    if diffs is None:
        diffs = []

    if start is not None:
        start_ind = list(self._layer_names).index(start)
    else:
        start_ind = len(self.layers) - 1

    if end is not None:
        end_ind = list(self._layer_names).index(end)
        outputs = set(self.bottom_names[end] + diffs)
    else:
        end_ind = 0
        outputs = set(self.inputs + diffs)

    if kwargs:
        if set(kwargs.keys()) != set(self.outputs):
            raise Exception('Top diff arguments do not match net outputs.')
        # Set top diffs according to defined shapes and make arrays single
        # and C-contiguous as Caffe expects.
        for top, diff in six.iteritems(kwargs):
            if diff.shape[0] != self.blobs[top].shape[0]:
                raise Exception('Diff is not batch sized')
            self.blobs[top].diff[...] = diff

    self._backward(start_ind, end_ind)

    # Unpack diffs to extract
    return {out: self.blobs[out].diff for out in outputs}
4af46a2a55c17025d8320921c6d0a6e4ad0c5ba4
481,429
def get_conda_platform_from_python(py_platform):
    """
    Converts a python platform string to a corresponding conda platform

    Parameters
    ----------
    py_platform : str
        The python platform string

    Returns
    -------
    str
        The conda platform string
    """
    # Provides the conda platform mapping from conda/models/enum.py
    # Note that these are python prefixes (both 'linux2' and 'linux' from
    # python map to 'linux' in conda.)
    python_to_conda_platform_map = {
        'darwin': 'osx',
        'linux': 'linux',
        'openbsd': 'openbsd',
        'win': 'win',
        'zos': 'zos',
    }
    for k in python_to_conda_platform_map:
        if py_platform.startswith(k):
            return python_to_conda_platform_map[k]
    return None
447eacf07a95d8c0ce6e473b92d622ea9a8ecbf4
479,077
def transfer_annotations_prob(mapping_matrix, to_transfer):
    """
    Transfer cell annotations onto space through a mapping matrix.

    Args:
        mapping_matrix (ndarray): Mapping matrix with shape
            (number_cells, number_spots).
        to_transfer (ndarray): Cell annotations matrix with shape
            (number_cells, number_annotations).

    Returns:
        A matrix of annotations onto space, with shape
        (number_spots, number_annotations)
    """
    return mapping_matrix.transpose() @ to_transfer
65d6d3167e63c5031f4216c80622960c30a7f8f6
648,567
def comment_for_pr(comment_id, plan):
    """Returns a formatted string containing comment_id and plan"""
    return f'{comment_id}\n```hcl\n{plan}\n```'
fe02965e4c43dddbcc98573ae6c05d508d56af62
361,478
def soma3(a, b, c):
    """Sums three numbers.

    Arguments:
        a: 1st parameter
        b: 2nd parameter
        c: 3rd parameter

    Returns:
        soma -- a + b + c
    """
    s = a + b + c
    return s
32d0ef25cd9a0c5fdefa88f4cf388af2710c775d
268,863
def insertion_sort(array):
    """insertion_sort(list) -> list

    >>> insertion_sort([3, 2, 13, 4, 6, 5, 7, 8, 1, 20])
    [1, 2, 3, 4, 5, 6, 7, 8, 13, 20]
    """
    for index in range(1, len(array)):
        value_now = array[index]
        pos = index
        while pos > 0 and array[pos - 1] > value_now:
            array[pos] = array[pos - 1]
            pos = pos - 1
        array[pos] = value_now
    return array
f91732e1eddceae762b49e3db328965cecee153c
98,599
def get_exception_info(exception: BaseException) -> dict:
    """
    Given an Exception object, retrieves some information about it: class
    and message.

    :param exception: An exception which inherits from the BaseException
        class.
    :return: A dict containing two values:
        - class: The name of the exception's class.
        - message: The message attached to the exception, if it exists.
    """
    return {'class': exception.__class__.__name__,
            'message': str(exception)}
5bd8c09478e2b64ad58c857f39946bfcbfd79caa
630,784
def download_input(storage_provider, parsed_event, input_dir_path):
    """Receives the event where the file information is and the
    tmp_dir_path where to store the downloaded file.

    Returns the file path where the file is downloaded."""
    return storage_provider.download_file(parsed_event, input_dir_path)
887ca61a40a658d172b4b77833132b73933a14ce
16,015
import logging


def get_logger(logger_name, logger_file, log_level=logging.INFO):
    """Setup the logger and return it."""
    log_format = "%(asctime)s - %(levelname)s - %(message)s"
    logging.basicConfig(
        level=log_level,
        format=log_format,
        datefmt="%y-%m-%d_%H:%M",
        filename=logger_file,
        filemode="w",
    )
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(log_level)
    # set a format which is simpler for console use
    formatter = logging.Formatter(log_format)
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the named logger
    logging.getLogger(logger_name).addHandler(console)
    return logging.getLogger(logger_name)
17dc41c4e1ecab10ed8a3c14ffa875d64337cdb8
268,953
import hashlib


def sha256(payload):
    """This function returns the sha256 of the provided payload"""
    return hashlib.sha256(payload).digest()
05e5fef168521265c0fdfa0b82ca18930baadef6
670,517