Columns: content (string, lengths 39 to 14.9k), sha1 (string, length 40), id (int64, 0 to 710k)
def read_bib(fname):
    """Reads in .bib file"""
    print("Reading {}".format(fname))
    with open(fname, 'r') as bibfile:
        data = bibfile.read()
    return data
a39bed3ad6feee5f2ad19d12ae49ff01f0fc40df
73,809
def admin_pc_client(admin_pc):
    """Returns the client from the default admin's ProjectContext """
    return admin_pc.client
e870e216b7a21c9e879d1bd12a5255f224c62310
73,811
def isascii(s):
    """Returns True if str s is entirely ASCII characters.

    (Compare to Python 3.7 `str.isascii()`.)
    """
    try:
        s.encode("ascii")
    except UnicodeEncodeError:
        return False
    return True
ab0969a89ebdf23b8c6ec2d34ecd3f67b616b30a
73,812
import random


def random_range(max):
    """
    A range from 0 to a randomly chosen end between 1 and `max`.

    The smallest range would thus be `range(0, 1)` while the largest
    would be `range(0, max)`.
    """
    assert max >= 1, f"max {max} must be greater than or equal to 1"
    return range(0, random.randint(1, max))
a82bcc620a0307b2e06298c935e159b50df68428
73,814
def validate_etextno(etextno):
    """Raises a ValueError if the argument does not represent a valid
    Project Gutenberg text identifier.
    """
    if not isinstance(etextno, int) or etextno <= 0:
        msg = 'e-text identifiers should be strictly positive integers'
        raise ValueError(msg)
    return etextno
2a6c479f66651d2b84b6551fc1888a248d1df00d
73,815
def merge_two_dicts(x, y):
    """A quick function to combine dictionaries.

    Parameters
    ----------
    x : dict
        Dictionary 1.
    y : dict
        Dictionary 2.

    Returns
    -------
    dict
        The merged dictionary.
    """
    z = x.copy()  # start with keys and values of x
    z.update(y)   # modifies z with keys and values of y
    return z
539ca0fae147943a6c33c761303c96e829e8f45d
73,816
def is_next_south_cell_empty(i, j, field):
    """
    check if next below cell is empty
    :param i:
    :param j:
    :param field:
    :return: True if next below cell is empty, False otherwise
    """
    if i == len(field) - 1:
        if field[0][j] == '.':
            return True
        return False
    if field[i + 1][j] == '.':
        return True
    return False
4b06dcba61eff2769fd78176f259f79e98ca2a1d
73,817
def _channel_info_to_flattened_name(channel_info):
    """ Simple method to generate a flattened channel name. """
    if channel_info['start_index'] == -1:
        return channel_info['base_name']
    elif channel_info['start_index'] == channel_info['end_index']:
        return '{0}{1}'.format(channel_info['base_name'],
                               channel_info['start_index'])
    else:
        return '{0}{1}:{2}'.format(channel_info['base_name'],
                                   channel_info['start_index'],
                                   channel_info['end_index'])
a2e767ef98d9d843c93b3548c88f0ec1be582e0f
73,822
def get_normalized_hostname(env):
    """
    Get the best hostname we can find in the provided environment.
    If the request came in on an alternate port, it will be appended
    to the result.
    """
    host = env.get('HTTP_HOST', env['SERVER_NAME'])
    if host.find(':') == -1:
        host += ':' + env['SERVER_PORT']
    return host
a175e8437d10a1b9e7ce66b24d9023963b87a20c
73,829
def _centered_text_position(img, num_lines, line_height):
    """ Center text in the middle of the image depending on how many lines the text is """
    x_position = 15
    textbox_center = num_lines * line_height / 2
    image_center = img.height / 2
    y_position = image_center - textbox_center
    return (x_position, y_position)
83cde42f954b0dd84a26adce9169dc343f75b0ef
73,830
def fake_idtoken_processing_hook3(id_token, user, token, **kwargs):
    """
    Fake function for checking scope is passed to processing hook.
    """
    id_token['scope_of_token_passed_to_processing_hook'] = token.scope
    return id_token
7f7dcd367936d77c473b36cd0b9aaefa6ff6b7ca
73,833
from typing import Optional
from typing import Union
from typing import List


def _check_pattern_list(
    patterns: Optional[Union[List[str], str]],
    key: str,
    default: Optional[List[str]] = None,
) -> Optional[List[str]]:
    """Validate file search patterns from user configuration.

    Acceptable input is a string (which will be converted to a singleton
    list), a list of strings, or anything falsy (such as None or an empty
    dictionary). Empty or unset input will be converted to a default.

    Args:
        patterns: Input from user configuration (YAML).
        key (str): Name of the configuration key the input came from,
            used for error display purposes.
        default: Value to return in case the input is empty or unset.

    Returns:
        Validated list of patterns.

    Raises:
        ValueError: If the input is unacceptable.
    """
    if not patterns:
        return default
    if isinstance(patterns, str):
        return [patterns]
    if isinstance(patterns, list) and all(isinstance(p, str) for p in patterns):  # type: ignore
        return patterns
    raise ValueError(
        f"Invalid file patterns in key '{key}': must be a string or "
        "list of strings"
    )
a5156afd8d30babac062e0628309160acd5068a4
73,835
def get_task_run_df(enki, task_id):
    """Return a dataframe containing all task run info for a task."""
    enki.get_tasks(task_id=task_id)
    enki.get_task_runs()
    task = enki.tasks[0]
    return enki.task_runs_df[task.id]
fb7b5832ce38219b00ab2314527b806ccb68b277
73,838
from typing import List

import torch


def collate_fn(batch_items: List[dict]):
    """
    Collate and pad fields in dataset items
    """
    x, y = [], []
    input_specs_lengths, output_text_lengths = [], []
    texts = []
    audios = []
    sample_rates = []
    for i in range(len(batch_items)):
        x.append(batch_items[i]["spectrogram"].squeeze(0).t())
        y.append(batch_items[i]["text_encoded"].squeeze(0))
        input_specs_lengths.append(x[-1].shape[0])
        output_text_lengths.append(y[-1].shape[0])
        texts.append(batch_items[i]["text"])
        audios.append(batch_items[i]["audio"])
        sample_rates.append(batch_items[i]["sample_rate"])
    x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True, padding_value=0.)
    y = torch.nn.utils.rnn.pad_sequence(y, batch_first=True)
    return {"spectrogram": x,
            "text_encoded": y,
            "spectrogram_length": torch.tensor(input_specs_lengths, dtype=torch.int32),
            "text_encoded_length": torch.tensor(output_text_lengths, dtype=torch.int32),
            "text": texts,
            "audio": audios,
            "sample_rate": sample_rates}
cfb36a8ad0033f435d8b8ffa369d3cf85e7b27ca
73,839
def number_to_name(number):
    """Take integer number as input (0-1-2-3-4) and returns string
    (rock-spock-paper-lizard-scissor)
    """
    if number == 0:
        return "rock"
    elif number == 1:
        return "spock"
    elif number == 2:
        return "paper"
    elif number == 3:
        return "lizard"
    elif number == 4:
        return "scissor"
    else:
        return "Error"
e0a5dd4ceb35ee0d7c2f86f95fb34f4bf1fe2da3
73,849
import math


def cooccur_probability_fn(counts):
    """Returns function for computing entailment probability.

    Args:
        counts: Dict mapping unigrams / bigrams (joined using "|||") to their
            counts.

    Returns:
        Function handle to compute entailment probability.
    """

    def _cooccur_probability(ngram, table):
        """Returns probability of ngram being entailed by the table.

        Uses the co-occurrence counts given along with the lexical entailment
        model described in:

        Glickman, Oren, Ido Dagan, and Moshe Koppel. "A lexical alignment
        model for probabilistic textual entailment." Machine Learning
        Challenges. Springer, Berlin, Heidelberg, 2006. 287-298.

        E.g.:
            >>> _cooccur_probability(["michael", "dahlquist"],
                                     [(["name"], ["michael", "dahlquist"])])
            >>> 1.0

        Args:
            ngram: List of tokens.
            table: List of either (attribute, value) pairs or (head, relation,
                tail) triples. Each member of the pair / triple is assumed to
                already be tokenized into a list of strings.

        Returns:
            prob: Float probability of ngram being entailed by the table.
        """
        table_toks = set()
        for item in table:
            if len(item) == 2:
                # attribute, value
                table_toks.add("_".join(item[0]))
                table_toks.update(item[1])
            else:
                # head, relation, tail
                table_toks.update(item[0] + ["_".join(item[1])] + item[2])
        probability = 1.
        for xtok in ngram:
            if xtok in table_toks:
                continue
            max_p = 0.
            for btok in table_toks:
                if btok not in counts:
                    continue
                p = float(counts.get(btok + "|||" + xtok, 0.)) / counts[btok]
                if p > max_p:
                    max_p = p
            probability *= max_p
        return math.pow(probability, 1. / len(ngram))

    return _cooccur_probability
5919365b2142ddb86ffaf11966c9e72d7dc22971
73,851
def scaling_term(k, n):
    """Term that scales measure between zero and one

    k    size of the neighbourhood
    n    number of datapoints
    """
    if k < (n / 2.0):
        return 2.0 / ((n * k) * (2 * n - 3 * k - 1))
    else:
        return 2.0 / (n * (n - k) * (n - k - 1))
bc29fd056811196ec2212f248761a847e0cce9c9
73,860
def chunk(vs, size):
    """Chunk list `vs` into chunks of size `size`."""
    chunks = [vs[i : i + size] for i in range(0, len(vs), size)]
    if not chunks or len(chunks[-1]) != size:
        raise ValueError(f"list {vs} can not be chunked in {size}s")
    return chunks
3e6b0938db4898503c36544a7b87896b0bfad59f
73,865
def _basify_factors(factors):
    """Convert `(f_i, k)` factors to Basic expressions. """
    if type(factors) is tuple:
        return factors[0], [(g.as_basic(), k) for g, k in factors[1]]
    else:
        return [(g.as_basic(), k) for g, k in factors]
40a4090bfc7fb72794f888f7f39ea7e64e5f99d2
73,868
import logging


def process_response(response, records):
    """ Examines the response from PutRecords, returning any records that
        couldn't be sent.
    """
    if response['FailedRecordCount'] == 0:
        return []
    result = []
    dropped_record_count = 0
    for ii in range(len(response['Records'])):
        entry = response['Records'][ii]
        errorCode = entry.get('ErrorCode')
        if errorCode == 'ProvisionedThroughputExceededException':
            result.append(records[ii])
        elif errorCode:
            dropped_record_count += 1
    if dropped_record_count > 0:
        logging.warning(f'dropped {dropped_record_count} records due to Kinesis internal errors')
    if len(result) > 0:
        logging.info(f'requeueing {len(result)} records due to throughput-exceeded')
    return result
45e19dccbb6d77c5fe8b5ff6849bab3801da0ac1
73,869
from datetime import datetime


def recast_timestamp(ms: int):
    """Recast millisecond epoch offsets to DateTime"""
    try:
        return datetime.fromtimestamp(ms / 1000.0)
    except TypeError:
        return None
6d89074ad13e7eb0e96949b8463015fda1124e45
73,874
def apply_mask(values, mask):
    """
    Convenience method to filter out values from a list according to some
    binary masking array.

    @param values: The list that should be filtered.
    @param mask: The masking array, must have the same length as the `values`
        array.
    @return: A list that contains only the selected values.
    """
    return [value for value, is_set in zip(values, mask) if is_set == 1]
4651b627b24730392dae98bc0502672a551a101c
73,878
def imgstr(imgloc):
    """html img tag"""
    istr = ('<div style="display: block; text-align: left;">'
            '<a href="{0}"><img src="{0}" border="0" height="200"></a></div>')
    return istr.format(imgloc)
1d61f89286d7a558d822a6ef11ab095592eee6a2
73,879
import random


def k_fold_cross_validation(k: int, num_data: int, shuffle: bool = True) -> tuple:
    """
    Splits a number of training data into k folds, which can be used for
    k-fold-cross-validation.

    Parameters
    ----------
    k: number of folds for cross validation.
    num_data: Total amount of data values.
    shuffle: Boolean variable, which indicates whether to shuffle the data or not.

    Returns
    -------
    split: tuple, in which lists of indices according to the splits are stored.
    """
    assert num_data >= k, "Total amount of data needs to be larger or equal to the number of folds!"

    # create indices, corresponding to the data points, and shuffle them, if required
    indices = list(range(num_data))
    if shuffle:
        random.shuffle(indices)

    # compute the sizes of the folds and the remaining number of data
    fold_size = int(num_data / k)
    remaining_data = num_data % k

    # compute the splits
    fold_list = []
    for i in range(k):
        fold_list.append(indices[i*fold_size:(i+1)*fold_size])

    # append the remaining data points to the folds
    for i in range(remaining_data):
        fold_list[i].append(indices[k*fold_size+i])

    split = tuple(fold_list)
    return split
022028007bc553dcd383147ff59c0a2883b33783
73,888
def get_bpath(f):
    """
    Return a string that corresponds to the base path of the data.

    NB: For openPMD 1.0.0, the basePath is always of the form
    '/data/%T' where %T is replaced by the actual iteration which
    is present in the file.

    Parameters:
    -----------
    f: an h5py.File object
    """
    iteration = list(f['/data'].keys())[0]
    return('/data/%s' % iteration)
5ea70083d6b7294e109fa0483e8f332fd555d387
73,894
def get_permission_codes(permission_resources):
    """
    Generate the set of resource codes that the user can access

    :param permission_resources: Resources that the user can access
    :return: a set of resource codes that the user can access
    """
    permission_codes = []
    for resource in permission_resources:
        permission_codes.append(resource.code)
        parent = resource.parent
        while parent:
            permission_codes.append(parent.code)
            parent = parent.parent
    return set(permission_codes)
ec33d2ec28d0ffa02e3b92a29fa8b3b0fd5203ac
73,895
def parse_ratings(ushpa_data):
    """
    Parse ushpa data from bapa.services.ushpa.get_pilot_data
    to return a string such as 'P2, H3'.
    """
    rating_dict = {
        'BEGINNER': '1',
        'NOVICE': '2',
        'INTERMEDIATE': '3',
        'ADVANCED': '4',
        'MASTER': '5'
    }
    pg = ushpa_data.get('pg_pilot_rating')
    hg = ushpa_data.get('hg_pilot_rating')
    ratings = ''
    if pg:
        ratings += 'P-%s' % rating_dict[pg]
    if hg:
        ratings += ' H-%s' % rating_dict[hg]
    if ratings:
        return ratings.strip()
    return
74bfe8ca352f22fdead8119cb815a26c929ae47d
73,897
def smooth(x, y):
    """ Smooth a curve """
    xs = x[:]
    ys = y[:]
    d = 0
    for i in range(0, len(ys)):
        num = min(len(ys), i+d+1) - max(0, i-d)
        total = sum(ys[max(0, i-d):min(len(ys), i+d+1)])
        ys[i] = total/float(num)
    return xs, ys
0e0b9b030413bb14a930ac86b55d5a3949826df6
73,898
def flatten_dict(nested: dict) -> dict:
    """Take a nested dictionary and flatten it.

    For example: {'a': {'b': 'c'}} will be flattened to {'a_b': c}

    Args:
        nested: a dictionary to be flattened

    Returns:
        Dict. flattened version of the original dictionary
    """
    ans = {}
    for key, val in nested.items():
        # if val is a dict, flatten val recursively
        if isinstance(val, dict):
            flattened = flatten_dict(val)
            for subkey, subval in flattened.items():
                flattened_key = f"{key}_{subkey}"
                ans[flattened_key] = subval
        else:
            ans[key] = val
    return ans
1c3e37ed3aad9838c20a02e7757e3fe47d4e4513
73,899
def ns(state, *namespaces):
    """Set command namespaces to be included

    By default only the root namespace is enabled. The 'ns' command enables
    commands present in the specified namespaces.

    This works by setting the "active_namespaces" state variable. The "root"
    namespace is appended to the active_namespaces if not already present.

    When a command is executed, all the active namespaces are searched one by
    one until the command is found. Note that the order of active_namespaces
    is significant.
    """
    namespaces = list(namespaces)
    if "root" not in namespaces:
        namespaces.append("root")
    state.vars["active_namespaces"] = namespaces
    return state
711f74b2555b398f60336391bd0d52165131a4b6
73,900
import typing


def reset_database() -> typing.List[str]:
    """
    Returns statements that reset the database, i.e., delete everything.

    Returns
    -------
    out: list of str
        Neo4j statements
    """
    return ['MATCH (n) DETACH DELETE n', 'CALL apoc.schema.assert({},{},true)']
617e9ddbf0e658136fea6b64c12c032d1447e1d3
73,901
def rebin(image, rebinned_shape, factor):
    """
    Parameters
    ----------
    image : 2D np.array
        the larger PSF grid
    rebinned_shape : iterable with size 2
        shape of the image after rebinning
    factor : int
        smoothing factor

    Returns
    -------
    2D np.array
        the rebinned image with shape `rebinned_shape`
    """
    x_dim, y_dim = rebinned_shape  # shape after coarse binning
    # shape of each kernel, the pixels inside of which will be averaged
    # (should be equal to [factor, factor])
    x_kernel_dim, y_kernel_dim = image.shape[0]//x_dim, image.shape[1]//y_dim
    new_shape = x_dim, x_kernel_dim, y_dim, y_kernel_dim
    # average across the dimensions of each kernel
    rebinned_image = image.reshape(new_shape).mean(axis=-1).mean(axis=1)*factor**2
    return rebinned_image
73464543d11b09ce6af2b1f60b7d735349002f77
73,902
def smart_truncate_l(content, begin_index=0, prefix='') -> str:
    """
    truncate a text from the left, splitting at word boundaries.
    this means that the start of the text may be removed.

    :param content: the content to truncate
    :param begin_index: the string index to begin at. If the character at the
        index is not a whitespace, this function will seek the next whitespace
        and split there
    :param prefix: an optional text to prepend to the truncated text (will
        only be added, if begin_index > 0)
    :return: the truncated text
    """
    if begin_index <= 0:
        return content
    else:
        splt = content[begin_index:].split(' ', 1)
        return prefix + (splt[1] if len(splt) > 1 else content[begin_index:])
4d8691d6ee3a7b5157568d0c64d376740c0fddda
73,906
def parse_lookup_table(lookup_table_file):
    """Parses a lookup table to determine regions.

    This allows the hemisphere splitting to adapt with updated lookup tables.

    :param lookup_table_file:
    :return:
    """
    labels_dict = dict()
    with open(lookup_table_file, "r") as lookup_table:
        for line in lookup_table:
            # parse line for label code
            row = line.split(" ")
            for i in range(row.count("")):
                row.remove("")
            code = row[0]
            # continue if the code is a number
            if code.isalnum():
                name = row[1]
                # determine hemisphere
                if "Left" in name or "lh" in name:
                    hemisphere = "lh"
                elif "Right" in name or "rh" in name:
                    hemisphere = "rh"
                else:
                    hemisphere = "N/A"
                # determine location
                # set location to None. Then update it depending on the name.
                location = None
                if "wm" in name:
                    location = "wm"
                elif "ctx" in name or "gyrus" in name:
                    location = "gm"
                elif "CC" in name:
                    location = "cc"
                elif "Ventricle" in name:
                    location = "ventricle"
                cerebellum_names = [
                    "Cbm",
                    "Cerebellum",
                    "Cerebellum",
                    "Cerebellar",
                    "4th-Ventricle",
                    "Brain-Stem",
                    "VentralDC",
                ]
                subcortical_names = [
                    "Thalamus",
                    "Caudate",
                    "Putamen",
                    "Pallidum",
                    "Hippocampus",
                    "Amygdala",
                    "Accumbens",
                    "Inf-Lat-Vent",
                ]
                for designated_name, list_of_locations in [
                    ("cerebellum", cerebellum_names),
                    ("subcortical", subcortical_names),
                ]:
                    for location_name in list_of_locations:
                        if location_name in name:
                            location = designated_name
                if not location:
                    location = "UNKNOWN"
                labels_dict[code] = dict(
                    name=name, hemisphere=hemisphere, location=location
                )
    return labels_dict
6960dddbe5e76120d2c18f5ebf951ea59e0d1a93
73,908
import torch


def seq_mask_from_lens(lengths, max_len=None):
    """
    Creates a boolean mask from sequence lengths.

    If `lengths` is of shape (...), the `mask` is of shape (..., max_len).
    The last dimension is of shape (max_len) and consists of consecutive
    `True`s and `False`s. The number of `True`s is decided by the number in
    `lengths`. `True` means that the corresponding position is not a padding
    token, and `False` otherwise.

    lengths: tensor containing the lengths of sequences
    max_len: the max length of all the sequences
    """
    if max_len is None:
        max_len = lengths.max().item()
    mask = torch.arange(0, max_len, dtype=torch.long).type_as(lengths)
    mask = mask.unsqueeze(0)
    mask = mask.repeat(*lengths.size(), 1)
    mask = mask.lt(lengths.unsqueeze(-1))
    return mask
1f606b30d95ed6255249b63dc97a1ac2c2bc4346
73,909
from typing import Union


def next_page(page: str) -> Union[None, str]:
    """
    Given the contents of a Letterboxd page, returns the relative path to the
    next page to parse. It handles the pagination of any type of page, from
    followers, to following, to movies watched, etc.

    Returns None if this is the last page already and there isn't another one
    to parse.
    """
    key_page_next = "\"next\" href=\""
    start = page.rfind("paginate-nextprev")
    start = page.find(key_page_next, start)
    if start == -1:
        return None
    start += len(key_page_next)
    end_idx = page.find("\"", start)
    return page[start + 1:end_idx]
f0d041548a91553a512907f951813912a50f6182
73,915
def merge(*dicts):
    """Returns a dict that consists of the rest of the dicts merged with
    the first. If a key occurs in more than one map, the value from the
    latter (left-to-right) will be the value in the result."""
    d = dict()
    for _dict in dicts:
        d.update(_dict)
    return d
9a624f0b440b1bf0281918abaa7988968495a39d
73,917
def get_human_readable_resolution(ds):
    """Get human readable resolution string

    Returns a human readable resolution string, e.g. "1 km", "500 m" or
    "0.1 deg". Requires the resolution to be available via 'rioxarray'.

    Parameters
    ----------
    ds : xr.DataSet
        The dataset to read the resolution from.

    Returns
    -------
    str
        The string with the human readable resolution
    """
    res = ds.rio.resolution()[0]
    if ds.rio.crs.is_geographic:
        units = "deg"
    else:
        units = ds.rio.crs.linear_units[0]
    if res >= 1000:
        res = res/1000
        units = f"k{units}"
    if int(res) == res:
        res = int(res)
    return f"{res} {units}"
c12ac11f13d347a35e180a1a6081cdf3f7a09a9a
73,919
def _normalize_scale(t, a=0, b=1):
    """Performs an affine transformation to normalize an interval.

    Args:
        t (numpy.ndarray): Array of dim 1 or 2 with at least 2 values.
        a (float): Starting point of the new interval. Defaults 0.
        b (float): Stopping point of the new interval. Defaults 1.

    Returns:
        (numpy.ndarray): Array with the transformed interval.
    """
    t = t.T  # Broadcast to normalize multiple arrays
    t1 = (t - t[0]).astype(float)  # Translation to [0, t[-1] - t[0]]
    t1 *= (b - a) / (t[-1] - t[0])  # Scale to [0, b-a]
    t1 += a  # Translation to [a, b]
    t1[0] = a  # Fix possible round errors
    t1[-1] = b
    return t1.T
5330539ba857b5d6c7759ae99a63b29c817b216e
73,923
def colourful_text(text, color):
    """add color to text

    Args:
        text (str): [description]
        color (str): red, green, yellow, blue, black, none

    Returns:
        str: coloured text

    Examples:
        >>> s = String()
        >>> s.colourful_text("比如Convolutional Neural Network,CNN对应中文是卷积神经网络。", "red")
        '\x1b[31m比如Convolutional Neural Network,CNN对应中文是卷积神经网络。\x1b[0m'
    """
    colourful = {
        "red": u"\033[1;31;1m%s\033[0m",
        "green": u"\033[1;32;1m%s\033[0m",
        "yellow": u"\033[1;33;1m%s\033[0m",
        "blue": u"\033[1;34;1m%s\033[0m",
        "black": u"\033[1;30;1m%s\033[0m",
    }
    return colourful[color] % text if color != "none" else text
52d13e28f728b9a51fb587e9fc0e05ffaafa3de6
73,925
def max_seq_len() -> int:
    """Mock maximum sequence length."""
    return 8
1f58c6cab80e750af42a9b3fccf80cb73f6b49c7
73,931
def get_symbol_name(symbol):
    """Returns __name__ attribute or empty string if not available."""
    if hasattr(symbol, "__name__"):
        return symbol.__name__
    else:
        return ""
a2ffae1dba71ab83b61ba933fde36e6448a47c68
73,932
def date2String(date_time):
    """This function is used to take a datetime object and convert it to a
    string representing the month, day, and time in 12 hour form"""
    day = str(date_time.strftime("%b")) + " " + str(date_time.day)
    time = date_time.strftime("%I:%M %p")
    displayDate = day + ", " + time
    return displayDate
c5ce14d7bc2603d1068ac4309035b6cf36d660b0
73,940
def is_array_field(test_dataframe, test_field):
    """Tests if the column test_field in test_dataframe is an array field

    :param test_dataframe: dataframe to test
    :param test_field: column name to test
    :return: True or False
    """
    if test_dataframe.empty:
        raise ValueError("No data saved from dataset - DataFrame is empty")
    test_value = test_dataframe[test_field][0]
    return (hasattr(test_value, "__len__")
            and not isinstance(test_value, (str, bytes)))
285416097ec68c5f4d4231d0d65bb830fd35ccc0
73,943
def mirror_lines(string):
    """Given a multiline string, return its reflection along a vertical axis.

    Can be useful for the visualization of text version of trees."""
    return '\n'.join(line[::-1] for line in string.split('\n'))
e02ea859e44a7d7d779d26a177710bc1bcc201af
73,948
def raster_preparation(purged_spike_train, event_start_frames,
                       camera_framerate=120., window_size=10):
    """
    Parameters
    ----------
    purged_spike_train : np.ndarray
        The spike train without spikes that precede or succeed tracking,
        relative to tracking start.
    event_start_frames : np.ndarray
        Every frame ON (1) start in the session.
    camera_framerate : np.float64
        The sampling frequency of the tracking system; defaults to 120.
    window_size : int
        The unilateral window size; defaults to 10 (seconds).
    ----------

    Returns
    ----------
    raster_list : list
        List of raster events (np.ndarrays) for that spike train.
    ----------
    """
    raster_list = []
    for event in event_start_frames:
        window_start_seconds = (event / camera_framerate) - window_size
        window_centered_spikes = purged_spike_train[(purged_spike_train >= window_start_seconds)
                                                    & (purged_spike_train < window_start_seconds + (window_size * 2))] - window_start_seconds
        raster_list.append(window_centered_spikes[window_centered_spikes > 0])
    return raster_list
3dda58b7cd95a399623ef7572f228adef1762634
73,949
def _name(ii):
    """Use this to make the model name for source number `ii`."""
    return 'gauss2d.source_{0:02d}'.format(ii)
90aab1b92d592609c5a4325f69d05f1121f844f6
73,953
import errno


def is_out_of_memory_error(exception):
    """Returns true iff exception is an instance of OSError and error code
    represents an out of memory error."""
    return isinstance(exception, OSError) and exception.errno == errno.ENOMEM
d840b856a85d6a5ba669f7f9ec239b83e63f782d
73,960
def GetBuildLogPathInGCS(logs_folder, build_id):
    """Gets a full Cloud-Storage path to a log file.

    This is a simple convenience function that mirrors the naming convention
    that the Blueprints Controller API uses for log files.

    Args:
        logs_folder: string, the full Cloud Storage path to the folder
            containing the log file, e.g. 'gs://my-bucket/logs'.
        build_id: string, a Cloud Build ID, e.g.
            '3a14eb82-7717-4160-b6f7-49c986ca449e'.

    Returns:
        A string representing the full Cloud Storage path to a specific log
        file.
    """
    return '{0}/log-{1}.txt'.format(logs_folder, build_id)
e866ee3f58d79ab82e089fcdc5170b16bfd9d43a
73,962
def convert_force(voltage):
    """
    Formula used to convert sensor data voltage reading into Newtons as per
    predetermined sensor calibration curve

    Calibration curve - Oct 24 2018
        grams   N       voltage
        0       0.000   298.7
        100     0.981   334.2
        300     2.943   405.9
        500     4.905   477.5
        800     7.848   585.1
        1000    9.810   656.5
        1200    11.77   728.5
        R^2 = 1
        y = 0.0274x - 8.175

    Calibration curve - Feb 19 2019
        grams   N       voltage
        0       0.000   305.4
        100     0.981   341.2
        300     2.943   413.0
        500     4.905   484.2
        800     7.848   592.6
        1000    9.810   664.6
        1200    11.77   735.6
        R^2 = 1
        y = 0.02733x - 8.3447

    Shift of approximately 0.17 N (17.3 g)
    """
    return round(0.02733 * int(voltage) - 8.3447, 6)
e3a9a12b24e42496454c6e5fcb6704f896638ed7
73,963
def is_a_power_of_2(x: int) -> bool:
    """Check if an integer is a power of two.

    Args:
        x (int): Number to check

    Returns:
        bool: True if the number is a power of two
    """
    # https://stackoverflow.com/questions/57025836/how-to-check-if-a-given-number-is-a-power-of-two
    return x > 0 and (x & (x - 1)) == 0
3a5ca905740945628832c5d428fcf2d9e699bb65
73,964
def create_export(service, matter, request_body):
    """
    Creates an export in the given matter, with the given request_body
    (which is the actual JSON for the request).
    """
    return service.matters().exports().create(
        matterId=matter, body=request_body).execute()
ddc6abe6452f41ae86390d98220a697c82c58dcb
73,966
import math


def poisson_prob(n, _lambda):
    """The probability of n occurrences of a random variable having a Poisson
    distribution with expected value _lambda

    :param n: number of occurrences
    :type n: int
    :param _lambda: expected value
    :type _lambda: int
    :return: probability of n
    :rtype: float
    """
    return math.exp(-_lambda) * pow(_lambda, n) / math.factorial(n)
378e327a1d3524bccb900fc61585514ebc999d61
73,975
import hashlib


def read_hash_local(weights):
    """Reads local SHA256 hash from weights.

    Parameters
    ----------
    weights : str or pathlib.Path
        Path of the file containing weights.

    Returns
    -------
    local_hash : str or None
        SHA256 hash of weights file.

    Notes
    -----
    Returns None if file is not found.
    """
    BUFFER_SIZE = 65536
    sha256 = hashlib.sha256()
    try:
        with open(weights, 'rb') as file_weights:
            while True:
                data = file_weights.read(BUFFER_SIZE)
                if not data:
                    break
                sha256.update(data)
        local_hash = sha256.hexdigest()
    except FileNotFoundError:
        local_hash = None
    return local_hash
c3e2ef7e2b747b73c7d0aadd5a3883556870893f
73,978
def is_even(x: int) -> bool:
    """Checks if x is an even number"""
    return x/2 == x // 2
b08f8c62c22481d1ae0b148bc07b63bbba2c1ae7
73,980
from sympy.polys.domains import QQ


def dup_content(f, K):
    """
    Compute the GCD of coefficients of ``f`` in ``K[x]``.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ, QQ

    >>> R, x = ring("x", ZZ)
    >>> f = 6*x**2 + 8*x + 12
    >>> R.dup_content(f)
    2

    >>> R, x = ring("x", QQ)
    >>> f = 6*x**2 + 8*x + 12
    >>> R.dup_content(f)
    2
    """
    if not f:
        return K.zero

    cont = K.zero

    if K == QQ:
        for c in f:
            cont = K.gcd(cont, c)
    else:
        for c in f:
            cont = K.gcd(cont, c)
            if K.is_one(cont):
                break

    return cont
5380f740421dab0ddf5d5892d090c252447725b5
73,982
def slashappend(s):
    """Append "/" to 's' if it doesn't already end in "/"."""
    if s and not s.endswith('/'):
        return s + '/'
    else:
        return s
12ac5cde7b2a5ddf18be25e34602b047dc14a633
73,985
import torch


def get_bert_sentence_embeddings(encoded_layers):
    """Obtain sentence embeddings by averaging all embeddings in the second
    last layer for a sentence."""
    sent_emb = torch.mean(encoded_layers[-2], 1)
    return sent_emb[0]
5ca57a542b3336192ec38a58c040a4c4727e137b
73,992
def avg_mutation_rate(alignment: list):
    """
    Counts number of POLYMORPHIC sites on an alignment.
    -> no distinction on whether mutation carried by 1 or several sequences
    Used to estimate the age of the most recent common ancestor.

    Parameters:
        alignment (list): !should be a compatible segment

    Outputs:
        float: #polymorphisms / (len(aln) x #seq)
    """
    count = 0
    for bases in alignment:
        if len(set(bases)) > 1:
            count += 1
    return count / (len(alignment) * len(alignment[0]))
e1586ebab8e2fb2c4ef72dec21fc922d24dd2e97
73,996
def normalize(data, reference):
    """
    Normalize data according to a reference data's mean and standard deviation
    """
    return (data - reference.mean()) / reference.std()
455f6aa9b29a18341a68d82883767a49799bffe5
73,998
def is_same(text0: str, text1: str, ignored: list):
    """Compare the content of two texts.

    If there are different words in text0 and text1 and the word in text0
    does not contain an ignored substring, return False. Else return True."""
    def should_ignore(word):
        for w in ignored:
            if w in word:
                return True
        return False

    words0, words1 = text0.split(), text1.split()
    if len(words0) != len(words1):
        return False
    for word0, word1 in zip(words0, words1):
        if not should_ignore(word0) and word0 != word1:
            return False
    return True
870453a917f8e3a1eff3918f5c69662fde6b65d1
73,999
def find_pudc_b(db):
    """ Find PUDC_B pin func in grid, and return the tile and site prefix.

    The PUDC_B pin is a special 7-series pin that controls unused pin pullup.
    If the PUDC_B is unused, it is configured as an input with a PULLUP.
    """
    grid = db.grid()
    pudc_b_tile_site = None
    for tile in grid.tiles():
        gridinfo = grid.gridinfo_at_tilename(tile)

        for site, pin_function in gridinfo.pin_functions.items():
            if 'PUDC_B' in pin_function:
                assert pudc_b_tile_site is None, (pudc_b_tile_site,
                                                  (tile, site))
                iob_y = int(site[-1]) % 2
                pudc_b_tile_site = (tile, 'IOB_Y{}'.format(iob_y))

    return pudc_b_tile_site
c7b528ec9f5fbe636663e78ea5bb671e3c8fc36e
74,000
import re


def clean_name(naam):
    """
    Clean the name of a company to get a better match with the url

    Parameters
    ----------
    naam: str
        Original name of the company

    Returns
    -------
    str:
        Clean name
    """
    # always lowercase the name
    naam_small = naam.lower()
    # remove anything that looks like B.V., N.V., etc.
    naam_small = re.sub(r"\s(\w\.)+[\s]*", "", naam_small)
    # remove anything between parentheses plus whatever follows it
    naam_small = re.sub(r"\(.*\).*$", "", naam_small)
    # remove all & and quotation mark characters
    naam_small = re.sub(r"[&\"]", "", naam_small)
    # remove all whitespace
    naam_small = re.sub(r"\s+", "", naam_small)
    return naam_small
c76026f6d7f2bf205974cafc6c4e17a5bb6552e2
74,001
import re


def extract_result_types(comment):
    """Extracts a list of result types from the given comment.

    We allow annotations in the comment of the matcher to specify what nodes a
    matcher can match on. Those comments have the form:
        Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])

    Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
    Returns the empty list if no 'Usable as' specification could be parsed.
    """
    result_types = []
    m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S)
    if m:
        return ['*']
    while True:
        m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
        if not m:
            if re.search(r'Usable as:\s*$', comment):
                return result_types
            else:
                return None
        result_types += [m.group(2)]
        comment = m.group(1)
beebb985abedef560868b8cdd9d84a1c423eaa8d
74,008
def tra_number_atoms(snapshots):
    """
    Get atom number, time and iteration from multiple snapshots.

    Args:
        snapshots (list[:class:`.Snap`]): snapshots with the atomic information

    Returns:
        (tuple): tuple containing:

            - list[int]: number of atoms in each snapshot
            - list[float]: time in simulation of the snapshots
            - list[int]: iteration in simulation of the snapshots
    """
    atoms = []
    times = []
    iterations = []
    # loop through snapshots and save information to list
    for i in range(len(snapshots)):
        atoms.append(len(snapshots[i].atoms))
        times.append(snapshots[i].time)
        iterations.append(snapshots[i].iter)
    return atoms, times, iterations
10f2600c354bbf0126e7a5127d4ac348a3190b82
74,012
import hashlib


def generateCbsdReferenceId(fcc_id, serial_number):
    """creates encoded cbsd_reference_id using sha1 with fcc_id and serial
    number of cbsd.
    """
    return str(fcc_id + '/' + str(hashlib.sha1(serial_number).hexdigest())).encode('utf-8')
aa82c669d1e73e09fa878fab7d73198158e4e72e
74,014
def make_generic_usage_message(doc):
    """Construct generic usage error

    :param doc: Usage documentation for program
    :type doc: str
    :returns: Generic usage error
    :rtype: str
    """
    return 'Unknown option\n{}'.format(doc)
dfa60740a3504440ba6a58f96a464b76b4fd9f95
74,015
def _getAction(subAssignments):
    """
    Given a group of assignments with the same odID, find if this group is
    stationary/vertical/move. Each will be treated differently in cesium and
    folium.

    Parameters
    ----------
    subAssignments: :ref:`Assignments`
        An assignment dataframe with the same odID

    Return
    ------
    string
        An enumerated string, output can be 'stationary'/'vertical'/'move'
    """
    # if it is static or vertical, there should (must) be one row for the corresponding `odID`
    if (len(subAssignments) == 1):
        if (subAssignments.iloc[0]['startLat'] == subAssignments.iloc[0]['endLat']
                and subAssignments.iloc[0]['startLon'] == subAssignments.iloc[0]['endLon']
                and subAssignments.iloc[0]['startAltMeters'] == subAssignments.iloc[0]['endAltMeters']):
            action = "stationary"
        elif (subAssignments.iloc[0]['startLat'] == subAssignments.iloc[0]['endLat']
                and subAssignments.iloc[0]['startLon'] == subAssignments.iloc[0]['endLon']
                and subAssignments.iloc[0]['startAltMeters'] != subAssignments.iloc[0]['endAltMeters']):
            action = "vertical"
        else:
            action = "move"
    else:
        action = "move"
    return action
8d49bbdda4345a282d9cdb9bc026108be6acfc97
74,018
def create_sudoku(filename):
    """
    Creates a list from an input file of integers.

    Exits the application if the wrong format is given as input. A correct
    input file consists of integer values separated by spaces, as instructed
    per assignment.

    Parameters:
        filename (string): Filename that holds the sudoku.

    Returns:
        list: A list containing integer values, representing the sudoku.
    """
    sudoku = []
    with open(filename) as f:
        try:
            [sudoku.append([int(x) for x in line.split()]) for line in f]
        except ValueError:
            exit("Wrong input.. expecting integer values.")
    if not sudoku:
        exit("Empty file.. baby Yoda disapproves this input.")
    return sudoku
03a374fb4e589a12ce871d69237e66ba1b499ce5
74,021
import time
import random
import math


def slow_prime_check(number):
    """This function is an intentionally slow and simple prime number check."""
    # average wait time ~50ms.
    time.sleep(100 / 1000 * random.random())
    for i in range(2, int(math.sqrt(number) + 1)):
        if number % i == 0:
            return False
    return True
2440ce2d07eed954ce30b69f2bf29230b19f3506
74,025
def _strip_prefix(path, prefix):
    """Strip prefix if matched exactly."""
    if path.startswith(prefix):
        path = path[len(prefix):]
    return path
5b164b07a384dba038fc48ba0381c3be73d05ea5
74,026
def make_reg_05h_byte(channel: int) -> int:
    """
    Make the byte for REG2 (channel)

    Channel control (CH) 0-83. 84 channels in total
    18 default for SX1262, 23 default for SX1268
    850.125 + CH * 1MHz. Default 868.125MHz (SX1262),
    410.125 + CH * 1MHz. Default 433.125MHz (SX1268)

    :param channel: The channel.
    :return: The channel / value for REG2.
    """
    if 0 <= channel <= 83:
        return channel
    else:
        raise RuntimeError(
            f"Invalid channel, channel must be between 0-83, but was {channel}."
        )
b03fe31af5c3708c069a8eeb62db970f2af7667f
74,034
def generate_database_info(inputs, targets, allow_unfound=False):
    """
    Construct db info from input nodes and target nodes.

    Builds a list of the db names for the nodes.

    :param inputs: list of input nodes
    :param targets: list of target nodes
    :param allow_unfound: don't check if names are valid
    :return:
    """
    db_info = {
        "inputs": [i.db_name for i in inputs],
        "targets": [i.db_name for i in targets],
    }
    # Allows `None` to pass through.
    if allow_unfound:
        return db_info
    # If none of the names was `None`, return.
    if not any(name is None for row in db_info.values() for name in row):
        return db_info
    # Else, we need to raise an error.
    missing_inputs = [i for i, idb in zip(inputs, db_info["inputs"]) if idb is None]
    missing_targets = [i for i, idb in zip(targets, db_info["targets"]) if idb is None]
    msg = ""
    if missing_inputs:
        msg += "Missing inputs: {}\n".format(missing_inputs)
    if missing_targets:
        msg += "Missing targets: {}\n".format(missing_targets)
    raise ValueError(
        "Required quantities for the graph inputs or targets are not mapped "
        "to a db name:\n{}".format(msg)
    )
25c0a82a4cd87c56b914057d8ab102ddb3ca5531
74,038
import torch


def optimal_symmetric_circulant_precond_column(
    col_toeplitz: torch.Tensor,
) -> torch.Tensor:
    """
    Compute the first column of the circulant matrix closest to the Toeplitz
    matrix with the provided first column, in terms of the Frobenius norm.
    """
    n = col_toeplitz.shape[-1]
    b = torch.arange(1, n, device=col_toeplitz.device) / float(n)
    w = col_toeplitz[..., 1:] * b.flip(dims=(-1,))
    col_circ = torch.cat((col_toeplitz[..., :1], w + w.flip(dims=(-1,))), dim=-1)
    return col_circ
616cc4663207579b5dd130fa817d0def9c133ded
74,040
from typing import Dict


def map_values(d: Dict, fn) -> Dict:
    """
    Run `fn` over values of `d` and return new dictionary.
    """
    return {k: fn(v) for k, v in d.items()}
d48d59d5754a3cfa2a8de71635f31808dd1a617c
74,041
import json


def json_decode(s):
    """
    Decodes a json string to a dict.
    """
    if not s:
        return None
    return json.loads(s)
9549e6a0f6615fcbb8d7f5ec0687ac1bc3626079
74,042
def default_rule(pk):
    """Set default operator."""
    return pk.operators["2.7.1.a"]
15f236737340e714b75173ed2ad323ea434bf385
74,043
import requests


def list_owned_scenes(user_id, jwt):
    """ List all owned scenes

    Args:
        user_id: rfclient.owner
        jwt: rfclient.api.api_token

    Returns:
        list of scenes
    """

    def make_request(headers, page, page_size=1000):
        return requests.get(
            'https://{host}/api/scenes'.format(host=rf_host),
            headers=headers,
            params={
                'pageSize': page_size,
                'page': page,
                'owner': user_id
            }
        )

    headers = {'Authorization': jwt}
    rf_host = 'app.rasterfoundry.com'
    page = 0
    resp = make_request(headers, page)
    resp.raise_for_status()
    js = resp.json()
    scenes = js['results']
    has_next = js['hasNext']
    while has_next:
        page += 1
        resp = make_request(headers, page)
        resp.raise_for_status()
        js = resp.json()
        scenes.extend(js['results'])
        has_next = js['hasNext']
    return scenes
dfdbd6fe80b3a9ad6da1e7f7bd9d0df690239245
74,047
def _unique_parnames(names):
    """
    Given a list of parnames, including possible duplicates, returns a new
    list of parnames with duplicates prepended by one or more underscores to
    make them unique. This is also case insensitive.
    """
    upper_names = set()
    unique_names = []
    for name in names:
        name_upper = name.upper()
        while name_upper in upper_names:
            name = '_' + name
            name_upper = '_' + name_upper
        unique_names.append(name)
        upper_names.add(name_upper)
    return unique_names
fd5359b524c56ec2fddb1e002213cb826c608444
74,048
def vector_addition_cpu(size, scalar, in_a, in_b):
    """VectorAddition CPU implementation."""
    return [in_a[i] + in_b[i] + scalar for i in range(size)]
ca5c00887b0f19c60eebcd39d623e654928806d3
74,057
from pathlib import Path


def get_rel_path_len(data_root: str, fpath: str) -> int:
    """Determine the length of a relative path (depth in file system tree).

    Args:
        data_root
        fpath

    Returns:
        rel_path_len: integer
    """
    data_root_len = len(Path(data_root).parts)
    path_len = len(Path(fpath).parts)
    rel_path_len = path_len - data_root_len
    return rel_path_len
0f62fecd1993f8938b0c0305be7799185ecf6c55
74,058
def get_min(df):
    """Get minimum voltage of ECG recording

    Args:
        df (DataFrame): DataFrame with ECG data

    Returns:
        float: minimum voltage found in ECG recording
    """
    min_val = df['voltage'].min()
    return min_val
ea453111d2d06ac640190b8d17653668784de5a6
74,060
def utf8_encoding(t):
    """UTF-8 Encoding.

    Note that the hex values are shown with a '%'.

    >>> utf8_encoding("A")
    'A'
    >>> utf8_encoding("Año")
    'A%C3%B1o'
    """
    # Percent-encode each UTF-8 byte above the ASCII range, as in the example above.
    return "".join("%%%X" % b if b > 127 else chr(b) for b in t.encode("utf-8"))
274a5515c9239ef909ce0f112aa7902a0a5384d6
74,064
import csv


def extract_scan_params_csv(scan_params_csv):
    """
    Function to extract the site-based scan parameters from a csv file
    and return a dictionary of their values

    Parameters
    ----------
    scan_params_csv : string
        filepath to the scan parameters csv file

    Returns
    -------
    site_dict : dictionary
        a dictionary where site names are the keys and the scan parameters
        for that site are the values stored as a dictionary
    """
    # Init variables
    csv_open = open(scan_params_csv, 'r')
    site_dict = {}

    # Init csv dictionary reader
    reader = csv.DictReader(csv_open)

    placeholders = ['None', 'NONE', 'none', 'All', 'ALL', 'all', '', ' ']

    keys = {"TR (seconds)": "TR",
            "TE (seconds)": "TE",
            "Reference (slice no)": "reference",
            "Acquisition (pattern)": "acquisition",
            "FirstTR (start volume index)": "first_TR",
            "LastTR (final volume index)": "last_TR"}

    # Iterate through the csv and pull in parameters
    for dict_row in reader:
        if dict_row['Site'] in placeholders:
            site = 'All'
        else:
            site = dict_row['Site']

        sub = "All"
        if "Participant" in dict_row.keys():
            if dict_row["Participant"] not in placeholders:
                sub = dict_row["Participant"]

        ses = 'All'
        if 'Session' in dict_row.keys():
            if dict_row['Session'] not in placeholders:
                ses = dict_row['Session']

        if ses != 'All':
            # for session-specific scan parameters
            if site not in site_dict.keys():
                site_dict[site] = {}
            if sub not in site_dict[site].keys():
                site_dict[site][sub] = {}

            site_dict[site][sub][ses] = {keys[key]: val
                                         for key, val in dict_row.items()
                                         if key != 'Site' and key != 'Participant'
                                         and key != 'Session' and key != 'Series'}

            # Assumes all other fields are formatted properly, but TR might
            # not be
            #site_dict[site][sub][ses]['tr'] = \
            #    site_dict[site][sub][ses].pop('tr (seconds)')

        elif sub != "All":
            # participant-specific scan parameters
            if site not in site_dict.keys():
                site_dict[site] = {}
            if sub not in site_dict[site].keys():
                site_dict[site][sub] = {}

            site_dict[site][sub][ses] = {keys[key]: val
                                         for key, val in dict_row.items()
                                         if key != 'Site' and key != 'Participant'
                                         and key != 'Session' and key != 'Series'}

            # Assumes all other fields are formatted properly, but TR might
            # not be
            #site_dict[site][sub][ses]['tr'] = \
            #    site_dict[site][sub][ses].pop('tr (seconds)')

        else:
            # site-specific scan parameters only
            if site not in site_dict.keys():
                site_dict[site] = {}
            if sub not in site_dict[site].keys():
                site_dict[site][sub] = {}

            site_dict[site][sub][ses] = {keys[key]: val
                                         for key, val in dict_row.items()
                                         if key != 'Site' and key != 'Participant'
                                         and key != 'Session' and key != 'Series'}

            # Assumes all other fields are formatted properly, but TR might
            # not be
            #site_dict[site][sub][ses]['tr'] = \
            #    site_dict[site][sub][ses].pop('tr (seconds)')

    return site_dict
6eca6f69612767d5be60b768190572ef1deb1d32
74,066
def dump_deb822(fields):
    """
    Format the given Debian control fields as text.

    :param fields: The control fields to dump (a dictionary).
    :returns: A Unicode string containing the formatted control fields.
    """
    lines = []
    for key, value in fields.items():
        # Check for multi-line values.
        if "\n" in value:
            input_lines = value.splitlines()
            output_lines = [input_lines.pop(0)]
            for line in input_lines:
                if line and not line.isspace():
                    # Make sure continuation lines are indented.
                    output_lines.append(u" " + line)
                else:
                    # Encode empty continuation lines as a dot (indented).
                    output_lines.append(u" .")
            value = u"\n".join(output_lines)
        lines.append(u"%s: %s\n" % (key, value))
    return u"".join(lines)
2673a49e21ec39c65634caec7a2880b24178ee64
74,067
def prfar(self, lab="", option="", phi1="", phi2="", nph1="", theta1="",
          theta2="", ntheta="", val1="", val2="", val3="", **kwargs):
    """Prints pressure far fields and far field parameters.

    APDL Command: PRFAR

    Parameters
    ----------
    lab
        Parameters to print:

        PRES - Acoustic parameters

        PROT - Acoustic parameters with the y-axis rotated extrusion

    option
        Print option, based on the specified print parameter type:

    phi1, phi2
        Starting and ending φ angles (degrees) in the spherical coordinate
        system. Defaults to 0.

    nphi
        Number of divisions between the starting and ending φ angles for
        data computations. Defaults to 0.

    theta1, theta2
        Starting and ending θ angles (degrees) in the spherical coordinate
        system. Defaults to 0 in 3-D and 90 in 2-D.

    ntheta
        Number of divisions between the starting and ending θ angles for
        data computations. Defaults to 0.

    val1
        Radius of the sphere surface. Used only when Option = SUMC, PHSC,
        SPLC, SPAC, PSCT, or TSCT.

    val2
        When Option = SPLC or SPAC: Reference rms sound pressure. Defaults
        to 2x10-5 Pa.

    val3
        When Lab = PRES: Thickness of 2-D model extrusion in the z
        direction (no default).

    Notes
    -----
    The PRFAR command prints pressure far fields and far field parameters
    as determined by the equivalent source principle. Use this command to
    print pressure and acoustic parameters. See the HFSYM command for the
    model symmetry and the HFANG command for spatial radiation angles.

    To retrieve saved equivalent source data, issue the
    SET,Lstep,Sbstep,,REAL command.

    Distributed ANSYS Restriction: This command is not supported in
    Distributed ANSYS.
    """
    command = f"PRFAR,{lab},{option},{phi1},{phi2},{nph1},{theta1},{theta2},{ntheta},{val1},{val2},{val3}"
    return self.run(command, **kwargs)
553fdc4866cb20ab193e2b7c09a2be04156767b2
74,070
def williams_correction(n, a, G):
    """Return the Williams corrected G statistic for G goodness of fit test.

    For discussion read [1]_ pg 698-699.

    Parameters
    ----------
    n : int
        Sum of observed frequencies.
    a : int
        Number of groups that are being compared.
    G : float
        Uncorrected G statistic

    Notes
    -----
    The equation given in this function is simplified from [1]_

        q = 1. + (a**2 - 1)/(6.*n*a - 6.*n) == 1. + (a+1.)/(6.*n)

    References
    ----------
    .. [1] Sokal and Rohlf. "Biometry: The Principles and Practices of
       Statistics in Biological Research". ISBN: 978-0716724117
    """
    q = 1. + (a + 1.) / (6. * n)
    return G / q
bdc1e0c88608d047989950787b66af0439f2b266
74,072
def convert_value_to_option_tuple(value, helpstr=None):
    """Convert a value to a tuple of the form expected by `Link.options`

    Parameters
    ----------
    value :
        The value we are converting
    helpstr : str
        The help string that will be associated to this option.

    Returns
    -------
    option_info : tuple
        A 3-tuple with default value, helpstring and type for this
        particular option.
    """
    if helpstr is None:
        helpstr = "Unknown"
    return (value, helpstr, type(value))
96a398859690b288d0e134feed266bb26cd4a038
74,073
def countBits(n):
    """ count_bits == PEP8 (forced mixedCase by CodeWars) """
    return '{:b}'.format(n).count('1')
2b86a72a2f062384f7dd7552e90fa115730ad611
74,081
def parse_flags(raw_flags, single_dash=False):
    """Return a list of flags.

    If *single_dash* is False, concatenated flags will be split into
    individual flags (eg. '-la' -> '-l', '-a').
    """
    flags = []
    for flag in raw_flags:
        if flag.startswith("--") or single_dash:
            flags.append(flag)
        elif flag.startswith("-"):
            for char in flag[1:]:
                flags.append("-" + char)
    return flags
9e70d625f6c5d2ace4c0669b5e32b3e5f27a42b8
74,085
def lower(text: str):
    """
    Converts text to lowercase as part of text preprocessing pipeline.
    """
    return text.lower()
dbcb0f3d459cbda2d02f9075b0d0cbdec64bcd50
74,088
def integer_divisors(n):
    """Returns a list of all positive integer divisors of n."""
    n = abs(n)
    r = []
    for i in range(1, n // 2 + 1):
        if n % i == 0:
            r.append(i)
    r.append(n)
    return r
cce352b92638a3f5277734920d3cc9d03b8957c4
74,090
def get_namespace(type_or_context):
    """
    Utility function to extract the namespace from a type (@odata.type) or
    context (@odata.context)

    :param type_or_context: the type or context value
    :type type_or_context: str

    :return: the namespace
    """
    if '#' in type_or_context:
        type_or_context = type_or_context.rsplit('#', 1)[1]
    return type_or_context.rsplit('.', 1)[0]
6b84732b23c5e09731927a75b4aeeda8752750b0
74,093
def create_blocks(message, download_link=''):
    """
    Create blocks for the main message, a divider, and context that links
    to Shipyard. If a download link is provided, creates a button block to
    immediately start that download.
    For more information: https://api.slack.com/block-kit/building
    """
    message_section = {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": message,
            "verbatim": True
        }
    }
    divider_section = {
        "type": "divider"
    }
    if download_link != '':
        download_section = {
            "type": "actions",
            "elements": [
                {
                    "type": "button",
                    "text": {
                        "type": "plain_text",
                        "text": "Download File"
                    },
                    "value": "file_download",
                    "url": download_link,
                    "style": "primary"
                }
            ]
        }
        blocks = [message_section, download_section, divider_section]
    else:
        blocks = [message_section, divider_section]
    return blocks
0f1a9bb9a739958fae395500fd025efc848cca34
74,097
def convert_volts_to_amps(adc_volts):
    """
    * f(0.5V) = -20A
    * f(4.5V) = 20A
    * f(U) = 10*U - 25 [A]
    """
    return 10 * adc_volts - 25
1da610110b578cc80bd4495600d2e98b9a254029
74,098
import time


def baseline_creation_payload(catalog_identifier: int,
                              repo_identifier: int,
                              device_details: dict) -> dict:
    """
    Creates the JSON payload required to create the baseline

    Args:
        catalog_identifier: The ID for the target catalog
        repo_identifier: The ID of the repository associated with the catalog
        device_details: A dictionary containing the device type IDs and the
            device type names

    Returns:
        A dictionary with the payload required to create a new baseline in OME
    """
    baseline_payload = {'Name': "Dell baseline update" + time.strftime(":%Y:%m:%d-%H:%M:%S"),
                        'Description': "Baseline update job launched via the OME API",
                        'CatalogId': catalog_identifier,
                        'RepositoryId': repo_identifier,
                        'DowngradeEnabled': True,
                        'Is64Bit': True,
                        'Targets': []}

    for target_id, target_type_dictionary in device_details.items():
        baseline_payload['Targets'].append({
            "Id": target_id,
            "Type": target_type_dictionary
        })

    return baseline_payload
61ae9de5e2ffb7824e196f22d8c1f14e2590408b
74,100
def execute_http_call(session, url):
    """Executes an http call with the given session and url"""
    return session.get(url)
4f3c9cd6773814e02172a7719e866dd868491146
74,106
def query_doctypes(doctypes):
    """ES query for specified doctypes

    Args:
        doctypes (list)

    Returns:
        ES query (JSON)
    """
    return {"query": {"terms": {"doctype": doctypes}}}
d69f689ac762569558bd90ddabbf0d135fcf7bdf
74,108
def window_maker(list_name, filled_list, window_size, slide_size):
    """Make a bed file of sliding windows."""
    for scaffold, start, end in filled_list:
        width = window_size
        step = slide_size
        if width <= end:
            list_name.append((scaffold, start, width))
        else:
            list_name.append((scaffold, start, end))
        while width <= end:
            start += step
            width += step
            if width >= end:
                list_name.append((scaffold, start, end))
            else:
                list_name.append((scaffold, start, width))
    return list_name
02f5f293b2ba49efdd7c31a955776a6df5e19f42
74,110
def _distribution(gtfs, table, column):
    """Count occurrences of values AND return it as a string.

    Example return value: '1:5 2:15'"""
    cur = gtfs.conn.cursor()
    cur.execute('SELECT {column}, count(*) '
                'FROM {table} GROUP BY {column} '
                'ORDER BY {column}'.format(column=column, table=table))
    return ' '.join('%s:%s' % (t, c) for t, c in cur)
cb00d98d2222b763dd501c6b52fef367f622d929
74,113