Dataset columns: content (string, 35 to 416k characters), sha1 (string, 40 characters), id (int64, 0 to 710k).
def _print_behind_ahead(behind, ahead):
    """Print Behind Ahead"""
    numcommits = ""
    if behind:
        numcommits += "-" + str(behind)
    if behind and ahead:
        numcommits += "/"
    if ahead:
        numcommits += "+" + str(ahead)
    return numcommits
816e761a7d31d41b13b32db4eb01e7ee8040087b
698,845
def precprint(prec_type, prec_cap, p):
    """String describing the precision mode on a p-adic ring or field.

    EXAMPLES::

        sage: from sage.rings.padics.misc import precprint
        sage: precprint('capped-rel', 12, 2)
        'with capped relative precision 12'
        sage: precprint('capped-abs', 11, 3)
        'with capped absolute precision 11'
        sage: precprint('floating-point', 1234, 5)
        'with floating precision 1234'
        sage: precprint('fixed-mod', 1, 17)
        'of fixed modulus 17^1'
    """
    precD = {'capped-rel': 'with capped relative precision %s' % prec_cap,
             'capped-abs': 'with capped absolute precision %s' % prec_cap,
             'floating-point': 'with floating precision %s' % prec_cap,
             'fixed-mod': 'of fixed modulus %s^%s' % (p, prec_cap),
             'lattice-cap': 'with lattice-cap precision',
             'lattice-float': 'with lattice-float precision',
             'relaxed': 'handled with relaxed arithmetics'}
    return precD[prec_type]
b3eab5f0fd133ead8c413aded650d839a2c818b9
698,846
def zooms_string(z1, z2):
    """Return 'zoom N' or 'zooms N-M'."""
    if z2 != z1:
        return "zooms {}-{}".format(min(z1, z2), max(z1, z2))
    else:
        return "zoom {}".format(z1)
2e433472d721767cfc152b75a74f9976ba340f7a
698,848
def collect(group, p, barrier_token):
    """Collect partitions from partd, yield dataframes"""
    return p.get(group)
d3824864defbdec8b1712483b7b1e41ef7941268
698,849
def ret_schema():
    """Defines dictionary of schema."""
    schema = {
        ('path to directory', str, 'path'): ('path', str, '/apps/homefs1/.*'),
        ('dict of stocks', str, 'stocks'): {
            ('ticker', str, '[A-Z]*', '+'): {
                ('stock price', str, 'price'): ('price', float),
                ('company name', str, 'name'): ('name', str)
            }
        },
        ('magic number', str, 'magic_number'): ('number', int, 42)
    }
    return schema
83c66bc000a471ff0787eb98a1a8b4146b628911
698,850
import os


def files_and_folders(dir_path="."):
    """Return a dict containing a sorted tuple of files and a sorted tuple of folders."""
    f_and_f = os.listdir(dir_path)
    # Join with dir_path so the check also works for directories other than the cwd.
    folders = [f for f in f_and_f if os.path.isdir(os.path.join(dir_path, f))]
    files = set(f_and_f) - set(folders)
    return {
        "files": tuple(sorted(files, key=str.lower)),
        "folders": tuple(sorted(folders, key=str.lower)),
    }
3858d603acd208de0e81667fcd8ad79a9ea590e7
698,851
def _imag_2d_func(x, y, func):
    """Return imag part of a 2d function."""
    return func(x, y).imag
b95f64fb2bca54db89c85349ed3ca961d2b47b4c
698,852
def save_config(bkp_config, config_file, for_restart=False):
    """Save the configuration to the file object passed as a parameter."""
    print("bucket =", bkp_config["bucket"], file=config_file)
    print("dirs = ", ";".join(bkp_config["dirs"]), file=config_file)
    print("exclude_files = ", bkp_config["exclude_files"], file=config_file)
    print("exclude_dirs = ", ";".join(bkp_config["exclude_dirs"]), file=config_file)
    print("log_email = ", bkp_config["log_email"], file=config_file)
    print("error_email = ", bkp_config["error_email"], file=config_file)
    print("threads = ", bkp_config["threads"], file=config_file)
    print("ssh_username = ", bkp_config["ssh_username"], file=config_file)
    print("ssh_password = ", bkp_config["ssh_password"], file=config_file)
    if for_restart:
        print("start_time = ", bkp_config["start_time"], file=config_file)
        print("end_time = ", bkp_config["end_time"], file=config_file)
    print("end_config = True", file=config_file)
    return 0
a4894cf29a44200442c6fb32b59eab9aefb5352c
698,853
def index(l, val, default=-1):
    """Find the index of val in the list l.

    :param l: list to search
    :param val: value to find the index of
    :param default: default value to return when val is not in the list
    :return: index of val in the list, or default
    """
    try:
        getattr(l, 'index')
    except AttributeError:
        raise TypeError("input data doesn't support index")
    if val not in l:
        return default
    else:
        return l.index(val)
6c953b278cb65451879bbde19dfd4392a034b36f
698,854
from collections import OrderedDict


def dict_order(_dict):
    """Controls the print order of the dictionary."""
    new_dict = OrderedDict()
    new_dict['name'] = _dict.get('name')
    new_dict['type'] = _dict.get('type')
    new_dict['conditions'] = _dict.get('conditions')
    new_dict['scopes'] = _dict.get('scopes')
    new_dict['values'] = _dict.get('values')
    return new_dict
a4ed107403289093065ce169d549dda88b5dee5c
698,856
def batch_directions(z, y, Q, path_sizes, step_vals, subtract_projection=True):
    """
    This function takes an input batch of z vectors (and corresponding class label vectors y)
    and applies the directions in Q to the batch of z vectors.

    :param z: (N, nz) tensor of base random noise to manipulate with directions
    :param y: (N, D) tensor of class vectors
    :param Q: (ndirs, nz) matrix of z-space directions
    :param path_sizes: (ndirs,) tensor indicating how far to travel in each direction
    :param step_vals: (interp_steps,) tensor controlling the granularity of the interpolation
    :param subtract_projection: bool, whether or not to "remove" each direction from the sampled z vectors
    :return: z: (N * ndirs * interp_steps, nz) tensor, y: (N * ndirs * interp_steps) tensor
             containing all z's and y's needed to create the visualizations
    """
    interp_steps = step_vals.size(0)
    N, nz = z.size()
    ndirs = Q.size(0)
    z = z.view(1, N, 1, nz).repeat(ndirs, 1, interp_steps, 1)  # .view(N * ndirs * interp_steps, nz)
    if subtract_projection:
        # The projection will be the same across the interp_steps dimension,
        # so we can just pick-out the first step:
        z_proj = z[:, :, 0, :].view(ndirs * N, nz)
        Q_proj = Q.repeat_interleave(N, dim=0)
        projection = (z_proj * Q_proj).sum(dim=1, keepdim=True) / Q_proj.pow(2).sum(dim=1, keepdim=True) * Q_proj
        z -= projection.view(ndirs, N, 1, nz)
    path_sizes = path_sizes.view(ndirs, 1, 1, 1)
    step_vals = step_vals.view(1, 1, interp_steps, 1)
    Q = Q.view(ndirs, 1, 1, nz)
    z += step_vals * path_sizes * Q
    z = z.view(N * ndirs * interp_steps, nz)
    y = y.repeat_interleave(interp_steps, dim=0).repeat(ndirs, 1)
    return z, y
4adeab5b8a9ded7b4a10affbb5427435dbd5a599
698,857
def update_tax_nodes(nodes, tax):
    """
    nodes can be a list of strings (taxids or names) or a list of tuples of (rank, taxid/name).
    Return a dictionary mapping nodes to updated nodes (or None).
    First look up the id; if nothing is found, look up by unique name.
    """
    updated_nodes = {}
    for node in nodes:
        if isinstance(node, tuple):
            r = node[0]
            n = node[1]
        else:
            r = None
            n = node
        # Either returns the same node, an updated one, or tax.undefined_node (None)
        updated_taxid = tax.latest(n)
        if updated_taxid:
            # Assign updated or same taxid
            updated_nodes[node] = updated_taxid
        else:
            names = tax.search_name(n, rank=r, exact=True)
            # Assign taxid only if a unique name was found
            if names and len(names) == 1:
                updated_nodes[node] = names[0]
            else:
                updated_nodes[node] = tax.undefined_node
    return updated_nodes
7d292ddf43e39280e699f76765079799f61dc0dc
698,858
def value_formating():
    """Format a value with the underlying __format__ function."""
    happy_value = type('HappyFormater', (object,), {
        '__format__': lambda _, template: template.replace('lol', '(^_^)')})()
    return "{:lol}".format(happy_value)
a637bace1337d73658eb667bc3eafc866e5cd9ba
698,859
import json


def _CanParseJSON(my_json):
    """Returns True if the input can be parsed as JSON, False otherwise."""
    try:
        json.loads(my_json)
    except ValueError:
        return False
    return True
c8602b9e9544a70102135bd875d19c66664bdefc
698,860
import csv


def write_csv(f, items):
    """
    :type items: list[OrderedDict]
    """
    if len(items) == 0:
        # nothing to write
        return f

    fieldnames = items[0].keys()  # this works as expected because we use OrderedDicts
    writer = csv.DictWriter(f, fieldnames)
    writer.writeheader()
    for item in items:
        writer.writerow(item)
    return f
1cabe109bb42e892cc85b40bc6c6930987c042ee
698,861
import json


def get_brawl_cookie(request):
    """Get brawl cookie"""
    return json.loads(request.cookies.get('brawl') or '[]')
e9264476a44bebfa9a43106678d1653ba86e555d
698,862
import torch


def label_smoothed_nll_loss(
    lprobs: torch.Tensor, target: torch.Tensor, epsilon: float,
    ignore_index=None, reduction="mean", dim=-1
) -> torch.Tensor:
    """
    Source: https://github.com/pytorch/fairseq/blob/master/fairseq/criterions/label_smoothed_cross_entropy.py

    :param lprobs: Log-probabilities of predictions (e.g. after log_softmax)
    :param target:
    :param epsilon:
    :param ignore_index:
    :param reduction:
    :return:
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(dim)

    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        target = target.masked_fill(pad_mask, 0)
        nll_loss = -lprobs.gather(dim=dim, index=target)
        smooth_loss = -lprobs.sum(dim=dim, keepdim=True)
        # nll_loss.masked_fill_(pad_mask, 0.0)
        # smooth_loss.masked_fill_(pad_mask, 0.0)
        nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
        smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    else:
        nll_loss = -lprobs.gather(dim=dim, index=target)
        smooth_loss = -lprobs.sum(dim=dim, keepdim=True)
        nll_loss = nll_loss.squeeze(dim)
        smooth_loss = smooth_loss.squeeze(dim)

    if reduction == "sum":
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
    if reduction == "mean":
        nll_loss = nll_loss.mean()
        smooth_loss = smooth_loss.mean()

    eps_i = epsilon / lprobs.size(dim)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss
5c85297cda97746413c747fe10fae1ecaa16dea9
698,863
def select_latest(engine, ticker, time_x):
    """
    Get the most recent record at or before time_x.

    :param engine:
    :param ticker:
    :param time_x:
    :return:
    """
    sql = ('select price,adj_factor from security_lookup_cn_wave_10 '
           'where ticker="{}" and time_x<="{}" ORDER BY time_x desc limit 1').format(ticker, time_x)
    rs_set = engine.execute(sql)
    price = None
    adj_factor = None
    for p, f in rs_set:
        price = p
        adj_factor = f
    return price, adj_factor
d33d817afdf58407c9e4ecb66ed9f43e9de6128a
698,864
from typing import Union
import json


def prettify(data: Union[list, dict]) -> str:
    """
    Return input data structure (list or dict) as a prettified JSON-formatted string.
    The default is set here to stringify values like datetime values.
    """
    return json.dumps(data, indent=4, sort_keys=True, default=str)
800ef5d3f7a5765bca6fe42fc32e40da5a1cc398
698,865
def power_series(z, cs):
    """
    Returns cs[0] + cs[1] * z + cs[2] * z ** 2 + ... + cs[-1] * z ** (len(cs) - 1),
    evaluated with Horner's method.
    """
    s = cs[-1]
    for c in reversed(cs[:-1]):
        s *= z
        s += c
    return s
a9fe54d8a4bc15385f5c1da61eb1696b43a470d4
698,866
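A quick sanity check (not from the source) for power_series above, which evaluates the polynomial with Horner's method:

assert power_series(2, [1, 2, 3]) == 17      # 1 + 2*2 + 3*2**2
assert power_series(0.5, [0, 1]) == 0.5      # 0 + 1*0.5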
def normalize(signal):
    """Restrict the range of a signal to the closed interval [-1.0, 1.0]."""
    # Divide by whichever extreme has the largest magnitude.
    normalized_signal = signal / max(signal.max(), signal.min(), key=abs)
    return normalized_signal
d86fe058302ee133e6318f5c0a3fa24430e7e24c
698,867
from pathlib import Path
import os
import hashlib


def pseudo_hash(path: Path, string: str = "") -> str:
    """Compute a pseudo hash based on:

    - the file size
    - the file's last modification date
    - a given string

    (A real hash is too long to compute for big files.)
    """
    string = "-".join([str(os.path.getsize(path)), str(os.path.getmtime(path)), string])
    return str(hashlib.md5(bytes(string, "utf-8")).hexdigest())
10bdf79dfcb78bead4ec8f9b165496b977ce2e81
698,868
def remove_non_ascii(s):
    """
    Remove non-ascii characters from a string. Needed when support for
    non-ASCII is not available.

    Args:
        s (str): Input string

    Returns:
        String with all non-ascii characters removed.
    """
    return "".join(i for i in s if ord(i) < 128)
0a215ffa1841667d7dd7d9c1d9a12bbe84e2cbcd
698,869
def generate_streak_matrix(weeks_data):
    """
    Create the streak matrix:
    1 if the user committed,
    0 if the user hasn't committed,
    -1 to store null values.

    :param weeks_data: week-wise contribution data of the user
    :return: matrix containing the values of the contribution streak
    """
    rows, columns = 7, len(weeks_data)
    streak_matrix = [[-1 for i in range(columns)] for y in range(rows)]
    i = 0
    for week in weeks_data:
        days = week['contributionDays']
        for day in days:
            if day['contributionCount'] > 0:
                streak_matrix[day['weekday']][i] = 1
            elif day['contributionCount'] == 0:
                streak_matrix[day['weekday']][i] = 0
        i += 1
    return streak_matrix
e6cc9e3c96aebb20ca8af11ae19e7f86aab96d62
698,870
def any_in_seq(search_vals, seq):
    """Check if any value in search_vals is in the sequence seq."""
    for v in search_vals:
        if v in seq:
            return True
    return False
d314355aef2cba89833394ff6aeacf675daec7e6
698,871
def date2season(dt):
    """Converts a datetime to a season like 2008/2009."""
    y = dt.year
    if dt.month <= 6:
        return "%s/%s" % (y - 1, y)
    else:
        return "%s/%s" % (y, y + 1)
e8653264fb682c0c2e5ceeb0548f513e0af0eb33
698,872
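A hedged usage sketch for date2season above; dates in the first half of the calendar year belong to the season that started the previous year:

from datetime import date

assert date2season(date(2009, 3, 1)) == "2008/2009"   # Jan-Jun: season started the year before
assert date2season(date(2008, 10, 1)) == "2008/2009"  # Jul-Dec: season starts this year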
def limit(self, *args, **kwargs):
    """
    Calls :meth:`.Table.limit` on each table in the TableSet.
    """
    return self._proxy('limit', *args, **kwargs)
40b22ac87d582d6e245bb3ed2ba6d60766ec0870
698,874
import json


def get_json_file(path):
    """Load a JSON file from disk."""
    with open(path) as f:
        data = json.load(f)
    return data
acb099868c4baeb59ead76bb20018af477720609
698,875
def can_use_slay(user_id):
    """Return true if the user can use the command "slay".

    Characters that can slay:
    - All characters (to allow for fake claiming)
    """
    return True
f8be6bd59c1c8bb8acb12d164c7b452c6ede1308
698,876
def get_emojis():
    """Retrieves hundreds of emoji glyphs derived from the UTF-8 character table.

    Examples:
        >>> moji = get_emojis()
        >>> moji[1540:1547]
        '🤠 🤡 🤢 🤣'
    """
    emoji_list_1 = [chr(i) for i in range(127744, 127994)]
    emoji_list_2 = [chr(e) for e in range(128000, 128501)]
    remove_list_for_list_3 = [
        129394, 129399, 129400, 129401, 129443, 129444,
        129451, 129452, 129453, 129483, 129484,
    ]
    emoji_list_3 = [
        chr(e) for e in range(129293, 129536) if e not in remove_list_for_list_3
    ]
    agg_list = emoji_list_1 + emoji_list_2 + emoji_list_3
    one_space_sep_string = " ".join(agg_list)
    return one_space_sep_string
2ba1401ac5a2b326f56fc29fa91cd638e27c5426
698,877
def Floyd(G):
    """Returns the length of the shortest paths between all pairs of nodes
    (the Floyd-Warshall algorithm).

    Parameters
    ----------
    G : graph
        weighted graph

    Returns
    -------
    result_dict : dict
        the length of paths from all nodes to the remaining nodes

    Examples
    --------
    >>> Floyd(G)
    """
    adj = G.adj.copy()
    result_dict = {}
    for i in G:
        result_dict[i] = {}
    for i in G:
        temp_key = adj[i].keys()
        for j in G:
            if j in temp_key:
                result_dict[i][j] = adj[i][j].get("weight", 1)
            else:
                result_dict[i][j] = float("inf")
            if i == j:
                result_dict[i][i] = 0
    for k in G:
        for i in G:
            for j in G:
                temp = result_dict[i][k] + result_dict[k][j]
                if result_dict[i][j] > temp:
                    result_dict[i][j] = temp
    return result_dict
9fc9102225bf5273ff5a507b14ef2d8d679b2223
698,879
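A small worked example (an assumption, not from the source) for Floyd above; the function only needs node iteration and an .adj mapping, so a NetworkX graph works:

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([("a", "b", 1), ("b", "c", 2), ("a", "c", 5)])
dist = Floyd(G)
assert dist["a"]["c"] == 3  # a -> b -> c beats the direct edge of weight 5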
def format_datetime(session):
    """Convert a date or datetime object into a formatted string representation."""
    if session.data is not None:
        date_format = session.field.opts.date_format
        if date_format == 'iso8601':
            session.data = session.data.isoformat()
        else:
            session.data = session.data.strftime(date_format)
    return session.data
53a99843e47dde6b82cb48e77fd553bbf65dd646
698,881
def train_step(model, optimizer, loss_fn, conditions, true, out):
    """One training step.

    Args:
        model: the feed-forward network
        optimizer: the optimizer for the network
        loss_fn: the loss function
        conditions: the observing conditions used as inputs
        true: the true galaxy magnitudes used as inputs
        out: the ground truth output
    """
    optimizer.zero_grad()
    conditions.requires_grad_(True)
    true.requires_grad_(True)
    predout = model(conditions, true).squeeze()
    loss = loss_fn(predout, out)
    loss.backward()
    optimizer.step()
    return loss.item(), predout.data
8d9ba730582e1d7992bf2bd2f8359f4531392645
698,882
import torch


def get_concentrated_mask(class_weights, topk):
    """
    Returns a boolean mask indicating the categories with the top k largest
    probabilities, the indices of those categories, and a helper row-index
    tensor.

    Parameters
    ----------
    class_weights : torch.Tensor
        Array of class weights, with each row corresponding to a datapoint,
        each column corresponding to the probability of the datapoint
        belonging to that category
    topk : int
        the k in top-k

    Returns
    -------
    mask_topk : torch.Tensor
        Boolean array, same dimension as class_weights, with entry 1 if the
        corresponding class weight is in the top k for that observation
    topk_domain : torch.LongTensor
        Array specifying the indices of class_weights that correspond to
        the top-k observations
    seq_tensor : torch.LongTensor
        Row indices 0..N-1, used for advanced indexing alongside topk_domain
    """
    mask_topk = torch.zeros(class_weights.shape).to(class_weights.device)
    seq_tensor = torch.LongTensor([i for i in range(class_weights.shape[0])])

    if topk > 0:
        _, topk_domain = torch.topk(class_weights, topk)
        for i in range(topk):
            mask_topk[seq_tensor, topk_domain[:, i]] = 1
    else:
        topk_domain = None

    return mask_topk, topk_domain, seq_tensor
8663c6e4d868eb2100684132ef95c59eee9b3560
698,883
import os
import sys
import shutil


def main(loc=None, excluded=None, silent=False):
    """Sorts files into categories.

    Sorts each file (excluding itself), according to its extension, into
    folders created for each category. New files are moved, existing files
    are overwritten. A summary can be printed after all files are moved.

    Args:
        loc: A path to an existing folder to sort. (default os.getcwd())
        excluded: A list of filenames to be ignored. (default [this_filename])
        silent: Whether it should print the summary at the end

    Returns:
        A dict mapping each category to the files that were moved

    Raises:
        OSError: It wasn't possible to create a folder
        PermissionError: A file couldn't be moved or overwritten.
    """
    # Set default parameters (a mutable default argument would be shared
    # between calls, so `excluded` defaults to None).
    if loc is None:
        loc = os.getcwd()
    assert os.path.exists(loc)
    this_filename = os.path.split(sys.argv[0])[-1]
    if excluded is None:
        excluded = [this_filename]
    else:
        excluded = [this_filename] + excluded
    assert type(excluded) is list
    for default_exclude in ['desktop.ini']:
        if default_exclude not in excluded:
            excluded.append(default_exclude)

    # Initialize variables
    categories = {
        'gDesktop Plugins': {'gg', 'ggc'},
        'Torrents': {'torrent'},
        'Docs': {'doc', 'xls', 'ppt', 'mdb', 'pub', 'docx', 'pptx', 'xlsx',
                 'pdf', 'rtf', 'ppsx', 'csv', 'vcf', 'txt', 'text'},
        'Images': {'jpg', 'jpeg', 'png', 'gif', 'psd', 'psb', 'ai', 'svg',
                   'dng', 'webp'},
        'Videos': {'mpg', 'mp4', 'mkv', 'srt', 'wmv', 'mov', 'avi', 'webm'},
        'Audio': {'mp3', 'wma', 'flac', 'audio', 'eac3', 'm2ts', 'dts', 'ogg'},
        'Programming': {'py', 'pyc', 'pyo', 'pyw', 'pl', 'v', 'c', 'dat',
                        'ecf', 'unitypackage', 'mobileprovision'},
        'Executables': {'exe', 'msi', 'apk', 'bat', 'jar', 'jnlp', 'swf',
                        'reg', 'vbox-extpack', 'appx'},
        'Compressed': {'rar', 'zip', 'iso', 'tgz', 'gz', '7z', 'ova', 'img',
                       'dmg', 'xz', 'asc'},
        'Other': {'cfg', 'rpa', 'rpy', 'ini', 'xml', 'duf'},
    }
    summary = dict((category, []) for category in categories)
    filenames = (filename for filename in os.listdir(loc)
                 if os.path.isfile(os.path.join(loc, filename))
                 and filename not in excluded)

    # Begin sorting
    for filename in filenames:
        for category, extensions in categories.items():
            for extension in extensions:
                if filename.lower().endswith('.' + extension):
                    src = loc
                    src_path = os.path.join(src, filename)
                    dst = os.path.join(loc, category)
                    dst_path = os.path.join(dst, filename)
                    # Create category folder
                    if not os.path.exists(dst):
                        try:
                            os.mkdir(dst)
                        except OSError:
                            print('Failed to create folder:', dst)
                    # Move/copy the files
                    try:
                        if not os.path.exists(dst_path):
                            shutil.move(src_path, dst)
                        else:
                            shutil.copy2(src_path, dst)
                            os.remove(src_path)
                        summary[category].append(filename)
                    except PermissionError:
                        print('PermissionError '
                              '({}):'.format(os.stat(src_path).st_mode),
                              src_path)
                    break

    # Print a summary at the end
    if not silent:
        for category, filenames in summary.items():
            if filenames:
                print('{} files moved to {}'.format(len(filenames), category))
                for filename in filenames:
                    print(' ' * 2 + filename)
        os.system('pause')
    return summary
8fd380b66844a7ff0a2f0dfb27464246730e3bc1
698,885
import re


def get_params(rule):
    """
    Returns params from the url.

    Args:
        rule (str): the endpoint path (e.g. '/v1/data/<int:id>')

    Returns:
        (list): parameters from the endpoint path

    Examples:
        >>> rule = '/v1/random_resource/<string:path>/<status_type>'
        >>> get_params(rule)
        ['path', 'status_type']
    """
    # param regexes
    param_with_colon = r"<.+?:(.+?)>"
    param_no_colon = r"<(.+?)>"
    either_param = param_with_colon + r"|" + param_no_colon

    parameter_matches = re.findall(either_param, rule)
    return ["".join(match_tuple) for match_tuple in parameter_matches]
05414d950a6a603ff79fa2efff3ff3fef1e375f2
698,887
import json
import os


def get_bait_name(input_config: str):
    """Get the bait name from the case config.

    Args:
        input_config: Path to config

    Returns:
        bait: string
    """
    with open(input_config) as f:
        load_config = json.load(f)

    # Read the config file and return the bait name from the json file
    bait = os.path.basename(load_config["panel"]["capture_kit"])
    return bait
840c9e578f77ef18d3170d8cd5300a1e8055885b
698,888
def reset_var():
    """Reset vars."""
    return [], [], True
3d78de5df772f60e0331a95d9ca14f46d1d36071
698,889
def upstream_or_distgit_path(
    request, upstream_and_remote, distgit_and_remote, ogr_distgit_and_remote
):
    """
    Parametrize the test to upstream, downstream [currently skipped] and ogr distgit.
    """
    return {
        "upstream": upstream_and_remote[0],
        "distgit": distgit_and_remote[0],
        "ogr-distgit": ogr_distgit_and_remote[0],
    }[request.param]
6f94a44e95301398c495dff56f9e470b46b5c737
698,890
def dotProduct(d1, d2):
    """
    @param dict d1: a feature vector represented by a mapping from a feature
                    (string) to a weight (float).
    @param dict d2: same as d1
    @return float: the dot product between d1 and d2
    """
    # Iterate over the smaller dict for efficiency.
    if len(d1) < len(d2):
        return dotProduct(d2, d1)
    else:
        return sum(d1.get(f, 0) * v for f, v in d2.items())
65892f62f3c9d1ec13a62427aaafa7831a8ee1e5
698,891
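A minimal sketch (with invented feature vectors) of dotProduct above; only the overlapping key contributes:

d1 = {"the": 2.0, "quick": 1.0}
d2 = {"quick": 3.0, "fox": 5.0}
assert dotProduct(d1, d2) == 3.0  # 1.0 * 3.0 for the shared "quick" feature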
def input_data():
    """Input data (as coming from the view layer)."""
    return {
        "metadata": {"title": "Test", "type": {"type": "test"}},
    }
7a8bb2b82ee4754b2ee8b535c76a23ca5695c237
698,892
def bitvec_number(x):
    """Return the number represented by the given bitvec."""
    # Each data word holds 32 bits; assemble them little-endian.
    return sum(word << (32 * i) for i, word in enumerate(x.data[0:x.size]))
ad393d4f30d9fef35f089491a7c59faca3ec6610
698,894
def garfield_empty_mock(url, request) -> str:
    """
    Mock an empty HTTP response using HTTMock.

    :param url: str
    :param request: Request
    :return: str
    """
    return """
    <html>
        <body></body>
    </html>
    """
c2c349e40f315bfc625680fc5b0e92a7a54f5f7c
698,895
def check_order(order):
    """
    Checks the specified drawing order is valid and returns the corresponding
    tree traversal order.
    """
    if order is None:
        order = "minlex"
    traversal_orders = {
        "minlex": "minlex_postorder",
        "tree": "postorder",
    }
    if order not in traversal_orders:
        raise ValueError(
            f"Unknown display order '{order}'. "
            f"Supported orders are {list(traversal_orders.keys())}"
        )
    return traversal_orders[order]
c8478bbbb59ce25beec4a4196dc183a49ecb62ba
698,896
def get_total_num_women(dataframe, fil):
    """Counts women in the category selected by the filter.

    Returns:
        int -- the number of rows matching the filter whose gender is "F"
    """
    total = 0
    matched = 0
    for index, row in dataframe.iterrows():
        if fil(row):
            total += 1
            if row["gender"] == "F":
                matched += 1
    return matched
478046f1f7bb62280f36df50b50c537dba2eb063
698,897
def get_cat2id(item_metas, n_entities):
    """Extracts all categories from item metadata and maps them to an id."""
    categories = set([cat for it_meta in item_metas for cat in it_meta.categories])
    return {cate: n_entities + i for i, cate in enumerate(categories)}
38c7895949d3eccf9d8d4fc6c609b036700f93d8
698,898
def add_acids(EP, amino_acids):
    """
    Check that all alpha amino acids occur in the sequence variants,
    and add the missing monomers otherwise.

    Parameters
    ----------
    EP : dict
        Variants of peptide sequences.
    amino_acids : list
        List of alpha amino acids.

    Returns
    -------
    EP : dict
        Corrected variants of peptide sequences.
    """
    EP_cop = EP.copy()
    for var in EP_cop:
        for tour in EP[var].copy():
            if len(tour) == 0:
                EP[var].remove(tour)
                continue
            for aa in amino_acids.copy():
                if aa not in tour:
                    continue
                amino_acids.remove(aa)
    if len(amino_acids) != 0:
        for var in EP:
            [EP[var].append([aa]) for aa in amino_acids]
    return EP
45027c8b38c97da4af677e79af0c302539d8b747
698,899
def identify_target_ligand(ligand_residues):
    """Attempts to guess the target ligand."""
    # If there is only one target ligand then that must be the target
    # even if there are multiple instances. That could be the case if
    # the compound is peptidic, for example.
    if len(ligand_residues) == 1:
        return list(ligand_residues.keys())[0]
    # Alternatively, if there are multiple ligands, count them, and if
    # one is found to only have one instance use that after printing
    # a relevant message.
    indices = [ligand_residues[_] == 1 for _ in ligand_residues]
    if indices.count(True) == 1:
        index = list(ligand_residues.values()).index(1)
        return list(ligand_residues.keys())[index]
    else:
        return None
50e3559b2f42aa1a60ca5270279cc2a37325f3cb
698,900
def Recherche(tree, word):
    """Search for the word `word` in the tree `tree` and report whether it is present."""
    return tree.contains(word)
f17d4024b0e5e7542fec0a502f853fdb63a21c95
698,901
def fix_IE_for_attach(request, response):
    """
    This function will prevent Django from serving a Content-Disposition header
    while expecting the browser to cache it (only when the browser is IE).
    This leads to IE not allowing the client to download.
    """
    useragent = request.META.get('HTTP_USER_AGENT', '').upper()
    if 'MSIE' not in useragent and 'CHROMEFRAME' not in useragent:
        return response

    offending_headers = ('no-cache', 'no-store')
    if response.has_header('Content-Disposition'):
        try:
            del response['Pragma']
        except KeyError:
            pass
        if response.has_header('Cache-Control'):
            cache_control_values = [value.strip() for value in
                                    response['Cache-Control'].split(',')
                                    if value.strip().lower() not in offending_headers]
            if not len(cache_control_values):
                del response['Cache-Control']
            else:
                response['Cache-Control'] = ', '.join(cache_control_values)

    return response
b8fd12ba12a2f7df918a3e0fa7c64094c0bd016d
698,902
def to_dict(arr):
    """
    Convert a list to a dict with keys drawn from '0', '1', '2', ...

    Examples
    --------
    >>> to_dict([2, 3, 4])  # doctest: +SKIP
    {'0': 2, '1': 3, '2': 4}
    """
    return dict(zip(map(str, range(len(arr))), arr))
a51c0cbb477b4569a67fda3088a200194bb8bf67
698,903
def is_power_of_two(a):
    """Return whether the argument, cast to int, is a power of 2."""
    a = int(a)
    # Bit manipulation. A power of 2 has a bit representation like 0...010...0.
    # For such a number, subtracting 1 turns it into 0...001...1, so ANDing
    # a-1 and a should yield 0.
    return a > 0 and ((a - 1) & a) == 0
d7b0d90df8eb4287a6f56e8256aa6c40b9b46441
698,904
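A few illustrative checks for is_power_of_two above; the argument is cast to int first, so numeric strings work too:

assert is_power_of_two(1024)
assert not is_power_of_two(1023)
assert not is_power_of_two(0)   # zero is rejected by the a > 0 guard
assert is_power_of_two("8")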
from typing import Callable
from functools import reduce


def clean(text: str, *cleaners: Callable[[str], str]) -> str:
    """Cleans the given text using the provided cleaning functions.

    Arguments:
        text: The text string to be cleaned.
        cleaners: The simple cleaner functions to be applied in sequence
            over the input text.

    Returns:
        The clean text.
    """
    return reduce(lambda part, func: func(part), cleaners, text)
67a53637bca0b19b49bd157ccc520d8dc053a12f
698,905
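A usage sketch (with an invented input) for clean above; the cleaners run left to right via reduce, so strip happens before lower here:

assert clean("  Hello, WORLD  ", str.strip, str.lower) == "hello, world"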
def strip_parens(text):
    """
    Strips parentheses from a string (works with nested parentheses).
    Note that this method will leave the extra spaces behind, but this
    will not affect tokenization.

    :param text: original string
    :return: text stripped of parenthetical words
    """
    left_parens = []
    right_parens = []
    # pairs that correspond to the [beginning, end] of each parenthetical expression
    paren_indices = []

    for index, character in enumerate(text):
        # use == rather than `is` for literal comparisons
        if character == '(':
            left_parens.append(index)
        elif character == ')' and len(left_parens) > 0:
            right_parens.append(index)
            if len(right_parens) == len(left_parens):
                paren_indices.append([left_parens[0], right_parens[-1]])
                left_parens = []
                right_parens = []

    num_right_parens = len(right_parens)
    if num_right_parens != 0:
        paren_indices.append([left_parens[-1 - num_right_parens + 1],
                              right_parens[num_right_parens - 1]])

    index = 0
    output = ""
    for [beginning, end] in paren_indices:
        output += text[index:beginning]
        index = end + 1
    output += text[index:]
    return output
1976636150bc6705e39ab3929e1214f0b0c09644
698,906
def variable_name_to_title(variable_name, latex_flag=True):
    """
    Translates a variable name into a title suitable for inclusion in
    Matplotlib title strings. Variable names are assumed to be lowercased as
    found in IWP datasets and titles may include LaTeX markers for
    mathematical typesetting. Unknown variable names are returned as is.

    Takes 2 arguments:

      variable_name - Variable name to translate.
      latex_flag    - Optional flag specifying whether LaTeX-encodings should
                      be used in the translation. If specified as False,
                      translations will not use LaTeX. If omitted, defaults
                      to True.

    Returns 1 value:

      variable_title - Translated variable title.
    """
    if variable_name == "divh":
        return "Horizontal Divergence"
    elif variable_name == "p":
        return "Density"
    elif variable_name == "pprime":
        if latex_flag:
            return "Density$'$"
        else:
            return "Density'"
    elif variable_name == "u":
        if latex_flag:
            return "$velocity_x$"
        else:
            return "Velocity - X"
    elif variable_name == "uprime":
        if latex_flag:
            return "$velocity_x'$"
        else:
            return "Acceleration - X"
    elif variable_name == "v":
        if latex_flag:
            return "$velocity_y$"
        else:
            return "Velocity - Y"
    elif variable_name == "vprime":
        if latex_flag:
            return "$velocity_y'$"
        else:
            return "Acceleration - Y"
    elif variable_name == "w":
        if latex_flag:
            return "$velocity_z$"
        else:
            return "Velocity - Z"
    elif variable_name == "wprime":
        if latex_flag:
            return "$velocity_z'$"
        else:
            return "Acceleration - Z"
    elif variable_name == "vortx":
        if latex_flag:
            return "$vorticity_x$"
        else:
            return "Vorticity - X"
    elif variable_name == "vorty":
        if latex_flag:
            return "$vorticity_y$"
        else:
            return "Vorticity - Y"
    elif variable_name == "vortz":
        if latex_flag:
            return "$vorticity_z$"
        else:
            return "Vorticity - Z"
    elif variable_name.startswith("morlet"):
        # Morlet wavelets have an angle preference which is encoded as either
        # "morlet+-angle", "morlet+angle", or "morlet-angle". handle the
        # plus/minus case as special and let the positive/negative, single
        # angles fall through like normal text.
        variable_title = "2D CWT with Morlet"

        # decorate the base title depending on the format of the rest of the
        # variable.
        pm_index = variable_name.find("+-")
        if pm_index != -1:
            #
            # NOTE: we have to filter this as the non-default split parameter
            #       will *not* filter out empty strings...
            #
            pieces = list(filter(lambda piece: len(piece) > 0,
                                 variable_name[pm_index+2:].split("-")))

            # the first piece is the angle. add the remaining pieces the
            # way we found them.
            if len(pieces) == 1:
                suffix = ""
            else:
                suffix = " ({:s})".format("-".join(pieces[1:]))

            if latex_flag:
                # add "+-N degrees" in LaTeX and then append the remaining
                # components as a suffix.
                variable_title = "{:s}$\pm{:s}\circ${:s}".format(
                    variable_title, pieces[0], suffix)
            else:
                variable_title = "{:s} +-{:s} degrees{:s}".format(
                    variable_title, pieces[0], suffix)
        elif len(variable_name) > len("morlet"):
            # add the remaining text as a parenthetical.
            variable_title = "{:s} ({:s})".format(
                variable_title, variable_name[len("morlet"):])

        return variable_title
    elif variable_name.startswith("arc"):
        return "2D CWT with Arc"
    elif variable_name.startswith("halo"):
        return "2D CWT with Halo"

    # we don't have a special title for this variable. use what we have.
    return variable_name
3f9d56a13f4aacb2ec6e9cf15ad1d2ff1c4288bc
698,907
import builtins
import sys


def _str_type_to_type_annotations_dict(annotations_dict: dict):
    """
    When getting the annotations, they return as a string representation of
    the type. However, we need the actual type. This function will take the
    annotations dict with the string type and turn it into the type.

    .. note::
        If the type hint is the type already (no conversion needed), this
        won't break anything.

    :param annotations_dict: The annotations dict to convert.
    :type annotations_dict: dict
    :return: The converted dict.
    :rtype: dict
    """
    fixed_annotations = {}
    for argument, annotation in annotations_dict.items():
        # This won't affect anything if the type is already the type
        # Also, `isinstance(str, str)` == False
        if not isinstance(annotation, str):
            fixed_annotations[argument] = annotation
            continue
        try:
            fixed_annotations[argument] = getattr(builtins, annotation)
        except AttributeError:
            fixed_annotations[argument] = getattr(sys.modules[__name__], annotation)
    return fixed_annotations
346b1f12271d78aa588e5f0649e97ac819fd7351
698,908
import os


def get_editor():
    """Return the editor command or raise an Exception.

    Look in the environment for EDITOR, GIT_EDITOR or SVN_EDITOR;
    if none is found, raise an error. EDITOR needs to be set in the env.
    """
    found = os.environ.get('EDITOR',
                           os.environ.get('GIT_EDITOR',
                                          os.environ.get('SVN_EDITOR')))
    if not found:
        msg = ('You must set your editor in your environment. Either '
               '"EDITOR", "GIT_EDITOR" or "SVN_EDITOR" must be set.')
        raise Exception(msg)
    return found
99aaba84f42c176b6f8d8b10479527f4f01931ec
698,909
def fix_filter_query(filter):
    """
    Fix the filter query from the user.

    Args:
        filter: filter from users

    Returns:
        dict
    """
    if filter:
        try:
            filter = {
                _.split("=")[0]: _.split("=")[1]
                for _ in list(set(str(filter).split("&")))
            }
        except Exception:
            return {}
    return filter
11dce2c8471205118277bb396ec298de7fb7bf67
698,910
def rectangle_area(length, width):
    """
    Calculates the area of a rectangle.

    :param length: The length of the rectangle.
    :param width: The width of the rectangle.
    :return: The area of the rectangle.
    """
    return length * width
0cbe453fbd4c3c6a061f520d57f303dae55fdc25
698,911
def has_level_five(line):
    """Check if we need to explode."""
    count = 0
    for c in line:
        if c == '[':
            count += 1
            if count > 4:
                return True
        elif c == ']':
            count -= 1
    return False
6d2e9d4aed708a9d71a9f01f90519f804e1fd3d9
698,912
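Two illustrative inputs (made up) for has_level_five above; it reports whether any '[' is nested more than four levels deep:

assert has_level_five("[[[[[9,8],1],2],3],4]")
assert not has_level_five("[[1,2],[3,4]]")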
def threshold_array(arr, threshold=2e-4):
    """
    Thresholds an array, returning a binary array.

    Parameters
    ----------
    arr : NumpyArray
        Contains the data to threshold

    Returns
    -------
    NumpyArray
        Returns arr with binary values, depending on the threshold
    """
    return (arr > threshold)
254692de4f82dbf6c3e684b10324cb73c436ff28
698,913
def __binary_search(arr, f, g):
    """
    Perform the binary search in order to find the minimum feasible value
    of ``c`` inside ``arr``.

    :param arr: The array on which to perform the binary search.
    :param f: Function to be applied to ``g`` and ``arr[mid]``
        (``check_th7`` or ``feas``).
    :param g: A NetworkX (Multi)DiGraph representing a synchronous circuit.
    :return: The minimum clock period and the corresponding retiming function.
    """
    def bs_rec(low, high, prev_mid=None, prev_x=None):
        if high >= low:
            mid = (high + low) // 2
            x = f(g, arr[mid])
            if x is None:
                # arr[mid] is infeasible; search the upper half.
                return bs_rec(mid + 1, high, prev_mid, prev_x)
            else:
                # arr[mid] is feasible; remember it and search the lower half.
                return bs_rec(low, mid - 1, mid, x)
        else:
            return arr[prev_mid], prev_x

    return bs_rec(0, len(arr) - 1)
415528fd46a33d1efe8d736717ac8b041d34c5d7
698,914
def _current_window_for_event(event):
    """Return the `Window` for the currently focussed Buffer."""
    return event.app.layout.current_window
4b9859c7bf7fc4b072362d2d5b9e896022769587
698,915
import re


def clean_xml(xml):
    """Clean the given XML string of namespace definitions, namespace prefixes
    and syntactical but otherwise meaningless differences.

    Parameters
    ----------
    xml : str
        String representation of XML document.

    Returns
    -------
    str
        String representation of cleaned XML document.
    """
    # remove xmlns namespace definitions
    r = re.sub(r'[ ]+xmlns:[^=]+="[^"]+"', '', xml)
    # remove namespace prefixes in tags
    r = re.sub(r'<(/?)[^:]+:([^ >]+)([ >])', r'<\1\2\3', r)
    # remove extra spaces in tags
    r = re.sub(r'[ ]+/>', '/>', r)
    # remove extra spaces between tags
    r = re.sub(r'>[ ]+<', '><', r)
    return r
3f566975ab512ccc22824c45e7ef04fc861a5c03
698,916
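A worked example (input is illustrative) for clean_xml above, showing the four substitutions combining:

xml = '<ns:root xmlns:ns="http://example.com/ns"> <ns:child  /> </ns:root>'
assert clean_xml(xml) == '<root><child/></root>'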
import torch


def _make_orthogonal(A):
    """
    Assume that A is a tall matrix.
    Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is
    real and non-negative.
    """
    X, tau = torch.geqrf(A)
    Q = torch.linalg.householder_product(X, tau)
    # The diagonal of X is the diagonal of R (which is always real),
    # so we normalise by its signs
    Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
    return Q
ad2cfaa0c041062c8628bc02faedcfdbc550f4bb
698,917
def weighted_average(gini_or_entropy_left, left_cnt, gini_or_entropy_right, right_cnt):
    """
    Calculate the weighted average for the Gini index or entropy.

    :param right_cnt: count of total records on the right side of the node
    :param left_cnt: count of total records on the left side of the node
    :param gini_or_entropy_left: Gini index or entropy of the left side of the node
    :param gini_or_entropy_right: Gini index or entropy of the right side of the node
    :return: weighted average of the entire node
    """
    # formula used to calculate the weighted Gini index / entropy
    weighted_avg = ((left_cnt / (left_cnt + right_cnt)) * gini_or_entropy_left) + (
        (right_cnt / (left_cnt + right_cnt)) * gini_or_entropy_right)
    return weighted_avg
7914a0164427de4e9cebf7637dda670694f8df59
698,918
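A quick arithmetic check for weighted_average above, with made-up node counts:

node_impurity = weighted_average(0.2, 60, 0.5, 40)
assert abs(node_impurity - 0.32) < 1e-9  # 0.6 * 0.2 + 0.4 * 0.5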
import torch


def concat_entities(entities):
    """
    Concat multiple graphs via concatenation of their entities tensors.

    Parameters
    ----------
    entities: a list of graph tuples. Either [(v, e, c), ...] or [(v, e), ...]
        when the graph has no global attribute.

    Returns
    -------
    v, e, c - concatenated entities tensors; c is None if the graph has
    no global attribute.
    """
    has_global = len(entities[0]) == 3 and entities[0][2] is not None
    v = torch.cat([el[0] for el in entities], dim=1)
    e = {}
    for k in entities[0][1].keys():
        e[k] = torch.cat([el[1][k] for el in entities], dim=1)
    c = None
    if has_global:
        c = torch.cat([el[2] for el in entities], dim=1)
    return v, e, c
06d0619836d4cb8c977028ee0cdccd9075136c72
698,919
def filter_out_installed_pkgs(event_out_pkgs, installed_pkgs):
    """Do not install those packages that are already installed."""
    return {p for p in event_out_pkgs
            if (p.name, p.modulestream) not in installed_pkgs}
16fb8f68372fb070869a744b15ddce0ed6591887
698,921
import os


def is_using_wayland() -> bool:
    """Check if we are running on a Wayland DE.

    Returns [bool] -- True if probably Wayland
    """
    return "WAYLAND_DISPLAY" in os.environ
2c361ecdada0dfb55101cb0e06290d7033bcfbf1
698,922
def truedicts(all):
    """
    Generates a pair of dictionaries containing all true tail and head completions.

    :param all: A list of 3-tuples containing all known true triples
    :return:
    """
    heads = {(p, o): [] for _, p, o in all}
    tails = {(s, p): [] for s, p, _ in all}

    for s, p, o in all:
        heads[p, o].append(s)
        tails[s, p].append(o)

    return heads, tails
03c126ff6cdc0cc14be1c64a91e8164e890c2f2d
698,923
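A minimal sketch of truedicts above on an invented triple list:

triples = [(0, "likes", 1), (2, "likes", 1), (0, "knows", 2)]
heads, tails = truedicts(triples)
assert heads[("likes", 1)] == [0, 2]  # both 0 and 2 complete (?, likes, 1)
assert tails[(0, "likes")] == [1]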
import math


def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"):
    """Rotate an xy coordinate about a specified origin.

    x, y      xy coordinates
    xc, yc    center of rotation
    angle     angle
    units     "DEGREES" (default) or "RADIANS"
    """
    x = x - xc
    y = y - yc
    # make angle clockwise (like Rotate_management)
    angle = angle * -1
    if units == "DEGREES":
        angle = math.radians(angle)
    xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc
    yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc
    return xr, yr
68d24bfd5b2cf436b1ea37c0d4124f6cfa357e9f
698,924
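A sanity check for RotateXY above; since the angle is negated internally, a positive angle rotates clockwise:

xr, yr = RotateXY(1, 0, angle=90)
assert abs(xr) < 1e-9 and abs(yr + 1) < 1e-9  # (1, 0) -> (0, -1)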
def _extract_gpcrdb_residue_html(txt):
    """
    Extracts the relevant lines for all residues from a GPCRdb html entry.

    Parameters
    ----------
    txt : str
        Content (html) of the website with the GPCRdb entry.

    Returns
    -------
    residue_html : list
        A list in which each item contains the html lines for one residue.
    """
    res_start_line = ' <td class="seqv seqv-sequence">'
    res_end_line = ' </td>'
    spl = txt.split('\n')
    residue_html = []
    for lnum, line in enumerate(spl):
        if line == res_start_line:
            residue_lines = spl[lnum:lnum+12]
            # Use fewer lines if the residue is shorter
            # (i.e. has no GPCRdb number)
            if residue_lines[-4] == res_end_line:
                residue_lines = residue_lines[:-3]
            residue_html.append(residue_lines)
    return residue_html
4621848f27b9cd24017ed5c49683bfc4b8a180e8
698,925
def seq2set(xs):
    """Bijection from sequences to sets: each output element is the running
    sum of (x + 1), which is strictly increasing."""
    rs = []
    s = -1
    for x in xs:
        sx = x + 1
        s += sx
        rs.append(s)
    return rs
b90ec731ce35539240c43d4c616889f71ca7c982
698,926
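Two illustrative calls for seq2set above; adding 1 before the running sum keeps the outputs strictly increasing, so duplicates map to distinct set elements:

assert seq2set([0, 0, 0]) == [0, 1, 2]
assert seq2set([3, 1, 2]) == [3, 5, 8]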
def simple_tag_without_context_parameter(arg):
    """Expected simple_tag_without_context_parameter __doc__"""
    return "Expected result"
aad64452d051a587447696c6bd616b04f3f5b23e
698,927
import re


def uuid(value: str):
    """
    Validator for Universally unique identifier.

    Example Result: [123e4567-e89b-12d3-a456-426655440000, 6a2f41a3-c54c-fce8-32d2-0324e1c32e22]

    Detail: https://en.wikipedia.org/wiki/Universally_unique_identifier#Format
    """
    _uuid_pat = r'[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}'  # pylint: disable=C0301
    return re.findall(_uuid_pat, value)
66f498669b52df2e3ff9ea281fb781cb6774c77c
698,930
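A usage sketch for the uuid validator above on an invented string:

text = "ids: 123e4567-e89b-12d3-a456-426655440000 and not-a-uuid"
assert uuid(text) == ["123e4567-e89b-12d3-a456-426655440000"]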
from typing import Tuple
from typing import Dict


def field_annotations(typed_dict) -> Tuple[Dict[str, type], Dict[str, type]]:
    """Return the required and optional fields in the TypedDict."""
    return (typed_dict.__annotations__["required_fields"].__annotations__,
            typed_dict.__annotations__["optional_fields"].__annotations__)
2e88481a1668cd40caacb8bfc91a50c8746a704e
698,931
import os


def get_config_log_dir():
    """Retrieve the logging directory, creating it if it doesn't exist."""
    log_dir = os.path.expanduser(os.path.join(u'~', u'.mssqlscripter'))
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    return log_dir
dd30794544f19083b74635d2a4c82621f2fd16ba
698,932
import sqlite3


def get_member_data(member_id: str, conn: sqlite3.Connection) -> dict:
    """
    Gets email and phone data from the database based on the passed member.

    This function uses placeholders in the query, so it is injection safe.
    Look up get_member_data_injection for an example where it's NOT
    injection safe.
    """
    query = "SELECT EMAIL,PHONE FROM MEMBERS WHERE MEMBER_ID=?"
    cursor = conn.cursor()
    cursor.execute(query, (member_id,))
    rows = cursor.fetchall()
    return dict(rows)
a4ca505961352e292109e22318ff7a0188ffc454
698,933
def node_short_name(node, **kwargs):
    """
    Returns short name of the given node.

    :param node: str
    :return: str
    """
    return node
67d1d5ff172544eb2b233a925d90bbd0ea767e83
698,934
def create_notify_payload(host, nt, usn, location=None, al=None, max_age=None, extra_fields=None):
    """
    Create a NOTIFY packet using the given parameters.
    Returns a bytes object containing a valid NOTIFY request.

    The NOTIFY request is different between IETF SSDP and UPnP SSDP.

    In IETF, the 'location' and 'al' fields serve the same purpose, and can be
    provided together (if so, they should point to the same location) or not
    at all. In UPnP, the 'location' field MUST be provided, and 'al' is ignored.

    Sending both 'location' and 'al' is the more widely supported option. It
    does not, however, mean that all SSDP implementations would accept a
    packet with both. Therefore the option to send just one of these fields
    (or none at all) is supported. If in doubt, send both. If your
    notifications go ignored, opt to not send 'al'.

    :param host: The address (IP + port) that the NOTIFY will be sent about.
                 This is usually a multicast address.
    :type host: str
    :param nt: Notification type. Indicates which device is sending the notification.
    :type nt: str
    :param usn: Unique identifier for the service. Usually this will be
                composed of a UUID or any other universal identifier.
    :type usn: str
    :param location: A URL for more information about the service. This parameter
                     is only valid when sending a UPnP SSDP packet, not IETF.
    :type location: str
    :param al: Similar to 'location', but only supported on IETF SSDP, not UPnP.
    :type al: str
    :param max_age: Amount of time in seconds that the NOTIFY packet should be
                    cached by clients receiving it. In UPnP, this header is required.
    :type max_age: int
    :param extra_fields: Extra header fields to send. UPnP SSDP section 1.1.3
                         allows for extra vendor-specific fields to be sent in
                         the NOTIFY packet. According to the spec, the field
                         names MUST be in the format of `token`.`domain-name`,
                         for example `myheader.philips.com`. SSDPy, however,
                         does not check this. Normally, headers should be in
                         ASCII - but this function does not enforce that.
    :return: A bytes object containing the generated NOTIFY payload.
    """
    if max_age is not None and not isinstance(max_age, int):
        raise ValueError("max_age must be of type: int")
    data = (
        "NOTIFY * HTTP/1.1\r\n"
        "HOST:{}\r\n"
        "NT:{}\r\n"
        "NTS:ssdp:alive\r\n"
        "USN:{}\r\n"
    ).format(host, nt, usn)
    if location is not None:
        data += "LOCATION:{}\r\n".format(location)
    if al is not None:
        data += "AL:{}\r\n".format(al)
    if max_age is not None:
        data += "Cache-Control:max-age={}\r\n".format(max_age)
    if extra_fields is not None:
        for field, value in extra_fields.items():
            data += "{}:{}\r\n".format(field, value)
    data += "\r\n"
    return data.encode("utf-8")
95b6a7ad37ec96d646116451340ed7ee96495632
698,935
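A hedged usage sketch for create_notify_payload above; the host, NT, USN and location values are illustrative placeholders:

packet = create_notify_payload(
    host="239.255.255.250:1900",
    nt="upnp:rootdevice",
    usn="uuid:2fac1234-31f8-11b4-a222-08002b34c003::upnp:rootdevice",
    location="http://192.168.1.2:5000/description.xml",
    max_age=1800,
)
assert packet.startswith(b"NOTIFY * HTTP/1.1\r\n")
assert b"Cache-Control:max-age=1800\r\n" in packet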
import math


def runs(bit_str: str):
    """Runs Test

    Determine whether the number of runs of ones and zeros of various lengths
    is as expected for a random sequence.

    See section 2.3 in this NIST publication:
    https://tsapps.nist.gov/publication/get_pdf.cfm?pub_id=906762
    """
    n = len(bit_str)

    # pre-test
    ones = 0.0
    for i in range(n):
        ones += int(bit_str[i])
    pi = ones / n
    tau = 2.0 / math.sqrt(n)
    if abs(pi - 0.5) >= tau or ones == n:
        # pre-test failed; do not run this test
        return 0.0

    # Runs test
    v_n = 1
    for i in range(n - 1):
        v_n += 0 if bit_str[i] == bit_str[i + 1] else 1
    p = math.erfc(abs(v_n - 2 * n * pi * (1 - pi)) /
                  (2 * math.sqrt(2 * n) * pi * (1 - pi)))
    return p
fce124ccf0b3da512c72ae866aca70572a768692
698,936
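A quick check of runs above on an arbitrary bit string; the returned p-value comes from erfc of a non-negative argument, so it always lies in (0, 1]:

p_value = runs("1001101011")
assert 0.0 < p_value <= 1.0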
import numbers
import math
import cmath


def atan(x):
    r"""
    Uncertain number arctangent function

    .. note::
        In the complex case there are two branch cuts: One extends from
        :math:`\mathrm{j}` along the imaginary axis to :math:`\mathrm{j}\infty`,
        continuous from the right. The other extends from :math:`-\mathrm{j}`
        along the imaginary axis to :math:`-\mathrm{j}\infty`, continuous
        from the left.
    """
    try:
        return x._atan()
    except AttributeError:
        if isinstance(x, numbers.Real):
            return math.atan(x)
        elif isinstance(x, numbers.Complex):
            return cmath.atan(x)
        else:
            raise TypeError("illegal argument: {!r}".format(x))
470af67f67a6c5c1aa63fb28b5590c4f699c988c
698,937
import os


def cpu_time(reference_cpu_time=0.0):
    """
    Returns the current or elapsed value of CPU time used by the process,
    including both user and system CPU time of both the process itself and
    its children.
    """
    return sum(os.times()[:4]) - reference_cpu_time
9aee8973bea50d89e08dbf3d0c77ad4cfc252657
698,938
def get_present_volume(present):
    """Calculate the volume of the box needed for a present."""
    volume = 1
    for side in present:
        volume *= side
    return volume
e3df95638a741513307163abd90a68013aa7da3b
698,939
def crop_rasters_for_sr(max_sr_factor, *hmaps):
    """Crop a list of rasters to a size such that they can be evenly divided
    by ``max_sr_factor``.

    It assumes that each raster is centered identically. I.e. if one raster
    has size 256x256 and another 254x254, it assumes that it is a border of
    size 1 that is removed symmetrically from the first raster to get the
    location of the second raster.

    It will crop off the bottom-right of the image to make it an evenly
    divisible size.

    Parameters
    ----------
    max_sr_factor : int
        The maximum amplification factor for super-resolution predictions
        that will be made using these rasters. I.e. 32 means that there will
        be 32x32 predictions per image.
    hmaps : list of :class:`numpy.ndarray`
        The rasters to crop. The final two dimensions must correspond to
        i,j of the raster.

    Returns
    -------
    list of :class:`numpy.ndarray`
        Cropped versions of ``hmaps``
    """
    min_width = min([i.shape[-1] for i in hmaps])
    reduction = min_width % max_sr_factor
    out = []
    for h in hmaps:
        crop_width = (h.shape[-1] - min_width) / 2
        assert crop_width == int(crop_width)
        crop_width = int(crop_width)
        out.append(
            h[
                ...,
                crop_width : -(reduction + crop_width),
                crop_width : -(reduction + crop_width),
            ]
        )
    return out
77b45841cb82bc5475c6343df05c91b04d4e74c8
698,940
def track_score(track_path):
    """
    Score a proposal tracklet that can be associated with an st_track.

    Score = weight_motion * score_motion + weight_appearance * score_appearance

    :return: the mean node score along the path
    """
    path_score = 0.
    for each_node in track_path:
        path_score += each_node.score
    return path_score / len(track_path)
8633bdbff8520e94e775d34b64bc448fa3b9366b
698,941
def font_size_norm(x):
    """The web font size is different from the game client's - try to
    approximate how it would look without having to know the actual value."""
    try:
        x = int(x)
    except ValueError:
        return "100%"
    ratio = x / 24
    return f"{int(ratio * 100)}%"
d70679ec385abe0cb283aa4615853e8b5e1a853e
698,942
def apim_api_operation_get(client, resource_group_name, service_name, api_id, operation_id):
    """Gets the details of the API Operation specified by its identifier."""
    return client.api_operation.get(resource_group_name, service_name, api_id, operation_id)
4101a7aa2b3815856bf852a299698d407af390c0
698,943
def _uses_vulnerable_solc_version(version):
    """Detect if the used compiler version is 0.4.[0|1|2|3|4].

    Args:
        version (solc version used)
    Returns:
        Bool
    """
    if version in ["0.4.0", "0.4.1", "0.4.2", "0.4.3", "0.4.4"]:
        return True
    return False
475dff3c6d3ed71317aab79e147f8797bde85f3a
698,944
import hashlib


def get_subscriber_hash(member_email):
    """
    The MD5 hash of the lowercase version of the list member's email.
    Used as the member_id.
    """
    member_email = member_email.lower()
    # hashlib.md5 requires bytes, so encode the string first.
    m = hashlib.md5(member_email.encode("utf-8"))
    return m.hexdigest()
c2d7c7f4d0da58ec3990cccad865cb53cf2ebe15
698,945
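A small check for get_subscriber_hash above; lowercasing first makes the hash case-insensitive:

h = get_subscriber_hash("User@Example.COM")
assert h == get_subscriber_hash("user@example.com")
assert len(h) == 32  # hex-encoded MD5 digest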
def set_ax_info(ax, **kwargs):
    """Set Axes information.

    Args:
        ax (Axes) : The ``Axes`` instance.
        kwargs : ``key`` indicates the method which starts with ``set_``,
            and the method takes arguments (``val``) according to the type
            of ``val``.

    Returns:
        ax (Axes) : The ``Axes`` instance.

    Examples:
        >>> import matplotlib.pyplot as plt
        >>> from pycharmers.matplotlib import set_ax_info
        >>> fig, ax = plt.subplots()
        >>> ax = set_ax_info(ax, aspect=5, title="Title", xlabel="Xlabel", ylabel="Ylabel", yticks={"ticks": []})
        >>> ax.scatter(1, 1)
        >>> plt.show()
    """
    for k, v in kwargs.items():
        method = ax.__getattribute__(f"set_{k}")
        if isinstance(v, str) and v == "_void":
            method()
        elif isinstance(v, dict):
            method(**v)
        elif isinstance(v, tuple):
            method(*v)
        elif isinstance(v, list) and len(v) == 2 and (isinstance(v[0], tuple) and isinstance(v[1], dict)):
            args, kwargs = v
            method(*args, **kwargs)
        else:
            method(v)
    return ax
f54d959b83464071eda6efec2a9189b47addec34
698,946
def Reverse(action):
    """Reverses the behavior of the action

    Example::

        # rotates the sprite 180 degrees in 2 seconds counter clockwise
        action = Reverse(RotateBy(180, 2))
        sprite.do(action)
    """
    return action.__reversed__()
9bde87421204300e55fff80eed0564f4b4d8e978
698,947
from datetime import datetime


def prune(iterable, start, end):
    """Discard events that fall outside the start-end interval."""
    events = sorted(iterable, key=lambda event: event.created_at)
    events.reverse()

    # Check if we have at least one event older than the start date.
    # If so, then we have a complete record for the desired interval.
    oldest_event = events[-1]
    if oldest_event.created_at >= start:
        msg = 'Warning: May be missing valid events between %s and %s'
        print(msg % (datetime.strftime(start, '%Y-%m-%d'),
                     datetime.strftime(oldest_event.created_at, '%Y-%m-%d')))

    return (event for event in iterable if start <= event.created_at < end)
d69be8ef507347049b4fc59938378eb108cc8ee4
698,948
def get_licenses(obj):
    """
    Returns a list of licenses under which the given language resource
    is available.
    """
    result = []
    for dist in obj.distributioninfotype_model_set.all():
        for licence_info in dist.licenceInfo.all():
            result.append(licence_info.licence)
    return result
09cbadcc55b15d189c0f19095351fdb2d9d526e3
698,949
def patches(internal_api_patcher):
    """
    Fixture that resets session-wide patches before each test case function.
    """
    for mock in internal_api_patcher.patcher_mocks:
        mock.reset_mock()
    return internal_api_patcher
05bacb7491e7178a324c07da4c67fad0b1fed01c
698,950
def get_frame_number(frame):
    """Get frame number by calculating distance to newest frame."""
    num = 0
    newer = frame.newer()
    while newer is not None:
        newer = newer.newer()
        num += 1
    return num
1b653a629d9e34af2cfa1ce37bea54644a18236a
698,951
import torch


def batch_l2(kernels):
    """
    Args:
        kernels: (num_kernels, bsize, bsize)
    """
    n = kernels.shape[0]
    kernels = kernels.view(n, -1)  # (n, bsize*bsize)
    distances = torch.cdist(kernels, kernels)
    # zero out the diagonal (self-distances)
    distances = distances - torch.diag(torch.diag(distances))
    return -distances.mean()
0d1d9d1544cf98a0ae92e34e6ed739dadc6ef5f5
698,952
def ecs_efs_encryption_status(status):
    """
    Property: EFSVolumeConfiguration.TransitEncryption
    """
    valid_status = ["ENABLED", "DISABLED"]
    if status not in valid_status:
        raise ValueError(
            'ECS EFS Encryption in transit can only be one of: "%s"'
            % (", ".join(valid_status))
        )
    return status
85e0d0f8b3c33331c8d877f0554126ca72c023bc
698,953
def folder_contents_html(folder_path, files, folders):
    """Given files and folders, generate html."""
    html = "<!DOCTYPE html><html><body>{}</body></html>"
    atag = '<a href="{}">{}</a>'
    files_and_folders = ''
    for folder in folders:
        files_and_folders += '<h4>' + atag.format(folder_path + '/' + folder, folder) + '</h4>'
    for file_name in files:
        files_and_folders += '<h4>' + atag.format(folder_path + '/' + file_name, file_name) + '</h4>'
    return html.format(files_and_folders)
6b6b37ca9452319d309a61c877ebf6d1fba201aa
698,954
import os


def get_logdir_name(flags):
    """Generates the name of the log directory from the values of flags.

    Parameters
    ----------
    flags: neural net architecture generated by get_arguments()

    Outputs
    -------
    the name of the directory to store the training and evaluation results
    """
    epochs = (flags.number_of_steps * flags.train_batch_size) / flags.num_samples_train

    param_list = ['batch_size', str(flags.train_batch_size),
                  'num_tasks', str(flags.num_tasks_per_batch),
                  'lr', str(flags.init_learning_rate),
                  'lr_anneal', flags.lr_anneal,
                  'epochs', str(epochs),
                  'dropout', str(flags.dropout),
                  'opt', flags.optimizer,
                  'weight_decay', str(flags.weight_decay),
                  'nfilt', str(flags.num_filters),
                  'feature_extractor', str(flags.feature_extractor),
                  'task_encoder', str(flags.task_encoder),
                  'att_nfilt', str(flags.attention_num_filters),
                  'enc_cl_link', flags.encoder_classifier_link]

    if flags.log_dir == '':
        logdir = './logs1/' + '-'.join(param_list)
    else:
        logdir = os.path.join(flags.log_dir, '-'.join(param_list))

    if flags.exp_dir is not None:
        # Running a Borgy experiment
        logdir = flags.exp_dir
        logdir = flags.log_dir  # note: this immediately overrides exp_dir
    return logdir
2caa6a8005bfb76b52847be3acc64ba93a0d72e0
698,955