Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k).
def format_msg(wiki_link: str):
    """Return html formatted email content."""
    contents = f'''
    <!DOCTYPE html>
    <html>
      <body>
        <div style="text-align:center;">
          <h1>Your Weekly Article:</h1>
          <a href="{wiki_link}">{wiki_link}</a>
        </div>
      </body>
    </html>
    '''
    return contents
598da5bd195d5795fe2acec3b53f9d0a2449096d
61,553
def get_pytt_last_hidden(docs, drop=0.0):
    """Output a List[array], where the array is the last hidden vector
    for each document. To backprop, we increment the values in the
    doc._.pytt_d_last_hidden_state array.
    """
    outputs = [doc._.pytt_last_hidden_state for doc in docs]
    for out in outputs:
        assert out is not None

    def backprop_pytt_last_hidden(d_outputs, sgd=None):
        for doc, d_last_hidden_state in zip(docs, d_outputs):
            if doc._.pytt_d_last_hidden_state is None:
                doc._.pytt_d_last_hidden_state = d_last_hidden_state
            else:
                doc._.pytt_d_last_hidden_state += d_last_hidden_state
        return None

    return outputs, backprop_pytt_last_hidden
3f58fbd2d1a172d1e30ba7ff7a04f0c05d4a9f5e
61,554
import socket


def get_ip_for_hostname(hostname, ignore_hostnames_list=["AUTO"]):
    """Get the IP of a given hostname.

    Args:
        hostname (string): hostname to find.
        ignore_hostnames_list (list of strings): special hostname values that
            won't be looked up (if the given hostname is in this list, this
            function returns it without any modification).

    Returns:
        (string) IP of the given hostname (or None if we can't find it).
    """
    if hostname in ignore_hostnames_list:
        return hostname
    if hostname in ("127.0.0.1", "localhost", "localhost.localdomain"):
        return "127.0.0.1"
    try:
        infos = socket.getaddrinfo(hostname, 80, 0, 0, socket.IPPROTO_TCP)
    except Exception:
        return None
    for (family, sockettype, proto, canonname, sockaddr) in infos:
        if sockaddr is None or len(sockaddr) != 2:
            continue
        tmp = sockaddr[0]
        if '.' not in tmp:
            # we don't want ipv6
            continue
        return tmp
    return None
3df4146c7d3940d7d19f682728f89ca0b20e0848
61,555
def lens_rotation(alpha0, s0, dalpha, ds, t, tb):
    """Compute the angle alpha and projected separation s for each time step
    due to the lens orbital motion.

    :param alpha0: angle alpha at date tb.
    :param s0: projected separation at date tb.
    :param dalpha: float, angular velocity at date tb (radians.year^-1).
    :param ds: change rate of separation (year^-1).
    :param t: list of dates.
    :param tb: time reference for linear development.
    :type alpha0: float
    :type s0: float
    :type dalpha: float
    :type ds: float
    :type t: numpy array
    :type tb: float
    :return: unpacked list of actual alpha and s values at each date.
    :rtype: numpy array, numpy array
    """
    Cte_yr_d = 365.25  # Julian year in days
    alpha = alpha0 - (t - tb) * dalpha / Cte_yr_d
    s = s0 + (t - tb) * ds / Cte_yr_d
    return alpha, s
4e5ed45067d31f91739439784f54a82d13be0366
61,561
def filter_columns(df, keep):
    """Filter Pandas table df keeping only columns in keep"""
    cols = list(df)
    for col in keep:
        cols.remove(col)
    return df.drop(columns=cols)
d0f042139c9ff342294362de168090aa0411008e
61,562
def crc16_is_OK(hex_string):
    """Returns True if the hex_string ending in two CRC16 bytes passes the
    Dallas 1-wire CRC16 check.

    Code adapted from: http://forum.arduino.cc/index.php?topic=37648.0;wap2
    """
    # break the Hex string into a list of bytes
    byte_list = [int(hex_string[i:i + 2], 16) for i in range(0, len(hex_string), 2)]
    crc = 0
    for inbyte in byte_list:
        for j in range(8):
            mix = (crc ^ inbyte) & 0x01
            crc = crc >> 1
            if mix:
                crc = crc ^ 0xA001
            inbyte = inbyte >> 1
    return crc == 0xB001
41107799a3255df670021ad5539d400d9d781c02
61,563
import re


def hdir(obj, magics=False, internals=False):
    """Print object methods and attributes, by default excluding magic methods.

    Parameters
    -----------
    obj: any type
        The object to print methods and attributes for.
    magics: bool
        Specifies whether to include magic methods (e.g. __name__, __hash__).
        Default False.
    internals: bool
        Specifies whether to include internal methods (e.g. _dfs, _name).
        Default False.

    Returns
    --------
    dict
        Keys are method/attribute names, values are strings specifying whether
        the corresponding key is a 'method' or an 'attr'.
    """
    output = dict()
    for attr in dir(obj):
        # Exclude magics or internals if specified.
        if (not magics and attr.startswith('__')) or \
           (not internals and re.match('_[^_]', attr)):
            continue

        # Handle rare case where attr can't be invoked (e.g. df.sparse on a
        # non-sparse Pandas dataframe).
        try:
            is_method = callable(getattr(obj, attr))
        except Exception:
            continue

        # Update output to specify whether attr is callable.
        if is_method:
            output[attr] = 'method'
        else:
            output[attr] = 'attribute'
    return output
3cd9686de9659ddf15c0317736835462329e160c
61,564
import torch


def one_hot(type_map: torch.Tensor, n_classes: int) -> torch.Tensor:
    """
    Take in a type map of shape (B, H, W) with class indices as values
    and reshape it into a tensor of shape (B, C, H, W)

    Args:
    -----------
        type_map (torch.Tensor): type map
        n_classes (int): number of classes in type_map

    Returns:
    -----------
        torch.Tensor: A one hot tensor from the type map of shape (B, C, H, W)
    """
    assert type_map.dtype == torch.int64, (
        f"Wrong type_map dtype: {type_map.dtype}. Should be torch.int64"
    )
    one_hot = torch.zeros(
        type_map.shape[0], n_classes, *type_map.shape[1:],
        device=type_map.device, dtype=type_map.dtype
    )
    return one_hot.scatter_(dim=1, index=type_map.unsqueeze(1), value=1.0) + 1e-7
ed1fc37e4681287a6dafaeaa505ca6ce26af780e
61,565
def isNumber(s):
    """ Check if a string is a numeric (int or float) value """
    try:
        float(s)
        return True
    except ValueError:
        return False
0114112223455c619f4c2e14147f179b959ef0f7
61,568
from typing import List
from typing import Tuple


def cycle_time(events: List[Tuple[float, float]], num_instances: int) -> float:
    """
    Computes the cycle time given a list of events (having a start and a
    complete timestamp) and the number of instances of the log.

    The definition that has been followed is the one proposed in:
    https://www.presentationeze.com/presentations/lean-manufacturing-just-in-time/lean-manufacturing-just-in-time-full-details/process-cycle-time-analysis/calculate-cycle-time/#:~:text=Cycle%20time%20%3D%20Average%20time%20between,is%2024%20minutes%20on%20average.

    So:
    Cycle time = Average time between completion of units.

    Example taken from the website: Consider a manufacturing facility, which
    is producing 100 units of product per 40 hour week. The average throughput
    rate is 1 unit per 0.4 hours, which is one unit every 24 minutes. Therefore
    the cycle time is 24 minutes on average.

    Parameters
    ---------------
    events
        List of events (each event is a tuple having the start and the
        complete timestamp)
    num_instances
        Number of instances of the log

    Returns
    ---------------
    cycle_time
        Cycle time
    """
    events = sorted(events, key=lambda x: (x[0], x[1]))
    st = events[0][0]
    et = events[0][1]
    production_time = 0
    for i in range(1, len(events)):
        this_st = events[i][0]
        this_et = events[i][1]
        if this_st > et:
            production_time += (et - st)
            st = this_st
        et = max(et, this_et)
    # Count the final merged busy interval as well (the original loop only
    # added an interval when a gap was found, which dropped the last one).
    production_time += (et - st)
    return production_time / num_instances
90b49a066dea228658411c6b174c34f745beac1b
61,571
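A minimal usage sketch for the cycle_time function above; the event tuples are hypothetical, and the expected value assumes the final-interval fix noted in the code:

    events = [(0.0, 10.0), (5.0, 12.0), (20.0, 25.0)]  # hypothetical (start, complete) pairs
    # Busy intervals merge to [0, 12] and [20, 25]: 12 + 5 = 17 time units of production time.
    assert cycle_time(events, num_instances=2) == 8.5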
def mlp_check_dimensions(x, y, ws, bs):
    """
    Return True if the dimensions in ws and bs agree.

    :param x: a list of lists representing the x matrix.
    :param y: a list of output values.
    :param ws: a list of weight matrices (one for each layer)
    :param bs: a list of biases (one for each layer)
    :return: True if the dimensions of x, y, ws and bs match
    """
    # W rows should equal X columns, b col should equal W col
    result = True
    if len(ws) != len(bs):
        return False
    if len(x[0]) != len(ws[0]):
        return False
    if len(x) != len(y):
        return False
    if len(y[0]) != len(bs[len(bs) - 1][0]):
        return False
    for layer in range(len(ws)):
        if len(ws[layer][0]) != len(bs[layer][0]):
            return False
        if layer == 0:
            pass
        else:
            prev_w = ws[layer - 1]
            if len(ws[layer]) != len(prev_w[0]):
                return False
    return result
b9556dc5b180463284a6b9fde7a635e0ae7eb6bf
61,574
def is_sorted(l):
    """Checks if the list is sorted """
    return all(l[i] <= l[i + 1] for i in range(len(l) - 1))
548e85d98ee7873a64f14c38376059f2300a564b
61,575
def _getdef(list, num, default=None):
    """
    Get value from list with default

    :param list: List to get value from
    :param num: Index to get
    :param default: Default to return if index isn't in list
    :return: Value at index, or default
    """
    if num < len(list):
        return list[num]
    return default
6e12f8174d525be026033ad44dbe843f56370aa2
61,580
def _linear_interpolation(x, X, Y):
    """Given two data points [X,Y], linearly interpolate those at x."""
    return (Y[1] * (x - X[0]) + Y[0] * (X[1] - x)) / (X[1] - X[0])
b6bae0b7fd85aa53a471d5479abd574ec58ab4eb
61,581
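A quick sanity check of _linear_interpolation above: halfway between the two points, the result is the midpoint of the Y values.

    # Interpolate at x = 1.5 between the points (1, 10) and (2, 20).
    assert _linear_interpolation(1.5, X=[1.0, 2.0], Y=[10.0, 20.0]) == 15.0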
def human_readable_stat(c):
    """
    Transform a timedelta expressed in seconds into a human readable string

    Parameters
    ----------
    c
        Timedelta expressed in seconds

    Returns
    ----------
    string
        Human readable string
    """
    c = int(c)
    days = c // 86400
    hours = c // 3600 % 24
    minutes = c // 60 % 60
    seconds = c % 60
    if days > 0:
        return str(days) + "D"
    if hours > 0:
        return str(hours) + "h"
    if minutes > 0:
        return str(minutes) + "m"
    return str(seconds) + "s"
909b14421193ae3d1878468587358a4f598ed11b
61,587
def is_response(event):
    """Check whether an event is a response indicating a message was
    successfully sent"""
    return 'reply_to' in event and 'ok' in event and event['ok']
6906e8364862f6ed88faa2f2f249db8901df4a51
61,593
def doubleEscape(arg1, arg2):
    """Search through arg1 and replace all instances of arg2 with 2 copies of arg2."""
    output = ''
    for c in arg1:
        if c == arg2:
            output += arg2 + arg2
        else:
            output += c
    return output
87ee7e91a267d9785ac85f225eb53c99181d798e
61,596
from typing import Optional
from datetime import datetime


def json_default(item: object) -> Optional[object]:
    """JSON object type converter that handles datetime objects.

    Arguments:
        item: The object JSON is trying to serialise.
    """
    if isinstance(item, datetime):
        return item.isoformat()
    return None
0e07ef0f996fd034f29d3e28361ee292228ba2fb
61,597
def np_chunk(tree):
    """
    Return a list of all noun phrase chunks in the sentence tree.
    A noun phrase chunk is defined as any subtree of the sentence
    whose label is "NP" that does not itself contain any other
    noun phrases as subtrees.
    """
    # Create a list to store the noun phrase chunks.
    noun_phrase_chunks = []

    # Iterate over the subtrees of 'tree'.
    for subtree in tree.subtrees():
        # For each 'subtree' iterate over its subtrees 's' adding the label
        # for each 's' to the list 'labels'.
        labels = []
        for s in subtree:
            # Use a try block to collect the labels of each 's' and avoid
            # errors, because at some point 's' might be a string, which
            # doesn't have a label method.
            try:
                labels.append(s.label())
            except Exception:
                continue
        # A 'subtree' that is a noun phrase will not have subtrees 's' with
        # the label "NP".
        if subtree.label() == "NP" and "NP" not in labels:
            noun_phrase_chunks.append(subtree)
    return noun_phrase_chunks
d022cf2c30686213187b3550781080e6db9da183
61,601
import math


def merc(lat, lon):
    """ Convert lat/lon into mercator values """
    r_major = 6378137.000
    x = r_major * math.radians(lon)
    scale = x / lon
    y = (
        180.0 / math.pi
        * math.log(math.tan(math.pi / 4.0 + lat * (math.pi / 180.0) / 2.0))
        * scale
    )
    return (x, y)
a55e22c42008c0209e49061911bdb2f4c0aefc31
61,602
def should_scrolling_continue(rule_conf):
    """
    Tells whether a rule config can keep scrolling or should stop.

    :param: rule_conf as dict
    :rtype: bool
    """
    max_scrolling = rule_conf.get('max_scrolling_count')
    stop_the_scroll = 0 < max_scrolling <= rule_conf.get('scrolling_cycle')
    return not stop_the_scroll
764ab6fb95e1228d488e57aa28136a1270e64b31
61,603
def list_item_html(text: str) -> str:
    """Embed text in list element tag."""
    return "<li>{}</li>".format(text)
53fbf492e0e1ede08676db635b0571e8c4fdf2e0
61,608
def toggle_navbar_collapse(n_clicks: int, is_open: bool) -> bool:
    """Toggle the navigation bar on mobile devices

    Arguments:
        n_clicks {int} -- number of clicks
        is_open {bool} -- is navigation bar open

    Returns:
        bool -- new state of navigation bar
    """
    if n_clicks:
        return not is_open
    return is_open
0c3aca96b7b11026d6e9363400cea32410454ddd
61,609
from typing import Tuple
from typing import List


def read_dataset(path: str) -> Tuple[List[List[str]], List[List[str]]]:
    """
    Reads a dataset from a given path.

    Args:
        path: Path of the file stored in tsv format.

    Returns:
        A 2D list of tokens and another of associated labels.
    """
    tokens_s = []
    labels_s = []

    tokens = []
    labels = []

    with open(path) as f:
        for line in f:
            line = line.strip()
            if line.startswith('# '):
                tokens = []
                labels = []
            elif line == '':
                tokens_s.append(tokens)
                labels_s.append(labels)
            else:
                _, token, label = line.split('\t')
                tokens.append(token)
                labels.append(label)

    assert len(tokens_s) == len(labels_s)
    return tokens_s, labels_s
daa0320ac23d71c25056d49810ad5036e2fdac13
61,610
def CalculateRollingMax(raw_scores, total_samples, window_samples):
    """Calculates a rolling maximum across the array, in windows of the same size.

    The scores returned from open-cv matchTemplate are calculated as if each
    value in the array matches with the first position in the window. Here,
    for each position in the original array, scores across the window are
    aggregated, to take into account the match with the first, second,
    third, ... position in the window. The aggregation method is max(), it
    performed better than avg().

    Args:
        raw_scores: array of scores calculated with open-cv matchTemplate
            function. It should have length total_samples - window_samples + 1.
        total_samples: total number of samples of the original data.
        window_samples: number of samples in the window.

    Returns:
        Array of scores calculated aggregating with the maximum.
    """
    scores = []
    left = 0
    current_max = 0
    for right in range(total_samples):
        if right < len(raw_scores):
            if raw_scores[right] > current_max:
                current_max = raw_scores[right]
        current_size = right - left + 1
        if current_size > window_samples:
            pop_value = raw_scores[left]
            left += 1
            if pop_value >= current_max:
                current_max = 0
                for c in range(left, min(right + 1, len(raw_scores))):
                    if raw_scores[c] > current_max:
                        current_max = raw_scores[c]
        score = current_max
        scores.append(score)
    return scores
e5fc81a57d6b51c983c798a51d552af76e30a8fb
61,613
def _get_clip_indices(utt_start, utt_end, batch_start, batch_end):
    """ Cuts the parts of the utterance that do not fit into the batch window.

    Arguments:
        utt_start {int} -- start point of the utterance
        utt_end {int} -- end point of the utterance
        batch_start {int} -- start point of the batch window
        batch_end {int} -- end point of the batch window

    Returns:
        (int, int), bool -- a tuple containing clipped start and end point of
            an utterance, the boolean flag is True if the end of the utterance
            is inside the batch window.
    """
    if utt_end <= batch_start:
        return None
    if utt_start >= batch_end:
        return None
    start = 0
    end = utt_end - utt_start
    if utt_start < batch_start:
        start = batch_start - utt_start
    if utt_end > batch_end:
        end = batch_end - utt_start
    ends = utt_end <= batch_end
    return (start, end), ends
fb6ebff4952d0c8454df31a9f4f717ca94d41894
61,614
import re


def error_034_template_elements(text):
    """Fix pagename magicwords and return (new_text, replacements_count) tuple."""
    return re.subn(r"{{(PAGENAME|FULLPAGENAME)}}", "{{subst:\\1}}", text)
2bba32526e60993b60e2d72141c268dbb69fcce8
61,616
def iter2list(pythonic_iterator):
    """
    Converts a neo4j.util.PythonicIterator to a list.
    """
    return [item for item in pythonic_iterator]
e892a74e88951f3bd4fc5e9eee2b3e4069aafe3c
61,618
def get_integral_powerlaw_model(Emin, Emax, k0, index, E0=1.0):
    r"""
    Compute the energy integral: \int_Emin^Emax f(E) dE for f(E) a power law

    Parameters
    ----------
    - Emin (GeV): the lower bound
    - Emax (GeV): the upper bound
    - k0 : normalization
    - E0 : pivot energy (GeV)
    - index : spectral index

    Outputs
    --------
    - The integrated function
    """
    if Emin > Emax:
        raise TypeError("Emin is larger than Emax")

    output = k0 * E0**(1 - index) / (1 - index) * (
        (Emax / E0)**(1 - index) - (Emin / E0)**(1 - index))
    return output
69494b1f9ad40ca953900fac1c38f3ed893b2786
61,620
from datetime import datetime


def dateconverter(date_obj):
    """
    Stringify datetime.datetime in a given instance
    """
    if isinstance(date_obj, datetime):
        return date_obj.__str__()
8b9514d383064d9be5e7945145797acdba1a0a44
61,622
def data_concat(pd, data_scnt, data_anlst, data_eng, mach_learn):
    """
    Find all data science domain jobs and concatenate the dataframe created
    for each job category

    returns concatenated dataframe
    """
    data_scnt['data'] = 'Data Scientists'
    data_anlst['data'] = 'Data Analysts'
    data_eng['data'] = 'Data Engineer'
    mach_learn['data'] = 'Machine Learning'
    jobs = [data_scnt, data_anlst, data_eng, mach_learn]
    datajobs = pd.concat(jobs)
    return datajobs
58d0dd947a5d5fb3d1028f4ae539642e07f6d80f
61,623
def split_datetime(a_datetime):
    """Given a datetime.datetime, return a 2-tuple of
    (datetime.date, datetime.time)."""
    return (a_datetime.date(), a_datetime.time())
1647fc742e14e8e880b0f2be0e946a4446071b6c
61,628
from typing import Dict
from typing import Optional
from typing import Any


def map_losses_per_agent_ac(
    critic_losses: Dict, policy_losses: Dict, total_losses: Optional[Dict] = None
) -> Dict:
    """Map separate losses dict to loss per agent.

    Args:
        critic_losses : critic loss per agent.
        policy_losses : policy loss per agent.
        total_losses : optional total (critic + policy loss) per agent.

    Returns:
        dict with losses grouped per agent.
    """
    assert len(policy_losses) > 0 and (
        len(critic_losses) == len(policy_losses)
    ), "Invalid System Checkpointer."
    logged_losses: Dict[str, Dict[str, Any]] = {}
    for agent in policy_losses.keys():
        logged_losses[agent] = {
            "critic_loss": critic_losses[agent],
            "policy_loss": policy_losses[agent],
        }
        if total_losses is not None:
            logged_losses[agent]["total_loss"] = total_losses[agent]
    return logged_losses
41802cdbe6e50003909c94e5e704c623f7273ae6
61,632
import math


def lcm(a, b):
    """Get least common multiple of 2 numbers"""
    a = int(a)
    b = int(b)
    # Use floor division: gcd divides a*b exactly, which keeps the result an int
    # (true division would return a float).
    return a * b // math.gcd(a, b)
0a583aa50d652099c78ec4dc0e42b67f47bdeed1
61,635
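A couple of sanity checks for lcm above; with the floor-division fix the result stays an int, and string inputs are coerced via int():

    assert lcm(4, 6) == 12
    assert lcm("3", 7) == 21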
def fstat_join(f):
    """
    Combines fstat columns into a string, properly escaped.
    """
    p = f[1].replace(';', ';;').replace(',', ';.')
    return f"{f[0]},{p},{f[2]},{f[3]},{f[4]}"
4a43e1719c00f3d786403b70e3635eae5edd47e2
61,638
def _UpperCaseAlleles(alleles):
    """Convert the list of allele strings to upper case."""
    upper_alleles = []
    for allele in alleles:
        upper_alleles.append(str(allele).upper())
    return upper_alleles
d6445620a360fb1cb4fc15724061ce91df9e0164
61,641
def capacity_default_edge_weight(_: float) -> float:
    """
    For a capacity default, we assume the edge does not exist: this results in
    a capacity of 0.
    """
    return 0
5e693bc001a44e47282afbae00aae53a3adef415
61,647
def LowerCamelCase(upperCamelCaseStr):
    """ Return the lowerCamelCase variant of an upper camel case string. """
    return upperCamelCaseStr[0].lower() + upperCamelCaseStr[1:]
321e22b96984a241f5ad79ecd7297f58792bb384
61,648
def epsilon(n):
    """ Compute Jacobi symbol (5/n). """
    if n % 5 in [1, 4]:
        return 1
    elif n % 5 in [2, 3]:
        return -1
    else:
        return 0
5c335cd59cbbe8a130763f1ebac2887afaac3e48
61,649
def csv_append(csv_string, item):
    """
    Appends an item to a comma-separated string.

    If the comma-separated string is empty/None, just returns item.
    """
    if csv_string:
        return ",".join((csv_string, item))
    else:
        return item
38d1bde31225a8bf42156f3472a8bad99d3d9dfb
61,650
def read_file(path_to_file: str):
    """Return the file data from `path_to_file`."""
    with open(path_to_file, 'r') as json_file:
        return json_file.read()
c7da33c398e7f5ff190c8b5f1ff0d972d01b4d55
61,651
def indsNotInList(check_list, ref_list):
    """
    find the indices of a list where the nth value is not contained in
    another list

    Parameters
    ----------
    check_list : list
    ref_list : list

    Returns
    -------
    pop_inds : list of int
    """
    pop_inds = []
    for ind, name in enumerate(check_list):
        if name not in ref_list:
            pop_inds.append(ind)
    return pop_inds
f9cf952319d4ccc2775afca06b761a8fb7bde3aa
61,658
import ipaddress


def ip_in_networks(ip, networks, min_prefix_len=1):
    """Return whether `ip` is in the dict of networks

    This is O(1) regardless of the size of networks

    Implementation based on netaddr.IPSet.__contains__

    Repeatedly checks if ip/32; ip/31; ip/30; etc. is in networks for all
    netmasks that match the given ip, for a max of 32 dict key lookups for
    ipv4. If all netmasks have a prefix length of e.g. 24 or greater,
    min_prefix_len prevents checking wider network masks that can't possibly
    match.

    Returns `(netmask, networks[netmask])` for matching netmask in networks,
    if found; False, otherwise.
    """
    if min_prefix_len < 1:
        raise ValueError(f"min_prefix_len must be >= 1, got {min_prefix_len}")
    if not networks:
        return False
    check_net = ipaddress.ip_network(ip)
    while check_net.prefixlen >= min_prefix_len:
        if check_net in networks:
            return check_net, networks[check_net]
        check_net = check_net.supernet(1)
    return False
006221844bf6ee9e0ecda6680a81fe2e01efeced
61,659
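A short sketch of calling ip_in_networks above with a hypothetical networks dict keyed by ipaddress network objects (the "corp" label is made up for illustration):

    import ipaddress

    networks = {ipaddress.ip_network("10.0.0.0/8"): "corp"}
    assert ip_in_networks("10.1.2.3", networks) == (ipaddress.ip_network("10.0.0.0/8"), "corp")
    assert ip_in_networks("192.0.2.1", networks) is False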
def prefix_match(prefix, obj):
    """
    Check if a hash prefix matches.
    Example: prefix_match("ak", obj) will match obj "ak_123" but not "ak123"
    """
    if obj is None:
        return False
    return obj.startswith(f"{prefix}_")
9b53e06b7ebc38a8ee18411e65c748d3b065af03
61,660
def _is_info(status_code):
    """Check if status code is informational: 100 <= status_code < 200"""
    return 100 <= status_code < 200
693f12da3256ba7565dc5a028af601e91e317765
61,661
def compute_efficiencies(denominator_lumi, numerator_lumi, weight,
                         weighted_mean_numerator, sum_weights):
    """Compute efficiencies based on the input parameters.

    Returns a tuple consisting of (efficiency, weighted efficiency,
    weighted_mean_numerator, sum_weights). We return the partial numerator and
    denominator of the weighted mean calculation to ease the calculation of
    weighted values iteratively. It is the responsibility of the calling
    function to properly store these numbers and keep on passing them to this
    routine.
    """
    if denominator_lumi == 0:
        return (-1, -1, weighted_mean_numerator, sum_weights)
    eff = numerator_lumi / denominator_lumi
    sum_weights += weight
    weighted_mean_numerator += (eff * weight)
    if sum_weights == 0:
        weighted_mean = 0.
    else:
        weighted_mean = weighted_mean_numerator / sum_weights
    return (eff, weighted_mean, weighted_mean_numerator, sum_weights)
0ce02a2274ad4ac2229920454c2e3a2ebdd0cce2
61,664
def radio(
        optionText="",
        optionNumber=1,
        htmlId=False,
        inlineHelpText=False,
        blockHelpText=False,
        disabled=False,
        checked=False):
    """
    *Generate a radio - TBS style*

    **Key Arguments:**
        - ``optionText`` -- the text associated with this checkbox
        - ``optionNumber`` -- the order in the option list
        - ``htmlId`` -- the html id of the element
        - ``inlineHelpText`` -- inline and block level support for help text
          that appears around form controls
        - ``blockHelpText`` -- a longer block of help text that breaks onto a
          new line and may extend beyond one line
        - ``disabled`` -- add the disabled attribute on an input to prevent
          user input
        - ``checked`` -- is the radio button checked by default

    **Return:**
        - ``radio`` -- the radio
    """
    if inlineHelpText:
        inlineHelpText = """<span class="help-inline">%(inlineHelpText)s</span>""" % locals()
    else:
        inlineHelpText = ""
    if blockHelpText:
        blockHelpText = """<span class="help-block">%(blockHelpText)s</span>""" % locals()
    else:
        blockHelpText = ""
    if disabled:
        disabled = """disabled"""
        disabledId = "disabledId"
    else:
        disabled = ""
        disabledId = ""
    if checked is False:
        checked = ""
    else:
        checked = "checked"
    if not htmlId:
        htmlId = ""

    # Append both help-text variants (the original appended inlineHelpText
    # twice and never used the computed blockHelpText).
    radio = """
        <label class="radio">
            <input type="radio" name="%(htmlId)s" id="%(htmlId)s %(disabledId)s %(htmlId)s" value="%(optionText)s" %(checked)s %(disabled)s>
            %(optionText)s
        </label>%(inlineHelpText)s%(blockHelpText)s""" % locals()

    return radio
cb2e00428f5da355627ac0dc42cf420695f3ca89
61,669
def make_filter_kwargs(request):
    """
    Make a set of filter kwargs to hide objects inappropriate for current
    user (e. g. private entries mustn't be seen by guests).
    """
    if not (request.user.is_authenticated() and
            request.user.has_perm('blog.can_see_private')):
        return {'private': 0}
    else:
        return {}
d5cf2ec9102542168ebced4b0cf97b5ebd9e2b07
61,670
def parse_header(line):
    """Parses a header line into a (key, value) tuple, trimming whitespace
    off ends. Introductory 'From ' header not treated."""
    colon = line.find(':')
    space = line.find(' ')
    # If starts with something with no whitespace then a colon,
    # that's the ID.
    if colon > -1 and (space == -1 or space > colon):
        id = line[0: colon + 1]
        value = line[colon + 1:]
        if value:
            value = value.strip()
        return (id, value)
    return (None, None)
854fb76adf49fdc6743ae5ee167ea93bf1ad711e
61,681
def density_to_API(rho15):
    """
    Return the API from the density

    Parameters
    ----------
    rho15 : Density at 15°C [kg/m³]
    """
    # cf https://www.engineeringtoolbox.com/water-density-specific-weight-d_595.html?vA=15&units=C#
    d = rho15 / 999.06
    return (141.5 / d) - 131.5
6c2e1742218ae778b5d1264aa0b9437afa4853b4
61,686
def weighted_average(xvals, yvals):
    """
    Determines the weighted average of a group of masses and abundances

    :param list xvals: x values
    :param list yvals: y values
    :return: weighted average, summed intensity
    :rtype: tuple of float
    """
    if sum(yvals) == 0:  # catch for no intensity
        return sum(xvals) / len(xvals), 0.
    return (
        sum([x * y for x, y in zip(xvals, yvals)]) / sum(yvals),  # weighted m/z
        sum(yvals)  # summed intensity
    )
b7bed246c32808ef7fdb5733aab6cddbf21a0cb4
61,689
import pathlib
from typing import Dict
from typing import List
import re


def parse_schema(sql_file: pathlib.Path) -> Dict[str, List[str]]:
    """
    Parses the SQL schema creation script (schemas.sql)
    """
    in_comment = False
    in_table_schema = False
    schema = {}
    table_schema: List[str] = []
    with open(sql_file, "r", encoding="utf-8") as in_fd:
        for line in in_fd:
            line = line.strip().lower()
            if "/*" in line:
                # Start of comment
                in_comment = True
                continue
            elif "*/" in line:
                # End of comment
                in_comment = False
                continue
            elif in_comment:
                # Ignore comments
                continue
            elif line == ");":
                # End of schema
                in_table_schema = False
            elif line.startswith("create table "):
                # Starting a table
                match = re.search(r"create table (\w+) \(", line)
                if match is not None:
                    in_table_schema = True
                    table = match.group(1)
                    table_schema = []
                    schema[table] = table_schema
            elif in_table_schema:
                # Line in the current schema
                col_name = line.split()[0]
                table_schema.append(col_name)
    return schema
bb9cb25e5e0f21c208bd739f627d862a706dd8ef
61,690
def gaps(ranges):
    """Get a list with the size of the gaps between the ranges
    """
    gaps = []
    for cur, nxt in zip(ranges[:-1], ranges[1:]):
        gaps.append(nxt[0] - cur[1])  # 1: end, 0: start
    return gaps
3ea329a0a0ca10a044e6c667b00f48b1bdfe18f2
61,693
def star_char(num_stars: int):
    """
    Given a number of stars (0, 1, or 2), returns its leaderboard
    representation.
    """
    return " .*"[num_stars]
0ddbdd91639e1b41b7e919e12fa7e74e506c464a
61,697
def shortest_path(graph, start, end, path=[]):
    """Uses recursion to find the shortest path from one node to another in an
    unweighted graph. Adapted from
    http://www.python.org/doc/essays/graphs.html

    Args:
        graph: A mapping of the graph to analyze, of the form
            {0: [1,2], 1:[3,4], ...} . Each key has a list of edges.
        start: The ID of the key to start the analysis from
        end: The ID of the key to end the analysis
        path: Optional argument used during the recursive step to keep the
            current path up to that point

    Returns:
        List of the shortest path (list)
        Returns None if start and end are not connected
    """
    path = path + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node not in path:
            newpath = shortest_path(graph, node, end, path)
            if newpath:
                if not shortest or len(newpath) < len(shortest):
                    shortest = newpath
    return shortest
7ba266d7e6e57275e197885dc8dabe62239dc38f
61,698
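A worked example for shortest_path above on a small hypothetical adjacency mapping:

    graph = {0: [1, 2], 1: [3], 2: [3], 3: []}
    assert shortest_path(graph, 0, 3) == [0, 1, 3]  # [0, 2, 3] is no shorter, so the first path found wins
    assert shortest_path(graph, 3, 0) is None       # no edges out of node 3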
def assert_keys_in_form_exist(form, keys):
    """
    Check all the keys exist in the form.

    :param form: object form
    :param keys: required keys
    :return: True if all the keys exist. Otherwise return false.
    """
    if form is None:
        return False
    if type(form) is not dict:
        return False
    for key in keys:
        if key not in form.keys():
            return False
    return True
536ba684669ea58d1342d6478f8d62df419f8231
61,706
def intersection(list1, list2):
    """This function computes intersection between two input lists and
    returns the result.

    Parameters
    ----------
    list1 : First list
    list2 : Second list

    Returns
    ----------
    list3 : Intersection of list1 and list2
    """
    list3 = [value for value in list1 if value in list2]
    return list3
191a0578dd701533bc46629af51dae629138c262
61,712
import glob


def parse_folder(path, raster_ext):
    """
    Returns a list with all raster files in a folder

    Parameters
    ----------
    path: string
        Path to folder with raster files
    raster_ext: string
        specifies raster format

    Returns
    -------
    raster_file_list: list
        list of all raster files in a folder
    raster_file_name: list
        list of all raster names in a folder
    """
    print('\n########## - Searching for files - ##########')
    raster_file_list = []
    raster_file_name = []
    # parsing folder for files
    for file in glob.glob(path + "*" + raster_ext):
        raster_file_list.append(file)
    raster_file_list = [w.replace('\\', '/') for w in raster_file_list]
    raster_file_name = [w[len(path):1000] for w in raster_file_list]
    return raster_file_list, raster_file_name
7e7c57324f754326047e02587fa7d662a5f1162c
61,714
def get_request_and_username(context):
    """
    Returns request object and username
    """
    request = context['request']
    view = context['view']
    username = view.kwargs.get('username')
    if not username:
        # get the username from the user if not set
        username = (request.user and request.user.username)
    return (request, username)
55bf401f33ed353b4e8139e272b7084a9d832453
61,716
from datetime import datetime


def get_session_start_time(recording_metadata: dict) -> datetime:
    """Fetches the session start time from the recording_metadata dict

    Parameters
    ----------
    recording_metadata : dict
        the metadata dict as obtained from the SpikeGLX recording.

    Returns
    -------
    datetime
        the session start time in datetime format.
    """
    session_start_time = recording_metadata.get("fileCreateTime", None)
    if session_start_time:
        session_start_time = datetime.fromisoformat(session_start_time)
    return session_start_time
649dda80885d6fb7c0cd500bc7c35cc2715b1e92
61,721
import torch


def nmse(gt, pred):
    """ Compute Normalized Mean Squared Error (NMSE) """
    return torch.norm(gt - pred) ** 2 / torch.norm(gt) ** 2
0b594086ddf85d4e9b488adf34661ccf8593f95d
61,728
def create_body(filename):
    """Reads fruit name and weight from text file and returns them as a
    formatted string"""
    with open(filename, 'r') as file:
        text = [line.strip() for line in file]
    return f"name: {text[0]}<br/>weight: {text[1]}<br/><br/>"
31fbe3cd0df06c4d01b5e94d535908b951b90fc7
61,730
import math


def piecewise_gaussian(theta: float, theta_0: float = 5, sigma_1: float = 1,
                       sigma_2: float = 10) -> float:
    """A Gaussian function that favors a certain baseline angle (theta_0).

    The Gaussian function is divided into two pieces with different standard
    deviations:
    - If the input baseline angle (theta) is no larger than theta_0, the
      standard deviation will be sigma_1;
    - If theta is larger than theta_0, the standard deviation will be sigma_2.

    More details can be found in "View Selection" paragraphs in Yao's paper
    https://arxiv.org/abs/1804.02505.

    Args:
        theta: the input baseline angle.
        theta_0: defaults to 5, the expected baseline angle of the function.
        sigma_1: defaults to 1, the standard deviation of the function when
            the angle is no larger than theta_0.
        sigma_2: defaults to 10, the standard deviation of the function when
            the angle is larger than theta_0.

    Returns:
        the result of the Gaussian function, in range (0, 1]
    """
    # Decide the standard deviation according to theta and theta_0:
    # if theta is no larger than theta_0, the standard deviation is sigma_1;
    # if theta is larger than theta_0, the standard deviation is sigma_2.
    if theta <= theta_0:
        sigma = sigma_1
    else:
        sigma = sigma_2
    return math.exp(-((theta - theta_0) ** 2) / (2 * sigma ** 2))
2aa6c7968a9ffb954a56cf78c0298fc87bc4086d
61,733
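Two quick checks of piecewise_gaussian above: at theta == theta_0 the exponent is zero, so the function peaks at exactly 1; far from theta_0 it decays through the wider sigma_2 branch:

    assert piecewise_gaussian(5.0) == 1.0
    assert 0.0 < piecewise_gaussian(30.0) < 1.0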
def welford(x_array):
    """Welford's method.

    Mean and variance calculation using Welford's method, taken from part 3
    of "Ten Little Algorithms" by Jason Sachs.

    Args:
        x_array (array): sample sequence.

    Returns:
        M, S: mean and variance of x_array.
    """
    k = 0
    M = 0
    S = 0
    for x in x_array:
        k += 1
        Mnext = M + (x - M) / k
        S = S + (x - M) * (x - Mnext)
        M = Mnext
    return (M, S / (k - 1))
8403f778dbb71f0ec51687dd9f25644966303d91
61,736
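Cross-checking welford above against the direct mean and sample variance of a small sequence:

    M, S = welford([1.0, 2.0, 3.0, 4.0])
    assert M == 2.5
    assert abs(S - 5.0 / 3.0) < 1e-12  # sample variance of 1..4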
def arg_to_attr(arg_name):
    """
    Format arguments like object attributes

    :param arg_name: the list of argument names to parse
    :return: the parsed attributes list
    """
    return [arg.replace('--', '').replace('-', '_') for arg in arg_name]
7a1261da0b5458f3e3820901a9d198e67320bfd7
61,737
def generate_buy_signal(df, spy_large_move):
    """ Method generating a buy signal column in a given dataframe"""
    # Buy signal when SPY is having a large negative move and equity is still up
    df['Buy Signal'] = (df['SPY Change'] < spy_large_move) & (df['Change'] > 0)
    return df
8fbcb59a8766340be130ff4f219101a524db7b11
61,738
def _build_intel_config(config, config_files):
    """Builds the wifi configuration for the intel driver.

    Args:
        config: Config namedtuple
        config_files: Map to look up the generated config files.

    Returns:
        wifi configuration for the intel driver.
    """
    design_name = config.hw_design.name.lower()
    return config_files.wifi_sar_map.get(design_name)
f4598c8a485dd61647b7adde0374326bae92cc7c
61,739
import re


def get_slice_obj_from_str(slicearg):
    """Given a string that looks like a slice, return an actual slice object.
    This is used to let command line arguments specify slices. There is a
    wonky bit w/ argparse that conflicts with starting negative values. I keep
    forgetting to document the work around...
    """
    slicearg = re.sub(',', '', slicearg)
    svals = [int(n) if n else None for n in slicearg.split(':')]
    svals = tuple(svals)
    s = slice(*svals)
    return s
3b06dfd0cddcb30aadd7e475c5a45ac78f83c580
61,743
def main(request, response):
    """
    Respond with a blank HTML document and a `Link` header which
    describes a link relation specified by the requests `location` and
    `rel` query string parameters
    """
    headers = [
        (b'Content-Type', b'text/html'),
        (
            b'Link',
            b'<' + request.GET.first(b'location') + b'>; rel=' + request.GET.first(b'rel')
        )
    ]
    return (200, headers, b'')
5e4c6038b66bd7eafa964a99453cb816c6c133e2
61,753
from typing import List


def _partition(nums: List[int], left: int, right: int) -> int:
    """
    Helper function to partition the given sub-list.

    :param nums: list[int]
    :param left: int
    :param right: int
    :return: int
    """
    # The pivot has already been moved to the left.
    pivot = nums[left]

    # Iterate over the sub-list, use a pointer to keep track of the smaller
    # part, and swap the current number with the pointer as necessary
    smaller_ptr = left + 1
    i = left + 1
    while True:
        while i <= right and nums[i] > pivot:
            i += 1
        if i > right:
            break
        if i != smaller_ptr:
            nums[smaller_ptr], nums[i] = nums[i], nums[smaller_ptr]
        smaller_ptr += 1
        i += 1
    if left != smaller_ptr - 1:
        nums[left], nums[smaller_ptr - 1] = nums[smaller_ptr - 1], nums[left]
    return smaller_ptr - 1
5190a697c1a7412dc0dd13277a0ff48db348e82a
61,756
import torch


def length_form_embedding(emb):
    """Compute the length of each sequence in the batch

    Args:
        emb: [seq_len, batch, depth]

    Returns:
        a length tensor: [batch], counting the non-zero positions
        per sequence
    """
    absed = torch.abs(emb)
    sum_last = torch.sum(absed, dim=2, keepdim=True)
    mask = sum_last != 0
    sum_except_batch = torch.sum(mask, dim=(0, 2), dtype=torch.long)
    return sum_except_batch
d137fdf26f311d1811245846fcaeae813fe224cc
61,758
from typing import Union
import pathlib
import json


def load_json_schema(relative_to: Union[str, pathlib.Path], filename: str):
    """
    Load a JSON schema from file. Expects a 'schemas' directory in the same
    directory as `relative_to`.

    .. tip::

        Typical usage of the form
        `schema = load_json_schema(__file__, 'definition.json')`

    Parameters
    ----------
    relative_to
        the file to begin searching from
    filename
        the JSON file to load

    Returns
    -------
    dict
        the schema
    """
    path = pathlib.Path(relative_to).resolve().parent.joinpath("schemas", filename)
    with path.open(mode="r", encoding="utf-8") as file:
        return json.load(file)
46a6a126b4fdaf40a931197b70c4606710be8b84
61,759
def get_params_autogen(term):
    """Sets the parameters for the API call for the initial user-entered term"""
    params = {
        "action": "parse",
        "prop": "links",
        "page": term,
        "format": "json",
    }
    # Parameter set to query the Wikipedia page for a given term and retrieve
    # up to 250 links to other articles - namespace 0 - from that page in
    # JSON format.
    return params
c5a49a114b153d129e9427db59c479c2b2341333
61,760
def str_artists(artists):
    """Generate a pretty string from multiple artists (a list) from Spotify."""
    artist_string = ''
    for artist in artists:
        artist_string += artist["name"] + ', '
    artist_string = artist_string.rstrip(', ')
    return artist_string
eefaebd07569476d0a4317c8f744f29e10fd68c2
61,767
def findBlankSpace(board):
    """Return an (x, y) tuple of the blank space's location."""
    for x in range(4):
        for y in range(4):
            if board[x][y] == ' ':
                return (x, y)
f06ad62e18084c983214fb36470d30126bca7cbd
61,768
def is_participant(user):
    """ returns true iff user.participant exists and the user is active """
    return hasattr(user, 'participant') and user.is_active
7565cd3665aa16864fae974817c83410ac0a0f8c
61,769
def explans_satistics(explanations_per_cluster, target_quality_measure='x_coverage', threshold=0):
    """
    Computes statistics for the generated set of explanations. Computed
    statistics include the total number of clusters, clusters with at least
    one explanation, and clusters with at least one explanation with
    quality > threshold.

    :param explanations_per_cluster: The dictionary of clusters and list of
        explanations of each cluster.
    :param target_quality_measure: The target quality that determines accepted
        explanations.
    :param threshold: The minimum quality of the accepted explanation.
    :return:
    """
    stats = dict()
    # number of clusters
    stats['clusters_nums'] = len(explanations_per_cluster.keys())
    got_explans = list(filter(lambda v: len(v) > 0, explanations_per_cluster.values()))
    stats['clusters_with_any_explans'] = len(got_explans)
    stats['clusters_with_accepted_explans'] = sum(
        [v[0].get_quality(target_quality_measure) > threshold for v in got_explans])
    return stats
1ac2ea6b8ded082cf7f331d06bee8dcd71ce9547
61,770
def _to_rgba(color):
    """
    Converts a color to RGBA.

    Parameters
    ----------
    color : int, float or list
        If numeric, is interpreted as gray-value between 0 and 1. If list,
        has to have length 3 or 4 and is interpreted as RGB / RGBA depending
        on length (again, with values in [0, 1]).

    Returns
    -------
    list
    """
    if isinstance(color, (int, float)):
        # color given as brightness
        result = [color, color, color, 1]
    elif isinstance(color, list):
        if len(color) == 3:
            # color given as RGB
            result = color + [1]
        elif len(color) == 4:
            result = color.copy()
        else:
            assert False, f'len({color}) = {len(color)} has to be in [3, 4]'
    else:
        assert False, f'color specification not understood: {color}'
    return result
38c75e05b6437e9e3029a41d0189da6512d0de3b
61,772
import requests


def onefs_release_version(host, port, auth):
    """Query a cluster and return the 4 major version digits"""
    url = 'https://{0}:{1}/platform/1/cluster/config'.format(host, port)
    config = requests.get(url, auth=auth, verify=False).json()
    return config['onefs_version']['release'].strip('v')
ed126c5611830fa09f9ba6e0246a5ddae69e261f
61,773
def format_entity_query_value(value):
    """Format and, if needed, quote a value to be placed in an entity
    search query
    """
    value_s = str(value)
    if len(value_s.split()) > 1:
        return '"{}"'.format(value_s.replace('"', '\\"'))
    else:
        return value_s
aeab7acf854c82d27e0a46cc0e61de387f88721b
61,774
import re


def parse_version_directory(dirname):
    """Get version number from version directory name."""
    m = re.match(r'''v(\d{1,5})$''', dirname)
    if not m:
        raise Exception("Bad version directory name: %s" % (dirname))
    v = int(m.group(1))
    if v == 0:
        raise Exception("Bad version directory name: %s, v0 not allowed" % (dirname))
    return v
97a61481b8bf49636b6ac2190a1145bd12a68c1e
61,775
import csv


def get_users_from_file(users_file_path):
    """ Extracts users from CSV file to list """
    names = []
    with open(users_file_path, newline="") as user_file:
        for row in csv.reader(user_file):
            names.append(row[0])
    return names
b6fb2669c2aaef2bad67e4b16686226adb20f11b
61,777
import struct
import socket


def subnet_check(netmask_bits, ip_address, target):
    """
    Check if the specific gateway is within the network that the IP and Mask
    represents.

    netmask_bits: Number of bits in the netmask
    ip_address: A dotted quad string with any IP in the subnet to be checked
    target: A dotted quad string representing the target to check

    Returns True only if the specified target is within the calculated
    subnet range
    """
    ip_decimal = struct.unpack('!L', socket.inet_aton(ip_address))[0]
    target_decimal = struct.unpack('!L', socket.inet_aton(target))[0]
    netmask_decimal = (0xFFFFFFFF >> int(netmask_bits)) ^ 0xFFFFFFFF
    return ip_decimal & netmask_decimal == target_decimal & netmask_decimal
784034801ce41d69590abe2bc7bb9a2ab11a03f8
61,781
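A quick membership check with subnet_check above, using a /24 and two hypothetical addresses:

    assert subnet_check(24, "192.168.1.10", "192.168.1.200") is True
    assert subnet_check(24, "192.168.1.10", "192.168.2.1") is False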
def update(client, record_type_name, internal_id, data, preferences=None):
    """Update the record with the given type name and internal identifier with
    the provided data.

    Args:
        client: Netsuite API client
        record_type_name: Name of the type of record to update
        internal_id: Unique internal identifier for the record type
        data: Dictionary with the data to update the record
        preferences: Preferences to be used during the operation (optional)
    """
    if not internal_id:
        raise Exception("Internal ID {} is invalid!".format(internal_id))
    if not isinstance(data, dict):
        raise Exception("Type of data {}. Expected dict!".format(type(data)))
    # Merge the data to update with the record internal identifier
    data.update({"internalId": internal_id})
    # Get the class to generate an instance of a given record type
    RecordType = client.models[record_type_name]
    # Instantiate record with the data to be updated
    record_with_data_to_update = RecordType(**data)
    return client.update(record_with_data_to_update)
a3f5a04117fa98a507232ce844f61ec1c08e6dd2
61,782
def with_graph(f):
    """Call a function with self.graph.as_default() context."""
    def wrapper(*args, **kwargs):
        # the first argument is always the model instance
        model = args[0]
        with model.graph.as_default():
            return f(*args, **kwargs)
    return wrapper
2f079a76ada7ce25b12b2a98b2c89661d69616c1
61,784
def cycle_check_undirected(graph, node=0, source_node=None, visited_nodes=None):
    """Returns true iff an undirected graph represented by an adjacency list
    has a cycle
    """
    if visited_nodes is None:
        visited_nodes = set()
    if node in visited_nodes:
        return True  # Found a cycle

    # Visit a node, then do a depth-first search on its connections;
    # if a node has been visited before, there is a cycle.
    visited_nodes.add(node)
    has_cycle = False
    for other in graph[node]:
        if other != source_node:  # Undirected graph, don't want to detect cycle on one edge
            has_cycle = cycle_check_undirected(graph, other, node, visited_nodes)
            if has_cycle:
                break

    if not has_cycle and source_node is None:
        # this is the first node checked, were all the nodes checked?
        # this graph could have unconnected trees
        if len(graph) != len(visited_nodes):
            # Find a non-visited node and check it for cycles
            for other in range(len(graph)):
                if other not in visited_nodes:
                    # Giving it source node None means it will check for other
                    # unconnected trees. Giving it visited_nodes means it won't
                    # redo checking nodes that have been visited already.
                    has_cycle = cycle_check_undirected(graph, other, None, visited_nodes)
                    if has_cycle:
                        break
    return has_cycle
ae6448a1fbabf9a02c3fbe66a0ec9c6ce6e27a9a
61,786
def get_statistics(url_messages):
    """Get statistics from the input URL messages."""
    url, messages = url_messages
    return {
        'url': url,
        'num_reviews': len(messages),
        'score': sum(msg['score'] for msg in messages) / len(messages),
        'first_date': min(msg['processing_time'] for msg in messages),
        'last_date': max(msg['processing_time'] for msg in messages),
    }
adb4e801dca090fe7f9591f4faf787df95baef22
61,787
import torch


def autograd_range(name):
    """
    Creates an autograd range for pytorch autograd profiling
    """
    return torch.autograd.profiler.record_function(name)
fc781d79e5befbda86fad15ef2abb85d1f8403a7
61,788
def strip_angle_brackets_from_url(url):
    """Normalize URL by stripping angle brackets."""
    return url.lstrip("<").rstrip(">")
eb9216097c9ecad26189df37434270cc820ce33a
61,790
from typing import List
from typing import Any


def sort_data(sorted_indices: List[int], data: List[Any]) -> List[Any]:
    """Sorts the data using given indices.

    :param sorted_indices: sorted indices
    :param data: data
    :return: sorted data
    """
    new_data = []
    for index in sorted_indices:
        new_data.append(data[index])
    return new_data
c05218abc6fbe9fa77f587e5db70de7e016534ba
61,794
def _value(tree, at):
    """Get the leaf value at index."""
    size = len(tree) // 2
    at = size + at if at < 0 else at
    assert 0 <= at < size
    return tree[size + at]
f21e680817bb351aba3b2669ade9c7fa94f54eb5
61,795
def model(x, nu, a):
    """
    Model for fitting Figure 3.

    Args:
        x (numpy array): x coordinates
        nu (float): the exponent
        a (float): the constant of proportionality

    Returns:
        yy (numpy array): the y values for the model y = ax^{nu}
    """
    yy = a * x ** nu
    return yy
d4df4563317f1737dba9ec4e8ce700791b6a1c83
61,796
def colorscale(hexstr, scalefactor):
    """
    Scales a hex string by ``scalefactor``. Returns scaled hex string.

    To darken the color, use a float value between 0 and 1.
    To brighten the color, use a float value greater than 1.

    >>> colorscale("DF3C3C", .5)
    6F1E1E
    >>> colorscale("52D24F", 1.6)
    83FF7E
    >>> colorscale("4F75D2", 1)
    4F75D2
    """
    def clamp(val, minimum=0, maximum=255):
        if val < minimum:
            return minimum
        if val > maximum:
            return maximum
        return val

    if scalefactor < 0 or len(hexstr) != 6:
        return hexstr

    r, g, b = int(hexstr[:2], 16), int(hexstr[2:4], 16), int(hexstr[4:], 16)

    r = int(clamp(r * scalefactor))
    g = int(clamp(g * scalefactor))
    b = int(clamp(b * scalefactor))

    return "%02x%02x%02x" % (r, g, b)
3e61010289f2a69e76cd86142e7755c793c119bd
61,799
from typing import Any


def exact_method(term: str, key: str, value: Any) -> str:
    """
    Map file search method 'exact' will return 'value' if 'key' == 'term'

    >>> exact_method("Hello World", "Hello World", "squaids")
    'squaids'
    >>> exact_method("xxx", "Hello World", "squaids")
    ''
    """
    return value if key == term else ""
474a97d0c3eeb55e058b2ee1ffef688c4bee2f84
61,801
def _clamp(coordinate):
    """restrict a single coordinate to 0-255"""
    return max(0, min(255, coordinate))
478aacaf10e65217ea2061ce2ece23032343f3e1
61,803
import html


def get_filetype_link(link_text, url, filetype):
    """
    Put the pieces together and produce a valid hyperlink for given output
    filetype.

    Args:
        link_text: A string representing the displayed or linked text when
            linking to something, e.g. "hello" in
            <a href="http://example.org">hello</a>. This string should
            already be in the intended form; i.e. all HTML escapes should
            have been unescaped at this point.
        url: A string of the URL.
        filetype: A string of the output filetype. Accepted parameters are:
            "none", "html", "markdown", "tex", "latex".

    Returns:
        A string that is a valid hyperlink for the specified output filetype.
    """
    if filetype == "markdown":
        # From http://pandoc.org/README.html#backslash-escapes
        # There is also the hyphen, "-", but I've removed that since escaping
        # it just prevents em- and en-dashes from forming (and in most cases,
        # this is what one wants)
        special_chars = "\\`*_{}[]()>#+.!"
        result = ""
        for c in link_text:
            if c in special_chars:
                result += "\\" + c
            else:
                result += c
        return "[{link_text}]({url})".format(link_text=result, url=url)
    if filetype == "html":
        return '<a href="{url}">{link_text}</a>'.format(url=url, link_text=html.escape(link_text))
    if filetype == "mediawiki":
        return "[{url} {link_text}]".format(url=url, link_text=link_text)
    if filetype in ["latex", "tex"]:
        # LaTeX is really sensitive about special characters so this probably
        # needs a lot of tweaking
        special_chars = "$&%{_#"
        result = ""
        for c in link_text:
            if c in special_chars:
                result += "\\" + c
            elif c == "\\":
                result += "\\textbackslash{}"
            elif c == "~":
                result += "\\textasciitilde{}"
            else:
                result += c
        clean_url = ""
        for c in url:
            if c in special_chars or c in "~":
                clean_url += "\\" + c
            elif c == "\\":
                clean_url += "{\\textbackslash}"
            else:
                clean_url += c
        return ("\\href{%s}{%s}" % (clean_url, result))
    else:
        return "{link_text}: {url}".format(url=url, link_text=link_text)
da88c0c88dbdb7f14fc781eb8d8c054156bc9592
61,807
def paired_correlations(df):
    """
    Calculates paired Pearson's correlations for dataframe

    :param df: dataframe of numerical features
    :return: Pandas DataFrame of paired correlations
    """
    correlations = df.corr().abs().unstack().sort_values().reset_index()
    correlations = correlations[correlations['level_0'] != correlations['level_1']]
    return correlations
c721e9f43a7a87f335d2b045d564ad9a178fa77e
61,808
import time


def RetryWithBackoff(callable_func, retry_notify_func,
                     initial_delay=1, backoff_factor=2,
                     max_delay=60, max_tries=20):
    """Calls a function multiple times, backing off more and more each time.

    Args:
        callable_func: A function that performs some operation that should be
            retried a number of times upon failure.
            Signature: () -> (done, value)
            If 'done' is True, we'll immediately return (True, value)
            If 'done' is False, we'll delay a bit and try again, unless we've
            hit the 'max_tries' limit, in which case we'll return
            (False, value).
        retry_notify_func: This function will be called immediately before the
            next retry delay.
            Signature: (value, delay) -> None
            'value' is the value returned by the last call to 'callable_func'
            'delay' is the retry delay, in seconds
        initial_delay: Initial delay after first try, in seconds.
        backoff_factor: Delay will be multiplied by this factor after each try.
        max_delay: Maximum delay, in seconds.
        max_tries: Maximum number of tries (the first one counts).

    Returns:
        What the last call to 'callable_func' returned, which is of the form
        (done, value). If 'done' is True, you know 'callable_func' returned
        True before we ran out of retries. If 'done' is False, you know
        'callable_func' kept returning False and we ran out of retries.

    Raises:
        Whatever the function raises--an exception will immediately stop
        retries.
    """
    delay = initial_delay
    num_tries = 0

    while True:
        done, opaque_value = callable_func()
        num_tries += 1

        if done:
            return True, opaque_value
        if num_tries >= max_tries:
            return False, opaque_value

        retry_notify_func(opaque_value, delay)
        time.sleep(delay)
        delay = min(delay * backoff_factor, max_delay)
89bb339007b3c31e520e665c6390cec609b7ccf5
61,811
def crop_dicom(ct, mask_pos: tuple, dims: tuple):
    """
    Method to crop full ct image

    :param ct: ct image as numpy array
    :param mask_pos: position to crop from
    :param dims: dimensions to crop from mask in format (x,y,z), x gets
        cropped in both positive and negative direction
    :return: cropped ct image
    """
    x, y, z = mask_pos
    x_dim, y_dim, z_dim = dims
    y_basic_offset = 40
    return ct[x - x_dim: x + x_dim,
              y - y_basic_offset: y - y_basic_offset + y_dim,
              z: z + z_dim]
2f60a6728e6e18636f1477a88b0f62a0289fb465
61,819
def destroy_pieces(turn):
    """
    Set `turn_destroyed` for every piece which is destroyed
    """
    pieces = []
    destroyed_piece_states = turn.piecestates.filter(destroyed=True)
    for piece_state in destroyed_piece_states:
        piece = piece_state.piece
        piece.turn_destroyed = turn
        piece.save()
        pieces.append(piece)
    return pieces
296145e582b1db7a98308b39d7ad2f47a134a288
61,820