Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
import math


def get_distance(lat_1, lng_1, lat_2, lng_2):
    """Calculates the distance between two coordinates.

    Args:
        lat_1 (float): start latitude
        lng_1 (float): start longitude
        lat_2 (float): end latitude
        lng_2 (float): end longitude

    Returns:
        float: distance in meters
    """
    # transform coordinates to radians
    lat_1, lng_1, lat_2, lng_2 = map(math.radians, [lat_1, lng_1, lat_2, lng_2])

    # calculate the distance
    d_lat = lat_2 - lat_1
    d_lng = lng_2 - lng_1
    temp = (
        math.sin(d_lat / 2) ** 2
        + math.cos(lat_1) * math.cos(lat_2) * math.sin(d_lng / 2) ** 2
    )
    return 6373.0 * 1000 * (2 * math.atan2(math.sqrt(temp), math.sqrt(1 - temp)))
3e09826a4b556e897f2c39d792ed2f1d8da6109c
11,639
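A quick sanity check for the get_distance record above — a minimal sketch assuming the function is in scope, with approximate Paris/London coordinates chosen for illustration:

# Assumes get_distance() from the record above is defined.
# Coordinates below are approximate values, used only for illustration.
paris = (48.8566, 2.3522)
london = (51.5074, -0.1278)

d = get_distance(paris[0], paris[1], london[0], london[1])
print(f"Paris -> London: {d / 1000:.0f} km")  # roughly 340 km great-circle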
import math


def spherical_to_cartesian(r, theta, phi):
    """
    :param r: Radius.
    :param theta: in radians.
    :param phi: azimuth angle in radians
    :return: x=[x1, x2, x3] coordinates.
    """
    return [r * math.sin(phi) * math.cos(theta),
            r * math.sin(phi) * math.sin(theta),
            r * math.cos(phi)]
08d0748d5acd9dab4e74a19879b2f8ac7858bb31
11,640
import copy


def flat_ingredients_list_DFS(product):
    """
    Recursive function to search the ingredients graph by doing a Depth First
    Search and return it as a flat list of all nodes. Sub ingredients are
    placed right after their parents.

    Args:
        product (dict): Dict corresponding to a product or a compound ingredient.

    Returns:
        list: List containing all the ingredients graph nodes.
    """
    if 'ingredients' in product:
        product_without_ingredients = copy.deepcopy(product)
        del product_without_ingredients['ingredients']
        if '_id' in product:  # It is a product and not a compound ingredient:
            return [y for x in product['ingredients']
                    for y in flat_ingredients_list_DFS(x)]
        else:
            return [product_without_ingredients] + \
                   [y for x in product['ingredients']
                    for y in flat_ingredients_list_DFS(x)]
    else:
        return [product]
d2b3ada88963d3967fa8fffc4aa294bed4b1fe31
11,641
import torch


def masked_log_softmax(vector, mask=None, dim=-1):
    """
    This performs a log-softmax on just the non-masked portions of `vector` (see
    https://github.com/allenai/allennlp/blob/main/allennlp/nn/util.py#L286-L314)
    """
    if mask is not None:
        mask = mask.float()
        while mask.dim() < vector.dim():
            mask = mask.unsqueeze(1)
        vector = vector + (mask + 1e-45).log()
    return torch.nn.functional.log_softmax(vector, dim=dim)
9fd966cd381355c6117c72348f5bcab845ef1f00
11,644
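A minimal usage sketch for the masked_log_softmax record above, assuming the function is in scope; the tensor values are invented for illustration:

# Assumes masked_log_softmax() from the record above is defined.
import torch

scores = torch.tensor([2.0, 1.0, 0.5, -1.0])
mask = torch.tensor([1, 1, 0, 1])  # position 2 is masked out

log_probs = masked_log_softmax(scores, mask)
print(log_probs.exp())  # masked position gets (near-)zero probability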
def lazy_method(fn):
    """Decorator that makes a method lazy-evaluated."""
    attr_name = '_lazy_' + fn.__name__

    def _lazy_method(self, *args, **kw):
        if not hasattr(self, attr_name) or kw.get("new_process", False):
            setattr(self, attr_name, fn(self, *args, **kw))
        return getattr(self, attr_name)

    return _lazy_method
cad299288b5b02a061605f661e806f92d269f333
11,645
def player_died_bullet(playerx, playery, rect_enemy):
    """This function checks if the player died by an enemy's bullet."""
    pos_player = playerx, playery
    return bool(rect_enemy.collidepoint(pos_player))
073482e3da88e0aa448d30f2c0dd71e6cd7fe6e3
11,646
import torch


def topk(vec, k):
    """Return the largest k elements (by magnitude) of vec."""
    # on a gpu, sorting is faster than pytorch's topk method
    # topkIndices = torch.sort(vec**2)[1][-k:]
    # however, torch.topk is more space efficient

    # topk on cuda returns what looks like uninitialized memory if
    # vals has nan values in it
    # saving to a zero-initialized output array instead of using the
    # output of topk appears to solve this problem
    topkVals = torch.zeros(k, device=vec.device)
    topkIndices = torch.zeros(k, device=vec.device).long()
    torch.topk(vec ** 2, k, sorted=False, out=(topkVals, topkIndices))

    ret = torch.zeros_like(vec)
    if len(vec.size()) == 1:
        ret[topkIndices] = vec[topkIndices]
    elif len(vec.size()) == 2:
        rows = torch.arange(vec.size()[0]).view(-1, 1)
        ret[rows, topkIndices] = vec[rows, topkIndices]
    return ret
d0b0830aca482758c833769c661ee7d19ae63438
11,647
import glob
import os


def parse_path(f1, f2):
    """Parse two input arguments and return two lists of file names"""
    # if second argument is missing or is a wild card, point it
    # to the current directory
    f2 = f2.strip()
    if f2 == '' or f2 == '*':
        f2 = './'

    # if the first argument is a directory, use all GEIS files
    if os.path.isdir(f1):
        f1 = os.path.join(f1, '*.??h')
    list1 = glob.glob(f1)
    list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.']

    # if the second argument is a directory, use file names in the
    # first argument to construct file names, i.e.
    # abc.xyh will be converted to abc_xyf.fits
    if os.path.isdir(f2):
        list2 = []
        for file in list1:
            name = os.path.split(file)[-1]
            fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits'
            list2.append(os.path.join(f2, fitsname))
    else:
        list2 = [s.strip() for s in f2.split(",")]

    if list1 == [] or list2 == []:
        err_msg = ""
        if list1 == []:
            err_msg += "Input files `{:s}` not usable/available. ".format(f1)
        if list2 == []:
            err_msg += "Input files `{:s}` not usable/available. ".format(f2)
        raise IOError(err_msg)
    else:
        return list1, list2
48dd4cd70e7e45c73cd4a41ad13fe0bd79d7270b
11,648
def DecryptAlgo(EncryptData, PayloadSize, StartKey, Keys):
    """
    IN: Encrypted Data, PayloadSize, StartKey and Dictionary of keys
    OUT: Bytearray of decrypted data
    Description: This function is the PlugX crypto routine used in compressed
    PlugX samples
    """
    key0 = StartKey & 0xFFFFFFFF
    key1 = StartKey & 0xFFFFFFFF
    key2 = StartKey & 0xFFFFFFFF
    key3 = StartKey & 0xFFFFFFFF
    decrypt = bytearray()
    count = 0
    while count < PayloadSize:
        key0 = ((key0 + (key0 >> 3) & 0xFFFFFFFF) - Keys[0][0]) & 0xFFFFFFFF
        if Keys[1][1] == "-":
            key1 = ((key1 + (key1 >> 5) & 0xFFFFFFFF) - Keys[1][0]) & 0xFFFFFFFF
        else:
            key1 = ((key1 + (key1 >> 5) & 0xFFFFFFFF) + Keys[1][0]) & 0xFFFFFFFF
        key2 = ((key2 - (key2 << 7) & 0xFFFFFFFF) + Keys[2][0]) & 0xFFFFFFFF
        if Keys[3][1] == "-":
            key3 = ((key3 - (key3 << 9) & 0xFFFFFFFF) - Keys[3][0]) & 0xFFFFFFFF
        else:
            key3 = ((key3 - (key3 << 9) & 0xFFFFFFFF) + Keys[3][0]) & 0xFFFFFFFF
        Final_Key = (((key2 & 0xFF) + (key3 & 0xFF) + (key1 & 0xFF) + (key0 & 0xFF)) & 0xFF)
        decrypt.append(EncryptData[count] ^ Final_Key)
        count += 1
    return decrypt
05f802399b4485743609522f40b0e8f962f3332e
11,650
def parse_raw(output):
    """Just return `output` as a single string assigned to dict key '_'
    for reference in assertion expressions.

    Returns {'_': output}
    """
    return dict(_=output)
7fe463b997687bedad6d77d4bca4718037f18069
11,651
import re


def http_header_link_regex(relation_type):
    """Create a regex matching the http header links of the given type."""
    return re.compile(r'.*;+\s*rel="{}"\s*(;.*)?'.format(
        re.escape(relation_type)))
4085e9258c0f6d5d1de33f82318aa9006fbe40bc
11,652
def inp():
    """For taking integer inputs."""
    return int(input())
2d776b703c9825a33ae80f7e966f00a4aa9c2289
11,654
def read_txt(path):
    """Read a mass spectrum from a text file.

    Args:
        path (str): Path to the spectrum.

    Returns:
        tuple: Lists with m/z and intensities.
    """
    mz = []
    i = []
    with open(path) as f:
        for line in f:
            line = line.split()
            mz.append(float(line[0]))
            i.append(float(line[1]))
    return mz, i
e0efe8549596ec2bf312967ecf265d8eb8b3372c
11,655
def find_parent(child: str, parent_of: dict, recursive=False):
    """
    Find the parent or great-great-...-great-parent of a child

    Required Parameters
    -------------------
    child: str
        If this is already the greatest-parent, will return itself;
        otherwise, raise KeyError
    parent_of: dict
        dictionary with key = child and value = parent, e.g.:
            parent_of = {}
            parent_of["child"] = "parent"

    Other Parameters
    ----------------
    recursive: bool (default: False)
        if True, look for greatest-parent of a child.

    Returns
    -------
    itself, the parent or the greatest-parent
    """
    try:
        parent = parent_of[child]
    except KeyError:
        if child in parent_of.values():
            return child
        raise
    if recursive:
        # propagate the flag so the recursion climbs all the way up
        # to the greatest-parent instead of stopping one level short
        return find_parent(parent, parent_of, recursive=True)
    else:
        return parent
429f91160f6abc8710ced71b07e5e2e5c0a97cfc
11,656
def process_immediate(immediate):
    """Returns an integer object from a string object."""
    if not isinstance(immediate, str):
        raise TypeError('Immediate must be a String object.')
    if immediate.startswith("0x"):
        return int(immediate, 16)
    else:
        return int(immediate)
b6665563663731337df2b7a31defaca0ae3de24f
11,657
def successors_query(var_name, node_id, node_label, edge_label,
                     successor_label=None, undirected=False):
    """Generate query for getting the ids of all the successors of a node.

    Parameters
    ----------
    var_name
        Name of the variable corresponding to the node to match
    node_id
        Id of the node to match
    node_label : optional
        Label of the node to match, default is 'node'
    edge_label : optional
        Label of the edge to match, default is 'edge'
    successor_label : optional
        Label of the successors we want to find, 'node_label' is used if None.
    """
    if successor_label is None:
        successor_label = node_label
    if undirected is False:
        arrow = ">"
    else:
        arrow = ""
    query = (
        "OPTIONAL MATCH (`{}`:{} {{id : '{}'}})-[:{}]-{}(suc:{})\n".format(
            var_name, node_label, node_id, edge_label, arrow, successor_label) +
        "RETURN suc.id as suc"
    )
    return query
7ad64a3d535e575af3db4e5f6719ce0d0b702a97
11,658
def _TimeToString(datetime):
    """Converts a datetime object into a string
    adapted from M-LOOP: mloop.utilities

    Args:
        datetime (datetime): datetime object (e.g. datetime.datetime.now())

    Returns:
        str: date time as 'yyyy-mm-dd_hh-mm'
    """
    return datetime.strftime("%Y-%m-%d_%H-%M")
3f778b8b15f19bdfe5238799cb248316772380b4
11,659
def array(string):
    """Converts a string to a list, split on whitespace.

    :param string: (str) The string to split
    :return: (list) The string split into a list on the whitespace.
    """
    return string.split()
c48c35d321848f3e997931c5ab9f9357842b9228
11,661
def TensorBoardConfig(argument_parser):
    """
    Set CLI arguments

    :param argument_parser: argument parser
    :type argument_parser: ```ArgumentParser```

    :returns: argument_parser
    :rtype: ```ArgumentParser```
    """
    argument_parser.description = """Enable visualizations for TensorBoard.

TensorBoard is a visualization tool provided with TensorFlow.

This callback logs events for TensorBoard, including:

* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling

If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:

```
tensorboard --logdir=path_to_your_logs
```

You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).

Examples:

Basic usage:

```python
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
# Then run the tensorboard command to view the visualizations.
```

Custom batch-level summaries in a subclassed Model:

```python
class MyModel(tf.keras.Model):

    def build(self, _):
        self.dense = tf.keras.layers.Dense(10)

    def call(self, x):
        outputs = self.dense(x)
        tf.summary.histogram('outputs', outputs)
        return outputs

model = MyModel()
model.compile('sgd', 'mse')

# Make sure to set `update_freq=N` to log a batch-level summary every N batches.
# In addition to any `tf.summary` contained in `Model.call`, metrics added in
# `Model.compile` will be logged every N batches.
tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```

Custom batch-level summaries in a Functional API Model:

```python
def my_summary(x):
    tf.summary.histogram('x', x)
    return x

inputs = tf.keras.Input(10)
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Lambda(my_summary)(x)
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', 'mse')

# Make sure to set `update_freq=N` to log a batch-level summary every N batches.
# In addition to any `tf.summary` contained in `Model.call`, metrics added in
# `Model.compile` will be logged every N batches.
tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```

Profiling:

```python
# Profile a single batch, e.g. the 5th batch.
tensorboard_callback = tf.keras.callbacks.TensorBoard(
    log_dir='./logs', profile_batch=5)
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])

# Profile a range of batches, e.g. from 10 to 20.
tensorboard_callback = tf.keras.callbacks.TensorBoard(
    log_dir='./logs', profile_batch=(10,20))
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
```"""
    argument_parser.add_argument(
        "--log_dir",
        help="the path of the directory where to save the log files to be "
        "parsed by TensorBoard.",
        required=True,
    )
    argument_parser.add_argument(
        "--histogram_freq",
        help="""frequency (in epochs) at which to compute activation and weight
histograms for the layers of the model. If set to 0, histograms won't be
computed. Validation data (or split) must be specified for histogram
visualizations.""",
        required=True,
    )
    argument_parser.add_argument(
        "--write_graph",
        help="""whether to visualize the graph in TensorBoard. The log file can
become quite large when write_graph is set to True.""",
        required=True,
    )
    argument_parser.add_argument(
        "--write_images",
        help="whether to write model weights to visualize as image in TensorBoard.",
        required=True,
    )
    argument_parser.add_argument(
        "--update_freq",
        help="""`'batch'` or `'epoch'` or integer. When using `'batch'`, writes
the losses and metrics to TensorBoard after each batch. The same applies for
`'epoch'`. If using an integer, let's say `1000`, the callback will write the
metrics and losses to TensorBoard every 1000 batches. Note that writing too
frequently to TensorBoard can slow down your training.""",
        required=True,
    )
    argument_parser.add_argument(
        "--profile_batch",
        help="""Profile the batch(es) to sample compute characteristics.
profile_batch must be a non-negative integer or a tuple of integers. A pair of
positive integers signify a range of batches to profile. By default, it will
profile the second batch. Set profile_batch=0 to disable profiling.""",
        required=True,
    )
    argument_parser.add_argument(
        "--embeddings_freq",
        help="""frequency (in epochs) at which embedding layers will be
visualized. If set to 0, embeddings won't be visualized.""",
        required=True,
    )
    argument_parser.add_argument(
        "--embeddings_metadata",
        help="""a dictionary which maps layer name to a file name in which
metadata for this embedding layer is saved. See the [details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional) about
metadata files format. In case if the same metadata file is used for all
embedding layers, string can be passed.""",
        required=True,
    )
    return argument_parser
22abff24eb0d0ca64d32db7a8de57623d51a3c00
11,662
def label_mapper(raw_labels, new_labels):
    """Map some raw labels into new labels.

    When dealing with GEO DataSets it is very common that each GSM sample has
    a different phenotype (e.g. 'Brain - 001', 'Brain - 002', ...). This
    function maps these raw labels into new homogeneous labels.

    Parameters
    -----------
    raw_labels : list of strings
        list of unpreprocessed labels
    new_labels : list of strings
        list of labels to map

    Returns
    -----------
    y : array of float, shape : n_samples
        the modified label vector

    Examples
    -----------
    >>> raw_labels = ['Brain - 001', 'Brain - 002', 'Muscle - 001', 'Muscle - 002']
    >>> label_mapper(raw_labels, ['Brain', 'Muscle'])
    ['Brain', 'Brain', 'Muscle', 'Muscle']
    """
    y = []
    for rl in raw_labels:
        for nl in new_labels:
            if nl in rl:
                y.append(nl)
                break
        else:
            y.append(rl)
            # print('No mapping rule for %s', rl)
    return y
67aaa329374169f61414032b0949344437d16022
11,663
from typing import Optional


def validate_self_discharge(self_discharge: Optional[float]) -> Optional[float]:
    """
    Validates the self discharge of an object. Self discharge is always optional.

    :param self_discharge: The self discharge of the object.
    :return: The validated self discharge.
    """
    if self_discharge is None:
        return None
    if self_discharge < 0 or self_discharge > 1:
        raise ValueError("Self discharge must be between 0 and 1.")
    return self_discharge
4d1d6c3ddb6530fc6fc6e03809a4320b0b50033f
11,664
import pickle
import base64


def unbp(data):
    """Un-(Base64-Pickle)."""
    return pickle.loads(base64.b64decode(data))
d1338e2d877ac84c78cd67e2b5b425c173011331
11,665
def check_in_field(pos, field_config):
    """Return flag to indicate whether the object is in the field."""
    k1 = field_config['borderline'][0]
    k2 = field_config['borderline'][1]
    b = field_config['borderline'][2]
    flag = k1 * pos[0] + k2 * pos[1] + b > 0
    return flag
1a0020bf9a0a6baad6cb465092864899bba7324a
11,666
def clearBlinkCache():
    """
    clearBlinkCache() -> None

    Clear the Blink cache for all devices.
    """
    return None
d518a45b96fc7193a6a28952aee2500b0bb470b1
11,668
import requests


def md_to_rst(from_file):
    """Convert Markdown format to reStructuredText format.

    @param from_file: {str} path of the markdown file
    @param to_file: {str} path of the rst file
    """
    response = requests.post(
        url='http://c.docverter.com/convert',
        data={'to': 'rst', 'from': 'markdown'},
        files={'input_files[]': open(from_file, 'rb')}
    )
    if response.ok:
        return response.content
5001a37634b08857747f58de1f7591c34a5d4fa2
11,670
def l_min(s, m):
    """Minimum allowed value of l for a given s, m.

    The formula is l_min = max(|m|, |s|).

    Parameters
    ----------
    s: int
        Spin-weight of interest
    m: int
        Magnetic quantum number

    Returns
    -------
    int
        l_min
    """
    return max(abs(s), abs(m))
9eec996df3b8e026c8b58649fddbbbcc05c38372
11,672
import time


def native_sort_implementation(array):
    """Implement native python sort method."""
    begin = time.time()
    nat_sortedarray = sorted(array.getData())
    end = time.time()
    time_native = end - begin
    print('Native sort', f'{time_native =}')
    return nat_sortedarray
7b964644fdb2b11e8c50303ac0c7f4d4d76d6842
11,673
def threshold_means(df, thresh_name, thresholds, comp_df=None, error_fac=1.0,
                    use_percents=True):
    """Computes the means (and standard deviations) along a set of threshold
    values. This is handy for doing the Threshold vs. Robustness plots when in
    comparison to DREA.

    Args:
        df (DataFrame): DataFrame of the data we want to plot. Often, this
            needs to be filtered to be only one algorithm.
        thresh_name (str): String representing the column name for thresholds
        comp_df (DataFrame, optional): Data frame to compare to, percent wise.
        error_fac (float, optional): Multiply error sizes by this number.
            Particularly useful if we want to use confidence intervals.
            Default is 1.0.
        use_percents (float, optional): Return results in percents.

    Returns:
        Returns an object with properties:
            robs -> returns a list of robustness means
            robs_err -> returns list of robustness errors.
            sends -> returns a list of send frequencies.
            sends_err -> returns a list of errors of send frequencies.
            res -> returns a list of reschedule frequencies
            res_err -> returns a list of reschedule frequencies errors
            runtimes -> returns a list of runtimes.
    """
    if comp_df is not None:
        comp_rob = comp_df["robustness"].mean()
        comp_res = comp_df["reschedule_freq"].mean()
        comp_run = comp_df["runtime"].mean()
        comp_send = comp_df["send_freq"].mean()
    else:
        comp_rob = 1.0
        comp_res = 1.0
        comp_run = 1.0
        comp_send = 1.0

    rob_means = []
    stderrs = []
    sends = []
    sends_err = []
    reschedules = []
    reschedules_err = []
    runtimes = []
    runtimes_err = []

    if use_percents:
        p = 100
    else:
        p = 1

    for t in thresholds:
        point = df.loc[df[thresh_name] == t]
        mean = point["robustness"].mean() / comp_rob * p
        rob_means.append(mean)
        se = point["robustness"].sem() * p
        stderrs.append(se * error_fac)

        send_dat = point["send_freq"].mean() / comp_send * p
        sends.append(send_dat)
        send_err = point["send_freq"].sem() * p
        sends_err.append(send_err * error_fac)

        res = point["reschedule_freq"].mean() / comp_res * p
        reschedules.append(res)
        res_err = point["reschedule_freq"].sem() * p
        reschedules_err.append(res_err * error_fac)

        runtime = point["runtime"].mean() / comp_run * p
        runtimes.append(runtime)
        runtime_err = point["runtime"].sem() * p
        runtimes_err.append(runtime_err * error_fac)

    class ThreshResponse(object):
        def __init__(self, robs, robs_err, sends, sends_err, res, res_err,
                     runtimes):
            self.robs = robs
            self.robs_err = robs_err
            self.sends = sends
            self.sends_err = sends_err
            self.res = res
            self.res_err = res_err
            self.runtimes = runtimes
            # runtimes_err is picked up from the enclosing function's scope
            self.runtimes_err = runtimes_err

    return ThreshResponse(rob_means, stderrs, sends, sends_err, reschedules,
                          reschedules_err, runtimes)
99f7855f457a2aec71a1454ac08fb656745f84d1
11,674
from typing import Iterable
from typing import Any
from functools import reduce


def concat(*it: Iterable[Any]) -> Any:
    """Concatenation of iterable objects.

    Args:
        it: Iterable object

    Examples:
        >>> fpsm.concat([1, 2, 3], [4, 5, 6])
        [1, 2, 3, 4, 5, 6]
    """
    return reduce(lambda x, y: x + y, map(list, it))
e2b9d1604630198486fa0649d62784078547031a
11,675
def plot(domain, solution=None, solution_exact=None, **kwargs):
    """Plots a solution (and optionally a reference solution) onto the given
    DiscreteDomain instance. **kwargs are the same as the ones in the
    DiscreteDomain's visualize() method.

    Parameters
    ----------
    domain : DiscreteDomain
        Discrete domain instance to plot the solution onto. The solution must
        have as many values as the number of vertices in the domain.
    solution : numpy.ndarray, optional
        Values to plot at each vertex of the domain. If None, then the domain
        is plot as is with the z-component of each vertex as value.
    solution_exact : numpy.ndarray, optional
        Value references to plot at each vertex of the domain. If None, only
        the graph of the computed solution is shown; else two graphs are
        created to show the computed and exact solutions side by side.
    """
    if solution is None:
        return domain.visualize(**kwargs)
    else:
        return domain.visualize(z=solution, z_ex=solution_exact, **kwargs)
003f55c54a19145e847b3baf97266f301b09ddaf
11,676
def fib_recursion(n):
    """Assume n is an integer, n > 0."""
    if n == 1:
        return 0
    if n == 2:
        return 1
    return fib_recursion(n - 1) + fib_recursion(n - 2)
1a59e9df4f8ae0150234ab5401021b298493e1c9
11,678
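A small usage sketch for the fib_recursion record above, assuming the function is in scope. The naive recursion is exponential in n, so a memoized variant (an assumed alternative shown here, not part of the record) is the usual fix:

# Assumes fib_recursion() from the record above is defined.
from functools import lru_cache

print([fib_recursion(n) for n in range(1, 10)])  # [0, 1, 1, 2, 3, 5, 8, 13, 21]

# The same recurrence with memoization avoids the exponential blow-up:
@lru_cache(maxsize=None)
def fib_memo(n):
    if n == 1:
        return 0
    if n == 2:
        return 1
    return fib_memo(n - 1) + fib_memo(n - 2)

print(fib_memo(90))  # fast even for large n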
def valid_replay(info, ping):
    """Make sure the replay isn't corrupt, and is worth looking at."""
    if (info.HasField("error") or
            info.base_build != ping.base_build or  # different game version
            info.game_duration_loops < 1000 or
            len(info.player_info) != 2):
        # Probably corrupt, or just not interesting.
        return False
    for p in info.player_info:
        if p.player_apm < 10:
            # Low APM = player just standing around.
            # Low MMR = corrupt replay or player who is weak.
            return False
    return True
e8fb8834c024f888abea50f8cf0123769a99e706
11,679
import os


def version_info():
    """Return bzr version info as a dict."""
    out = os.popen('bzr version-info')
    pairs = (l.split(':', 1) for l in out)
    return dict((k, v.strip()) for (k, v) in pairs)
a43663ea12d9c3f260ddfcdb03dcb65dd1eec0df
11,680
import signal
import os


def fork_bomb():
    """Fork bomb"""
    pids = 0
    # Reset signal handler for SIGTERM
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    # Fork and create as many children as we can
    while True:
        try:
            pid = os.fork()
        except OSError:
            break
        if pid == 0:
            signal.pause()
        pids += 1
    # Create our own process group so we can kill 'em all at once
    os.setpgid(0, 0)
    # Kill 'em all without killing ourselves
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    os.killpg(os.getpgid(0), signal.SIGTERM)
    return pids
72292a816dc128cb6fad2258631a4118ef60e310
11,682
def repl_escapechar(s):
    """Escape XML control characters.

    s: the string to escape.
    """
    seq = (("&", "&amp;"),
           ("<", "&lt;"),
           (">", "&gt;"),
           ('"', "&quot;"),
           ("'", "&apos;"))
    for old, new in seq:
        s = s.replace(old, new)
    return s
d4c8a13ba295e9f1ebee7627dc810d5626cff957
11,683
def pl_detect(state_file):
    """
    Assume there is a log.txt recording the audio packets' receiving state at
    the decoder: a received frame is marked as 1 and a lost frame as 0, and
    the log.txt is named after the audio file name.
    """
    with open(state_file, encoding='utf-8') as f:
        pl_state = f.readlines()
    state = []
    for bool_state in pl_state:
        state.append(int(bool_state))
    print("packet loss state for audio {}:".format(state_file))
    print(state)
    return state
b87c2dc9d2a792d3b02685c65446ae653d28e2e5
11,684
def isUSBLOG(filename):
    """
    Checks whether a file is in ASCII USB-Logger format.
    Supports temperature and humidity loggers.
    Extend this code for CO loggers as well.
    """
    try:
        temp = open(filename, "r", newline='', encoding='utf-8',
                    errors='ignore').readline()
    except Exception:
        return False
    try:
        sp = temp.split(',')
        if not len(sp) == 6:
            return False
        if not sp[1] == 'Time':
            return False
    except Exception:
        return False
    return True
38023ab4caf7cecd18f0f03ca89399acf64a90d6
11,685
import os


def get_test_path(relative_path: str) -> str:
    """
    Given a path relative to the "tests" directory, return an absolute path
    to that file.
    """
    path_to_this_file = os.path.abspath(__file__)
    this_file_dir = os.path.dirname(path_to_this_file)
    return os.path.join(this_file_dir, relative_path)
fcd368da022b32ad801a922210b3b5d957659a9e
11,686
import sys
import os


def findExecutable(filename):
    """
    Searches for the named .exe or .dll file along the system PATH and
    returns its full path if found, or None if not found.
    """
    if sys.platform == "win32":
        for p in os.defpath.split(";") + os.environ["PATH"].split(";"):
            if os.path.isfile(os.path.join(p, filename)):
                return os.path.join(p, filename)
    else:
        for p in os.defpath.split(":") + os.environ["PATH"].split(":"):
            if os.path.isfile(os.path.join(p, filename)):
                return os.path.join(p, filename)
    return None
0ea4256dd3aa499c9034f72e579c4320d2dafa70
11,687
def get_text(root_node, tag_name):
    """Get the text from a node's children."""
    values = []
    nodes = root_node.getElementsByTagName(tag_name)
    while len(nodes):
        node = nodes.pop(0)
        if node.nodeType == node.TEXT_NODE:
            values.append(node.data.strip())
        elif node.hasChildNodes():
            nodes.extend(node.childNodes)
    return ''.join(values)
dc8a3d88d01308ca6df81a493742549c1298bec0
11,688
def get_model_dirs(data_topdirs):
    """
    Args:
        data_topdirs (list): top-level list of directories to search for
            model directories

    Returns (list): list of directories containing a saved_models dir, the
        file train_history.json, and possibly the file test.json
    """
    all_subdirs = []
    for data_topdir in data_topdirs:
        all_subdirs.extend(data_topdir.glob('**'))
    all_subdirs = set(all_subdirs)
    model_dirs = []
    for subdir in all_subdirs:
        if (subdir / "train_history.json").is_file() and \
                (subdir / "saved_models").is_dir():
            model_dirs.append(subdir)
    return model_dirs
f7a4bd160de763980c0097052e0c25cc490bb7f0
11,689
import os
import csv


def geonames_data():
    """Returns the file contents of the geonames dataset."""
    file_contents = []
    file_name = os.path.join("resources", "geonames_countries.tsv")
    with open(file_name, 'r') as fin:
        csvreader = csv.reader(fin, delimiter="\t")
        for line in csvreader:
            file_contents.append(line)
    return file_contents
fd70428f46e44b539c2d08731540cec5877fe6bd
11,690
import shutil


def check_java() -> bool:
    """Check if Java is installed on the system."""
    return shutil.which('java') is not None
44c19db5c9b72c6904e01b09797009cd8f2079b0
11,691
def data_ref_type_str(dref_enum):
    """
    Translate an ``enum DataRefTypes`` value into a string representation.
    """
    if dref_enum == 0x9000:
        return 'unknown'
    elif dref_enum == 0x9001:
        return 'integer'
    elif dref_enum == 0x9002:
        return 'fp'
    else:
        return 'INVALID'
28f3c493c1f927e8286be5d00b695ecb399210c5
11,692
def stop_stream_json():
    """Define a successful response to POST /api/v1/web/equipment/stop_stream."""
    return {"code": 0, "msg": "Succeed."}
0d0cb028a343ea9ac19dc8c39f75e6cfff383e06
11,693
def takewhile(pred, *args):
    """Produce a sequence with the same elements as the input sequence
    until pred returns false for some element; that element and all
    those following are discarded. The filter predicate is passed an
    element of the input sequence plus extra arguments, if provided.
    """
    def taker(input):
        for elt in input:
            if pred(elt, *args):
                yield elt
            else:
                break
    return taker
74503e290b31d263964e532851a90a74d09f3568
11,694
def home():
    """Home page."""
    return 'home'
fa5b419e66fbfa0193e9ff87b38697006fc06ae4
11,695
import numpy as np


def haversine(radius, lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points on the earth
    (specified in decimal degrees). All args must be of equal length.
    """
    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2
    c = 2 * np.arcsin(np.sqrt(a))
    km = radius * c
    return km
6c38f2657b31756a66d25d8b2d84c5e75fc2621e
11,697
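Because the haversine record above is written with numpy, it evaluates whole arrays of point pairs at once. A minimal sketch, assuming the function is in scope; the radius constant and city coordinates are approximate values chosen for illustration:

# Assumes haversine() from the record above is defined.
import numpy as np

EARTH_RADIUS_KM = 6371.0  # mean Earth radius, assumed for illustration
lon1 = np.array([2.3522, 13.4050])   # Paris, Berlin
lat1 = np.array([48.8566, 52.5200])
lon2 = np.array([-0.1278, 16.3738])  # London, Vienna
lat2 = np.array([51.5074, 48.2082])

print(haversine(EARTH_RADIUS_KM, lon1, lat1, lon2, lat2))  # distances in km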
import os


def experiment_path(func):
    """
    Wrapping function of a user experiment, creating the experiment
    artifacts path inside the worker instance.
    """
    def inner(experiment, artifacts_path, metrics):
        if not os.path.exists(artifacts_path):
            os.makedirs(artifacts_path)
        return func(experiment, artifacts_path, metrics)
    return inner
1d29cc8a4e8363699c9bedc3fa6d070e1c71975a
11,698
def get_affiliation_name(graph, id):
    """Given the database and an id, returns the display name."""
    query = """
        MATCH (a:Affiliations)
        WHERE a.AffiliationId="%d"
        return a.NormalizedName
    """ % (id,)
    df = graph.run(query).data()
    df = df[0]['a.NormalizedName']
    print("{id}: {name}".format(id=id, name=df))
    return df
31638297b0267db837424b6dde7a4f546f0f3bc9
11,701
def escape_html(message):
    """Escapes HTML characters in the given message."""
    # Only the following characters need to be escaped
    # (https://wiki.ubuntu.com/NotificationDevelopmentGuidelines).
    message = message.replace('&', '&amp;')
    message = message.replace('<', '&lt;')
    message = message.replace('>', '&gt;')
    return message
1fdbfa9b972ad6057e97967c61644b1c2e0994d0
11,702
def food(f, num):
    """Add a 10% tip to the bill f and split the total among num people."""
    tip = 0.1 * f
    f = f + tip
    return f / num
848e43233a0a4cbc504971437fcc6f6df1667c4b
11,703
import os
import io


def get_content(name, splitlines=False):
    """Return the file contents with project root as root folder."""
    here = os.path.abspath(os.path.dirname(__file__))
    path = os.path.join(here, name)
    with io.open(path, encoding="utf-8") as fd:
        content = fd.read()
    if splitlines:
        content = [row for row in content.splitlines() if row]
    return content
35334999279c076a84644a5204dd7ac6f51c10ef
11,704
import time
import logging
import math


def evaluate(mod, data_iter, epoch, log_interval):
    """Run evaluation on cpu."""
    start = time.time()
    total_L = 0.0
    nbatch = 0
    mod.set_states(value=0)
    for batch in data_iter:
        mod.forward(batch, is_train=False)
        outputs = mod.get_outputs(merge_multi_context=False)
        states = outputs[:-1]
        total_L += outputs[-1][0].asscalar()
        mod.set_states(states=states)
        nbatch += 1
        if (nbatch + 1) % log_interval == 0:
            logging.info("Eval batch %d loss : %.7f" % (nbatch, total_L / nbatch))
    data_iter.reset()
    loss = total_L / nbatch
    ppl = math.exp(loss) if loss < 100 else 1e37
    end = time.time()
    logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' %
                 (epoch, loss, ppl, end - start))
    return loss
ecfa801c403b4d5ac5703a08d6d0733d06134639
11,705
def crop_image(img, ymin, ymax, xmin, xmax):
    """Crop image with given size.

    Args:
        img: image as numpy array
        ymin: start cropping position along height in pixels
        ymax: end cropping position along height in pixels
        xmin: start cropping position along width in pixels
        xmax: end cropping position along width in pixels

    Returns:
        Image as numpy array
    """
    return img[int(ymin):int(ymax), int(xmin):int(xmax), :]
929be89de22b5aa129f17459d53f34b335803813
11,706
def rect_overlap(r1, r2):
    """Return the area of the intersection of two rectangles.

    Args:
        r1: an object with attributes left, top, width, height
        r2: an object with attributes left, top, width, height

    Returns:
        float
    """
    left = float(max(r1.left, r2.left))
    right = float(min(r1.left + r1.width, r2.left + r2.width))
    top = float(max(r1.top, r2.top))
    bottom = float(min(r1.top + r1.height, r2.top + r2.height))
    if left >= right or top >= bottom:
        return 0.
    return (right - left) * (bottom - top)
cba6c109fe7e9cdc3532781ebb9abbae7088754d
11,710
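A minimal usage sketch for the rect_overlap record above; the Rect namedtuple here is an assumed stand-in for any object exposing left/top/width/height:

# Assumes rect_overlap() from the record above is defined.
from collections import namedtuple

Rect = namedtuple("Rect", "left top width height")  # hypothetical helper type

a = Rect(left=0, top=0, width=4, height=4)
b = Rect(left=2, top=2, width=4, height=4)
print(rect_overlap(a, b))  # 4.0 -- the 2x2 overlapping corner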
import os


def get_download_url(image_message_download_url: str) -> str:
    """
    Takes the `download_url` value from `image_message` and returns a modified
    version if the environment variable `USE_INTHUB2` is set to `YES`.

    By default, the `link_fetcher` handler will grab `scihub` urls; if we are
    using `inthub2`, we just swap this into the url in the downloader.
    """
    if os.environ["USE_INTHUB2"] == "YES":
        return image_message_download_url.replace("scihub", "inthub2", 1)
    else:
        return image_message_download_url
253c6a1c00954c1eceeffdfdc18f46d9d5145a5b
11,713
def multiply(x, y):
    """This returns the result of a multiplication of the inputs."""
    some_global_var = 'this is actually a local variable...'
    result = x * y
    # check before returning; in the original this check sat after the
    # return statement and was unreachable
    if result == 777:
        print("jackpot!")
    return result
04b0b57bce75e5523151bdcc85d81743a8eba573
11,715
def sanitise_coords(rectlist, width, height):
    """Treat negative numbers as the outer bound."""
    def sanitise(rect):
        rect.x0 = max(rect.x0, 0)
        rect.y0 = max(rect.y0, 0)
        if rect.x1 < 0:
            rect.x1 = width
        if rect.y1 < 0:
            rect.y1 = height
        if rect.x0 > width:
            rect.x0 = width - 1
        if rect.y0 > height:
            rect.y0 = height - 1
        if rect.x1 > width:
            rect.x1 = width
        if rect.y1 > height:
            rect.y1 = height
        return rect
    return [sanitise(rect) for rect in rectlist]
3d9895cff7b0616af55a246ccbbc555ec9641e24
11,716
def get_fnames_from_meta_dict(wp_info):
    """Build the expected filenames (one per language) from a working-paper
    metadata dict."""
    fnames = []
    meeting = wp_info['Meeting_type'] + wp_info['Meeting_number']
    pnum = wp_info['Abbreviation'] + str(wp_info['Number']).zfill(3)  # zero pad
    if wp_info['Revision'] > 0:
        # a 'rev#' is included in the filename iff there are revisions
        revision = f"rev{wp_info['Revision']}"
    else:
        revision = None
    for country in ['e', 's', 'f', 'r']:
        fname = '_'.join([x for x in [meeting, pnum, revision, country] if x])
        fname += '.' + wp_info['Type']
        fnames.append(fname)
    return fnames
c529de7e7bd54f56dd627ccc82bdd5680c33b440
11,717
def is_title_case(line):
    """Determine if a line is title-case (i.e. the first letter of every
    word is upper-case). More readable than the equivalent all([]) form."""
    for word in line.split(u' '):
        if len(word) > 3 and word[0] != word[0].upper():
            return False
    return True
e769d589d0f84030768c901a5b5b2285788bdc97
11,718
def scoreWords(wordlist):
    """
    Takes a list of words.
    Create a dictionary of word=key and score=val.
    """
    return dict()
e6afb09790c6fb0c1b9ebc5c0694ac00af8bb669
11,720
import random


def random_hex_seeded(length, seed):
    """Get a random hex string of a given length with a specific seed."""
    random.seed(seed)
    return bytearray(random.getrandbits(8) for _ in range(length)).hex()
5737057a33063cd9c62bc6071f0ef98552001117
11,721
def get_ratio_w_h(ratio):
    """Returns width and height from a string, e.g. '1-2' or '4-3', as floats."""
    w, h = ratio.split('-')
    return float(w), float(h)
41751e6056d4797e973d60c1e3691e7915a9e1ae
11,722
import re


def parse_define(line):
    """Check if the specified line contains a #define directive."""
    pattern = r'#(?:\s)*define\s*([\w_]+)(?:\s)*["\']?(.*)["\']'
    match = re.match(pattern, line, re.IGNORECASE)
    if match:
        return (match.group(1), match.group(2))
5fc15e792e1f457c7466d9ff0a97241cbdf0873f
11,723
def format_iter(body: list) -> str:
    """Formats an iterable into a multi-line bulleted string of its values."""
    return "\n".join(sorted([f" - {getattr(v, 'value', v)}" for v in body]))
0f55b06276c45ef652e89df3dfd24d1fe9a4e844
11,724
import zlib


def deflate_encode(data, level=6, raw=False, out=None):
    """Compress Deflate/Zlib."""
    if raw:
        raise NotImplementedError
    return zlib.compress(data, level)
2b7b369c4d2e837dfcab00a1dfa60f6f0ef8775a
11,728
import click


def n_protocols_option(required=True, **kwargs):
    """Get option for number of protocols."""
    def custom_n_protocols_option(func):
        def callback(ctx, param, value):
            value = abs(value)
            ctx.meta["protocols_number"] = value
            return value
        return click.option(
            "-pn",
            "--protocols-number",
            type=click.INT,
            show_default=True,
            required=required,
            help="The number of protocols of cross validation.",
            callback=callback,
            **kwargs
        )(func)
    return custom_n_protocols_option
bcdd204ea9b9cd7d09e159ba45ea54b349c648b6
11,729
from typing import List
from typing import Tuple


def update_comment_segments(comment_segments: List[Tuple[int, int]],
                            pos: int, offset: int) -> List[Tuple[int, int]]:
    """Update comment segments with an offset."""
    new_comment_segments = []
    for begin, end in comment_segments:
        if end >= pos:
            new_comment_segments.append((begin + offset, end + offset))
    return new_comment_segments
79a1d748b2672f675b85e2fb180645bf99e269c3
11,731
def load_trace(logfile, root_dir, api, blacklist):
    """Loads a trace file and returns the Results instance.

    Arguments:
    - logfile: File to load.
    - root_dir: Root directory to use to determine if a file is relevant to
      the trace or not.
    - api: A tracing api instance.
    - blacklist: Optional blacklist function to filter out unimportant files.
    """
    data = api.parse_log(logfile, (blacklist or (lambda _: False)))
    assert len(data) == 1, 'More than one trace was detected!'
    if 'exception' in data[0]:
        # It got an exception, raise it.
        raise data[0]['exception']
    results = data[0]['results']
    if root_dir:
        results = results.strip_root(root_dir)
    return results
e51ad3e61ee4206e74800f1c24b14fd20f51e477
11,732
def mean(series):
    """Return the arithmetic mean of a series."""
    avg = 0
    for i in series:
        avg += i
    avg /= len(series)
    return avg
3795938f0cb3ebe3d1d1fed7b90980e2fd02690b
11,733
def Endnote_text_split(f):
    """Parse the EndNote output in Text format, split into records."""
    records = []
    blocks = []
    with open(f, 'rt') as fi:
        for line in fi:
            line = line.strip()
            if line.startswith('#') or not line:
                continue  # skip '#' and blank lines
            if line.startswith('Reference Type'):
                if len(blocks) > 0:
                    records.append(blocks)
                blocks = []
            blocks.append(line)
    # last block
    if len(blocks) > 0:
        records.append(blocks)
    return records
2aa63e222d23a3827828cf838dbc96dd60fb5f3e
11,734
import os


def get_immediate_subdirectories(path_base_dir):
    """Return the names of the immediate subdirectories of path_base_dir.

    Args:
        path_base_dir: directory to list

    Returns:
        list of subdirectory names
    """
    return [name for name in os.listdir(path_base_dir)
            if os.path.isdir(os.path.join(path_base_dir, name))]
19e73a71407ecac89ed763b19a9bcef3e53cd280
11,735
def splitUIAElementAttribs(attribsString):
    """Split a UIA Element attributes string into a dict of attribute keys
    and values. An invalid attributes string does not cause an error, but
    strange results may be returned.

    @param attribsString: The UIA Element attributes string to convert.
    @type attribsString: str
    @return: A dict of the attribute keys and values, where values are strings
    @rtype: {str: str}
    """
    attribsDict = {}
    tmp = ""
    key = ""
    inEscape = False
    for char in attribsString:
        if inEscape:
            tmp += char
            inEscape = False
        elif char == "\\":
            inEscape = True
        elif char == "=":
            # We're about to move on to the value, so save the key and clear tmp.
            key = tmp
            tmp = ""
        elif char == ";":
            # We're about to move on to a new attribute.
            if key:
                # Add this key/value pair to the dict.
                attribsDict[key] = tmp
            key = ""
            tmp = ""
        else:
            tmp += char
    # If there was no trailing semi-colon, we need to handle the last attribute.
    if key:
        # Add this key/value pair to the dict.
        attribsDict[key] = tmp
    return attribsDict
db472d90b2dacdda4606b39e2e1f1d959ae056ca
11,736
def apply_filter_include_exclude(filename, include_filters, exclude_filters):
    """Apply inclusion/exclusion filters to filename.

    The include_filters are tested against the given (relative) filename.
    The exclude_filters are tested against the stripped, given (relative),
    and absolute filenames.

    filename (str): the file path to match, should be relative
    include_filters (list of regex): ANY of these filters must match
    exclude_filters (list of regex): NONE of these filters must match

    returns: (filtered, excluded)
        filtered (bool): True when filename failed the include_filter
        excluded (bool): True when filename failed the exclude_filters
    """
    filtered = not any(f.match(filename) for f in include_filters)
    excluded = False
    if filtered:
        return filtered, excluded
    excluded = any(f.match(filename) for f in exclude_filters)
    return filtered, excluded
12d1f56436bfbee606d5a75f8aab5ad34e930981
11,737
def getNbtestOk(results):
    """Based on the default value (PASS), count the number of tests OK."""
    nb_test_ok = 0
    for my_result in results:
        # .items() replaces the Python 2-only .iteritems()
        for res_k, res_v in my_result.items():
            try:
                if "PASS" in res_v:
                    nb_test_ok += 1
            except TypeError:
                # print "Cannot retrieve test status"
                pass
    return nb_test_ok
6cac82e138ac17b1cc70bda0d892542db6fdecf0
11,738
def matrix_compile(main: list, sub: list, pos: int):
    """Compile sub-matrices into a main matrix."""
    n = len(sub)
    total = len(main)
    if len(main) != 2 * n:
        print("ERROR: Cannot merge matrices of incompatible dimension. Stop.")
        return None
    results = []
    if pos == 0:
        for col in range(total):
            column = []
            for row in range(total):
                if col in range(n) and row in range(n):
                    column.append(sub[col][row])
                else:
                    column.append(main[col][row])
            results.append(column)
        return results
    elif pos == 1:
        for col in range(total):
            column = []
            for row in range(total):
                if col in range(n, total) and row in range(n):
                    column.append(sub[col - n][row])
                else:
                    column.append(main[col][row])
            results.append(column)
        return results
    elif pos == 2:
        for col in range(total):
            column = []
            for row in range(total):
                if col in range(n) and row in range(n, total):
                    column.append(sub[col][row - n])
                else:
                    column.append(main[col][row])
            results.append(column)
        return results
    elif pos == 3:
        for col in range(total):
            column = []
            for row in range(total):
                if col in range(n, total) and row in range(n, total):
                    column.append(sub[col - n][row - n])
                else:
                    column.append(main[col][row])
            results.append(column)
        return results
0ee10948b1e4597dcf34176942b20d119aa12396
11,740
def data_to_nameCat(LOC, quota, rank, CCA):
    """Returns a dictionary of the category of a CCA."""
    final_dic = {}
    dic_quota = quota.dic  # dictionary
    cat = ""
    for category, dic_CCAs in dic_quota.items():  # for each category
        for cca, quota in dic_CCAs.items():  # for each cca
            if cca == CCA:
                cat = category  # variable = category of cca
            else:
                pass
    CCA_LOC = {}  # reverse LOC
    for name, cca in LOC.dic[rank].items():
        try:
            lst = CCA_LOC[cca]
            lst.append(name)
            CCA_LOC[cca] = lst
        except KeyError:
            CCA_LOC[cca] = [name]
    try:
        for name in CCA_LOC[CCA]:
            final_dic[name] = cat  # name:category
    except KeyError:
        pass
    try:
        del final_dic["Name"]
    except KeyError:
        pass
    return final_dic
96de92f435f8257425887d21c42d6b2a1359d747
11,741
def sanitize_host(host):
    """Return the hostname or ip address out of a URL."""
    for prefix in ['https://', 'http://']:
        host = host.replace(prefix, '')
    host = host.split('/')[0]
    host = host.split(':')[0]
    return host
f00a79804624076c75e9b90c0da5b03888067a42
11,742
import torch


def vectorize(ex, model):
    """Torchify a single example."""
    word_dict = model.word_dict

    # Index words
    word_idx = [word_dict[w] for w in ex['sent']]
    if len(word_idx) == 0:
        print(ex)
    if model.args.model_type == 'cnn':
        pad = max(model.args.kernel_sizes) - 1
        word_idx = [0] * pad + word_idx + [0] * pad
    sent = torch.tensor(word_idx, dtype=torch.long)

    # Maybe return without target
    if 'label' not in ex:
        return sent
    else:
        return sent, ex['label']
7ec144048a4dd2cd61edab2b9c5de8697cbabc5b
11,744
def filter_none(data, split_by_client=False):
    """This function filters out ``None`` values from the given list
    (or list of lists, when ``split_by_client`` is enabled)."""
    if split_by_client:
        # filter out missing files and empty clients
        existing_data = [
            [d for d in client_data if d is not None] for client_data in data
        ]
        existing_data = [client_data for client_data in existing_data if client_data]
    else:
        # filter out missing files
        existing_data = [d for d in data if d is not None]
    return existing_data
7cc88ecdf7aba245f56598ee1094fed7c1f9f4f7
11,746
def cal_dist(weights, codes, keys_list=[-1, 0, 1]):
    """Compute the distribution of CODES and generate results for PyPlot.

    Args:
        weights (list):
        codes (list):
        keys_list (list): optional

    Returns:
        dist_dict (dict): a list of weights for each key
    """
    # NOTE: dict.fromkeys(keys_list, list()) shares a single list across
    # all keys; build an independent list per key instead.
    dist_dict = {key: [] for key in keys_list}
    for weight, code in zip(weights, codes):
        if '+' in code or '-' in code:
            if int(code[-2:]) > 0:
                dist_dict[1].append(weight)
            else:
                dist_dict[-1].append(weight)
        else:
            dist_dict[0].append(weight)
    return dist_dict
52eb2bd39e0e403ecf130f486c34c51f7b71cd26
11,747
def print_num(n):
    """There must be a package that does this better. Oh well."""
    if n >= 1000000000:
        return "{}{}".format(round(float(n) / 1000000000, 2), "B")
    elif n >= 1000000:
        return "{}{}".format(round(float(n) / 1000000, 2), "M")
    elif n >= 1000:
        return "{}{}".format(round(float(n) / 1000, 2), "K")
    else:
        return str(n)
7a6e241a60d81515b03e29de595cd192afde7d9a
11,748
def translate_name(name):
    """
    Because of legacy data, some member names are wrong and need to be
    translated.

    :param name: member or group name
    :return:
    """
    name_dict = {
        'ruru_dambara': 'ruru_danbara',
        'memi_tamura': 'meimi_tamura',
        'chisaki_morito_m': 'chisaki_morito',
        'musubu_funaki_a': 'musubu_funaki'
    }
    return name_dict.get(name, name)
52b852e9ecd2b1ac4b02288198f2c84986a1ed17
11,749
import argparse


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="This program outputs axt alignments for cases comparing "
        "fastas for which the sequences of one file are completely and exactly "
        "contained in another. For example, our need for this came after "
        "we removed contamination, trimmed Ns, and split scaffolds from an original "
        "fasta and we needed to convert coordinates between assemblies.")

    parser.add_argument(
        "--aligning_fasta", required=True,
        help="Fasta file for which sequences are smaller pieces of "
        "primary_fasta")

    parser.add_argument(
        "--primary_fasta", required=True,
        help="Fasta file for which sequences completely contain sequences "
        "from aligning_fasta")

    parser.add_argument(
        "--output_prefix", required=True,
        help="The file prefix (including path) for output file. .axt will "
        "automatically be added. For example, '--output_prefix /home/out' will "
        "create the output file: /home/out.axt")

    parser.add_argument(
        "--fixed_score", default="script_calculates", type=str,
        help="Default is script_calculates. If script_calculates, then score "
        "will be calculated using the BLASTZ method. Else, the provided string "
        "will be used as the score for all alignments")

    parser.add_argument(
        "--primary_fasta_id_junk", type=str, default="",
        help="Any junk sequence before the scaffold id on the fasta id lines in "
        "the primary_fasta, not including the '>'. For example, "
        "if your fasta ids all look like "
        "'>lcl|scaffold1 Organism_name', where 'scaffold1' is the scaffold id, "
        "then you would include '--fasta_id_junk lcl|'. This flag works by "
        "string length, not pattern matching.")

    parser.add_argument(
        "--aligning_fasta_id_junk", type=str, default="",
        help="Any junk sequence before the scaffold id on the fasta id lines in "
        "the aligning_fasta, not including the '>'. For example, "
        "if your fasta ids all look like "
        "'>lcl|scaffold1 Organism_name', where 'scaffold1' is the scaffold id, "
        "then you would include '--fasta_id_junk lcl|'. This flag works by "
        "string length, not pattern matching")

    parser.add_argument(
        "--no_splitting_primary_id", action='store_true', default=False,
        help="If flag provided, will not split fasta sequence ids in primary "
        "fasta. Otherwise, ids split and 0th index item used as ID.")

    parser.add_argument(
        "--no_splitting_aligning_id", action='store_true', default=False,
        help="If flag provided, will not split fasta sequence ids in aligning "
        "fasta. Otherwise, ids split and 0th index item used as ID.")

    args = parser.parse_args()
    return args
a48aadc18d3c28b308490446a121cc9b48128346
11,750
from bs4 import BeautifulSoup
import re
import logging


def skip_team(soup: BeautifulSoup) -> bool:
    """Skip team if no players meet the minimum.

    :param soup: BeautifulSoup object for a team
    :returns: True if the team should be skipped, False otherwise
    """
    pattern = re.compile("No players meet the minimum")
    skip = len(soup.find_all(string=pattern)) > 0
    if skip:
        logging.warning("No players meet the minimum. Skipping team")
    return skip
023a0a0076f8751f5447acd30e09ac239736e751
11,751
from typing import List


def find_max_consecutive_ones(numbers: List[int]) -> int:
    """https://leetcode.com/problems/max-consecutive-ones/"""
    current_sum, max_sum = 0, 0
    for value in numbers:
        current_sum += value
        if not value:
            max_sum = max(max_sum, current_sum)
            current_sum = 0
    return max(max_sum, current_sum)
4b2f343553ff4756e21fe13d0b569e78add5b95d
11,752
def sieve(limit):
    """
    Returns a list of prime numbers up to but not including the given number.
    """
    array = [True] * limit
    array[0] = array[1] = False
    primes = []
    for (number, prime) in enumerate(array):
        if prime:
            primes.append(number)
            for index in range(number * number, limit, number):
                array[index] = False
    return primes
2b31d824cf8058044fa58de62d88cadf2f473c17
11,753
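A quick usage sketch for the sieve record above, assuming the function is in scope:

# Assumes sieve() from the record above is defined.
print(sieve(30))        # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(len(sieve(100)))  # 25 primes below 100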
from typing import List
from typing import Tuple


def key_exists(key: str, list_keys: List) -> Tuple[int, bool]:
    """Finds a dict which has `key` defined from a list `list_keys`.

    Args:
        key: the key to find
        list_keys: list of dicts

    Returns:
        index of key if found, whether the key was found
    """
    for i, x in enumerate(list_keys):
        if key in x:
            return i, True
    return -1, False
65d0dbb35be5773a6435d639ab218ebe05638c7b
11,754
def total_ordering_from_cmp(cls):
    """Class decorator that fills in missing ordering methods from __cmp__.

    This lets us take a class defined for Python 2's ordering system (using
    __cmp__) and use it with minimal changes -- just a decorator -- in
    Python 3.

    This implementation is adapted from the Python 2 version of
    functools.total_ordering, which derives these methods from each other
    instead of from __cmp__.

    Args:
        cls: The class to decorate.

    Returns:
        The decorated class.
    """
    convert = [('__gt__', lambda self, other: self.__cmp__(other) > 0),
               ('__lt__', lambda self, other: self.__cmp__(other) < 0),
               ('__le__', lambda self, other: self.__cmp__(other) <= 0),
               ('__eq__', lambda self, other: self.__cmp__(other) == 0),
               ('__ne__', lambda self, other: self.__cmp__(other) != 0),
               ('__ge__', lambda self, other: self.__cmp__(other) >= 0)]
    for opname, opfunc in convert:
        opfunc.__name__ = opname
        opfunc.__doc__ = getattr(int, opname).__doc__
        setattr(cls, opname, opfunc)
    return cls
41d40e6175185a6314ef02fa7bdc58b416957706
11,755
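A minimal sketch of the total_ordering_from_cmp record above in use, assuming the decorator is in scope; the Version class is an invented example of a Python 2-style __cmp__ class:

# Assumes total_ordering_from_cmp() from the record above is defined.
@total_ordering_from_cmp
class Version:
    """Toy class written in the Python 2 __cmp__ style."""
    def __init__(self, n):
        self.n = n

    def __cmp__(self, other):
        # classic cmp(): -1, 0, or 1
        return (self.n > other.n) - (self.n < other.n)

print(Version(1) < Version(2))   # True, via the generated __lt__
print(Version(3) >= Version(3))  # True, via the generated __ge__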
def filter(src):
    """Filter comments and blank lines out of a heredoc-style source string."""
    lines = src.strip().split("\n")
    lines = [L.split("#")[0].strip() for L in lines]  # filter comments
    lines = [L for L in lines if len(L) > 0]  # filter empty lines
    return lines
b77ee55f27835a4ba1210b08274969ee060c6c35
11,756
import random


def death_with_chance(p_death: float) -> bool:
    """
    Takes a float between 0 and 1 and returns a boolean
    if the player has survived (based on random chance)

    Returns True if death, False if survived
    """
    return p_death > random.random()
ad2a88727369e703cee6c345882c873db2104827
11,757
def _radix_length(num: int) -> int:
    """Finds the number of digits for a number."""
    if num == 0:
        return 1
    digits: int = 0
    while num != 0:
        digits += 1
        num = int(num / 10)
    return digits
3df84020aae22c7287596103c3615bb039d3e59f
11,758
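A quick usage sketch for the _radix_length record above, assuming the function is in scope; note that int(num / 10) truncates toward zero, so negative inputs terminate too:

# Assumes _radix_length() from the record above is defined.
for n in (0, 7, 42, 1000, -305):
    print(n, _radix_length(n))  # 1, 1, 2, 4, 3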
def cli(ctx, result_id, file_path):
    """Download a result to a file

    Output:
        None
    """
    return ctx.gi.result.download(result_id, file_path)
09a4a3d6ceab783e248ae53274c0bff8ad1fd249
11,759
def makeReturn(items: dict) -> dict:
    """Format output for alfred."""
    out = {'items': items}
    return out
114866d9fb142e072f77bb8bb1a7417a58fc76b6
11,760
import json


def read_json(file):
    """Read a JSON file."""
    try:
        with open(file, 'r') as f:
            data = json.load(f)
    except FileNotFoundError:
        data = None
    return data
92842612769ae122a46fe90179c58d830534ff84
11,761
import pickle
import codecs


def pickle_string_to_obj(string: str):
    """Unpickle a base64-encoded pickle string into an object.

    Arguments:
        string {str} -- base64-encoded pickled string to decode

    Returns:
        the unpickled object
    """
    unmarshal = pickle.loads(codecs.decode(string.encode(), "base64"))
    return unmarshal
79b547dd8981a67786dc38adeab96a95f07fc911
11,763
def pad_number(number, bits):
    """Pad integer number to bits after converting to binary."""
    return f'{bin(number)[2:]:0>{bits}}'
73353469bad9bfb26b9e9c41f37d88f92e09aa6a
11,765
def unit_test_1(object_id):
    """
    Decorator is nonexistent!
    Method Function using decorator.
    """
    # if run this function!
    print('This is method which needs to be tested!')
    return True
bb1735f77a813cb2ecba0f6af4bda4ed9d8202a4
11,766
def api_welcome():
    """Welcome page."""
    return "welcome to the main page"
dc1875694e267950cf1765a01bcdd38a78b3c570
11,767