content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def update_max_speed(driver, speed):
    """
    Updates the max speed of the driver

    :param driver: driver
    :param speed: new max speed
    :type driver: DriverProfile
    :return: updated driver profile
    """
    return driver.update_max_speed(speed)
73987703b584099538d865dde9d7d25f48080283
21,817
def asp_convert(string):
    """
    Convert the given string to be suitable for ASP. Currently this means
    replacing "-" with "__" and lowercasing. It is a bit unsafe in that if
    two names exist that are identical except one has a "-" and one has a
    "_" in the same place, this won't catch it.

    (str) -> str
    """
    return string.replace("-", "__").lower()
5d75f10f8cfd24e27f01801ceac9e2ae0cd31a0e
21,820
def str2bytes(origin_str, charset='utf-8'):
    """
    Encode a string into a mutable bytearray.

    :param origin_str: string to encode
    :param charset: character encoding to use
    :return: bytearray of the encoded string
    """
    return bytearray(origin_str, encoding=charset)
e1689353c23ac385b457f5b9218ba0cc82b553d4
21,821
def get_unique(db):
    """ Get unique article ids, in descending order """
    ids = "select id from articles order by id DESC"
    return [str(row[0]) for row in db.execute(ids)]
8c82a2b548016978c6f73f0cdf98cbc69555dd82
21,823
def warmup():
    """Handle App Engine warmup requests.

    See https://cloud.google.com/appengine/docs/standard/python3/configuring-warmup-requests.
    """
    return "", 200, {}
961bd39d80e3091716ff59c2f4eafbc76053be69
21,824
def two_forecast_times(all_obs, fs_from):
    """Select only two forecasting times, to reduce computation time."""
    first_obs = min(obs['date'] for obs in all_obs if obs['date'] >= fs_from)
    twelve_weeks_later = first_obs + 12 * 7
    return [first_obs, twelve_weeks_later]
3a2c695f6f5a23f128940e99a5b04f691d721cc8
21,830
import torch

def evaluate_nll(confidences: torch.Tensor, true_labels: torch.Tensor) -> float:
    """
    Args:
        confidences (Tensor): a tensor of shape [N, K] of predicted confidences.
        true_labels (Tensor): a tensor of shape [N,] of ground truth labels.

    Returns:
        nll (float): average negative-log-likelihood of predictions.
    """
    nll = torch.nn.functional.nll_loss(
        torch.log(1e-12 + confidences), true_labels
    ).item()
    return nll
d1c5aa5d69e788ee835d8b94b6dd9d6895656e53
21,831
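A minimal usage sketch for the evaluate_nll row above (hypothetical tensors; assumes the confidences are already normalized probabilities):

import torch

probs = torch.tensor([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1]])  # [N=2, K=3] predicted confidences
labels = torch.tensor([0, 1])            # ground-truth class indices
print(evaluate_nll(probs, labels))       # ~0.290 = -(log 0.7 + log 0.8) / 2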
def detect_format(string):
    """
    Detect CIF, POSCAR, or OPTIMADE format, checking the most common features
    """
    if '_cell_angle_gamma' in string \
    and 'loop_' in string:
        return 'cif'

    lines = string.splitlines()
    for nline in [6, 7, 8]:
        if len(lines) <= nline:
            break
        if lines[nline].strip().lower().startswith('direct') \
        or lines[nline].strip().lower().startswith('cart'):
            return 'poscar'

    if '"immutable_id"' in string and '"cartesian_site_positions"' in string and '"lattice_vectors"' in string:
        return 'optimade'

    return None
f43bfa49583cb9c633887807d47646e6c0b2d8e9
21,832
def train_test_column_split(x, y, df_column):
    """Function for splitting dataset into train/test partitions w.r.t. a column (pd.Series).

    Args:
        x (pd.DataFrame): DataFrame containing predictors.
        y (pd.DataFrame): DataFrame containing target variable.
        df_column (pd.Series): Series for train/test split, assuming it is contained in x.

    Returns:
        tuple: (x_train, x_test, y_train, y_test).
            A tuple of partitions of the initial dataset.
    """
    x1, y1, col_name = x.copy(), y.copy(), df_column.name
    y1[col_name] = df_column

    return (x1[x1[col_name] == 'train'].drop(col_name, axis=1),
            x1[x1[col_name] == 'test'].drop(col_name, axis=1),
            y1[y1[col_name] == 'train'].drop(col_name, axis=1),
            y1[y1[col_name] == 'test'].drop(col_name, axis=1))
a28b536be57e04870ae9e6f1e1abc854838e24ae
21,833
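A small usage sketch for train_test_column_split above (hypothetical frames; assumes the split column already lives in x):

import pandas as pd

x = pd.DataFrame({'feat': [1, 2, 3, 4],
                  'split': ['train', 'train', 'test', 'test']})
y = pd.DataFrame({'target': [0, 1, 0, 1]})

x_train, x_test, y_train, y_test = train_test_column_split(x, y, x['split'])
print(len(x_train), len(x_test))  # 2 2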
def switchUser(username, password, event, hideError=False):
    """Attempts to switch the current user on the fly.

    If the given username and password fail, this function will return False.
    If it succeeds, then all currently opened windows are closed, the user
    is switched, and windows are then re-opened in the states that they were
    in. If an event object is passed to this function, the parent window of
    the event object will not be re-opened after a successful user switch.
    This is to support the common case of having a switch-user screen that
    you want to disappear after the switch takes place.

    Args:
        username (str): The username to try and switch to.
        password (str): The password to authenticate with.
        event (object): If specified, the enclosing window for this event's
            component will be closed in the switch user process.
        hideError (bool): If True (1), no error will be shown if the switch
            user function fails. (default: 0)

    Returns:
        bool: False (0) if the switch user operation failed, True (1)
            otherwise.
    """
    print(username, password, event, hideError)
    return True
0ad6974238d8112b1697cfb49f9688e546547cdd
21,834
def triplo(n=0):
    """
    -> Computes the triple of a number

    :param n: number
    :return: triple of the number
    """
    return n * 3
e612051944215b2491958005cadaac9eff5ba3b6
21,835
def sendTweet(api, file):
    """Post an image to twitter.

    Args:
        api: authenticated tweepy API handle.
        file: open file object for the image on disk to be posted to twitter.

    Returns:
        tweepy.Status object, contains response from twitter request.
    """
    return api.update_with_media(filename=file.name, file=file)
43bf571e03f57811dd8d001cbc40cf9ab66d4e6e
21,836
from functools import reduce

def execute_filters(diff_data: str, filter_list: list) -> str:
    """
    Applies filter functions to the difference output
    """
    # Apply each filter in order, feeding the output of one into the next.
    return reduce(lambda m, f: f(m), filter_list, diff_data)
3ad12da2cf62f57301635e128a394ed1672be5be
21,837
def calculateXVertice(termA, termB):
    """
    Calculates the x-coordinate of the vertex: x = -b / (2a).
    """
    vertice = (-termB) / (2 * termA)
    return vertice
aafeb34baad2c5361ad6f1c49088984708cdffee
21,838
def keyequals(key, search):
    """
    Check whether the key equals the search string, decoding bytes keys first.
    """
    ret = False
    if isinstance(key, bytes):
        if str(key, 'utf8') == search:
            ret = True
    else:
        if key == search:
            ret = True
    return ret
bb76a94e34dc9231995bd68c7299e515d303eff0
21,840
def _search(left, right, predicate):
    """Simple binary search that uses the ``predicate`` function to
    determine direction of search"""
    if right >= left:
        mid = left + (right - left) // 2
        res = predicate(mid)
        if res == 0:
            return mid
        elif res > 0:
            # A positive result (including exactly 1) means search left;
            # the original `res > 1` misrouted a predicate returning 1.
            return _search(left, mid - 1, predicate)
        else:
            return _search(mid + 1, right, predicate)
    else:
        return -1
80a8ae96468fa4a13e42dc5b45920123c1eb8833
21,841
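A usage sketch for _search above (hypothetical sorted data; the predicate follows the sign convention in the code: 0 on a match, positive to search left, negative to search right):

data = [2, 5, 9, 14, 21, 30]
target = 14

# 1 if the probed value overshoots the target, -1 if it undershoots, 0 on a hit.
predicate = lambda mid: (data[mid] > target) - (data[mid] < target)

print(_search(0, len(data) - 1, predicate))  # 3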
def get_aclinks(soup, categories):
    """
    builds aircraft dictionary with href links as identifier keys and
    aircraft categories as values

    returns a dictionary
    """
    # grab ordered lists to extract li list items that contain href links
    # each ordered list index aligns with categories index, i.e. each ol corresponds to a category
    olists = soup.find_all(name='ol')

    ac_dict = {}
    for ind, ol in enumerate(olists):
        for li in ol.find_all('li'):
            try:
                # list of links per category
                ac_link = li.find('a').get('href')
            except AttributeError:
                # li.find('a') returns None when the item has no link
                ac_link = 'error at index: {}'.format(ind)
            # add the aircraft link if not in dictionary -- prevents dupes
            if ac_link not in ac_dict:
                # assign aircraft link as key, and aircraft category as value
                ac_dict[ac_link] = categories[ind]
    return ac_dict
bf446f4e344dec6b5801a254c04e83abaa8775ef
21,842
def getPadding(sField, cchWidth):
    """ Returns space padding for the given field string. """
    if len(sField) < cchWidth:
        return ' ' * (cchWidth - len(sField))
    return ''
895d3cde1daf0045cb434cc2fcdef6c9ca8808ff
21,843
import shutil

def task_sync_readme():
    """Make the README for vaex-meta up to date"""
    def action(targets):
        shutil.copy('README.md', targets[0])
    return {
        'actions': [action],
        'targets': ["packages/vaex-meta/README.md"],
        'file_dep': ['README.md'],
    }
c526fdb521f209bd51cd4400df8302df8c05f723
21,844
import numpy as np

def advance(t, box, t_now, coltime, r, v):
    """Advances positions and reduces collision times."""

    # Guard against going back in time (should never happen)
    assert t > 0.0, 'Negative time step'

    t_now = t_now + t      # Advance current time by t
    coltime = coltime - t  # Reduce times to next collision by t
    r = r + t * v / box    # Advance all positions by t (box=1 units)
    r = r - np.rint(r)     # Apply periodic boundaries

    return t_now, coltime, r
02a6a3b8ced20b547a85f3172d14e384e70b302f
21,845
import sys
import os

def _to_local_path(path):
    """Convert SFTP path to local path"""
    if sys.platform == 'win32': # pragma: no cover
        path = os.fsdecode(path)

        if path[:1] == '/' and path[2:3] == ':':
            path = path[1:]

        path = path.replace('/', '\\')

    return path
821cfbc97e7c405c17830842f7eedb12dcf01fa9
21,846
def dna_complement(sequence):
    """reads in a string of the top strand DNA sequence and returns the
    string of the complement (bottom strand) also in the 5'->3' direction.
    """
    conversion = {'a': 't', 't': 'a', 'c': 'g', 'g': 'c'}
    # Complement each base, then reverse to keep the 5'->3' orientation.
    compsequence = "".join([conversion[c] for c in sequence][::-1])
    return compsequence
5289c00f8b278164bb73228cdfd23ddefaa9c34b
21,847
def get_lstm_state(cell):
    """Centralize definition of 'state', to swap .c and .h if desired"""
    return cell.c
aacac31198e00a2592c0c612ad34e0c708d36113
21,849
def filter_drugs(mols):
    """
    Return only compounds which are approved drugs
    """
    return mols.filter(max_phase=4)
10b47fac2b8d102580bf9c24bab308019e88378a
21,850
def get_data_file_path_list(data_file_list_path):
    """Get mapping of video id to sensor data files.

    Maps video id to (original video id, [file 1, ..., file n])
    where file 1, ..., file n are one series of data.

    Note.
    - Original video ids in input files sometimes mix lower and upper case.
    - To handle both cases, their lower-cased form is used as the mapping key.
    """
    mapping = {}
    with open(data_file_list_path) as f_in:
        for line in f_in.readlines():
            if line.strip() == '':
                continue
            video_name_prefix, files_txt = line.strip().split('\t')
            mapping[video_name_prefix.lower()] = (
                video_name_prefix, files_txt.split(','))
    return mapping
7655c27105b1ce051bf1942655daabc2dfce9bd0
21,851
import hashlib

def check_hashes(leftfile, rightfile, hasher=None):
    """
    Check a hash of leftfile and rightfile to ensure they are the same.
    Pass in a hash object to select the hash type (defaults to md5).

    Parameters
    ----------
    leftfile : string for full file path
    rightfile : string for full file path

    Returns
    -------
    boolean : True if the file digests match
    """
    if hasher is None:
        # Construct inside the function: a hasher default argument would be
        # shared (and mutated) across calls.
        hasher = hashlib.md5()
    lefthash = hasher.copy()   # use the same hash type for both
    righthash = hasher.copy()
    # update() expects bytes, so read the files in binary mode
    lefthash.update(open(leftfile, 'rb').read())
    righthash.update(open(rightfile, 'rb').read())
    return lefthash.hexdigest() == righthash.hexdigest()
640b0e9a31c5e3ac8984e605b9d644cc373a03ce
21,852
def str_for_containers(self):
    """
    Nice printing for types and method containers.
    Containers must have _container attribute containing all elements
    to be printed.
    """
    cont = getattr(self, '_container', None)
    if cont is None:
        return ''
    res = ''
    for child in cont:
        descr = str(getattr(getattr(self, child, None), '__doc__', None))
        if len(descr) > 100:
            descr = descr[:100] + '...'
        descr = descr.replace('\n', '\n\t')
        res = res + '\n%s\n\t%s' % (child, descr)
    res = res[1:]
    return res
53472d28c6484a61adc210dac3749a5683f9a42a
21,853
def snake_case_to_pascal_case(input_string):
    """
    Converts the input string from snake_case to PascalCase

    :param input_string: (str) a snake_case string
    :return: (str) a PascalCase string
    """
    input_list = input_string.split('_')
    input_list = [i.capitalize() for i in input_list]
    output = ''.join(input_list)
    return output
6c6344fb052dc6c1a712b838d58266ce8ea9b5c0
21,854
def content_escape(content):
    """
    Escapes the content

    Parameters
    ----------
    - content: string to escape
    """
    return content.replace('\\', '\\\\').replace('\n', '\\n').replace('"', '\\"')
4f5a9ec7d7647b9737190087041b2cd036f2e1eb
21,856
import os

def file_path(file_name, path):
    """
    get the entire file path for file_name

    :param file_name: The base file name, ie. my_file.ext | myfile
    :param path: some default path
    :return: full path with file name, path/file_name or working/directory/file_name
    """
    return path.rstrip('\\/') + "/{0}".format(file_name) if path else os.getcwd() + "/{0}".format(file_name)
4a31a0802db702cb58f12135a13787b3b5c16b5e
21,857
def segregated_city():
    """ perfect segregation """
    city = {"A": {1: 7, 2: 0, 3: 0},
            "B": {1: 0, 2: 0, 3: 14},
            "C": {1: 0, 2: 42, 3: 0}}
    return city
c23a306ae58eeca01e9a20c8f933caa427f996a5
21,858
import socket

def check_port(ip, port, timeout=None):
    """
    Checks if the port is open on a specific IP

    @param ip: IP of the remote host
    @param port: The port to check
    @param timeout: Timeout, in seconds
    @return bool: True if the port is open, False if closed
    """
    socket_port = socket.socket()
    if timeout is not None:
        socket_port.settimeout(timeout)
    try:
        socket_port.connect((ip, int(port)))
    except socket.error:
        return False
    else:
        socket_port.close()
        return True
43a4696ca002f96e9b6c28d67326dd4c0c285e5e
21,860
def unpack_worlds(items):
    """Handle all the ways we can pass multiple samples for back-compatibility.
    """
    # Unpack nested lists of samples grouped together (old IPython style)
    if isinstance(items[0], (list, tuple)) and len(items[0]) == 1:
        out = []
        for d in items:
            assert len(d) == 1 and isinstance(d[0], dict), len(d)
            out.append(d[0])
    # Unpack a single argument with multiple samples (CWL style)
    elif isinstance(items, (list, tuple)) and len(items) == 1 and isinstance(items[0], (list, tuple)):
        out = items[0]
    else:
        out = items
    return out
96c650dfd3523bf790c90937a7b8ed2a8144f776
21,861
def make_collision_handler(collider, maplayer):
    """Returns ``f = collider.collide_map(maplayer, ...)``

    Utility function to create a collision handler by combining:

    Arguments:
        maplayer : tells the objects to collide with.
        collider : tells how velocity changes on collision and resolves
            actual collisions.

    Returns:
        f : ``(last, new, vx, vy)`` -> ``(vx, vy)``
    """
    def collision_handler(last, new, vx, vy):
        return collider.collide_map(maplayer, last, new, vx, vy)
    return collision_handler
df59b7eb4b74fe803f1b13fcac7dabc29e06a62b
21,862
import os

def read_file_lines(filepath):
    """Return a list of lines read from file."""
    with open(filepath, 'r', encoding='utf-8', newline=os.linesep) as f:
        return f.readlines()
2c51bdc552af83c13c0ef8cec92edc3fc6d7aeee
21,863
def any():
    """Return True if any data is waiting, else False."""
    return True
c30fcd34c65fb30e19c834edcddf7654d4785a55
21,865
def successor(self, root):
    """
    One step right and then always left
    """
    root = root.right
    while root.left:
        root = root.left
    return root.val
78d871831ce22a99ad534e3f4c2f2fd5d3b2a400
21,866
def get_value_from_dict(dict, key, defaut_v):
    """
    Look up a key in the dictionary; return the default value if it is absent.

    :param dict:
    :param key:
    :param defaut_v:
    :return:
    """
    if key in dict:
        return dict[key]
    else:
        return defaut_v
980e169e8b31891d2e4539b62763806f4d6412c7
21,868
def deploy_dashboard(db_json_url, wf_url, api_token):
    """Deploy a dashboard in wavefront."""
    print("Deploying Dashboard with %s, %s, %s" % (db_json_url, wf_url, api_token))
    return True
8ba623104f77d74f3837874e015e3ee8879ad77b
21,869
def conv_out_shape(in_shape, layers):
    """
    Calculates output shape of input_shape going through a list of pytorch convolutional layers

    in_shape: (H, W)
    layers: list of convolution layers
    """
    shape = in_shape
    for layer in layers:
        h_out = ((shape[0] + 2 * layer.padding[0]
                  - layer.dilation[0] * (layer.kernel_size[0] - 1) - 1)
                 / layer.stride[0]) + 1
        w_out = ((shape[1] + 2 * layer.padding[1]
                  - layer.dilation[1] * (layer.kernel_size[1] - 1) - 1)
                 / layer.stride[1]) + 1
        shape = (int(h_out), int(w_out))
    return shape
b75fb479f47304be03aef20a36583ad8a2edc0de
21,870
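A quick check of the conv_out_shape formula above against real torch layers (shapes are illustrative):

import torch.nn as nn

layers = [nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),
          nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1)]
print(conv_out_shape((64, 64), layers))  # (16, 16)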
import collections
import json

def load_json(files):
    """Load all json files as a list of dictionaries"""
    config = []
    for file in files:
        with open(file, 'r') as data_file:
            config.append(collections.OrderedDict(json.load(data_file)))
    return config
63174b3fd1ce208a347c6cf7b4904873a97e7136
21,871
from datetime import datetime

def print_start_time(message: str) -> datetime:
    """Print start time.

    Args:
        message (str): Message to print.

    Returns:
        start (datetime): Start time.
    """
    start = datetime.now()
    print(f'{message} {start}')
    return start
a8f7f07da5c72bea88cf8e48a8d3f651a7662e0e
21,875
def cmap_lifeaquatic(N=None):
    """
    Returns colormap inspired by Wes Anderson's The Life Aquatic
    Available from https://jiffyclub.github.io/palettable/wesanderson/
    """
    colors = [
        (27, 52, 108),
        (244, 75, 26),
        (67, 48, 34),
        (35, 81, 53),
        (123, 109, 168),
        (139, 156, 184),
        (214, 161, 66),
        (1, 170, 233),
        (195, 206, 208),
        (229, 195, 158),
        (56, 2, 130),
        (0, 0, 0)
    ]
    colors = [tuple([v / 256 for v in c]) for c in colors]
    # Truncate to N colors when requested (the original tested `colors`,
    # which is always non-None, so N was silently ignored).
    if N is not None:
        return colors[0:N]
    else:
        return colors
1e5fe550f672da04bf5b135ecbf4396b6a4634a0
21,876
import json

def format_json(value):
    """Return json as string.

    If we have a JSON value (like a filing) we can't save it as a JSON string
    because flask jsonify will escape everything.
    """
    return_value = None

    # for some reason sql_alchemy returns this as a list of strings?
    # --> mystery solved: the app was doing loads before saving,
    #     so it didn't need to be loaded after
    # if value and len(value) > 0:
    #     logging.warning(type(value))
    #     return_value = json.loads(value[0])

    if value:
        return_value = json.dumps(value)

    return return_value
6737d8c26ca1618e02712e989bb2b4ccc57ffb76
21,877
def print_sums(n):
    """Print all sums of arguments of repeated calls.

    >>> f = print_sums(1)(2)(3)(4)(5)
    1
    3
    6
    10
    15
    """
    print(n)
    def next_sum(k):
        return print_sums(n + k)
    return next_sum
ffbcd9dec717d3313a96c705c834644619150fd4
21,878
import os

def _verify_root():
    """Verify the command is running with root privileges."""
    # Root is effective *user* id 0; the original checked os.getegid(),
    # the effective group id.
    uid = os.geteuid()
    if uid != 0:
        return False
    return True
0e35e10de443b7c87d1367a04bf678d6b9757c46
21,882
def suitedcard_dict(cards):
    """
    Returns a dictionary of suits and card lists. Useful for dividing up a
    list of cards into all the separate suits.
    """
    suits = {}
    for c in cards:
        suits.setdefault(c.suit, []).append(c)  # Dict grouping
    return suits
1ddae14099e90d125b701d320a37e100dfc19281
21,883
def ispalindrome(s):
    """
    Returns true if s is a palindrome

    There are two ways to define a palindrome:
    1. s is a palindrome if it reads the same backward and forward.
    2. s is a palindrome if either
       (1) its length is <= 1 OR
       (2) its first and last chars are the same and the string between
           them is a palindrome.

    Letters must match exactly.

    Parameter s: the candidate palindrome
    Precondition s is a string
    """
    assert type(s) == str, repr(s) + ' is not a string'  # get in the habit

    # Base palindrome
    if len(s) < 2:
        return True

    # s has at least 2 characters
    ends = s[0] == s[-1]
    middle = ispalindrome(s[1:-1])

    # Both must be true to be a palindrome
    return ends and middle
5ad5aaef785be935ab6494cf0a1653a265b680ab
21,887
def snake(s):
    """
    Converts an input string in PascalCase to snake_case.
    """
    snek = []
    prev_up = False
    prev_alnum = False
    for idx, c in enumerate(s):
        alnum = c.isalnum()
        up = c.isupper()
        next_up = s[idx + 1].isupper() if idx + 1 < len(s) else False
        # Underscore at a lower->upper boundary, or where an acronym ends
        # (upper followed by lower), skipping the string edges. The original
        # guard `idx != len(s)` was always true; `len(s) - 1` matches the
        # symmetric `idx != 0` check on the first clause.
        if (up and not prev_up and prev_alnum and idx != 0
                or up and prev_up and not next_up and idx != len(s) - 1):
            snek.append('_')
        snek.append(c.lower())
        prev_alnum = alnum
        prev_up = up
    return ''.join(snek)
c0874409d689e078d1072cb06c0c03e39065c566
21,888
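A few illustrative calls for snake above (expected outputs assume the acronym handling shown):

print(snake('PascalCase'))    # pascal_case
print(snake('HTTPResponse'))  # http_response
print(snake('parseXML'))      # parse_xml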
def Format_Information_Bootstrap(str_tips_dict, svd_spec, svd_dup, svd_trans, svd_loss, svd_recip, svd_donor):
    """
    This takes in a bunch of lists that map subtree -> 1 if that node is a
    transfer/loss/etc. Suboptimal, but it avoids re-writing the code used
    for the species tree.

    args:
        str_tips_dict (contains all subclades)
        several lists of subclades (associated with e.g. "transfers")
    outputs:
        a single dictionary containing all of the subtrees and what the
        nodes should be labeled as.
    """
    format_dict = {}
    for clade_str in str_tips_dict:
        output = ""
        if clade_str in svd_recip:
            output = output + "Recip~"
        if clade_str in svd_donor:
            output = output + "Donor~"
        if clade_str in svd_spec:
            output = output + "Sp"
        if clade_str in svd_dup:
            output = output + "Dup"
        if clade_str in svd_trans:
            output = output + "Txfr"
        if clade_str in svd_loss:
            output = output + "Loss"
        format_dict[clade_str] = output
    return format_dict
2c0b109f312393cbe6af16dba0235619c5edae73
21,890
import numpy

def generateArraySizes():
    """Use this function to generate array sizes based on block sizes."""
    blocksizes = [8, 12, 16, 20, 24, 32]  # blocksizes with most options
    arraystart = 0
    arraysizes = []
    for i in range(7):
        arraystart += 32 * 20
        # Reset the flags each iteration; initializing them once outside the
        # loop meant only the first pass actually searched for a divisible size.
        divisible = [False] * len(blocksizes)
        while not numpy.all(divisible):
            arraystart += blocksizes[-1]
            divisible = [arraystart % bs == 0 for bs in blocksizes]
        arraysizes.append(arraystart)
    return arraysizes
8cf24deb5acc94c4bb4251c5583aebf447b78a48
21,891
import sys

def create_indices(conn, verbose=False):
    """
    Create some useful indices. Note that the PRIMARY KEY columns
    are indexed by default!

    :param conn: The database connection
    :param verbose: print additional output
    :return:
    """
    if verbose:
        sys.stderr.write("Creating indices\n")

    tables = {
        "nodes": {"tidparentrank": ["tax_id", "parent", "rank"]},
        "names": {
            "tidname": ["tax_id", "name"],
            "tiduniname": ["tax_id", "unique_name"],
            "tidnameuniname": ["tax_id", "name", "unique_name"]
        },
        "division": {"divname": ["division_id", "division_name"]},
        "merged": {"oldnewidx": ["old_tax_id", "new_tax_id"]}
    }

    for t in tables:
        for idx in tables[t]:
            conn.execute("CREATE INDEX {ix} ON {tn} ({cn})".format(
                ix=idx, tn=t, cn=", ".join(tables[t][idx])))

    conn.commit()
    return conn
2ec2b59fc8503daf0c1377feaf4803eba49bcf6f
21,892
def vertex_dict_to_list(input_poly):
    """Convert polygon vertex from {x:, y:} to a (y, x) tuple.

    Parameters
    ----------
    input_poly : dict
        Dict with position of x, y polygon vertex {x:, y:}.

    Returns
    -------
    tuple
        (y, x) coordinates of the vertex.
    """
    return (input_poly['y'], input_poly['x'])
06e01ecfe937255baa03cca57004615dfa2eb274
21,895
def prefer_insertions_at_309_and_315(mb):
    """Prefer alternatives that include 309.1C or 315.1C over others.

    There are two multi-C runs at the beginning of the 300's and by
    convention, any insert in one of those runs is pushed to the end.
    Mostly, the other rules will pick these, but in some circumstances
    with other substitutions or deletions in this area, these won't get
    picked - although we want them to. Thus, this special case preference.
    """
    special_cases = ['309.1C', '315.1C']
    # mb: mismatch block
    if len(mb) > 1:
        scores = [0] * len(mb)
        # pos: position
        # aa: alternate alignment
        for pos, aa in enumerate(mb):
            for variant in aa:
                if str(variant) in special_cases:
                    scores[pos] += 1
        if max(scores) > 0:
            # lsi: indices of lower scoring alternate alignments
            lsi = list(x for x in range(len(mb)) if scores[x] < max(scores))
            # remove low scoring alignments from the mismatch block
            # in reverse order so as to not mess up the preceding indices
            lsi.sort(reverse=True)
            for i in lsi:
                mb.pop(i)
    return mb
92152d8de90617ce4a21da680bd34e96d7df98cc
21,896
import torch

def log_likelihood(x_true, x_distr):
    """
    Computes the log-likelihood of the objects x_true under the
    component-wise Bernoulli distribution induced by the model. Each object
    in x_true corresponds to K sampled distributions over x from x_distr;
    the function estimates the log-likelihood for every object.

    Hint: don't forget about numerical stability!
    Hint: there is no need to divide the log-likelihood by the number of
    components per object.

    Input: x_true, Tensor - a matrix of objects of size n x D.
    Input: x_distr, Tensor - a tensor of Bernoulli distribution parameters
        of size n x K x D.
    Output: Tensor, a matrix of size n x K - log-likelihood estimates for
        each sample.
    """
    eps = 0.001
    # Clamp the probabilities away from 0 and 1 for numerical stability.
    eps_matrix = torch.ones(x_distr.shape) * eps
    cut = torch.min(torch.max(x_distr, eps_matrix), 1 - eps_matrix)
    return (x_true.unsqueeze(1) * torch.log(cut)
            + (1 - x_true).unsqueeze(1) * torch.log(1 - cut)).sum(dim=2)
c6d0ae229837ea200aa7bec18f35beac473a3ad6
21,898
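A shape-check sketch for log_likelihood above (random data; shows n x D targets broadcast against n x K x D Bernoulli parameters):

import torch

x_true = torch.randint(0, 2, (4, 10)).float()  # n=4 binary objects, D=10
x_distr = torch.rand(4, 3, 10)                 # K=3 parameter samples per object
print(log_likelihood(x_true, x_distr).shape)   # torch.Size([4, 3])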
def get_patch_centered(img, x, y, radius):
    """
    Extracts a patch from an image centered at (x, y) with a given radius.
    """
    return img[x - radius : x + radius + 1, y - radius : y + radius + 1]
6ad8c8ee8b737fcbe036c8d5a1fb0b00a76e4014
21,899
import sqlite3

def get_db():
    """Create and return the database connection.

    :return:
    """
    db = sqlite3.connect('ebook.sqlite', detect_types=sqlite3.PARSE_DECLTYPES)
    return db
20d1e8787dae67f1ebaec241f688fc4e817ae398
21,901
def match_word(word, word_):
    """
    word: The word to be matched
    word_: The set of alphabets

    This matches the characters of the word to the set of alphabets.
    """
    for word_char, alphabet in zip(word, word_):
        if word_char not in alphabet:
            return False
    return True
5c998e5cb687454583970d9eec4d22bb2e25acae
21,902
def to_utc_rfc3339(a_datetime):
    """
    Helper function to format a timezone unaware/UTC datetime in rfc3339
    utc timestamp the easy way.
    """
    return "{date_string}Z".format(date_string=a_datetime.replace(microsecond=0).isoformat())
2081917cfbc47aca65f7cca1f0440f22fe75297c
21,904
def is_for_ast_eval(test_str: str):
    """
    Is the test string a valid list or dict string, such as "[1, 2]",
    that can be evaluated by ast eval.

    Arguments:
        test_str (str): Test string

    Returns:
        bool: Is test_str a valid list or dict string
    """
    return ('[' in test_str and ']' in test_str) or \
           ('{' in test_str and '}' in test_str)
e3f30a6b27f9d66e91a2c122cc9b3fc5ae1f948f
21,905
def neuron_response(neuron, test_data):
    """
    Measure neuron response

    :param neuron:
    :param test_data:
    :return:
    """
    return neuron.get_output(test_data)
77143b91f11ab670aaf458f6392c538e94d2959a
21,906
import math

def haversine_dist(init_point, final_point):
    """Gives the Haversine distance between the initial and final point

    Parameters
    ----------
    init_point : array
        The initial point
    final_point : array
        The final point

    Returns
    -------
    float
        The Haversine distance
    """
    # Assuming the input is in degrees
    init_rad = init_point * math.pi / 180
    final_rad = final_point * math.pi / 180
    d_latitude = final_rad[0] - init_rad[0]
    d_longitude = final_rad[1] - init_rad[1]
    # Equirectangular approximation: scale the longitude difference by the
    # cosine of the *mean* latitude (using the latitude difference here, as
    # the original did, makes the correction vanish for nearby points).
    x = d_longitude * math.cos((final_rad[0] + init_rad[0]) / 2)
    y = d_latitude
    return [x * 6356.752e3, y * 6356.752e3]
1dc2cd51f36f12e4be06a543243cf52a38ba1893
21,908
def triangle_shape(height):
    """return a triangle of x

    Args:
        height (int): number of stages

    Returns:
        str: triangle
    """
    s = "x"
    esp = " "
    if height == 0:
        return ""
    return "\n".join(
        [
            (height - 1 - i) * esp + (2 * i + 1) * s + (height - 1 - i) * esp
            for i in range(height)
        ]
    )
d83c98e6e8294b8b9e26be1124722027b6390eef
21,909
def _ap(relevances, scores, topn=None):
    """Returns the average precision (AP) of a single ranked list.

    The implementation here is copied from Equation (1.7) in
    Liu, T-Y "Learning to Rank for Information Retrieval" found at
    https://www.nowpublishers.com/article/DownloadSummary/INR-016

    Args:
        relevances: A `list` of document relevances, which are binary.
        scores: A `list` of document scores.
        topn: An `integer` specifying the number of items to be considered
            in the average precision computation.

    Returns:
        The MAP of the list as a float computed using the formula
        sum([P@k * rel for k, rel in enumerate(relevance)]) / sum(relevance)
        where P@k is the precision of the list at the cut off k.
    """

    def argsort(arr, reverse=True):
        arr_ind = sorted([(a, i) for i, a in enumerate(arr)], reverse=reverse)
        return list(zip(*arr_ind))[1]

    num_docs = len(relevances)
    if isinstance(topn, int) and topn > 0:
        num_docs = min(num_docs, topn)
    indices = argsort(scores)[:num_docs]
    ranked_relevances = [1. * relevances[i] for i in indices]
    precision = {}
    for k in range(1, num_docs + 1):
        precision[k] = sum(ranked_relevances[:k]) / k
    num_rel = sum(ranked_relevances[:num_docs])
    average_precision = sum(precision[k] * ranked_relevances[k - 1]
                            for k in precision) / num_rel if num_rel else 0
    return average_precision
e856a98630548313362aa1bf49749a0b32208e61
21,910
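A worked example for _ap above (hypothetical ranking): with relevances [1, 0, 1] already in score order, P@1 = 1 and P@3 = 2/3, so AP = (1 + 2/3) / 2 ≈ 0.833.

relevances = [1, 0, 1]
scores = [0.9, 0.8, 0.7]  # already in ranked order
print(_ap(relevances, scores))  # 0.8333...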
def fmt_float(value):
    """
    Rounds the given float with a precision that shrinks as the
    magnitude grows.
    """
    if value is None:
        return None
    if value == 0:
        return 0
    if value <= 1:
        return round(value, 3)
    if value <= 10:
        return round(value, 2)
    if value <= 100:
        return round(value, 1)
    return int(value)
5088b0741b50be955a6519a1c499862fc28fa12b
21,911
def _get_env_var(rctx, name, default):
    """Find an environment variable in system. Doesn't %-escape the value!

    Args:
        rctx: rctx
        name: environment variable name
        default: default value to return if env var is not set in system

    Returns:
        The environment variable value or the default if it is not set
    """
    if name in rctx.os.environ:
        return rctx.os.environ[name]
    return default
1e98d7b65f1b7323caff51d897e7c5b5bedae3cf
21,913
import uuid

def rand_uuid_hex():
    """Generate a random UUID hex string

    :return: a random UUID (e.g. '0b98cf96d90447bda4b46f31aeb1508c')
    :rtype: string
    """
    return uuid.uuid4().hex
250c6a4b8a0c26610d17f162853dfa4547b8e287
21,916
def filter_using_multiindex(df_to_be_filtered, orig_df, filter_columns):
    """ Filter one dataframe using a multiindex from another """
    new_index = df_to_be_filtered.set_index(filter_columns).index
    original_index = orig_df.set_index(filter_columns).index
    return df_to_be_filtered[new_index.isin(original_index)]
fd321c812ebff02698f05ce1f76db6786fb5ad77
21,919
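A small pandas sketch for filter_using_multiindex above (hypothetical frames; keeps only rows whose (a, b) pair also appears in the reference frame):

import pandas as pd

df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'y', 'x'], 'val': [10, 20, 30]})
ref = pd.DataFrame({'a': [1, 2], 'b': ['x', 'x']})
print(filter_using_multiindex(df, ref, ['a', 'b']))  # keeps rows (1, x) and (2, x)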
def filter_for_sprl(c):
    """
    Given a BIDSFile object, filter for sprl type file
    """
    try:
        val = "sprlcombined" in c.entities["acquisition"]
    except KeyError:
        return False
    else:
        return val
6f05313701ecc01512fedf05709e5e13629c467d
21,920
def roll(lst, shift):
    """Roll elements of a list. This is similar to `np.roll()`"""
    return lst[-shift:] + lst[:-shift]
4805c646d4d6025b0ebcd660a020a58fb6078036
21,922
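A quick check for roll above (matches np.roll's convention of shifting right for positive shifts):

print(roll([1, 2, 3, 4, 5], 2))   # [4, 5, 1, 2, 3]
print(roll([1, 2, 3, 4, 5], -1))  # [2, 3, 4, 5, 1]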
def count_distinct_occurence(calls, texts):
    """Return the count of distinct phone numbers.

    Args:
        calls: list of calls
        texts: list of texts

    Returns:
        number of distinct numbers
    """
    number_set = set()
    for record in calls + texts:
        number_set.add(record[0])
        number_set.add(record[1])
    return len(number_set)
4acf40c50bbd32b23735aaad2c581559829bb664
21,923
def parse_speakers_txt(speakers_path):
    """ Parse LibriSpeech original file with info about speakers """
    mapping = {}
    with open(speakers_path) as f:
        lines = f.read().splitlines()[12:]
    for line in lines:
        splitted = line.split('|')
        u_id = splitted[0].replace(' ', '')
        sex = splitted[1].replace(' ', '')
        mapping[u_id] = sex
    print(f"Found {len(mapping)} speakers")
    return mapping
1d7c1949f02a95af2acbed47d54b4f2ecbc6b15b
21,924
def get(sarif_struct, *path):
    """ Get the sarif entry at PATH """
    res = sarif_struct
    for p in path:
        res = res[p]
    return res
f4c1eb9f98acb5e795d65ac427079748a9b89a6f
21,926
def swift_module_name(label):
    """Returns a module name for the given label."""
    return label.package.lstrip("//").replace("/", "_") + "_" + label.name
e10dee81c5bbd3d5a1fc15ae94aebe74e8de94c6
21,927
def lazy(func):
    """Lazy property decorator."""
    attr = "_" + func.__name__

    @property
    def _lazy(obj):
        try:
            return getattr(obj, attr)
        except AttributeError:
            setattr(obj, attr, func(obj))
        return getattr(obj, attr)

    return _lazy
1b59021bb237d628fb3884082c6bb12e5c7b3851
21,929
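A usage sketch for the lazy decorator above (hypothetical class; the wrapped computation runs once and is cached on the instance):

class Dataset:
    @lazy
    def stats(self):
        print("computing...")  # runs only on first access
        return {"mean": 0.5}

d = Dataset()
d.stats  # prints "computing..."
d.stats  # served from the cached _stats attribute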
def add_chain_task(app):
    """No longer used, but here for backwards compatibility."""
    @app.task(name='celery.chain', shared=False, lazy=False)
    def chain(*args, **kwargs):
        raise NotImplementedError('chain is not a real task')
    return chain
0f1675506f8570f563dbf634f34ad536b0402361
21,930
import subprocess

def execute(args, cwd, capture_output=False, silent=False, **kwargs):
    """Executes a command.

    Args:
        args: List of command line arguments.
        cwd: Directory to execute in.
        capture_output: Whether to capture the output.
        silent: Whether to skip logging the invocation.
        **kwargs: Extra arguments to pass to subprocess.exec

    Returns:
        The output if capture_output, otherwise None.
    """
    if not silent:
        print("+", " ".join(args), " [from %s]" % cwd)
    if capture_output:
        return subprocess.check_output(args, cwd=cwd, **kwargs)
    else:
        return subprocess.check_call(args, cwd=cwd, **kwargs)
da4d474f0071cc952bf9db1c2ce3eab5c37bc133
21,931
def remove_duplicate_values(array_like, tol=0.0):
    """
    Removes duplicate values from a list (when tol=0.0) or removes
    approximately duplicate values if tol != 0.0.
    """
    unique_values = [array_like[0]]
    for element in array_like:
        element_is_duplicate = False
        for uval in unique_values:
            if abs(uval - element) <= tol:
                element_is_duplicate = True
        if not element_is_duplicate:
            unique_values.append(element)
    return unique_values
afdad5db2aa00858aa9bcd29e1b64b744b2fb963
21,932
from pathlib import Path

def create_job(
        create_job_header,
        create_job_body,
        create_job_tail,
        job_name_prefix,
        scenario_name,
        job_name_suffix,
        queue_name,
        ncores,
        work_dir,
        run_dir,
        config_file,
        ):
    """
    Create the job file.

    The job is created by assembling three parts: the job header, the body,
    and the final tail (post execution process). The different parameters
    will be injected in the respective job creation functions.

    Parameters
    ----------
    create_job_header : callable
        The function that will create the header.

    create_job_body : callable
        The function that will create the job body.

    create_job_tail : callable
        The function that will create the job tail.

    job_name_prefix : str
        A prefix for the job name. Normally this is the name of the job
        test case, for example the PDB ID.
        Injected in `create_job_header`.

    scenario_name : str
        The name of the benchmark scenario.
        Injected in `create_job_header`.

    job_name_suffix : str
        An additional suffix for the job name. Normally, `BM5`.
        Injected in `create_job_header`.

    queue_name : str
        The name of the queue.
        Injected in `create_job_header`.

    ncores : int
        The number of cpu cores to use in the jobs.
        Injected in `create_job_header`.

    work_dir : pathlib.Path
        The working dir of the example. That is, the directory where
        `input`, `jobs`, and `logs` reside.
        Injected in `create_job_header`.

    run_dir : pathlib.Path
        The running directory of the scenario.

    config_file : pathlib.Path
        Path to the scenario configuration file.
        Injected in `create_job_body`.

    Returns
    -------
    str
        The job file in the form of string.
    """
    # create job header
    job_name = f'{job_name_prefix}-{scenario_name}-{job_name_suffix}'
    std_out = str(Path('logs', 'haddock.out'))
    std_err = str(Path('logs', 'haddock.err'))
    job_header = create_job_header(
        job_name,
        work_dir=work_dir,
        stdout_path=std_out,
        stderr_path=std_err,
        queue=queue_name,
        ncores=ncores,
        )

    available_flag = str(Path(run_dir, 'AVAILABLE'))
    running_flag = str(Path(run_dir, 'RUNNING'))
    done_flag = str(Path(run_dir, 'DONE'))
    fail_flag = str(Path(run_dir, 'FAIL'))

    # config_file was commented out of the signature in the original but is
    # used here, which would raise a NameError.
    job_body = create_job_body(available_flag, running_flag, config_file)

    job_tail = create_job_tail(std_err, done_flag, fail_flag)

    return job_header + job_body + job_tail
acc38ee8dc0169173f71e1fa5f81e75b340250e1
21,934
def join(expr, lh_arg, rh_arg):
    """Helper function to combine arguments.
    """
    if lh_arg is None or rh_arg is None:
        return None
    else:
        return expr.copy([lh_arg, rh_arg])
8044db80f799894cb2684bc955e6546ca69f5e4b
21,935
def toUnicode(articles):
    """Convert a list of utf-8 encoded articles to unicode strings."""
    return tuple([art.decode('utf_8') for art in articles])
6924a837d5a093b3e0ea358381d67a1fc011519c
21,936
def update_parent_child_relationships(links_dict, old_id, new_id):
    """
    Update the parent-child relationships after clustering a firework
    by replacing all the instances of old_id with new_id

    Args:
        links_dict (dict): Existing parent-child relationship dict
        old_id (int): Existing id of the firework
        new_id (int): New id of the firework

    Returns:
        links_dict (dict): Updated parent-child relationship dict
    """
    # Enumerate child ids and replace the old id with the new one
    for parent_id in links_dict:
        child_id_list = links_dict[parent_id]
        for index, child_id in enumerate(child_id_list):
            if child_id == old_id:
                child_id_list[index] = new_id
                break

    # Enumerate parent ids and replace the old id with the new one
    if old_id in links_dict:
        links_dict[new_id] = links_dict.pop(old_id)
    return links_dict
a08201e455ed87ebaae52542c99ff500df523367
21,938
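A quick sketch for update_parent_child_relationships above (hypothetical ids):

links = {1: [2, 3], 2: [4]}
print(update_parent_child_relationships(links, old_id=2, new_id=5))
# {1: [5, 3], 5: [4]}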
def normalize_name(name):
    """Returns a normalized version of the given algorithm name."""
    name = name.upper()
    # BouncyCastle uses X.509 with an alias of X509, Conscrypt does the
    # reverse. X.509 is the official name of the standard, so use that.
    if name == "X509":
        name = "X.509"
    # PKCS5PADDING and PKCS7PADDING are the same thing (more accurately,
    # PKCS#5 is a special case of PKCS#7), but providers are inconsistent
    # in their naming. Use PKCS5PADDING because that's what our docs have
    # used historically.
    if name.endswith("/PKCS7PADDING"):
        name = name[:-1 * len("/PKCS7PADDING")] + "/PKCS5PADDING"
    return name
77858e7217c3a17e3b041781f117e7b53dd4c57d
21,939
import random

def cosvf_check_bounds(rtype1_start_idx, init):
    """
    Check bounds of individual's COSVF Pmin and Pmax array during evolution.

    Two checks:
        - Minimum penalty is zero
        - Pmax cannot be greater than Pmin

    :param rtype1_start_idx: (int) position on individual parameter list
        at which rtype1 begins
    :returns decorator: (func) a decorator function that is applied after
        individual mating or mutation
    """
    def decorator(func):
        def wrapper(*args, **kargs):
            population = func(*args, **kargs)
            for ind in population:
                # check pmin bounds (greater than zero)
                for idx in range(0, len(ind)):
                    if ind[idx] > 0:
                        ind[idx] = -1 * random.uniform(1, 10)
                # check pmax > pmin violation for r_type1 (pminmax COSVF)
                for pmax in range(rtype1_start_idx + 1, len(ind), 2):
                    if ind[pmax] > ind[pmax - 1]:
                        ind[pmax] = max(
                            ind[pmax - 1] + (ind[pmax - 1] * random.uniform(0.05, 1)),
                            -1.99e3)
                if init == True:
                    for idx in range(0, rtype1_start_idx + 1):
                        ind[idx] = -1.0
            return population
        return wrapper
    return decorator
0ae1ce227054798a521f8ad78f59665490989a7d
21,940
def strip_coln_white_space(df, coln):
    """Remove white space after some basis of estimate codes."""
    df[coln] = df[coln].str.strip()
    return df
06ec3067e2ccc9c0b0bd20b398f9794fe566492a
21,941
import torch

def upfeat(input, prob):
    """
    A function to compute pixel features from superpixel features

    Args:
        input (tensor): superpixel feature tensor.
        prob (tensor): one-hot superpixel segmentation.

    Returns:
        reconstr_feat (tensor): the pixel features.

    Shape:
        input: (B, N, C)
        prob: (B, N, H, W)
        reconstr_feat: (B, C, H, W)
    """
    B, N, H, W = prob.shape
    prob_flat = prob.view(B, N, -1)
    reconstr_feat = torch.matmul(prob_flat.permute(0, 2, 1), input)
    reconstr_feat = reconstr_feat.view(B, H, W, -1).permute(0, 3, 1, 2)
    return reconstr_feat
8b1515d7e3c7cfbf656f4e60e4c5e7899e48dbbf
21,942
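A shape-check sketch for upfeat above (random tensors; maps N superpixel features back onto the H x W pixel grid via a soft assignment):

import torch

B, N, C, H, W = 2, 4, 8, 16, 16
feats = torch.rand(B, N, C)                        # superpixel features
assign = torch.softmax(torch.rand(B, N, H, W), 1)  # per-pixel assignment
print(upfeat(feats, assign).shape)                 # torch.Size([2, 8, 16, 16])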
import hashlib

def get_md5(s):
    """
    @param s: string to hash
    @return: hex digest of the md5 hash
    """
    md = hashlib.md5()
    md.update(s.encode('utf-8'))
    return md.hexdigest()
ace743a9a5837d6496de7c5f80ebf27be11405ec
21,945
def in_(left, right):
    """:yaql:operator in

    Returns true if there is at least one occurrence of left string in right.

    :signature: left in right
    :arg left: left operand, which occurrence is checked
    :argType left: string
    :arg right: right operand
    :argType right: string
    :returnType: boolean

    .. code::

        yaql> "ab" in "abc"
        true
        yaql> "ab" in "acb"
        false
    """
    return left in right
05678dbe16a212a1e5a712cf52cdc853173f7bad
21,946
import math

def dydx(y, x):
    """
    This function returns the differential equation
    """
    return (3 * (math.e ** (-x)) - 0.4 * y)  # ODE to solve
7a7ab7628f235b4b47b6031094d8d3d5ca3db6db
21,947
def remote(obj):
    """ Return the remote counterpart of a local object.

    :param obj: the local object
    :return: the corresponding remote entity
    """
    try:
        return obj.__remote__
    except AttributeError:
        return None
adf85d39797c158bd16f874f6ce9cf3867d6fb8b
21,948
def build_dashboard(signals_df, portfolio_evaluation_df):
    """Build the dashboard."""

    # Create hvplot visualizations
    price_df = signals_df[["close", "SMA50", "SMA100"]]
    price_chart = price_df.hvplot.line().opts(xaxis=None)
    portfolio_evaluation_table = portfolio_evaluation_df.hvplot.table(
        columns=["index", "Backtest"]
    )

    # Build the dashboard
    dashboard = price_chart + portfolio_evaluation_table

    return dashboard
9a457ae657ec939ff473241d5c0a403d3009f1ec
21,949
def parse_memory_line(line):
    """Parses the memory lines of an atom; returns the bytes of the
    observation, e.g. 2c00f4a6
    """
    return [int(line[i:i + 2], 16) for i in range(0, len(line.strip()), 2)]
0b6250556778b5ddad62ce541282bde7ca523a0e
21,951
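For parse_memory_line above, a quick call with the docstring's example string: each pair of hex digits becomes one byte value.

print(parse_memory_line("2c00f4a6"))  # [44, 0, 244, 166]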
import requests

def download_pac(candidate_urls, timeout=1, allowed_content_types=None, session=None):
    """
    Try to download a PAC file from one of the given candidate URLs.

    :param list[str] candidate_urls: URLs that are expected to return a PAC file.
        Requests are made in order, one by one.
    :param timeout: Time to wait for host resolution and response for each URL.
        When a timeout or DNS failure occurs, the next candidate URL is tried.
    :param allowed_content_types: If the response has a ``Content-Type`` header,
        then consider the response to be a PAC file only if the header is one of
        these values. If not specified, the allowed types are
        ``application/x-ns-proxy-autoconfig`` and ``application/x-javascript-config``.
    :return: Contents of the PAC file, or `None` if no URL was successful.
    :rtype: str|None
    """
    if not allowed_content_types:
        allowed_content_types = {'application/x-ns-proxy-autoconfig',
                                 'application/x-javascript-config'}

    if not session:
        sess = requests.Session()
    else:
        sess = session
    sess.trust_env = False  # Don't inherit proxy config from environment variables.

    for pac_url in candidate_urls:
        try:
            resp = sess.get(pac_url, timeout=timeout)
            content_type = resp.headers.get('content-type', '').lower()
            if content_type and True not in [allowed_type in content_type
                                             for allowed_type in allowed_content_types]:
                continue
            if resp.ok:
                return resp.text
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            continue
ba1554e01c0a3b2cf9251d686d946459aef40f2d
21,952
from io import StringIO

def list_to_string_io(list_of_entries: list):
    """
    Return a file-like object of the type StringIO from a given list of
    lists of strings.

    Argument:
        - list_of_entries {list} - list of lists of strings to transform
          to StringIO

    Example:
        [
            ['AR8IEZO1187B99055E', 'SOINLJW12A8C13314C', 'City Slickers', 2008, 149.86404],
            ['AR558FS1187FB45658', 'SOGDBUF12A8C140FAA', 'Intro', 2003, 75.67628]
        ]

    Return:
        {StringIO} - file type object with values in input list concatenated.

    Example:
        'AR8IEZO1187B99055E\tSOINLJW12A8C13314C\tCity Slickers\t2008\t149.86404\n
         AR558FS1187FB45658\tSOGDBUF12A8C140FAA\tIntro\t2003\t75.67628'
    """
    return StringIO('\n'.join(['\t'.join([str(entry) for entry in set_of_entries])
                               for set_of_entries in list_of_entries]))
b71528876a2fd65264c1f77bdea96ab112616894
21,954
def verify_epsilons(variables, old_productions, current_productions):
    """
    Check if all epsilons are present in the available variables
    from the original grammar in the given normal form.
    """
    old_productions_with_epsilon = set(
        filter(
            lambda prod: prod.head in variables and not prod.body,
            old_productions,
        )
    )

    current_productions_with_epsilon = set(
        filter(lambda prod: not prod.body, current_productions)
    )

    for production in old_productions_with_epsilon:
        if production not in current_productions_with_epsilon:
            return False

    return True
21cdb7a1bd796e7c1fd663ea8c8747e20c7828d9
21,955
import os

def build_shell_env(env=None):
    """ Construct the environment for the shell to run in based on 'env',
    or the current process's environment if env is None."""
    if not env:
        env = os.environ
    # Don't inherit PYTHONPATH or LD_LIBRARY_PATH - the shell launch script must set
    # these to include dependencies. Copy 'env' to avoid mutating argument or os.environ.
    env = dict(env)
    if "PYTHONPATH" in env:
        del env["PYTHONPATH"]
    if "LD_LIBRARY_PATH" in env:
        del env["LD_LIBRARY_PATH"]
    return env
c8b75bd24e917ff760dd84d8353a91e3243b8ef5
21,956
import re

def parse_quast_result(path_to_quast_result):
    """
    Args:
        path_to_quast_result (str): Path to the QUAST result file.

    Returns:
        dict: Parsed QUAST report
        For example:
        {
            "contigs_count": 72,
            "largest_contig": 692871,
            "N50": 299446,
            "L50": 6,
            "N75": 123167,
            "L75": 12,
            "total_length": 5182695,
            "percent_GC": 51.75,
            "complete_busco_percent": 100.0,
            "partial_busco_percent": 100.0
        }
    """
    quast_output = []
    with open(path_to_quast_result, 'r') as quast_result:
        for line in quast_result:
            quast_output.append(line)

    def parse_quast_report_line(line):
        """
        Takes a line of the quast report and returns the specific data
        that we're interested in from that line.

        Collapse multiple spaces into a tab char ('\t'), then split the
        line on tabs and take the second item. Cast floats to floats and
        ints to ints.

        '# contigs    751   ' -> '# contigs\t751\t' -> ['# contigs', '751', ''] -> '751' -> 751
        """
        result_data = re.sub(' {2,}', '\t', line).split('\t')[1]
        if re.match(r'\d+\.\d+', result_data):
            return float(result_data)
        else:
            return int(result_data)

    # Associate a regex that can be used to identify the line of interest in
    # the quast report with a string to use as a key in the output dict.
    quast_report_parsing_regexes = {
        r'^# contigs {2,}\d+': 'num_contigs',
        r'^Largest contig {1,}\d+': 'largest_contig',
        r'^N50 +\d+': 'N50',
        r'^NG50 +\d+': 'NG50',
        r'^L50 +\d+': 'L50',
        r'^LG50 +\d+': 'LG50',
        r'^N75 +\d+': 'N75',
        r'^NG75 +\d+': 'NG75',
        r'^L75 +\d+': 'L75',
        r'^LG75 +\d+': 'LG75',
        r'^Total length {2,}\d+': 'total_length',
        r'^Reference length +\d+': 'reference_length',
        r'^GC \(%\) +\d+\.\d+': 'percent_GC',
        r'^Reference GC \(%\) +\d+\.\d+': 'reference_percent_GC',
        r'^Genome fraction \(%\) +\d+\.\d+': 'genome_fraction_percent',
        r'^Duplication ratio +\d+\.\d+': 'duplication_ratio',
        r'^Complete BUSCO \(\%\) +\d+\.\d+': 'complete_busco_percent',
        r'^Partial BUSCO \(\%\) +\d+\.\d+': 'partial_busco_percent',
    }

    quast_result = {}
    for line in quast_output:
        for regex, key in quast_report_parsing_regexes.items():
            if re.match(regex, line):
                quast_result[key] = parse_quast_report_line(line)

    return quast_result
d7a620a1dedbcbdf00b82a6b03c816ac963503cb
21,957
def is_digit(c) -> bool:
    """Checks if given char is a digit."""
    try:
        return ord(c) >= 48 and ord(c) <= 57
    except TypeError:
        return False
81eb6d9a3b73e567dff9b5c040309073db4ed3eb
21,958
def check_redundant(model_stack: list, stack_limit: int) -> bool:
    """Decide whether recursion should stop because the model stack has
    become redundant.

    :param model_stack: stack of models visited so far
    :type model_stack: list
    :param stack_limit: depth at which redundancy checks kick in
    :type stack_limit: int
    :return: True if recursion should stop
    :rtype: bool
    """
    stop_recursion = False
    if len(model_stack) > stack_limit:
        # rudimentary CustomUser->User->CustomUser->User detection, or
        # stack depth shouldn't exceed x, or
        # we've hit a point where we are repeating models
        if (
            (model_stack[-3] == model_stack[-1])
            or (len(model_stack) > 5)
            or (len(set(model_stack)) != len(model_stack))
        ):
            stop_recursion = True
    return stop_recursion
81fadf745e9cc5433b116e87e8f9c31342b78d41
21,959
import re

def is_greeting_request(text):
    """
    Returns true if the specified text is a greeting.
    """
    # Matches common Japanese greetings: ohayou, oyasumi, konnichiwa,
    # konbanwa (both は and わ spellings).
    greeting_re = '(お(はよ|やすみ)|(こん(にち|ばん)[は|わ]))'
    return not re.search(greeting_re, text) is None
377dce9bad1e77ab28b32ee52baa91867d568822
21,960