content: string, length 35 to 416k
sha1: string, length 40
id: int64, range 0 to 710k
def power(base: int, p: int) -> int: """Assumes inputs are integers and that the power p >= 0. Base case: a⁰ = 1. Recursive step: aⁿ⁺¹ = aⁿ * a.""" assert p >= 0 if p == 0: return 1 else: return base * power(base, p - 1)
3383ecf1d72ee7f913b90b3c04af831c58215c57
696,523
def parse_subtree(my_map): """ param: my_map - tree element for the schema return: tree elements under each branch """ # Recursive search in order to retrieve the elements under the branches in the schema res = {} for k in my_map: if 'properties' in my_map[k]: res[k] = parse_subtree(my_map[k]['properties']) else: res[k] = "type: " + my_map[k].get('type', "") return res
0a1ef13a5f94d4bdff4b1bac80fb8c56b76c54d0
696,524
import os def get_directory_size(directory): """Get the filesize of a directory and its contents. Args: directory (String): The directory to check. Returns: int: The filesize of the directory. """ total = 0 try: for entry in os.scandir(directory): # For each entry, either add filesize to the total, or recurse into the directory if entry.is_file(): total += entry.stat().st_size elif entry.is_dir(): total += get_directory_size(entry.path) except NotADirectoryError: return os.path.getsize(directory) except PermissionError: return 0 except OSError: return 0 return total
869f1ffcca22b65f3cd94ff2d8b5c6a452058a57
696,525
def string_matching_naive(text='', pattern=''): """Returns positions where pattern is found in text. We slide 'pattern' over the text; complexity is O((n-m)m) Example: text = 'ababbababa', pattern = 'aba' string_matching_naive(text, pattern) returns [0, 5, 7] @param text text to search inside @param pattern string to search for @return list containing offsets (shifts) where pattern is found inside text """ n = len(text) m = len(pattern) offsets = [] for i in range(n-m+1): if pattern == text[i:i+m]: offsets.append(i) return offsets
d9cf1d755e82e5a0396950eb3b395d712ff4ce77
696,526
import tkinter def gui_input(prompt1, prompt2): """ Creates a window to collect Risk and Budget info from user Parameters ---------- prompt1 : String First question in input prompt prompt2 : String Second question in input prompt Returns ------- value1 : String Value of response to the question asked in prompt1 value2 : String Value of response to the question asked in prompt2 """ # create tkinterwindow and set title and geometry of window _root = tkinter.Tk() _root.title('Wolves of DK St - Options Strategy Visualization') _root.geometry("+600+400") # Set text for input screen tkinter.Label(_root, text='Welcome to the Options Strategy Visualization').grid(row=1, column=1, columnspan=2) tkinter.Label(_root, text='Enter your desired probability of profit:').grid(row=3, column=1, columnspan=2) tkinter.Label(_root, text=' ').grid(row=2, column=1, columnspan=2) tkinter.Label(_root, text='0.1 = 10% chance of profit (Throw your money away)').grid(row=4, column=1, columnspan=2) tkinter.Label(_root, text='0.5 = 50% chance of profit (gambling)').grid(row=5, column=1, columnspan=2) tkinter.Label(_root, text='0.9 = 90% chance of profit (high chance for profit)').grid(row=6, column=1, columnspan=2) tkinter.Label(_root, text=' ').grid(row=8, column=1, columnspan=2) tkinter.Label(_root, text='Enter your desired budget (number, min $2000)').grid(row=9, column=1, columnspan=2) tkinter.Label(_root, text='Please enter only numeric digits, no characters').grid(row=10, column=1, columnspan=2) tkinter.Label(_root, text=' ').grid(row=13, column=1, columnspan=2) # set prompt and input text; setup input collection to be saved to var1 and var2 var1 = tkinter.StringVar() var2 = tkinter.StringVar() label1 = tkinter.Label(_root, text=prompt1) entry1 = tkinter.Entry(_root, textvariable=var1) label1.grid(row=7, column=1) entry1.grid(row=7, column=2) label2 = tkinter.Label(_root, text=prompt2) entry2 = tkinter.Entry(_root, textvariable=var2) label2.grid(row=12, column=1) entry2.grid(row=12, column=2) # create an "Enter" button to conclude data collection and exit GUI go = tkinter.Button(_root, text='Enter')#, command=store_data) go.grid(row=14, column=1, columnspan=2) go.bind("<Button-1>", lambda event: _root.destroy()) # this will block main method from advancing until the window is destroyed _root.mainloop() # after the window has been destroyed, we can't access # the entry widget, but we _can_ access the associated # variable value1 = var1.get() value2 = var2.get() return value1, value2
1c770c56aba7ca7a793561ac2063aa7dae41b15e
696,527
def _remove_doc_types(dict_): """ Removes "doc_type" keys from values in `dict_` """ result = {} for key, value in dict_.items(): value = value.copy() value.pop('doc_type', None) result[key] = value return result
e5badc0a73edba3233816d4c99fa7f26845c77f5
696,528
def get_id_object_from_json(json_data): """Returns the _id object for the given valid json. Keyword arguments: json_data -- the valid json data input """ return json_data["key"] if "key" in json_data else json_data["message"]["_id"]
172a89e762ad078698ce10cd77366e50e190f29f
696,529
def get_amplitudes_check(function): """Decorator to check the arguments of the amplitude getter function in the register class. Arguments: function {} -- The tested function """ def wrapper(self, nth=None): """Method to return the coefficient of the n-th possible state for the register if the parameter is definite. If it isn't, then the return value is the list of the coefficients of all possible states. Keyword Arguments: nth {int, None} -- Number of n-th possible amplitude (default: {None}) Raises: TypeError Examples: >>> import qvantum >>> >>> q1 = qvantum.Random_Qubit() >>> q2 = qvantum.Random_Qubit() >>> >>> r = qvantum.Register([q1, q2]) >>> r.show() '|Ψ> = (0.1075+0.7037i)|00> + (0.6331-0.0247i)|01> + (0.2171-0.0638i)|10> + (-0.0347-0.1983i)|11>' >>> r.get_amplitudes(2) (-0.10034614628094177-0.1325886060571926j) """ if isinstance(nth, int) or nth is None: return function(self, nth) else: raise TypeError('Invalid input! Argument must be integer or None type.') return wrapper
eaca90171c68639d398ca13bd1ec764ac8e169ff
696,531
def create_slices(start, stop, step=None, length=1): """ Generate slices of time indexes Parameters ---------- start : int Index where first slice should start. stop : int Index where last slice should maximally end. length : int Number of time sample included in a given slice. step: int | None Number of time samples separating two slices. If step = None, step = length. Returns ------- slices : list List of slice objects. """ # default parameters if step is None: step = length # slicing slices = [slice(t, t + length, 1) for t in range(start, stop - length + 1, step)] return slices
c45979e18db9d3f554ae7d194ca001bc3976fa83
696,532
def wootric_url(route): """Helper to build a full wootric API route, handling a leading '/' >>> wootric_url('v1/responses') 'https://api.wootric.com/v1/responses' >>> wootric_url('/v1/responses') 'https://api.wootric.com/v1/responses' """ route = route.lstrip('/') return f'https://api.wootric.com/{route}'
f48ebe6ac7fe05798230c5bc27681adcb03804ef
696,534
def divide(Cb, Cs): """ Looks at the color information in each channel and divides the blend color from the base color. """ B = Cb / (Cs + 1e-6) B[B > 1] = 1 return B
635e20d9f6fe97635a17da009b91bb2e3c4799ca
696,535
import os def hadoop_log_dir(hadoop_home=None): """Return the path where Hadoop stores logs. :param hadoop_home: putative value of :envvar:`HADOOP_HOME`, or None to default to the actual value if used. This is only used if :envvar:`HADOOP_LOG_DIR` is not defined. """ try: return os.environ['HADOOP_LOG_DIR'] except KeyError: # Defaults to $HADOOP_HOME/logs # http://wiki.apache.org/hadoop/HowToConfigure if hadoop_home is None: hadoop_home = os.environ['HADOOP_HOME'] return os.path.join(hadoop_home, 'logs')
ecaa44465237a29605a336a063acc0b808c7cc17
696,537
import os import yaml def identify_known_accounts(data): """ Given a known list of account IDs from yaml config, append note about their account if they're known to us. :return: Account description """ # If the accounts custom aliases yaml file does not exist just default to normal behavior if os.path.isfile('accounts.yaml'): accountsfile = open('accounts.yaml').read() contents = yaml.load(accountsfile, Loader=yaml.FullLoader) if 'accounts' in contents.keys(): accounts = contents["accounts"] for acct in accounts.keys(): if acct == data: return accounts[acct] return "Unidentified" else: return ""
1f6a68a222d31ef96a95205e891e9bfc96a93f3d
696,538
def string_or_none(v): """ Coax a value to a string """ if type(v) == str and v != '': return v try: return str(v) except ValueError: pass return None
e828264a1ad4dea2d96291041dff35cc69623f54
696,539
def get_s_next(s, a): """Function that determines the next state.""" # action:[up, right, down, left] direction = [-3, 1, 3, -1] s_next = s + direction[a] return s_next
560eb9cd7c495132e76ce724058a911ee651884d
696,541
def check_xml(root): """Check xml file root, since jarvis4se version 1.3 it's <systemAnalysis>""" if root.tag == "systemAnalysis": return True else: return False
d2921e2b5784a26a7636b65f6d3fbcf1425f8448
696,542
def get_rotation(transform): """Gets rotation part of transform :param transform: A minimum 3x3 matrix """ assert transform.shape[0] >= 3 \ and transform.shape[1] >= 3, \ "Not a transform? Shape: %s" % \ transform.shape.__repr__() assert len(transform.shape) == 2, \ "Assumed 2D matrices: %s" % \ transform.shape.__repr__() return transform[:3, :3]
c8fa40d883c3b8cbc7fe24ee38fe35cb6813f491
696,543
def check(s, ans): """ This is a predicate function to check whether a string is in a given answer. :param s: str, the one need to be checked. :param ans: str, the given answer. :return: bool, """ if s in ans: return True return False
86af3d89634858f4839fc486580e95f1ba04b6a3
696,544
import struct def _as_float(value): """ Truncate to single-precision float. """ return struct.unpack('f', struct.pack('f', value))[0]
7bf9b2e4d61b7a91f6c8334f0e7f093d55c6d614
696,545
import os def get_list_of_paths(path): """Return list of paths for file or all files in a directory""" path_list = [] if os.path.isfile(path): path_list.append(path) elif os.path.isdir(path): abs_path = os.path.abspath(path) for file_name in os.listdir(abs_path): path_list.append(path + "/" + file_name) return path_list
9ee38c39d47f167db81b38b69221021ed627ed30
696,546
from pathlib import Path def ExistFile(path): """Check whether a file exists. Args: path: The path. Returns: bool: True if the file exists otherwise False. """ return Path(path).is_file()
b71856d8b8ffde2c768a4a4e16bc9303d9a92251
696,547
def generate_traits_list(traits: dict) -> list: """Returns a list of trait values Arguments: traits: a dict containing the current trait data Return: A list of trait data """ # compose the summary traits trait_list = [traits['local_datetime'], traits['canopy_height'], traits['access_level'], traits['species'], traits['site'], traits['citation_author'], traits['citation_year'], traits['citation_title'], traits['method'] ] return trait_list
995c66208d3306958811420ba8ebb1bcd7a11ccf
696,548
from typing import List def create_fhir_bundle( resources: List[dict], bundle_type: str = "transaction" ) -> dict: """Creates a FHIR bundle from a list of FHIR resources Parameters ---------- resources : List[dict] List of FHIR resources bundle_type : str, optional FHIR Bundle type https://www.hl7.org/fhir/bundle-definitions.html#Bundle.type, by default "transaction" Returns ------- dict FHIR Bundle """ return { "resourceType": "Bundle", "type": bundle_type, "entry": [ { "resource": resource, "request": {"method": "POST", "url": resource["resourceType"]}, } for resource in resources ], }
70f94829b2e366eea97fd31245eb77e69aaaf066
696,549
def patch_cmass_output(lst, index=0): """ Parameters ---------- lst : list of tuples output of afni.CenterMass() index : int index in the list of tuples Returns ------- tuple one set of center of mass coordinates """ if len(lst) <= index: raise IndexError("lst index out of range") return lst[index]
98b637a6a922bdbbb84b99f78a47de1519443192
696,550
import os def find_files(extensions): """Return all files in the current directory that have the passed extensions Args: extensions: list of extensions, each starting with a dot. For example ['.c', '.cpp'] Returns: List of filenames Raises: Nothing """ return [fname for fname in os.listdir('.') if fname.endswith(extensions)]
55e409602a23e072656d3a64a69e8f788e21907c
696,551
import numpy def areArraysEqual(arr1, arr2): """Are both `arr1` and `arr2` equal arrays? Arguments can be regular NumPy arrays, chararray arrays or structured arrays (including structured record arrays). They are checked for type and value equality. """ t1 = type(arr1) t2 = type(arr2) if not ((hasattr(arr1, 'dtype') and arr1.dtype == arr2.dtype) or issubclass(t1, t2) or issubclass(t2, t1)): return False return numpy.all(arr1 == arr2)
cfd2262e42310614a84e1a9117aaa80860a28b10
696,552
def replace_version(content, from_version, to_version): """ Replaces the value of the version specification value in the contents of ``contents`` from ``from_version`` to ``to_version``. :param content: The string containing version specification :param to_version: The new version to be set :return: (result_content, number of occurrences) """ frm_str = str(from_version) to_str = str(to_version) count = content.count(frm_str) result_setup_py = content.replace(frm_str, to_str, count) return result_setup_py, count
dd792b6674c21dc6e11ef8013d88b9c210f9604f
696,553
def generate_train_config(model_spec, train_batch_spec, val_batch_spec, epoch_size, n_epochs=100, lr=0.001): """ Generates a spec fully specifying a run of the experiment Args: model_spec: a model spec train_batch_spec: a batch spec for train val_batch_spec: batch spec for validation epoch_size: int n_epochs: int lr: learning rate - float Returns: train_spec """ train_spec = { 'model_spec': model_spec, 'train_batch_spec': train_batch_spec, 'val_batch_spec': val_batch_spec, 'train_spec': { 'epoch_size': epoch_size, 'n_epochs': n_epochs, 'lr': lr } } return train_spec
0e19dab0263eda8505ddb830fd255b2a760d822c
696,554
import subprocess import os def check_hash(filename, hash_file): """Checks that the hash of the downloaded file against the previous hash. Args: filename: string containing the name of the downloaded file hash_file: string containing the name of the hash recording file Returns: A boolean value True if the hashes match of False if they don't """ popen = subprocess.Popen( ['sha256sum {}'.format(filename)], shell=True, stdout=subprocess.PIPE) stdout, _ = popen.communicate() if hash_file in os.listdir(os.getcwd()): with open(hash_file, 'r+') as hashfp: if hashfp.readline() == stdout.decode('utf-8'): return True with open(hash_file, 'w+') as hashfp: hashfp.write(stdout.decode('utf-8')) hashfp.flush() return False
1cb63942fcee3d7e94a73224fb9a3d47692c173a
696,555
import pip def install_option_2(name: str): """Install a package programmatically via the pip package""" pip.main(['install', name]) return "done"
3daeb0bc6a35144fbc3f1668c0b6782d1836ba71
696,558
import numpy def spectrum_magnitude(frames, NFFT): """Apply FFT and Calculate magnitude of the spectrum. Args: frames: 2-D frames array calculated by audio2frame(...). NFFT:FFT size. Returns: Return magnitude of the spectrum after FFT, with shape (frames_num, NFFT). """ complex_spectrum = numpy.fft.rfft(frames, NFFT) return numpy.absolute(complex_spectrum)
7f761e630978a78a636b9f21332ef72d54f1d472
696,559
def strip_path_prefix(ipath, prefix): """ Strip prefix from path. Args: ipath: input path prefix: the prefix to remove, if it is found in :ipath: Examples: >>> strip_path_prefix("/foo/bar", "/bar") '/foo/bar' >>> strip_path_prefix("/foo/bar", "/") 'foo/bar' >>> strip_path_prefix("/foo/bar", "/foo") '/bar' >>> strip_path_prefix("/foo/bar", "None") '/foo/bar' """ if prefix is None: return ipath return ipath[len(prefix):] if ipath.startswith(prefix) else ipath
c9634c9b06466c7af07a39c97645f0c601fe3ce2
696,560
def link_header( header, link_delimiter=',', param_delimiter=';' ): """ Parses the contents of the ``link`` header into a dictionary Parameters ---------- header: string contents of the ``link`` header link_delimiter: Optional[str] character that separates the links param_delimiter: Optional[str] character that separates the ``rel`` parameter from the link Returns ------- dict Note ---- link to the header specification `link header <https://tools.ietf.org/html/rfc5988>`_ """ result = {} links = header.split( link_delimiter ) for link in links: segments = link.split( param_delimiter ) if len( segments ) < 2: continue link_part = segments[0].strip() rel_part = segments[1].strip().split( '=' )[1][1:-1] if not link_part.startswith( '<' ) and not link_part.endswith( '>' ): continue link_part = link_part[1:-1] result[rel_part] = link_part return result
eceab56c0c00fccb9860e85d1e4aa383fcdc5d30
696,561
import typing import asyncio async def process_concurrent(objs: typing.List[typing.Any], process: typing.Callable[[typing.Any], typing.Awaitable], workers: int = 5): """ Run a processing coroutine on a list of objects with multiple concurrent workers. """ # Divide and round up step = (len(objs) - 1) // workers + 1 async def _proc_range(start, end): for i in range(start, end): await process(objs[i]) return await asyncio.gather(*(_proc_range(i * step, min((i + 1) * step, len(objs))) for i in range(workers)))
74e983b7cf7480d14c51304b7cc6daaac75ea837
696,563
import torch def euclidean_distance(query_mat, target_mat): """ query_mat: (n, dim) target_mat: (m, dim) """ assert type(query_mat) == type(target_mat) inner_dot_mat = torch.matmul(query_mat, target_mat.t()) # (n, dim) @ (dim, m) -> (n, m) qnorm = query_mat.pow(2).sum(1, keepdim=True) tnorm = target_mat.pow(2).sum(1, keepdim=True).t() dist = qnorm - 2 * inner_dot_mat + tnorm return dist
d9d28677b7d643929bf59d9b236e5263e0cabd59
696,564
import os def test_chm (archive, compression, cmd, verbosity, interactive): """Test a CHM archive.""" return [cmd, '-d', os.path.abspath(archive)]
d5573c0c73149c3490d8ac26a7d998d6c9a35f5e
696,565
import os def app_dir(): """ :return: the absolute path of the directory containing the main streamlit app """ return os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
7bdd10b898c9b66b34a4572578715cfae54cc9a0
696,566
import multiprocessing def get_cpus(): """This method obtains the number of available cpus""" return multiprocessing.cpu_count()
d9cc4ac2c01043432262f2b21af9b94a6031fc8e
696,567
def prop_all(printer, ast): """Prints an all property "A ...".""" prop_str = printer.ast_to_string(ast["prop"]) return f'A{prop_str}'
0a03f04d955251918a1ea7dcf3576f30cb790656
696,568
def platform(): """Return the list of platforms the plugin is available for.""" return ['linux', 'mac']
eac0726a00259208a35d1f1c0da3ef77b7e49764
696,569
import os def to_platform_path(path): """Return a version of ``path`` where all separator are :attr:`os.sep` """ return (path.replace("/", os.sep).replace("\\", os.sep) if path is not None else None)
11801bdbb3556eb402ab5d5bffe1fd4cd170c1a5
696,570
def yesno(val, yes='yes', no='no'): """ Return ``yes`` or ``no`` depending on value. This function takes the value and returns either yes or no depending on whether the value evaluates to ``True``. Examples:: >>> yesno(True) 'yes' >>> yesno(False) 'no' >>> yesno(True, 'available', 'not available') 'available' """ return yes if val else no
cfdbe3ffb53ed0239e39392873ab128506379107
696,571
def get_tool(): # noqa: E501 """Get tool information Get information about the tool # noqa: E501 :rtype: Tool """ return 'do some magic!'
0ddc3c8cd44a29b05e0cabc4fd4e9ca2b430fdab
696,572
from typing import Sequence from typing import List def list_insert_list(l:Sequence, to_insert:Sequence, index:int) -> List: """ Insert `to_insert` into a shallow copy of `l` at position `index`. This function is adapted from: http://stackoverflow.com/questions/7376019/ Parameters ---------- l : typing.Sequence An iterable to_insert : typing.Sequence The items to insert index : int The location to begin the insertion Returns ------- updated_l : typing.List A list with `to_insert` inserted into `l` at position `index` """ ret = list(l) ret[index:index] = list(to_insert) return ret
2f332cc70f64a740c7dd26867978121f4b68acb7
696,573
def ordered_sample(population, sample_size): """ Samples the population, taking the first `n` items (a.k.a `sample_size') encountered. If more samples are requested than are available then only yield the first `sample_size` items :param population: An iterator of items to sample :param sample_size: The number of items to retrieve from the population :return: A list of the first `sample_size` items from the population """ empty = True results = [] for i, item in enumerate(population): empty = False if i >= sample_size: break results.append(item) if empty: raise ValueError('Population is empty') return results
0aba56350b6acf44098ea247abb979c35df947e8
696,575
from pathlib import Path def basename(var, orig = False): """Get the basename of a path""" bname = Path(var).name if orig or not bname.startswith('['): return bname return bname[bname.find(']')+1:]
16d8365d5be084eaa45921fc633e552e3224847b
696,576
def get_image_and_label(filenames, class_names_to_ids, index): """ Return a single image and its label :param filenames: list of data files :param class_names_to_ids: list of labels :param index: index of the sample :return: the image and label at position index """ image_data = filenames[index][0].tobytes() class_id = filenames[index][1] return image_data, 28, 28, class_id
34094c8f99444b69310ee3399868a5fd29140a18
696,577
import os def needsToBeUpdated(sourceFileName, cloneFileName): """Returns True if cloneFileName is older than sourceFileName. :param sourceFileName: Current version of the file. :param cloneFileName: Archive version of the file. :return: True if cloneFileName is older than sourceFileName. >>> saveStrToFile("c:/temp/source.txt", "Test file.") >>> saveStrToFile("c:/temp/clone.txt", "Test file.") >>> needsToBeUpdated("c:/temp/source.txt", "c:/temp/clone.txt") False >>> import os >>> os.remove("c:/temp/source.txt") >>> os.remove("c:/temp/clone.txt") """ if os.path.exists(cloneFileName): return os.path.getmtime(sourceFileName) > os.path.getmtime(cloneFileName) else: return True
fa5519cc5c1499cb9149f0e4d1c2268bd23a5c49
696,579
def _events_to_analysis_dimensions(events, analysis_space): """Return a list of arrays of the values of events in each of the analysis dimensions specified in analysis_space""" return [events[x] for x, bins in analysis_space]
130a6f35b8d16feeabfcbb6316936ba0bdc2cafe
696,580
def sort_table(i): """ Input: { table - experiment table sort_index - if !='', sort by this number within vector (i.e. 0 - X, 1 - Y, etc) } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 table - updated experiment table } """ table=i['table'] si=i['sort_index'] isi=int(si) for sg in table: x=table[sg] y=sorted(x, key=lambda var: 0 if var[isi] is None else var[isi]) table[sg]=y return {'return':0, 'table':table}
5f33731cf8bcc6ea34d2c62cdf0ba20dd97b9aa6
696,581
def civic_eid1756_statement(): """Create test fixture for CIViC EID1756 statement.""" return { "id": "civic.eid:1756", "description": "Study of 1817 PCa cases and 2026 cancer free controls to clarify the association of (MTHFR)c.677C>T (and c.1298A>C ) of pancreatic cancer risk in a population of Han Chinese in Shanghai. Results indicated a lower risk for the heterozygous CT genotype and homozygous TT genotype carriers of (MTHFR)c.677C>T which had a significantly lower risk of developing pancreatic cancer compared with the wild-type CC genotype.", # noqa: E501 "direction": "supports", "evidence_level": "civic.evidence_level:B", "proposition": "proposition:002", "variation_origin": "germline", "variation_descriptor": "civic.vid:258", "disease_descriptor": "civic.did:556", "method": "method:001", "supported_by": ["pmid:27819322"], "type": "Statement" }
663d169284c5e5be54367f511de411b5036baf08
696,582
def len_(string): """:yaql:len Returns size of the string. :signature: string.len() :receiverArg string: input string :argType string: string :returnType: integer .. code:: yaql> "abc".len() 3 """ return len(string)
a73a32c80c1016e3ca2735d56aeec1f98aa83767
696,583
from typing import List from typing import Union from typing import Callable from typing import Any def replace_value( data: dict, loc: List[Union[str, int]], func: Callable[[Any], Any] ) -> bool: """ replace a value in a dict by passing it through a callable. the location is a list of str (key) and int (index) items. """ sel = data for l in loc[:-1]: try: sel = sel[l] except: print("replace_value failed at: %s for key/index: %s" % (sel, l)) return False try: sel[loc[-1]] = func(sel[loc[-1]]) except: print("replacing failed for %s" % sel.get(loc[-1])) return False return True
6b89ec607fdeb5757468e1904bcb8b7af8d0b6ab
696,584
def getattr_from_collumn_name(system, column_name): """ Enables to return binary system attribute from column name. :param system: BinarySystem :param column_name: str; :return: requested attribute """ colname_split = column_name.split('__') if len(colname_split) > 2: raise ValueError('Column name can contain only single `__` separator.') elif len(colname_split) > 1: if colname_split[0] not in ['primary', 'secondary', 'star']: raise ValueError('Only `primary` or `secondary` prefix can be in front of the `__` separator.') if colname_split[1][:4] == 'spot': star = getattr(system, colname_split[0]) spot = star.spots[int(colname_split[1][4])-1] return getattr(spot, colname_split[1][6:]) return getattr(getattr(system, colname_split[0]), colname_split[1]) else: if colname_split[0] == 'critical_surface_potential': return getattr(system.primary, colname_split[0]) elif colname_split[0] == 'overcontact': morph = getattr(system, 'morphology') return 1 if morph in ['over-contact', 'overcontact'] else 0 else: return getattr(system, colname_split[0])
98726d9d9dca6152f85548981c18f9d5290a380b
696,585
def cooler_cost(cooling_power): """Gives back the approximative price of a cooler with a given loss, doi:10.1088/1757-899X/101/1/012001 """ return 1.81 * cooling_power ** 0.57 * 1e3
23703a86a52f82f2165574665e8693f07d0638d4
696,586
from subprocess import Popen, PIPE def get_git_version(git='git'): """Use ``git describe`` to generate a version string. Parameters ---------- git : :class:`str`, optional Path to the git executable, if not in :envvar:`PATH`. Returns ------- :class:`str` A :pep:`386`-compatible version string. Notes ----- The version string should be compatible with :pep:`386` and :pep:`440`. """ myversion = '0.0.1.dev0' try: p = Popen([git, "describe", "--tags", "--dirty", "--always"], universal_newlines=True, stdout=PIPE, stderr=PIPE) except OSError: return myversion out, err = p.communicate() if p.returncode != 0: return myversion ver = out.rstrip().split('-')[0]+'.dev' try: p = Popen([git, "rev-list", "--count", "HEAD"], universal_newlines=True, stdout=PIPE, stderr=PIPE) except OSError: return myversion out, err = p.communicate() if p.returncode != 0: return myversion ver += out.rstrip() return ver
7a1750a6cad036f26e1f0e3aaf1e39a5e652d093
696,587
def _is_clustal_seq_line(line): """Returns True if line starts with a non-blank character but not 'CLUSTAL' Useful for filtering other lines out of the file. """ return line and (not line[0].isspace()) and\ (not line.startswith('CLUSTAL')) and (not line.startswith('MUSCLE'))
78c9067fa02254409d33fdb9f243c74d549138ce
696,588
import random import colorsys def lincolor(num_colors, saturation=1, value=1, normalized=False): """Creates a list of RGB colors linearly sampled from HSV space with randomised Saturation and Value # Arguments num_colors: Integer. saturation: Float or `None`. If float indicates saturation. If `None` it samples a random value. value: Float or `None`. If float indicates value. If `None` it samples a random value. # Returns List, for which each element contains a list with RGB color # References [Original implementation](https://github.com/jutanke/cselect) """ RGB_colors = [] hues = [value / num_colors for value in range(0, num_colors)] for hue in hues: if saturation is None: saturation = random.uniform(0.6, 1) if value is None: value = random.uniform(0.5, 1) RGB_color = colorsys.hsv_to_rgb(hue, saturation, value) if not normalized: RGB_color = [int(color * 255) for color in RGB_color] RGB_colors.append(RGB_color) return RGB_colors
6540aba364ae13d9cf1d51169674b119f0d3ff82
696,589
def get_surrounding(field, row, col, value): """Finds how many mines are in surrounding squares of a certain coordinate""" count = 0 # checks surrounding squares for i in range(row - 1, row + 2): for k in range(col - 1, col + 2): # checks if the current coordinate is on the field and is the value if 0 <= i < field.shape[0] and 0 <= k < field.shape[1] and field[i, k] == value: count += 1 return count
5ad8e11e7062ee7d44975b119b69f28937e4083a
696,590
import time def epoch2time(epoch): """Convert an epoch timestamp to a 9-tuple (time.struct_time).""" return time.localtime(epoch)
33779e449fa48b2eb57bb40bab048eb47f2aa7bd
696,591
import math def water_saturation_vapour_pressure_iapws(t): """Returns the water vapour pressure according to W. Wagner and A. Pruss (1992) J. Phys. Chem. Reference Data, 22, 783–787. See http://www.kayelaby.npl.co.uk/chemistry/3_4/3_4_2.html Valid only above the triple point. The IAWPS formulation 1995 (Wagner and Pruß, 2002) is valid in the temperature range 273.16 K < T < 647.096 K. See http://cires1.colorado.edu/~voemel/vp.html :param t: water temperature (°C) :return: water vapour pressure in Pascal (Pa) """ tc = 647.096 # K pc = 22064000 # Pa a1 = -7.85951783 a2 = 1.84408259 a3 = -11.7866497 a4 = 22.6807411 a5 = -15.9618719 a6 = 1.80122502 t += 273.15 tau = 1 - t/tc return pc * math.exp((a1 * tau + a2 * tau**1.5 + a3 * tau**3 + a4 * tau**3.5 + a5 * tau**4 + a6 * tau**7.5) * tc / t)
aa424bc63b3165bc439dacbf8748f478654a15b2
696,592
import codecs import os import re def find_version(*file_paths) -> str: """Tries to extract a version from the given path sequence""" def read(*parts): """Reads a file from the given path sequence, relative to this file""" here = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(here, *parts), "r") as fp: return fp.read() # Read the file and match the __version__ string file = read(*file_paths) match = re.search(r"^__version__\s?=\s?['\"]([^'\"]*)['\"]", file, re.M) if match: return match.group(1) raise RuntimeError(f"Unable to find version string in {file_paths}!")
fcc84f3c009cd94ee6a3548508146e70dab54ae1
696,593
def setup_csv(yr): """ Setup the output file """ out = open("/mesonet/share/climodat/ks/%s_monthly.csv" % (yr,), 'w') out.write("stationID,stationName,Latitude,Longitude,") for i in range(1, 13): for v in ["MINT", "MAXT", "PREC"]: out.write("%02i_%s,C%02i_%s," % (i, v, i, v)) out.write(("%i_MINT,CYR_MINT,%i_MAXT,CYR_MAXT,%i_PREC,CYR_PREC,\n" ) % (yr, yr, yr)) return out
f935310a2207417eaa9c95aaffb9996e681c43bb
696,594
def cleanPath(path): """ Cleans up raw path of pdf to just the name of the PDF (no extension or directories) :path: path to pdf :returns: The PDF name with all directory/extensions stripped """ filename = path.split("/")[-1] filename = filename.split(".")[0] return filename
0cafdf13cbdc784abaedb49b9b757054c3ff25f6
696,595
def results(): """Fixture providing example results.""" return [{"a": 1, "b": 2}, {"b": 1}]
dc756e63da375c58f81da47fd830cd2f7480b9f9
696,596
def make_response_subscription(subscription_record, params): """ Marshall a json subscription object from db to msg format. :param user_auth: :param subscription_record: :param params: :return: """ ret = {} try: ret = subscription_record except Exception as err: raise Exception("failed to format subscription response: " + str(err)) for removekey in ["record_state_val", "record_state_key"]: ret.pop(removekey, None) return ret
edeace6877559dbd18fcb5bbb9fb00156cecc109
696,597
from typing import Union def get_first_line(o, default_val: str) -> Union[str, None]: """ Get first line for a pydoc string :param o: object which is documented (class or function) :param default_val: value to return if there is no documentation :return: the first line which is not whitespace """ doc: Union[str, None] = o.__doc__ if doc is None: return default_val lines = doc.split("\n") for line in lines: if line == "" or line.isspace(): continue return line.strip() return default_val
c7003dc1cc5e22ea08c35485e593fd539597637d
696,598
def capStrLen(string, length): """ Truncates a string to a certain length. Adds '...' if it's too long. Parameters ---------- string : str The string to cap at `length` characters. length : int The maximum length of `string`. """ if length <= 2: raise Exception("length must be at least 3 in utils.capStrLen") if len(string) <= length: return string return string[0 : length - 3] + "..."
a7b2e264d867d3f5263d7037fb91220128437021
696,599
def readMap( infile, columns = (0,1), map_functions = (str, str), both_directions=False, has_header = False): """read a map (pairs of values) from infile. returns a hash. Use map functions to convert elements. If both_directions is set to true, both mapping directions are returned. """ m = {} r = {} n = 0 if columns == "all": key_column = 0 value_column = None else: key_column, value_column = columns key_function, value_function = map_functions for l in infile: if l[0] == "#": continue n += 1 if has_header and n == 1: continue d = l[:-1].split("\t") if len(d) < 2: continue key = key_function(d[key_column]) if value_column: val = value_function(d[value_column]) else: val = tuple(map( value_function, [d[x] for x in range(1, len(d))] )) m[key] = val if val not in r: r[val] = [] r[val].append(key) if both_directions: return m, r else: return m
c84a3629934edcc8f0cbf221590a2266b7dcd7a8
696,601
import uuid def valid_enumerated_design_space_data(): """Produce valid enumerated design space data.""" return dict( module_type='DESIGN_SPACE', status='VALIDATING', status_info=None, active=False, display_name='my enumerated design space', schema_id='f3907a58-aa46-462c-8837-a5aa9605e79e', id=str(uuid.uuid4()), config=dict( type='EnumeratedDesignSpace', name='my enumerated design space', description='enumerates some things', descriptors=[ dict( type='Real', descriptor_key='x', units='', lower_bound=1.0, upper_bound=2.0, ), dict( type='Categorical', descriptor_key='color', descriptor_values=['red', 'green', 'blue'], ) ], data=[ dict(x=1, color='red'), dict(x=2.0, color='green') ] ) )
fd9fa5c3cd8c598eaa74ca513b5949ba8f9eb877
696,603
def fermat_test(n): """Statistically test the primality of a number using the Fermat algorithm. """ return (2**(n - 1) % n) == 1
15883ffd8411139b40692c8f6b8a26ec0dd71fe3
696,604
def get_line_indentation_spacecount(line: str) -> int: """ How many spaces is the provided line indented? """ spaces: int = 0 if (len(line) > 0): if (line[0] == ' '): for letter_idx in range(len(line)): if (letter_idx + 1 == len(line)) or (line[letter_idx + 1] != ' '): spaces = letter_idx + 1 break return spaces
b70c65431fada6367a12c1d38f0420bcd66f1d8a
696,605
def lengthOfLongestSubstring3(s: str) -> int: """Sliding window + set.""" if not s: return 0 left = 0 lookup = set() n = len(s) max_len = 0 cur_len = 0 # Unlike v2, the outer loop means something different: here i is the right boundary, in v2 it is the left boundary for i in range(n): cur_len += 1 while s[i] in lookup: # e.g. 'abba': shrink the window lookup.remove(s[left]) left += 1 cur_len -= 1 if cur_len > max_len: max_len = cur_len lookup.add(s[i]) return max_len
3fd1561ea7efcd63ebceeae95235a64c1c80cf51
696,606
def form_json_data(formset, questions: list) -> list: """ For each ``form`` in ``formset``, extracts the user's answer_choice and adds the question details along with its attributes as a dictionary. This is the actual data that is stored in the database as answer, after passing necessary validation. """ json_data = [] for form, question in zip(formset, questions): valid_dict = { 'answer_choice': int(form.cleaned_data.get('answer_choice')), 'question': { 'subclass': question['subclass'], 'factor': question['factor'], 'number': question['number'] } } json_data.append(valid_dict) json_data = json_data return json_data
569935ef39c5ca21cb3c877c8f0a082cba6db36d
696,608
def _is_possible_expansion(acro: str, full: str) -> bool: """ Check whether all acronym characters are present in the full form. :param acro: :param full: :return: """ # Empty acronym is presented everywhere if not acro: return True # Non-empty acronym is not presented in empty full form if not full: return False # First char must be the same if acro[0].lower() != full[0].lower(): return False j = 0 # We then skip the first char, as it has already been checked for i in range(1, len(acro)): j += 1 while j < len(full): if acro[i].lower() == full[j].lower(): break j += 1 if j < len(full): return True return False
e76cace8592c99d4e5e965bcbf8cba17c7114ee1
696,609
import csv def make_pairlist(path, l1, l2): """Creates pair list for l1 and l2. Args: dir: output directory l1: language 1 l2: language 2 """ pairlist = [] with open(path) as stream: reader = csv.DictReader(stream) for row in reader: if row[l1] == "-" or row[l2] == "-": continue pairlist.append((row["GLOSS"], row[l1], row[l2])) return pairlist
635b21cc3da5cf647d3652c210c07324e029a638
696,610
def read_from_postgresql(sqlContext, config): """ reads from PostgreSQL database with given configurations into Spark DataFrame :type sqlContext: SQLContext Spark SQL Context for saving :type config : dict dictionary with PostgreSQL configurations :rtype : Spark DataFrame SQL DataFrame representing the table """ options = "".join([".options(%s=config[\"%s\"])" % (opt, opt) for opt in config.keys()]) command = "sqlContext.read.format(\"jdbc\")%s.load()" % options return eval(command)
bfa8b92a230403f65b74156198521a1ff2ce5715
696,611
import math def norma(v1): """Function that computes the norm of a vector (list of 2D points) -> float""" suma = 0 for i in range(len(v1)): suma += (v1[i][0]**2)+(v1[i][1]**2) ans = math.sqrt(suma) return ans
2571bb2707e0c463d59d33904239b72f3dc0fd84
696,612
import inspect import unittest def _skipIf(condition, _): """ @unittest.skipIf() decorator stub for Python 2.6- """ def decorator(test_item): if inspect.isclass(test_item) and \ issubclass(test_item, unittest.TestCase): test_item = type("DummyTestCase", (unittest.TestCase, ), {}) elif inspect.isfunction(test_item): def ret_none(obj): return None test_item = ret_none else: raise ValueError("unable to decorate {0}".format(test_item)) return test_item if condition: return decorator else: return lambda obj: obj
8e8a8d47cfe223f209d449860b33e9b029a37e1a
696,613
def from_context(cm): """Extract the value produced by a context manager""" with cm as x: return x
2d0be1d9d8c66487898e7f2d0f6649b952289468
696,614
def validate_performance(performance, dataset_generator, should_assert=False): """Validate the performance of all transformers for a dataset_generator. Args: performance (pd.DataFrame): The performance metrics of a transformer against a dataset_generator. dataset_generator (rdt.tests.datasets.BaseDatasetGenerator): The dataset generator to performance test against. should_assert (bool): Whether or not to raise AssertionErrors. Returns: list[bool]: A list of if each performance metric was valid or not. """ expected = dataset_generator.get_performance_thresholds() out = [] for test_name, value in performance.items(): function, metric = test_name.lower().replace(' ', '_').rsplit('_', 1) expected_metric = expected[function][metric] valid = value < expected_metric out.append(valid) if should_assert and not valid: raise AssertionError(f'{function} {metric}: {value} > {expected_metric}') return out
adcb31ccbbec4282568adc86f5cfafa3015aaa2b
696,615
def flatten(arr): """Flatten array.""" return [item for sublist in arr for item in sublist]
16555e7d4c04a0c6ffec03fb4005ddf534a474df
696,617
import copy def copyNewDancer(dancer): """ Copy the original dancer and lift all of its fingers :param dancer: :return: """ newDancer = copy.deepcopy(dancer) newDancer.releaseFingers() return newDancer
b09523a381c1d0afad606381a7efe863e97e8746
696,618
import optparse def parse_argv_split(options, argv, usage="%prog [args] [options]"): """Parse argument vector to a tuple with an arguments list and an option dictionary. """ parser = optparse.OptionParser(usage) optnames = [] has_default = [] for opt in options: parser.add_option(*opt[0], **opt[1]) # remember destination name if 'dest' in opt[1]: optnames.append(opt[1]['dest']) else: optnames.append(opt[0][1][2:]) # strip dest. name from long-opt # remember opts with defaults (in case def. is neg.) if 'default' in opt[1]: has_default.append(optnames[len(optnames)-1]) optsv, args = parser.parse_args(argv) # create dictionary for opt values by using dest. names opts = {} for name in optnames: v = getattr(optsv, name) if v or name in has_default: opts[name] = v return parser, opts, args
321e4879069ebda9e10685fb65dc03df1eebbaf0
696,619
def get_prefix_from_proxy_group(proxy_group): """ Return prefix by analyzing proxy_group name Args: proxy_group (str): proxy group name Returns: str: prefix if exists, empty string if not """ split_name = proxy_group.split("@")[0].split("-") # if there's only two sections, there's no prefix if len(split_name) <= 2: return "" return proxy_group.split("@")[0].split("-")[-3].strip()
04433d97dcecc4edd7288e1373d4d6bce5e07ee4
696,620
def get_mdot_code(rho, ucon, gdet, dx2, dx3): """Returns dMact in code units.""" dMacts = gdet * rho * ucon[:, :, :, 1] return dMacts[:21].sum() * dx2 * dx3 / 21.
6da5dc0205a7c8a57af473dcaee388fa0f84b0ff
696,621
import sys import yaml def get_cfg(filename=None): """Read diagnostic script configuration from settings.yml.""" if filename is None: filename = sys.argv[1] with open(filename) as file: cfg = yaml.safe_load(file) return cfg
308d6981d8622d0cb708069a00919e914cdc05fe
696,622
import json def j2d(data): """Convert json to dict. Parameters ---------- data : string JSON-formatted string. Returns ------- dict Data as dict. """ return json.loads(data)
31abd9bbf8b0eecf2fb2cd12310bb1642ec209c8
696,623
def read_vx(pointdata): """Read a variable-length index.""" if pointdata[0] != 255: index= pointdata[0]*256 + pointdata[1] size= 2 else: index= pointdata[1]*65536 + pointdata[2]*256 + pointdata[3] size= 4 return index, size
cc3c2335430bcff12a2824013b450a8a71ce9379
696,624
def group(url, route_list): """ Group route """ for route in route_list: route.route_url = url + route.route_url return route_list
494b432bf0f6a88e76aeff45fd28bac658a15930
696,625
from typing import Type from enum import Enum def empty(enumeration: Type[Enum]): """ Class decorator for enumerations ensuring an enum-superclass has no values. """ if len(enumeration.__members__) > 0: raise ValueError( "There should not be any values in {0!r}".format(enumeration)) return enumeration
5be36138da667b5f83fcab4f8e9a881d569efa9e
696,626
def are_passwords_matching(form) -> bool: """ Checks whether password field and password confirm field have same value :param form: filled Django form instance :return: true if fields have same value, false otherwise """ return form.cleaned_data['password'] == form.cleaned_data['password_confirm']
02067f5ff2f9914dfdf6dd81011b15e6007db457
696,627
import sys def check_tokenization(sents): """Check whether an input text is tokenized (borrowed from sacreBLEU)""" too_much = 100 tokenized_count = 0 for sent in sents: if sent.endswith(' .'): tokenized_count += 1 # Too much is too much if tokenized_count == too_much: print( f"That's {too_much} lines that end in a " "tokenized period ('.')", file=sys.stderr, ) print( "It looks like you forgot to detokenize your data, " "which may hurt your score and make your results " "difficult to replicate.", file=sys.stderr, ) return too_much return tokenized_count
ba69d1cfd9f3b4271cbedf9ad662a5f2dce209bb
696,628
def run_indel_caller(job, tumor_bam, normal_bam, univ_options, indel_options): """ Run an indel caller on the DNA bams. This module will be implemented in the future. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict indel_options: Options specific to indel calling :return: fsID to the merged fusion calls :rtype: toil.fileStore.FileID """ job.fileStore.logToMaster('INDELs are currently unsupported.... Skipping.') indel_file = job.fileStore.getLocalTempFile() output_file = job.fileStore.writeGlobalFile(indel_file) job.fileStore.logToMaster('Ran INDEL on %s successfully' % univ_options['patient']) return output_file
cae99c965675091ce3a1d46c01254f54e656bd4f
696,629
def getAdded(before, after): """ Find any items in 'after' that are not in 'before' """ #message("before:" + str(before)) #message("after:" + str(after)) # Any items in 'before' are removed from 'after' # The remaining list is of the newly added items for b in before: try: i = after.index(b) after.remove(b) except ValueError: pass #message("new:" + str(after)) return after
c01cc13275c85219abbd706f59fa611464602b80
696,630
def povečaj(niz, indeks): """ Capitalizes the character in the string `niz` at position `indeks` """ return niz[:indeks] + niz[indeks].upper() + niz[indeks + 1:]
bac906890345af9a1baa0447dbd53782559d0406
696,632
def getSBORegex(): """ Return the regex string for an SBO term. Matches whole line. """ return r'^\s*([^.\s]+)\.sboTerm\s*=\s*(SBO:)?([0-9]+)\s*(;)?\s*((#|(//)).*)?$'
25ee2b577cf9144bb06f5b41ddc48ea901dcdc58
696,633
import torch def neginf(dtype): """ Return a representable finite number near -inf for a dtype. """ if dtype is torch.float16: return -65504 else: return -1e20
b6834f7cf2c25d60b679bcbda32e3473cc20b9de
696,634
def _get_value(input): """Return the value stolen from input.""" if hasattr(input, 'numpy'): return input.numpy() return input
93fddf0dd50a200c59d5293657677d60d7b42edd
696,635
from typing import Dict import subprocess def get_acls(scope: str, profile: str) -> Dict[str, str]: """Get the list of acls from the supplied secret scope :param str scope: The scope to extract from :param str profile: The profile configured for the workspace :return: The available groups :rtype: Dict[str, str] """ # Get the acls for the scope acl_query = 'databricks secrets list-acls' acl_query += f' --profile {profile}' acl_query += f' --scope {scope}' # Run and enforce success sp = subprocess.run(acl_query, capture_output=True) sp.check_returncode() # Extract the existing scopes acl_lines = [l.strip('\r') for l in sp.stdout.decode().split('\n')[1:]] acl_lines = [l for l in acl_lines if l.replace('-', '').strip()] acl_lines = [[elem for elem in l.split(' ') if elem] for l in acl_lines] # Turn acls int a dictionary existing_acls = {acl[0]: acl[1] for acl in acl_lines} return existing_acls
6a20d33e0fb53e82a46c113e14fcba66b02919d3
696,636