content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def coerce_value(val):
    """Coerce a raw config string into int, float, bool, list, or str."""
    def _is_number(text):
        try:
            float(text)
        except ValueError:
            return False
        return True

    if _is_number(val):
        # prefer int when the text parses as one, else fall back to float
        try:
            return int(val)
        except ValueError:
            return float(val)
    lowered = str(val.lower())
    if lowered in ('true', 'false'):
        return lowered == 'true'
    if ',' in val:
        # comma-separated values become a list, each element coerced in turn
        return [coerce_value(part.strip()) for part in val.split(',')]
    return val
f3825e892dda0bc4da2719dfe55fd9d64d520bee
26,784
def l(lam, f, w):
    """Compute l as the product of lam, f and w."""
    product = lam * f * w
    return product
1dca0c85aac1832033fd46b2cf6c9e9045a3c210
26,788
def isRunning(proc):
    """Is the process running?

    Parameters:
      * proc (psutil.Popen): Process to be checked, or None.

    Returns:
      * bool: True if the process is running; False when *proc* is None.

    Bug fixed: the original executed ``proc.poll()`` when ``proc == None``,
    which always raised AttributeError (None has no ``poll``). A missing
    process is now reported as not running.
    """
    if proc is None:
        return False
    return proc.is_running()
207afdbf6c6ba5f7a32dcf58f0d0a19cb5ad6085
26,789
import json


def get_profile_names(file):
    """Return the profile names stored in a JSON profiles file.

    :param file: path to a JSON file holding a list of profile objects,
        each with a ``'name'`` key
    :return: list of profile name strings

    If *file* does not exist, ``profile_example.json`` is copied to
    ``profiles.json`` and the example's profile names are returned.
    (Bug fixed: the original populated ``profiles.json`` from the example
    but then returned an empty list, discarding the example's names.)
    """
    try:
        with open(file) as f:
            data = json.load(f)
    except FileNotFoundError:
        with open('profile_example.json') as f:
            data = json.load(f)
        with open('profiles.json', 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
    return [prof['name'] for prof in data]
406cfe436b8f7060a049c4af02877350aec68a65
26,790
import functools
import yaml


def definer(func):
    """Decorator: load definition.yaml, hand the dict to *func*, write it back."""
    @functools.wraps(func)
    def inner(def_file, *args, **kwargs):
        with open(def_file, "r") as handle:
            current = yaml.load(handle, Loader=yaml.FullLoader) or {}
        updated = func(current, *args, **kwargs)
        with open(def_file, "w") as handle:
            yaml.dump(updated, handle)
    return inner
45003de90fd4ebe9e075315b0423cfb1105a1879
26,793
def profile_management(riven, prof, punct):
    """Method used to filter the profiles punctuation.

    Adjusts the score *punct* of profile *prof* for riven *riven* based on
    weapon type, disposition and weapon name, and returns the adjusted score.
    Scores of -20 effectively disqualify the profile.
    """
    # riven_type 3: melee-specific adjustments — TODO confirm type codes
    if riven.riven_type == 3:
        # Heavy/Gunblade profiles lose half their score on weapon types
        # outside this heavy-weapon list
        if prof.name in ["Heavy", "Gunblade"] and (
            riven.weapon["type"]
            not in [
                "Gunblade",
                "Scythe",
                "Two-handed nikana",
                "Dual daggers",
                "Dagger",
                "Rapier",
                "Claws",
                "Hammer",
                "Machete",
            ]
        ):
            punct /= 2
        if prof.name in ["Heavy", "General use"] and "Gunblade" in riven.weapon["type"]:
            punct /= 3
        # Gunblade profile on a non-gunblade weapon is disqualified
        if prof.name == "Gunblade" and "Gunblade" not in riven.weapon["type"]:
            punct = -20
        # low-disposition statstick profiles are scaled down
        if riven.disposition < 1.1 and "statstick" in prof.name:
            punct = punct * riven.disposition / 2
    # riven_type 2: Co-primer profile only applies to these weapons
    if (
        riven.riven_type == 2
        and prof.name == "Co-primer"
        and riven.weapon_name not in ["epitaph", "kuva_nukor", "tenet_cycron"]
    ):
        punct = -20
    # weapon-specific sniper profiles on the wrong weapon are disqualified;
    # otherwise General use is halved on those specialised weapons
    if ("Rubico" in prof.name and "rubico" not in riven.weapon_name) or (
        "Vectis" in prof.name and "vectis" not in riven.weapon_name
    ):
        punct = -20
    elif (
        "rubico" in riven.weapon_name or "vectis" in riven.weapon_name
    ) and prof.name == "General use":
        punct /= 2
    return punct
43e2eb5203bec65d3b5bba459bda9caa582683be
26,794
def clean_dict(source, keys=(), values=()):
    """Removes given keys and values from dictionary.

    :param dict source: dictionary to filter
    :param iterable keys: keys whose entries are dropped
    :param iterable values: values whose entries are dropped
    :return dict: new dict containing only the surviving entries

    The mutable default arguments (``[]``) were replaced with tuples; only
    membership is tested, so this is behaviorally identical while avoiding
    the shared-mutable-default pitfall.
    """
    return {
        key: value
        for key, value in source.items()
        if key not in keys and value not in values
    }
f521ecd878ec0f1a5706d19fec13841d2381745d
26,795
def compute_target(difficulty_target_bits):
    """Calculate the target hash threshold for a given difficulty (2^(256-bits))."""
    exponent = 256 - difficulty_target_bits
    return 2 ** exponent
65f4a5c9e01acf5f829edc949154171b6de96c19
26,796
def is_available(event):
    """Return True when *event* carries the 'transparency' attribute, else False.

    :param event: the event mapping to check
    """
    return 'transparency' in event
40fe7bb61d2f02a0a030f578cfe44ece79e16d52
26,797
def is_raisable(obj) -> bool:
    """Test if object is valid in a ``raise obj`` statement.

    Bug fixed: the original tested against ``Exception``, but anything
    deriving from ``BaseException`` is raisable — ``KeyboardInterrupt``,
    ``SystemExit`` and ``GeneratorExit`` were wrongly rejected.
    """
    return isinstance(obj, BaseException) or (
        isinstance(obj, type) and issubclass(obj, BaseException)
    )
64c4233b6b8fa0b1f9b8542dd1f06f658e10bf5d
26,798
import re


def check_if_url(string: str) -> bool:
    """Check whether *string* looks like a URL.

    Args:
        string (str): string to check

    Returns:
        bool: True when the string matches the URL pattern, False otherwise
    """
    pattern = re.compile(
        r"^(http://www\.|https://www\.|http://|https://)?[a-z0-9]+([\-.][a-z0-9]+)*\.["
        r"a-z]{2,5}(:[0-9]{1,5})?(/.*)?$")
    return bool(pattern.match(string))
badf75a2280fa0e88a162bd853f5f33f231d0137
26,799
def get_validcmds():
    """Return the list of sub-commands the CLI accepts."""
    return [
        "showaccount", "newaccount", "deploy", "call",
        "sendtx", "list", "txinput", "checkaddr", "usage",
    ]
58c66bba38f9a7fab049cb7d082ba91d58b471f4
26,800
def mergetime(small, big):
    """Fold one time record into another (mutates and returns *big*).

    *small*'s slots are appended to *big*'s slot list, and *small* itself is
    recorded in *big*'s 'times' list.
    """
    big['times'].append(small)
    big['slots'].extend(small['slots'])
    return big
ed8057100168e376f81b773d8f9ec38da5f2c25d
26,802
from unittest.mock import Mock


def mocked_morning():
    """Return a Mock standing in for a current-daytime provider.

    Calling the returned mock always yields ``"morning"``.
    """
    return Mock(return_value="morning")
47bc68f376c3092ec0c91bc3e51e799744198587
26,803
def auto_service(pipeline_settings=None, services=None):
    """Automatically enable service for deployment types.

    Args:
        pipeline_settings (dict): Settings from *pipeline.json*; must contain
            a ``'type'`` key.
        services (dict): Services to enable in IAM Policy; mutated in place
            when provided.

    Returns:
        dict: Services.

    Bug fixed: both defaults were shared mutable dicts; ``services['lambda']``
    mutated the default, so a lambda deployment leaked into every later call.
    Fresh dicts are now created per call.
    """
    if pipeline_settings is None:
        pipeline_settings = {}
    if services is None:
        services = {}
    deployment_type = pipeline_settings['type']
    if deployment_type == 'lambda':
        services['lambda'] = True
    return services
3075bf909174ba21d0469be74a6880b99197d431
26,804
def color_list(s):
    """Convert a list of RGB components to a list of triples.

    Example: the string '255,127,0:127,127,127:0,0,255' is converted to
    [(255,127,0), (127,127,127), (0,0,255)].

    Bug fixed: on Python 3 the original returned a generator of ``map``
    objects rather than the documented list of integer triples.
    """
    return [tuple(int(c) for c in rgb.split(',')) for rgb in s.split(':')]
e3c48a2e9c0a2b86c39670317a18916f1cb8ddff
26,805
import argparse


def parse_args():
    """Build and parse the command-line arguments for the DFC feature run."""
    parser = argparse.ArgumentParser('Run DFC features.')
    # the four required string arguments share the same shape
    string_options = [
        ('--input', 'Path to the main data folder'),
        ('--output', 'Path to output folder'),
        ('--starts', 'Path starts json file'),
        ('--clusters', 'Path to clusters file concatentated_matrix_clusters'),
    ]
    for flag, description in string_options:
        parser.add_argument(flag, type=str, help=description, required=True)
    parser.add_argument('--features', type=int,
                        help='Number of features (brain areas) - for PCA: BA * 2,'
                             'for autoencoder: number of features in encoded',
                        required=True)
    return parser.parse_args()
6e728155d2821fc2dcb9a2f8f038ef28b0ed6467
26,807
import json


def read_corpus(file_path):
    """Read the given (SQuAD-style) corpus and return the questions and
    answers as two parallel lists.

    No text cleaning is done here (that happens later in the pipeline).
    qlist = ["question1", "question2", ...]
    alist = ["answer1", "answer2", ...]
    The lists are index-aligned: alist[i] is the answer to qlist[i].

    :param file_path: str, path to the question/answer JSON file
    :return: qlist: list of question strings
             alist: list of answer strings
    """
    qlist = []
    alist = []
    # Read the file and parse the JSON into a dict
    with open(file_path) as f:
        json_str = f.read()
        qa_dic = json.loads(json_str)
    # Walk the nested structure (data -> paragraphs -> qas) collecting pairs
    for data in qa_dic['data']:
        for para in data['paragraphs']:
            for qas in para['qas']:
                qlist.append(qas['question'])
                # unanswerable questions carry 'plausible_answers' instead
                if len(qas['answers']) == 0:
                    alist.append(qas['plausible_answers'][0]['text'])
                else:
                    alist.append(qas['answers'][0]['text'])
    assert len(qlist) == len(alist)  # ensure the lists stay aligned
    return qlist, alist
b760e92c6dd71336c34d166b668f4cd105357bd4
26,809
def lexical_parsimony(ind):
    """Comparison key implementing Lexicographical Parsimony Pressure.

    If two fitnesses tie, the tie is broken by preferring the smaller genome.

    .. [Luke2002] Luke, S., & Panait, L. (2002, July). Lexicographic parsimony
        pressure. In Proceedings of the 4th Annual Conference on Genetic and
        Evolutionary Computation (pp. 829-836).

    :param ind: individual to be compared
    :return: tuple usable as an altered comparison criterion
    """
    size_term = -len(ind.genome)
    if ind.problem.maximize:
        return (ind.fitness, size_term)
    # A key function bypasses ScalarProblem.worse_than(), which would normally
    # invert fitnesses for minimization problems — so flip the sign here.
    return (-ind.fitness, size_term)
f4aae82e2acdeaf59aeb6e5fd849407a17794313
26,812
def set_server(client, server_url):
    """Select a Windchill server.

    Args:
        client (obj): creopyson Client
        server_url (str): server URL or Alias

    Returns:
        None
    """
    payload = {"server_url": server_url}
    return client._creoson_post("windchill", "set_server", payload)
8ba2ad33ad68e41da74804a1a801328fec917f2c
26,813
import os


def change_working_dir(new_dir):
    """Change the process working directory.

    Returns the new working directory if it succeeded, ``1`` if it failed
    (return contract kept for existing callers).

    Bug fixed: the bare ``except`` also swallowed programming errors such as
    TypeError; only OS-level failures (missing directory, permissions) should
    mean "could not change directory".
    """
    try:
        os.chdir(new_dir)
    except OSError:
        return 1
    return new_dir
9c0dcf566609d2cd25f9a73cff3acd7f22c0ebb6
26,814
def get_model_prediction(tcdcn_cfNet, x):
    """Return the model's softmax prediction for *x*.

    Each value lies in the range [0, dim**2). Dropout is disabled for inference.
    """
    softmax_fn = tcdcn_cfNet.get_softmax_output
    return softmax_fn(x, dropout=0)
742c70ff5ce262b84cfe0aa8a1189daa84fb6731
26,815
def broadcast_model(model_state, sc):
    """Broadcast the model state to all Spark worker nodes for distributed
    inference, returning the broadcast handle."""
    return sc.broadcast(model_state)
cd9ab9561c02a09798c95c34db254c5c68dc3791
26,816
def markdown_documentation() -> str:
    """Load the expected API-documentation markdown fixture as a string."""
    fixture_path = "tests/test_data/test_docs.md"
    with open(fixture_path) as infile:
        return infile.read()
8c8e818000287ace9825c521d4f4452e7e99e28a
26,818
def dictify(storage_obj):
    """Build a plain ``dict`` from the items of *storage_obj*."""
    return {key: value for key, value in storage_obj.items()}
78d09322f27d5e9c4c3d87e63b3cdc46937e7f92
26,819
def rotation_geometry():
    """Build the rotation-test polygon.

    Returns
    -------
    dict:
        GeoJSON-style geometry object. Coordinates are in grid coordinates
        (Affine.identity()).
    """
    ring = [
        (481070, 4481140),
        (481040, 4481160),
        (481035, 4481130),
        (481060, 4481125),
        (481070, 4481140),  # closed ring: first point repeated
    ]
    return {'type': 'Polygon', 'coordinates': [ring]}
17cf840a4e4357b02b5c249f23a69bca96508a41
26,820
from typing import Union


def keycap_digit(c: Union[int, str]) -> str:
    """Return the keycap digit emoji for *c*.

    The emoji is the character followed by U+20E3 (combining enclosing keycap).
    """
    return str(c) + "\u20e3"
8db72c79acc173f8e325faca96274b1c08061eb7
26,821
def worksheet_as_list_of_lists(ws):
    """Convert a Worksheet object to a 2D list of its cell values."""
    return [[cell.value for cell in row] for row in ws.rows]
a6988996cdd6c64521b20ba3fe23339978dc941d
26,822
def compress(traj, copy=True, **kwds):
    """Wrapper for :meth:`Trajectory.compress`.

    Parameters
    ----------
    copy : bool
        Return compressed copy or in-place modified object.
    **kwds : keywords
        keywords to :meth:`Trajectory.compress`

    Examples
    --------
    >>> trc = compress(tr, copy=True, forget=['coords'])
    >>> trc.dump('very_small_file.pk')
    """
    target = traj.copy() if copy else traj
    target.compress(**kwds)
    return target
42ebefdddeaecd67f61dd884dab31a3ec7f864c9
26,823
import socket


def binary_ip(host):
    """binary_ip(host) -> str

    Resolve host and return IP as four byte string.

    Example:
        >>> binary_ip("127.0.0.1")
        '\\x7f\\x00\\x00\\x01'
    """
    resolved = socket.gethostbyname(host)
    return socket.inet_aton(resolved)
d32d8e243e684bbc87ea1e68f91e9a030a5c78c8
26,825
def _get_integer_intervals(xmin, xmax): """ For a given interval [xmin, xmax], returns the minimum interval [iXmin, iXmax] that contains the original one where iXmin and iXmax are Integer numbers. Examples: [ 3.45, 5.35] => [ 3, 6] [-3.45, 5.35] => [-4, 6] [-3.45, -2.35] => [-4, -2] Parameters ---------- xmin: float origin of the interval xmax: float end of the interval Returns ------- Returns the interval [iXmin, iXmax] iXmin: integer Minimum value of the integer interval iMmax: integer Maximum value of the integer interval """ if(xmin<0.0 and xmin!=int(xmin)): iXmin=int(xmin-1) else: iXmin = int(xmin) if(xmax==int(xmax)): iXmax=xmax else: iXmax=int(xmax+1) return iXmin, iXmax
7fa1fd68b960d793c49bc5266d65999c362357ac
26,826
import base64


def encode(digest):
    """9.2.1. Generating Signature — base64-encode *digest* into ASCII text."""
    text = base64.b64encode(digest).decode('ascii')
    # b64encode never appends a newline; the strip is kept for parity with
    # legacy encodestring-based callers.
    return text.rstrip('\n')
0be07f6b16ff4bb872233fa23a65e51d0f248b9d
26,827
def multByScalar(t, n=100.0):
    """Return a new tuple with every element of *t* multiplied by *n*."""
    return tuple(element * n for element in t)
e91e01b4d45a5b8f200bf65a0eb0466f1e30856a
26,828
def xstr(s):
    """Create a string from *s*, mapping None to the empty string.

    Args as data:
        s: input object

    Returns:
        object converted to a string ("" when s is None)
    """
    return "" if s is None else str(s)
b7ea8e906598d259244cc8a7cbb9cb1a142ba3d8
26,829
import json


def list_to_json_array(value_list, array_name, value_name=None, start=True, stop=True):
    """Render *value_list* as a named JSON array fragment.

    When *value_name* is given, each value is wrapped in a one-key dict so the
    array becomes a list of objects. *start*/*stop* control whether the
    fragment opens with '{' and closes with '}' (a non-final fragment closes
    with ',' instead).
    """
    open_brace = "{" if start else " "
    close_brace = "}" if stop else ","
    if value_name is None:
        payload = value_list
    else:
        payload = [{value_name: item} for item in value_list]
    return "%s\"%s\": %s%s" % (open_brace, array_name, json.dumps(payload), close_brace)
271530c5c318bba12cbb5bfe3e7c908874211923
26,830
import functools


def remove_null(aggfn):
    """Decorator: drop None entries from the input before calling *aggfn*."""
    @functools.wraps(aggfn)
    def filtered(xs):
        cleaned = [item for item in xs if item is not None]
        return aggfn(cleaned)
    return filtered
17caf0f54b7f4aa120f37a1de669b55c0fbe78e8
26,831
def word(name):
    """Function decorator used to tag methods that will be visible in the RPN
    builtin namespace, under the lowercased *name*."""
    def tagger(function):
        function.rpn_name = name.lower()
        return function
    return tagger
78d76a2a4d260a50aa82f188fde7064d7795ce2c
26,832
import torch


def tensor(data, *args, **kwargs):
    """Create a tensor from simple data structures.

    Thin pass-through to :func:`torch.tensor`; in ``treetensor`` this entry
    point also accepts tree-structured (dict) data.

    Examples::

        >>> import treetensor.torch as ttorch
        >>> ttorch.tensor([1, 2, 3])  # the same as torch.tensor([1, 2, 3])
        tensor([1, 2, 3])
    """
    return torch.tensor(data, *args, **kwargs)
438710f3733886646005367c04e97021c361f7bb
26,834
import numpy


def writeFEvals(fevals, precision='.2'):
    """Returns string representation of a number of function evaluations.

    :param fevals: number of function evaluations (may be inf)
    :param precision: printf-style precision fragment, e.g. '.2'
    """
    if numpy.isinf(fevals):
        return r'$\infty$'

    tmp = (('%' + precision + 'g') % fevals)
    res = tmp.split('e')
    if len(res) > 1:
        # exponent notation: strip leading zeros from the exponent
        # ('1e+05' -> '1e5')
        res[1] = '%d' % int(res[1])
        res = '%s' % 'e'.join(res)
        # NOTE(review): float('.2') is 0.2, so pr2 becomes '0.4' here —
        # presumably intended to bump significant digits; confirm.
        pr2 = str(float(precision) + .2)
        #res2 = (('%' + pr2 + 'g') % fevals)
        res2 = (('%' + pr2 + 'g') % float(tmp))
        # To have the same number of significant digits.
        if len(res) >= len(res2):
            res = res2
    else:
        # plain notation: split() produced a single element
        res = res[0]
    return res
f03813eded9e8f2dfee6b4ee972193643bac682d
26,835
from typing import Mapping


def isdict(obj):
    """Return True when *obj* is a mapping of some sort (dict-like)."""
    return isinstance(obj, Mapping)
641fea1920e9ed17fec2a51116cd8bcad816487a
26,836
def calc_multiplicity(mut_series, purity, cr_diff, c0):
    """Calculate multiplicity (times CCF) for the mutation.

    The allele-specific copy levels are first corrected by the baseline *c0*
    and coverage-ratio difference *cr_diff*, then combined with the tumor
    purity to rescale the observed VAF.
    """
    minor_adj = (mut_series['mu_minor'] - c0) / cr_diff
    major_adj = (mut_series['mu_major'] - c0) / cr_diff
    tumor_term = purity * (minor_adj + major_adj)
    normal_term = 2 * (1 - purity)
    # multiplicity * CCF for this mutation
    return mut_series['VAF'] * (tumor_term + normal_term) / purity
9b9b818ef7c13a653134ea27dc8518c7619d9428
26,838
import functools


def cached_property(fn):
    """Decorate *fn* as a property whose value is computed once per instance.

    Bug fixed: the original wrapped *fn* in ``property(lru_cache(maxsize=8))``.
    That cache was shared across *all* instances (more than 8 live instances
    thrashed it) and held strong references to ``self``, preventing garbage
    collection. ``functools.cached_property`` stores the computed value on the
    instance instead. Note the minor semantic difference: the attribute is
    writable after first computation, where ``property`` was read-only.
    """
    return functools.cached_property(fn)
3f2e296e4ef8d8f28966ad9708a1c9671a604085
26,839
import os


def which(program, win_allow_cross_arch=True):
    """Identify the location of an executable file.

    :param program: executable name (searched on PATH) or a path to check
        directly when it contains a directory component.
    :param win_allow_cross_arch: on Windows, also search the other
        architecture's system directory (Sysnative / SysWOW64).
    :return: full path to the executable, or None when not found.
    """
    def is_exe(path):
        # executable := regular file carrying the execute bit for this user
        return os.path.isfile(path) and os.access(path, os.X_OK)

    def _get_path_list():
        return os.environ['PATH'].split(os.pathsep)

    if os.name == 'nt':
        def find_exe(program):
            root, ext = os.path.splitext(program)
            if ext:
                if is_exe(program):
                    return program
            else:
                # no extension given: try each PATHEXT extension in turn
                for ext in os.environ['PATHEXT'].split(os.pathsep):
                    program_path = root + ext.lower()
                    if is_exe(program_path):
                        return program_path
            return None

        def get_path_list():
            paths = _get_path_list()
            if win_allow_cross_arch:
                # From a 32-bit process on 64-bit Windows, Sysnative exposes
                # the real System32; otherwise fall back to SysWOW64.
                alt_sys_path = os.path.expandvars(r"$WINDIR\Sysnative")
                if os.path.isdir(alt_sys_path):
                    paths.insert(0, alt_sys_path)
                else:
                    alt_sys_path = os.path.expandvars(r"$WINDIR\SysWOW64")
                    if os.path.isdir(alt_sys_path):
                        paths.append(alt_sys_path)
            return paths
    else:
        def find_exe(program):
            return program if is_exe(program) else None
        get_path_list = _get_path_list

    if os.path.split(program)[0]:
        # program includes a directory component: check that path directly
        program_path = find_exe(program)
        if program_path:
            return program_path
    else:
        # bare name: scan each PATH entry
        for path in get_path_list():
            program_path = find_exe(os.path.join(path, program))
            if program_path:
                return program_path
    return None
62e75e52154be89334f26f9f172e74b551b027d3
26,840
def merge_fcst_with_ds(dataset, predictor):
    """Append the predictor's forecasts to the hold-out tail of the dataset.

    :param dataset: object exposing ``df`` (a pandas DataFrame) and
        ``train_len`` (train fraction in [0, 1])
    :param predictor: object exposing ``predictions`` (iterable of numbers)
    :return: the hold-out slice of ``dataset.df`` with a new
        ``forecast_price`` column
    """
    predictions = getattr(predictor, "predictions")
    # coerce each prediction to a plain float
    fcst = []
    for i in predictions:
        var = float(i)
        fcst.append(var)
    dataf = getattr(dataset, "df")
    train_len = getattr(dataset, "train_len")
    # rows after the train split receive the forecasts
    length = int(len(dataf) * train_len)
    df_fcst = dataf[length:]
    # assumes len(fcst) == len(df_fcst) — TODO confirm upstream;
    # note df_fcst is a view/slice, so insert() may warn about writing
    # to a slice of the original frame
    df_fcst.insert(len(df_fcst.columns), "forecast_price", fcst, False)
    return df_fcst
352b977b3952174aae77716d0dc211006284ec51
26,841
def statics(output, labels):
    """Compute true positive, true negative, false positive and false
    negative counts from class scores *output* against *labels*."""
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double()
    correct_np = correct.numpy()
    preds_np = preds.numpy()
    tp = (correct_np * preds_np).sum()
    tn = (correct_np * (1 - preds_np)).sum()
    fp = ((1 - correct_np) * preds_np).sum()
    fn = ((1 - correct_np) * (1 - preds_np)).sum()
    return tp, tn, fp, fn
938b837c71314848c9eb7153be5e8b5613cc6a7d
26,842
import argparse


def parse():
    """Parse command-line arguments for scaling a Mininet example test,
    returning them as a plain dict."""
    parser = argparse.ArgumentParser(description='Scale a Mininet example test')
    parser.add_argument("-t", "--test", required=True,
                        help="directory containing an example network")
    parser.add_argument("-f", "--factor", type=int, required=True,
                        help="scaling factor")
    return vars(parser.parse_args())
202559ed729a5862031c1d9f332ffec57da19c4e
26,843
import yaml


def get_metadata(key):
    """Get url and xpath location for *key* from scraping_metadata.yaml.

    :param key: top-level key in the metadata file
    :return: the values of the mapping stored under *key*

    Bug fixed: ``open()`` was called inline without ever closing the handle;
    a context manager now closes it deterministically.
    """
    with open("scraping_metadata.yaml") as fh:
        data = yaml.load(fh, Loader=yaml.FullLoader)
    return data[key].values()
581f3ea5f77bd8edcf37c71472c00381d3307f31
26,844
import os


def make_folder(name, var):
    """Create folder *name* if it does not exist.

    Returns ``var + 1`` when the folder was created, else ``var`` unchanged.

    Bug fixed: the exists()+makedirs() pair raced with concurrent creators
    (TOCTOU) — another process creating the directory between the two calls
    crashed makedirs. EAFP with FileExistsError is atomic.
    """
    try:
        os.makedirs(name)
    except FileExistsError:
        return var
    return var + 1
c581546417c2066f9ec5d23fd266abb1e47ebc71
26,845
def remove_prediction_padding_old(prediction_distribution, target_value, loss_weight, target_real_value):
    """Masks prediction for artificial targets.

    Padding positions are marked with target value -1; their rows are dropped
    from the (flattened) prediction distribution, targets and loss weights.

    :param prediction_distribution: tensor reshaped to (-1, 361) class scores
    :param target_value: tensor of targets, -1 marking padding
    :param loss_weight: per-position loss weights (same layout as targets)
    :param target_real_value: unused here — kept for interface compatibility
    :return: [masked predictions, masked targets, None, masked loss weights]
    """
    prediction_distribution = prediction_distribution.contiguous().view(-1, 361)
    target_value = target_value.contiguous().view(-1)
    loss_weight = loss_weight.contiguous().view(-1)
    # boolean column mask expanded across the 361 class columns
    inter = (target_value != -1).view(-1, 1)
    mask = inter.expand(prediction_distribution.size(0), prediction_distribution.size(1))
    ret = [prediction_distribution[mask].view(-1, prediction_distribution.size(1)),
           target_value[(target_value != -1)],
           None]
    # NOTE(review): if loss_weight were None, the .view() above would already
    # have raised — this branch looks unreachable in the None case; confirm.
    if loss_weight is not None:
        ret.append(loss_weight[(target_value != -1)])
    else:
        ret.append(None)
    return ret
4d017aea84af61b850b1d56e123488fce675b9cd
26,848
def add_new_block(manager, advance_clock=None, *, parent_block_hash=None, data=b'', weight=None, address=None, propagate=True):
    """Create, resolve and propagate a new block

    :param manager: Manager object to handle the creation
    :type manager: :py:class:`hathor.manager.HathorManager`
    :param advance_clock: seconds to advance the manager's reactor clock
        after propagation (skipped when falsy)
    :param parent_block_hash: hash of the parent to mine on top of
    :param data: payload for the mining block
    :param weight: explicit block weight override (left as generated when None)
    :param address: output address for the mining block
    :param propagate: whether to propagate the block through the manager

    :return: Block created
    :rtype: :py:class:`hathor.transaction.block.Block`
    """
    block = manager.generate_mining_block(parent_block_hash=parent_block_hash, data=data, address=address)
    if weight is not None:
        block.weight = weight
    block.resolve()  # presumably solves the proof-of-work — confirm in hathor docs
    block.validate_full()
    if propagate:
        manager.propagate_tx(block, fails_silently=False)
    if advance_clock:
        manager.reactor.advance(advance_clock)
    return block
05fa760d1357caab0603db9f98058430542b58af
26,849
import os


def choice(path, judge_xml_name):
    """Scan the direct child folders of *path* and collect those containing
    an entry named *judge_xml_name*.

    :param path: original top-level directory
    :param judge_xml_name: name of the entry whose presence qualifies a child
    :return: (count of matching folders, list of matching entry paths)
    """
    xml_list = []
    parent = os.listdir(path)
    # NOTE(review): the counter starts at 1, so the printed/returned count is
    # one higher than len(xml_list) — confirm whether that is intentional.
    num = 1
    for child in parent:
        child_path = os.path.join(path, child)
        if not os.path.isdir(child_path):
            continue
        try:
            child_child = os.listdir(child_path)
        except Exception as e:
            # unreadable folder: treat as empty
            child_child = []
        for i in child_child:
            if i == judge_xml_name:
                num += 1
                xml_dir = os.path.join(child_path, i)
                xml_list.append(xml_dir)
    print("一共{}个文件夹".format(num))  # "{} folders in total"
    return num, xml_list
c33f2221062ba247273dd86a28eb7e9896638d6a
26,851
def dictElement(element, prefix=None):
    """Build a dictionary from the children of *element*, which must be an
    :class:`xml.etree.ElementTree.Element` instance.

    Keys are the children's tags with *prefix* (the namespace) stripped.
    Leaf children map to their text; children with their own children map to
    the child element itself.
    """
    result = {}
    plen = len(prefix) if isinstance(prefix, str) else 0
    for child in element:
        tag = child.tag
        if plen and tag.startswith(prefix):
            tag = tag[plen:]
        result[tag] = child.text if len(child) == 0 else child
    return result
20fe0475815fb6bbf685a45b54330738426987fd
26,853
def time_like(line):
    """Test if a line looks like a timestamp.

    Accepted shapes are a range like '12:33 - 12:48' (two colons plus a dash)
    or a single time like '19:03' (one colon); everything between separators
    must be digits.
    """
    colons = line.count(':')
    if colons == 2 and line.count('-') >= 1:
        digits_only = line.replace(':', '').replace('-', '').replace(' ', '')
        try:
            int(digits_only)
            return True
        except ValueError:
            pass
    if colons == 1:
        digits_only = line.replace(':', '')
        try:
            int(digits_only)
            return True
        except ValueError:
            pass
    return False
19675238633cafd1e30051ff18bb4d1a783663f6
26,856
def get_default_javascript():
    """Return the style of additional style sheets.

    @return list of files
    """
    return ["_static/require.js"]
aff12f1f7991063a4154e9bbc76ef7577a06af97
26,857
def find1Dpeak(arr):
    """Find a 1D peak in a list of comparable types.

    A 1D peak is an x in L such that it is greater than or equal to all its
    neighbors. ex. [1, 1, 2, 1] — the elements at index 0 and index 2 are
    peaks.

    Complexity: O(log(n)) where n = len(list)

    Bug fixed: the original used ``n / 2`` (float division on Python 3) as an
    index/slice bound, which raises TypeError for any list of length > 2;
    integer division ``//`` restores the intended behavior.
    """
    n = len(arr)
    if n == 1:
        return arr[0]
    if n == 2:
        return max(arr)
    mid = n // 2
    if arr[mid] < arr[mid - 1]:
        # a peak must exist in the rising left half
        return find1Dpeak(arr[:mid])
    if arr[mid] < arr[mid + 1]:
        # a peak must exist in the rising right half
        return find1Dpeak(arr[mid:])
    return arr[mid]
e8dc8c0ccf453a0246fa0d59e288e0e4591216e6
26,859
def set_origin(cut_plane, center_x1=0.0, center_x2=0.0): """ Establish the origin of a CutPlane object. Args: cut_plane (:py:class:`~.tools.cut_plane.CutPlane`): Plane of data. center_x1 (float, optional): x1-coordinate of origin. Defaults to 0.0. center_x2 (float, optional): x2-coordinate of origin. Defaults to 0.0. Returns: cut_plane (:py:class:`~.tools.cut_plane.CutPlane`): Updated plane of data. """ # Store the un-interpolated input arrays at this slice cut_plane.df.x1 = cut_plane.df.x1 - center_x1 cut_plane.df.x2 = cut_plane.df.x2 - center_x2 return cut_plane
f29ecbb82450adeecebdd2652392d37828751348
26,863
def AddProteinName(files, PDic, apecific_dic, MergeFileList):
    """Annotate a plate file with protein names and split the rows into
    control-only, control-free and combined merge files (appending).

    :param files: plate-file path without the '.txt' extension
    :param PDic: maps "col3\\tcol4\\tcol5" ids to
        [protein, profile, gene, extra1, extra2]
    :param apecific_dic: genes whose profile label is replaced by "A-specific"
    :param MergeFileList: [control-only path, without-control path, all path]
    :return: *files*, unchanged

    Bug fixed: the three output files were opened but never closed, so
    buffered rows could be lost; context managers now flush and close them.
    The unused ``plateName`` local was dropped.
    """
    controlList = ["control+", "SD+control"]
    with open(MergeFileList[0], 'a') as OnlyControl, \
         open(MergeFileList[1], 'a') as withoutControl, \
         open(MergeFileList[2], 'a') as All, \
         open(files + ".txt") as openFile:
        next(openFile)  # skip the header row
        for line in openFile:
            splits = line.strip().split("\t")
            modline = "\t".join(splits[:13])
            id = splits[3].strip() + "\t" + splits[4].strip() + "\t" + splits[5].strip()
            if id in PDic:
                # genes in apecific_dic get their profile label replaced
                if PDic[id][2].strip() in apecific_dic:
                    label = "A-specific"
                else:
                    label = PDic[id][1]
                wln = [PDic[id][0], "\t", label, "\t", PDic[id][2], "\t",
                       PDic[id][3], "\t", PDic[id][4], "\t", modline + "\n"]
                # wln[2] is the profile label: controls go to OnlyControl
                if wln[2] not in controlList:
                    withoutControl.writelines(wln)
                else:
                    OnlyControl.writelines(wln)
                All.writelines(wln)
    return files
cad467131a6c8adbbe190889034f167e8feb5d6d
26,866
import re


def srt_timestamps(content):
    """Extract the [start, end] timestamp pairs from SRT subtitle text."""
    pattern = r'\d{2}:\d{2}:\d{2},\d{3}.+\d{2}:\d{2}:\d{2},\d{3}'
    return [match.split(' --> ') for match in re.findall(pattern, content)]
8223c319f076b2405229e82e479a4e939c23db9a
26,867
import tempfile


def get_temp_file():
    """Create a temporary file opened for reading and writing text.

    Returns:
        File handle of the temporary file.
    """
    return tempfile.TemporaryFile(mode='w+')
05e0af4ec6009fc06764615339f8e2758a6bfc2b
26,868
import hashlib def _hash(mapping: dict) -> str: """ Return a hash of an entire dictionary. Args: mapping: a dictionary of hash-able keys to hash-able values, i.e., __str__ should return a unique representation of each object Returns: a hash of the dictionary """ # create a string to store the mapping in mapping_str = '' # iterate over the sorted dictionary keys to ensure reproducibility for key in sorted(mapping.keys()): # add the key value pairing to the string representation mapping_str += '{}{}'.format(key, mapping[key]) # convert the string to bytes and return the MD5 has of the bytes return hashlib.md5(bytes(mapping_str, 'utf8')).hexdigest()
3e608ad0739480bb4fba1367ed2534d09d9a2ed8
26,869
def parse_arg_type(arg):
    """
    Parse the type of an argument based on its string value.

    Only checks ints, floats, and bools, defaulting to string.
    For instance, "4.0" converts to float(4.0).

    :param arg: The argument value
    :return: The value converted to the proper type.
    """
    if type(arg) != str:
        # non-strings pass through untouched
        return arg
    for caster in (int, float):
        try:
            return caster(arg)
        except ValueError:
            pass
    lowered = arg.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    return arg
c2897f57f2d6df2e7e1c4a5c41cd35d04a386597
26,870
def get_similar(obj, labels, default=None, min_similarity=0.5):
    """Similar to fuzzy_get, but allows non-string keys and a list of possible keys

    Searches attributes in addition to keys and indexes to find the closest match.

    See Also:
      `fuzzy_get`
    """
    # NOTE(review): deliberately disabled — everything below the raise is
    # unreachable draft code, and `listify` is not defined in this file.
    raise NotImplementedError("Unfinished implementation, needs to be incorporated into fuzzy_get where a list of scores and keywords is sorted.")
    labels = listify(labels)

    def not_found(*args, **kwargs):
        # sentinel default: distinguishes "missing" from a real value
        return 0

    min_score = int(min_similarity * 100)
    for similarity_score in [100, 95, 90, 80, 70, 50, 30, 10, 5, 0]:
        if similarity_score <= min_score:
            similarity_score = min_score
        for label in labels:
            try:
                result = obj.get(label, not_found)
            except AttributeError:
                try:
                    result = obj.__getitem__(label)
                except (IndexError, TypeError):
                    result = not_found
            if result is not not_found:
                return result
        if similarity_score == min_score:
            if result is not not_found:
                return result
8dc10a166ea98453e8966f9117194ae93187e204
26,872
def is_substring_divisible(num: tuple) -> bool:
    """
    Return True if the pandigital number passes all the substring
    divisibility tests (Project Euler 43): for each prime p in
    (2, 3, 5, 7, 11, 13, 17), the 3-digit number formed by digits
    d2d3d4, d3d4d5, ... must be divisible by p.

    >>> is_substring_divisible((0, 1, 2, 4, 6, 5, 7, 3, 8, 9))
    False
    >>> is_substring_divisible((5, 1, 2, 4, 6, 0, 7, 8, 3, 9))
    False
    >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
    True
    """
    for start, prime in enumerate((2, 3, 5, 7, 11, 13, 17), start=1):
        window = num[start] * 100 + num[start + 1] * 10 + num[start + 2]
        if window % prime != 0:
            return False
    return True
bf212de2c82890c950e00fc81bdc781efc608ab7
26,874
import numpy


def _boundary_elevation(elevation, neighbours, edge_length, boundPts, btype):
    """
    This function defines the elevation of the TIN surface edges for 2
    different types of conditions:

    * Infinitely flat condition,
    * Continuous slope condition.

    Args:
        elevation: Numpy arrays containing the internal nodes elevation.
        neighbours: Numpy integer-type array containing for each nodes its neigbhours IDs.
        edge_length: Numpy float-type array containing the lengths to each neighbour.
        boundPts: Number of nodes on the edges of the TIN surface.
        btype: Integer defining the type of boundary: 0 for flat and 1 for slope condition.

    Returns:
        - elevation - numpy array containing the updated elevations on the edges.
        - parentID - numpy array mapping each edge point to its interior parent node.
    """
    # Flat/fixed/wall: each boundary node copies the elevation of its
    # nearest interior (non-boundary) neighbour.
    if btype == 0:
        missedPts = []
        for id in range(boundPts):
            ngbhs = neighbours[id, :]
            ids = numpy.where(ngbhs >= boundPts)[0]
            if len(ids) == 1:
                elevation[id] = elevation[ngbhs[ids]]
            elif len(ids) > 1:
                # several interior neighbours: take the closest one
                lselect = edge_length[id, ids]
                picked = numpy.argmin(lselect)
                elevation[id] = elevation[ngbhs[ids[picked]]]
            else:
                # no interior neighbour yet: retry after the first pass
                missedPts = numpy.append(missedPts, id)
        if len(missedPts) > 0:
            for p in range(len(missedPts)):
                id = int(missedPts[p])
                ngbhs = neighbours[id, :]
                # fall back to any valid neighbour with an assigned elevation
                ids = numpy.where((elevation[ngbhs] < 9.e6) & (ngbhs >= 0))[0]
                if len(ids) == 0:
                    raise ValueError('Error while getting boundary elevation for point ''%d''.' % id)
                lselect = edge_length[id, ids]
                picked = numpy.argmin(lselect)
                elevation[id] = elevation[ngbhs[ids[picked]]]

    # Slope: extrapolate the elevation linearly from the two closest
    # interior nodes along the edge direction.
    elif btype == 1:
        missedPts = []
        for id in range(boundPts):
            ngbhs = neighbours[id, :]
            ids = numpy.where(ngbhs >= boundPts)[0]
            if len(ids) == 1:
                # Pick closest non-boundary vertice
                ln1 = edge_length[id, ids[0]]
                id1 = ngbhs[ids[0]]
                # Pick closest non-boundary vertice to first picked
                ngbhs2 = neighbours[id1, :]
                ids2 = numpy.where(ngbhs2 >= boundPts)[0]
                lselect = edge_length[id1, ids2]
                if len(lselect) > 0:
                    picked = numpy.argmin(lselect)
                    id2 = ngbhs2[ids2[picked]]
                    ln2 = lselect[picked]
                    # linear extrapolation through (id2, id1) out to the edge
                    elevation[id] = (elevation[id1] - elevation[id2]) * (ln2 + ln1) / ln2 + elevation[id2]
                else:
                    missedPts = numpy.append(missedPts, id)
            elif len(ids) > 1:
                # Pick closest non-boundary vertice
                lselect = edge_length[id, ids]
                picked = numpy.argmin(lselect)
                id1 = ngbhs[ids[picked]]
                ln1 = lselect[picked]
                # Pick closest non-boundary vertice to first picked
                ngbhs2 = neighbours[id1, :]
                ids2 = numpy.where(ngbhs2 >= boundPts)[0]
                lselect2 = edge_length[id1, ids2]
                if len(lselect2) > 0:
                    picked2 = numpy.argmin(lselect2)
                    id2 = ngbhs2[ids2[picked2]]
                    ln2 = lselect2[picked2]
                    elevation[id] = (elevation[id1] - elevation[id2]) * (ln2 + ln1) / ln2 + elevation[id2]
                else:
                    missedPts = numpy.append(missedPts, id)
            else:
                missedPts = numpy.append(missedPts, id)
        if len(missedPts) > 0:
            for p in range(0, len(missedPts)):
                id = int(missedPts[p])
                ngbhs = neighbours[id, :]
                ids = numpy.where((elevation[ngbhs] < 9.e6) & (ngbhs >= 0))[0]
                if len(ids) == 0:
                    raise ValueError('Error while getting boundary elevation for point ''%d''.' % id)
                lselect = edge_length[id, ids]
                picked = numpy.argmin(lselect)
                elevation[id] = elevation[ngbhs[ids[picked]]]
        # lower the whole boundary slightly so flow leaves the domain
        # NOTE(review): placed inside the slope branch, matching the source
        # layout — confirm it should not also apply to btype == 0
        elevation[:boundPts] -= 0.5

    # Associate TIN edge point to the border for ero/dep updates
    parentID = numpy.zeros(boundPts, dtype=int)
    missedPts = []
    for id in range(boundPts):
        ngbhs = neighbours[id, :]
        ids = numpy.where(ngbhs >= boundPts)[0]
        if len(ids) == 1:
            parentID[id] = ngbhs[ids]
        elif len(ids) > 1:
            lselect = edge_length[id, ids]
            picked = numpy.argmin(lselect)
            parentID[id] = ngbhs[ids[picked]]
        else:
            missedPts = numpy.append(missedPts, id)
    if len(missedPts) > 0:
        for p in range(len(missedPts)):
            id = int(missedPts[p])
            ngbhs = neighbours[id, :]
            ids = numpy.where((elevation[ngbhs] < 9.e6) & (ngbhs >= 0))[0]
            if len(ids) == 0:
                raise ValueError('Error while getting boundary elevation for point ''%d''.' % id)
            lselect = edge_length[id, ids]
            picked = numpy.argmin(lselect)
            parentID[id] = ngbhs[ids[picked]]

    return elevation, parentID
ed0939e8a5857969d62b112bc3a03dc83b639f26
26,875
def fizz_buzz_custom_2(string_one="Fizz", string_two="Buzz", num_one=3, num_two=5):
    """Return the FizzBuzz sequence for 1..100.

    For each number, multiples of ``num_one`` contribute ``string_one`` and
    multiples of ``num_two`` contribute ``string_two`` (both, concatenated,
    when divisible by each). Numbers divisible by neither appear as the int
    itself, so the result is a mixed list of str and int.
    """
    sequence = []
    for number in range(1, 101):
        label = ""
        if number % num_one == 0:
            label += string_one
        if number % num_two == 0:
            label += string_two
        # An empty label is falsy, so plain numbers fall through unchanged.
        sequence.append(label or number)
    return sequence
6faed46a42fb3383a6c0cb42f7807657d82e0aa6
26,879
def get_custom_mode_name(system, custom_mode):
    """Return a display name for a system's custom mode.

    Currently just the decimal rendering of ``custom_mode``; ``system`` is
    accepted for interface compatibility but not used here.
    """
    mode_name = "%d" % custom_mode
    return mode_name
5debda1e4c504a63ca60cb8c8053ecfc6426f425
26,880
import typing


def AssembleAssertionData(row: typing.List[str]) -> typing.Dict:
    """Convert one row of input strings into an Open Badges v2 assertion dict.

    Column layout of ``row``:
      0 @context, 1 id, 2 type, 3 recipient type, 4 hashed flag ('true'/...),
      5 salt, 6 identity, 7 badge, 8 verification type, 9 issuedOn,
      10 expires (optional; only emitted when non-empty).
    The resulting dict is intended for JSON serialization.
    """
    assertion: typing.Dict = {}
    assertion["@context"] = row[0]
    assertion["type"] = row[2]  # required
    assertion["id"] = row[1]    # required
    # required; the hashed flag is a case-insensitive 'true' string
    assertion["recipient"] = {
        "type": row[3],
        "hashed": row[4].lower() == 'true',
        "salt": row[5],
        "identity": row[6],
    }
    assertion["badge"] = row[7]  # required
    assertion["verification"] = {"type": row[8]}  # required
    assertion["issuedOn"] = row[9]  # required
    if row[10]:  # optional field
        assertion["expires"] = row[10]
    return assertion
d34eaa00edac94a941b336ec8ed70daa722ebd3e
26,881
def test_module():
    """Connectivity check stub.

    Returning 'ok' indicates that the integration works as intended and the
    connection to the service is successful.
    """
    status = 'ok'
    return status
7d93a5f692280f5cce071005d2b11f0488ae023e
26,883
def visits():
    """Return the contents of ``visits.txt`` as the string form of its line list.

    NOTE(review): the original docstring claimed an int visit count is
    returned, but the code actually returns ``str(list_of_lines)`` — confirm
    which form callers expect before changing either.
    """
    with open("visits.txt", "r", encoding="utf-8") as handle:
        lines = handle.readlines()
    return str(lines)
cb406ef9e97fea359092d2cbed7bcf19991a4506
26,884
def get_threshold(rows, bands):
    """Approximate the LSH similarity threshold for a banding scheme.

    :param rows: rows per band
    :param bands: number of bands
    :return: threshold value
    :rtype: float
    """
    band_fraction = 1. / bands
    exponent = 1. / rows
    return band_fraction ** exponent
3c5a5e417e96797e18b571cb7c09597442cb5f44
26,886
def keypoint_2d_loss(criterion_keypoints, pred_keypoints_2d, gt_keypoints_2d, has_pose_2d):
    """Compute the confidence-weighted 2D keypoint reprojection loss.

    The last channel of ``gt_keypoints_2d`` is a binary confidence flag that
    indicates whether each keypoint annotation exists; it gates the per-joint
    loss. ``has_pose_2d`` is accepted for interface compatibility but is not
    used in this computation.
    """
    conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()
    per_joint_loss = criterion_keypoints(pred_keypoints_2d, gt_keypoints_2d[:, :, :-1])
    return (conf * per_joint_loss).mean()
d5ffabe27443d6c9ea251a0d8205e8ca33f724c8
26,887
def getInt(s, notFoundReturn=-1):
    """Return the first run of ASCII digits in ``s`` as a positive int.

    @param s [string] text to scan
    @param notFoundReturn value returned when ``s`` contains no digit
    @return int

    Rewritten from a two-phase scan with dead debug code into a single pass:
    find the first digit, extend to the end of the digit run, convert.
    Behavior is unchanged (e.g. "12.34" still yields 12).
    """
    digits = "0123456789"
    for start, ch in enumerate(s):
        if ch in digits:
            end = start + 1
            while end < len(s) and s[end] in digits:
                end += 1
            return int(s[start:end])
    return notFoundReturn
5f5c5b7bac0b160eddedfde1d2008c628dd7a9a6
26,893
def round_down(num, divisor):
    """Round ``num`` down to the nearest multiple of ``divisor``."""
    # Floor-divide then re-multiply; equivalent to num - (num % divisor).
    return (num // divisor) * divisor
e1c2506bdc1cb8a65c94e70de8c6b560c8e382dc
26,894
def civic_properties(line, response, host, entrez_id_column):
    """Given a line from the MAF file and a civic response, return
    ``[variant_name, drugs, link]``.

    ``line`` supplies chrom/start/stop (cols 4-6) and ref/alt alleles
    (cols 10-11) used to build the expected HGVS string; evidence items whose
    ``variant_hgvs`` matches it contribute drug names and a variant link.
    When several match, the last one wins (original behavior preserved).
    """
    variant_name = ''
    drugs = ''
    entrez_id = line[entrez_id_column]
    if response:
        link = "[civic gene](http://{}/#/events/genes/{}/summary)".format(host, entrez_id)
    else:
        link = "_gene not found in civic_"
    expected_hgvs = "{}:{}-{} ({}->{})".format(line[4], line[5], line[6], line[10], line[11])
    for variant in response:
        variant_name = variant.get('name')
        for evidence_item in variant.get('evidence_items'):
            if evidence_item.get('variant_hgvs') != expected_hgvs:
                continue
            if evidence_item.get('drugs'):
                drugs = ",".join(entry.get('name') for entry in evidence_item.get('drugs'))
            if evidence_item.get('drug'):
                drugs = evidence_item.get('drug')
            url = "http://{}/#/events/genes/{}/summary/variants/{}/summary/evidence/{}/summary".format(host, entrez_id, variant.get('id'), evidence_item.get('id'))
            link = "[civic variant]({})".format(url)
    return [variant_name, drugs, link]
5cfc3d6c9242d370a0f098e58d30d9375b1ae855
26,895
import platform


def get_platform_specific_executable_name(universal_executable_name):
    """Forms the platform-specific executable name from a universal executable name

    A universal executable name is the dot-separated name of the executable
    without extension, e.g. ``My.Awesome.Program``. On Windows this becomes
    ``My.Awesome.Program.exe``; elsewhere the dots are stripped, giving
    ``MyAwesomeProgram``.

    @param universal_executable_name Universal name of the executable
    @returns The platform-specific executable name
    """
    on_windows = platform.system() == 'Windows'
    if on_windows:
        return universal_executable_name + ".exe"
    return universal_executable_name.replace('.', '')
7e35f5abaf379e80b77d4717716604bea677eafa
26,899
from datetime import timezone


def datetime_local_to_datetime_utc(datetime_local):
    """Convert an aware local datetime to UTC.

    Uses the standard library's ``datetime.timezone.utc`` instead of the
    former third-party ``dateutil.tz.gettz('UTC')`` — the conversion result
    is identical and the external dependency is removed.

    :param datetime_local: timezone-aware datetime to convert
    :return: the same instant expressed in UTC
    """
    return datetime_local.astimezone(timezone.utc)
2793c296044ce66145e5d8cfb51e06dc8682a86a
26,900
import curses


def create_window(start_x, start_y, width, height):
    """Create window helper method with sane parameter names.

    ``curses.newwin`` takes ``(nlines, ncols, begin_y, begin_x)``; this
    wrapper reorders those into the more conventional (x, y, width, height).
    """
    window = curses.newwin(height, width, start_y, start_x)
    return window
8ba73d8cad2e2b730ea2f79795dd6407d6565192
26,901
def _get_metadata_revision(metadata_repo, mongo_repo, project, revision): """Get the metadata revision that corresponds to a given repository, project, revision.""" for metadata_revision in metadata_repo.list_revisions(): reference = metadata_repo.get_reference(metadata_revision, project) if not reference: # No reference for this revision. This should not happen but we keep trying in # case we can find an older revision with a reference. continue if mongo_repo.is_ancestor(reference, revision): # We found a reference that is a parent of the current revision. return metadata_revision return None
35436be9b586319d5e60dd99e0ff4a3ef6bed042
26,903
def list_sum(data=()):
    """Return the sum of all the values in ``data``, coercing each to int.

    The previous version used a mutable default argument (``data=[]``) — a
    classic Python pitfall — and a manual accumulation loop. The default is
    now an immutable empty tuple (behaviorally identical for summation) and
    the loop is replaced by ``sum`` over a generator.

    :param data: iterable of values convertible by ``int()``
    :return: integer sum (0 for empty input)
    """
    return sum(int(value) for value in data)
f6cd32df4b6c5343b335166889d4451d7d8ed116
26,904
def skip_prologue(text, cursor):
    """Skip any prologue elements found after cursor; return index of the rest.

    Advances past leading ``<!DOCTYPE``, ``<?xml`` and ``<!--`` constructs by
    jumping to the next ``>`` after each.
    """
    ### NOT AT ALL COMPLETE!!! definitely can be confused!!!
    prologue_elements = ("!DOCTYPE", "?xml", "!--")
    while True:
        openbracket = text.find("<", cursor)
        if openbracket < 0:
            break
        past = openbracket + 1
        matched = next(
            (e for e in prologue_elements if text.startswith(e, past)), None)
        if matched is None:
            break
        cursor = text.find(">", past)
        if cursor < 0:
            raise ValueError("can't close prologue %r" % matched)
        cursor = cursor + 1
    return cursor
32dd5e38ed8a9f35c16d0269cf478305f7ba9b70
26,905
import os
import tempfile


def make_temp_file():
    """Create a temporary file and return its path.

    Bug fix: the previous implementation used ``NamedTemporaryFile`` inside a
    ``with`` block, which deletes the file when the block exits — the
    returned path pointed at a file that no longer existed. ``mkstemp``
    creates the file and leaves it in place; the caller is responsible for
    removing it when done.

    :return: path to an existing, empty temporary file
    """
    fd, path = tempfile.mkstemp()
    os.close(fd)  # only the path is needed; don't leak the descriptor
    return path
55c9fce8bf7964eef8a109c88c57f3ce1535c798
26,906
def _positive(index: int, size: int) -> int: """Convert a negative index to a non-negative integer. The index is interpreted relative to the position after the last item. If the index is smaller than ``-size``, IndexError is raised. """ assert index < 0 # noqa: S101 index += size if index < 0: raise IndexError("lazysequence index out of range") return index
ee23eb6ffba4d539d333cd649304b5bd879f43df
26,907
def lennard_jones_potential(x):
    """Return the Lennard-Jones potential (reduced units) at distance ``x``."""
    repulsive = x ** -12
    attractive = x ** -6
    return 4 * (repulsive - attractive)
ec7efd7865169ebcfa2ec87acbb7067d89c14b6a
26,908
def parse_field_configured(obj, config):
    """Parse an object to a Telegram type based on the given configuration.

    :param obj: the object to parse
    :param config: either the target class itself, or a dict with keys:
        - 'class': target class
        - 'array': obj is a list of items
        - 'array_of_array': obj is a list of lists of items
    :return: the parsed object (possibly a list, or a list of lists)
    """
    if not isinstance(config, dict):
        # Plain class given: construct directly.
        return config(obj)
    foreign_type = config['class']
    if config.get('array') is True:
        return [foreign_type(item) for item in obj]
    if config.get('array_of_array') is True:
        return [[foreign_type(item) for item in inner] for inner in obj]
    return foreign_type(obj)
e03acf6149653b86b8565a492bfd1eeea15464b6
26,909
import argparse


def parse_args():
    """Build and parse the command-line arguments for zombie-pharmer.

    Bug fix: the ``-t``/``--targets`` help text was built from two adjacent
    string literals with no separating space ("...will" "fetch..."), which
    rendered as "willfetch" in --help output; a space has been added.

    :return: argparse.Namespace with the parsed options
    """
    # RawDescriptionHelpFormatter keeps the hand-formatted epilog examples intact.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='-----------------------------------------------------------------------------------\n'
               'Examples:\n\n'
               ' -Search Shodan for "printer" using the specified API key and probe each\n'
               ' result host for being a suitable zombie:\n\n'
               ' sudo python zombie-pharmer.py -s "printer" -a Wutc4c3T78gRIKeuLZesI8Mx2ddOiP4\n\n')
    # Shodan API key used for -s searches.
    parser.add_argument("-a", "--apikey", help="Your api key")
    # NOTE: numeric options are stored as strings; callers convert as needed.
    parser.add_argument("-c", "--concurrent", default='1000',
                        help="Enter number of concurrent requests to make; default = 1000")
    parser.add_argument("-n", "--numpages", default='1',
                        help="Number of pages deep to go in Shodan results with 100 results per page; default is 1")
    parser.add_argument("-s", "--shodansearch", help="Your search terms")
    parser.add_argument("-t", "--targets",
                        help="Enter an IP, a domain, or a range of IPs to fetch (e.g. 192.168.0-5.1-254 will "
                             "fetch 192.168.0.1 to 192.168.5.254; if using a domain include the subdomain if it exists: sub.domain.com or domain.com)")
    parser.add_argument("--ipfile", help="Test IPs from a text file (IPs should be separated by newlines)")
    return parser.parse_args()
5d17e879847d7585f369b00831783b32bc193152
26,912
import os
import inspect


def class_path(cls):
    """Return the directory containing the given class's source file.

    Classes defined in ``__main__`` (or whose source directory is empty)
    fall back to the current working directory; the result is always a
    real (symlink-resolved) path.
    """
    path = None
    if cls.__module__ != '__main__':
        path = os.path.dirname(inspect.getfile(cls))
    if not path:
        path = os.getcwd()
    return os.path.realpath(path)
8ba324ac314be6a2d8b2da319c6491b7cb16bf7a
26,914
def valid_arguments(arguments, valid_args):
    """Filter ``arguments`` down to the keys listed in ``valid_args``.

    List/tuple values are joined into a single newline-separated string;
    other values are passed through unchanged.
    """
    filtered = {}
    for key, value in arguments.items():
        if key not in valid_args:
            continue
        if isinstance(value, (tuple, list)):
            filtered[key] = "\n".join(value)
        else:
            filtered[key] = value
    return filtered
a5eaee1a269f4e2f38a521179a4605ed09c41bb6
26,915
import os
import sys

import yaml


def load_config(file='webots.yaml'):
    """Load config from webots.yaml located in the repository root.

    Exits the process with status 1 when the file does not exist; an
    existing-but-empty file yields an empty dict.
    """
    if not os.path.isfile(file):
        print('Cannot load `webots.yaml`')
        sys.exit(1)
    with open(file, 'r') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader) or {}
    return config
83ced91fc8b97dc57b99f9fc3872d19516b9e6cb
26,917
def products_with_market_share_below_some_quantile(
        exports_long, min_market_share_quantile=0.05,
        year_min_market_share=2008, verbose=0):
    """Return the product codes of products whose share of the global export
    market in the given year is less than or equal to the given quantile of
    all such products' market shares in that year.
    """
    # assumes exports_long is a pandas DataFrame with at least the columns
    # 'year', 'product_code' and 'export_value' — TODO confirm against caller.
    if min_market_share_quantile <= 0:
        # Quantile of 0 (or below) means nothing can fall at or under it,
        # so skip the computation entirely and remove no products.
        if verbose:
            print('min_market_share_quantile == 0, so no products will be '
                  'removed due to having too small a market share in year '
                  '{}'.format(year_min_market_share))
            print()
        return set()
    else:
        # Total export value per product in the reference year (a Series
        # indexed by product_code).
        market_shares_of_products_in_year_of_min_market_share = (
            exports_long[exports_long.year == year_min_market_share]
            .groupby(['product_code'])
            .export_value
            .agg('sum'))
        # The cutoff: the requested quantile of those per-product totals.
        min_market_share_threshold = (
            market_shares_of_products_in_year_of_min_market_share.quantile(
                min_market_share_quantile))
        # Products at or below the cutoff (inclusive — note the <=).
        product_codes_to_remove = set(
            market_shares_of_products_in_year_of_min_market_share[
                (market_shares_of_products_in_year_of_min_market_share
                 <= min_market_share_threshold)]
            .index
            .get_level_values('product_code'))
        if verbose:
            print('{} many products will be removed because they have market '
                  'share <= the {} quantile in year {}: {}'.format(
                      len(product_codes_to_remove),
                      min_market_share_quantile,
                      year_min_market_share,
                      sorted(product_codes_to_remove)))
            print()
        return product_codes_to_remove
4b6accf97d16bb4f5d37071c9d12ac1717ffd9bf
26,918
def _compute_lineline_intersection(line1_pt1, line1_pt2, line2_pt1, line2_pt2): """Algorithm to compute a line to line intersection, where the two lines are both defined by two points. Based on this article: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection :param line1_pt1: First point on first line :type line1_pt1: tuple :param line1_pt2: First point on first line :type line1_pt2: tuple :param line2_pt1: First point on first line :type line2_pt1: tuple :param line2_pt2: First point on first line :type line2_pt2: tuple :return: intersection point (p_x, p_y), if it exists :rtype: tuple or None """ (x1, y1) = line1_pt1 (x2, y2) = line1_pt2 (x3, y3) = line2_pt1 (x4, y4) = line2_pt2 # Check for parallel lines denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) if denominator == 0: return None p_x = ((x1*y2 - y1*x2) * (x3 - x4) - (x1 - x2) * (x3*y4 - y3*x4)) \ / denominator p_y = ((x1*y2 - y1*x2) * (y3 - y4) - (y1 - y2) * (x3*y4 - y3*x4)) \ / denominator return (p_x, p_y)
ec34a6f2aca4fc918d1e384d3ac1fb7615af4f35
26,919
import os
import json


def find_saved_rects(stick):
    """Look up previously detected rectangles stored alongside a stick image.

    Input:
        stick: filename of the stick image
    Output:
        data: the parsed JSON rectangles, or [] when no saved file exists

    NOTE(review): the path is built with literal backslashes, so this only
    resolves on Windows — confirm intended platform.
    """
    json_file = (os.getcwd() + "\\results\\rectangles\\"
                 + os.path.splitext(stick)[0] + ".json")
    if not os.path.isfile(json_file):
        print("Saved rects not found")
        return []
    return json.load(open(json_file))
8082fcb4969d05194cbc1d833202c2de0c6375c6
26,920
def pretty_string_time(t):
    """Format an elapsed time ``t`` (seconds) as a short human-readable string."""
    if t > 4000:
        return 't=%.1fh' % (t / 3600)
    if t > 300:
        return 't=%.0fm' % (t / 60)
    return 't=%.0fs' % (t)
b1c289df0e7fe30e0568783c50ce476e40b9bb7a
26,921
def get_F():
    """Return the constant 101325.

    NOTE(review): 101325 is presumably standard atmospheric pressure in
    pascals — confirm the intended meaning of "F" with the caller.
    """
    value = 101325
    return value
ed43348d3b51be3b3ab9aa5cb8f5cb8337c035f7
26,922
import math


def center_detector(number, height):
    """
    Inputs position and height of Stage 2.
    Outputs ideal angle (degrees) such that the beam hits the detector center.
    """
    # Horizontal offset of the stage from the 250 reference position.
    offset = number - 250
    hypotenuse = math.sqrt(offset ** 2 + height ** 2)
    ideal_angle = math.degrees(math.atan(height / (hypotenuse + offset)))
    return ideal_angle
beea22c813814b8ef9d6a3a2c4e6041df665d464
26,925
def actionable(msg):
    """
    Wrap ``msg`` in ANSI green escape codes so actionable instructions
    stand out from generic logging.
    """
    green_on = '\033[92m'
    reset = '\033[0m'
    return green_on + msg + reset
49a350ee1429baeb39d6affbfc5b8dbc8351172f
26,926
import subprocess import re def _supports_correct_style(clang_exe): """Checks if the version of clang-format is 14.0.5 or newer which is required to correctly reformat code for landing""" try: rawbytes = subprocess.check_output([clang_exe, "-version"]) output = rawbytes.decode('utf-8') except subprocess.CalledProcessError: return False match = re.search(r'version ([\d+\.]+) ', output) if match: parts = match.group(1).split('.') if int(parts[0]) < 14: return False if int(parts[1]) > 0: return True if int(parts[2]) < 5: return False return True return False
c8e3ca058fa0f0a7a2510458d6a6702bd695274d
26,927
def update_params(kwargs, **params):
    """Merge ``params`` with ``kwargs``; entries in ``kwargs`` override."""
    merged = dict(params)
    merged.update(kwargs)
    return merged
1d35a04cfd67b5089fdca55d0ae150b5e2af35c9
26,928
def _create_cg_with_member(vnx_gf):
    """Helper that creates a consistency group containing one new LUN.

    :param vnx_gf: the vnx general test fixture
    :return: the created consistency group
    """
    new_lun = vnx_gf.pool.create_lun(vnx_gf.add_lun_name())
    group = vnx_gf.vnx.create_cg(vnx_gf.add_cg_name())
    group.add_member(new_lun)
    return group
98f5646cb61b2ae8aecf07865461e45edf4dcb64
26,929
def get_all_headers(message, key):
    """
    Given an HTTPMessage, return every header value matching ``key``.
    """
    matching = message.get_all(key)
    return matching
079d0e7c6ea3d55228ba3c74f4590a1bf4ee325d
26,930
def as_unsigned_int32_array(byte_array):
    """Interpret a flat sequence of byte values as little-endian unsigned
    32-bit integers (four bytes per word)."""
    words = []
    for offset in range(0, len(byte_array), 4):
        b0, b1, b2, b3 = byte_array[offset:offset + 4]
        words.append(b0 + (b1 << 8) + (b2 << 16) + (b3 << 24))
    return words
dcfb5b5504056d3784c43a4adda876a6bc51f6ae
26,931