content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def all_boolean(phrase):
    """Return True if *phrase* is a list whose elements are all booleans."""
    if not isinstance(phrase, list):
        return False
    # all() is vacuously True for an empty list, matching the original loop.
    return all(isinstance(item, bool) for item in phrase)
eaec8cae03041d5c8b2e7d4af0c2b4e373bfaa4c
13,400
import re


def normalize_likes(sql: str) -> str:
    """
    Normalize and wrap LIKE statements

    :type sql str
    :rtype: str
    """
    # Strip SQL wildcards, e.g. LIKE '%bot' -> LIKE 'bot'
    cleaned = sql.replace("%", "")

    # Collapse every LIKE '<literal>' into the placeholder LIKE X
    cleaned = re.sub(r"LIKE '[^\']+'", "LIKE X", cleaned)

    # Collapse runs of identical "or/and <col> LIKE X" clauses into a
    # single clause followed by " ..."
    found = re.finditer(r"(or|and) [^\s]+ LIKE X", cleaned, flags=re.IGNORECASE)
    found = [m.group(0) for m in found] if found else None
    if found:
        for clause in set(found):
            cleaned = re.sub(
                r"(\s?" + re.escape(clause) + ")+", " " + clause + " ...", cleaned
            )
    return cleaned
8059eb4bf1ee4a8045702024c81a4f8b35469358
13,401
def escapes_sub(match):
    """re.sub callback: replace an escape sequence with its bare character
    (capture group 1)."""
    return match.group(1)
f9d535bfdeea2cf4791301c348d513b606ed258d
13,403
def minimum_time_to_make_a_sale():
    """
    Real Name: Minimum Time to Make a Sale
    Original Eqn: 1
    Units: Months
    Type: constant

    The absolute minimum calendar time it would take to make a sale to a
    person, even with all the hours in the day devoted to them.
    """
    return 1
ab6acc9421756d944c23119512bb84eb7750c83b
13,405
def read_data_line(line):
    """Process one line of the data file.

    Expected format: ``WORD<TAB>W ER D`` -- a word, a separator, then a
    space-separated pronunciation.

    Bug fixed: the original ``line.split(" ")`` produced more than two
    fields whenever the pronunciation contained a space (any multi-phone
    word), raising ValueError on unpacking.  Splitting only on the first
    whitespace run handles both tab- and space-separated data.

    Args:
        line: string representing one line of the data file

    Returns:
        chars: list of characters of the word
        phones: list of phones
    """
    line = line.strip()
    word, pronunciation = line.split(None, 1)  # first whitespace run only
    chars = list(word.strip())
    phones = pronunciation.strip().split(" ")
    return chars, phones
66a840e2f2dfbc5e6c9d4a62c938b1bc60454a1f
13,406
def api_get_balance(addr):  # noqa: E501
    """Get balance for a Bitcoin SV address using WhatsOnChain (stub).

    :param addr: bitcoin sv address
    :type addr: str

    :rtype: ResponseGetBalanceModel
    """
    # Generated stub -- the real lookup is not implemented yet.
    return 'do some magic!'
b64771f02553e134f17b8cc5dcfddc3f48465849
13,408
import os
import json


def load_config(config_spec):
    """Load the JSON processing config for a bank-statement template.

    Config files live beside this module under
    ``config/[country code]/[bank]/[statement type].json``; *config_spec*
    is the dotted form, e.g. ``za.absa.cheque`` for the default.

    Args:
        config_spec: dotted code that resolves to a json file as above

    Returns:
        The configuration as a python object
    """
    parts = config_spec.split(".")
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(
        local_dir, "config", os.path.join(*parts[:-1]), parts[-1] + ".json"
    )
    with open(config_path) as handle:
        return json.load(handle)
4ca53ce22b8b9270a98ee8503794f79c59e8b8c4
13,410
def formatted_constituent_array(constituent_array):
    """
    Given a constituent array of Species, return the classic CALPHAD-style
    interaction string.

    Parameters
    ----------
    constituent_array : list
        List of sublattices, which are lists of Species in that sublattice

    Returns
    -------
    str
        Constituent array formatted in the classic CALPHAD style

    Examples
    --------
    >>> from pycalphad import variables as v
    >>> const_array = [[v.Species('CU'), v.Species('MG')], [v.Species('MG')]]
    >>> formatted_constituent_array(const_array)
    'CU,MG:MG'
    """
    sublattice_strings = []
    for sublattice in constituent_array:
        sublattice_strings.append(','.join(species.name for species in sublattice))
    return ':'.join(sublattice_strings)
b1e60d21c28b66620eaffed1cda20abcb394a833
13,411
def calc_automated_readability_index(
    n_letters: int,
    n_words: int,
    n_sents: int,
    a: float = 6.26,
    b: float = 0.2805,
    c: float = 31.04,
) -> float:
    """Compute the Automated Readability Index (ARI).

    Higher values mean harder text; the result approximates the number of
    years of US schooling needed to understand the text (index 1 ~ ages
    6-7 up to index 12 ~ ages 17-18).

    References:
        https://en.wikipedia.org/wiki/Automated_readability_index

    Args:
        n_letters: number of letters
        n_words: number of words
        n_sents: number of sentences
        a: coefficient a
        b: coefficient b
        c: coefficient c

    Returns:
        float: index value
    """
    letters_per_word = n_letters / n_words
    words_per_sentence = n_words / n_sents
    return (a * letters_per_word) + (b * words_per_sentence) - c
d2a49044749dc93bb8684b27a81f47ff1498cc46
13,412
def FirstFree(seq, base=0):
    """Returns the first non-existing integer from seq.

    The seq argument should be a sorted list of positive integers.  The
    first time an element is greater than its expected slot (index +
    base), that slot number is returned.  Example: C{[0, 1, 3]} returns
    I{2}; C{[3, 4, 6]} with I{base=3} returns I{5}.

    @type seq: sequence
    @param seq: the sequence to be analyzed
    @type base: int
    @param base: use this value as the base index of the sequence
    @rtype: int
    @return: the first non-used index in the sequence, or None if the
        sequence is gap-free
    """
    for position, value in enumerate(seq):
        assert value >= base, "Passed element is higher than base offset"
        expected = position + base
        if value > expected:
            # `expected` is not used
            return expected
    return None
42323664c7bb2c59506ed3b24115a38bc0fcf63d
13,413
from typing import Iterable


def flatten(nested):
    """Flatten a nested sequence whose sub-items may be sequences or
    primitives.

    Unlike the itertools chain methods, items can be primitives, sequences,
    nested sequences, or any combination.  Every iterable except strings is
    completely un-nested, so use with caution (e.g. a torch Dataset would
    be unpacked item by item).  Returns a list rather than a generator.

    Parameters
    ----------
    nested: sequence (list, tuple, set)
        Sequence where some or all of the items are also sequences.

    Returns
    -------
    list: Flattened version of `nested`.
    """
    flat = []

    def _collect(items):
        # Recurse into every non-string iterable; append everything else.
        for item in items:
            if isinstance(item, Iterable) and not isinstance(item, str):
                _collect(item)
            else:
                flat.append(item)

    _collect(nested)
    return flat
464cd221dfaf6f842bf6da0d96ad8322d1c84e71
13,415
import uuid
import sys


def set_payload(href, session_type, rg_identification, rg_description):
    """Build the headers and payload for the Resource Groups API.

    Only the 'add_rg' session type is supported; any other value prints an
    error and terminates the process with exit code 1.
    """
    headers = {
        'content-type': "application/json",
        'accept': "application/json",
        'scre.syncwait': "1",
        'Referer': href,
    }
    if session_type == 'add_rg':
        payload = {
            "entityTypes": ["AgentGroup"],
            "arbitraryStringProperty": "RG created by IPM-CLI",
            "displayLabel": rg_identification,
            "description": rg_description,
            # random key so each created resource group is unique
            "keyIndexName": str(uuid.uuid4()),
        }
        return headers, payload
    print("ERROR - Could not determine session origin. Exiting!")
    sys.exit(1)
07d8f37e43005664a10235e457e0a6f2df9205af
13,416
def update_talk_tags(talk, talk_json_data):
    """Add every tag found under ``snippet.tags`` in *talk_json_data* to
    *talk* and return the (mutated) talk instance."""
    snippet = talk_json_data.get("snippet", {})
    for tag in snippet.get("tags", []):
        talk.tags.add(tag)
    return talk
356bb6d1c25f2ea0499cf9f31768a9090d0f8180
13,417
def calculadora(x=1, y=1):
    """Return a dict of the basic arithmetic operations applied to x and y.

    args
    ----
    x : int or float
        First input number
    y : int or float
        Second input number

    return
    ------
    dict
        {'operation name': value} for sum, subtraction, division,
        multiplication and exponentiation (keys are in Portuguese).
    """
    resultado = {}
    resultado['soma'] = x + y
    resultado['subtração'] = x - y
    resultado['divisão'] = x / y
    resultado['multiplicação'] = x * y
    resultado['potência'] = x ** y
    return resultado
6fad4c8c1d388cb5b77c52d68f63a37070379657
13,418
def move_by_month(month, offset):
    """Return the 1-12 month number *offset* months away from *month*,
    wrapping around the year in either direction."""
    zero_based = (month - 1 + offset) % 12
    return zero_based + 1
38229d5a45b4643dfeb64e0e311949b9e26625ef
13,421
def is_image_file(filename):
    """Checks if a file is an image.

    Arguments:
        filename {str} -- File path.

    Returns:
        bool -- True if the path ends in .png/.jpg/.jpeg (case-sensitive).
    """
    # str.endswith accepts a tuple of suffixes, replacing the any() scan.
    return filename.endswith((".png", ".jpg", ".jpeg"))
d2971a57cd4fda456384f1e2f839dc32b3c897a6
13,423
import bz2
import gzip
import re
import os
import pickle


def save_results(likelihoods, info, compress, obs_filename, output_filename, output_dir):
    """
    Saves the likelihoods together with information about the chromosome
    number, depth of coverage, ancestry, statistics of the genomic windows
    and flags that were used.  Data compression is supported in gzip and
    bzip2 formats.

    Returns the path of the file that was written.
    """
    # Pick the opener matching the requested compression; anything other
    # than 'bz2'/'gz' falls back to the plain built-in open().
    Open = {'bz2': bz2.open, 'gz': gzip.open}.get(compress, open)
    # '' when uncompressed, otherwise '.bz2' / '.gz'
    ext = ('.'+compress) * (compress in ('bz2','gz'))
    # Drop any directory prefix from the observations filename.
    obs_filename_stripped = obs_filename.rsplit('/', 1).pop()
    # Derive "<prefix>LLR.p<ext><suffix>" from "<prefix>obs.p<suffix>".
    default_output_filename = re.sub('(.*)obs.p(.*)', f'\\1LLR.p{ext:s}', obs_filename_stripped, 1)
    if output_filename == '':
        output_filename = default_output_filename
    else:
        output_filename += ext
    # NOTE(review): when output_dir is '', this still yields '/', i.e. the
    # filesystem root -- presumably callers always pass a real directory;
    # confirm before relying on the empty-string case.
    output_dir = output_dir.rstrip('/') + '/'  # undocumented option
    if output_dir != '' and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Two sequential pickle records: likelihoods first, then the info dict.
    with Open(output_dir + output_filename, "wb") as f:
        pickle.dump(likelihoods, f, protocol=4)
        pickle.dump(info, f, protocol=4)
    return output_dir + output_filename
5223cc3b15e95f2e38a453841becf1403c110744
13,424
import math


def asin(x):
    """Return the angle, in radians, whose sine is *x*.

    Thin wrapper around math.asin.
    """
    return math.asin(x)
b980a538fbf51679bff8d08a84cd9cdb1a486153
13,426
def build_character(
    base_info,
    background,
    characteristics,
    fellow_investigators,
    hit_points,
    inventory,
    luck,
    magic,
    sanity,
    skills,
    weapons,
):
    """Builds a character object.

    Bug fixed: the original wrapped the eleven section dicts in a set
    literal (``{base_info, background, ...}``); dicts are unhashable, so
    every call raised TypeError.  The sections are now merged into one
    dict under the "character" key (later sections win on key overlap --
    presumably the sections use disjoint keys; confirm with callers).

    Args:
        base_info (dict): The base character information.
        background (dict): The character background information.
        characteristics (dict): The character characteristics information.
        fellow_investigators (dict): The fellow investigators tied to the character.
        hit_points (dict): The character's hit point information.
        inventory (dict): The character's inventory information.
        luck (dict): The character's luck information.
        magic (dict): The character's magic information.
        sanity (dict): The character's sanity information.
        skills (dict): The character's skill information.
        weapons (dict): The character's weapons information.

    Return:
        dict: A completed character sheet.
    """
    return {
        "character": {
            **base_info,
            **background,
            **characteristics,
            **fellow_investigators,
            **hit_points,
            **inventory,
            **luck,
            **magic,
            **sanity,
            **skills,
            **weapons,
        }
    }
8504ee61d9702eebeb4000d021ab975c0f777033
13,427
def removeUser(name):
    """Unregister a user from this computer (stub).

    Returns True if the user was removed, False if they weren't registered
    in the first place.  The user loses all access to this computer; when
    the last user is removed, the computer becomes accessible to all
    players.
    """
    # Stub implementation: nothing is ever registered, so nothing is removed.
    return False
7cc414ed6f7d9e23e4f0993f53366a42c850b5b0
13,428
import os


def get_level(name):
    """Resolve the log level for *name* from the LEVEL environment variable.

    LEVEL may be either a plain level (applied to everything) or of the
    form ``level,name1,name2,...`` which applies that level only to the
    listed names.  Defaults to 'info'.
    """
    configured = os.getenv('LEVEL')
    if configured is None:
        return 'info'  # default level
    if ',' not in configured:
        return configured
    level, *names = configured.split(',')
    return level if name in names else 'info'
b2f11771e90dd3a9f6fdf61636a57a86759a0f18
13,429
def NetInvIncTax(e00300, e00600, e02000, e26270, c01000, c00100,
                 NIIT_thd, MARS, NIIT_PT_taxed, NIIT_rt, niit):
    """
    Computes Net Investment Income Tax (NIIT) amount assuming that all
    annuity income is excluded from net investment income.
    """
    # Modified AGI: no foreign earned income exclusion to add back.
    modAGI = c00100
    investment_income = e00300 + e00600 + c01000 + e02000
    if not NIIT_PT_taxed:
        # Exclude pass-through income (e26270) from net investment income.
        investment_income = investment_income - e26270
    NII = max(0., investment_income)
    excess_agi = max(0., modAGI - NIIT_thd[MARS - 1])
    niit = NIIT_rt * min(NII, excess_agi)
    return niit
992946090d5894042f2e0909a325ea3225c8a49f
13,431
import numpy


def get_COMs_celllist(cell, M):
    """
    Calculate the Center of Mass (COM) of every subcell in an Mx*My*Mz
    partition of *cell*.

    *cell* is a 3x3 matrix with one lattice vector per row; the COMs are
    expressed along the normalised (unit) lattice basis vectors.
    Returns a list of COM position arrays, ordered x-major.
    """
    Mx, My, Mz = M[0], M[1], M[2]
    # Lengths of the three lattice vectors.
    Lx = numpy.linalg.norm(cell[0, :])
    Ly = numpy.linalg.norm(cell[1, :])
    Lz = numpy.linalg.norm(cell[2, :])
    # Unit vectors along each lattice direction.
    ex = cell[0, :].copy() / Lx
    ey = cell[1, :].copy() / Ly
    ez = cell[2, :].copy() / Lz
    coms = []
    for ix in range(Mx):
        for iy in range(My):
            for iz in range(Mz):
                # Lower/upper bounds of this subcell along each direction.
                x_lo, x_hi = Lx / Mx * ix, Lx / Mx * (ix + 1)
                y_lo, y_hi = Ly / My * iy, Ly / My * (iy + 1)
                z_lo, z_hi = Lz / Mz * iz, Lz / Mz * (iz + 1)
                # Subcell origin plus half-widths along the basis vectors.
                origin = x_lo * ex + y_lo * ey + z_lo * ez
                com = (origin
                       + ((x_hi - x_lo) / 2.) * ex
                       + ((y_hi - y_lo) / 2.) * ey
                       + ((z_hi - z_lo) / 2.) * ez)
                coms.append(com)
    return coms
329a4ef25951aea6b38fee9dc0dafb19200621a4
13,433
def break_up_sink_info(sink_info: str) -> dict:
    """Break up pulsemixer sink info into a dictionary.

    Parameters
    ----------
    sink_info: str
        The sink info from pulsemixer.

    Returns
    -------
    dict
        Field name -> value, with all spaces removed; only ``key: value``
        fields are kept.
    """
    fields = sink_info.split(",")
    if "\t" in fields[0]:
        # Drop the leading index before the first tab.
        fields[0] = fields[0].split("\t")[1]
    info = {}
    for field in fields:
        parts = field.split(":")
        if len(parts) == 2:
            key, value = (part.replace(" ", "") for part in parts)
            info[key] = value
    return info
790c757500528af28a6a01bc0cea2b7772d2ea55
13,434
def project_content_url(project, **kwargs):
    """
    Get the URL for a file path within a project.

    Delegates straight to ``project.content_url``; doing this once per list
    avoids the extra project/account queries triggered per file when using
    `{{ file.download_url }}`.
    """
    url = project.content_url(**kwargs)
    return url
91a6a927b3fa88f4b9e976093965a1176657532e
13,435
def mavlink_latlon(degrees):
    """Convert a MAVLink packet lat/lon value (degrees * 1e7 integer
    format) to decimal degrees."""
    scaled = float(degrees)
    return scaled / 1e7
20d883e45f99cca3c99eeb9d7c5fae96db03fd5a
13,436
def is_whitespace(c):
    """ static boolean isWhitespace(int codepoint) """
    # Hit the common ASCII characters first, then the nonbreaking spaces
    # that Java brokenly leaves out of isWhitespace.  U+FEFF is the BOM /
    # zero-width no-break space (http://www.unicode.org/faq/utf_bom.html#BOM)
    # and U+202F is the narrow no-break space; both are accepted here.
    if c in u' \n\u00A0\u2007\u202F\uFEFF':
        return True
    return c.isspace()
f2160386120da49cb42eb0cefd80a65a2345832b
13,438
import itertools


def _merge_dicts(first, second):
    """Merge two dicts into a new one

    Args:
        first (dict): Primary dict; if a key is present in both, the value
            from this dict is used.
        second (dict): Other dict to merge.

    Returns:
        dict: Union of provided dicts, with value from first used in the
            case of overlap.
    """
    # Fixed: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3.  With ** unpacking, later entries win, so `first` is
    # applied last to preserve the documented overlap behaviour.
    return {**second, **first}
85b3377d25d730b32c9bf925bfde375db5c49450
13,439
def json_dumper(obj):
    """
    Dumps generic objects to JSON.  Useful to serialize ENV_CONFIG.

    Tries, in order: ``obj.toJSON()``, ``obj.__dict__``, ``str(obj)``.
    """
    # extra hacky, but works.  Bare `except:` replaced with
    # `except Exception:` so KeyboardInterrupt/SystemExit propagate.
    try:
        return obj.toJSON()
    except Exception:
        try:
            return obj.__dict__
        except Exception:
            return str(obj)
31d7ddcebb16bf437a18aa5584383f7ca5c83aed
13,441
def make_header(in_files, args):
    """Record the sam header location in *in_files* and return it.

    NOTE(review): the original docstring is truncated ("...species and")
    and the body only stores a placeholder string -- presumably a real
    header is meant to be generated here (the original comment mentions
    parsing a barcode file into a list of individuals).
    """
    in_files['header'] = 'location of header'
    return in_files
8a4a222391a462d0d6187080ed2eccc631526513
13,442
def accuracy(actual_y: list, predicted_y: list) -> float:
    """
    Calculate the percentage accuracy of predictions.

    :param actual_y: a list containing initial Y values generated by
        'y_generator' function
    :param predicted_y: a list containing predicted Y values generated by
        'predict_y_values' function
    :return: percentage of accuracy
    """
    hits = 0
    # Walk both lists in lockstep; a prediction is correct when the actual
    # Y value equals the predicted one.
    for truth, guess in zip(actual_y, predicted_y):
        if truth == guess:
            hits += 1
    # Correct predictions over total data points, as a percentage.
    return (hits / len(actual_y)) * 100
7082d0e5505fc58e9ba80883b63bec98f4632676
13,443
def dict2columns(data, id_col=None):
    """Convert a dict-based object to two-column output.

    Also make sure the id field is at top.

    Args:
        data: mapping to convert.
        id_col: key to force to the top of the output, if present.

    Returns:
        ``({}, {})`` when *data* is empty; otherwise the keys/values pair
        produced by ``zip`` (a list of two tuples), with *id_col* first
        when given and present.
    """
    if not data:
        return ({}, {})
    if id_col is not None and id_col in data:
        # Fixed idiom: the original built `keys` with a side-effecting
        # list comprehension ([keys.append(...) for ...]).
        keys = [id_col] + [key for key in sorted(data) if key != id_col]
        items = [(key, data[key]) for key in keys]
    else:
        items = sorted(data.items())
    return list(zip(*items))
24917339d53febc4239e21ebb8b39e487f45a370
13,444
def resolve_int_list_arg(arg):
    """Resolve a CLI argument as a list of integers.

    None -> None; int -> [int]; str -> [int(str)]; tuple -> the half-open
    range [first, second) as a list (extra tuple items ignored); anything
    else is returned unchanged.
    """
    if arg is None:
        return None
    if isinstance(arg, int):
        return [arg]
    if isinstance(arg, str):
        return [int(arg)]
    if isinstance(arg, tuple):
        # Interpret as a range; items beyond the second are ignored.
        start, stop = arg[0], arg[1]
        return list(range(start, stop))
    return arg
5201d191946c69e10253e476341ab70c76b3a67d
13,445
def groupwise_normalise(gs):
    """
    Normalise each level-0 group of a GeoSeries so its values sum to 1.

    :param gs: GeoSeries
    :return: normalised GeoSeries
    """
    def _scale(group):
        # Divide every entry by its group's total.
        return group / group.sum()

    return gs.groupby(level=0).apply(_scale)
e0c87701658481ccea01828c75244b7a1043ee29
13,447
from typing import Dict


def component_io_to_rest_obj(io_dict: Dict):
    """Convert component input/output entities to their REST objects,
    keyed by port name."""
    return {name: port._to_rest_object() for name, port in io_dict.items()}
03c3b5fc0033c3488a1190db89e8361483fc765d
13,448
def generate_wrapper(config):
    """
    Generate a shell wrapper script for the format.

    :param config: The configuration for the format; its 'interpreter'
        entry becomes the body of the script.
    """
    interpreter = config['interpreter']
    return "#!/bin/sh\n%s" % interpreter
694ecb3369de8ab4af4c57418c732d39ed57aceb
13,449
def params_to_dict(params):
    """
    Convert the parameters of a mwparserfromhell template to a dictionary
    mapping stripped parameter name to stripped value.
    """
    return {param.name.strip(): param.value.strip() for param in params}
0799a22627b1f09e684d2b506f69eabcc942ec2f
13,450
def render_network_key(ssid, address):
    """Return the network key in ``ssid:address`` form."""
    return ':'.join((ssid, address))
daf47616c455c3b542688293195f45607fb8b7ea
13,451
def checkPossibility(nums):
    """Return True when *nums* can be made non-decreasing by modifying at
    most one element.

    At each violation (nums[i] > nums[i+1]) exactly one repair is allowed:
    lower nums[i] when the left neighbour permits it
    (e.g. 1 2 3 4 6 5 -> 1 2 3 4 5 6), otherwise raise nums[i+1] when the
    right neighbour permits it (e.g. 3 4 5 3 6 8 -> 3 3 4 5 6 8).  If
    neither works (e.g. 3 4 5 3 3 8), or a second violation appears, the
    answer is False.
    """
    changed = False
    size = len(nums)
    for i in range(size - 1):
        if nums[i] <= nums[i + 1]:
            continue
        if changed:
            # Second violation: one modification is not enough.
            return False
        # Can we lower nums[i] to some x with nums[i-1] <= x <= nums[i+1]?
        can_lower_left = i - 1 < 0 or nums[i - 1] <= nums[i + 1]
        # Can we raise nums[i+1] to some x with nums[i] <= x <= nums[i+2]?
        can_raise_right = i + 2 >= size or nums[i] <= nums[i + 2]
        if not (can_lower_left or can_raise_right):
            return False
        changed = True
    return True
83b5205ad3009b09d6f90a69c1e4a702cc17b6d0
13,453
from typing import OrderedDict


def read_populations(flname):
    """
    Read populations from a file.

    One population per line: the population name, a comma, then the
    comma-separated sample names.  For example::

        pop1,sample_1,sample_2
        pop2,sample_3,sample_4

    Returns:
        (groups, group_names): sample name -> group id, and
        group id -> population name (both OrderedDicts).
    """
    groups = OrderedDict()
    group_names = OrderedDict()
    with open(flname) as handle:
        for group_id, raw_line in enumerate(handle):
            fields = raw_line.strip().split(",")
            group_names[group_id] = fields[0]
            for sample in fields[1:]:
                groups[sample] = group_id
    return groups, group_names
9467bf2e882a2de0ebfb1ecfb566baeab16a1500
13,455
import inspect


def is_instance_of_django_model(reference):
    """
    Test whether *reference* is a class that extends django.db.models.Model.

    :param reference: A given Anonymous reference.
    :rtype: boolean
    :returns: True only if the given reference is a Model subclass.
    """
    if not inspect.isclass(reference):
        return False
    # Walk the MRO and look for Django's Model base by qualified name, so
    # no Django import is needed here.
    qualified_names = (
        '%s.%s' % (base.__module__, base.__name__)
        for base in inspect.getmro(reference)
    )
    return 'django.db.models.base.Model' in qualified_names
b54536bded37e4a078c266bdc9b3553cbf48e0f3
13,456
from datetime import datetime


def get_unix(str_ts, date_format):
    """Get the unix timestamp of a string timestamp in *date_format*.

    Args:
        str_ts: string timestamp in `date_format`
        date_format: datetime time stamp format

    Returns:
        float: unix timestamp (the parse is naive, so the local timezone
        applies)
    """
    parsed = datetime.strptime(str_ts, date_format)
    return parsed.timestamp()
6d0c591734fa78defed11cd5ed8c66da63ad3b5b
13,457
import io
import json


def load_json(buffer):
    """
    Parse a bytes buffer as JSON.

    :param buffer: raw bytes that may contain JSON
    :return: the decoded object, or None when the buffer is not valid JSON
    """
    stream = io.BytesIO(buffer)
    try:
        return json.load(stream)
    except json.decoder.JSONDecodeError:
        return None
6893a22009ca0093298060913d27d30e2645a92e
13,458
def run_transport(definition, transport) -> bool:
    """
    Run schedule & watch for a transport.

    :param definition: provides the transport context manager
    :param transport: transport to schedule and then watch
    :return: result of ``transport.watch()``
    """
    with definition.transport(binaries=[]):
        transport.schedule(
            command="--mocked--",
            definition=definition,
            is_backup=True,
            version="",
        )
        return transport.watch()
e8a3442f670351ed7642abe4b4a837fd84d36f55
13,460
def score_openpose(e, image, w, h):
    """Score an image using an OpenPose model.

    Args:
        e: OpenPose model.
        image: Image in CV2 format.
        w, h: target size; resize_to_default only when both are positive.

    Returns:
        Humans (people) detected in the image.
    """
    upsample_ratio = 4.0
    use_default_size = w > 0 and h > 0
    return e.inference(
        image,
        resize_to_default=use_default_size,
        upsample_size=upsample_ratio,
    )
d2969d5af4736497d463482c6b0f532b9e9f908c
13,461
def replace_underscore_with_space(original_string):
    """
    Replace every underscore in *original_string* with a space -- handy
    for turning identifiers into plot titles.

    Args:
        original_string: String with underscores

    Returns:
        replaced_string: String with underscores replaced
    """
    return ' '.join(original_string.split('_'))
27bea63c33c2ffe44b1ddc591a7813d8830e9f76
13,462
import numpy


def gradient_array(ndarray):
    """Convert a numpy array to a (possibly nested) Python list; any other
    value is returned unchanged."""
    if isinstance(ndarray, numpy.ndarray):
        return ndarray.tolist()
    return ndarray
41d4a37e50db0a319d23d18d95978f0d16e569f8
13,463
def is_submod(mod_obj):
    """
    Find out whether a module has a parent (a ``belongs-to`` entry).

    :param mod_obj: module object
    :return: True when the module declares a parent, False otherwise
        (including on any lookup error)
    """
    try:
        return bool(mod_obj.get('belongs-to'))
    except Exception:
        # Anything without a usable .get() simply has no parent.
        return False
274d985b3b7b07f02e919af20ed7fd4531136ccc
13,464
import numpy


def DerXYZEuler(alpha, beta, gamma):
    """
    Derivatives of the Euler-angle transformation matrix with respect to
    the Euler angles.

    Bug fixed: the original allocated ``numpy.zeros((3,3,3))`` but wrote
    with 1-based indices up to ``du[3][3][3]``, raising IndexError on the
    first out-of-range assignment.  All indices are now 0-based, so
    ``du[i][j][k]`` holds d u(i+1, j+1) / d angle, where angle is alpha
    for k=0, beta for k=1 and gamma for k=2.

    :param float alpha: euler angle alpha (radians)
    :param float beta: euler angle beta (radians)
    :param float gamma: euler angle gamma (radians)
    :return: du, a (3,3,3) array of derivatives as described above

    Note: transformation matrix of Euler angles
        u(1,1)= cb*cc-ca*sb*sc, u(1,2)=-sc*cb-ca*sb*cc, u(1,3)= sa*sb
        u(2,1)= sb*cc+ca*cb*sc, u(2,2)=-sc*sb+ca*cb*cc, u(2,3)=-sa*cb
        u(3,1)= sa*sc,          u(3,2)= sa*cc,          u(3,3)= ca
    """
    zero = 0.0
    du = numpy.zeros((3, 3, 3))
    sa = numpy.sin(alpha); ca = numpy.cos(alpha)
    sb = numpy.sin(beta);  cb = numpy.cos(beta)
    sc = numpy.sin(gamma); cc = numpy.cos(gamma)
    # derivatives with respect to alpha (k=0)
    du[0][0][0] = sa*sb*sc;  du[0][1][0] = sa*sb*cc;  du[0][2][0] = ca*sb
    du[1][0][0] = -sa*cb*sc; du[1][1][0] = -sa*cb*cc; du[1][2][0] = -ca*cb
    du[2][0][0] = ca*sc;     du[2][1][0] = ca*cc;     du[2][2][0] = -sa
    # derivatives with respect to beta (k=1)
    du[0][0][1] = -sb*cc-ca*cb*sc; du[0][1][1] = sc*sb-ca*cb*cc;  du[0][2][1] = sa*cb
    du[1][0][1] = cb*cc-ca*sb*sc;  du[1][1][1] = -sc*cb-ca*sb*cc; du[1][2][1] = sa*sb
    du[2][0][1] = zero;            du[2][1][1] = zero;            du[2][2][1] = zero
    # derivatives with respect to gamma (k=2)
    du[0][0][2] = -cb*sc-ca*sb*cc; du[0][1][2] = -cc*cb+ca*sb*sc; du[0][2][2] = zero
    du[1][0][2] = -sb*sc+ca*cb*cc; du[1][1][2] = -cc*sb-ca*cb*sc; du[1][2][2] = zero
    du[2][0][2] = sa*cc;           du[2][1][2] = -sa*sc;          du[2][2][2] = zero
    return du
dd620029cc143bca661c532c9d474e4a759fa2e3
13,466
def filter_para(paras):
    """Filter out empty values and signature fields.

    Removes any entry whose value is falsy, plus the 'sign' and
    'sign_type' keys.  Mutates and returns *paras*.

    Bug fixed: the original popped keys while iterating ``paras.items()``,
    which raises ``RuntimeError: dictionary changed size during
    iteration`` on Python 3 as soon as anything is removed.  The keys to
    drop are now collected first.
    """
    doomed = [k for k, v in paras.items() if not v or k in ['sign', 'sign_type']]
    for key in doomed:
        paras.pop(key)
    return paras
c73123b15add26debbb3c60e11ca90b46782b6cc
13,467
import requests


def scraper_setup(site):
    """
    site: string url of site to be scraped
    returns a page, or exits early (exit code 3) if an error occurs
    """
    print("[[ About to search {site} ]]".format(site=site))
    page = requests.get(site)
    if page.status_code == 200:
        return page
    # Inform user of why it may have failed
    divider = "-" * max(49, (6 + len(site)))
    print(divider)
    print("[[ {site} ]]".format(site=site))
    print("There was an error in opening this page (status code {})".format(
        page.status_code))
    print("This is most likely due to either of two reasons:")
    print("a) Either the artist or song name was misspelled")
    print("b) The Genius page for this entry does not exist")
    print("Please ensure correctness of artist and song name")
    print(divider)
    exit(3)
545df7c51075bf4e41942e76930384811da90213
13,468
import pathlib


def get_fixture(filename):
    """Return the text of the named fixture under tests/fixtures/tools,
    resolved relative to the current working directory."""
    fixture_path = (
        pathlib.Path('.').resolve() / 'tests' / 'fixtures' / 'tools' / filename
    )
    return fixture_path.read_text()
225d4cdc6d2682b0dac8ddfa54b819bea335b82b
13,469
import sys


def get_args():
    """
    Retrieve the script's arguments from the command line.

    Looks for ``-F``/``--file`` followed by a path.  When the argument is
    missing, prints an error and quits the process.

    Returns:
        dict with the key ``file_path``.
    """
    collected = {"file_path": ""}
    argv = sys.argv
    for index, token in enumerate(argv):
        if token in ("-F", "--file"):
            # The path is the very next argv entry.
            collected["file_path"] = argv[index + 1]
    if collected["file_path"] != "":
        return collected
    print("\n\t❌ \033[41mMissing argument : -F or --file \033[0m\n")
    quit()
41e6b9ac23ab3084a77f2b5d658dad0c8890c8bb
13,470
def CCW_ps(Pxx, Pyy, Pxy):
    """Counter-clockwise rotary power spectrum from the auto-spectra and
    the quadrature (imaginary) part of the cross-spectrum."""
    quadrature = Pxy.imag
    return (Pxx + Pyy + 2 * quadrature) / 2.
76710ff31e4fead278afc9eb37691f2178f45def
13,472
def check_interval(child_span, parent_span):
    """
    Return True when *child_span* lies entirely inside *parent_span*.

    Both spans are (start, end) pairs.
    """
    child_start, child_end = child_span
    parent_start, parent_end = parent_span
    return child_start >= parent_start and child_end <= parent_end
6c6a8e636ad181af821d185ba35f18db41d0ce77
13,473
def filter_entities_by_noise(
    true: list, pred: list, noise_lower: float, noise_upper: float
) -> tuple:
    """
    Keep token pairs whose LEVENSHTEIN distance is within range.

    A pair is kept when the true token's LEVENSHTEIN is None, lies inside
    [noise_lower, noise_upper), or equals both bounds exactly.
    """
    kept_true = []
    kept_pred = []
    for gold, guess in zip(true, pred):
        distance = gold.LEVENSHTEIN
        keep = (
            distance is None
            or noise_lower <= distance < noise_upper
            or noise_lower == distance == noise_upper
        )
        if keep:
            kept_true.append(gold)
            kept_pred.append(guess)
    assert len(kept_true) == len(kept_pred)
    return kept_true, kept_pred
8add64fe828e17d1c532a1c67cc1f7da232ae5f2
13,474
def merge_to_length(a, b, length):
    """Merge 2 lists into a specific length.

    All of *b* is kept; items of *a* are inserted at their own indices
    until the result reaches *length*.  For example, a = [1,2,3],
    b = [4,5,6,7] and length = 6 gives [1,2,4,5,6,7]; length = 5 gives
    [1,4,5,6,7]; length >= 7 gives [1,2,3,4,5,6,7].

    Parameters
    ----------
    a : list
        A list of elements.
    b : list
        Another list of elements.
    length : int
        The length of the result list.

    Returns
    -------
    list
        A merged list of the specified length.
    """
    merged = list(b)
    for position, element in enumerate(a):
        if len(merged) >= length:
            break
        merged.insert(position, element)
    return merged
bdadb57c04567bae2a6890d7e5be981f1ba18fc9
13,475
def calc_bpe_bulk_electrolyte_resistance(characteristic_length, sigma):
    """
    Area-specific charge-transfer resistance through the bulk electrolyte.

    Notes:
        Adjari, 2006 - "(area specific) bulk electrolyte resistance";
        Squires, 2010 - uses the same relation without naming it.

    Inputs:
        characteristic_length: (m) length of the BPE
        sigma: (S/m) conductivity of the electrolyte/buffer

    Output:
        Resistance: Ohm*m^2
    """
    return characteristic_length / sigma
f6a08cd5997de8954faa41d69e7abca1541e3fa0
13,476
import random


def random_order(ugraph, num_nodes):
    """
    Return *num_nodes* nodes of *ugraph* in random order.

    Fixes two defects: identity comparison with ``is 0`` (unreliable for
    ints and a SyntaxWarning on CPython 3.8+), and sampling from
    ``dict.keys()``, which ``random.sample`` rejects on Python 3.11+
    because it requires a sequence.

    :param ugraph: graph as a dict keyed by node
    :param num_nodes: number of nodes to draw
    :return: list of nodes in random order (empty when either is empty)
    """
    if len(ugraph) == 0 or num_nodes == 0:
        return []
    random.seed()
    return random.sample(list(ugraph), num_nodes)
4bad993f22f6435648e3e0ff30992f1930961dc9
13,478
import requests
import re


def get_filenames(url):
    """
    The location where the hydat database has the sqlite database file
    itself, as well as supporting documentation.  This function returns the
    filenames of the documentation pdfs in both languages, as well as the
    database filename.

    Input the url for the file location at Environment Canada
    (should be https://collaboration.cmc.ec.gc.ca/cmc/hydrometrics/www/).

    Returns two separate variables:
        - the first is a list of the pdf filenames
        - the second is the database filename

    Raises AssertionError when no database zip is found on the page.
    """
    s = requests.get(url).text
    # Byte offsets of every anchor on the page.  NOTE(review): the
    # comprehension variable shadows the page text `s`, but comprehension
    # scope is separate in Python 3, so the outer `s` is untouched.
    all_hrefs = [s.start() for s in re.finditer('a href=', s)]
    pdf_files = []
    db_filename = None
    for l in all_hrefs:
        # skip past the positions corresponding to 'a href='
        start = l + 8
        # get the non-DB readme files by their 10-char filename prefix
        file_prefix = s[start:start + 10]
        if file_prefix in ['ECDataExpl', 'HYDAT_Defi', 'HYDAT_Rele']:
            end = s[start:].find('.pdf') + len('.pdf')
            # extract just the filename
            filename = s[start:start + end]
            # append to the list of filenames
            pdf_files.append(filename)
        if file_prefix == 'Hydat_sqli':
            # the sqlite database ships as a .zip archive
            end = s[start:].find('.zip') + len('.zip')
            db_filename = s[start: start + end]
    if db_filename == None:
        raise AssertionError('No database file was found. Check https://collaboration.cmc.ec.gc.ca/cmc/hydrometrics/www/ to see if the page is up, and if a .zip file corresponding to the sqlite Hydat database file exists')
    return pdf_files, db_filename
0ccfc46c74d5b89be904dd093a40b82cd4abed5d
13,479
def count_correl_above(correl_mat, limit):
    """
    Percentage of correlation-matrix entries strictly above a threshold.

    Args:
        correl_mat (array): Matrix of correlation values
        limit: Threshold for counting

    Returns:
        (float): Percentage of correlations above the limit
    """
    values = correl_mat.flatten()
    above = sum(1 for v in values if v > limit)
    return float(above) / float(len(values)) * 100
00fb1f0aea268a9333c3741255a2fd7cafcb43ee
13,480
from typing import Optional
from typing import Dict


def __get_source_file_for_analyzer_result_file(
    analyzer_result_file_path: str,
    metadata: Optional[Dict]
) -> Optional[str]:
    """ Get source file for the given analyzer result file. """
    if not metadata:
        return None

    # The mapping lives either at the metadata top level or merged from
    # each tool entry.
    if 'result_source_files' in metadata:
        result_source_files = metadata['result_source_files']
    else:
        result_source_files = {}
        for tool in metadata.get('tools', {}):
            result_source_files.update(
                tool.get('result_source_files', {}).items())

    return result_source_files.get(analyzer_result_file_path)
c40dec03234954bdb6cb43d43e0988d98c6f0bbd
13,481
import os
import grp


def is_effective_group(group_id_or_name):
    """Return True when *group_id_or_name* matches the effective group,
    compared either as a gid (string form) or as a group name."""
    egid = os.getegid()
    if str(group_id_or_name) == str(egid):
        return True
    return group_id_or_name == grp.getgrgid(egid).gr_name
de279d789d85b216f5da59762571dda5bfa9d2ab
13,482
def testdat(testdir): """Path to the testdat directory""" return testdir / "testdat"
d7c278fba718164d50863e3fb353155a1ff00eee
13,483
def allowed_anions():
    """Return the elements (str) that tau should be able to classify.

    NOTE: the model was only trained on ['O', 'F', 'Cl', 'Br', 'I'].
    """
    chalcogens = ['O', 'S', 'Se', 'Te']
    halogens = ['F', 'Cl', 'Br', 'I']
    pnictogens = ['N', 'P', 'As', 'Sb']
    return chalcogens + halogens + pnictogens
337c25b4d2097e920885630fe52c322ac23480b0
13,484
def _RequireOpenQueue(fn):
    """Decorator for "public" functions.

    This function should be used for all 'public' functions. That is,
    functions usually called from other classes. Note that this should be
    applied only to methods (not plain functions), since it expects that the
    decorated function is called with a first argument that has a
    '_queue_filelock' attribute.

    @warning: Use this decorator only after locking.ssynchronized

    Example::
        @locking.ssynchronized(_LOCK)
        @_RequireOpenQueue
        def Example(self):
            pass
    """
    def wrapper(self, *args, **kwargs):
        # pylint: disable=W0212
        # NOTE: plain `assert` — this check disappears under `python -O`.
        assert self._queue_filelock is not None, "Queue should be open"
        return fn(self, *args, **kwargs)
    return wrapper
fe1b79b33e092bc5f1e5a4afe78f3ec23b2f7102
13,485
def parity(self, allow_rescaling_flag=True):
    """
    Returns the parity ("even" or "odd") of an integer-valued quadratic
    form over `ZZ`, defined up to similitude/rescaling of the form so that
    its Jordan component of smallest scale is unimodular. After this
    rescaling, we say a form is even if it only represents even numbers,
    and odd if it represents some odd number.

    If the 'allow_rescaling_flag' is set to False, then we require that
    the quadratic form have a Gram matrix with coefficients in `ZZ`, and
    look at the unimodular Jordan block to determine its parity. This
    returns an error if the form is not integer-matrix, meaning that it
    has Jordan components at `p=2` which do not have an integer scale.

    We determine the parity by looking for a 1x1 block in the 0-th
    Jordan component, after a possible rescaling.

    INPUT:
        self -- a quadratic form with base_ring `ZZ`, which we may
        require to have integer Gram matrix.

    OUTPUT:
        One of the strings: "even" or "odd"

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 3, 2]); Q
        Quadratic form in 3 variables over Integer Ring with coefficients:
        [ 4 -2 0 ]
        [ * 2 3 ]
        [ * * 2 ]
        sage: Q.parity()
        'even'

    ::

        sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 3, 1]); Q
        Quadratic form in 3 variables over Integer Ring with coefficients:
        [ 4 -2 0 ]
        [ * 2 3 ]
        [ * * 1 ]
        sage: Q.parity()
        'even'

    ::

        sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 2, 2]); Q
        Quadratic form in 3 variables over Integer Ring with coefficients:
        [ 4 -2 0 ]
        [ * 2 2 ]
        [ * * 2 ]
        sage: Q.parity()
        'even'

    ::

        sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 2, 1]); Q
        Quadratic form in 3 variables over Integer Ring with coefficients:
        [ 4 -2 0 ]
        [ * 2 2 ]
        [ * * 1 ]
        sage: Q.parity()
        'odd'
    """
    ## Deal with 0-dim'l forms
    if self.dim() == 0:
        return "even"

    ## Identify the correct Jordan component to use.
    # Each entry of Jordan_list is a (scale power, unimodular block) pair.
    Jordan_list = self.jordan_blocks_by_scale_and_unimodular(2)
    scale_pow_list = [J[0] for J in Jordan_list]
    min_scale_pow = min(scale_pow_list)
    if allow_rescaling_flag:
        # Rescale so the smallest-scale component becomes unimodular.
        ind = scale_pow_list.index(min_scale_pow)
    else:
        if min_scale_pow < 0:
            raise TypeError("Oops! If rescaling is not allowed, then we require our form to have an integral Gram matrix.")
        # NOTE(review): raises ValueError if no component of scale 0 exists
        # (i.e. every scale power is > 0) — presumably intended; confirm.
        ind = scale_pow_list.index(0)

    ## Find the component of scale (power) zero, and then look for an odd dim'l component.
    J0 = Jordan_list[ind]
    Q0 = J0[1]

    ## The lattice is even if there is no component of scale (power) 0
    # NOTE(review): this check looks unreachable — J0 is always an entry of
    # Jordan_list here, never None, and Q0 was already taken from it above.
    if J0 is None:
        return "even"

    ## Look for a 1x1 block in the 0-th Jordan component (which by
    ## convention of the local_normal_form routine will appear first).
    if Q0.dim() == 1:
        return "odd"
    elif Q0[0,1] == 0:
        # First variable is diagonal (decoupled) => a 1x1 block exists.
        return "odd"
    else:
        return "even"
12c68f6dfb447cd88d59367ad1fed9c2ba9aedde
13,487
import re


def _get_pcode_ids(pcode):
    """
    Pull out the function and variable names from the given p-code
    disassembly.

    pcode - (str) The p-code disassembly.

    return - (set) The set of function and variable names.
    """
    # Pass 1: collect candidate IDs from the "Identifiers:" section and
    # capture the raw instruction listing that follows "Line #...".
    ids = set()
    in_id_section = False
    skip = False
    instructions = None
    for line in pcode.split("\n"):

        # Should we skip this line?
        if skip:
            skip = False
            continue

        # Is this the start of the ID section?
        if line == "Identifiers:":
            in_id_section = True
            # Skip the next blank line.
            skip = True
            continue

        # Is this the start of the instruction disassembly?
        if line.startswith("Line #") and (instructions is None):
            # Start saving instructions.
            instructions = ""
            continue

        # Is this an instruction?
        if instructions is not None:
            instructions += line + "\n"
            continue

        # Are we saving IDs?
        if in_id_section:
            # ID lines look like "<index>: <name>".
            if ":" in line:
                curr_id = line[line.index(":") + 1:].strip()
                ids.add(curr_id)
                continue
            # Done with ID section.
            in_id_section = False

    # These IDs seem to appear in the p-code and not necessarily in
    # the VBA source code. Filter them out.
    common_ids = {
        "Word",
        "VBA",
        "Win16",
        "Win32",
        "Win64",
        "Mac",
        "VBA6",
        "VBA7",
        "Project1",
        "stdole",
        "VBAProject",
        "Excel",
        "Project",
        "ThisDocument",
        "_Evaluate",
        "Normal",
        "Office",
        "Add",
        "MSForms",
        "UserForm",
        "Document",
    }

    # Pass 2: keep only IDs that actually appear in the instructions.
    result = set()
    for curr_id in ids:

        # Skip IDs that are obviously not used or are common.
        if (((instructions is not None) and (curr_id not in instructions))
                or (curr_id in common_ids)
                or (curr_id.startswith("_B_var_"))):
            continue

        # Make sure the ID string is not embedded in some other string.
        if instructions is not None:
            # BUG FIX: the ID must be regex-escaped before being embedded in
            # the pattern. Previously IDs containing metacharacters (e.g.
            # '(' or '+') either raised re.error or silently failed to match
            # and were dropped.
            # NOTE(review): the surrounding '.'s require one character on
            # each side, so an ID at the very start of the instruction text
            # will not match — preserved from the original behavior.
            pat = "." + re.escape(curr_id) + "."
            strs = re.findall(pat, instructions)
            keep = False
            for curr_str in strs:
                # Keep only occurrences delimited by non-alphanumerics on
                # both sides (a standalone token, not part of another name).
                if (not curr_str[0].isalnum()) and (not curr_str[-1].isalnum()):
                    keep = True
                    break
            if not keep:
                continue

        # It looks like some IDs in the p-code have underscores added to the
        # prefix or suffix of the name. Strip those off so we can properly
        # match the IDs in the VBA.
        curr_id = curr_id.strip("_")

        # This is a valid ID. Save it.
        result.add(curr_id)

    # Return the function names and variables.
    return result
84223a944a1f20b4e324748349fdf1d83d1ebc77
13,488
from pathlib import Path


def strings_to_paths(strings: list[str]) -> list[Path]:
    """Convert a list of filename strings into ``pathlib.Path`` objects."""
    return list(map(Path, strings))
660e5f0fe72f32a4c4f2b218386e4c640c038846
13,489
import argparse


def get_training_args():
    """Build the command-line argument parser for train.py.

    Returns:
        argparse.ArgumentParser: parser with a positional ``data_dir`` and
        the optional training hyper-parameter flags.
    """
    parser = argparse.ArgumentParser(description='Training Image Classifier')
    parser.add_argument('data_dir', action='store')
    parser.add_argument('--save_dir', action='store', dest='save_dir',
                        default='.')
    parser.add_argument('--arch', action='store', dest='arch')
    parser.add_argument('--learning_rate', action='store', dest='lr',
                        type=float, default=0.001)
    # NOTE(review): 'hidden_uniits' looks like a typo for 'hidden_units',
    # but downstream code reads args.hidden_uniits, so the dest is kept.
    parser.add_argument('--hidden_units', action='store', dest='hidden_uniits',
                        type=int, default=4096)
    parser.add_argument('--epochs', action='store', dest='epochs',
                        type=int, default=1)
    parser.add_argument('--gpu', action='store_true', dest='gpu',
                        default=False)
    return parser
04dd1c9c78773fe8214dbff73dd758d16a9fd317
13,490
def three_sum_closest(nums, target):
    """Return the sum of three elements of *nums* closest to *target*.

    Sort + two pointers; worst case O(n^2). Note: sorts *nums* in place.
    """
    nums.sort()
    best = sum(nums[:3])
    for first in range(len(nums)):
        lo, hi = first + 1, len(nums) - 1
        while lo < hi:
            candidate = nums[first] + nums[lo] + nums[hi]
            if abs(candidate - target) < abs(best - target):
                best = candidate
            if candidate < target:
                # need a larger value: advance the left pointer
                lo += 1
            elif candidate > target:
                # need a smaller value: retreat the right pointer
                hi -= 1
            else:
                # exact match, cannot do better
                return best
    return best
8114a5b1ccd8fec0782f1757c69c183411ba1966
13,491
def min_required_char(text: str) -> int:
    """NAIVE VERSION! Minimum characters to make a string palindromic.

    NOTE: The only operation allowed is to insert characters at the
    beginning of the string; returns how many insertions are needed.

    Runtime: O(n^2)

    Args:
        text (str): given string

    Returns:
        int: minimum number of required characters (0 for empty input)
    """
    # BUG FIX: removed leftover debug print() calls that polluted stdout
    # on every mismatch; logic is unchanged.
    if not text:
        return 0

    left = 0
    right = len(text) - 1
    sliding_window = 2
    while left <= right:
        if text[left] != text[right]:
            # Mismatch: restart the scan against a suffix shortened by one
            # more character (sliding_window grows each failed attempt).
            right = len(text) - sliding_window
            left = 0
            sliding_window += 1
        else:
            right -= 1
            left += 1
    # sliding_window started at 2, so the insert count is the overshoot.
    return sliding_window - 2
7c2b90c9f0bfebb0de96737eac1c84729172123e
13,492
def meta(str1, str2, ratios, weights):
    """A meta ratio function. Returns a weighted meta ratio.

    The Wiesendahl ratio is a meta ratio which combines a weighted
    ratio of the given ratio functions.

    Args:
        str1 (str): first string
        str2 (str): second string
        ratios (list(function(str, str) -> float)): ratio functions
        weights (list(float)): weight applied to each corresponding function

    Returns:
        float: the combined and weighted meta ratio
    """
    weighted_sum = 0.0
    total_weight = 0
    for idx, ratio_fn in enumerate(ratios):
        weighted_sum += ratio_fn(str1, str2) * weights[idx]
        total_weight += weights[idx]
    return weighted_sum / float(total_weight)
b694216cce7b78e15065788497985416537ea95c
13,495
import warnings


def get_unique_schema_name(components, name, counter=0):
    """Generate a name unique among the schemas already in the spec.

    Appends an incrementing number to *name* until it no longer clashes
    with an existing schema name.

    :param Components components: instance of the components of the spec
    :param string name: the name to use as a basis for the unique name
    :param int counter: recursion depth (suffix currently being tried)
    :return: the unique name
    """
    if name not in components._schemas:
        return name
    if counter == 0:
        # First collision only: warn the user once per base name.
        warnings.warn(
            "Multiple schemas resolved to the name {}. The name has been modified. "
            "Either manually add each of the schemas with a different name or "
            "provide a custom schema_name_resolver.".format(name),
            UserWarning,
        )
    else:
        # Drop the previously-tried numeric suffix before bumping it.
        name = name[: -len(str(counter))]
    counter += 1
    return get_unique_schema_name(components, name + str(counter), counter)
68086fc7a8e523322f5a1745996b6dc8056833a1
13,496
import os


def dirname(hdfs_path):
    """
    Return the directory component of ``hdfs_path``.
    """
    head, _tail = os.path.split(hdfs_path)
    return head
7b1a8798d915feeb95f00692de6d7e6884cfc656
13,501
import os


def change_ext(path_src, ext_dst):
    """Replace the extension of *path_src* with *ext_dst*.

    *ext_dst* is expected to start with '.'.
    """
    assert ext_dst[0] == '.'
    fn_dir, fn_name = os.path.split(path_src)
    fn_base, _fn_ext = os.path.splitext(fn_name)
    return os.path.join(fn_dir, fn_base + ext_dst)
8ccf470f2ae02192782ebd9b339e73ffe3d28813
13,502
def _nobarrier(ts_dct):
    """Determine whether a reaction is barrierless.

    A reaction is treated as barrierless when its class string marks it
    as both 'radical radical' and low-spin.

    :param ts_dct: transition-state dict; only the 'class' string is read
    :return: True if the reaction is barrierless
    """
    # BUG FIX: removed leftover debug print('cla', ...) that polluted stdout.
    rxn_class = ts_dct['class']
    rad_rad = 'radical radical' in rxn_class
    low_spin = 'low' in rxn_class
    return rad_rad and low_spin
393831d8b0af5085f8ff0f42d54bf8d14faca680
13,503
def create_args_string(num):
    """Generate *num* SQL placeholders joined as '?, ?, ?'."""
    return ", ".join(["?"] * num)
f9d52affe17c9dfb138338a3258d2d21dbd12c6e
13,504
import torch


def pad_collate_func(batch):
    """
    Collate function (``collate_fn=pad_collate_func``) for a pytorch
    DataLoader that right-pads every sequence in the batch to the length
    of the longest one.
    """
    sequences = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    padded = torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
    # stack gives (B, 1); index [:, 0] to reduce to shape (B,)
    targets = torch.stack(labels)[:, 0]
    return padded, targets
89d4f2c2adb7295457afe7f6a3235bbc4d9c8155
13,506
import re


def parse_value(line, key_word):
    """Strip the first occurrence of *key_word* (a regex pattern) from
    *line* and return the remainder with surrounding whitespace removed."""
    remainder = re.sub(key_word, '', line, 1)
    return remainder.strip()
eb093da1577f10f9f7a271985b62b35ff3bf906e
13,509
def calc_pr(positive, proposal, ground):
    """Compute precision and recall.

    :param positive: number of positive proposals
    :param proposal: number of all proposals
    :param ground: number of ground truths
    :return: (precision, recall), or (0, 0) when either denominator is zero
    """
    if proposal and ground:
        return positive / proposal, positive / ground
    return 0, 0
277456d2c7c5d4352b925931342030663f1a541b
13,510
from typing import Any
import random


def getKeyByWeights(array: dict, item: int = 0) -> Any:
    """
    Returns a weighted random key from *array*.

    :param array: dict[Any: int | float] mapping keys to weights
    :param item: index into the (single-element) choices result
    :return: the selected key
    """
    keys = list(array.keys())
    weights = list(array.values())
    return random.choices(keys, weights=weights)[item]
8f1d474c97466407ff643abce1ea0b12b3ebd951
13,512
def check_bee_nectar_eq_hive_nectar(landed_bees, game):
    """
    Check that the total nectar stored in all hives equals the total
    nectar carried by bees that have landed.

    returns: True if they are the same
    """
    hive_total = sum(
        hive.nectar
        for board in game.boards
        for hive in board.hives
    )
    bee_total = sum(
        bee.nectar
        for board_bees in landed_bees
        for bee in board_bees.values()
    )
    return hive_total == bee_total
86f8045c5639ca73edad7273e58bbc5fbf112220
13,513
def encrypt(plaintext, key):
    """Caesar-shift each 7-bit ASCII character of *plaintext* by *key*,
    wrapping around modulo 128. Non-ASCII characters pass through unchanged.

    :param plaintext: string to encrypt
    :param key: integer shift
    :return: encrypted string
    """
    out = []
    for ch in plaintext:
        code = ord(ch)
        if code > 127:
            # not an ASCII character: leave it untouched
            out.append(ch)
            continue
        shifted = code + key
        # Wrap values that fall outside the 0..127 range.
        if shifted > 127:
            shifted -= 128
        elif shifted < 0:
            shifted += 128
        out.append(chr(shifted))
    return "".join(out)
855e941e73cf15b00d3c2fa94eefaaf3c3ad31a4
13,514
def solucion_c(imc: float) -> str:
    """Return the weight category for a body-mass index.

    :param imc: body-mass index
    :return: category name (in Spanish), e.g. 'saludable'
    """
    # BUG-PRONE IDIOM FIX: the original built a dict whose keys were the
    # boolean results of all range tests and indexed it with True — it only
    # worked because True/False key collisions kept the single matching
    # entry. Replaced with an explicit, ordered threshold chain; behavior
    # is identical for every input.
    if imc < 16:
        return "criterio de ingreso hospitalario"
    if imc < 17:
        return "infrapeso"
    if imc < 18:
        return "bajo peso"
    if imc < 25:
        return "saludable"
    if imc < 30:
        return "sobrepeso"
    if imc < 35:
        return "sobrepeso crónico"
    if imc < 40:
        return "sobrepeso premórbida"
    return "obesidad mórbida"
ca05b9b7af9df3237e8aa1170638e7e695e6bcad
13,515
import re, string


def clean_text(orig_text, verbose=False):
    """Prepare text for NLP by removing problematic characters.

    Adapted from code kindly provided by Dr. Brian Powell at WVU.
    Applies a fixed pipeline: strip URLs, @-mentions, punctuation,
    line breaks, non-ASCII characters, and redundant spaces.
    """
    # (label, transform) pipeline applied in order; labels are printed in
    # verbose mode exactly as before.
    steps = [
        ("REMOVE URLS",
         lambda t: re.sub(r"http\S+", "", t)),
        ("REMOVE AT SIGNS",
         lambda t: re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z0-9-_]+)", "", t)),
        ("REMOVE PUNCTUATION",
         lambda t: t.translate(str.maketrans(dict.fromkeys(string.punctuation)))),
        ("REPLACE LINEBREAKS",
         lambda t: re.sub(r"(\r?\n|\r)", " ", t)),
        ("STRIP NON-ASCII",
         lambda t: re.sub(r"[^\x00-\x7F]+", "", t)),
        ("STRIP EXTRA SPACES",
         lambda t: re.sub(' +', ' ', t)),
    ]

    if verbose:
        print("ORIGINAL")
        print(orig_text)
    result = orig_text
    for label, transform in steps:
        if verbose:
            print(label)
        result = transform(result)
        if verbose:
            print(result)
    if verbose:
        print()
    return result
1ac8b6baff72fc72db748a699975da818781f215
13,516
def splitter(model):
    """
    Split model parameters into two groups (one per linear layer) so each
    group can be fine-tuned with different hyper-parameters.
    """
    params = list(model.parameters())
    first_layer = params[:2]   # weights and biases of the first linear layer
    second_layer = params[2:]  # weights and biases of the second linear layer
    return [first_layer, second_layer]
2ac16b536bd50884c50ade3fb67f6ca43a16799d
13,517
def get_setup_fabric_dirs_string():
    """
    Return the shell commands required to set up the fabric directories.

    This is not in the env, because modifying this is likely to break
    FabSim in most cases. It is kept in a separate function so the string
    can be appended to existing commands, reducing performance overhead.
    """
    dirs = ("$config_path", "$results_path", "$scripts_path")
    return "; ".join("mkdir -p " + d for d in dirs)
a73f3da5b7c20c588f71320f0089066ce54bf93e
13,519
def get_arch(os_list):
    """Decide which architectures' packages must be downloaded based on
    the OS identifier strings in *os_list*.

    :return: "aarch64" or "x86_64" when exactly one architecture appears;
        otherwise the tuple ("x86_64", "aarch64") (both, or neither, found).
    """
    has_arm = any("aarch64" in entry for entry in os_list)
    has_x86 = any("x86_64" in entry for entry in os_list)
    if has_arm and not has_x86:
        return "aarch64"
    if has_x86 and not has_arm:
        return "x86_64"
    # Both present — or neither: fall back to downloading both.
    return ("x86_64", "aarch64")
1522db842d971a30fb54c05919f98da83aaf1d0a
13,521
def getOptions():
    """Generate default knowledge-base and rule-file locations (URLs)."""
    kb_base = "https://raw.githubusercontent.com/chapmanbe/pyConTextNLP/master/KB/"
    return {
        'lexical_kb': [kb_base + "lexical_kb_05042016.tsv"],
        'domain_kb': [kb_base + "critical_findings.tsv"],
        "schema": kb_base + "schema2.csv",
        "rules": kb_base + "classificationRules3.csv",
    }
ecd193161645419b241679b2496f76d26e0468fd
13,522
def checkIPIsValid(ip):
    """
    Check whether *ip* is usable with Qt's QHostAddress.

    The IP can be either an IPv4 dotted-quad address or the string
    "localhost" with possible capital letters anywhere. IPv6 is not
    supported for now.

    Args:
        ip: A str with the IP address to validate.

    Returns:
        True if the address is usable, False otherwise.
    """
    if ip.upper() == "LOCALHOST":
        return True
    numbers = ip.split(".")
    if len(numbers) != 4:
        return False
    try:
        return all(0 <= int(num) <= 255 for num in numbers)
    except ValueError:
        # BUG FIX: non-numeric octets such as "a.b.c.d" previously raised
        # ValueError out of int(); they are simply invalid addresses.
        return False
8b3f0a5f7ae079a0461d45efcafd0f8ce3e859ce
13,523
import math


def make_bonds(pdb1):
    """Create a bonded-pair list based on overlap of vdW radii.

    Atoms i and j are bonded when dist(i, j) + margin < radius_i + radius_j,
    i.e. their van der Waals spheres overlap by more than the margin.
    Reads pdb1.natom, pdb1.coords (first 3 components used) and pdb1.radius.
    """
    margin = 1.3
    bond_list = []
    for i in range(pdb1.natom):
        for j in range(i + 1, pdb1.natom):
            dist2 = sum(
                (pdb1.coords[i][k] - pdb1.coords[j][k]) ** 2
                for k in range(3)
            )
            if math.sqrt(dist2) + margin < pdb1.radius[i] + pdb1.radius[j]:
                bond_list.append([i, j])
    return bond_list
4ca098e15f912c7d7658562992c502c2271cc1bd
13,525
def parse_id(text):
    """
    Parse and return the <job>:<id> string.

    Args:
        text    String to parse

    Returns:
        This node's job name, this node's id

    Raises:
        ValueError: if no ':' separator is present, the id is not an
            integer, or the id is negative.
    """
    sep = text.rfind(":")
    # BUG FIX: str.rfind returns -1 when the separator is missing; the old
    # check `sep < -1` could never be true, so malformed input slipped
    # through to the int() call below with a confusing error.
    if sep < 0:
        raise ValueError("Invalid ID format")
    nid = int(text[sep + 1:])
    if nid < 0:
        raise ValueError("Expected non-negative node ID")
    return text[:sep], nid
23dbc417abf7fb799da78e63acf18b2d0ffc450f
13,526
def read_DF_from_path(resultFile):
    """Read daylight factor values from a radiance .res result file.

    Each line holds one float; values above 100 are clamped to 100.

    :param resultFile: path to the .res file
    :return: list of daylight-factor values
    """
    result = []
    # BUG FIX: the file handle was never closed and the `resultFile`
    # parameter was shadowed by the open file object; use a context
    # manager instead.
    with open(resultFile, "r") as handle:
        for line in handle:
            res = float(line)
            if res > 100:
                res = 100
            result.append(res)
    return result
583ec1892d26038fdd43d9acf152a9b7c65dc364
13,527
def get_value(source, steps):
    """Walk *source* (nested dicts/lists) along *steps* and return the value.

    Each step is a string; a purely numeric step is treated as a list
    index, everything else as a dict key.

    :param source: nested dict/list structure to read from
    :param steps: list of step strings describing the lookup path
    :return: the resolved value, or None when any step fails
    """
    try:
        for step in steps:
            # Numeric step -> list index, otherwise dict key.
            key = int(step) if step.isdigit() else step
            source = source[key]
    # BUG FIX: list indexing raises IndexError and non-subscriptable
    # values raise TypeError; the original only caught KeyError although
    # its stated intent was "return None on any lookup failure".
    except (KeyError, IndexError, TypeError):
        return None
    return source
bccaea5544dd685b028a54b5318b7a7c930042b0
13,529
import torch


def quat_to_rot(rot, conv='wxyz', device='cpu'):
    """Convert a batch of quaternions into rotation matrices.

    Args:
        rot: (N, 4) tensor of quaternions.
        conv (str, optional): component ordering of *rot*, 'wxyz' or
            'xyzw'. Defaults to 'wxyz'.
        device: device on which the output matrices are allocated.

    Raises:
        Exception: if *conv* is not a known convention.

    Returns:
        (N, 3, 3) tensor of rotation matrices.
    """
    if conv == 'wxyz':
        w, x, y, z = rot[:, 0], rot[:, 1], rot[:, 2], rot[:, 3]
    elif conv == 'xyzw':
        x, y, z, w = rot[:, 0], rot[:, 1], rot[:, 2], rot[:, 3]
    else:
        raise Exception('undefined quaternion convention')

    # Pairwise products used by the standard quaternion-to-matrix formula.
    xx, yy, zz, ww = x * x, y * y, z * z, w * w
    xy, zw = x * y, z * w
    xz, yw = x * z, y * w
    yz, xw = y * z, x * w

    mat = torch.empty((rot.shape[0], 3, 3), device=device)
    mat[:, 0, 0] = xx - yy - zz + ww
    mat[:, 1, 0] = 2 * (xy + zw)
    mat[:, 2, 0] = 2 * (xz - yw)

    mat[:, 0, 1] = 2 * (xy - zw)
    mat[:, 1, 1] = - xx + yy - zz + ww
    mat[:, 2, 1] = 2 * (yz + xw)

    mat[:, 0, 2] = 2 * (xz + yw)
    mat[:, 1, 2] = 2 * (yz - xw)
    mat[:, 2, 2] = - xx - yy + zz + ww
    return mat
13ea4ec693d894fc253d416f9a600b299da6d87c
13,530
def v2_serving_handler(context, event, get_body=False):
    """hook for nuclio handler(): dispatch *event* to the model server."""
    if not context._server.http_trigger:
        # Non-HTTP triggers report an unsupported path; normalize it.
        event.path = "/"
    return context._server.run(event, context, get_body)
63397eaf6d4d009ea6eff3332ba7c611c2f8202a
13,532
import os
import sys


def get_gmake():
    """
    Locate GNU make: honour $MAKE if set, otherwise 'gmake' on BSD
    platforms and plain 'make' everywhere else.
    """
    fallback = "gmake" if "bsd" in sys.platform else "make"
    return os.environ.get("MAKE", fallback)
34d2e46936aafd9406225dde1de32e689ce2a774
13,533
def linear_search_iterative(elements, key):
    """
    Return the first index i where elements[i] equals *key*; if there is
    no such index, return the string "Not Found".
    """
    return next(
        (pos for pos, item in enumerate(elements) if key == item),
        "Not Found",
    )
2b856762bccd355ecd847ba318d50a678c8aa89a
13,534
def substr_match(a, b):
    """Return True when *a* is a substring of *b*; False if either is None."""
    if a is None or b is None:
        return False
    return a in b
9b98d14b6ec5f2ab433eea92d377b5e1477fef64
13,535