content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def rename(file, newFile):
    """Rename a file.

    :param str file: File to rename
    :param str newFile: New filename, including the full path
    :returns: True if successful, False otherwise
    :rtype: bool

    .. note:: Moving files between different filesystems (eg. local to
        nfs://) is not possible on all platforms. You may have to do it
        manually by using the copy and deleteFile functions.

    example::

        success = xbmcvfs.rename(file, newFileName)
    """
    try:
        # OSError is the Python 3 name; EnvironmentError is a legacy alias.
        os.rename(file, newFile)
    except OSError:
        return False
    return True
3c751b2d567488f23702bc488260f77807808139
33,446
def expected_peer_features():
    """Return the expected peer features hexstring for this configuration."""
    # NOTE(review): the original comment said "features 1, 3, 7, 9, 11, 13,
    # 15 and 17", but the literal 0x02aaa2 actually has bit 5 set rather
    # than bit 3 -- confirm which is authoritative.
    features_hex = "02aaa2"
    return features_hex
a26aa55408daab2fcb3226e7b764507bfcc478ab
33,447
import torch


def process_label(device, label, cfg):
    """
    :desc: turn the label into one-hot format
    """
    # Assumes label is (batch, 1, w, h) integer class indices so that
    # scatter_ along dim 1 can place one-hot values -- TODO confirm.
    batch, channel, w, h = label.size()
    # 20 output channels are hard-coded; presumably cfg.NUM_CLASSES == 19
    # with one extra channel for clamped/ignore labels -- verify against
    # the config used by callers.
    pred1 = torch.zeros(batch, 20, w, h).to(device)
    # Return a tensor of elements selected from either :attr`x` or :attr:`y`,
    # depending on :attr:`condition
    # Out-of-range labels (>= cfg.NUM_CLASSES) are replaced with
    # cfg.NUM_CLASSES itself, i.e. routed to the last channel.
    label_trunk = torch.where(cfg.NUM_CLASSES > label, label, torch.Tensor([cfg.NUM_CLASSES]).to(device))
    # place 1 on label place (replace figure > 19 with 19)
    pred1 = pred1.scatter_(1, label_trunk.long(), 1)
    return pred1
d1699888f2b2992151e21fd9239b2a29c2c9e9e3
33,448
import os


def is_running_in_codebuild():
    """Report whether we appear to be running inside AWS CodeBuild.

    Checks whether $CODEBUILD_SRC_DIR is set (and non-empty), based on the
    assumption that it is only set in the CI, which is currently managed
    by CodeBuild.

    :return: bool
    """
    return "CODEBUILD_SRC_DIR" in os.environ and bool(os.environ["CODEBUILD_SRC_DIR"])
7d3b689d6d1521d8d736fb2ea65002199e6ab720
33,449
def result_pks(response, cast=None):
    """Return ids from a wagtail admin search result.

    :param response: webtest response
    :param cast: callable used to cast each pk, defaults to int
    :return: list of ids
    """
    caster = cast or int
    rows = response.lxml.xpath('.//tr[@data-object-pk]/@data-object-pk')
    return list(map(caster, rows))
c46373733cf1451ccb7dbadd842726445112c9f2
33,450
from typing import Tuple


def format_xml_property(
    metric_type: str, summary_method: str, metric_value: float
) -> Tuple[str, float]:
    """Format a metric summary into an XML name-value tuple in the form of
    (metric_type[summary_method], metric_value).

    ex: (cpu_util[avg], 88.23)
    """
    name = "{}[{}]".format(metric_type, summary_method)
    return name, metric_value
f26bfdefae85e220b9f4285a1122501ee7b7329a
33,451
import re


def split(text):
    """Split text into arguments, keeping double-quoted multi-word
    arguments together as one argument.

    :param str text: text to split
    :return: list of argument strings
    """
    # Cleanup: collapse runs of whitespace into single spaces.
    # (Raw string; the original '\s+' relied on an invalid escape sequence.)
    text = re.sub(r'\s+', ' ', text.strip())

    space, quote = ' ', '"'
    parts = []
    part, quoted = '', False
    for char in text:
        # '==' instead of 'is': identity comparison of strings only worked
        # by accident of CPython's small-string interning.
        if char == quote:
            if quoted:
                # Closing quote: emit the quoted argument.
                parts.append(part.strip())
                part = ''
            quoted = not quoted
            continue
        if char == space and not quoted:
            # Argument separator outside quotes.
            if part:
                parts.append(part)
                part = ''
            continue
        # Regular character (or a space inside quotes).
        part += char
    if part:
        parts.append(part.strip())
    return parts
061f76fefc7888b16cda87a1f6f4219bf5a74af6
33,452
def get_conn_args(database="mydb", user="user", password="password", host="mydb", port="5432"):
    """Get arguments for the connection to the PostgreSQL database server.

    Parameters
    ----------
    database: str
        Database name (default: "mydb")
    user: str
        User name (default: "user")
    password: str
        Password (default: "password")
    host: str
        Host (default: "mydb"); "mydb" in docker, "localhost" locally
    port: str
        Port (default: "5432")

    Returns
    -------
    dict
        Dictionary for the connection to the PostgreSQL database server
    """
    return {
        "database": database,
        "user": user,
        "password": password,
        "host": host,
        "port": port,
    }
e6898449371a477bdc103e8011ccf53f922a7705
33,454
def thresholdForIdentity(identity, colors):
    """Get the best identity threshold for a specific identity value.

    @param identity: A C{float} nucleotide identity.
    @param colors: A C{list} of (threshold, color) tuples, where threshold
        is a C{float} and color is a C{str} to be used as a cell
        background. This is as returned by C{parseColors}.
    @return: The first C{float} threshold that the given identity is at
        least as big as.
    """
    matches = (threshold for threshold, _ in colors if identity >= threshold)
    try:
        return next(matches)
    except StopIteration:
        raise ValueError('This should never happen! Last threshold is not 0.0?')
34d4f84ae7339fda68c2bf3fbaa1a1ef7cbd6e95
33,455
def sizes_valid(sizes):
    """Validate the subnet masks.

    Every size must be an integer between 16 and 28 inclusive.
    """
    for size in sizes:
        if not isinstance(size, int) or not 16 <= size <= 28:
            return False
    return True
3ca0daed72be1c1549f7a130ecd6e9a3401dedbf
33,456
import os


def auto_delete_file_on_store_change(sender, instance, **kwargs):
    """Delete the old file from the storage folder when a Store instance
    is modified, if the new file differs from the old one.

    Intended as a Django signal receiver: ``sender`` is the model class
    and ``instance`` the model instance being saved.
    """
    # Unsaved instance: nothing on disk to clean up yet.
    if not instance.pk:
        return False
    try:
        old_file = sender.objects.get(pk=instance.pk).file_path
    except sender.DoesNotExist:
        # Row no longer exists; nothing to delete.
        return False
    new_file = instance.file_path
    if not old_file == new_file:
        # Django > 2.x
        # instance.file_path.storage.delete(old_file.name)
        # Django 1.11
        # NOTE(review): assumes a local filesystem storage backend --
        # `old_file.path` raises for remote storages; confirm deployment.
        if os.path.isfile(old_file.path):
            os.remove(old_file.path)
2eadfe405bca541719d02d20afd4795505c5d68b
33,457
def package_template_dict(request, package, *args, **kw):
    """Context vars suitable for templates that extend package-page.

    see datisca/templates/top.html

    See also
    --------
    models.package.get_available_tools
    models.tools.available
    """
    tools = package.get_available_tools(request.user)
    context = dict(*args, package=package, package_tools=tools, **kw)
    return context
662318170b5032ea5c5ffa8f38a45c6694c81192
33,458
def __prefixNumber(num, leading):
    """Prefix ``num`` with zeroes up to ``leading`` leading positions.

    The result is padded to ``int(leading) + 1`` characters, matching the
    original while-loop behaviour for non-negative inputs. ``str.zfill``
    additionally places the padding after a sign for negative numbers,
    which the manual loop got wrong ('0-5' instead of '-05').

    :param num: number (or numeric string) to pad
    :param leading: number of leading zero positions
    :return: zero-padded string
    """
    return str(num).zfill(int(leading) + 1)
55a3745a993ffd75b0186d918dbf2a2b74771f2e
33,460
def buildRestrict(file, flag):
    """Given a .ydk file, build a banlist dict from its contents.

    :param file: String, filepath to the .ydk
    :param flag: String, "Banned", "Limited" or "Semi". Each card is set
        to this flag's corresponding value (0, 1, 2); any other flag
        maps to 3.
    :return d: Dict mapping card id -> (card name, restriction value)
    """
    restrictions = {"Banned": 0, "Limited": 1, "Semi": 2}
    restrict = restrictions.get(flag, 3)
    d = {}
    with open(file) as f:
        for line in f:
            stripped = line.strip()
            # Skip blank, comment ('#') and section ('!') lines.
            # (The original tuple also contained '', and every string
            # startswith '' -- so the body never ran and the result was
            # always empty. That was the bug being fixed here.)
            if not stripped or stripped.startswith(('#', '!')):
                continue
            split = stripped.split(" ", 1)
            card_id = split[0].strip()
            if card_id not in d:
                name = split[1].rstrip() if len(split) == 2 else ""
                d[card_id] = (name, restrict)
    return d
3edb448f6a7e7703e7068bd9c9eade9eb7274b6d
33,462
import requests
from bs4 import BeautifulSoup


def scrap_page(slug):
    """Scrape the documentation page body text.

    :param slug: Documentation URL slug eg. 'the-lead-page'
    :return: Documentation page body text if it exists, otherwise None
    """
    page = requests.get(f"https://help.close.com/docs/{slug}")
    soup = BeautifulSoup(page.text, 'html.parser')
    matches = soup.select('#content-container')
    if matches:
        return matches[0].get_text()
c11279d6292d9a0f7171a8bc5ed94b1f5cdc363d
33,463
def gases(var_col):
    """The gas associated with each variable.

    The gas is the path segment immediately following 'Emissions' in a
    '|'-separated variable name.
    """
    def extract_gas(name):
        segments = name.split('|')
        return segments[segments.index('Emissions') + 1]

    return var_col.apply(extract_gas)
347b10ce2c630aa02a678b4ba79caee3ce72d5b4
33,466
def bound_protection(points, height, width):
    """Clamp point coordinates so they can index an array of the given size.

    :param points: integer ndarray of shape (n, 2) holding (x, y) pairs
    :param height: number of rows (valid y indices are 0..height-1)
    :param width: number of columns (valid x indices are 0..width-1)
    :return: the same array, clamped in place
    """
    # '>=' so a coordinate equal to width/height (one past the last valid
    # index) is also clamped; the original '>' left exactly those values
    # out of bounds.
    points[points[:, 0] >= width, 0] = width - 1  # x
    points[points[:, 1] >= height, 1] = height - 1  # y
    # points[points[:, 0] < 0, 0] = 0  # x
    # points[points[:, 1] < 0, 1] = 0  # y
    return points
ce3214dfb27751f5732d9121bb18871511d7e2b5
33,467
def stations_by_river(stations):
    """For Task1D: return a dictionary that maps rivers to the names of
    stations on that river."""
    rivers = {}
    for station in stations:
        rivers.setdefault(station.river, []).append(station.name)
    return rivers
f3fe723813552b6bdf40410c700aab7a66a7b894
33,468
import os


def getSubModulesAndPackages():
    """Return list of all modules and packages contained within the
    current package (the directory holding this file)."""
    found = []
    package_dir = os.path.dirname(__file__)
    for entry in os.listdir(package_dir):
        # Skip cache directories and similar non-module entries.
        if entry in ["__pycache__"]:
            continue
        if entry.endswith(".py"):
            # A .py file is assumed to be a module; drop the extension.
            found.append(entry[:-3])
        elif os.path.isdir(os.path.abspath(os.path.join(package_dir, entry))):
            # A directory is assumed to be a subpackage.
            found.append(entry)
    return found
151de8495ae169f02e7c819c8b63e70d50e53380
33,469
def Shift(xs, shift):
    """Adds a constant to a sequence of values.

    Args:
        xs: sequence of values
        shift: value to add

    Returns:
        sequence of numbers
    """
    shifted = []
    for value in xs:
        shifted.append(value + shift)
    return shifted
e3e3f8b32c0cc4633ef09bbd1dbb2ef197ed79e6
33,471
def decode_story(id2word, result):
    """Decode id tensors back into text stories.

    :param id2word: vocab mapping id -> word
    :param result: (batch_size, story_size, seq_length) tensor of ids
    :return: a list of stories; the size of the list is batch_size
    """
    batch_size, story_size, seq_length = result.size()
    stories = []
    for b in range(batch_size):
        words = []
        for s in range(story_size):
            for t in range(seq_length):
                token = result[b, s, t]
                # Token id 2 marks end-of-sentence: stop this sentence.
                if token == 2:
                    break
                words.append(id2word[int(token.item())])
        stories.append(''.join(' ' + w for w in words))
    return stories
0ad315bfde06904a1ca2590f32781361e75f1edd
33,472
def get_computed_response_parameter_number(response):
    """Extract the number of parameters from the Dialogflow response,
    fallback: 0.

    The original used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; only the attribute/length failures we
    actually expect are caught now.
    """
    try:
        return len(response.query_result.parameters)
    except (AttributeError, TypeError):
        return 0
4a469c2a543662d4b1826c3cba309d216db0e831
33,473
from typing import List


def sort_strings(terms: List[List[str]]):
    """To account for (anti)symmetric indices, just sort the strings
    representing the fields.

    For use in the function `check_remapping_on_terms`.
    """
    canonical = set()
    for interaction in terms:
        sorted_fields = ["".join(sorted(field)) for field in interaction]
        canonical.add(tuple(sorted(sorted_fields)))
    return canonical
6b1f10019f66142a5b26b8895d73f1aab03242b7
33,474
def config():
    """Return the sync configuration.

    Keys:
        wasabi_bwlimit: [rclone] Bandwidth limit in kBytes/s, or use
            suffix b|k|M|G or a full timetable.
        digitalocean_bwlimit: [rclone] Bandwidth limit in kBytes/s, or use
            suffix b|k|M|G or a full timetable.
    """
    return {
        "wasabi_bwlimit": "500K",
        "digitalocean_bwlimit": "500K",
    }
0f4bd3ac9e83cc7a1f75c009e2fdc74d2bbeb42d
33,475
from typing import Dict
from typing import Any


def _get_nested_metadata(dataset: Dict[str, Any], prefix: str) -> Dict[str, Any]:
    """Generate a metadata dictionary using flattened metadata keys.

    Args:
      dataset: dictionary containing the dataset keys and values. Keys are
        flattened.
      prefix: common prefix of the metadata fields.

    Returns:
      Nested dictionary with the episode metadata.
      If the dataset contains {'metadata/v1/v2': 1, 'metadata/v3': 2} and
      prefix='metadata', it returns {'v1': {'v2': 1}, 'v3': 2}.
      It assumes that the flattened metadata keys are well-formed.
    """
    episode_metadata: Dict[str, Any] = {}
    for full_key, value in dataset.items():
        if f'{prefix}/' not in full_key:
            continue
        # Drop the prefix segment, descend through intermediate segments,
        # and store the value at the leaf.
        *parents, leaf = full_key.split('/')[1:]
        node = episode_metadata
        for parent in parents:
            node = node.setdefault(parent, {})
        node[leaf] = value
    return episode_metadata
6c2e2b430cc8a977c5cf2136ad456f3f533afe22
33,478
def is_int(val):
    """Check whether ``val`` can be cast to a whole-number float.

    Returns False instead of raising for non-numeric strings (ValueError)
    and non-castable types such as None or lists (TypeError) -- the
    original only caught ValueError and crashed on the latter.
    """
    try:
        return float(val).is_integer()
    except (ValueError, TypeError):
        return False
c8f42e3cdbac723a79575b1d26b7b15c12f2866d
33,479
import tempfile


def open_tempfile():
    """Open a temporary file and remember it in the module-global ``file_``.

    The file is opened in 'w+' text mode with newline translation disabled
    and is NOT deleted on close (delete=False).
    """
    global file_
    handle = tempfile.NamedTemporaryFile('w+', newline='', delete=False)
    file_ = handle
    return handle
72317d651a292b4ffde68a720c399756e79688de
33,480
import base64


def str2b64(string: str) -> str:
    """Encode a string as base64.

    :param string: source string
    :return: base64-encoded string
    """
    raw = string.encode("utf8")
    encoded = base64.b64encode(raw)
    return encoded.decode()
2d6c0fc14df29426c64c91690db63bb49f5807da
33,481
def find_best_match_junction(tree, donor, accep, max_diff=20):
    """Find the junction in ``tree`` closest to (donor, accep).

    donor, accep -- both should be 0-based.

    Returns the single hit if it is within ``max_diff`` of both ends, the
    closest acceptable hit when there are several, or None when no
    acceptable hit exists.
    """
    hits = tree.find(donor, accep)
    if len(hits) == 0:
        return None
    if len(hits) == 1:
        hit = hits[0]
        if hit.start - donor > max_diff or hit.end - accep > max_diff:
            return None
        return hit
    # Multiple hits: keep those within max_diff, pick the one minimising
    # the total end-to-end distance.
    candidates = []
    for h in hits:
        if h.start - donor > max_diff or h.end - accep > max_diff:
            continue
        candidates.append((abs(h.start - donor) + abs(h.end - accep), h))
    if not candidates:
        # The original indexed diff[0] unconditionally here and raised
        # IndexError whenever every hit exceeded max_diff.
        return None
    candidates.sort(key=lambda pair: pair[0])
    return candidates[0][1]
ed177c48c7f046bcbe919f83b62381b9f71decd2
33,482
def vvo2max(vo2max):
    """Calculates velocity (kilometers/hour) at a specified VO2Max
    (mL/(kg*min)).

    args:
        vo2max (float): VO2Max, given in mL/(kg * min)

    Returns:
        float: kilometers / hour
    """
    CONVERSION_FACTOR = 3.5
    return vo2max / CONVERSION_FACTOR
a90c8fc20710782d991731324483d37e7cbf3a54
33,484
def _ones_translator(_input_element, place="more than ten"):
    """Takes a single-digit number as an input and converts it into a string
    corresponding to the Te Reo Māori word for that number.
    """
    Te_Reo_Numbers = ["kore", "tahi", "rua", "toru", "whā", "rima", "ono", "whitu", "waru", "iwa", "tekau"]
    if place == "more than ten":
        Te_Reo_Numbers[0] = ""
        Te_Reo_Numbers[1] = "kotahi"
        # Add spacing for tens placeholder
        # NOTE(review): this loop runs over indices 0..len-2, so the final
        # entry ("tekau") never receives a trailing space while index 0
        # does -- confirm this is intended and not an off-by-one.
        for i in range(len(Te_Reo_Numbers[1:])):
            Te_Reo_Numbers[i] = Te_Reo_Numbers[i] + " "
    if place == "ten":
        Te_Reo_Numbers[1] = ""
    if place == "ones":
        Te_Reo_Numbers[1] = "tahi"
    return Te_Reo_Numbers[_input_element]
136c525a8b0af654b35c5b511e4d4a50f871d548
33,485
import numbers


def get_dtype(item):
    """Attempt to get the datatype from an item.

    >>> get_dtype(1)
    'float32'
    >>> get_dtype(True)
    'bool'
    >>> get_dtype(1.1)
    'float32'
    >>> get_dtype([1])
    'float32'
    >>> get_dtype([[[1, 2, 3]]])
    'float32'
    >>> get_dtype(np.array([True, False, True], dtype=bool))
    'bool'
    """
    if hasattr(item, "dtype"):
        # numpy arrays / scalars expose their dtype directly.
        return item.dtype.name
    elif isinstance(item, bool):
        # bool must be tested before numbers.Real (bool subclasses int).
        return "bool"
    elif isinstance(item, str):
        return "str"
    elif isinstance(item, numbers.Real):
        return "float32"
    else:
        try:
            # Sequences: infer from the first element.
            return get_dtype(item[0])
        except (TypeError, IndexError, KeyError):
            # Not indexable, or empty. (The bare except: is replaced with
            # the exceptions item[0] can realistically raise.)
            return None
9f908527af2c508c331f692c86f214d30c6b2dc0
33,487
import torch


def quat_to_rmat(quaternion: torch.Tensor) -> torch.Tensor:
    """Converts quaternion(s) to rotation matrix.

    The quaternion should be in (w, x, y, z) format.

    Args:
        quaternion (torch.Tensor): a tensor containing a quaternion to be
          converted. The tensor can be of shape (*, 4).

    Return:
        torch.Tensor: the rotation matrix of shape (*, 3, 3).
    """
    if not isinstance(quaternion, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError(
            "Input must be a tensor of shape (*, 4). Got {}".format(
                quaternion.shape))
    # Unpack the normalized quaternion components.
    w, x, y, z = torch.chunk(quaternion, chunks=4, dim=-1)
    # Doubled components and their pairwise products (standard
    # quaternion-to-matrix expansion).
    x2, y2, z2 = 2.0 * x, 2.0 * y, 2.0 * z
    wx2, wy2, wz2 = x2 * w, y2 * w, z2 * w
    xx2, xy2, xz2 = x2 * x, y2 * x, z2 * x
    yy2, yz2 = y2 * y, z2 * y
    zz2 = z2 * z
    one = torch.tensor(1., device=quaternion.device)
    rot = torch.stack([
        one - (yy2 + zz2), xy2 - wz2, xz2 + wy2,
        xy2 + wz2, one - (xx2 + zz2), yz2 - wx2,
        xz2 - wy2, yz2 + wx2, one - (xx2 + yy2)
    ], dim=-1)
    return rot.view(quaternion.shape[:-1] + (3, 3))
83f86b41842843b316c2b9ca13f5c1d1eab38dd9
33,489
def citem2higher(citem):
    """This gets the higher representation of a given :class:``citem``,
    that is, a :class:``cinsn_t`` or :class:``cexpr_t``

    :param citem: a :class:``citem`` object
    :type citem: :class:``citem``
    """
    return citem.cexpr if citem.is_expr() else citem.cinsn
5055467250308a01cfc82ea980b63066cfdecb5b
33,491
def extract_img_annot(df, frame_num):
    """Extract the bounding-box annotation columns for one frame.

    Returns the BB_LEFT, BB_TOP, BB_WIDTH and BB_HEIGHT columns (as
    Series) of the rows whose FRAME equals ``frame_num``, plus the number
    of matching rows.
    """
    frame_rows = df.loc[df['FRAME'] == frame_num]
    count = frame_rows.shape[0]
    return (frame_rows['BB_LEFT'], frame_rows['BB_TOP'],
            frame_rows['BB_WIDTH'], frame_rows['BB_HEIGHT'], count)
d5c3fe706b3abda8529bf3828f2794262cef685b
33,492
def parse_modified(full_dict, ignored_keys=('#', '?')):
    """
    Extract 'staged' and 'modified' counts from Git status lines.

    Arguments
    ---------
    full_dict: dict
        full meta data dictionary
    ignored_keys: iterable
        keys that should not contribute towards the staged and modified
        counts (e.g., branch meta data or untracked files)

    Returns
    -------
    list
        a list of two counts: [num_staged, num_modified]
    """
    counts = [0, 0]
    for k in full_dict:
        if k not in ignored_keys:
            # Assumes each entry's second whitespace-separated token is a
            # two-character "XY" status field containing exactly one '.':
            # position 0 increments the staged count, position 1 the
            # modified count -- TODO confirm against the producer of
            # full_dict (looks like git status porcelain v2 output).
            values = [x.split()[1].index('.') for x in full_dict[k]]
            for v in values:
                counts[v] += 1
    return counts
d8d05a3afd5c57d8f2e10bb2992a0dfd1b7a0363
33,493
def price_query_filter(price_query_dict):
    """Ensure that certain required keys are available.

    Required keys: "period", "exchange".

    :raises TypeError: if the argument is not a dict
    :raises KeyError: if a required key is missing
    :returns: True when all required keys are present
    """
    if not isinstance(price_query_dict, dict):
        # The original message said "dict_list is not a list", which
        # described neither the argument nor the check being performed.
        raise TypeError("price_query_dict is not a dict. Please try again")
    required = ["period", "exchange"]
    present = price_query_dict.keys()
    for key in required:
        if key not in present:
            raise KeyError("An important key is not available {}".format(key))
    return True
379541af0b143790d2bef0d84cc9ff90e92fa871
33,495
from io import StringIO
import sys


def run_python(cmd, timeout=60):
    """Interactively interpret received python code.

    WARNING: this executes ``cmd`` with exec()/eval() -- arbitrary code
    execution. It must never be fed untrusted input. The ``timeout``
    parameter is currently unused.
    """
    try:
        try:
            # Capture anything the exec'd code prints to stdout.
            buffer = StringIO()
            sys.stdout = buffer
            exec(cmd)
            sys.stdout = sys.__stdout__
            out = buffer.getvalue()
        except Exception as error:
            # exec failed: report the exception text instead of output.
            out = error
        out = str(out).strip()
        if len(out) < 1:
            # Nothing was printed: retry as an expression and report its
            # value (or the eval error).
            try:
                out = "[eval]: "+str(eval(cmd))
            except Exception as error:
                out = "[eval]: "+str(error)
        else:
            out = "[exec]: "+out
    except Exception as python_exception:
        # Last-resort guard so the caller always gets a string back.
        out = "[X]: %s" % python_exception
    return out.strip()
c1472ec0752895a1530952c82be875f607e1dfb2
33,496
import re


def _add_folders_to_path(path_line, folders_to_add_list):
    """Ensures that the given list of folders are inside the given line
    (for the PATH environment variable).

    Args:
        path_line(str): line in /etc/environment for the PATH environment
            variable
        folders_to_add_list(list of str): folders that must be present in
            the PATH environment variable; assumed free of duplicates.

    Returns:
        (bool, str): whether the PATH line was modified, and the new PATH
            line containing every folder in `folders_to_add_list`.
    """
    PATH_STRING_TEMPLATE = "PATH=\"{folders}\""
    # Strip off surrounding quotes.
    match_obj = re.match(r"""^PATH=['"]*(.*)$""", path_line)
    if match_obj is None:
        # No parsable PATH assignment at all: build one from scratch.
        return (True, PATH_STRING_TEMPLATE.format(
            folders=":".join(folders_to_add_list)
        ),)
    # Strip trailing quotes separately. We cannot just add ['"]* to the
    # regex above because of greedy matching.
    existing_folders_line = re.sub(r"""['"]*$""", "", match_obj.group(1))
    folders = [
        folder for folder in existing_folders_line.split(":")
        if folder.strip() != ""
    ]
    seen = set(folders)
    modified = False
    for folder in folders_to_add_list:
        if folder not in seen:
            modified = True
            folders.append(folder)
            seen.add(folder)
    return (modified, PATH_STRING_TEMPLATE.format(
        folders=":".join(folders)
    ),)
d7d1940c4794f483eebcf6c70ae120de222c7034
33,498
def tuple_to_m_list(tup: tuple, c: str = '') -> str:
    """Return a string representation of a tuple to be used as an NBT
    list in Minecraft.

    Floats are rendered with '{:f}' (six decimal places); any other
    element type is rendered with str(). ``c`` is an optional per-element
    suffix.

    Fixes: uses isinstance instead of `type(...) is float` (so float
    subclasses format consistently) and no longer raises IndexError for
    an empty tuple.
    """
    if tup and isinstance(tup[0], float):
        return '[' + ', '.join('{:f}'.format(i) + c for i in tup) + ']'
    return '[' + ', '.join(str(i) + c for i in tup) + ']'
7d4346e288d7751a6e710b605aed0a1fc50521c7
33,499
def get_distance(cell1, cell2):
    """Calculates the distance between two cells."""
    delta = cell1.position - cell2.position
    return delta if delta >= 0 else -delta
e1845663c40a97bc6e4bcf56867cc9268632fa17
33,500
def as_numeric(df):
    """When a dataframe contains only numeric values, format them."""
    cleaned = df.dropna()  # remove fully empty lines
    formatted = cleaned.applymap('{:,.2f}'.format)  # two decimals + separators
    return formatted.replace('nan', '-')  # replace NaN with '-'
0bb32684e016c53827acfb0f54cd9607ac02aee6
33,501
import bisect


def find_index(array, x):
    """Locate the leftmost value exactly equal to x.

    Returns the index of the first element equal to ``x`` in the sorted
    ``array``. Raises ValueError -- now with a message; the original
    raised a bare ``ValueError`` with no explanation -- when ``x`` is
    absent.
    """
    i = bisect.bisect_left(array, x)
    if i != len(array) and array[i] == x:
        return i
    raise ValueError("{!r} is not in the array".format(x))
df5236991b906a6409ecc4cd69623193a5162ee7
33,502
def filter_protected(f):
    """Decorator: filter out protected tweets and users unless the client
    (via ``self.protected``) explicitly requested otherwise."""
    def generator(self, *args, **kwargs):
        results = f(self, *args, **kwargs)
        for item in results:
            if self.protected == False:
                if 'user' in item and item['user']['protected']:
                    continue
                elif 'protected' in item and item['protected']:
                    continue
            yield item
    return generator
1b932541e8cdfb8545980e4404f22af69479987c
33,503
def convert(stat):
    """Convert byte value to pretty string"""
    threshold = 1024 ** 5
    for magnitude in ('P', 'T', 'G', 'M', 'K'):
        if stat > threshold:
            return "%10.2f %s" % (float(stat) / threshold, magnitude)
        threshold = threshold / 1024
    return "%10d " % stat
619e228fa652dd876a42958e39bb5dd9c000c020
33,504
def Counter(al, br, delta):
    """Counter derivative: rate of change from ``al`` to ``br`` over
    ``delta``; None when the counter went backwards."""
    if br < al:
        return None
    increase = br - al
    return increase / float(delta)
f0a78eae08e9272ddeac57d663c20bde7c61f82e
33,505
import os


def exclude_mono(flists_flat):
    """Exclude uid-sid pairs for which not both mics are available.

    Returns (flists_dual, flists_mono): entries whose uid_sid appears with
    two mics, and entries where only one mic variant exists.
    """
    def uid_sid_of(entry):
        # File names look like '<uid>_<sid>_<mic>_...'; keep 'uid_sid'.
        triple = '_'.join(os.path.basename(entry[0]).split('_')[:3])
        return '_'.join(triple.split('_')[:2])

    # Count how many mic variants exist per uid_sid.
    counts = {}
    for entry in flists_flat:
        key = uid_sid_of(entry)
        counts[key] = counts.get(key, 0) + 1

    mono_keys = [k for k, n in counts.items() if n == 1]
    dual_keys = [k for k, n in counts.items() if n == 2]

    # Split the flat list according to the counts (other counts are
    # dropped, matching the original elif chain).
    flists_mono, flists_dual = [], []
    for entry in flists_flat:
        key = uid_sid_of(entry)
        if key in mono_keys:
            flists_mono.append(entry)
        elif key in dual_keys:
            flists_dual.append(entry)
    return flists_dual, flists_mono
f85f85f5f17ea5c086f0fad3df02f2ff2fd8cbbe
33,506
import torch


def parse_alignment(line):
    """Parses a single line from the alignment file.

    Args:
        line (str): String containing the alignment of the format:
            <src_idx_1>-<tgt_idx_1> ... <src_idx_m>-<tgt_idx_m>.
            All indices are 0 indexed.

    Returns:
        torch.IntTensor: packed alignments of shape (2 * m).
    """
    pairs = line.strip().split()
    packed = torch.IntTensor(2 * len(pairs))
    for i, pair in enumerate(pairs):
        src, tgt = pair.split("-")
        packed[2 * i] = int(src)
        packed[2 * i + 1] = int(tgt)
    return packed
4f2953b331eb81bffb3fc7cda19fe3d96826cadb
33,509
import os import pickle import logging def parse_mutation_from_maf(maf_path, output_dir='', mut_pickle=False): """ Extract somatic mutation information from given maf file. Parameters ---------- maf_path: str, maf file path output_dir: str, save a pickle for maf_dict to save preprocess time mut_pickle: bool, flag indicating whether to pickle mutation info to disk Returns ------- mut_dict: with key (sample, chromo) and values (var_dict) """ maf_pkl_file = os.path.join(output_dir, 'maf.pickle') if mut_pickle: if os.path.exists(maf_pkl_file): f = open(maf_pkl_file, 'rb') mutation_dic = pickle.load(f) logging.info("Use pickled maf mutation dict in {}".format(maf_pkl_file)) return mutation_dic f = open(maf_path) lines = f.readlines() mutation_dic = {} for i, line in enumerate(lines[1:]): print(i) items = line.strip().split('\t') if items[9] == 'SNP': # only consider snp sample_id = items[15] chr = items[4] pos = int(items[5]) - 1 var_dict = {} var_dict['ref_base'] = items[10] var_dict['mut_base'] = items[12] var_dict['strand'] = items[7] var_dict['variant_Classification'] = items[8] var_dict['variant_Type'] = items[9] if (sample_id, chr) in list(mutation_dic.keys()): mutation_dic[((sample_id, chr))][int(pos)] = var_dict else: mutation_dic[((sample_id, chr))] = {} mutation_dic[((sample_id, chr))][int(pos)] = var_dict if mut_pickle: f_pkl = open(maf_pkl_file, 'wb') pickle.dump(mutation_dic, f_pkl) logging.info("create maf pickled mutation dict for next time's use in {}".format(maf_pkl_file)) return mutation_dic
017f6a40c200724645ed086339f7624b896ef5f3
33,510
def update_dict(old_dict, values):
    """Update dictionary without changing the original object."""
    return {**old_dict, **values}
c1246e1849904ea23864d5e11f734497c6ff1e09
33,511
def c2st_rfi(acc_prop, acc_base, M_prop, M_base, g):
    """Compute the c2st RFI score.

    Args:
        acc_prop (float): Proposed model accuracy.
        acc_base (float): Baseline model accuracy.
        M_prop (int): Number of parameters for proposed model.
        M_base (int): Number of parameters for baseline model.
        g (function): Scalar-valued function.
    """
    param_ratio = M_prop / M_base
    delta_ratio = g(acc_prop - 0.5) / g(acc_base - 0.5)
    return 1 - param_ratio * delta_ratio
383c89bb6488c8a3564a950990e275b02d887133
33,512
def render_template(template, context):
    """Generate an HTML test report.

    Args:
        template (Template): Jinja2 Template object containing the
            template to render
        context (dict): the context to pass to the template

    Returns:
        str: the contents of the rendered template
    """
    rendered = template.render(context)
    return rendered
5cf0b16855a62439b6b4b76f383bea684fb9b4ec
33,514
import hashlib


def create_base_url(softwareversion):
    """Make the root URL for production server files.

    :param softwareversion: Software version to hash.
    :type softwareversion: str
    """
    # The CDN path is keyed by the SHA-1 of the software version string.
    digest = hashlib.sha1(softwareversion.encode('utf-8')).hexdigest()
    return "http://cdn.fs.sl.blackberry.com/fs/qnx/production/{0}".format(digest)
cda1283505f5e31208f39e12d422ccb99826c702
33,515
import re


def strip_version_info(package_name):
    """Return package_name without version numbers.

    Some packages specified as dependencies have a version number, e.g.
    gcc>=5.1. We shouldn't care about this, we always sync to an
    up-to-date mirror before building packages, so strip this info.
    """
    for operator in (r">=", r"<=", r"=", r"<", r">"):
        match = re.search(operator, package_name)
        if match:
            package_name = package_name[:match.start()]
    return package_name
d731d9f8e7696080e9ccbe173217255653d8a67f
33,517
import re


def is_password_valid_by_regex(password):
    """Check if the given password matches the policy REGEX: at least 8
    characters with a lowercase letter, an uppercase letter, a digit and
    one of @$!%*?&#."""
    pattern = r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?&#])[A-Za-z\d@$!%*?&#]{8,}$"
    return re.match(pattern, password) is not None
52fc444564123bde66737d7c5e47761ec7d2b947
33,518
def num2char(numpair, key):
    """Takes in a numpair like '34', and returns the character in row 3
    (actually 4th row) and column 4 (actually 5th column) of the key."""
    row = int(numpair[0])
    column = int(numpair[1])
    return key[row][column]
a86cf1ef327d2f1fbedf8b661e812c4d486fb3ac
33,521
def draw_labels(ax,
                labels,
                positions,
                scores=None,
                class_names=None,
                color='w',
                font_size=8,
                scales=None,
                horizontal_alignment='left'):
    """Draw labels on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        labels (ndarray): The labels with the shape of (n, ).
        positions (ndarray): The positions to draw each labels.
        scores (ndarray): The scores for each labels.
        class_names (list[str]): The class names.
        color (list[tuple] | matplotlib.color): The colors for labels.
        font_size (int): Font size of texts. Default: 8.
        scales (list[float]): Scales of texts. Default: None.
        horizontal_alignment (str): The horizontal alignment method of
            texts. Default: 'left'.

    Returns:
        matplotlib.Axes: The result axes.
    """
    for idx, (position, label) in enumerate(zip(positions, labels)):
        if class_names is not None:
            text = class_names[label]
        else:
            text = f'class {label}'
        if scores is not None:
            text += f'|{scores[idx]:.02f}'
        current_color = color[idx] if isinstance(color, list) else color
        size = font_size if scales is None else font_size * scales[idx]
        ax.text(
            position[0],
            position[1],
            f'{text}',
            bbox={
                'facecolor': 'black',
                'alpha': 0.8,
                'pad': 0.7,
                'edgecolor': 'none'
            },
            color=current_color,
            fontsize=size,
            verticalalignment='top',
            horizontalalignment=horizontal_alignment)
    return ax
1d8551bce6421e4e697dc33667a51111a0d2c9d4
33,522
def check_complete(start, start_date, end, end_date, array):
    """Boolean function to check if both the date objects are created.

    Input: start/end flags with their corresponding dates, and the list
    to append to.
    Output: True (and appends both dates to ``array``) if both objects
    exist, else False.
    """
    if not (start and end):
        return False
    print("Both date objects created")
    array.append(start_date)
    array.append(end_date)
    return True
07814d19cf021f3b3b50c1b6cd3a5e022924725c
33,523
import re


def strHighlight(s, colors):
    """Interpolate the terminal coloring codes specified in ``colors``
    into the string 's' based on dreamdir syntax rules.

    The string should contain headers, and may also contain dream text.
    ``colors`` should specify color codes as the values for keys
    'headers', 'lucid', 'notes', and 'verbatim', and the clear code for
    the key 'clear'. These can be obtained with, e.g., tput(1).

    Fix: the original used the literal '\\`' (an invalid escape kept by
    Python as TWO characters, backslash + backtick) in single-character
    comparisons, so verbatim backtick spans were never detected; a plain
    '`' is used here.
    """
    out = []
    charColors = {'{': 'lucid', '[': 'notes', '`': 'verbatim'}
    try:
        headers, rest = s.split('\n\n', 1)
    except ValueError:
        # nothing provided beyond the headers, or input is malformed
        headers = s
        rest = ''
    for i in headers.split('\n'):
        out.append(re.sub(r'^(.*):\t(.*)$',
                          r'%s\1:\t%s\2' % (colors['headers'], colors['clear']),
                          i))
        out.append('\n')
    out.append('\n')
    colStack = []
    curColor = colors['clear']
    inBacktick = False
    for char in rest:
        if char in ('[', '{') or (char == '`' and not inBacktick):
            # Opening marker: remember the current color, switch.
            colStack.append(curColor)
            curColor = colors[charColors[char]]
            out.append(curColor)
            out.append(char)
        elif char in (']', '}') or (char == '`' and inBacktick):
            # Closing marker: restore the previous color.
            out.append(char)
            curColor = colStack.pop()
            out.append(curColor)
        else:
            out.append(char)
        if char == '`':
            inBacktick = not inBacktick
    return ''.join(out)
d8458887e2d200e441f8223485928c716ed4246f
33,524
def dt(obj=None, trnsfrm=None):
    """Extracts field from object using DT language syntax.

    :type obj: ``dict``
    :param obj: The object to look in for the requested field
    :type trnsfrm: ``str``
    :param trnsfrm: The field to get value of
    :return: The field value in the object
    :rtype: ``str``
    """
    # Stub implementation: always yields the empty string.
    result = ""
    return result
58f8c6c67186a7ea4c3955943c6369ca091fa6b0
33,526
import subprocess
import re


def get_repo_url_from_remote():
    """Get the repository URL from the ``git remote -v`` listing.

    Returns a string like 'github.com/owner/repo'; raises when no GitHub
    remote is found.
    """
    # check_output returns the command results in raw byte format.
    git_remote_bytes = subprocess.check_output(["git", "remote", "-v"])
    remote_string = git_remote_bytes.decode('utf-8')
    # [\w-]+ so owner/repo names containing hyphens also match; the
    # original \w+ stopped at the first '-'. Dots stay excluded so the
    # trailing '.git' is not swallowed.
    pattern = re.compile(r"github.com[/:][\w-]+/[\w-]+")
    m = re.search(pattern, remote_string)
    if m:
        return m.group(0)
    # ("rexex" typo in the original message fixed.)
    raise Exception("Incorrect regex pattern finding repo url")
58bbb666a1de14c889463187b3e023d9a37894b3
33,527
import base64


def basic_auth_creds(request):
    """
    Extract any HTTP Basic authentication credentials for the request.

    Returns a tuple with the HTTP Basic access authentication credentials
    ``(username, password)`` if provided, otherwise ``None``.

    :param request: the request object
    :type request: pyramid.request.Request

    :returns: a tuple of (username, password) or None
    :rtype: tuple or NoneType
    """
    try:
        authtype, value = request.authorization
    except TypeError:
        # no authorization header
        return None

    if authtype.lower() != 'basic':
        return None

    try:
        user_pass_bytes = base64.standard_b64decode(value)
    except (TypeError, ValueError):
        # failed to decode. FIX: in Python 3 invalid base64 raises
        # binascii.Error, a ValueError subclass, which the previous
        # TypeError-only clause let propagate to the caller.
        return None

    try:
        # See the lengthy comment in the tests about why we assume UTF-8
        # encoding here.
        user_pass = user_pass_bytes.decode('utf-8')
    except UnicodeError:
        # not UTF-8
        return None

    try:
        username, password = user_pass.split(':', 1)
    except ValueError:
        # not enough values to unpack
        return None

    return (username, password)
285e9732d77ebc5cd0ac96f8579d743b7bf527f0
33,528
import os


def get_full_path(file_name):
    """Return the absolute path of *file_name* inside this module's directory."""
    root = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(root, file_name)
1b77d930896b38f6d8af1b1824cdcfb1ae5642fb
33,529
import re


def parse_samples(lines):
    """Parse raw sample text into (codes, before, after) tuples.

    Each sample occupies three lines -- a 'before' register dump, an
    opcode line and an 'after' dump -- and samples are separated by a
    blank line.
    """
    register_re = r'\[(\d+), (\d+), (\d+), (\d+)\]'
    samples = []
    for block in lines.split('\n\n'):
        before_line, code_line, after_line = block.split('\n')
        before = [int(n) for n in re.findall(register_re, before_line)[0]]
        codes = [int(n) for n in code_line.split()]
        after = [int(n) for n in re.findall(register_re, after_line)[0]]
        samples.append((codes, before, after))
    return samples
01bfa36045105002b5ddf84e5301843eb34aecae
33,530
def bottom_index_iter(shape):
    """Iterator for the bottom boundary indices of a structured grid."""
    n_cols = shape[1]
    return range(n_cols)
ee952e8c5129a4f504414756361ce55e0f85a095
33,531
def tune_install():
    """User input for data source and state machine tuning.

    Returns
    -------
    dict
        Data-source selection (exchange code, currency, asset) plus the
        moving-average / threshold parameters ("synapses") of the
        extinction-event state machine, the derived ``max_period`` and
        the formatted ``pair`` label.
    """
    tune = {}
    # ------------------------------------------------------------------
    # DATA SOURCE
    # ------------------------------------------------------------------
    # 1: bitshares DEX smart contracts (backtest and live)
    # 2: cryptocompare.com crypto:crypto (backtesting only)
    # 3: alphavantage.com fiat:fiat (backtesting only)
    # 4: alphavantage.com crypto:fiat (backtesting only)
    # 5: alphavantage.com US stocks:USD (backtesting only)
    # 6: synthetic data
    presets = {
        1: ("DEX", "OPEN.BTC", "BTS"),
        2: ("CEX", "BTC", "BTS"),
        3: ("FOX", "USD", "CNY"),
        4: ("CRY", "USD", "BTC"),
        5: ("STX", "USD", "QURE"),
        6: ("SYN", "SYNTHETIC", "DATA"),
    }
    source = 1  # choose data source preset
    tune["data_source"], tune["currency"], tune["asset"] = presets[source]
    # ------------------------------------------------------------------
    # EXTINCTION EVENT STATE MACHINE TUNE
    # ------------------------------------------------------------------
    # ma1 is the raw signal line, ma2 the long moving average; the alpha
    # signal of the state machine is the moving average crossover.
    tune["ma1"] = 10          # about 5 to 25 (min 3 for daily backtesting)
    tune["ma2"] = 50          # about 30 to 70 (max 75 for live warmup)
    # min/max cross describe ma1 offset and thickness upon crossing ma2;
    # the full thickness of the signal must pass through ma2 to flip the
    # alpha market state between bull and bear.
    tune["min_cross"] = 1     # about 0.9 to 1.1
    tune["max_cross"] = 1.05  # greater than 1, usually no more than 1.2
    # bull/bear stop are offsets of ma2 sharpening support/resistance
    # near the crossover: support is max(signal, ma2*bull_stop),
    # resistance is min(signal, ma2*bear_stop).
    tune["bull_stop"] = 1     # about 0.9 to 1.1
    tune["bear_stop"] = 1     # about 0.9 to 1.1
    # selloff/support are bull-market coefficients of ma1;
    # resistance/despair are bear-market coefficients of ma1 -- together
    # they set the outer buy/sell boundaries of the active market.
    tune["selloff"] = 1.5     # about 1.5 - 2.5
    tune["support"] = 1.1     # about 0.8 - 1.2
    tune["resistance"] = 0.9  # about 0.8 - 1.2
    tune["despair"] = 0.7     # about 0.6 - 0.8
    # Conservative live-trading override of the four boundaries above;
    # use 0.00 for backtesting, 0.00-0.05 for final testing and live.
    tune["gravitas"] = 0.00
    # Every strategy must specify the max indicator period and the pair.
    tune["max_period"] = 1 + int(max(tune["ma1"], tune["ma2"]))
    if tune["data_source"] == "DEX":
        tune["pair"] = tune["asset"] + ":" + tune["currency"]
    else:
        tune["pair"] = "%s_%s" % (tune["currency"], tune["asset"])
    return tune
66cf062164015fac5fc0c904652fe624758b6999
33,532
def parameter_dict_merge(log, param_dicts):
    """Merge the parameter dicts into a single dict.

    Returns None (after logging an error) when the same key maps to
    conflicting values in different dicts.
    """
    merged = {}
    for params in param_dicts:
        for key, value in params.items():
            if key in merged:
                if merged[key] != value:
                    log.cl_error("ambiguous values for key [%s] in parameters",
                                 key)
                    return None
            else:
                merged[key] = value
    return merged
663881ae85c349095bd64133e77acb0513a320bd
33,533
def qit(fmpp, f0pp, fmf0=4.88):
    """Calculate qIt = (fmf0 / ((fmpp / f0pp) - 1)) - 1

    :param fmpp: Fm''
    :param f0pp: F0''
    :param fmf0: Fv/Fm (default: 4.88)
    :returns: qIt (float)
    """
    ratio = fmpp / f0pp
    return fmf0 / (ratio - 1) - 1
4fc18ff9604b7514ff4bd8f6df0c8640b7514fbe
33,534
import requests


def retrieve_tag_gitignore(*tags: str):
    """Fetch the generated .gitignore for the combined list of tags.

    :param tags: an arbitrary number of tags that .gitignore will recognize
    :return: the raw text response from the website
    """
    joined_tags = ','.join(tags)
    url = f"https://www.toptal.com/developers/gitignore/api/{joined_tags}"
    return requests.get(url).text
c29a97c9a2f19e68857d60295d623245e34aacb6
33,535
def data_cleansing(files_all):
    """
    takes input as a dictionary of text files
    output as a dictionary representing provinces where province code is
    the key and has list of tuples for licence plate constraints

    The per-province branches below patch known irregularities in the raw
    text; the source files themselves stay untouched.
    """
    # There was some special cases where data had to be cleansed,
    # but the source stays untouched!
    cleansed_files = {}
    for i in files_all.keys():
        temp = files_all[i]
        temp = temp.split()
        if i == 7:  # for Antalya (Private Plates)
            del temp[:29]
        if i == 26:  # for Eskisehir (Numbers in tax administration were causing problems...
            # NOTE: the comprehension variable deliberately reuses `i`;
            # in Python 3 comprehensions have their own scope, so the
            # province key `i` is unaffected.
            temp = ["İKİ" if i == str(2) else i for i in temp]  # .. for a later algorithm)
        if i == 45:  # for Manisa (Private Plates)
            del temp[:11]
        if i == 55:  # for Samsun (One of the letters was also missing and added here)
            del temp[:138]
            # Also the numbers are removed again from the tax admn
            temp = ["ON DOKUZ" if i == str(19) else i for i in temp]
            index = temp.index("7250")
            temp.insert(index, "T")
        if i == 73:  # for Sırnak (Had a lot of letters missing)
            j = -1  # But the letters are added in the safest way possible
            while j >= -len(temp):
                # Two adjacent numeric entries mean a letter went missing
                # between them; re-insert the letter two slots back.
                if temp[j].isnumeric() and temp[j-1].isnumeric():
                    temp.insert(j, temp[j-2])
                j -= 1
        del temp[:14]  # Removing headers from all lists
        # Removing tax administration places since they were causing a lot
        # of inconsistency (every 7th field is expected to be numeric):
        k = 6
        while k < len(temp):
            while not temp[k].isnumeric():
                del temp[k]
            k += 7
        # Creating the plate boundary tuples (fields 2..5 of each
        # 7-field record). NOTE: the comprehension reuses `i` again;
        # safe only under Python 3 comprehension scoping.
        temp = [[temp[i+2], temp[i+3], temp[i+4], temp[i+5]] for i in
                range(0, len(temp)-6, 7)]
        if i == 20:  # for Denizli (Private Plates)
            del temp[25:]  # (It was easier to fix these ones after creating the tuples)
        if i == 28:  # for Giresun (Private Plates)
            del temp[-1:]
        if i == 59:  # for Tekirdag (Private Plates)
            del temp[-2]
        if i == 8:  # for Artvin
            temp[44][3] = temp[44][2]
            temp[44][2] = temp[44][0]
        # private plates are removed (maximum length of chars will be 3)
        temp = [j for j in temp if len(j[0]) <= 3 and len(j[2]) <= 3]
        cleansed_files[i] = temp  # Adding regional plates to the dict - list of tuples
    return cleansed_files
2d4132ca8338f9e9877022af90f55bb972b1a574
33,536
def map_msg_extent(msg):
    """ Returns the extent of the map in world coordinates

    :param msg: ((nav_msgs.OccupancyMap|gmapping.doubleMap) A map message.
    :return: (list) The extents of the map in world coordinates [x0, x1, y0, y1]
    """
    # Use the map resolution so plot ticks are meaningful world units.
    res = msg.info.resolution
    origin = msg.info.origin.position
    x0, y0 = origin.x, origin.y
    x1 = x0 + msg.info.width * res
    y1 = y0 + msg.info.height * res
    return [x0, x1, y0, y1]
0e597bd77a7c64bea2a689718574944307facf1b
33,537
def set_invenio(ctx, production):
    """Add Invenio details: api urls, communities, to context object

    Parameters
    ----------
    ctx: click context obj
        Api details
    production: bool
        If True using production api, if False using sandbox

    Returns
    -------
    ctx: click context obj
        Api details with url and community added
    """
    host = ('https://oneclimate.dmponline.cloud.edu.au/api' if production
            else 'https://test.dmponline.cloud.edu.au/api')
    ctx.obj['url'] = f'{host}/records'
    ctx.obj['deposit'] = f'{host}/records'
    ctx.obj['communities'] = f'{host}/communities'
    return ctx
f34d0c2806f9a8594d4cc07037daa6288686f786
33,538
import requests


def get_app_content(app_port, ip):
    """Fetch the app's root page.

    Returns a tuple of (stripped response body text, HTTP status code).
    """
    response = requests.get('http://{}:{}'.format(ip, app_port))
    body = response.content.decode("utf-8").rstrip()
    return (body, response.status_code)
d9194fe64c88f77e8420f70dffba1dba82f55a13
33,539
def map_range(
    x: float, in_min: float, in_max: float, out_min: float, out_max: float
) -> float:
    """
    Maps a number from one range to another, like Arduino's ``map()`` but
    returning a float and clamping the result between ``out_min`` and
    ``out_max``.  Either range may be reversed (start greater than end),
    allowing e.g. mapping 0-10 onto 50-0.  A degenerate (zero-width)
    input range is handled without dividing by zero.

    :param float x: Value to map.
    :param float in_min: Start value of input range.
    :param float in_max: End value of input range.
    :param float out_min: Start value of output range.
    :param float out_max: End value of output range.
    :return: Value mapped and clamped to the output range.
    :rtype: float
    """
    span = in_max - in_min
    offset = x - in_min
    if span != 0:
        ratio = offset / span
    elif offset != 0:
        ratio = offset
    else:
        ratio = 0.5
    result = ratio * (out_max - out_min) + out_min
    # Clamp to the output range regardless of its orientation.
    lo, hi = (out_min, out_max) if out_min <= out_max else (out_max, out_min)
    return min(max(result, lo), hi)
d85affb68b711236fcf455876c5fd6f8f3d9940c
33,540
def flatten_nested_lists(activation_maps):
    """Flattens a nested list of depth 3 in a row major order.

    Args:
      activation_maps: list of list of list of z3.ExprRef with dimensions
        (channels, activation_map_size, activation_map_size), activation_maps.

    Returns:
      list of z3.ExprRef.
    """
    return [entry
            for channel in activation_maps
            for row in channel
            for entry in row]
7eee52d809dbc659f94623634814ccaacd575183
33,541
def recursive_conditional_map(xr, f, condition):
    """Walks recursively through iterable data structure ``xr``, applying
    ``f`` to elements that satisfy ``condition`` and recursing into the
    rest; the structure is rebuilt as nested tuples."""
    result = []
    for item in xr:
        if condition(item):
            result.append(f(item))
        else:
            result.append(recursive_conditional_map(item, f, condition))
    return tuple(result)
807a9ce5ac42cd10ad7cfbcb42f1912bb7fca1a0
33,543
import queue


def get_messages_from_queue(mp_queue, timeout=0.01):
    """Safely get all messages from a multiprocessing queue.

    Args:
        mp_queue (queue): a multiprocess Queue instance
        timeout (float): seconds to block other processes out from the queue

    Returns:
        list: List of messages or an empty list if there weren't any
    """
    # Per the multiprocessing docs, empty() may transiently report True
    # right after a put() on an empty queue (we hit this in SPT-1354),
    # so kick the tires with a blocking get() before trusting empty().
    msgs = []
    try:
        first = mp_queue.get(True, timeout)
    except queue.Empty:
        return msgs
    msgs.append(first)
    while not mp_queue.empty():
        msgs.append(mp_queue.get_nowait())
    return msgs
1d9aa7f404f87206d48c881d4ed50dcde8b8006a
33,544
def image_normalizer(image):
    """Scale 8-bit pixel intensities into [0, 1].

    :param image: Image to be normalized.
    :type image: Array
    :return: Normalized image in [0, 1].
    :rtype: Array
    """
    max_intensity = 255.0
    return image / max_intensity
316371283d2a976fc2830bcfa3ea2b5ec85240f7
33,545
from sys import maxsize
from math import floor, ceil
from numpy import digitize


def quantizeParameters(*args):
    """Smartly quantize parameters in user-defined per-parameter ways.

    Parameters
    ----------
    args : 3-sequences like ``(value, None | sequence | slice, None | str)``
        ``value`` is the actual value.  The second element selects the
        quantization domain: ``None`` (pass through unchanged), a sequence
        of bin edges, or a ``slice(start, stop, step)`` of evenly spaced
        bins.  The third element is the option: ``'exact'`` (sequence
        only), ``'floor'``, ``'ceil'``, ``'round'`` (slice only) or
        ``'index'``, optionally prefixed with ``'w'`` to wrap the value
        into the domain first (e.g. ``'wfloor'``).

    Returns
    -------
    list
        Quantized parameters.  Raises a ValueError exception on an
        unknown option or a failed ``'exact'`` check.

    See Also
    --------
    hashParameters : create a hash of a list of quantized parameters

    Examples
    --------
    >>> quantizeParameters(
    ...     ('M', None, None),                     # 'M' (no quantize, no check)
    ...     ('A', ['A', 'B'], 'exact'),            # 'A' (no quantize, check value)
    ...     (1.5, [1, 3, 4], 'floor'),             # 1
    ...     (-1, [1, 3, 4], 'floor'),              # None
    ...     (1.5, [1, 3, 4], 'ceil'),              # 3
    ...     (5.0, [1, 3, 4], 'ceil'),              # None
    ...     (1.5, [1, 3, 4], 'index'),             # 0
    ...     (-1, [1, 3, 4], 'index'),              # -1
    ...     (5.0, [1, 3, 4], 'index'),             # 2
    ...     (365.3, [0, 90, 180, 270], 'wfloor'),  # 0 (wrapped)
    ...     (365.3, [0, 90, 180, 270], 'wceil'),   # 90 (wrapped)
    ...     (365.3, [0, 90, 180, 270], 'windex'),  # 0 (wrapped)
    ...     (1.5, slice(1, 4), 'floor'),           # 1.0
    ...     (1.5, slice(1, 4), 'ceil'),            # 2.0
    ...     (1.5, slice(1, 4), 'index'),           # 0
    ...     (365.3, slice(0, 360), 'wfloor'),      # 5.0 (wrapped)
    ...     (365.3, slice(0, 360), 'wceil'),       # 6.0 (wrapped)
    ...     (365.3, slice(0, 360), 'windex'))      # 5 (wrapped)
    ['M', 'A', 1, None, 3, None, 0, -1, 2, 0, 90, 0, 1.0, 2.0, 0, 5.0, 6.0, 5]
    """
    rit = []
    for value, bins, option in args:
        if bins is None:
            # No quantization domain: pass the value through untouched.
            rit.append(value)
        elif type(bins) is slice:
            # Evenly spaced bins described by slice(start, stop, step).
            start = float(0 if bins.start is None else bins.start)
            stop = float(maxsize if bins.stop is None else bins.stop)
            step = float(1 if bins.step is None else bins.step)
            bin_ = (value - start) / step  # position in bin units
            if option[0] == 'w':  # wrap
                # NOTE(review): wraps by (stop - start) in *bin* units;
                # for step != 1 this looks like it should be
                # (stop - start) / step -- confirm intended behavior.
                bin_ %= (stop - start)
                option = option[1:]
            if option == 'floor':
                rit.append(floor(bin_) * step + start)
            elif option == 'ceil':
                rit.append(ceil(bin_) * step + start)
            elif option == 'round':
                rit.append(floor(bin_ + 0.5) * step + start)
            elif option == 'index':
                rit.append(floor(bin_))
            else:
                raise ValueError("Unknown option: %s" % option)
        else:
            # Explicit sequence of bin edges.
            if option == 'exact':
                if value in bins:
                    rit.append(value)
                else:
                    raise ValueError("Unadmittable value: %s not in %s"
                                     % (value, bins))
            else:
                if option[0] == 'w':  # wrap
                    # NOTE(review): wraps by the width of the *first*
                    # interval (bins[1] - bins[0]); this matches the
                    # documented examples only for evenly spaced bins --
                    # confirm for non-uniform edges.
                    value = ((value - bins[0]) % (bins[1] - bins[0])) + bins[0]
                    option = option[1:]
                # digitize: index of the first bin edge greater than value.
                idx = digitize([value], bins)[0]
                if option == 'floor':
                    rit.append(bins[idx - 1] if idx > 0 else None)
                elif option == 'ceil':
                    rit.append(bins[idx] if idx < len(bins) else None)
                elif option == 'index':
                    rit.append(idx - 1)
                else:
                    raise ValueError("Unknown option: %s" % option)
    return rit
f47520a0e9a3f234c45248ddeb058bde7931eab0
33,546
import unicodedata


def normalize_text(html):
    """Normalize page source with NFKC, folding compatibility characters
    (full-width forms, ligatures, circled digits, ...) into their plain
    equivalents.

    :param html: page source text
    :return: NFKC-normalized text
    """
    normalized = unicodedata.normalize('NFKC', html)
    return normalized
5f8fa78854cca83e1138b8b041546744b9d525e9
33,548
def fibonacci_sequence(end_number):
    """Return the Fibonacci terms (1, 2, 3, 5, 8, ...) strictly below a limit.

    :param end_number: number under which we want finding terms in the
        Fibonacci sequence
    :return: list of terms in the Fibonacci sequence below ``end_number``
    """
    # FIX: the previous version unconditionally seeded the list with
    # [1, 2], so for end_number <= 2 it returned terms that were not
    # actually below the limit.
    terms = []
    current, nxt = 1, 2
    while current < end_number:
        terms.append(current)
        current, nxt = nxt, current + nxt
    return terms
7c61e793dd6c2e7ebc583088491a967595ebe5f3
33,550
def filter_sql_query(log):
    """Extract SQL statements from log"""
    sqls = []
    in_sql = False
    for line in log:
        if 'sqlalchemy.engine.base.Engine' in line:
            # Strip the fixed-width prefix, e.g.
            # '2017-11-22 15:17:14,810 INFO [sqlalchemy.engine.base.Engine] '
            sqls.append(line[61:].strip())
            in_sql = True
        elif in_sql and not line.startswith('2017-'):
            # Continuation of a multi-line SQL statement: fold into the
            # previous entry.
            sqls[-1] += ' ' + line.strip()
        else:
            in_sql = False
    return sqls
b0340b73408ee7dce3ebbcb34f75ac225114d611
33,551
def _read_tagmap(data, separator=';', comment='#', joiner=' ', tag_column=0, unicode_column=1): """Read a tag map from file data.""" chr2tag = {} for line in data.decode('utf-8').splitlines(): if line.startswith(comment): continue columns = line.split(separator) if len(columns) > max(tag_column, unicode_column): tag = columns[tag_column] unicode_str = columns[unicode_column] try: char = ''.join(chr(int(_str, 16)) for _str in unicode_str.split(joiner)) except ValueError: pass else: chr2tag[char] = tag return chr2tag
9f4f32a206b4c59a621fa4c4587126d45234d0f5
33,552
def aggregate_dict(metadata_list):
    """Sum frequencies across multiple metadata frequency lists (e.g.
    over the course of multiple days).

    :param metadata_list: iterable of iterables of (key, count) pairs
    :return: dict-like list of (key, total count) pairs
    """
    totals = {}
    for listing in metadata_list:
        for key, count in listing:
            totals[key] = totals.get(key, 0) + count
    return list(totals.items())
aa5d788f4c779b5b5250e6f67a3db14318cb8600
33,553
def process_file(in_file_name: str) -> bool:
    """Return True when the name references 'ExtResource'.

    There are some instances where the file should not be processed at
    the moment; by skipping the files matched here, some small manual
    changes may be needed to get the new PCF files to be used.
    """
    return 'ExtResource' in in_file_name
0b09fa5108a87b8e8f04055f4a6d37790b16a872
33,555
def caesar_cipher(message, key):
    """Caesar cipher: shift each letter of *message* by *key* positions.

    The message is upper-cased first; non-letter characters pass through
    unchanged.

    FIX: the shift is now taken modulo the alphabet length, so any
    integer key works -- the previous single add/subtract of 26 raised
    IndexError for keys outside roughly [-26, 51].

    :param message: text to encrypt
    :param key: shift amount (any integer, positive or negative)
    :return: encrypted upper-case string
    """
    LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    translated = ''
    for symbol in message.upper():
        if symbol in LETTERS:
            num = (LETTERS.find(symbol) + key) % len(LETTERS)
            translated += LETTERS[num]
        else:
            translated += symbol
    return translated
c3ee79ef5198058107cdc6dd2ed60d3099f5d11a
33,556
from datetime import datetime


def parse_iso(timestamp: str) -> datetime:
    """Convert an ISO 8601 timestamp (with microseconds and a UTC
    offset) to an aware :class:`datetime`.

    Parameters
    ----------
    timestamp: str
        The ISO 8601 timestamp to be converted.

    Returns
    -------
    datetime
        The converted :class:`datetime` timestamp.
    """
    iso_format = '%Y-%m-%dT%H:%M:%S.%f%z'
    return datetime.strptime(timestamp, iso_format)
54a313a1100d67b3411452aa60f79826bce8ab66
33,558
def mplayer_cmd(song, volume=100):
    """Build the command string that launches mplayer on *song* at the
    given volume (formatted with three decimal places)."""
    cmd_template = "mplayer %s -volume %.3f"
    return cmd_template % (song, volume)
e33e2439d43d7becf30a0e355fe238687635fc53
33,560
def parse_fasta_header(header):
    """
    Parses a FASTA format header (with or without the initial '>') and
    returns a tuple of sequence id and sequence name/description.

    If NCBI SeqID format (gi|gi-number|gb|accession etc) is detected, the
    first id in the list is used as the canonical id
    (see http://www.ncbi.nlm.nih.gov/books/NBK21097/#A631).
    """
    if header[0] == '>':
        header = header[1:]
    tokens = header.split('|')
    # check to see if we have an NCBI-style header
    if header.find("|") != -1 and len(tokens[0]) <= 3:
        # "gi|ginumber|gb|accession bla bla" becomes "gi|ginumber"
        seqid = "%s|%s" % (tokens[0], tokens[1].split()[0])
        name = tokens[-1:][0].strip()
    # otherwise just split on spaces & hope for the best
    else:
        tokens = header.split()
        seqid = tokens[0]
        # FIX: previously `header[0:-1].strip()`, which chopped the last
        # real character whenever the header had no trailing newline;
        # strip() already removes any trailing newline/whitespace.
        name = header.strip()
    return seqid, name
ee51e9aab6ae75d7d0a59ff3e2adc6211e2cf897
33,562
def dsu_sort2(list, index, reverse=False):
    """(dsu_sort2): Stable in-place sort of *list* by the element at
    position *index* of each item.

    Only the primary element at *index* is compared; because Python's
    sort is stable, items with equal primary keys keep their original
    relative order (secondary elements are never compared).

    FIX: the previous decorate/undecorate implementation overwrote each
    item with its bare sort key and then indexed into that key, which
    discarded the original items and raised for non-subscriptable keys.

    :param list: list of indexable items; sorted in place
    :param index: position of the sort key within each item
    :param reverse: sort descending when True
    :return: the same list object, now sorted
    """
    list.sort(key=lambda item: item[index], reverse=reverse)
    return list
c23e7a16afff451c82573e65624870b50666434f
33,564
import sys
import binascii


def get_macaroon(filepath=None):
    """Read and decode macaroon from file.

    The macaroon bytes are hex-encoded and returned as a string.
    Exits the process when no filepath is given.
    """
    if filepath is None:
        print("Must specify macaroon_filepath")
        sys.exit(1)
    with open(filepath, 'rb') as handle:
        raw = handle.read()
    return binascii.hexlify(raw).decode()
b6a2f16f7dd68751ba33708a71f0b060c7d0cdec
33,565
import torch


def pack(inputs, is_tensor=False):
    """Pack *inputs* into a tuple if it was a single tensor.

    When ``is_tensor`` is True, also return whether the wrapping
    happened, as ``(outputs, single)``.
    """
    if torch.is_tensor(inputs):
        wrapped, single = (inputs,), True
    else:
        wrapped, single = inputs, False
    if is_tensor:
        return wrapped, single
    return wrapped
14c1f7c16e0871d1fa7ec265e4db062cdc62b82d
33,570
def insertion_sort(list_obj):
    """Sort the given list in place using insertion sort and return it."""
    for idx in range(1, len(list_obj)):
        key = list_obj[idx]
        pos = idx
        # Shift larger elements right until key's slot is found.
        while pos > 0 and list_obj[pos - 1] > key:
            list_obj[pos] = list_obj[pos - 1]
            pos -= 1
        list_obj[pos] = key
    return list_obj
ddea76cf7cfed171da4efe53ba4c2886d773b657
33,571
def convert_signature_id(sigid):
    """Standardize the signature ID to XXX-XXX if info is available."""
    compact = sigid.replace(' ', '').replace('-', '').upper()
    if len(compact) != 6:
        # Not a six-character ID: just upper-case the original.
        return sigid.upper()
    return '-'.join((compact[:3], compact[3:]))
63f0af55415d58a7db4791f716e35a6dbacc8e89
33,572
import subprocess


def simple_command(cmd, timeout=5):
    """Run *cmd* through the shell and return its combined stdout/stderr
    as text, or None when there was no output.

    :param cmd: command string, or list of arguments to be space-joined
    :param timeout: seconds before the call is aborted
    """
    # SECURITY NOTE: shell=True executes the string via the shell; never
    # pass untrusted input to this helper.
    if isinstance(cmd, list):
        cmd = " ".join(cmd)
    output = subprocess.check_output(cmd, shell=True, timeout=timeout,
                                     stderr=subprocess.STDOUT)
    if output:
        return output.decode('utf-8')
6cacec479e45c29e1139483d72084516ede8fce0
33,573
import numpy


def calc_d(theta_s_i, theta_v_i, relative_azimuth):
    """Calculate d.

    Args:
        theta_s_i (numpy array): theta_s_i.
        theta_v_i (numpy array): theta_v_i.
        relative_azimuth (numpy array): relative_azimuth.

    Returns:
        numpy.array: sqrt(tan^2(theta_s_i) + tan^2(theta_v_i)
        - 2*tan(theta_s_i)*tan(theta_v_i)*cos(relative_azimuth)).
    """
    tan_s = numpy.tan(theta_s_i)
    tan_v = numpy.tan(theta_v_i)
    squared = (tan_s * tan_s + tan_v * tan_v
               - 2 * tan_s * tan_v * numpy.cos(relative_azimuth))
    return numpy.sqrt(squared)
d4966820a6df6b37bf4daa8dcf6dcd6bdff7cb78
33,574
def get_srid(crs):
    """Returns the SRID for the provided CRS definition.

    The CRS can be defined in the following formats
    - urn:ogc:def:crs:EPSG::4326
    - EPSG:4326
    - 4326
    """
    # The SRID is always the last colon-separated field.
    srid = crs.rsplit(':', 1)[-1] if ':' in crs else crs
    return int(srid)
7780ed484ddb7d653b99198f15e1d8178e60aced
33,575
def _progress_bar_update(pbar): # noqa: D413 """Update progress bar manually. Helper method for S3 Transfer, which needs a callback to update the progressbar. Args: pbar: progressbar.ProgressBar instance Returns: a function that in turn accepts chunk that is used to update the progressbar. """ # noqa: D202 def _update_pbar(chunk_uploaded_in_bytes): pbar.update(pbar.value + chunk_uploaded_in_bytes) return _update_pbar
653d08d880541dabb0105abf27337429deb31aca
33,578
import numpy def _histogram_zr_norm(radius): """Return number of particles at radius for uniform distribution.""" yx = numpy.mgrid[:radius, :radius] r = numpy.hypot(yx[0], yx[1]) r += 0.5 r = r.astype(numpy.int32) norm = numpy.bincount(r.ravel()) norm = norm[:radius] norm[1:] *= 4 return norm
2310b9074bbecb512c3753115544f5ab41da2b49
33,579
def find_matching_paren(string, index, lparen='(', rparen=')'):
    """Find the closing paren corresponding to the open paren at <index>
    in <string>.  Optionally, can provide other characters to match on.

    If found, returns the index of the matching parenthesis.
    If not found, returns -1.

    FIX: the previous scanning loop could read string[len(string)] after
    skipping trailing non-paren characters (e.g. for "(a"), raising
    IndexError instead of the documented -1.
    """
    if not string[index] == lparen:
        raise ValueError("Character at index %d is '%s'. Expected '%s'"
                         % (index, string[index], lparen))
    depth = 1
    for pos in range(index + 1, len(string)):
        char = string[pos]
        if char == lparen:
            depth += 1
        elif char == rparen:
            depth -= 1
            if depth == 0:
                return pos
    return -1
3216a020403eb33f557f3f032f0986e078384421
33,580