content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def is_holidays(holidays_dt, day_dt):
    """Check whether *day_dt* falls on one of the given holidays.

    :param holidays_dt: list of holidays as datetime objects
    :param day_dt: datetime of the day to analyze
    :return: True if the day is a holiday, False if not
    """
    holiday_dates = {holiday.date() for holiday in holidays_dt}
    return day_dt.date() in holiday_dates
5a8db2929fdde4402bef87f355eb991cd2825e17
37,423
import subprocess
import warnings


def mount_nextcloud(frompath, topath):
    """Mount a NextCloud folder in your local machine or viceversa."""
    # rclone does the actual transfer; capture both streams.
    proc = subprocess.Popen(
        ['rclone', 'copy', frompath, topath],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = proc.communicate()
    if err:
        warnings.warn("Error while mounting NextCloud: {}".format(err))
    return out, err
bf30bb4b62860684ca93e3b62cdac5f110816139
37,424
import torch


def box_prediction():
    """Example bounding box prediction (random tensor of shape (1, 3, 4))."""
    boxes = torch.rand(1, 3, 4)
    return boxes
bf3dd357d789ef81ad9ebee110ca46aca3bcc923
37,425
def getColumnTitle(min='-inf', max='+inf'):
    """ Human readable column titles """
    lo, hi = str(min), str(max)
    fmt = '{0:,}'.format
    if lo == '-inf' and hi == '+inf':
        return 'Total'
    if lo == '-inf':
        return 'Up to ' + fmt(max)
    if hi == '+inf':
        return 'From ' + fmt(min)
    if min == max:
        return fmt(min)
    return fmt(min) + ' to ' + fmt(max)
3d11e2224de186d6f31eca815e3a5f34b546cf33
37,428
def parse_schema(spec, schema):
    """
    Parse an AIRR schema object for doc tables

    Arguments:
      spec (str): name of the schema object
      schema (dict): master schema dictionary parsed from the yaml file.

    Returns:
      list: list of dictionaries with parsed rows of the spec table.
    """
    data_type_map = {'string': 'free text',
                     'integer': 'positive integer',
                     'number': 'positive number',
                     'boolean': 'true | false'}

    # Get schema
    properties = schema[spec]['properties']
    required = schema[spec].get('required', None)

    # Iterate over properties
    table_rows = []
    for prop, attr in properties.items():
        # Standard attributes
        required_field = False if required is None or prop not in required else True
        title = attr.get('title', '')
        example = attr.get('example', '')
        description = attr.get('description', '')

        # Data type
        data_type = attr.get('type', '')
        data_format = data_type_map.get(data_type, '')

        # Arrays
        if data_type == 'array':
            if attr['items'].get('$ref') is not None:
                sn = attr['items'].get('$ref').split('/')[-1]
                data_type = 'array of :ref:`%s <%sFields>`' % (sn, sn)
            elif attr['items'].get('type') is not None:
                data_type = 'array of %s' % attr['items']['type']
        elif attr.get('$ref') == '#/Ontology':
            data_type = ':ref:`Ontology <OntoVoc>`'
        elif attr.get('$ref') is not None:
            sn = attr.get('$ref').split('/')[-1]
            data_type = ':ref:`%s <%sFields>`' % (sn, sn)

        # x-airr attributes
        if 'x-airr' in attr:
            xairr = attr['x-airr']
            nullable = xairr.get('nullable', True)
            deprecated = xairr.get('deprecated', False)
            identifier = xairr.get('identifier', False)
            # MiAIRR attributes
            miairr_level = xairr.get('miairr', '')
            miairr_set = xairr.get('set', '')
            miairr_subset = xairr.get('subset', '')
            # Set data format for ontologies and controlled vocabularies
            if 'format' in xairr:
                if xairr['format'] == 'ontology' and 'ontology' in xairr:
                    base_dic = xairr['ontology']
                    ontology_format = (str(base_dic['top_node']['id']),
                                       str(base_dic['top_node']['label']))
                    # BUG FIX: this string literal was broken across two
                    # physical lines (a SyntaxError); rejoined on one line.
                    data_format = 'Ontology: { top_node: { id: %s, value: %s}}' % ontology_format
                    # Get 'type' for ontology
                    example = 'id: %s, value: %s' % (example['id'], example['label'])
                elif xairr['format'] == 'controlled vocabulary':
                    if attr.get('enum', None) is not None:
                        data_format = 'Controlled vocabulary: %s' % ', '.join(attr['enum'])
                    elif attr.get('items', None) is not None:
                        data_format = 'Controlled vocabulary: %s' % ', '.join(attr['items']['enum'])
        else:
            nullable = True
            deprecated = False
            identifier = False
            miairr_level = ''
            miairr_set = ''
            miairr_subset = ''

        if deprecated:
            field_attributes = 'DEPRECATED'
        else:
            f = ['required' if required_field else 'optional',
                 'identifier' if identifier else '',
                 'nullable' if nullable else '']
            field_attributes = ', '.join(filter(lambda x: x != '', f))

        # Return dictionary
        r = {'Name': prop,
             'Set': miairr_set,
             'Subset': miairr_subset,
             'Designation': title,
             'Field': prop,
             'Type': data_type,
             'Format': data_format,
             'Definition': description,
             'Example': example,
             'Level': miairr_level,
             'Required': required_field,
             'Deprecated': deprecated,
             'Nullable': nullable,
             'Identifier': identifier,
             'Attributes': field_attributes}
        table_rows.append(r)

    return table_rows
384118e7f05d88d74dfdbac572da765d4a5251fc
37,431
def data_prepocessing(dataset):
    """Preprocess raw bike-share rows.

    Strips the leading "W" from bike numbers (column 7), collects the
    unique start stations (column 3), end stations (column 5) and bike
    numbers, and keeps only the columns
    [duration, start station, end station, bike number].
    Rows labelled "Member" (column 8) map to class 1, all others to 2.

    Returns:
        (new_dataset, classes, bike_numbers, start_station, end_station)
    """
    start_station = []
    end_station = []
    bike_numbers = []
    new_dataset = []
    classes = []
    for row in dataset:
        if row[7][0] == "W":
            row[7] = row[7][1:]
        if row[3] not in start_station:
            start_station.append(row[3])
        if row[5] not in end_station:
            end_station.append(row[5])
        if row[7] not in bike_numbers:
            bike_numbers.append(row[7])
        new_dataset.append([row[0], row[3], row[5], row[7]])
        classes.append(1 if row[8] == "Member" else 2)
    return new_dataset, classes, bike_numbers, start_station, end_station
025928b18c500fe3b4a26ff0d7834dc8b5edd1c2
37,432
import re


def clause_count(doc, infinitive_map):
    """Return clause count (heuristic).

    Counts tokens that are finite verbs or auxiliaries, excluding
    periphrasis constructions.

    :param doc: Text to be processed (Spacy Doc).
    :param infinitive_map: Lexicon mapping conjugate to infinitive
        (unused by this heuristic, kept for API compatibility).
    :return: Clause count (int).
    """
    finite_re = re.compile("VerbForm=Fin")
    perif_re = re.compile("Perif")
    count = 0
    for token in doc:
        if token.pos_ not in ("VERB", "AUX"):
            continue
        if perif_re.search(token.tag_):
            continue
        if finite_re.search(token.tag_):
            count += 1
    return count
98edc098f625cc8f8efb8f892df14f14c03c32a2
37,433
def humanize_key(key):
    """Return a human-readable fingerprint as colon-separated hex bytes.

    BUG FIX: ``key.get_fingerprint()`` returns ``bytes`` on Python 3,
    whose iteration yields ints — the original ``ord(c)`` raised
    TypeError there. Both bytes (Python 3) and str (Python 2) are now
    handled.
    """
    fingerprint = key.get_fingerprint()
    return ':'.join('%02x' % (b if isinstance(b, int) else ord(b))
                    for b in fingerprint)
caa98d4fe392627cc153dcc0dd9f29ca42548efe
37,434
import math


def f(x: float) -> float:
    """Evaluate f(x) = sqrt(x + 3) - x + 1.

    :param x: an input.
    :return: the value of the expression f(x)
    """
    root = math.sqrt(x + 3)
    return root - x + 1
72027f74af4f9d2d5d6ef1e8443d2bb90ce82b2b
37,435
def asId(v, default=0):
    """Coerce *v* to a positive ``int`` usable as a @Record@ *id* field;
    non-convertible or non-positive values yield *default* (default @0@).

    >>> asId(123) == 123
    True
    >>> asId('abcd', 'ABCD')
    'ABCD'
    """
    try:
        value = int(v)
    except (ValueError, TypeError):
        return default
    return value if value > 0 else default
a0584a98d09b8097305636f192d99c06d2c86444
37,437
import string


def encode(number, base):
    """Encode given number in base 10 to digits in given base.

    number: int -- integer representation of number (in base 10)
    base: int -- base to convert to
    return: str -- string representation of number (in given base)
    """
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Handle unsigned numbers only for now
    assert number >= 0, 'number is negative: {}'.format(number)
    digits = string.digits + string.ascii_lowercase
    # Repeated divmod collects remainders least-significant first; the
    # commented-out draft helper and explanatory no-op string literal
    # from the original were removed.
    number, remainder = divmod(number, base)
    remainders = [remainder]
    while number > 0:
        number, remainder = divmod(number, base)
        remainders.append(remainder)
    # Most-significant digit comes from the last remainder.
    return ''.join(digits[r] for r in reversed(remainders))
3896a59522d1839decdee6c6765f29d8f58ae2f5
37,438
def koma_sepp(n):
    """
    Take input integer n and return comma-separated string,
    separating 1000s.

    >>> koma_sepp(131032047)
    '131,032,047'
    >>> koma_sepp(18781)
    '18,781'
    >>> koma_sepp(666)
    '666'
    """
    return format(n, ',')
e72ebc81bf116720f17a005af7bff2b40569a822
37,440
def soundspeed(temperature=27, salinity=35, depth=10):
    """Get the speed of sound in water.

    Uses Mackenzie (1981) to compute sound speed in water.

    :param temperature: temperature in deg C
    :param salinity: salinity in ppt
    :param depth: depth in m
    :returns: sound speed in m/s

    >>> import arlpy
    >>> arlpy.uwa.soundspeed()
    1539.1
    >>> arlpy.uwa.soundspeed(temperature=25, depth=20)
    1534.6
    """
    # Mackenzie (1981) nine-term empirical equation; the coefficients
    # are taken verbatim from the paper and must not be altered.
    c = 1448.96 + 4.591*temperature - 5.304e-2*temperature**2 + 2.374e-4*temperature**3
    # Salinity and depth corrections.
    c += 1.340*(salinity-35) + 1.630e-2*depth + 1.675e-7*depth**2
    # Cross terms (temperature x salinity, temperature x depth^3).
    c += -1.025e-2*temperature*(salinity-35) - 7.139e-13*temperature*depth**3
    return c
782bb031d879d8f7ffb9e412de716e78710abe4f
37,441
def cal_accuracy(label_list, classify_res):
    """Calculate the fraction of predictions that match the labels."""
    assert(len(label_list) == len(classify_res))
    matches = sum(1 for truth, pred in zip(label_list, classify_res)
                  if truth == pred)
    return matches / float(len(label_list))
6751fcd9eff775412269e7675211d43727c84b1d
37,442
import argparse


def args_parse():
    """Create the argument parser."""
    return argparse.ArgumentParser(description='Linux TC unit tests')
b8ec6a0f440e9ada5d86a9aaf067a5b351538a0b
37,443
import traceback


def Last_Error():
    """Return details of the last error produced, perfect for
    try/except statements.

    CODE: Last_Error()

    EXAMPLE CODE:
        try:
            xbmc.log(this_should_error)
        except:
            koding.Text_Box('ERROR MESSAGE', Last_Error())
    """
    return traceback.format_exc()
328bfdf024f8ed433f71d4a331714e7af6bb33b4
37,444
def stereo_to_mono(signal):
    """Convert a (numpy) STEREO signal to MONO.

    A 2-D single-channel signal is flattened; a two-channel signal is
    averaged; anything else is returned unchanged.
    """
    if signal.ndim != 2:
        return signal
    channels = signal.shape[1]
    if channels == 1:
        return signal.flatten()
    if channels == 2:
        return (signal[:, 1] / 2) + (signal[:, 0] / 2)
    return signal
18023ba5d54c680778e01907dad2853e3d104650
37,446
def build_shortcut(key, mods):
    """return text for key combo """
    prefixes = (('C', 'Ctrl+'), ('S', 'Shift+'), ('A', 'Alt+'), ('W', 'WinKey+'))
    combo = ''.join(text for flag, text in prefixes if flag in mods)
    return combo + key.capitalize()
0b04e93608cc2230a4ecbc9c8745dd722cf131a3
37,447
import requests


def read_data(url_page, parser):
    """Fetch a Billboard data page and feed it to the given parser.

    Parameters:
    url_page -- url of the Billboard data
    parser -- Instantiated parser (either ParseWeek() or ParseYear().
    """
    response = requests.get(url_page)
    parser.feed(response.text)
    return parser.result
d9d8bedc0a18b64a197e007cf0fdc63d5a1e63cb
37,448
from typing import Tuple
import re


def parse_source_type_name(field_type_name: str) -> Tuple[str, str]:
    """
    Split full source type name into package and type name.
    E.g. 'root.package.Message' -> ('root.package', 'Message')
         'root.Message.SomeEnum' -> ('root', 'Message.SomeEnum')
    """
    # Package is the longest leading run without uppercase letters.
    match = re.match(r"^\.?([^A-Z]+)\.(.+)", field_type_name)
    if not match:
        return "", field_type_name.lstrip(".")
    return match.group(1), match.group(2)
45a48a2ad53b3b98618d8a82a3bf2cb75cdab250
37,452
def solution(exponent: int = 30) -> int:
    """
    For any given exponent x >= 0, 1 <= n <= 2^x.
    This function returns how many Nim games are lost given that
    each Nim game has three heaps of the form (n, 2*n, 3*n).
    >>> solution(0)
    1
    >>> solution(2)
    3
    >>> solution(10)
    144
    """
    # The count of lost games equals the Fibonacci number F(x+2),
    # evaluated here via Binet's closed form.
    idx = exponent + 2
    phi = (1 + 5 ** 0.5) / 2
    fib = (phi ** idx - (phi - 1) ** idx) / 5 ** 0.5
    return int(fib)
e61be3f0ed92954c6b7a0d25baffa9d6d40107a1
37,453
def _get_publisher_signal_info(func): """ Return signal info for publisher in format accepted by `WithSignalDisabled`. """ return { 'dispatch_uid': func._signal_dispatch_uid, 'sender': func._signal_model, 'signal': func._signal_type, 'receiver': func, }
8fd8f0961e9d77fd5676dd7bcbef352279021be3
37,454
def split_curves(curves, param):
    """Split curves into lists sorted by the given parameter

    Parameters
    ----------
    curves : array_like of Curve
        all curves
    param : str
        key of parameter to sort after

    Returns
    -------
    split_curves : list of list of Curve
    """
    grouped = {}
    for curve in curves:
        grouped.setdefault(curve.params[param], []).append(curve)
    return list(grouped.values())
3b5e3f4fca8720ef5c50a24732d39f5f41dc4690
37,456
def splitnest(d, sep='.'):
    """
    Takes dict where the keys are like 'one.two':
        d = {'one.two': 1, 'one.three': 2, 'two.one': 3}
    Splits by sep (default '.') and nests. Returns:
        {'one': {'two': 1, 'three': 2}, 'two': {'one': 3}}

    The helper recurses over key segments (DOC FIX: the original
    docstring claimed "Non-recursive", which was incorrect).
    Helps munchify so you can do: m.one.two
    """
    def nest(parts, target, value):
        # Descend/create one dict level per segment; assign at the last.
        if len(parts) < 2:
            target[parts[0]] = value
        else:
            nest(parts[1:], target.setdefault(parts[0], {}), value)

    result = dict()
    for key, value in d.items():
        nest(key.split(sep), result, value)
    return result
c6815e77a1713736f95edbea60c46b1941709594
37,458
import random


def randomPath(term=.25):
    """Generates a random (but valid) path name for a property given a
    chance to terminate after any segment.

    :param term: probability of stopping after each segment.
    """
    # BUG FIX: the uppercase range previously read
    # range(ord('A'), ord('A')+1) and therefore contained only 'A';
    # it now covers the full A-Z range as clearly intended.
    validNameChars = ([chr(c) for c in range(ord('a'), ord('z') + 1)]
                      + [chr(c) for c in range(ord('A'), ord('Z') + 1)]
                      + [chr(c) for c in range(ord('0'), ord('9') + 1)]
                      + ['.', '/'])
    meaningfulBits = ['frames', 'vars', '.f32', '.f64', '.u8', '.u64',
                      '.i32', '.i64', '.ind', '.uni']
    randomBits = [''.join(random.sample(validNameChars, random.randint(1, 10)))
                  for _ in range(20)]
    allBits = meaningfulBits + randomBits
    result = ''
    while random.random() > term:
        seg = ''.join(random.sample(allBits, random.randint(1, 5)))
        result += seg + '/'
    # get rid of trailing slash
    result = result[:-1]
    # Retry rather than return an empty path.
    return (result if result else randomPath(term=term))
a34f7f1a88509ae8cdf4a0717b94fdf6e7c5f3f2
37,461
from typing import Tuple
import math


def get_roll_and_shift(
    input_offset: Tuple[float, ...], target_offset: Tuple[float, ...]
) -> Tuple[Tuple[int, ...], Tuple[float, ...]]:
    """Decomposes delta as integer `roll` and positive fractional `shift`."""
    rolls = []
    shifts = []
    for inp, tgt in zip(input_offset, target_offset):
        delta = tgt - inp
        whole = -math.floor(delta)
        rolls.append(whole)
        shifts.append(delta + whole)
    return tuple(rolls), tuple(shifts)
b2233aa0f8f22072239b46bc8eb0ef730ff53334
37,463
def normalise_toolshed_url(tool_shed):
    """
    Return complete URL for a tool shed

    Arguments:
      tool_shed (str): partial or full URL for a toolshed server

    Returns:
      str: full URL for toolshed, including leading protocol.
    """
    # startswith accepts a tuple of prefixes.
    if tool_shed.startswith(('http://', 'https://')):
        return tool_shed
    return "https://%s" % tool_shed
4837f69122dc841549a4fc1923300be4eb04057c
37,464
def cleanup_page(s):
    """Clean up the article page string.

    cleanup_pages(str) -> str
    """
    # Collapse double dashes into single ones (one pass only).
    return s.replace('--', '-')
00e4f4e1bec424c574f557c99fbeff02ec029840
37,466
from collections import OrderedDict


def parse_MTL(filename):
    """ Return an ordered dict from a Landsat "MTL" metadata file

    Args:
        filename (str): MTL filename

    Returns:
        OrderedDict: dict of MTL file

    NOTE: import fixed — the runtime class now comes from
    ``collections`` instead of the deprecated ``typing.OrderedDict``
    alias, which is intended for annotations only.
    """
    data = OrderedDict()
    with open(filename, 'rt') as fid:
        for line in fid:
            split = line.split(' = ')
            # Only "KEY = VALUE" lines carry metadata; other lines
            # (END markers, continuations) are skipped.
            if len(split) == 2:
                data[split[0].strip().strip('"')] = split[1].strip().strip('"')
    return data
b06aa302475a0a4a5ee756db2d7f7aa02a44efd6
37,468
from pathlib import Path
import pickle


def cache_pickle(cache_path, func, args=[], kwargs={}):
    """Cache a function result as a pickle file.

    If *cache_path* exists, its pickled content is returned without
    calling *func*; otherwise *func* is executed and the result is
    written to *cache_path* (when one is given).

    Args:
        cache_path: output path, or None to disable caching
        func: function to execute
        args (list): positional arguments for *func*
        kwargs (dict): keyword arguments for *func*
    """
    # Load from cache when available; `with` closes the handle the
    # original left open.
    if cache_path is not None and Path(cache_path).exists():
        with open(cache_path, "rb") as fh:
            return pickle.load(fh)
    # Compute.
    result = func(*args, **kwargs)
    # Save. BUG FIX: the original unconditionally opened cache_path and
    # raised TypeError when it was None.
    if cache_path is not None:
        with open(cache_path, "wb") as fh:
            pickle.dump(result, fh)
    return result
ca85fda5ccccc445fe5d466c793dd17844ebe413
37,469
import subprocess


def get_git_branch():
    """Return the current git branch as a string"""
    raw = subprocess.check_output(["git", "symbolic-ref", "--short", "HEAD"])
    return raw.decode('utf8').strip()
e2dc01c591db37a89a7ad217c1d2304b6c65ce16
37,470
def get_list_dimensions(_list):
    """
    Takes a nested list and returns the size of each dimension followed
    by the element type in the list
    """
    if isinstance(_list, (list, tuple)):
        # BUG FIX: an empty sequence has no first element to recurse
        # into; the original raised IndexError here.
        if not _list:
            return [0]
        return [len(_list)] + get_list_dimensions(_list[0])
    return []
e36b3e375be454f819b2a75875d68e29d6a1d30e
37,471
def out_box(i, j, mesh):
    """
    Judge whether current pixel lies inside the bounding box formed by
    the vertices of current mesh

    :param i: idx of pixel
    :param j: idx of pixel
    :param mesh: current mesh
        ( [ [x0, y0, z0], [x1, y1, z1], [x2, y2, z2] ] )
    :return: bool: outside box: true, else false
    """
    xs = (mesh[0, 0], mesh[1, 0], mesh[2, 0])
    ys = (mesh[0, 1], mesh[1, 1], mesh[2, 1])
    return i > max(xs) or i < min(xs) or j > max(ys) or j < min(ys)
9a749a88632fc9b2abcebd42145bda17cd2a4161
37,473
def score_with_model(model, test_spo, direction="o"):
    """
    :param model: kge.model.KgeModel
    :param test_spo: torch.Tensor of test triples
    :param direction: 's' for heads or 'o' for tails
    :return scores: embedding scores for predicted triples
    """
    subjects = test_spo[:, 0]
    predicates = test_spo[:, 1]
    objects = test_spo[:, 2]
    if direction == "o":
        # score tails
        return model.score_sp(subjects, predicates)
    return model.score_po(predicates, objects)
d918f6fa766b2cf222b3be2012c0df4936003d42
37,474
def _tensor_name(tensor): """Get a name of a tensor without trailing ":0" when relevant.""" # tensor.name is unicode in Python 3 and bytes in Python 2 so convert to # bytes here. name = str(tensor.name) return name[:-2] if name.endswith(':0') else name
57b05fd2aaa7f65e49f9eb27538e9d1dfbe1d5c0
37,475
def big_o_nn(n_base, m=1, o=1, i=1, nodes=(100, 8), t=1, method='scikit', inv=False):
    """
    Calculates the expected computation effort compared to n_time

    :param n_base: Calculation time for baseline n
    :param m: features
    :param o: output neurons
    :param i: iterations
    :param nodes: list of node sizes (ie. [a, b, c, d] for a 4 layer network)
    :param t: training examples
    :param method: method for complexity calculation
    :param inv: divide instead of multiply by the complexity factor
    :return: Calculation time extrapolated to parameters that will be used
    """
    # Total edge count between consecutive layers.
    connections = 0
    for layer in range(len(nodes) - 1):
        connections += nodes[layer] * nodes[layer + 1]
    if method == 'stack':
        # https://ai.stackexchange.com/questions/5728/what-is-the-time-complexity-for-training-a-neural-network-using-back-propagation
        return n_base / (t * connections) if inv else n_base * t * connections
    if method == 'scikit':
        # https://scikit-learn.org/stable/modules/neural_networks_supervised.html
        # NOTE(review): the forward direction omits the ``m`` factor while
        # the inverse includes it; preserved exactly as the original.
        if inv:
            return n_base / (t * m * connections * o * i)
        return n_base * t * connections * o * i
21a85af0ecd8c0de765efdffbbe2aa4eb2572fae
37,476
def listdict_to_listlist_and_matrix(sparse):
    """Transforms the adjacency list representation of a graph of type
    listdict into the listlist + weight matrix representation

    :param sparse: graph in listdict representation
    :returns: couple with listlist representation, and weight matrix
    :complexity: linear
    """
    n = len(sparse)
    graph = [[] for _ in range(n)]
    weight = [[None] * n for _ in range(n)]
    for u in range(n):
        for v, w in sparse[u].items():
            graph[u].append(v)
            weight[u][v] = w
    return graph, weight
fb4b113317f78320add25940adc2d2f04797e118
37,477
def filter_tags(tags, prefixes=None):
    """Filter list of relation tags matching specified prefixes."""
    if prefixes is None:
        return tags
    # keep only tags that start with at least one requested prefix
    return tuple(tag for tag in tags
                 if any(tag.startswith(p) for p in prefixes))
4378831c0f6ebf290c9a6d0e33e7be1f57acb26d
37,479
def get_dict_value_insensitive(d: dict, k: str):
    """
    Returns a value matching to a case insensitive key of a dict

    Args:
        d (dict): The dict
        k (str): The key

    Returns:
        val: The matching value (or None; if several keys collide
        case-insensitively, the last one wins)
    """
    target = k.lower()
    folded = {key.lower(): val for key, val in d.items()}
    return folded.get(target)
3c730c58fad48faa1bc6421f110b2174ba7d088c
37,480
import platform
import os
import subprocess


def run_hook_command(cmd, param):
    """Executes a git hook command

    cmd = the command line file to be executed. This can be a file that
          is run by OS association.
    param = a list of parameters to pass to the cmd command

    On windows, the extension is checked to see if it should be run with
    the Git for Windows Bash shell. If there is no file extension, the
    file is deemed a bash shell and will be handed off to sh.exe.
    Otherwise, Windows will be called with the shell to handle the file
    assocation. For non Windows operating systems, the file is called as
    an executable.
    """
    cli = [cmd] + param
    use_shell = False
    if platform.system() == 'Windows':
        (root,ext) = os.path.splitext(cmd)
        if ext == "":
            # No extension: treat the hook as a bash script and run it
            # via Git for Windows' SH.EXE, found under %EXEPATH%\bin
            # when EXEPATH is set (empty prefix otherwise).
            exe_path = os.environ.get("EXEPATH")
            if exe_path is None:
                exe_path = ""
            else:
                exe_path = os.path.join(exe_path, "bin")
            cli = [os.path.join(exe_path, "SH.EXE")] + cli
        else:
            # Has an extension: let the Windows shell dispatch the file
            # by its association.
            use_shell = True
    # Non-Windows: execute the hook file directly.
    return subprocess.call(cli, shell=use_shell)
aaaa7fe021eaedba46d987c66dae74dba4be4125
37,482
def is_cwl_record(d):
    """Check if an input is a CWL record, from any level of nesting.
    """
    if not isinstance(d, dict):
        return None
    if d.get("type") == "record":
        return d
    # Depth-first over values; first record found wins.
    for value in d.values():
        record = is_cwl_record(value)
        if record is not None:
            return record
    return None
dbfa01c8d24d78e6da4fac0a4f912e3b11c56023
37,483
import os


def pyinstaller_path(relative_path):
    """Modify the path so that assets can be found in PyInstaller's onefile.

    When using the --onefile flag, PyInstaller extracts bundled files
    into a temporary folder whose location is stored in the _MEIPASS2
    environment variable; paths must be prefixed with it. --onedir
    builds are unaffected and fall back to the current directory.

    Modified from source: https://stackoverflow.com/questions/7674790
    """
    # PyInstaller creates a temp folder and stores path in _MEIPASS
    base = os.environ.get("_MEIPASS2", os.path.abspath("."))
    return os.path.join(base, relative_path)
a07993bbdc202dd5ad2a501facd0ff864ff20a42
37,484
def _put_categorical_feature_first(features, first_feature_categorical): """If the user is doing a two-way partial dependence plot and one of the features is categorical, we need to make sure the categorical feature is the first element in the tuple that's passed to sklearn. This is because in the two-way grid calculation, sklearn will try to coerce every element of the grid to the type of the first feature in the tuple. If we put the categorical feature first, the grid will be of type 'object' which can accommodate both categorical and numeric data. If we put the numeric feature first, the grid will be of type float64 and we can't coerce categoricals to float64 dtype. """ new_features = features if first_feature_categorical else (features[1], features[0]) return new_features
8e95c34486ae5755db9d4a1e051e8cee003b04f0
37,487
def get_severity(data):
    """Convert level value to severity
    """
    # Unknown levels default to 'Info'.
    return {'warning': 'Medium', 'error': 'Critical'}.get(data, 'Info')
f8b16def6a3c583114ae66d269d00417d91d20d6
37,488
def unit_interval(x, xmin, xmax, scale_factor=1.):
    """
    Rescale tensor values to lie on the unit interval.

    Values beyond xmin/xmax are rescaled the same way and end up
    outside [0, 1].

    Parameters
    ----------
    x : Tensor
        Input tensor, of any shape.
    xmin, xmax : float
        Minimum and maximum values: [xmin, xmax] -> [0, 1].
    scale_factor : float, optional
        Scale the unit interval so output lies in [0, scale_factor].

    Returns
    -------
    y : Tensor
        Rescaled version of x.
    """
    span = xmax - xmin
    return scale_factor * (x - xmin) / span
4083e1904eefeec606e8a22e53485f7007257e71
37,489
def get_index(result_tuple):
    """Takes a tuple like (2.5, 0, 2.5, 0, 0) and makes it into
    (1,0,1,0,0), then converts the base two number 10100 into base
    ten -- 20."""
    width = len(result_tuple)
    total = 0
    # Element at position p carries bit weight 2 ** (width - 1 - p).
    for position, value in enumerate(result_tuple):
        if value != 0:
            total += 2 ** (width - 1 - position)
    return total
406b848df05e151e4d0738347f60142eeb7181c1
37,491
def decoder_inputs_and_outputs(target_words, base_vocab):
    """Convert a sequence of tokens into a decoder input seq and output seq.

    Args:
        target_words (list[unicode])
        base_vocab (Vocab)

    Returns:
        input_words (list[unicode]): target prepended with <start>
        output_words (list[unicode]): target appended with <stop>
    """
    return [base_vocab.START] + target_words, target_words + [base_vocab.STOP]
ec66ec0fa7161d65ce6dea6688aa32e44f1b6377
37,492
def get_projection(projections, spec):
    """
    Get the projection for a spec from a projections dict.

    The first matching entry wins; the 'all' entry serves as fallback.
    """
    fallback = None
    for spec_like, projection in projections.items():
        if spec.satisfies(spec_like, strict=True):
            return projection
        if spec_like == 'all':
            fallback = projection
    return fallback
3163748e80581565a51620a9f15a14136b587e0d
37,494
def MatchAlignedDGP(dgp, idxmap_aligne2seq, posindexmap, aligned_toposeq):#{{{
    """ match dgp (a list of tuples) to the aligned toposeq

    posindexmap is the index map from the shrinked seq to the original seq
    idxmap_aligne2seq is a dictionary of index map from the original
    (non-shrinked) MSA to the gapless seq

    Returns a list of (aligned-column-index, dg) tuples for every
    non-gap alignment column whose original-sequence position has an
    entry in dgp.
    """
    aligned_dgp = []
    lenAlignedSeq = len(aligned_toposeq)
    resMap = {}   # NOTE(review): unused; kept for byte-compatibility
    inew = 0      # NOTE(review): unused; kept for byte-compatibility
    # An empty posindexmap means the alignment was not shrinked.
    if len(posindexmap) == 0:
        isShrink = False
    else:
        isShrink = True
    # convert dgp in to dictionary
    dgp_dt = {}
    for (idx, dg) in dgp:
        dgp_dt[idx] = dg
    for j in range(lenAlignedSeq):
        if aligned_toposeq[j] != '-':
            # Map alignment column j back to the original sequence
            # index, going through posindexmap only when shrinked.
            if isShrink:
                j_origseq = idxmap_aligne2seq[posindexmap[j]]
            else:
                j_origseq = idxmap_aligne2seq[j]
            # Columns without a dgp entry are silently skipped.
            try:
                dg = dgp_dt[j_origseq]
                aligned_dgp.append((j, dg))
            except KeyError:
                pass
    return aligned_dgp
96892329dbbf09b08a9038e644eaddb69842b25b
37,496
import os


def GetTestFilePath(path_segments):
    """Retrieves the path of a test file in the test data directory.

    Args:
      path_segments (list[str]): path segments inside the test data
          directory.

    Returns:
      str: path of the test file.
    """
    # Unpack so os.path.join receives individual segments, not a list.
    return os.path.join(os.getcwd(), 'test_data', *path_segments)
3f636af5da526aa0fa98cc0f8b43cd460c19a622
37,498
import logging


def _get_logger_class(hass_overrides):
    """Create a logger subclass.

    logging.setLoggerClass checks if it is a subclass of Logger and so
    we cannot use partial to inject hass_overrides.
    """
    class HassLogger(logging.Logger):
        """Home Assistant aware logger class."""

        def setLevel(self, level) -> None:
            """Set the log level unless overridden."""
            # Loggers whose names appear in hass_overrides keep their
            # configured level: external setLevel calls become no-ops.
            if self.name in hass_overrides:
                return

            super().setLevel(level)

        # pylint: disable=invalid-name
        def orig_setLevel(self, level) -> None:
            """Set the log level."""
            # Escape hatch that bypasses the override check above.
            super().setLevel(level)

    return HassLogger
a0a62edb01a062010aa4d991f95c88a7e080685e
37,499
def join_paths(*args) -> str:
    """Collect given paths and return summary joined absolute path.

    Also calculate logic for `./` starting path, consider it as
    "from me" relative point.

    Empty segments (and segments that reduce to nothing, like "/" or
    "./") are skipped — the original raised IndexError on them.
    """
    summary_path = ""
    for path in args:
        # Robustness fix: guard against empty segments.
        if not path:
            continue
        # Check if path starts from slash, to remove it to avoid errors
        if path[0] == "/":
            path = path[1:]
        # Check if path given in logical form (starts from "./")
        elif path[:2] == "./":
            # Remove leading "./" to perform proper paths joining
            path = path[2:]
        # Check if path ends with slash, to remove it to avoid errors
        if path and path[-1] == "/":
            path = path[:-1]
        if path:
            summary_path += "/" + path
    return summary_path
12f79f94954202c643f6fb14ae58eb855d5e96c7
37,500
def m2mm(value):
    """Convert *value* in meters to millimeters."""
    return 1000 * value
ca0f334c94200c7de070d99f350f0c54a3a836e4
37,501
def get_scatter_data(tsa, value_keynames, stat_func):
    """
    get data structure to use for highgraph scatter plots

    Builds a list of 3-item lists:

        [
            [<key>, stat_func(tsa[key][value_keynames[0]]),
                    stat_func(tsa[key][value_keynames[1]])],
            ...
        ]

    (The previous docstring described a list of dicts with "name"/"data"
    entries, which is not what this code builds.)

    parameters:
    tsa <TimeseriesArray>
    value_keynames <tuple> with len 2, represents x- and y-axis
    stat_func <str> statistical function to use to aggregate xcolumn and
        ycolumns, must exist in Timeseries object

    returns:
    <list> of <list> data structure to use directly in highgraph scatter
    plots, when json encoded
    """
    assert len(value_keynames) == 2
    data = []
    for key in tsa.keys():
        stats = tsa[key].get_stat(stat_func)
        data.append([key, stats[value_keynames[0]], stats[value_keynames[1]]])
    return data
f3f6425d95c325ba5384d4d2722b3d2b030938e6
37,502
def get_gradebook_mdata():
    """Return default mdata map for Gradebook"""
    return dict()
e99b7a65ed46d2cf5e796cf0188e11f4300d5918
37,503
def group_by(df, group_by_col, value_col):
    """take a dataframe and group it by a single column and return the
    sum of another"""
    grouped = df.groupby([group_by_col])
    return grouped[value_col].sum()
e82943568b8d107c0045af45a4923a442bf07486
37,505
def t(_data, copy=False):
    """Get the transposed dataframe

    Args:
        _data: The dataframe
        copy: When copy the data in memory

    Returns:
        The transposed dataframe.
    """
    transposed = _data.transpose(copy=copy)
    return transposed
e6acf4ccba87f10ce346e3618900807c748084e5
37,508
import operator


def compute_map(data, anomalous_keys):
    """
    Compute the mean average precision (MAP) score for test data.

    @data: dict mapping seq keys to anomaly scores
    @anomalous_keys: ground-truth list of anomalous seq keys
    Assumption: every anomalous key is present in the data
    return map_score
    """
    # Rank keys by descending anomaly score.
    ranked = sorted(data.items(), key=operator.itemgetter(1), reverse=True)
    # 0-based ranks at which ground-truth anomalies were retrieved.
    hit_ranks = [rank for rank, item in enumerate(ranked)
                 if item[0] in anomalous_keys]
    precisions = [(found + 1) / float(rank + 1)
                  for found, rank in enumerate(hit_ranks)]
    return sum(precisions) / len(precisions)
e513ce7ded4c1f32bf77b02dcddec9eeddee2681
37,509
import os


def get_xml_list(xml_path):
    """Get a list of xml files."""
    # NOTE: only the top-level directory is inspected — os.walk's first
    # yield is (root, dirs, files) and we return its files immediately.
    # Returns None when the path does not exist (walk yields nothing).
    for _root, _dirs, files in os.walk(xml_path):
        return files
8487801968b07a5ebc32dec6231151e4c463811c
37,512
def decode(ciphertext, key1, key2, forbidden=''):
    """
    Does a best effort to decode the ciphertext based on guessing when
    the encoding made a mistake
    """
    plaintext = ''
    index = 0
    while index < len(ciphertext):
        # Try decoding a full two-hex-digit byte first.
        decoded = chr(int(ciphertext[index:index+2], 16) ^ key1)
        if decoded.isprintable() and decoded not in forbidden:
            plaintext += decoded
        else:
            # Guess the encoder emitted only a single hex digit here:
            # decode one digit and step back one position so the final
            # `index += 2` advances by just one (realigning the stream).
            plaintext += chr(int(ciphertext[index], 16) ^ key1)
            index -= 1
        # Keystream advances linearly: key1 <- (key1 + key2) mod 256.
        key1 = (key1 + key2) % 256
        index += 2
    return plaintext
bfbc7739139e4efae105cf66d31969de1fbec27c
37,513
def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None):
    """
    Return the list of all [EPs] that have the matching *nodeid*,
    *iv*, *label*, and or *pred* values. If none match, return an
    empty list.
    """
    def matches(ep):
        # A criterion left as None acts as a wildcard.
        if nodeid is not None and ep.nodeid != nodeid:
            return False
        if iv is not None and ep.iv != iv:
            return False
        if label is not None and ep.label != label:
            return False
        if pred is not None and ep.pred != pred:
            return False
        return True

    return [ep for ep in xmrs.eps() if matches(ep)]
65ed019d42cbc4529a18e50741ce5ad561b93dd5
37,514
import re
def clearColors(message):
    """
    Clears ANSI color codes

    >>> clearColors("\x1b[38;5;82mHello \x1b[38;5;198mWorld")
    'Hello World'
    """
    # Non-string inputs pass through untouched.
    if not isinstance(message, str):
        return message
    return re.sub(r"\x1b\[[\d;]+m", "", message)
4d5edbe7a2899803f14a2d1cdd043d65d3a25718
37,515
def get_value_from_json(json_dict, sensor_type):
    """Return the value for sensor_type from the JSON.

    The synthetic "state" sensor reports "Printing" while a project is
    active (i.e. a "project_name" key is present) and "Operational"
    otherwise; every other sensor is a plain key lookup.
    """
    if sensor_type != "state":
        return json_dict.get(sensor_type)
    return "Printing" if "project_name" in json_dict else "Operational"
97e01d54943b877a4cdadf48a30caad55ab9628a
37,517
def add_progress(kwargs, git, progress):
    """Add the --progress flag to the given kwargs dict if supported by the
    git command. If the actual progress in the given progress instance is not
    given, we do not request any progress

    :return: possibly altered kwargs
    """
    if progress is None:
        return kwargs
    # --progress is supported from git 1.7 onwards.
    if git.version_info[:2] >= (1, 7):
        kwargs['progress'] = True
    return kwargs
a3963846fbb3015cfd2035fc2ed8b56ce36fd88e
37,518
def num_deriv(r, func, h = 0.1e-5):
    """Returns numerical derivative of the callable `func`

    Uses a central difference: evaluate func at r +/- h/2 and divide the
    change in value by the change in argument.

    :param r: Value at which derivative of `func` should be evaluated.
    :param func: Function whose gradient is to be evaluated.
    :param h: Step size used when performing numerical differentiation.
    :return: Numerical derivative of func at `r`."""
    lo = r - (h / 2.0)
    hi = r + (h / 2.0)
    return (func(hi) - func(lo)) / (hi - lo)
db6be035c23a36f1d49ec2812dfbe59b41f3c75e
37,519
def rename_tree(tree, names=None):
    """Rename the leaves of a tree from ints to names"""
    # First pass: convert every leaf name from its string form to an int.
    leaves = [node for node in list(tree) if node.is_leaf()]
    for leaf in leaves:
        tree.rename(leaf.name, int(leaf.name))
    # Second pass (optional): map the integer names onto the given names.
    if names:
        for index, new_name in enumerate(names):
            tree.rename(index, new_name)
    return tree
9ce485c73a2f71653e1bec4e78a44aaff7e9a654
37,520
import math
def poincare_index_at(i, j, angles, tolerance):
    """
    compute the summation difference between the adjacent orientations such that the orientations is less then 90 degrees
    https://books.google.pl/books?id=1Wpx25D8qOwC&lpg=PA120&ots=9wRY0Rosb7&dq=poincare%20index%20fingerprint&hl=pl&pg=PA120#v=onepage&q=poincare%20index%20fingerprint&f=false

    :param i: row index of the block whose Poincare index is computed
    :param j: column index of the block
    :param angles: 2-D array of orientation angles in radians
    :param tolerance: allowed deviation (in degrees) from the ideal index
    :return: one of "loop", "delta", "whorl" or "none"
    """
    # Offsets of the 8 neighbours around (i, j); the first cell is repeated
    # at the end so consecutive pairs wrap around the ring.
    cells = [(-1, -1), (-1, 0), (-1, 1),         # p1 p2 p3
             (0, 1), (1, 1), (1, 0),             # p8    p4
             (1, -1), (0, -1), (-1, -1)]         # p7 p6 p5
    # Orientation (in degrees) of each neighbour in the ring.
    angles_around_index = [math.degrees(angles[i - k][j - l]) for k, l in cells]
    index = 0
    for k in range(0, 8):
        # calculate the difference between consecutive ring orientations
        difference = angles_around_index[k] - angles_around_index[k + 1]
        # Normalize into (-90, 90]: orientations are only defined modulo 180°.
        if difference > 90:
            difference -= 180
        elif difference < -90:
            difference += 180
        index += difference
    # Total turning of ±180° marks a loop/delta; 360° marks a whorl.
    if 180 - tolerance <= index <= 180 + tolerance:
        return "loop"
    if -180 - tolerance <= index <= -180 + tolerance:
        return "delta"
    if 360 - tolerance <= index <= 360 + tolerance:
        return "whorl"
    return "none"
b1e0eae61b73d0f2ec987c88ac19c2aca6a6ed6b
37,521
import time
def is_token_expired(token_initiate_time: float, token_expiration_seconds: float) -> bool:
    """
    Check whether a token has expired.

    A token is considered expired once its age reaches the expiration window
    minus one minute (the minute of slack avoids racing the real expiry).

    for example ---> time.time() = 300, token_initiate_time = 240, token_expiration_seconds = 120
    300.0001 - 240 < 120 - 60

    Args:
        token_initiate_time (float): the time in which the token was initiated in seconds.
        token_expiration_seconds (float): the time in which the token should be expired in seconds.

    Returns:
        bool: True if token has expired, False if not.
    """
    token_age = time.time() - token_initiate_time
    return token_age >= token_expiration_seconds - 60
9834838aed4127900ba2bd4b8c78133d2377be64
37,525
import socket
def get_host():
    """
    Returns host information.

    @rtype: C{tuple}
    @return: A tuple containing the host name, the ip address, and the
        fully qualified domain name.
    """
    name = socket.gethostname()
    address = socket.gethostbyname(name)
    fully_qualified = socket.getfqdn()
    return (name, address, fully_qualified)
3ccc46aa5a5de7ee4afa813d1caea765da8ce0d5
37,526
def logout(identity):
    """Logout the user.

    :param identity: the caller's identity; any falsy value means no session.
    :returns: a dict with the operation result
    """
    if identity:
        return {'success': 'Successfully logged out.'}
    return {'no-data': ''}
31bdee658dde3b636f2840c2c8a1b428c5049623
37,528
import os
def load_vaihingen_data(folderlist):
    """load current file from the list

    Reads *folderlist* (a text file with one relative left-image path per
    line) and derives, for every listed left image, the matching right image
    (in a sibling 'colored_1' directory) and disparity map (in a sibling
    'disp_occ' directory).

    :param folderlist: path to the list file; entries are resolved relative
        to the directory containing this file.
    :return: (all_left_img, all_right_img, all_left_disp) lists of paths.
    """
    file_path, filename = os.path.split(folderlist)
    filelist = []
    with open(folderlist) as w :
        content = w.readlines()
        content = [x.strip() for x in content]
        # Keep only non-empty lines.
        for line in content :
            if line :
                filelist.append(line)
    all_left_img = []
    all_right_img = []
    all_left_disp = []
    for current_file in filelist :
        # The listed path is the left image, relative to the list file's dir.
        filename = file_path + '/' + current_file[0: len(current_file)]
        #print('left image: ' + filename)
        all_left_img.append(filename)
        #index1_dir = current_file.find('/')
        #index2_dir = current_file.find('/', index1_dir + 1)
        # Split off the last path component: index2_dir marks the final '/',
        # index1_dir the one before it (the directory to be substituted).
        index2_dir = current_file.rfind('/')
        index1_dir = current_file.rfind('/', 0, index2_dir)
        # Right image: same file name under a sibling 'colored_1' directory.
        filename = file_path + '/' + current_file[0: index1_dir] + '/colored_1' + current_file[index2_dir: len(current_file)]
        #print('right image: ' + filename)
        all_right_img.append(filename)
        # Disparity map: same file name under a sibling 'disp_occ' directory.
        filename = file_path + '/' + current_file[0: index1_dir] + '/disp_occ' + current_file[index2_dir: len(current_file)]
        #print('disp image: ' + filename)
        all_left_disp.append(filename)
    #pdb.set_trace()
    return all_left_img, all_right_img, all_left_disp
3b4be4b81368ad05c77c9f63417b4504d4205a7d
37,529
def add_desi_proc_joint_fit_terms(parser):
    """
    Add parameters to the argument parser that are only used by desi_proc_joint_fit
    """
    #parser.add_argument("-n", "--nights", type=str, help="YEARMMDD nights")
    for short_flag, long_flag, help_text in (
            ("-e", "--expids", "Exposure IDs"),
            ("-i", "--inputs", "input raw data files")):
        parser.add_argument(short_flag, long_flag, type=str, help=help_text)
    return parser
80c23dcfe1e91df2b4d42ca066a697715b10d39d
37,531
def _hgrid_find_chunks(fname: str):
    """
    Find different chunk of the Gr3/Hgrid file

    The file layout is: a header line, a "<nelem> <nnode>" count line, then
    the node lines, optionally followed by the element lines, optionally
    followed by the boundary lines.

    returns: dict(header, elem, nodes, [elems, [boundaries]])
    """
    with open(fname) as f:
        _ds = f.readlines()
        _length = len(_ds)

        # first check if there is minimum 2 lines to give us basic information
        try:
            assert(_length > 2)
        except AssertionError:
            raise Exception('Invalid length of file!')

        # Get the grid name at the first line, could be empty
        _name = ' '.join(_ds[0].split())

        # Get the element and node counts from the second line
        _nelem, _nnode = _ds[1].split()
        _nelem = int(_nelem)
        _nnode = int(_nnode)

        _return_chunks = {
            'header':_name,
            'nelem':_nelem,
            'nnode':_nnode
        }

        # Try reading the nodes sagment
        # NOTE(review): list slicing never raises IndexError, so this
        # try/except (and the one below) can only catch a short read
        # indirectly — verify whether a length check was intended.
        try:
            _nodes = _ds[2:_nnode+2]
        except IndexError:
            raise IndexError(f'Could not read {_nnode} nodes.')
        else:
            _return_chunks['nodes'] = _nodes

        # If we do not hit empty line just after nodes, then try reading the element sagment
        if _ds[_nnode+2].strip():
            try:
                _elems = _ds[_nnode+2:_nnode+_nelem+2]
            except IndexError:
                raise IndexError(f'Could not read {_nelem} elements.')
            else:
                _return_chunks['elems'] = _elems

        # If we do not hit empty line just after elements, then try reading the boundary sagment
        if _ds[_nnode+_nelem+2].strip():
            _boundaries = _ds[_nnode+_nelem+2:_length]
            # NOTE(review): _readflag is assigned but never used or returned.
            _readflag = (True, True, True)
            _return_chunks['boundaries'] = _boundaries

    return _return_chunks
82d662fe3c0b3cd16ef17a28d1091ab63f786bc9
37,532
def get_shape(card):
    """Returns the card's shape

    Args:
        card (webelement): a visible card

    Returns:
        str: card's shape
    """
    svg_use = card.find_element_by_xpath(".//div/*[name()='svg']/*[name()='use'][1]")
    # The href attribute looks like "#shapename"; drop the leading '#'.
    href = svg_use.get_attribute("href")
    return href[1:]
fc7fc60766625a22ac9bf9942ccd5bf32d80d959
37,534
def get_iterable(input_var):
    """
    Returns an iterable, in case input_var is None it just returns an empty tuple

    :param input_var: can be a list, tuple or None
    :return: input_var or () if it is None
    """
    return () if input_var is None else input_var
ad98d537e711a4357c1527df86b00d977d017a30
37,536
def check_category(driver, domain):
    """Check domain category on Fortiguard."""
    print("Checking Fortiguard proxy")
    driver.get(f"https://www.fortiguard.com/webfilter?q={domain}&version=8")
    heading = driver.find_element_by_xpath("//h4[@class='info_title']")
    # The heading text reads "Category: <name>"; keep only the name.
    return heading.text.replace("Category: ", "")
4bd2f32b4df01971ba985daeb0134229e896f92a
37,537
import ast
def to_dict(value):
    """
    Create a dictionary from any kind of incoming object
    """
    if value is None:
        return {}
    if isinstance(value, dict):
        return value
    # Strings such as "{'a': 1}" are parsed as Python literals.
    return ast.literal_eval(value)
a215554ca0bb86775b5cf97c793e163c78123fac
37,538
def get_path(path, npa):
    """ creates path string from list

    Joins the first npa+1 elements of *path* into a single string.
    """
    prefix = path[:npa + 1]
    return ''.join(prefix)
46e0c1351facde66ab8de23628c7196544921fb9
37,539
def __position_string_wrapper(position_str='top right'):
    """ Convert the string into the format n_rows, n_cols, index """
    # Mapping position name -> (n_rows, n_cols, subplot index).
    layout = {
        'full': (1, 1, 1),
        'top': (2, 1, 1),
        'bottom': (2, 1, 2),
        'left': (1, 2, 1),
        'right': (2, 2, 2),
        'top left': (2, 2, 1),
        'top right': (2, 2, 2),
        'bottom left': (2, 2, 3),
        'bottom right': (2, 2, 4),
    }
    if position_str in layout:
        return layout[position_str]
    raise NotImplementedError("Unknown position '{}', "
                              "Choose from: 'top', 'bottom', 'left', 'right', "
                              "'top left', 'top right', 'bottom left', 'bottom right'".format(position_str))
4379ab385cb7e26bb4b67ff9ce4a9a2e28641094
37,540
import pytz
from datetime import datetime
def eventIsNow(event):
    """Checks if an event object is happening right now.

    Args:
        event: Object with 'start' and 'end' datetimes or dates.

    Returns:
        Whether the event is now.
    """
    if 'start' not in event or 'end' not in event:
        return False
    # Google returns datetimes in the calendar's timezone and this server is
    # assumed to share it, so all values are stamped UTC purely to make the
    # comparison below one between like-for-like aware datetimes.
    utc = pytz.utc
    start = event['start'].replace(tzinfo=utc)
    end = event['end'].replace(tzinfo=utc)
    now = datetime.now().replace(tzinfo=utc)
    return start <= now <= end
f07735d3254e66c618776391914209cca902e532
37,541
def status_reporter(server_name, status):
    """Produce a report for a server status dict"""
    lines = [f"***{server_name}***"]
    # One line per status key, with its values shown sorted.
    lines.extend(f"{key}: {sorted(values)}" for key, values in status.items())
    lines.append("\n")
    return "\n".join(lines)
67142767276a4e5c49c422b6cd8627ff63058190
37,543
from typing import List
from typing import Tuple
def bounded_rectangle(
    rect: List[Tuple[float, float]], bounds: List[Tuple[float, float]]
) -> List[Tuple[float, float]]:
    """
    Resize rectangle given by points into a rectangle that fits within bounds,
    preserving the aspect ratio

    :param rect: Input rectangle [ll, ur], where each point is (y, x)
    :param bounds: Input bounding rectangle [ll, ur]
    :return: Bounded rectangle with same aspect ratio
    """
    assert len(rect) == len(bounds) == 2
    assert rect[0][0] <= rect[1][0] and rect[0][1] <= rect[1][1]
    assert bounds[0][0] <= bounds[1][0] and bounds[0][1] <= bounds[1][1]

    (y0, x0), (y1, x1) = rect
    w = x1 - x0
    h = y1 - y0
    w_bound = bounds[1][1] - bounds[0][1]
    h_bound = bounds[1][0] - bounds[0][0]

    # Shrink until both dimensions fit; each clamp scales the other axis by
    # the same factor, preserving the aspect ratio.
    while w > w_bound or h > h_bound:
        if w > w_bound:
            h = h * (w_bound / w)
            w = w_bound
        if h > h_bound:
            w = w * (h_bound / h)
            h = h_bound

    return [rect[0], (y0 + h, x0 + w)]
30da7769b7ea6d8cf69d1c791614d7db1999d335
37,545
def image_center_crop(img):
    """
    Center crop images.

    Crops the longer spatial axis symmetrically so the result is square
    (despite the variable names in the original, no padding is added).

    Args:
    -----
        img (numpy.ndarray): Image array to crop, shape (H, W, C).

    Returns:
    --------
        numpy.ndarray: the center-cropped square image.
    """
    h, w = img.shape[0], img.shape[1]
    diff = abs(h - w)
    # Split the excess between the two ends (front gets the extra pixel).
    cut_front = diff - diff // 2
    cut_back = diff // 2
    if h > w:
        return img[cut_front: h - cut_back, :, :]
    return img[:, cut_front: w - cut_back, :]
d97c3c978d9b0b8170450d513aba1beab8850435
37,546
def get_requirements():
    """
    Returns a list of dependencies from the `requirements.txt` file

    Each line is stripped of surrounding whitespace; blank lines become
    empty strings, matching the original line-by-line reader.
    """
    with open("requirements.txt", "r") as fp:
        return [line.strip() for line in fp]
97de087c9e4dd1d935f53d9cec683b8c9e431acf
37,547
import time
def getTimeStr(seconds):
    """Get HH:MM:SS time string for seconds.

    Durations of a day or more are rendered as "<days>dHH:MM:SS".
    """
    if seconds < 86400:
        return time.strftime('%H:%M:%S', time.gmtime(seconds))
    whole = int(seconds)
    days = whole // 86400
    return time.strftime('{}d%H:%M:%S'.format(days), time.gmtime(whole))
174fab592b2edeae81a8bdd6688810eee8c5a53b
37,548
def make_matching(user_mentions):
    """Return the 2-vs-2 match table string for five players.

    *user_mentions* is the list of the five players' mentions; each round
    pairs two red players against two green players with one spectator/camera.
    """
    rounds = [
        [2, 3, 4, 5, 1],
        [1, 4, 3, 5, 2],
        [1, 5, 2, 4, 3],
        [1, 3, 2, 5, 4],
        [1, 2, 3, 4, 5]
    ]
    players = [None] + user_mentions  # 1-based player lookup
    lines = ["\n対戦表\n"]
    for round_no, (red1, red2, green1, green2, cam) in enumerate(rounds, start=1):
        lines.append("%d戦目 🔴 %s %s 🟢 %s %s 📹 %s\n" % (
            round_no, players[red1], players[red2],
            players[green1], players[green2], players[cam]))
    return "".join(lines)
3b37ad03cc247165195d2c0878604d363b117729
37,551
def celsius_to_fahrenheit(temp):
    """Simple temperature conversion Cº to Fº"""
    scaled = temp * 1.8
    return scaled + 32
d5574ffc5bb4e10e7e51af836c6a60dee76f488f
37,552
def json_to_uri(credentials_json):
    """
    Convert JSON object containing database credentials into a string
    formatted for SQLAlchemy's create_engine, e.g.:
    drivername://user:password@host:port/dbname

    It is assumed that the json has already been validated against
    json_ops.CREDENTIALS_SCHEMA

    :param json credentials_json: JSON object containing database credentials
    :return str credentials_str: URI for connecting to the database
    """
    c = credentials_json
    return "{drivername}://{username}:{password}@{host}:{port}/{dbname}".format(
        drivername=c["drivername"],
        username=c["username"],
        password=c["password"],
        host=c["host"],
        port=c["port"],
        dbname=c["dbname"],
    )
821994b3d4d7bc8cda4bfb4324a4c23023586c70
37,553
def percentage(x, y):
    """
    Convert x/y into a percentage. Useful for calculating success rate

    Args:
        x (int)
        y (int)

    Returns:
        str: percentage formatted into a string
    """
    return f'{100 * x / y:.2f}%'
361b427b413ef989dc2aec8d804a30641d0c49e8
37,555
def global_accuracy(task_accuracies, test_samples_per_task):
    """
    Calculate global accuracy of the model based on accuracies from single
    tasks accounting number of test samples per task.

    :param task_accuracies: list of accuracies for each task
    :param test_samples_per_task: list of test samples for each task
    :return: total/global accuracy in %
    """
    accurate_samples = 0
    for accuracy, samples in zip(task_accuracies, test_samples_per_task):
        # Back out the (rounded) number of correct samples for this task.
        accurate_samples += round((accuracy / 100) * samples)
    return (accurate_samples / sum(test_samples_per_task)) * 100
eab9c9000d16504ace30dc276faa1d24eeb427aa
37,556
def temp_commentary(current_temp):
    """Gives temperature advice to the end user."""
    temperature = int(current_temp)
    # (inclusive lower bound, message) pairs, hottest band first.
    bands = (
        (95, "It's scorching hot right now. Stay inside and be cool!"),
        (80, "It's hot and sunny right now. Don't forget that sunscreen!"),
        (69, "It's nice and warm right now. Time to flex those flip-flops!"),
        (59, "It's nice and cool right now. Go play outside in this great weather!"),
        (40, "It's cold right now. Make sure you keep yourself warm!"),
        (25, "Brrrrrrr!!! Remember to wear your protective gear so you don't freeze!"),
    )
    for lower_bound, message in bands:
        if temperature >= lower_bound:
            return message
    return "It's Freezing Cold. Staying inside and a cup of Hot chocolate would be nice!"
24150b262959c27a0a505000da60e1329ac82a1d
37,557
def select_user(user):
    """Selects a specific mysql user and returns true if it exists.

    :param user: the MySQL user name to look up
    :return: the SQL SELECT statement as a string
    """
    # NOTE(review): *user* is interpolated directly into the SQL text, so a
    # caller passing untrusted input could inject SQL. Prefer a parameterized
    # query (e.g. "... WHERE user = %s" with the driver's params argument).
    return "SELECT user FROM mysql.user WHERE user = '" + user + "'"
66864c4190ff3518e648ed8956d44ff07ebd8414
37,559
def db_list_sensors(cursor):
    """ Get list of temperature and humidity sensors from database """
    cursor.execute("SELECT DISTINCT name FROM sensors")
    # fetchall() yields one-element row tuples; flatten them into a list
    # of plain names.
    return [row[0] for row in cursor.fetchall()]
e0b78d5649bc61dd4f90e25dadf783c2e19ce207
37,560
def text2seg_pos(seg_pos_text, pattern='[。!?]'):
    """Split segmented documents into sentences at the given punctuation.

    Each item of *seg_pos_text* is one tokenized user review whose terms are
    "word/pos" strings. A sentence ends whenever a word appears in *pattern*.

    :param seg_pos_text: iterable of reviews, each an iterable of "word/pos"
        terms.
    :param pattern: string of sentence-terminating punctuation characters.
        NOTE(review): membership is tested with ``word in pattern``, so the
        literal '[' and ']' characters also terminate sentences — confirm
        whether a regex character class was intended instead.
    :return: (seg_list, pos_list, seg_review_list) where seg_list/pos_list
        hold the per-sentence words/POS tags (a trailing fragment without
        terminal punctuation is dropped) and seg_review_list holds each
        review's full word sequence.
    """
    seg_list = []  # all sentence word lists, split at the punctuation
    pos_list = []  # all sentence POS lists, split at the punctuation
    seg_review_list = []  # the complete word sequence of each review
    for seg_pos in seg_pos_text:
        seg_sub_list = []
        pos_sub_list = []
        cur_review = []
        for term in seg_pos:
            word, flag = term.split('/')
            cur_review.append(word)
            if word in pattern:
                # Sentence terminator: close the current sentence
                # (including the punctuation itself) and start a new one.
                seg_sub_list.append(word)
                pos_sub_list.append(flag)
                seg_list.append(list(seg_sub_list))
                pos_list.append(list(pos_sub_list))
                seg_sub_list = []
                pos_sub_list = []
            else:
                seg_sub_list.append(word)
                pos_sub_list.append(flag)
        seg_review_list.append(list(cur_review))
    return seg_list, pos_list, seg_review_list
b3dca66fc210db6ee5decc800e373b0933152781
37,561
def build_sql_filter(url_filters, fields):
    """given a dictionary containing a list of url parameters, construct the
    sql code for in (<field_name>__in=), like (<field_name>__like=) and is
    equal (<field_name>=) to for each of the specified fields in the filter
    dictionary. Any fields that do not exist in the table are ignored. The
    fields key of the filter is also ignored - as it is used elsewhere to
    build the select statement. A special "notNull" key holds a
    comma-separated list of fields that must be non-null and non-empty.

    Arguments:
    - `url_filters`: dict of url parameter name -> value
    - `fields`: names of the columns that exist in the table

    NOTE(review): filter values are interpolated directly into the SQL text,
    so untrusted input could inject SQL — prefer parameterized queries.
    """
    _notNulls = url_filters.get("notNull")
    if _notNulls:
        notNulls = list(_notNulls.split(","))
    else:
        notNulls = []
    # "<field>__like" parameters: case-insensitive substring matches.
    likes = [
        (k.replace("__like", ""), v.upper())
        for k, v in url_filters.items()
        if k.endswith("__like")
    ]
    # "<field>__in" parameters: comma-separated values, each quoted for IN().
    ins = [
        (k.replace("__in", ""), ",".join(["'{}'".format(x) for x in v.split(",")]))
        for k, v in url_filters.items()
        if k.endswith("__in")
    ]
    # Everything else (except the reserved "fields" key) is exact equality.
    rest = [
        (k, v)
        for k, v in url_filters.items()
        if not (k.endswith("__in") or k.endswith("__like")) and not k == "fields"
    ]
    # check for fields here - make sure they exist in this table and remove them if they don't
    notNulls = [x for x in notNulls if x in fields]
    likes = [x for x in likes if x[0] in fields]
    ins = [x for x in ins if x[0] in fields]
    rest = [x for x in rest if x[0] in fields]
    notNull_sql = " AND ".join(
        ["NOT ([{0}] is null OR [{0}]='') ".format(x) for x in notNulls]
    )
    likes_sql = " AND ".join(
        ["UPPER([{}]) like '%{}%'".format(fld, value) for fld, value in likes]
    )
    ins_sql = " AND ".join(["[{}] in ({})".format(fld, value) for fld, value in ins])
    rest_sql = " AND ".join(["[{}]='{}'".format(fld, value) for fld, value in rest])
    # Join the non-empty clause groups into the final WHERE fragment.
    sql = " AND ".join(
        [x for x in [likes_sql, ins_sql, notNull_sql, rest_sql] if x != ""]
    )
    return sql
e5c4e42461f8c82fb6513fb5c8015d4eed6d4dee
37,562
from typing import Tuple
from typing import Optional
import ipaddress
import socket
def ip_and_hostname(ip_or_hostname: str) -> Tuple[str, Optional[str]]:
    """returns (ip, maybe_hostname)

    A valid IP literal is returned as-is with no hostname; anything else is
    treated as a hostname and resolved via DNS.
    """
    try:
        ipaddress.ip_address(ip_or_hostname)
    except ValueError:
        # Not an IP literal: resolve the hostname and keep it.
        return socket.gethostbyname(ip_or_hostname), ip_or_hostname
    return ip_or_hostname, None
967c90c748c0a52614c56e1a4b7b8615862ccc09
37,563
def speed2dt(speed):
    """Calculate the time between consecutive fall steps using the *speed*
    parameter; *speed* should be an int between 1 and 9 (inclusively).
    Returns time between consecutive fall steps in msec."""
    # Higher speed -> shorter delay: speed 1 gives 3600 ms, speed 9 gives 400 ms.
    return 400 * (10 - speed)
676fd396ca83cedc1a6e15addc06abdfd98bced9
37,564
def _flip_top_bottom_boundingbox(img, boxes): """Flip top bottom only bounding box. Args: img: np array image. boxes(np.ndarray): bounding boxes. shape is [num_boxes, 5(x, y, w, h, class_id)] """ height = img.shape[0] if len(boxes) > 0: boxes[:, 1] = height - boxes[:, 1] - boxes[:, 3] return boxes
ad196f59f85d5a6027e0a17ce6d543303c102357
37,565
def get_subclasses(c):
    """ Get all subclasses of a given class

    Returns the direct subclasses followed by each one's own subtree,
    recursively (pre-order).
    """
    direct = c.__subclasses__()
    result = list(direct)
    for sub in direct:
        result.extend(get_subclasses(sub))
    return result
c38c6d9df23039c816d508663c5f40a84b9de299
37,566
def back_indel_shift(info_index_list, cur_index) -> int:
    """return acc_shift(back_indel_shift)

    Accumulates the lengths of all indel entries that start before
    *cur_index*; entries at or past *cur_index* do not contribute.

    Args:
        info_index_list (list/tuple): (start_index, length) pairs generated
            from align.cigar tuples (soft clips and insertions).
        cur_index (int): index related to MD tag in BAM file.

    Returns:
        int: acc_shift index
    """
    # parse soft clip and insertion
    acc_shift = 0
    for info_start_index, info_len in info_index_list:
        if info_start_index >= cur_index:
            break
        acc_shift += info_len
    # Bug fix: the original fell off the end of the loop and implicitly
    # returned None when every entry started before cur_index; always
    # return the accumulated shift (also covers the empty-list case).
    return acc_shift
457080c2ead0c97abf462623116b255856bdb0ed
37,568