content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _get_subgroup(file: str) -> str: """Function that will pull the subgroup substring out of the filepath Parameters file : str filepath to the ERSA file Returns returns a substring that has the subgroup. Ex "sub1" """ return file[-4:]
a7e8a396b27a98223c2fd3595a3922bf1bc73662
25,807
import torch


def angles_directions_to_quat(angles, directions):
    """Represent rotation angles and axes as quaternions (w, x, y, z)."""
    half = angles / 2
    w = half.cos().view(-1, 1)
    xyz = half.sin().view(-1, 1) * directions
    return torch.cat((w, xyz), dim=1)
c92f0df9c6619a910b0fc19b97e2029601905172
25,808
def getfield(f):
    """convert values from cgi.Field objects to plain values."""
    if not isinstance(f, list):
        return f.value
    # A repeated form field arrives as a list; convert each element.
    return [getfield(item) for item in f]
d49f997213b4ca0825d40f890cfcb05979da3c22
25,809
import re


def assign_cluster_wordnet_sense(file_name, input_lemma):
    """Group clusters by the WordNet sense UKB assigned to them.

    After UKB assigns WN senses to the top 5 words of each cluster, find
    clusters with the same sense.

    Parameters
    ----------
    file_name : str
        Name of the UKB output file under ``../data/temp/``.
    input_lemma : str
        Lemma whose sense assignments should be collected.

    Returns
    -------
    dict
        Mapping of synset-offset string -> list of cluster-number strings.
    """
    similar_senses = {}
    with open('../data/temp/{}'.format(file_name)) as f:
        for line in f:
            fields = line.split()
            if not fields or fields[0] == '!!':
                continue
            if re.match(r'^ctx_.+', line):
                wordnet_synset_offset = fields[2]
                lemma = fields[4]
                if lemma != input_lemma:
                    continue
                # cluster id is the part after '_' in e.g. 'ctx_12'
                cluster = fields[0].split('_')[1]
                # BUG FIX: the original used list.extend() on a string,
                # which appended each character separately ('12' ->
                # ['1', '2']); append() keeps the id intact.
                similar_senses.setdefault(wordnet_synset_offset, []).append(cluster)
    return similar_senses
29b2ab7ca4fa429ceaf721ecda22649267a0ab4b
25,810
import sympy


def _jrc(a, n):
    """Get the Jacobi recurrence relation coefficients."""
    shared_denom = 2 * (n + 1) * (a + n + 1)
    c1 = sympy.Rational((a + 2 * n + 1) * (a + 2 * n + 2), shared_denom)
    c2 = sympy.Rational(a * a * (a + 2 * n + 1), shared_denom * (a + 2 * n))
    c3 = sympy.Rational(n * (a + n) * (a + 2 * n + 2),
                        (n + 1) * (a + n + 1) * (a + 2 * n))
    return (c1, c2, c3)
c11527f9b568924cf3b84b3ac36e1641b3ed022e
25,812
import json


def loads(data) -> dict:
    """Load a JSON document into a dict via the available JSON converter.

    :param data: JSON text (str/bytes) to parse.
    :return: the parsed object (a dict for JSON objects).
    """
    parsed = json.loads(data)
    return parsed
6be110c4739d4a94fc77be592e83de6cca7856ac
25,813
def test_get_project__returns_data(from_line, tmpdir, monkeypatch, pypi_repository):
    """
    Test PyPIRepository._get_project() returns expected project data.
    """
    expected_data = {"releases": {"0.1": [{"digests": {"sha256": "fake-hash"}}]}}

    # Minimal stand-in for a requests.Response: only the attributes
    # _get_project() touches (status_code and .json()).
    class MockResponse:
        status_code = 200

        @staticmethod
        def json():
            return expected_data

    def mock_get(*args, **kwargs):
        return MockResponse()

    # Intercept HTTP access so the test never talks to the real PyPI.
    monkeypatch.setattr(pypi_repository.session, "get", mock_get)
    ireq = from_line("fake-package==0.1")
    actual_data = pypi_repository._get_project(ireq)
    assert actual_data == expected_data
8a8979983e33561e5875bcd2331bef5cec9417a4
25,814
async def get_story_data(session, story_id, story_rank):
    """
    Gets the given story data - title and url - from the Hacker News API.

    :param session: an aiohttp-style client session (assumed; must support
        ``async with session.get(url)``) -- confirm against caller
    :param story_id: numeric id of the story to fetch
    :param story_rank: rank of the story, passed through unchanged
    :return: tuple of (story_rank, title, url); url is "" when absent
    """
    url = 'https://hacker-news.firebaseio.com/v0/item/{}.json'.format(story_id)
    async with session.get(url) as response:
        result_data = await response.json()
        story_url = ""
        if "url" in result_data:
            # The url key might not be in the results data (e.g. Ask HN posts)
            story_url = result_data['url']
        return story_rank, result_data['title'], story_url
cbac4d05915a82ab11854b9365acddb9c42944bd
25,816
def format_authors(authors):
    """
    Input: string of (possibly comma- and semi-colon-separated) authors
    Output: list of dicts, stripped of empty authors and whitespace
    """
    formatted = []
    for raw in authors.split(";"):
        # Skip entries with no alphanumeric content at all.
        if not any(ch.isalnum() for ch in raw):
            continue
        parts = raw.split(",", maxsplit=1)
        last = parts[0].strip()
        first = parts[1].strip() if len(parts) > 1 else ""
        formatted.append({"last_name": last, "first_name": first})
    return formatted
9de8ec149c4ca2ed7bb32c116be908fd59d61b59
25,818
def text_cell(f):
    """wrap text cells in appropriate divs"""
    def wrapped(self, cell):
        classes = "text_cell_render border-box-sizing rendered_html"
        body = f(self, cell)
        return ['<div class="%s">' % classes] + body + ['</div>']
    return wrapped
b17c7a997ff352aaabdc455ddaaa0a933f0cd11a
25,819
def comp_power(self, out_dict, machine):
    """Compute the electrical average power

    Parameters
    ----------
    self : Electrical
        an Electrical object
    out_dict : dict
        Dict containing all magnetic quantities that have been calculated
        in comp_parameters of EEC
    """
    qs = machine.stator.winding.qs
    # All quantities are in RMS
    out_dict["Pem_av"] = qs * (
        out_dict["Ud"] * out_dict["Id"] + out_dict["Uq"] * out_dict["Iq"]
    )
    return out_dict
d743e7b64688d03b7a52872b79e05f99b6e1e0f7
25,820
def color_string(guess: str, target: str) -> str:
    """ Returns Wordle colors for guess given target. """
    colors = []
    for idx, letter in enumerate(guess):
        if letter == target[idx]:
            colors.append("g")       # right letter, right spot
        elif letter in target:
            colors.append("y")       # right letter, wrong spot
        else:
            colors.append("b")       # letter absent
    return "".join(colors)
f5caa33850d50e6d3c98532d77da5a740a255d10
25,821
import numpy def _add_poly(poly1, poly2): """ Add two-dimensional polynomials together. Parameters ---------- poly1 : numpy.ndarray poly2 : numpy.ndarray Returns ------- numpy.ndarray """ if not isinstance(poly1, numpy.ndarray) and poly1.ndim == 2: raise TypeError('poly1 must be a two-dimensional numpy array.') if not isinstance(poly2, numpy.ndarray) and poly2.ndim == 2: raise TypeError('poly2 must be a two-dimensional numpy array.') out = numpy.zeros((max(poly1.shape[0], poly2.shape[0]), max(poly1.shape[1], poly2.shape[1])), dtype='float64') out[:poly1.shape[0], :poly1.shape[1]] += poly1 out[:poly2.shape[0], :poly2.shape[1]] += poly2 return out
bb927bfd9bace54c1d769e8a015296f987c8c018
25,823
def diamond(n):
    """Create a diamond-shaped string using asterisks.

    Keyword arguments:
    n -- the size of the Diamond

    Return resulted diamond-shaped string, or None when n is even or < 1.
    """
    if n < 1 or n % 2 == 0:
        return None
    rows = []
    for row in range(n):
        # Widen until the middle row, then narrow again.
        width = row * 2 + 1 if row <= n / 2 else (n - row) * 2 - 1
        rows.append(' ' * int((n - width) / 2) + '*' * width)
    return '\n'.join(rows) + '\n'
f9dda781c629a38a994aa464c3595132150075fb
25,825
def merge_data(data_list, tmp_data, **kwargs):
    """Function: merge_data

    Description:  Joins a series of similar token data into a single
        string and appends the (string, token_type) tuple to the
        summary list.

    Arguments:
        (input) data_list -> List of summarized categorized tokens.
        (input) tmp_data -> List of current series of token data.
        (output) data_list -> List of summarized categorized tokens.
    """
    summary = list(data_list)
    series = list(tmp_data)
    merged_text = " ".join(entry[0] for entry in series)
    # The token type of the whole series is taken from its first entry.
    summary.append((merged_text, series[0][1]))
    return summary
2a9bc12da1ae308f18a58ae3869beceadeca242e
25,826
def check_device_condition(obj_db, arg_device, arg_attr, arg_data):
    """
    Checks the conditions in database for arg_device, arg_attribute

    NOTE(review): the '>'/'<' branch builds raw SQL by string
    concatenation, which is injection-prone -- verify obj_db.select
    expects that form.
    """
    op = arg_data[0]
    if op == '>' or op == '<':
        clause = ('device_id = ' + arg_device + ' AND attr_id = ' + arg_attr +
                  ' AND data ' + op + ' ' + arg_data[1:])
        rows = obj_db.select('device_attr', clause)
    else:
        # A leading '=' is optional; strip it before the equality lookup.
        value = arg_data[1:] if op == '=' else arg_data
        rows = obj_db.select('device_attr',
                             {'device_id': arg_device, 'attr_id': arg_attr,
                              'data': value})
    return bool(rows)
3f608a3fdd6e11f3b78577447c998c0452de27ff
25,827
def unique_contributors(nodes, node):
    """ Projects in New and Noteworthy should not have common contributors """
    candidates = set(node['contributors'])
    return all(
        not candidates.intersection(existing['contributors'])
        for existing in nodes
    )
4cd63731b36f3aeec6c46564f36d8c95585c2879
25,830
import time


def time_func(target, *args, **kwargs):
    """
    Time the execution of a function.

    :param target: The target function.
    :param args: Arguments to the function.
    :param kwargs: Keyword arguments to the function.
    :return: Tuple of (target's return value, elapsed wall-clock seconds).
    """
    started = time.time()
    result = target(*args, **kwargs)
    elapsed = time.time() - started
    return result, elapsed
42d6fdf0ccc481317bd2ae47049681ab4bd728c5
25,833
import requests


def get_uniprot(accession, uniprot_url='https://www.uniprot.org/uniprot/{0}.txt'):
    """
    Retrieve Uniprot Annotation file from Uniprot ID e.g. Q15858

    :param accession: UniProt accession; surrounding whitespace is stripped
        before being substituted into ``uniprot_url``.
    :param uniprot_url: format template for the annotation-file URL.
    :return: the response body decoded as UTF-8.
    :raises ValueError: when the request fails or the response is falsy
        (non-2xx status).
    """
    try:
        results = requests.get(
            uniprot_url.format(accession.strip()),
            allow_redirects=True
        )
    except ValueError:
        # NOTE(review): network failures in requests normally raise
        # requests.exceptions.RequestException, not ValueError -- this
        # handler may never fire; confirm the intended exception type.
        raise ValueError('no Uniprot results retrieved for {0}'.format(accession))
    if results:
        return results.content.decode("utf-8")
    else:
        raise ValueError('no Uniprot results retrieved for {0}'.format(accession))
852d801c7110e14d0e33d188828bd4c839194589
25,835
import json


def parse_header_json(
        header,
        key_mod=lambda x: x,
        value_mod=lambda x: x
):
    """
    Parse an HTTP header returning a dict where the headers are the keys
    and the values are the values

    Parameters
    ----------
    header : str or list
        HTTP header to parse, either as a string containing json or as a
        list of dicts with 'h' and 'v' keys
    key_mod : callable, optional
        Function mapping str to str that modifies the header names
    value_mod : callable, optional
        Function mapping str to str that modifies the header values

    Returns
    -------
    data : dict
        dict with header names as keys and header values as values, or
        None when a string argument is not valid JSON
    """
    if isinstance(header, str):
        try:
            header_json_parsed = json.loads(header)
        except ValueError:
            # Not valid JSON; mirror the original contract of returning None.
            return None
    elif isinstance(header, list):
        # BUG FIX: the docstring promises list support, but the original
        # raised ValueError for anything that was not a str.
        header_json_parsed = header
    else:
        raise ValueError("header has type '%s'- expected str" % type(header))
    return {
        key_mod(entry['h']): value_mod(entry['v'])
        for entry in header_json_parsed
    }
2c6f80a21150f74ce864715dd1b24e48faf74333
25,838
def in_3d_box(box, coords):
    """
    Check if point is in a box

    Args:
        box (tuple): ((x0, x1), (y0, y1), (z0, z1)).
        coords (tuple): (x, y, z).

    Returns
        bool
    """
    return all(
        low <= value <= high
        for (low, high), value in zip(box, coords)
    )
4580e67c89b02565b0ac4d1b5c1d11dd5396f74a
25,840
def _jwt_decode_handler_no_defaults(token): # pylint: disable=unused-argument """ Accepts anything as a token and returns a fake JWT payload with no defaults. """ return {}
0d24d14c41ad427cfbfb7796b64ca6d2fb8830a6
25,841
import math


def r_max_KimKim(c, Theta_r, Theta_a, Theta, sigma, rho, g):
    """ effective maximum drop radius """
    hysteresis = math.cos(Theta_r) - math.cos(Theta_a)
    numerator = 6 * c * hysteresis * math.sin(Theta) * sigma
    shape_factor = 2 - 3 * math.cos(Theta) + math.cos(Theta) ** 3
    return math.sqrt(numerator / (math.pi * shape_factor * rho * g))
0e89c65ff66309de858f7d74406b546f450227ae
25,842
import time
import functools


def timestamp_decorator(func):
    """Decorator that stamps the time a function takes to execute.

    BUG FIX: the wrapper now returns the wrapped function's result;
    previously the return value was silently dropped. functools.wraps
    also preserves the wrapped function's metadata.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print(f' Finished in {end-start:.3} secs')
        return result
    return wrapper
c517b56646e0ab6f0f89effb61d68373a36327f7
25,844
def read_txt_data(file_name, num_per_line=1):
    """
    Read and parse data from a txt file.

    :param file_name: path of the text file
    :param num_per_line: expected number of items per line
    :return: list with the first item of every line that has exactly
        ``num_per_line`` whitespace-separated items (commas are removed
        before splitting)
    """
    parsed = []
    with open(file_name) as file_object:
        for raw_line in file_object.readlines():
            # Drop commas, strip the trailing newline, split on whitespace.
            fields = raw_line.replace(',', '').strip('\n').split()
            if len(fields) == num_per_line:
                parsed.append(fields[0])
    return parsed
6b47bde11eec5c805c6bb009b514346b19bd17d5
25,845
def wiki_brand_compare(pan):
    """Check a BIN number (first 6 digits of pan) against brand data from
    the Wikipedia page https://en.wikipedia.org/wiki/Payment_card_number.

    :param pan: card number string; only the first 6 digits are examined
    :return: brand name string, or "UNKNOWN" when no prefix rule matches
    """
    # Accuracy is not guaranteed!!!
    # Matching performed in order of most accurate digit groupings
    bin_number = pan[:6]
    pan_first_1 = int(bin_number[:1])
    pan_first_2 = int(bin_number[:2])
    pan_first_3 = int(bin_number[:3])
    pan_first_4 = int(bin_number[:4])
    # pan_first_5 is currently unused: no 5-digit rules exist (June 2017).
    pan_first_5 = int(bin_number[:5])
    pan_first_6 = int(bin_number[:6])
    # If Wikipedia has a 6 digit match
    if pan_first_6 in range(560221, 560225+1):
        return "BANKCARD"
    elif pan_first_6 in range(622126, 622925+1):
        return "DISCOVER"
    elif pan_first_6 in [564182, 633110]:
        return "SWITCH"
    elif pan_first_6 in range(506099, 506198+1) or pan_first_6 in range(650002, 650027+1):
        return "VERVE"
    elif pan_first_6 in range(979200, 979289+1):
        return "TROY"
    # If Wikipedia has a 5 digit match
    # none as of June 2017
    # If Wikipedia has a 4 digit match
    elif pan_first_4 == 5610:
        return "BANKCARD"
    elif pan_first_4 in [2014, 2149]:
        return "DINERSCLUB"
    elif pan_first_4 == 6011:
        return "DISCOVER"
    elif pan_first_4 in range(3528, 3589+1):
        return "JCB"
    elif pan_first_4 in [6304, 6706, 6771, 6709]:
        return "LASER"
    elif pan_first_4 in [5019, 4175, 4571]:
        return "DANKORT"
    elif pan_first_4 in range(2200, 2204+1):
        return "MIR"
    elif pan_first_4 in range(2221, 2720+1):
        return "MASTERCARD"
    elif pan_first_4 in [6334, 6767]:
        return "SOLO"
    elif pan_first_4 in [4903, 4905, 4911, 4936, 6333, 6759]:
        return "SWITCH"
    elif pan_first_4 == 5392:
        return "CARDGUARD"
    # If Wikipedia has a 3 digit match
    elif pan_first_3 in [300, 301, 302, 303, 304, 305, 309]:
        return "DINERSCLUB"
    elif pan_first_3 in range(644, 649+1):
        return "DISCOVER"
    elif pan_first_3 == 636:
        return "INTERPAYMENT"
    elif pan_first_3 in [637, 638, 639]:
        return "INSTAPAYMENT"
    # If Wikipedia has a 2 digit match
    elif pan_first_2 in [34, 37]:
        return "AMEX"
    elif pan_first_2 == 62:
        return "CHINAUNIONPAY"
    elif pan_first_2 in [36, 38, 39]:
        return "DINERSCLUB"
    elif pan_first_2 == 65:
        return "DISCOVER"
    elif pan_first_2 in [50, 56, 57, 58]:
        return "MAESTRO"
    elif pan_first_2 in [51, 52, 53, 54, 55]:
        return "MASTERCARD"
    # If Wikipedia has a 1 digit match
    elif pan_first_1 == 6:
        return "MAESTRO"
    elif pan_first_1 == 4:
        return "VISA"
    elif pan_first_1 == 1:
        return "UATP"
    else:
        return "UNKNOWN"
e981d6e01f8a1492b100ad9dd4b63c08c0821421
25,847
def cyberpunk(dppd):
    """Turn this plot into a cyberpunk styled plot with theme and glowing figures

    :param dppd: a dppd/plotnine-style wrapper exposing the chained
        ``theme_cyberpunk`` / ``scale_color_cyberpunk`` /
        ``scale_fill_cyberpunk`` methods (assumed -- confirm against caller)
    :return: the restyled object; its ``df.cyberpunked`` flag is set True
        so downstream code can detect the styling
    """
    res = dppd.theme_cyberpunk().scale_color_cyberpunk().scale_fill_cyberpunk()
    res.df.cyberpunked = True
    # Glow-variants of the plotting verbs are not wired up yet:
    # res.add_scatter = res.add_scatter_cyberpunk
    # res.add_line = res.add_line_cyberpunk
    return res
c965ab8a89f6702bd1230749cfab970b39ae74ba
25,849
import subprocess
import logging


def execute(command):
    """Executes the specified command and returns the stdout output.

    Returns None (after logging a warning) when the command exits non-zero.
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    if process.returncode:
        logging.warning('Failed to execute command: %s', command)
        return None
    return stdout.strip()
c3092a2cdc108130096143c9f01845df9724ccb6
25,850
def bool_not(x):
    """Implement `bool_not`."""
    negated = not x
    return negated
b97c8ee15bf48729b9110f5d69ad760e3cd611c2
25,851
def path_distance(path_1, path_2, feature_names, min_max_feature_values):
    """path_distance function computes the distance of two paths (rules)
    Args:
        path_1: the first path
        path_2: the second path
        feature_names: the list of features
        min_max_feature_values: the min and max possible values of each feature
    Return:
        distance: the distance of the paths

    Notes
    -----
    A path maps feature name -> list of (operator, threshold) pairs;
    with two entries the feature is bounded on both sides (assumed:
    entry 0 holds the upper '<=' bound, entry 1 the lower -- confirm
    against the rule-extraction code).
    """
    distance = 0
    feature_count = 0
    for i in feature_names:
        if i in path_1 and i in path_2:
            # Feature constrained in both paths: compare the two intervals.
            if len(path_1[i]) == 2:
                l1 = path_1[i][1][1]
                u1 = path_1[i][0][1]
            else:
                # One-sided rule: fill the open side with the feature's
                # global min or max.
                if path_1[i][0][0] == '<=':
                    u1 = path_1[i][0][1]
                    l1 = min_max_feature_values[i][0]
                else:
                    l1 = path_1[i][0][1]
                    u1 = min_max_feature_values[i][1]
            if len(path_2[i]) == 2:
                l2 = path_2[i][1][1]
                u2 = path_2[i][0][1]
            else:
                if path_2[i][0][0] == '<=':
                    u2 = path_2[i][0][1]
                    l2 = min_max_feature_values[i][0]
                else:
                    l2 = path_2[i][0][1]
                    u2 = min_max_feature_values[i][1]
            # Interval distance: mean of the two endpoint differences.
            distance = distance + (1 / 2) * (abs(l1 - l2) + abs(u1 - u2))
            feature_count = feature_count + 1
        elif i in path_1 or i in path_2:
            # Feature constrained in only one path: fixed penalty of 1.
            distance = distance + 1
            feature_count = feature_count + 1
    # Average over the features that appeared in at least one path.
    if feature_count != 0:
        distance = distance / feature_count
    else:
        distance = 0
    return distance
97e243f549270da698f89bf1f27ac95b6a802f5a
25,852
def tflite_ios_lab_runner(version):
    """This is a no-op outside of Google."""
    # Can switch back to None when
    # https://github.com/bazelbuild/rules_apple/pull/757 is fixed
    default_runner = "@build_bazel_rules_apple//apple/testing/default_runner:ios_default_runner"
    return default_runner
8bc765c9f534e1960f5b2217f414063417f7fba5
25,853
def uri_for(reference):
    """
    Utility function for accessing the URI or a reference.

    :param reference:
    :return:
    """
    return getattr(reference, "_uri")
3b1f99fdd87ac282f7ada774a208cf5a7ca67c85
25,854
import argparse
import os


def parse_args():
    """Parse the command line arguments."""
    parser = argparse.ArgumentParser(
        description='YubiKey screen unlocker.',
        add_help=True,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--configure', action='store_true', default=False,
                        help='Configure token')
    parser.add_argument('--slot', type=int, default=1,
                        help='Configuration slot to use')
    parser.add_argument('--secret', type=str, default='~/.yubiunlock',
                        help='Location of shared secret file.')
    parser.add_argument('--poll', type=float, default=1,
                        help='Polling interval in seconds to check for token')
    parser.add_argument('--daemon', action='store_true', default=False,
                        help='Send program to background')
    parser.add_argument('--screensaver', type=str, default='gnome-screensaver',
                        help='Name of screensaver process to kill')
    parsed = parser.parse_args()
    # Expand '~' so downstream code gets a usable secret path.
    parsed.secret = os.path.expanduser(parsed.secret)
    return parsed
23f05fd3162121cb3abf998cddbcc9875779fe55
25,855
def hexStr2Bytes(hexStr: str) -> bytes:
    """
    Convert an hexadecimal string in bytes

    :param hexStr: The hexadecimal string
    :type hexStr: str
    :return: The bytes of the hexadecimal string
    :rtype: bytes
    """
    converted = bytes.fromhex(hexStr)
    return converted
720ae814a2252db7497fc5850eb02a262b32aa0c
25,856
def convert_window_size(ws):
    """
    Convert a shorthand window size with a bp/kb/mb suffix to an integer
    number of base pairs (i.e. "100kb" == int(100000)).

    Args:
        ws: window size string (bp/kb/mb suffix)

    Returns:
        Integer window size in base pairs, or None for an unknown suffix.
    """
    window_size = None
    if "bp" in ws:
        # BUG FIX: "bp" is already base pairs; the original multiplied by 100.
        window_size = int(ws.strip("bp"))
    elif "kb" in ws:
        window_size = int(ws.strip("kb")) * 1000
    elif "mb" in ws:
        # BUG FIX: 1 mb = 1,000,000 bp; the original multiplied by 10000.
        window_size = int(ws.strip("mb")) * 1000000
    return window_size
c6c93cc78ec260862fbe1e91dddaf74394aa58ea
25,858
def deserialize_utf8(value, partition_key):
    """A deserializer accepting bytes arguments and returning utf-8 strings

    Can be used as
    `pykafka.simpleconsumer.SimpleConsumer(deserializer=deserialize_utf8)`,
    or similarly in other consumer classes
    """
    # A UnicodeError propagates if either payload is not valid utf-8.
    decoded_value = value if value is None else value.decode('utf-8')
    decoded_key = partition_key if partition_key is None else partition_key.decode('utf-8')
    return decoded_value, decoded_key
21cc61d6048b5f7d9333ceadb86d03666719f05f
25,860
from typing import Iterable
from typing import Union
from typing import List
from typing import Generator


def make_single_arguments(iterable_of_args: Iterable, generator: bool = True) -> Union[List, Generator]:
    """
    Converts an iterable of single arguments to an iterable of single argument tuples

    :param iterable_of_args: A numpy array or an iterable containing tuples of arguments to pass to a worker, which
        passes it to the function
    :param generator: Whether or not to return a generator, otherwise a materialized list will be returned
    :return: Iterable of single argument tuples
    """
    if generator:
        return ((arg,) for arg in iterable_of_args)
    return [(arg,) for arg in iterable_of_args]
4469b7bb1a18e72948380ebea90c52459f528be1
25,861
def getVocabularyList(fileName):
    """
    Read the corpus vocabulary from a vocabulary-list file.

    :param fileName: path to a file whose first line holds the
        tab-separated vocabulary
    :return: list of vocabulary words
    """
    fr = open(fileName)
    try:
        return fr.readline().strip().split('\t')
    finally:
        fr.close()
aac0bb7621508a013baa5ea2f0b9aebd66762e5c
25,862
def FindPosCol( iDotData ):
    """Find the column representing the SNP position, based on our conventions."""
    if 'Pos' in iDotData.headings:
        return 'Pos'
    return 'pos'
7048a0fd3d662f392e5a8b36acd94d1b92b674ba
25,863
def alf (c, s):
    """ change for 1 alfabet """
    if c not in s:
        return False
    # Shift the character halfway around the alphabet string.
    shifted_index = (s.index(c) + len(s) // 2) % len(s)
    return s[shifted_index]
c91f26eed0bb376facf8fcbc3e2d319c6a1a00cb
25,864
import argparse


def handle_arguments():
    """
    Parse and return the command line arguments for spectrogram creation.

    :return args: The parsed arguments
    """
    parser = argparse.ArgumentParser(
        description='Split EEG data preprocess and create spectrograms')
    parser.add_argument('-c', '--class', dest='classes', required=True,
                        choices=['PD_OFF', 'PD_ON', 'NONPD', 'ALL'],
                        help='Flag used to determine what class type we want to cretae spectrogram images for')
    parser.add_argument('-s', '--stft', dest='stft', action='store_true', default=False,
                        help='Flag used to utilize the short-time fourier transform in data processing')
    parser.add_argument('-w', '--wave', dest='wavelet', action='store_true', default=False,
                        help='Flag used to utilize wavelet transform in data processing')
    parser.add_argument('-i', '--input-dir', dest='input_dir', required=True,
                        help='Flag used to determine the root input directory of the data')
    parser.add_argument('-o', '--output-dir', dest='output_dir', required=True,
                        help='Flag used to determine the root output path to place images')
    parser.add_argument('-a', '--ica', dest='ica', action='store_true', required=False,
                        help='Flag used to generate Independent Component Analysis of EEG data')
    return parser.parse_args()
9556b377f0cbe8fe42bb8f3b02728dfcdf53252c
25,865
def get_media_info(player):
    """ Fetch the currently playing track and artist information.

    :param player: a vlc-style MediaPlayer whose media metadata field 12
        holds an "Artist - Track" string (assumed -- confirm field id).
    :return: tuple of (artist, track); track is "" when no '-' separator
        is present.
    """
    media = player.get_media()
    info = str(media.get_meta(12))
    # BUG FIX: split only on the FIRST '-' so hyphens inside the track
    # name stay intact, and tolerate metadata without any separator
    # (the original raised IndexError).
    parts = info.split("-", maxsplit=1)
    artist = parts[0]
    track = parts[1] if len(parts) > 1 else ""
    return artist, track
32463946e98d453ae399b25dc7d6b8555812f319
25,867
def cut_img(img, x, y):
    """
    Crop an image around its center point.

    :param img: the PIL-style image to crop (needs ``.size`` and ``.crop``)
    :param x: desired crop width in pixels
    :param y: desired crop height in pixels
    :return: the cropped image
    """
    x_center = img.size[0] / 2
    y_center = img.size[1] / 2
    # Box corners relative to the center; // keeps whole-pixel offsets.
    new_x1 = x_center - x//2
    new_y1 = y_center - y//2
    new_x2 = x_center + x//2
    new_y2 = y_center + y//2
    new_img = img.crop((new_x1, new_y1, new_x2, new_y2))
    return new_img
8060ca4b34d36de3ed74fc99870317d679e44a77
25,870
def bin_to_dec(bin):
    """ parameters: bin: list -> returns int"""
    # Horner's scheme: shift the accumulator left and add each bit.
    result = 0
    for bit in bin:
        result = result * 2 + bit
    return result
c930685aa767347133b039723d7604a1652cc839
25,871
import copy


def gaussj(A, b):
    """
    Solve Ax = b by Gauss-Jordan Elimination (no pivoting)
    returns solution vector x

    NOTE(review): despite the summary, the row-swap below implements
    partial pivoting; the docstring and code disagree -- confirm intent.
    """
    A = copy.deepcopy(A)
    # number of rows in A
    n = len(A)
    # append b to A, forming the augmented matrix [A | b]
    for j in range(n):
        A[j].append(b[j])
    # loop through cols
    for i in range(n):
        # Search for max in col (pivot selection)
        maxVal = abs(A[i][i])
        maxRow = i
        for j in range(i+1, n):
            if abs(A[j][i]) > maxVal:
                maxVal = abs(A[j][i])
                maxRow = j
        # Swap max row with current row
        for j in range(i, n+1):
            tmp = A[maxRow][j]
            A[maxRow][j] = A[i][j]
            A[i][j] = tmp
        # Make all rows below this one 0 in current column
        for j in range(i+1, n):
            c = -A[j][i] / A[i][i]
            for k in range(i, n+1):
                if i == k:
                    A[j][k] = 0
                else:
                    A[j][k] += c * A[i][k]
    # Solve equation Ax = b for an upper triangular matrix A
    # (back substitution on the augmented column)
    x = [0 for i in range(n)]
    for i in range(n-1, -1, -1):
        x[i] = A[i][n] / A[i][i]
        for j in range(i-1, -1, -1):
            A[j][n] -= A[j][i] * x[i]
    # return the solution vector x
    return x
97b9927c550bbc52470d42be420d8cec6fd9576c
25,875
def get_step_time_shift(step):
    """Calculates the time shift generated in each filtering step

    Parameters
    ----------
    step: dict
        Dictionary object holding information about a given filter step

    Returns
    -------
    shift: float
        Time shift value
    """
    numtaps = len(step["window"])
    # A linear-phase FIR filter delays by (numtaps - 1) / 2 samples.
    return step["input_sample_period"] * ((numtaps - 1) / 2)
b6c2d299337e37a13e0fe6472577da3e3663aae1
25,876
def query_row_args_2_dict(args):
    """
    change query_row_args to dict

    :param args: query row args
    :type args: QueryRowArgs
    :return:
    :rtype dict
    """
    result = {}
    result['rowkey'] = args.rowkey
    result['maxVersions'] = args.max_versions
    result['cells'] = args.cells
    return result
50f7131052cf17fba960759a5d6a27b3203e4008
25,881
def basin_ensemble_mean(dict_by_basin, basin_name, case):
    """Compute the multi-GCM ensemble mean SPEI for a given basin and case

    Parameters
    ----------
    dict_by_basin : DICT
        Stores SPEI per basin
    basin_name : STR
        Which basin to study
    case : STR
        'WRunoff', 'NRunoff', 'diff'

    Returns
    -------
    em: pandas.Series object
        Ensemble mean SPEI for this basin and case
    """
    # Mean across models (columns) at each timestep.
    return dict_by_basin[basin_name][case].mean(axis=1)
b7c9c2bebca2639f260c65abddea0e30c3ecef7f
25,883
def timedelta_to_seconds(td):
    """Convert a timedelta to seconds.

    The returned seconds will be rounded to the nearest whole number.
    """
    total = td.total_seconds()
    return round(total)
66cbe5409ac378825590ba96fc5affc469267f3d
25,884
def create_groups(groups, client):
    """Find all groups and create new groups if needed

    :param groups: iterable of dicts whose values are comma-separated
        group-name strings (assumed from the .values()/split usage --
        confirm against caller)
    :param client: API client exposing ``groups()`` and
        ``create_group()`` returning objects with ``.id`` / ``.name``
    :return: list of {"id", "group_name"} dicts covering every requested
        group, with missing ones created via the client
    """
    to_add = []
    existing_groups = []
    # Flatten every comma-separated group name mentioned in the pairs.
    for pair in groups:
        pair_groups = list(pair.values())
        for group in pair_groups:
            for cleaned_group in group.split(","):
                to_add.append(cleaned_group.strip())
    # Snapshot the groups that already exist server-side.
    for existing_group in client.groups():
        existing_groups.append(
            {"id": existing_group.id, "group_name": existing_group.name})
    # Create whatever is still missing and record it in the same format.
    for group_name in to_add:
        if not any(d['group_name'] == group_name for d in existing_groups):
            created_group = client.create_group({"name": group_name})
            existing_groups.append(
                {"id": created_group.id, "group_name": created_group.name})
    return existing_groups
2de912076181c38c46febef57d5071dd610cbc2d
25,885
import copy


def remove_whitespace(dictionary):
    """
    Return a deep copy of `dictionary` with entries whose values are
    empty or whitespace-only strings removed.

    BUG FIX: the original called dict.iteritems(), which only exists in
    Python 2; under Python 3 it raised AttributeError.

    :param dictionary: mapping to filter (not modified)
    :return: filtered deep copy
    """
    nd = copy.deepcopy(dictionary)
    bad_keys = [k for k, v in nd.items()
                if hasattr(v, 'strip') and v.strip() == '']
    for bad_key in bad_keys:
        del nd[bad_key]
    return nd
7f06794cafff4754430a45ee39b38735263fc357
25,886
import hashlib


def sha256(filepath, blocksize=2**20):
    """
    Return the sha256 hash of a file.

    `blocksize` adjusts how much of the file is read into memory at a time.
    This is useful for large files.
        2**20: 1 mb
        2**12: 4 kb
    """
    hasher = hashlib.sha256()
    with open(filepath, 'rb') as afile:
        # Feed the file to the hasher one block at a time.
        for chunk in iter(lambda: afile.read(blocksize), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
086d6284953709a87b1a14c5c5b8c56b28b1d9d5
25,887
def get_empty_user_credentials():
    """
    Monkeypatch GithubBackend.get_user_credentials to force the case where
    invalid credentials were provided
    """
    empty = ('', '', False, False)
    return empty
fb2cc890f8cd41a9fa25757cdae6b70ff7db2094
25,888
def sum_risk(method):
    """ Sum risk of all nodes in the method graph.

    Entry-point nodes are excluded; nodes without a "risk" attribute
    contribute nothing.

    NOTE(review): graph.nodes_iter() is the networkx 1.x API (renamed to
    nodes() in 2.x); confirm the pinned networkx version.
    """
    risk = 0
    for node_name in method.graph.nodes_iter():
        node = method.graph.node[node_name]
        if "entry_point" in node:
            continue
        if "risk" in node:
            risk += node["risk"]
    return risk
5ae3bc3138a7f4b9d905c5f2122c7a1ecd9bbc09
25,889
def split_labels(data, label_idx=-1):
    """
    Split labels from numerical data

    :param data: array of inputs data
    :type data: nd.array
    :param label_idx: index where label is located in the array. It can
        be only at start or at the end of the array
    :type label_idx: int
    :return: data without labels, labels
    :rtype: nd.array, nd.array
    :raises RuntimeError: if label_idx is neither the first nor the
        last column
    """
    last_col = data.shape[-1] - 1
    # BUG FIX: the last column's index is shape[-1] - 1; the original only
    # matched shape[-1] (one past the end), so passing the real last index
    # raised RuntimeError. shape[-1] is still accepted for compatibility.
    if label_idx in (-1, last_col, data.shape[-1]):
        return data[..., :-1], data[..., -1]
    elif label_idx == 0:
        return data[..., 1:], data[..., 0]
    else:
        raise RuntimeError('Labels must be on axis 0 or 1')
d3b59dca790255ae14269836ace58df151f84684
25,890
import ast


def init_quantizer(name: str) -> ast.Assign:
    """
    Generate quantization node initialization to add to the end of __init__()

    :param name: generated name of the node
    :return: quantization init ast node
    """
    # Builds: self.<name> = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
    descriptor = ast.Attribute(
        value=ast.Attribute(
            value=ast.Name(id="quant_nn", ctx=ast.Load()),
            attr="QuantLinear",
            ctx=ast.Load(),
        ),
        attr="default_quant_desc_input",
        ctx=ast.Load(),
    )
    target = ast.Attribute(
        value=ast.Name(id="self", ctx=ast.Load()), attr=name, ctx=ast.Store())
    call = ast.Call(
        func=ast.Name(id="TensorQuantizer", ctx=ast.Load()),
        args=[descriptor],
        keywords=[],
    )
    return ast.Assign(targets=[target], value=call)
b6159998556dba27bbb46049d7aeb8cbd390d2c7
25,891
import random


def creerCercle(nb_couleur, nb_point):
    """Create the circle.

    out: list with the colour number of every point, e.g.
    [1, 0, 2, 4, 0, ..., 1, 0, 0] for nb_couleur = 5; each colour
    appears exactly nb_point times.
    """
    counts = [0] * nb_couleur
    circle = []
    # Rejection sampling: draw colours until each has been used
    # exactly nb_point times.
    while len(circle) != nb_couleur * nb_point:
        pick = random.randint(0, nb_couleur - 1)
        if counts[pick] != nb_point:
            circle.append(pick)
            counts[pick] += 1
    return circle
77feba3b1c5422b867a9a4dce574457098058238
25,892
def str2link(s, directory, title=''):
    """
    Used by the --html options, this takes a string and makes it into an
    html <a href...> link without a closing </a>.
    """
    if directory == '':
        return ''
    if title:
        return '<a href="%s/%s" title="%s">' % (directory, s, title)
    return '<a href="%s/%s">' % (directory, s)
512af2dd4308ac65b24f84e24aec95cb8144f8a1
25,893
import re


def extract_only_numbers(text):
    """
    Extract only the digits from a string.

    Useful in OCR when detecting digits: 'o'/'O' are first mapped to '0'
    because tesseract frequently confuses them even when told to parse
    only digits.

    :param text: A string
    :type text: str
    :return: string with only the digits
    :rtype: str
    """
    normalized = text.replace('O', '0').replace('o', '0')
    return ''.join(re.findall(r'\d+', normalized))
a51520376c492a0629c564e8b8d54a19c1d5041e
25,894
def _get_unit(x): """Get unit for conversion of time deltas to integers""" if hasattr(x, "freqstr"): return x.freqstr else: return None
5ed6042cc1e4e603de50716694aea9939cb1479d
25,895
import fnmatch


def does_pattern_exists(patterns_list, value):
    """
    Checks a list of patterns against a value.

    :param: patterns_list : A list of regular glob expression strings
    :type: str
    :returns: Returns True if any of the patterns match the value,
        False otherwise.
    :type: boolean
    """
    text = str(value)
    return any(fnmatch.fnmatch(text, str(pattern)) for pattern in patterns_list)
cb1d6c8ea079aa25f980b42c2755db9253f68284
25,896
def _set_antecessor(self, descendants): """ Set reference to largest related cluster. Notes ----- We want the clusters to know who they are related to. The antecessor is the largest structure in the current family. Every time a new branch is formed the branch becomes the antecessor. However, we must descend the family tree and assign the antecessor property of all descendants (branch clusters or leaf clusters) to the current branch. """ # Create a temporary list of descendants that will be updated new_descendants = descendants # Cycle through descendants looking for new descendants while (len(new_descendants) !=0 ): descendant_list = [] # Loop over descendants for descendant in new_descendants: # Set the antecessor property to the current cluster level descendant._antecessor = self # Check to see if the current descendant has any descendants if (len(descendant.descendants) !=0 ): # If there are, add these to the descendant_list descendant_list.extend(descendant.descendants) # Once search for descendants has finished begin a new search based # on the descendant_list new_descendants = descendant_list return self._antecessor
93c682e6821cc5620ddf0a78057e4dba6a2d2fb0
25,897
import wave
import struct


def readwav(wavfile, duration, start_time=0, channel=0):
    """Read a segment of a wav file.

    If stereo then can select channel 0 or channel 1.

    :param wavfile: path (or file object) of the wav file
    :param duration: segment length in seconds
    :param start_time: offset in seconds at which to start reading
    :param channel: 0 or 1; which channel to keep for stereo files
    :return: (tuple of int16 samples, sample rate)
    """
    # FIX: use a context manager so the file handle is always closed;
    # the original leaked it.
    with wave.open(wavfile, 'rb') as wave_fp:
        sample_rate = wave_fp.getframerate()
        nchannels = wave_fp.getnchannels()
        nsamples = int(duration * sample_rate)
        wave_fp.setpos(int(start_time * sample_rate))
        wavbytes = wave_fp.readframes(nsamples)
    signal = struct.unpack(f'{nsamples * nchannels}h', wavbytes)
    if nchannels == 2:
        # Stereo frames interleave the channels; keep every other sample.
        signal = signal[channel::2]
    return signal, sample_rate
65b98941e6d58fa28f503fccfef80c95436bc3e3
25,898
def top_n_countries(world_confirmed, feature='Confirmed', n=10):
    """Return list of top n countries with highest feature
    (Confirmed, Deaths, Recovered, Daily ...)."""
    # Collapse to one row per country, keeping the maximum of each column.
    per_country = world_confirmed.groupby('Country/Region').agg('max')
    # Rank countries by the requested feature, highest first.
    ranked = per_country.sort_values(feature, ascending=False)
    return list(ranked.head(n).index.values)
dd50882f8378e2405c8d6b2eadbe878d11b43351
25,900
def selectors_escape(s):
    """
    Escapes all selectors used in html, css, js & jquery

    :param s: string
        The string to escape
    :return: string
        The escaped string
    """
    # Characters that must be escaped before use in a selector. The order
    # matters: the backslash itself is processed first.
    reserved = '\!"#$%&\'()*+,./:;<=>?@[]^``{|}~'
    escaped = s
    for reserved_char in reserved:
        # Each occurrence is prefixed with a double backslash.
        escaped = escaped.replace(reserved_char, f'\\\\{reserved_char}')
    return escaped
fd95d9ebd5de2461bcf836f4e0792b17433451ce
25,901
import subprocess
import sys


def is_latest_version(package_name: str, current_version: str):
    """Compare the installed version of a pip package with the newest release.

    Parameters
    ----------
    package_name : str
        The name of a pip python package
    current_version : str
        The installed version of a package, such as "1.2.3"

    Returns
    -------
    bool
        Whether the installed version equals the newest available version
    """
    # Asking pip to install a non-existent version ("==random") makes it
    # print the list of available versions in its error output.
    pip_output = str(subprocess.run(
        [sys.executable, '-m', 'pip', 'install', '{}==random'.format(package_name)],
        capture_output=True, text=True))
    # Slice out the "(from versions: a, b, c)" section of pip's message.
    versions_part = pip_output[pip_output.find('(from versions:') + 15:]
    versions_part = versions_part[:versions_part.find(')')]
    # The newest version is listed last.
    newest = versions_part.replace(' ', '').split(',')[-1]
    return newest == current_version
45c99a755913ae2215c6aaa67971bd6e6d82ad0e
25,902
def d(series, n=1):
    """Difference over n periods"""
    # Subtract the series shifted forward by n positions from itself.
    shifted = series.shift(n)
    return series - shifted
b585f13851a9cff9655cad29564b909ecffbd238
25,903
import os
import logging


def substituteVars(oldList, runSet, sourcefile=None):
    """
    This method replaces special substrings from a list of string
    and return a new list.

    :param oldList: list of strings that may contain ${...} variables
    :param runSet: run set whose benchmark/log attributes supply the values
    :param sourcefile: optional source file path enabling the
        ${sourcefile_*} variables
    :return: new list with all known variables substituted
    """
    benchmark = runSet.benchmark

    # list with tuples (key, value): 'key' is replaced by 'value'
    keyValueList = [('${benchmark_name}', benchmark.name),
                    ('${benchmark_date}', benchmark.date),
                    ('${benchmark_instance}', benchmark.instance),
                    ('${benchmark_path}', benchmark.baseDir or '.'),
                    ('${benchmark_path_abs}', os.path.abspath(benchmark.baseDir)),
                    ('${benchmark_file}', os.path.basename(benchmark.benchmarkFile)),
                    ('${benchmark_file_abs}', os.path.abspath(os.path.basename(benchmark.benchmarkFile))),
                    ('${logfile_path}', os.path.dirname(runSet.logFolder) or '.'),
                    ('${logfile_path_abs}', os.path.abspath(runSet.logFolder)),
                    ('${rundefinition_name}', runSet.realName if runSet.realName else ''),
                    ('${test_name}', runSet.realName if runSet.realName else '')]

    if sourcefile:
        keyValueList.append(('${sourcefile_name}', os.path.basename(sourcefile)))
        keyValueList.append(('${sourcefile_path}', os.path.dirname(sourcefile) or '.'))
        keyValueList.append(('${sourcefile_path_abs}', os.path.dirname(os.path.abspath(sourcefile))))

    # do not use keys twice
    assert len(set(key for (key, value) in keyValueList)) == len(keyValueList)

    newList = []
    for oldStr in oldList:
        newStr = oldStr
        for (key, value) in keyValueList:
            newStr = newStr.replace(key, value)
        if '${' in newStr:
            # FIX: logging.warn() is deprecated -- use logging.warning()
            # with lazy %-style arguments instead of eager formatting.
            logging.warning("a variable was not replaced in '%s'", newStr)
        newList.append(newStr)
    return newList
6413b1e20e397cb98253e04b4f3181884b703b66
25,905
def _parse_line_(line): """ """ try: filename, rest = line.split('(') info, what = rest.split(")") what = what.replace(":", "") return [filename.replace(" ","")]+info.split("/")+[what.replace(" [A]","").strip()] except: return None
3edb3f169af9990e83c6501d30441e8b3590789b
25,908
import requests


def rest_api_call(url, payload=None, cookie="", type="GET", content_type="text/plain"):
    """Perform an HTTP request and return the ``requests`` response object.

    :param url: target URL
    :param payload: request body; defaults to an empty dict
    :param cookie: value sent in the ``Cookie`` header
    :param type: HTTP method name, e.g. "GET" or "POST"
    :param content_type: value sent in the ``Content-Type`` header
    :return: the ``requests.Response`` object
    """
    # FIX: avoid a shared mutable default argument ({}); use a None sentinel.
    if payload is None:
        payload = {}
    headers = {
        'Content-Type': content_type,
        'Cookie': cookie,
    }
    # NOTE(review): verify=False disables TLS certificate validation --
    # confirm this is intentional for the targeted endpoints.
    return requests.request(type, url, headers=headers, data=payload, verify=False)
d60208b499ae92d8ad2b43ecf1b8c727eed0e030
25,910
def isnumber(num):
    """Checks whether argument is a number.

    Returns True when ``float(num)`` succeeds, else False. Unlike the
    previous version this also returns False (instead of raising) for
    values such as ``None`` that ``float()`` rejects with a TypeError.
    """
    try:
        float(num)
    except (ValueError, TypeError):
        return False
    return True
4ea3b253ff1ffdd8f5adf532ffa194525c8dfaf0
25,911
def update(day, day_to_implement, new_social_distance, new_stay_at_home):
    """ updates current policies, panic, quarantine policiy and stay at home order

    Parameters
    ----------
    day : int
        current day of the simulation.
    day_to_implement : int
        day from which measures are implemented.
    new_social_distance : int
        radius of square in which agent will look for other less populated areas.
    new_stay_at_home : int
        determines if stay at home order will be activated.

    Returns
    -------
    updated panic index, quarantine policy and stay at home order
    """
    # Before the implementation day no measures are active.
    panic, quarantine_policy, stay_home = 0, 0, 0
    if day >= day_to_implement - 1:
        # Activate the configured measures; quarantine policy stays off.
        panic = new_social_distance
        stay_home = new_stay_at_home
    return panic, quarantine_policy, stay_home
fd0e3232f8ef7b7e7c4ed6fb0716ba851635b2e4
25,912
def get_options_at_path(user_selected_path: str, file_list: list):
    """
    Returns all the alternative items (folders and files) directly under
    the provided path, preserving first-seen order.
    """
    # Depth of the selected path, counted in path components.
    depth = len(user_selected_path.split("/"))

    # Collect the unique prefixes one level deeper than the selected path.
    seen_subfolders = []
    for candidate in file_list:
        prefix = "/".join(candidate.split("/")[:depth + 1])
        if candidate.startswith(user_selected_path) and prefix not in seen_subfolders:
            seen_subfolders.append(prefix)

    # Report only the last component of each prefix.
    return [entry.split("/")[-1] for entry in seen_subfolders]
31fc39005fdd3b9151cdcea75738c4ed06b30504
25,914
def format_title(host: str) -> str:
    """Format the title for config entries."""
    return f"Controller ({host})"
99ca5c97007a8f0373184d8d9c5c55bb5d7d64b9
25,916
def box_with_datetime(request):
    """
    Like `box`, but specific to datetime64 for also testing DatetimeArray
    """
    # pytest fixture indirection: simply hand back the parametrized box type.
    return request.param
423e2f78e686fdde1581c6772500546c590494e7
25,918
def parse_bool(data):
    """Parse a string value to bool"""
    lowered = data.lower()
    if lowered in ('yes', 'true'):
        return True
    if lowered in ('no', 'false'):
        return False
    # Anything else is not a recognizable boolean.
    raise TypeError(f'"{data}" could not be interpreted as a boolean')
60f35d69178fa6322f3ff29b64b96deb7bd43f94
25,919
def read_last_seen(FILE_NAME: str) -> int:
    """
    gets the id of last seen tweet

    Args:
        FILE_NAME: static file name which stores the last seen id

    Returns:
        last_seen_id: id of the tweet, or 0 when the file is empty
    """
    # FIX: use a context manager so the handle is closed on every path
    # (the old code leaked it when the file was empty), and drop the
    # unreachable print after the returns.
    with open(FILE_NAME, 'r') as file_read:
        contents = file_read.read()
    if contents != "":
        return int(contents.strip())
    return 0
d7d919deabc133d8500ab0a6f6ea786e9a291edd
25,921
import os def _load_test_data(name): """Load test data.""" with open(os.path.join('testdata', name), 'rb') as f: return f.read()
a678e79576df640b15c6bcb0208a2b88e390d023
25,922
def reverse_chain(joints):
    """
    Reverses the hierarchy of the joint chain.

    :param joints: List of joints in the chain to reverse
    :return: the same list of joints in reverse order
    """
    # Remember where the whole chain hangs so it can be re-attached later.
    original_root_parent = joints[0].getParent()

    # Detach everything so parents can be reassigned freely.
    for joint in joints:
        joint.setParent(None)

    # Parent each joint to its old child, flipping the hierarchy.
    for joint, new_parent in zip(joints, joints[1:]):
        joint.setParent(new_parent)

    # Hang the flipped chain back under the original parent and reverse
    # the list in place so it reads root-to-tip again.
    joints[-1].setParent(original_root_parent)
    joints.reverse()

    return joints
8961c0031c7fd230935ffd212eca63f9f2ff6112
25,923
def _node_like(test_dict: dict): """ Evaluates whether a dict can be converted to a node safely. test_dict : dict | Dict to check """ if not isinstance(test_dict, dict): return False keys = list(test_dict.keys()) try: keys.remove("ParameterName") keys.remove("ParameterValue") keys.remove("ParameterInfo") except ValueError: return False if keys: return False if not isinstance(test_dict["ParameterName"], str): return False if not isinstance(test_dict["ParameterInfo"], str): return False return True
c03d2f6d5824068646ef011b8b3d9dd2fca58767
25,924
def modifyNADictCopy(indict, v_new, start, end, ivol, nvol):
    """
    Returns a copy of a dictionary with some modifications.

    "X" is sliced to [start:end]; "V", "IVOL" and "NVOL" are replaced by
    the supplied values; every other key is copied unchanged. Keys absent
    from the input are not added.
    """
    overrides = {"V": v_new, "IVOL": ivol, "NVOL": nvol}
    new_copy = {}
    for key, value in indict.items():
        if key == "X":
            new_copy[key] = value[start:end]
        elif key in overrides:
            new_copy[key] = overrides[key]
        else:
            new_copy[key] = value
    return new_copy
5e77e4509edba58c438e88ff2d6278e95bb3a6df
25,926
def getSynonyms(word, word2Synset, synonyms):
    """
    Module to extract synonyms of a word using Hindi wordnet

    Args:
        word: the word of which synonyms has to be extracted
        word2Synset: dictionary of synset of the word
        synonyms: dictionary of synonyms of the word
    Returns:
        synList: list of synonyms of the word
    """
    synList = []
    # FIX: dict.has_key() was removed in Python 3 -- use the `in` operator.
    if word in word2Synset:
        synsets = word2Synset[word]
        for pos in synsets:
            for synset in synsets[pos]:
                if synset in synonyms:
                    synList.append(synonyms[synset])
    return synList
bc3195cd786a043000e5c8e3887986cb0323eda7
25,930
from typing import Set
from typing import Tuple


def get_line_between_points(x_0: int, y_0: int, x_1: int, y_1: int) -> Set[Tuple[int, int]]:
    """
    Gets a line between two coordinate pairs represented by a set of (x, y)
    tuples, using Bresenham's line algorithm.
    See: https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm

    :param x_0: The x-coordinate of the first point.
    :param y_0: The y-coordinate of the first point.
    :param x_1: The x-coordinate of the second point.
    :param y_1: The y-coordinate of the second point.
    :return: The set of coordinates that make up the line between the two points.
    """
    points = set()

    delta_x = x_1 - x_0
    delta_y = y_1 - y_0
    step_x = 1 if delta_x >= 0 else -1
    step_y = 1 if delta_y >= 0 else -1
    # Bresenham tracks |dx| and -|dy| so one error term covers both shallow
    # and steep lines.
    delta_x = abs(delta_x)
    delta_y = -abs(delta_y)

    cur_x, cur_y = x_0, y_0
    err = delta_x + delta_y
    while True:
        points.add((cur_x, cur_y))
        if cur_x == x_1 and cur_y == y_1:
            break
        doubled_err = err * 2
        if doubled_err >= delta_y:
            err += delta_y
            cur_x += step_x
        if doubled_err <= delta_x:
            err += delta_x
            cur_y += step_y

    return points
98324b303268a6e8b48aa7e7dba2e1fa85f7b463
25,931
def find_char_groups(s):
    """ Find character groups """
    # Scan the string, recording (start, end) index pairs for bracketed
    # groups while honouring backslash escapes.
    index = 0
    spans = []
    is_escaped = False
    in_group = False
    group_start = None
    for ch in s:
        if ch == "\\":
            # A pair of backslashes cancels the escape.
            is_escaped = not is_escaped
        elif is_escaped:
            is_escaped = False
        elif ch == "[" and not in_group:
            in_group = True
            group_start = index
        elif ch == "]" and in_group:
            spans.append((group_start, index))
        index += 1
    return spans
82d65fd0834470c24a9e884e7a7b3bcf5263b468
25,932
import glob
import os
def get_files(folder, extension=None, pre_name=None, mid_name=None, post_name=None):
    """Search folder and returns list of files that match name and/or extension.
    Equivalent to doing folder/pre_name*mid_name*post_name.extension

    input: folder -- folder to Search
    optional -- extension,pre_name,mid_name,post_name
    """
    # Build the glob pattern piece by piece: pre*mid*post.ext
    pattern = "*"
    if pre_name:
        pattern = pre_name + "*"
    if mid_name:
        pattern += mid_name + "*"
    if post_name:
        pattern += post_name + "."
    suffix = "*"
    if extension:
        # Accept extensions given with or without one leading dot.
        if extension[0] == ".":
            extension = extension[1:]
        suffix = extension
    pattern += suffix
    return sorted(glob.glob(os.path.join(folder, pattern)))
fdd523d69122e48580f561431d7c771a2eb9b4ab
25,934
def currencies_filter(query, abbreviation, currency, favorite=None):
    """Return true if query satisfy certain criterias"""
    # Favorites are excluded outright.
    if abbreviation in (favorite or []):
        return False
    # An empty query matches everything else.
    if not query:
        return True
    # Match on the abbreviation prefix, case-insensitively.
    if abbreviation.startswith(query.upper()):
        return True
    # Otherwise match on any word of the currency's full name.
    lowered_query = query.lower()
    return any(word.lower().startswith(lowered_query) for word in currency.split())
153eb9ebf4baf837aa402013149880611258803b
25,935
def open_file():
    """
    Summary of open_file function:
        Opens input file that contains a Madlib template for user input.

    Parameters:
        none

    Returns:
        str: the Madlib template read from the input file.
    """
    # The template file is expected to live in the working directory.
    with open('sample_template.txt', 'r') as template_file:
        return template_file.read()
59a0621e70520e71265e7debaf9cf28e25c30db6
25,938
import logging
def get_reviewers(events, reviewers=None):
    """
    Return the set of users that have a code review requested or completed.
    """
    collected = reviewers or set()
    for event_name, payload, _timestamp in events:
        action = payload.get('action')
        if event_name == 'pull_request':
            if action == 'review_requested':
                if 'requested_reviewer' not in payload:
                    # A request without a reviewer (e.g. self-review) is skipped.
                    logging.warning('no reviewer present -- self-review?')
                    continue
                collected.add(payload['requested_reviewer']['login'])
            elif action == 'review_request_removed':
                collected -= {payload['requested_reviewer']['login']}
        elif event_name == 'pull_request_review' and action == 'submitted':
            collected.add(payload['sender']['login'])
    return collected
1b86d6d25d28f8f5e8b6807874ddd1999d654ffa
25,939
import json
def parse_json_data(filename) -> dict:
    """Returns the contents of a JSON file as a dict.

    :param filename: path of the JSON file to read
    :return: the parsed JSON document
    """
    # json.load reads the stream directly; joining readlines() is redundant.
    with open(filename, "r") as f:
        return json.load(f)
d061043cf29d1ed8d790a51d135c251e2226de77
25,940
import re
def kanji2int(kanjis: str, error="raise", style="auto") -> int:
    """
    :param kanjis - str: Kanji str to convert into Integer
    :param error - str: How to handle Error. "raise": raise error. "ignore": ignore error , "warn": warn but don't raise
    :param style - str: Which style of format will be used. "mixed": Arabic and Kanji Mixed like "4億5230万3千", "all": All letter must be Kanji, "auto": detect automatically by checking any arabic character is in kanjis.
    :return: int
    """
    if error not in ("raise", "warn", "ignore"):
        raise ValueError("unexpected value {} for argument error".format(error))
    # Single-digit kanji numerals.
    number = {"一": 1, "二": 2, "三": 3, "四": 4, "五": 5, "六": 6, "七": 7, "八": 8, "九": 9}
    # Sub-10000 multipliers (ten, hundred, thousand) as powers of ten.
    little_digit = {"十": 1, "百": 2, "千": 3}
    # Myriad-group multipliers (10^4, 10^8, ...) as powers of ten.
    digit = {"万": 4, "億": 8, "兆": 12, "京": 16, "垓": 20, "𥝱": 24, "穣": 28, "溝": 32, "澗": 36, "正": 40, "載": 44, "極": 48, "恒河沙": 52, "阿僧祇": 56, "那由多": 60, "不可思議": 64, "無量大数": 68}
    if style not in ("all", "mixed", "auto"):
        raise ValueError("unexpected value {} for argument style".format(style))
    # check arguments
    num = 0
    # "auto" falls into mixed mode as soon as any arabic digit appears.
    if style == "mixed" or (style == "auto" and any(str(num) in kanjis for num in range(10))):
        # Each regex group captures one myriad block like "4千5百23" followed
        # by its optional myriad marker (万/億/...); the final findall entry
        # is always an empty match and is dropped with [:-1].
        for group in re.compile("([0-9]*?千)?([0-9]*?百)?([0-9]*?十)?([0-9]*)({})?".format('|'.join(digit.keys()))) \
                .findall(kanjis)[:-1]:
            c_num = 0
            # Scale the digits captured before 千/百/十 and the bare digits.
            for index, dig in enumerate(group[:4]):
                if dig:
                    c_num += (1000, 100, 10, 1)[index] * int(dig.rstrip('千百十') or 1)
            # Shift the block by its myriad marker's power of ten.
            num += c_num * 10 ** digit.get(group[-1], 0)
        return num
    else:
        # Pure-kanji mode: accumulate a running value token by token.
        current_mini_num = 0  # last single-digit numeral seen
        current_num = 0       # value accumulated below the next myriad marker
        for word in re.compile('|'.join(list(number.keys()) + list(little_digit.keys()) + list(digit.keys()))) \
                .findall(kanjis):
            if word in number:
                current_mini_num = number[word]
            elif word in little_digit:
                # A bare 十/百/千 with no preceding digit counts as 1.
                current_num += (current_mini_num if current_mini_num else 1) * 10 ** little_digit[word]
                current_mini_num = 0
            elif word in digit:
                # Close the current myriad block and scale it.
                num += (current_num + current_mini_num) * 10 ** digit[word]
                current_num = current_mini_num = 0
            else:
                raise ValueError("unexpected letter: {}".format(word))
        return num + current_num + current_mini_num
5ec03fd38f06de2a74f22086f5702e857d210bcf
25,941
def get_marble(value=0, prev=None, next=None):
    """Get new marble, with value, prev and next."""
    # A marble is a plain dict acting as a doubly-linked-list node.
    return dict(value=value, prev=prev, next=next)
2cf13f5a46111b56c154a1bba0eec33bfb3ad8bc
25,944
from unittest.mock import patch
def pretend_this_data_set_exists(data_set_config):
    """Patches methods on the performanceplatform-client that respond
    to queries for data sets to respond with valid data set configuration

    NOTE(review): this is a generator (it yields once inside the patch
    context) -- presumably used via contextlib.contextmanager or as a
    pytest fixture; confirm at the call sites.
    """
    try:
        namespace = 'performanceplatform.client.AdminAPI'
        with patch(namespace + '.get_data_set') as get_data_set:
            with patch(namespace + '.get_data_set_by_name') as get_data_set_by_name:
                with patch(namespace + '.list_data_sets') as list_data_sets:

                    # Answer only for the configured (data_group, data_type)
                    # pair; any other query returns None implicitly.
                    def get_data_set_side_effect(data_group, data_type):
                        if (data_group == data_set_config['data_group']
                                and data_type == data_set_config['data_type']):
                            return data_set_config

                    # Answer only for the configured data set name.
                    def get_data_set_by_name_side_effect(name):
                        if name == data_set_config['name']:
                            return data_set_config

                    get_data_set.side_effect = get_data_set_side_effect
                    get_data_set_by_name.side_effect = get_data_set_by_name_side_effect
                    # An exception instance as side_effect makes the mock
                    # raise it when called.
                    list_data_sets.side_effect = NotImplementedError(
                        "The method to list data sets is not patched yet")
                    # Hand control back to the caller while patches are live.
                    yield
    finally:
        pass
2d81ed6f6c65b3ca31e5404a62eafa0736dd6464
25,945
from datetime import datetime
def api_sessions_cleanup(config):
    """Create scheduler task for cleanup api sessions.

    Returns an async callable suitable for periodic scheduling.
    """
    async def _api_sessions_cleanup():
        """Cleanup old api sessions."""
        now = datetime.now()
        for session, until_valid in config.security_sessions.items():
            if now >= until_valid:
                # NOTE(review): assigning a (session, None) tuple looks like
                # a property setter that removes the expired session --
                # confirm the config class implements it that way.
                config.security_sessions = (session, None)

    return _api_sessions_cleanup
87ee4a1240bc3fcef9a098d069704a9a932fb895
25,947
import glob
import os
import json
def read_ground_truth_files(truth_folder):
    """
    reads ground truth files into dict

    :param truth_folder: path to folder holding ground truth files
    :return: dict of ground truth files with problem-id as key and file content as value
    """
    truth = {}
    pattern = os.path.join(truth_folder, 'truth-problem*.json')
    for truth_path in glob.glob(pattern):
        with open(truth_path, 'r') as handle:
            content = json.load(handle)
        # Strip the 'truth-' prefix and '.json' suffix to get the problem id.
        problem_id = os.path.basename(truth_path)[6:-5]
        truth[problem_id] = content
    return truth
791902f54e68f69c3851cb133abe2b46e3b8386a
25,949
import os
def get_partial_name(filename):
    """Get partial template id from filename."""
    # The id is the file's base name with its extension stripped.
    base = os.path.basename(filename)
    stem, _extension = os.path.splitext(base)
    return stem
45f965bd0f540412baefab695f5cb2ccce2bd3ba
25,951
def position(x):
    """ Function to calculate position bias """
    # One (position, base, frequency) tuple is appended per base per column,
    # in the fixed order a, c, g, t.
    stats = []
    # NOTE(review): the column count is taken from the *second* sequence
    # (x[1]) -- confirm all sequences share the same length.
    num_bases = len(x[1])
    for pos in range(num_bases):
        # Gather the character at this position from every sequence.
        column = [seq[pos] for seq in x]
        count_a = column.count('A')
        count_t = column.count('T')
        count_c = column.count('C')
        count_g = column.count('G')
        total = float(len(column))
        stats.append((pos, 'a', count_a / total))
        stats.append((pos, 'c', count_c / total))
        stats.append((pos, 'g', count_g / total))
        stats.append((pos, 't', count_t / total))
    return stats
e4e26beaf5c7145ef5d50c2ef7eb187f82f0ba65
25,953
def isNaN(x):
    """
    Test if supplied float is an IEEE not-a-number (NaN).

    Parameters:
       x - float to test for NaN

    Return value:
       True if x is NaN, else False.
    """
    # IEEE 754: NaN is the only float value that compares unequal to itself.
    return bool(x != x)
de8e025967189e1285e4fb4308d55d8e1a5686c6
25,955
def CEN_misclassification_calc(table, TOP, P, i, j, subject_class, modified=False):
    """
    Calculate Misclassification probability.

    :param table: input confusion matrix
    :type table: dict
    :param TOP: number of positives in predict vector
    :type TOP: int
    :param P: number of actual positives
    :type P: int
    :param i: table row index (class name)
    :type i: any valid type
    :param j: table col index (class name)
    :type j: any valid type
    :param subject_class: subject to class (class name)
    :type subject_class: any valid type
    :param modified: modified mode flag
    :type modified: bool
    :return: misclassification probability as float, or the string "None"
        on a zero denominator / type error
    """
    try:
        denominator = TOP + P
        if modified:
            # Modified CEN removes the subject class's correct predictions
            # from the denominator.
            denominator -= table[subject_class][subject_class]
        return table[i][j] / denominator
    except (ZeroDivisionError, TypeError):
        return "None"
0c9b74b9b2dc9bee060d2b74d66072ce0c0c27aa
25,956
def sort_alternatives(scores, alternatives, reverse):
    """ sorts alternatives by score """
    # Pair each score with its alternative, sort the pairs, then transpose
    # back into two parallel lists.
    ordered_pairs = sorted(zip(scores, alternatives), reverse=reverse)
    sorted_scores, sorted_alternatives = (list(seq) for seq in zip(*ordered_pairs))
    return sorted_scores, sorted_alternatives
6d53ac1e7b4728c004f1014a5ab839dc6aaef19f
25,958
def return_sentence_annotators(sentence, short_annotators):
    """Return a list of this sentence's annotators"""
    names = []
    # Collect every annotator-like tag in the sentence markup.
    for tag in sentence.find_all(['annotator', 'primary', 'secondary']):
        # Map the short code to a full name when known, otherwise keep
        # the raw text.
        try:
            names.append(short_annotators[tag.text])
        except KeyError:
            names.append(tag.text)
    return names
63c57b1745a38f58ffa36c775a3e6bb184dc384b
25,959