content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def get_script(title):
    """Retrieve the script text for a movie title.

    Reads ``Scripts/<title>.txt`` and returns its contents with commas
    removed (so the text can be embedded in comma-separated output).

    :param title: movie title; surrounding whitespace is stripped.
    :return: script contents as a string, commas removed.
    """
    # Open in text mode: the old code read bytes and then called str() on
    # them, which produced a "b'...'" repr string instead of the real text.
    with open("Scripts/" + title.strip() + ".txt", "r", errors="replace") as f:
        contents = f.read()
    return contents.replace(",", "")
0d9d88f3f513981621a1c0f809fa4fe8d383e808
696,637
def zeros_mat(r, c):
    """Build an r-by-c matrix full of zeroes.

    :param r: number of rows
    :param c: number of columns
    :return: 2D array as a list of rows, each a list of ``c`` zeroes
    """
    # A fresh inner list is built per row so rows never share storage.
    return [[0 for _ in range(c)] for _ in range(r)]
4ec8e1a3457db91d0ef69b9ad757378ab477fa30
696,638
import hashlib


def kdf(secret, phrase):
    """Default key derivation function.

    Derives a 16-byte key from ``secret`` and ``phrase`` using
    PBKDF2-HMAC-SHA256 with 1000 iterations.
    """
    derived = hashlib.pbkdf2_hmac('sha256', secret, phrase, 1000)
    return derived[:16]
91e817101a3ed85aadf4e652f676d486d973f3ef
696,639
def calculate_yearly_total(sales: list[list[str]]) -> int:
    """Calculate the total yearly sales.

    Each row's last column holds that row's sales figure as a string.
    """
    return sum(int(row[-1]) for row in sales)
dbf5fbd038d0e7b2fad7e6794803b919e5902518
696,640
def f2(c=100, **kws):
    """ Doc for f2: return half of ``c``; extra keyword args are ignored. """
    half = c / 2.0
    return half
eebd854f3b63fc615d580750676f6c601e65eb72
696,641
def same_as_previous(context):
    """Check whether hash has changed compared to the results in
    config.previous_results.

    Returns True only when the first matching test entry carries a hash
    equal to the current executable hash.
    """
    previous = context.config.previous_results
    wanted_name = context.test.getFullName()
    current_hash = context.executable_hash
    if not previous or "tests" not in previous:
        return False
    for entry in previous["tests"]:
        if "name" not in entry or entry["name"] != wanted_name:
            continue
        if "metrics" not in entry:
            continue
        metrics = entry["metrics"]
        # Stop at the first matching entry, exactly like the original loop.
        return "hash" in metrics and metrics["hash"] == current_hash
    return False
602bdf5cf3b66c83d0f6c666481fc3ed20d34c30
696,642
import tokenize


def bracket_delta(token):
    """Returns +/-1 if the current token increases/decreases bracket
    nesting depth, 0 otherwise."""
    kind, text = token[0], token[1]
    if kind != tokenize.OP:
        return 0
    if text in ('{', '(', '['):
        return 1
    if text in ('}', ')', ']'):
        return -1
    return 0
0c225259482bad2cd5470b69f2c232bff1d421e2
696,643
def get_entity(obj):
    """Returns the entity for an expression.

    If the node wraps a multiplication expression, descend into it first,
    then follow the unary -> pow -> primary chain down to the entity.
    """
    node = obj.mul_expression if obj.mul_expression is not None else obj
    return node.unary_expression.pow_expression.primary_expression.entity
00185db09f73d4970eec4f7395a9d84a872781c3
696,644
import sqlite3


def connect_db(db_file, parameter):
    """Execute an SQL query to obtain molecules and parameters.

    Args:
        db_file: database for examination
        parameter: molecule parameter for query

    Returns:
        Query result rows pairing molecule SMILES with their properties.
    """
    connection = sqlite3.connect(db_file)
    cursor = connection.cursor()
    # Join the text and number key/value tables on molecule id to pair each
    # SMILES string with the requested numeric property.
    cursor.execute(
        """SELECT text_key_values.key,text_key_values.value,text_key_values.id, number_key_values.key, number_key_values.value FROM text_key_values INNER JOIN number_key_values ON text_key_values.id=number_key_values.id WHERE text_key_values.key='SMILES' and number_key_values.key=?""",
        (parameter,),
    )
    return cursor.fetchall()
e9f1848155a228fe64179cbd1cae873882fb031a
696,645
def resolver(*classes, **options):
    """Metaclass resolver

    Builds (or reuses) a metaclass compatible with the metaclasses of all
    given base classes, then uses it to create a combined class.

    Example:
        SomeClass(resolver(SomeMixin, SomeBaseClass, ...,
                           some_generic_field=..., ...)): ...

    :param classes: base classes to combine
    :param options: namespace dict passed to the generated (meta)classes
    :return: generic metaclass
    """
    # Plain `type` needs no resolution; only custom metaclasses matter.
    is_not_type = lambda cls: cls is not type
    generic_name = lambda gc: '_'.join(c.__name__ for c in gc)
    # Unique custom metaclasses of the given classes.
    metaclass = tuple(set(filter(is_not_type, map(type, classes))))
    metaclass = (
        metaclass[0]
        if len(metaclass) == 1
        # Several distinct metaclasses: derive one combined metaclass from all.
        else type(generic_name(metaclass), metaclass, options)
    )
    # Instantiate the (combined) metaclass to produce the merged class.
    return metaclass(generic_name(classes), classes, options)
b25fc164967c977deac1da25b2892af8befe3957
696,647
import torch


def mapping_shtools_to_compact(lmax):
    """
    pyshtools uses an output format to represent spherical harmonic
    coefficients that is not memory efficient. This function creates a
    mapping to represent the coefficients differently.

    Our representation Y(l, m), shape (lmax+1, lmax+1):
        Y(0, 0) Y(1, 1) Y(2, 2) ...
        Y(1,-1) Y(1, 0) Y(2, 1) ...
        Y(2,-2) Y(2,-1) Y(2, 0) ...
        ...     ...     ...

    Example:
        mapping = mapping_shtools_to_compact(lmax)
        x, _ = SHExpandLSQ(d, phi, theta, lmax, [1, -1])  # pyshtools
        y = torch.tensor(x)[mapping[..., 0], mapping[..., 1], mapping[..., 2]]  # Compact
        z = torch.zeros([2, lmax+1, lmax+1])
        z[mapping[..., 0], mapping[..., 1], mapping[..., 2]] = y  # Back to pyshtools

    :param lmax: maximum spherical harmonic degree.
    :return: long tensor of shape (lmax+1, lmax+1, 3) holding, per matrix
        cell, the (plane, row, column) index into the pyshtools layout.
    """
    mapping = torch.zeros([lmax + 1, lmax + 1, 3], dtype=torch.long)
    # Channel 0: 0 on and above the diagonal, 1 strictly below it —
    # presumably selecting pyshtools' two coefficient planes (TODO confirm
    # against the pyshtools [1, -1] convention used in the example).
    mapping[..., 0] = torch.tril(torch.ones([lmax + 1, lmax + 1], dtype=torch.long)) - torch.eye(lmax + 1, dtype=torch.long)
    linspace = torch.linspace(0, lmax, lmax + 1, dtype=torch.long)
    # Channel 1: column index above the diagonal, row index below it
    # (torch.diag subtraction keeps the diagonal from being counted twice).
    mapping[..., 1] = torch.triu(linspace.view(1, -1).expand(lmax + 1, lmax + 1)) \
        + torch.tril(linspace.view(-1, 1).expand(lmax + 1, lmax + 1) - torch.diag(linspace))
    # Channel 2: absolute distance from the diagonal, i.e. |col - row|.
    mapping[..., 2] = torch.abs(linspace.view(1, -1).expand(lmax + 1, lmax + 1) - linspace.view(-1, 1).expand(lmax + 1, lmax + 1))
    return mapping
48c821c74f24f66af367f40204301badde71b379
696,648
from bs4 import BeautifulSoup


def get_corpus_file_soup(corpus_filename):
    """
    For a given corpus xml filename, return its BeautifulSoup soup.

    :param corpus_filename: path to the corpus XML file.
    :return: BeautifulSoup object parsed with the 'xml' parser.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the previous version leaked the open file object.
    with open(corpus_filename) as corpus_file:
        return BeautifulSoup(corpus_file, 'xml')
6327cfc185e1ac1c7372b4879dcbcf62c0689a89
696,649
def items(dct_or_lst):
    """Return (key, value) pairs for a dict, or (index, value) pairs for a list."""
    if isinstance(dct_or_lst, dict):
        pairs = dct_or_lst.items()
    else:
        pairs = enumerate(dct_or_lst)
    return list(pairs)
7526f98214de1b76cc622d98abf4e0d5d3e003eb
696,650
import requests


def get_currencies(api_key):
    """Return the list of all currencies available on this API."""
    url = 'https://free.currconv.com/api/v7/currencies?apiKey={}'.format(api_key)
    response = requests.get(url)
    payload = response.json()
    return list(payload['results'].values())
d57b1803f6b2cc7ba6e8a44364619b24dbac9593
696,651
import string
import binascii
import base64


def decode_base64(encoded_data: str) -> bytes:
    """
    Decode base64 somewhat like Vuforia does.

    Raises:
        binascii.Error: Vuforia would consider this encoded data as an
            "UNPROCESSABLE_ENTITY".

    Returns:
        The given data, decoded as base64.
    """
    allowed = string.ascii_letters + string.digits + '+/='
    if any(ch not in allowed for ch in encoded_data):
        raise binascii.Error()
    remainder = len(encoded_data) % 4
    if remainder == 0:
        return base64.b64decode(encoded_data)
    if remainder == 1:
        # A single dangling character can never decode; drop it.
        return base64.b64decode(encoded_data[:-1])
    # remainder 2 or 3: pad up to a multiple of four.
    return base64.b64decode(encoded_data + '=' * (4 - remainder))
64f64a83a99b4732e2d660d688244ff93365abbd
696,653
def vec(A):
    """Return the vectorized matrix A by stacking its columns."""
    # order="F" walks down each column first, i.e. column stacking.
    flattened = A.reshape(-1, order="F")
    return flattened
668bb854406b7a5d7442b66a7de4c3222e25386c
696,655
def stiff_b(v1v1, v0v1, v0v0, rold):
    """called from stiff_a(). Decide if the iteration has degenerated
    because of a strongly dominant real eigenvalue.

    v1v1 -- dot product of the latest iterate with itself
    v0v1 -- dot product of the previous iterate with the latest one
    v0v0 -- dot product of the previous iterate with itself
    rold -- previous Rayleigh quotient approximating a dominant real
            eigenvalue; computed directly before the first call and updated
            here for subsequent calls.

    Returns (rold, rho, root1, root2, rootre). If a strongly dominant real
    eigenvalue is detected, rootre is True, root1[:] holds the eigenvalue
    (real, imag), rho its magnitude, and root2[:] stays zero.

    Original source: RKSuite.f, https://www.netlib.org/ode/rksuite/
    """
    # Real and imaginary parts of roots are returned as lists.
    dominant_root = [0.0, 0.0]
    secondary_root = [0.0, 0.0]
    rayleigh = v0v1 / v0v0
    rho = abs(rayleigh)
    det = v0v0 * v1v1 - v0v1 ** 2
    residual = abs(det / v0v0)
    degenerate = (det == 0.0
                  or (residual <= 1e-6 * v1v1
                      and abs(rayleigh - rold) <= 0.001 * rho))
    if degenerate:
        dominant_root[0] = rayleigh
        rold = rayleigh
    return rold, rho, dominant_root, secondary_root, degenerate
71a8a4a40bd98c5ad42e1333548f0f93fecfa0da
696,656
def method_withBadName_with_parameters_on_multiple_lines(x, y): """Provide parameters on multiple lines test case.""" return x + y
a7fee09cd434d646d9eaa70c5cb5d7fb37f68b4e
696,657
def invalid_fg_vsby(s, v):
    """Checks if visibility is inconsistent with FG"""
    # NWSI 10-813, 1.2.6
    # Plain FG (two-char code) or freezing fog: visibility must be low.
    if len(s) == 2 or s.startswith('FZ'):
        return v > 0.6
    # Shallow fog (MI prefix): visibility must not be low.
    if s.startswith('MI'):
        return v < 0.6
    return False
2b4fb86c19deef2893b8dfa0b55cd13ea2749e6f
696,659
import uuid


def generate_uuid():
    """
    :returns: UUID1 universal identifier
    :rtype: str
    """
    identifier = uuid.uuid1()
    return str(identifier)
bacae0c1373d50cb7f42a5effb05793894064aa5
696,660
def radec_limits(region):
    """region containing bricks that ran with obiwan"""
    limits = {
        'eboss_ngc': dict(ra=(120, 170), dec=(12, 32)),
        'eboss_sgc': dict(ra=(-50, 50), dec=(-6, 6)),
        'cosmos': dict(ra=(148.5, 151.6), dec=(0.4, 3.4)),
    }
    # Unknown regions fall through to None, matching the original behaviour.
    return limits.get(region)
33205f91ee2ab8012b6bb3c02467372ce3a62002
696,661
import re


def mangle_command(command, name_max=255, has_variables=False):
    """
    Mangle a command line string into something suitable for use as the
    basename of a filename.

    Removes a leading /bin, /sbin, /usr/bin or /usr/sbin prefix, replaces
    non-typical characters with underscores and slashes with dots, mirroring
    what insights-client does so the server can locate the archive file that
    holds a command's output. Curly braces are normally replaced too; set
    'has_variables' to keep them, so variable references like {NAME} survive
    until they are expanded (variable names are assumed to contain only
    characters this function does not replace).

    :param command: command line string to mangle
    :param name_max: maximum length of the resulting basename
    :param has_variables: keep '{' and '}' intact for variable references
    """
    if has_variables:
        pattern = r"[^\w\-\.\/{}]+"
    else:
        pattern = r"[^\w\-\.\/]+"
    name = re.sub(r"^/(usr/|)(bin|sbin)/", "", command)   # drop leading bin dirs
    name = re.sub(pattern, "_", name)                     # squash odd characters
    name = re.sub(r"/", ".", name).strip(" ._-")          # slashes become dots
    return name[:name_max]
e111c3a959e454462e39448385a1f555052919d4
696,662
import os


def filter_bamfiles(bams, sample):
    """Split a sample's BAM files into histone-mark and control (Input) lists.

    :param bams: iterable of BAM file paths
    :param sample: sample name prefix used to select relevant files
    :return: (histones, control) lists of paths
    """
    histones, control = [], []
    for path in bams:
        if not os.path.basename(path).startswith(sample):
            continue
        if '_Input' in path:
            control.append(path)
        elif '_H3K' in path:
            histones.append(path)
    assert len(histones) >= 6, 'Missing histone mark for sample {}: {}'.format(sample, histones)
    if len(control) > 1:
        # Prefer the merged control when several Input files match.
        control = [x for x in control if 'N.mrg.' in x]
    assert len(control) == 1, 'Could not identify control BAM: {}'.format(control)
    return histones, control
a859bf989b1db1e350a10bfaaf113349c68b09ca
696,665
from datetime import datetime


def this_year_naive():
    """ Returns this year, number. No TZ aware """
    now = datetime.now()
    return now.year
f281f4c7cd6fcb81bab4ba022825bf7e58a57229
696,666
def short_arr():
    """returns list for testing."""
    # Five repetitions of the 1..4 cycle.
    return [1, 2, 3, 4] * 5
253860e2ccf673bbb7354e4f09008f6785210e64
696,667
def getExcludes(backendName):
    """ getExcludes(backendName)

    Get a list of modules to exclude. If using the 'wx' backend you do not
    want all the qt4 libraries, and vice versa. backendName is the name of
    the backend which you do want to use.
    """
    excludes = []
    # Exclude whichever GUI toolkit is not the chosen backend.
    if backendName != 'qt4':
        excludes += ["sip", "PyQt4", "PyQt4.QtCore", "PyQt4.QtGui"]
    if backendName != 'wx':
        excludes += ["wx"]
    return excludes
d5c53034bdbf12af5292bc2f77b74eb2ba2e2ef1
696,668
import os


def file_exists(filepath):
    """Tests for existence of a file on the string filepath.

    :param filepath: path string to test
    :return: True if *filepath* names an existing regular file
    """
    # isfile() already implies existence, so the separate exists() check
    # and the if/else returning literal booleans were redundant.
    return os.path.isfile(filepath)
3f3be980205543a9d48c91320dbf8af743da0ae6
696,669
import os


def enter_wd(create=False):
    """
    Enter working directory 'gps/exposure'.

    Parameters
    ----------
    create : bool, optional
        Create the directory tree first.

    Returns
    -------
    cwd : string
        The working directory that was current before stepping in.
    """
    # Remember where we came from so the caller can chdir back.
    cwd = os.getcwd()
    if create:
        # exist_ok replaces the old bare try/except-pass pairs: an existing
        # tree is fine, but real errors (permissions, ...) now surface.
        os.makedirs('gps/exposure', exist_ok=True)
    os.chdir('gps/exposure')
    return cwd
3e18807466bd6068a35add79cdf127ed2986a0d4
696,670
import yaml


def read_config_file(config_file):
    """
    Read the yaml config file specified.

    :param config_file: path to config file (falsy skips reading).
    :return: Object representing contents of config file, or {} when no
        path was given.
    """
    if not config_file:
        return {}
    with open(config_file) as file_handle:
        return yaml.safe_load(file_handle)
9f21bfac3e8ac772782aafbc711b8cb04e2580bc
696,671
def username_to_file(username):
    """ Return the network file name according to the username """
    return f'{username}.gml'
4cca884c92f07f18427a68bc28fe9d25f9ec94f7
696,672
import colorsys


def hsv_to_rgb(h, s, v):
    """
    Convert an HSV tuple to an RGB hex string.

    h -- hue (0-360)
    s -- saturation (0-100)
    v -- value (0-255)

    Returns a hex RGB string, i.e. #123456.
    """
    # Normalise each channel into colorsys's 0..1 range, then scale to 0..255.
    fractions = colorsys.hsv_to_rgb(h / 360, s / 100, v / 255)
    red, green, blue = (int(channel * 255) for channel in fractions)
    return '#{:02X}{:02X}{:02X}'.format(red, green, blue)
48bb7e31f16b6c435094aa990bb4d02f06cc37f7
696,673
import pathlib
import subprocess


def run_command(tool: str, options: str, story_file: str) -> list:
    """
    Run a tool command and capture its output.

    Runs ``tool -<options> <story_file>`` with stdout captured, raising on a
    non-zero exit status, and returns stdout split into lines (line endings
    kept).
    """
    # Output directory expected by the tools; created up front if missing.
    pathlib.Path("resources/zdata").mkdir(parents=True, exist_ok=True)
    completed = subprocess.run(
        [tool, f"-{options}", story_file],
        universal_newlines=True,
        stdout=subprocess.PIPE,
        check=True,
    )
    return completed.stdout.splitlines(True)
ae5aefd14f2156a559cb7ac77edb6a4f4adbed3e
696,674
def find_continuous_sequence(n: int) -> list:
    """
    Find all runs of consecutive positive integers summing to *n*.

    Parameters
    -----------
    n: the target sum of a continuous sequence

    Returns
    ---------
    out: list of lists; each inner list is a run of consecutive integers
        whose sum equals the given n.
    """
    if n < 1:
        return []
    sequences = []
    lo, hi = 1, 2
    # A run of two or more terms starting at or beyond the midpoint can
    # never sum to n, so the sweep stops there.
    midpoint = (1 + n) // 2
    while lo < midpoint:
        window = list(range(lo, hi + 1))
        total = sum(window)
        if total < n:
            hi += 1
        else:
            if total == n:
                sequences.append(window)
            lo += 1
    return sequences
221868b46aeee509ed60a80a6ab028bec3a53250
696,675
import fcntl


def lockFile(lockfile):
    """Attempt to create lock file or fail if already locked"""
    # NOTE(review): the file object is neither kept nor returned, so once
    # `fp` is garbage-collected the descriptor closes and the lock is
    # released. Confirm callers only rely on the lock around this call, or
    # hold a reference to keep the lock alive.
    fp = open(lockfile, 'w')
    try:
        # LOCK_NB makes this non-blocking: fail immediately if already held.
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return False
    return True
76fb74f2b5218834b66f017857bf7d0d087533b3
696,676
def depends(*dependencies):
    """Decorator function for marking fixture dependencies of a function.

    Example:
        .. code:: python

            from rok.fixtures import fixture, depends

            @depends("engine")
            def fetch_records(engine):
                ...

            # Fixtures themselves can also depend on other fixtures
            @fixture
            @depends("config")
            def engine(config):
                return create_engine(config=config)

    Args:
        *dependencies: Fixtures the decorated function depends on

    Returns:
        callable: Decorator for explicitly marking function dependencies.
    """
    def decorator(func):
        # Accumulate onto any dependency list added by an earlier decorator.
        if not hasattr(func, "depends"):
            func.depends = []
        func.depends.extend(dependencies)
        return func

    return decorator
6b8a7b8f24705b9f545933ffc580788062752fb3
696,677
import math


def brackets_depth_distance(clashes, idx_a, idx_b):
    """Given two slot indices (each clash has two slots) from the first
    round, returns the number of rounds each one must pass to face the
    other.
    """
    n_clashes = len(clashes)
    if not 0 <= idx_a < n_clashes:
        raise IndexError('idx_a index out of range')
    if not 0 <= idx_b < n_clashes:
        raise IndexError('idx_b index out of range')
    if idx_a == idx_b:
        return 1
    distance = int(math.log(n_clashes, 2)) + 1
    span = n_clashes // 2
    # Shrink the bracket span while both indices still fall in the same half.
    while span > 0 and idx_a // span == idx_b // span:
        distance -= 1
        span //= 2
    return distance
13ca2531002b067bd7e02506f66899816043a51e
696,678
import torch


def so3_rotation_angle(R, eps: float = 1e-4, cos_angle: bool = False):
    """
    Calculates angles (in radians) of a batch of rotation matrices `R`
    with `angle = acos(0.5 * (Trace(R)-1))`. The trace of the input
    matrices is checked to be in the valid range `[-1-eps,3+eps]`; `eps`
    allows for small errors caused by limited machine precision.

    Args:
        R: Batch of rotation matrices of shape `(minibatch, 3, 3)`.
        eps: Tolerance for the valid trace check.
        cos_angle: If True, return the cosine of the rotation angles rather
            than the angle itself, avoiding the unstable `acos`.

    Returns:
        Corresponding rotation angles of shape `(minibatch,)`.
        If `cos_angle==True`, returns the cosine of the angles.

    Raises:
        ValueError if `R` is of incorrect shape.
        ValueError if `R` has an unexpected trace.
    """
    N, dim1, dim2 = R.shape
    if dim1 != 3 or dim2 != 3:
        raise ValueError("Input has to be a batch of 3x3 Tensors.")
    trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]
    if ((trace < -1.0 - eps) + (trace > 3.0 + eps)).any():
        raise ValueError(
            "A matrix has trace outside valid range [-1-eps,3+eps]."
        )
    # Clamp to the mathematically valid range before the acos.
    cos_phi = 0.5 * (torch.clamp(trace, -1.0, 3.0) - 1.0)
    return cos_phi if cos_angle else cos_phi.acos()
8b491db2fb5dfe8ffbeada503316d425e0df580c
696,679
def has_options(cli):
    """
    Checks if the cli command contains any options (e.g. --regex $REGEX).

    Note: this is a substring match, so any token containing '--' counts.
    """
    return any('--' in token for token in cli)
bc7b7400fc810d7195f03ba2805112645540ee63
696,681
def _get_path_by_name(part, paths):
    """
    Given a command part, find the path it represents.

    :throws ValueError: if no valid file is found.
    """
    for candidate in paths:
        if candidate.alias == part:
            return candidate
    # No alias matched the given part.
    raise ValueError
98ba110ff068ccab9195c6a1511f969bc3600756
696,682
def end_file(current_output):
    """End an output file.

    This is smart enough to do nothing if current_output is None.
    @param current_output: the current file output (or None).
    @returns: None, to represent the closed stream.
    """
    if not current_output:
        return None
    # Close out the iati-activities document before releasing the stream.
    current_output.write("</iati-activities>\n")
    current_output.close()
    return None
5b974790ad78038654080f8b0b11a183e4b1513b
696,683
def getChanceAgreement(l1, l2):
    """
    Returns p_e, the probability of chance agreement:
    (1/N^2) * sum(n_k1 * n_k2) over the two binary categories (0 and 1)
    for two equal-length binary lists l1 and l2.
    """
    assert(len(l1) == len(l2))
    agreement = sum(l1.count(category) * l2.count(category) for category in (0, 1))
    return (1 / float(len(l1) ** 2)) * agreement
a4d11255ab8607c62a140b4bbd145b73b1e18524
696,684
import argparse


def parse_args():
    """Parse arguments for training script"""
    parser = argparse.ArgumentParser(description='PackNet-SfM training script')
    parser.add_argument('file', type=str, help='Input file (.ckpt or .yaml)')
    parsed = parser.parse_args()
    # Only checkpoint or config files are accepted as input.
    assert parsed.file.endswith(('.ckpt', '.yaml')), \
        'You need to provide a .ckpt of .yaml file'
    return parsed
ac548525a78dbb3f4237da9df3f6daf96f8a6dd7
696,685
def minimum_absolute_difference(arr):
    """Hackerrank Problem:
    https://www.hackerrank.com/challenges/minimum-absolute-difference-in-an-array/problem

    After sorting, the minimum absolute difference must occur between two
    adjacent values, so one pass over consecutive pairs suffices.

    Args:
        arr: Array of integers to check (sorted in place)

    Returns:
        int: The minimum absolute difference between two elements of the array
    """
    arr.sort()  # in place, matching the original's side effect
    best = arr[-1] - arr[0]
    for prev, nxt in zip(arr, arr[1:]):
        best = min(best, nxt - prev)
    return best
00dc3ce179282b669407ea3a94fea07538a404d9
696,686
def find_dead_end_reactions(model):
    """
    Identify reactions that are structurally prevented from carrying flux (dead ends).

    A metabolite touched by exactly one reaction pins that reaction to zero
    flux; removing such reactions can expose further dead ends, so the
    pruning repeats until a fixed point is reached.

    :param model: model exposing ``reactions``, each with a ``metabolites``
        mapping of metabolite -> stoichiometric coefficient (assumed
        cobra-style from usage — confirm against callers).
    :return: frozenset of the blocked reaction objects.
    """
    # Map metabolite id -> {reaction: coefficient} for every occurrence.
    stoichiometries = {}
    for reaction in model.reactions:
        for met, coef in reaction.metabolites.items():
            stoichiometries.setdefault(met.id, {})[reaction] = coef
    blocked_reactions = set()
    while True:
        new_blocked = set()
        for met_id, stoichiometry in stoichiometries.items():
            if len(stoichiometry) == 1:
                # Metabolite is only associated with 1 reaction, which can thus not be active
                new_blocked.add(list(stoichiometry)[0])
        if len(new_blocked) == 0:
            break  # No more blocked reactions
        # Remove blocked reactions from stoichiometries
        stoichiometries = {
            met_id: {reac: coef for reac, coef in stoichiometry.items() if reac not in new_blocked}
            for met_id, stoichiometry in stoichiometries.items()}
        blocked_reactions.update(new_blocked)
    return frozenset(blocked_reactions)
c60af7b0a1c2813e57c599ddb5516ec1fe8d2aa2
696,687
def de_punc(s, punc=None, no_spaces=True, char='_'):
    """Remove punctuation and/or spaces in strings and replace with
    underscores or nothing.

    Parameters
    ----------
    s : string
        input string to parse
    punc : string
        A string of characters to replace ie. '@ "!\'\\[]'
    no_spaces : boolean
        True, replaces spaces with underscore. False, leaves spaces
    char : string
        Replacement character
    """
    if (punc is None) or not isinstance(punc, str):
        # Default punctuation set; '_' deliberately excluded.
        punc = '!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~'
    if no_spaces:
        punc = " " + punc
    return "".join(char if ch in punc else ch for ch in s)
670a1bdc19fb65684e0f334542e5734cf06a636e
696,688
def GetTypeFromSoappyService(type_name, ns, soappy_service):
    """Digs in a SOAPpy service proxy and returns the object representing a type.

    Args:
        type_name: string The name of the WSDL-defined type to search for.
        ns: string The namespace the given WSDL-defined type belongs to.
        soappy_service: SOAPpy.WSDL.Proxy The SOAPpy service object
            encapsulating the information stored in the WSDL.

    Returns:
        mixed The object created by SOAPpy representing the given type;
        either a SimpleType or ComplexType object.
    """
    namespace_types = soappy_service.wsdl.types[ns].types
    return namespace_types[type_name]
6aa77925fe4d24020bb5860b370192a3fd0675e8
696,689
def equalizer(n: int, m: int, total: int):
    """
    Receives total, m and n [0..total].
    Returns a tuple (a, b) so that their sum -> total, and a / b -> 1.
    """
    remainder = total % 2
    half = total // 2
    # Whichever bound is tightest (n, m, or an even split) shapes the result.
    limiter = min(n, m, half + remainder)
    if limiter == n:
        return (n, min(m, total - n))
    if limiter == m:
        return (min(n, total - m), m)
    return (half, half + remainder)
5f1a6177c9d728a00735f3330be3a4f5772bd283
696,690
def k_neighbors(kdtree, k): """ Get indices of K neartest neighbors for each point Parameters ---------- kdtree: pyntcloud.structrues.KDTree The KDTree built on top of the points in point cloud k: int Number of neighbors to find Returns ------- k_neighbors: (N, k) array Where N = kdtree.data.shape[0] """ # [1] to select indices and ignore distances # [:,1:] to discard self-neighbor return kdtree.query(kdtree.data, k=k + 1, n_jobs=-1)[1][:, 1:]
1e377c8a1444858b9fc5e293a8300fd20e0dea8a
696,691
import argparse


def parse_args():
    """Defines and parses command-line arguments."""
    # The long description is shown by --help; RawTextHelpFormatter keeps
    # its manual layout intact.
    parser = argparse.ArgumentParser(description="""
Compute metrics for trackers using MOTChallenge ground-truth data.

Files
-----
All file content, ground truth and test files, have to comply with the
format described in

Milan, Anton, et al.
"Mot16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/

Structure
---------

Layout for ground truth data
    <GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
    <GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
    ...

Layout for test data
    <TEST_ROOT>/<SEQUENCE_1>.txt
    <TEST_ROOT>/<SEQUENCE_2>.txt
    ...

Sequences of ground truth and test will be matched according to the
`<SEQUENCE_X>` string.""", formatter_class=argparse.RawTextHelpFormatter)
    # NOTE(review): the hard-coded /workspace defaults look machine-specific.
    parser.add_argument('--gt_txt', type=str, help=' ground truth files.',
                        default='/workspace/workspace/tracking/py-motmetrics/motmetrics/data/TUD-Campus/gt.txt')
    parser.add_argument('--test_txt', type=str, help=' tracker result files',
                        default='/workspace/workspace/tracking/py-motmetrics/motmetrics/data/TUD-Campus/test.txt')
    parser.add_argument('--solver', type=str, help='LAP solver to use for matching between frames.')
    parser.add_argument('--id_solver', type=str, help='LAP solver to use for ID metrics. Defaults to --solver.')
    parser.add_argument('--exclude_id', dest='exclude_id', default=False, action='store_true',
                        help='Disable ID metrics')
    parser.add_argument('--loglevel', type=str, help='Log level', default='info')
    parser.add_argument('--fmt', type=str, help='Data format', default='mot15-2D')
    return parser.parse_args()
9412df8605693ac7d0bc0605f19505b36be553db
696,692
def flow_balance(m, v, co, t):
    """Calculate commodity flow balance in a vertex from/to arcs.

    Sums inbound flow (Pot) minus outbound flow (Pin) over all neighbours
    of vertex *v* for commodity *co* at time *t*.
    """
    total = 0
    for neighbour in m.neighbours[v]:
        total += m.Pot[neighbour, v, co, t]
        total -= m.Pin[v, neighbour, co, t]
    return total
e832ecb64915ac5c6bed22eaaeba1baadd3c8fbc
696,693
import os


def resolve_full_gcp_key_path(key: str) -> str:
    """
    Returns the full path to the provided GCP key.

    :param key: Name of the GCP key, for example ``my_service.json``
    :type key: str
    :returns: Full path to the key
    """
    # CREDENTIALS_DIR overrides the default breeze keys directory.
    base_dir = os.environ.get("CREDENTIALS_DIR", "/files/airflow-breeze-config/keys")
    return os.path.join(base_dir, key)
07140f74f4bb1d0b13942e46aa24bb360973a96a
696,694
import inspect


def geometry_package(top: float, bottom: float, left: float, right: float) -> str:
    """
    Generate latex code to add geometry package.

    :param top: top margin (cm)
    :param bottom: bottom margin (cm)
    :param left: left margin (cm)
    :param right: right margin (cm)
    """
    margins = f"left = {left}cm, right = {right}cm, top = {top}cm, bottom = {bottom}cm"
    # cleandoc strips the indentation/newlines of the literal.
    return inspect.cleandoc(rf"""
        \usepackage[{margins}]{{geometry}}
        """)
676f5490ec599fcd10075365f0f90db84bd2896b
696,695
import re
import importlib


def create_component_obj(model, sim_params):
    """Create components from model.

    :param model: smooth model
    :type model: dictionary
    :param sim_params: simulation parameters
    :type sim_params: :class:`~smooth.framework.simulation_parameters.SimulationParameters`
    :return: list of components in model
    :rtype: list of :class:`~smooth.components.component.Component`
    """
    # CREATE COMPONENT OBJECTS
    components = []
    for name, this_comp in model['components'].items():
        # Add simulation parameters to the components so they can be used
        this_comp['sim_params'] = sim_params
        # assign unique name
        this_comp['name'] = name
        # load the component class.
        this_comp_type = this_comp['component']
        # Component type should consist of lower case letters, numbers and
        # underscores — it becomes part of a module name below.
        if re.fullmatch(r'[a-z0-9_]+', this_comp_type) is None:
            raise ValueError('Invalid component type name "{}". '
                             'Only lower case letters, numbers and underscores are allowed.'
                             .format(this_comp_type))
        # Import the module of the component.
        this_comp_module = importlib.import_module('smooth.components.component_' + this_comp_type)
        # Convert component type from snake_case to CamelCase to get class name
        class_name = ''.join(x.capitalize() for x in this_comp_type.split('_'))
        # Load the class (which by convention has a name with a capital first
        # letter and camel case).
        this_comp_class = getattr(this_comp_module, class_name)
        # Initialize the component.
        this_comp_obj = this_comp_class(this_comp)
        # Check if this component is valid.
        this_comp_obj.check_validity()
        # Add this component to the list containing all components.
        components.append(this_comp_obj)
    return components
c266424406d417c9e5553064ecef37c76ead5aeb
696,696
import os


def get_test_data_file_path(test_data_file_name=''):
    """Try to locate the test case data that comes with capriqorn.

    Returns the full path to the test data directory including a trailing
    slash; if 'test_data_file_name' is given, the full path to that file is
    returned, or None in case the directory and/or file does not exist.
    """
    # Resolve relative to the directory of this Python file.
    here = os.path.dirname(os.path.abspath(__file__))
    candidate = os.path.abspath(here + "/tests/data/" + test_data_file_name)
    if os.path.isdir(candidate):
        return candidate + "/"
    if os.path.isfile(candidate):
        return candidate
    return None
581bdf8405c862bc674eeafbfa94eb0c6ce6b02b
696,697
def sum_lists_product(list1, list2):
    """
    Return the sum of multiplying corresponding list elements.

    :param list1: A list
    :param list2: A list
    :return: A number representing the sum
    """
    total = 0
    for a, b in zip(list1, list2):
        total += a * b
    return total
8c27c510458c7591b2a8ca3ccdd7e3f54041f171
696,698
def process_manual_category(tag, manual_tag):
    """Process category info: resolve the manual tag and return the
    comma-joined category names (the part before '|' in each tag).

    :param tag: fallback tag string used when manual_tag is the 1/1.0
        placeholder.
    :param manual_tag: manually assigned tag; either a comma-separated
        string or the text form of a Python list literal.
    :return: comma-joined category names.
    """
    import ast

    replace_tag = str(manual_tag)
    if replace_tag in ("1", "1.0"):
        replace_tag = tag
    # Bug fix: the old test `"[" and "]" not in replace_tag` evaluated the
    # left operand away and only ever checked for "]". Check both brackets
    # before treating the string as a list literal.
    if "[" not in replace_tag and "]" not in replace_tag:
        tag_list = replace_tag.split(",")
    else:
        # literal_eval instead of eval: parses the list without executing
        # arbitrary code from the data.
        tag_list = ast.literal_eval(replace_tag)
    return ",".join(t.split("|")[0] for t in tag_list)
4e9a2423921df6e8d9d722bec9e51e845a0e0267
696,699
def propeller_efficiency(thrust, shaft_power, velocity):
    """Efficiency of converting shaft power to thrust power.

    eta = (thrust * velocity) / shaft_power
    """
    thrust_power = thrust * velocity
    return thrust_power / shaft_power
84a6360e3aec7da2022c2cedca0104f4c7f95190
696,700
import logging


def get_log():
    """Get the singleton :class:`logging.Logger` instance for the Stac library.

    :return: The logger instance for the library
    :rtype: logging.Logger
    """
    logger = logging.getLogger('stac')
    return logger
81fddee3ad3b3433cf0f330346846bb3114ce0d4
696,701
def make_dict(string, integer):
    """makes a dictionary of Account Names and Account Numbers"""
    mapping = dict()
    mapping[string] = integer
    return mapping
e71bc7cdbf6d8318094d5c9d81a87a6395340ce4
696,702
def _get_rest_endpoint_base_url(rest_service_base_url: str) -> str:
    """
    Return the base URL of the endpoints of BAM's REST service.

    :param rest_service_base_url: Base URL of BAM's REST service.
    :type rest_service_base_url: str
    :return: The base URL of the endpoints ("v1" appended).
    :rtype: str
    """
    return rest_service_base_url + "v1"
d947d242a63203f0007433be383ed74cb4289ff4
696,703
import os


def os_path_choose():
    """Find the last modified, else highest name, else '/dev/null' in the Cwd"""
    entries = os.listdir()
    # Oldest first, so the most recently modified entry ends up last.
    entries.sort(key=lambda entry: os.stat(entry).st_mtime)
    if entries:
        return entries[-1]
    return "/dev/null"
b8d3609e1db760f788b99e6ea0b3180ef040fcb2
696,704
def get_feature_importance_by_model(model):
    """
    Returns the features importance of a model.

    :param model: the classifier (must expose ``feature_importances_``)
    :return: The list of feature importance
    """
    importances = model.feature_importances_
    return importances
b9dff896024f3a006862254289e9ee81b901e8a9
696,705
def _filter_kwargs(args, keys):
    """Filters and transforms argparse's args to a kwargs dict.

    Args:
        args: The arguments found in the `argparse.ArgumentParser`
        keys: The keys to keep

    Returns:
        dict with each requested key that is present on ``args`` and not None.
    """
    return {
        key: getattr(args, key)
        for key in keys
        if hasattr(args, key) and getattr(args, key) is not None
    }
676dff968fc8b8dd9ffe31648df397c84af4e12f
696,706
from typing import Tuple


def parse_sender(line: str) -> Tuple[str, str]:
    """
    Extracts the sender of a line (the timestamp should already be removed).

    :param line: Line to parse.
    :return: a tuple with the sender (if any) and the rest of the line.
    """
    if ':' not in line:
        return "", line  # It's a system message
    sender, rest_of_line = line.split(':', 1)
    # Strip only the single separator space when it is actually there: the
    # old code blindly dropped the first character, corrupting lines that
    # had no space after the colon (e.g. "a:b" became ("a", "")).
    if rest_of_line.startswith(' '):
        rest_of_line = rest_of_line[1:]
    return sender, rest_of_line
637af2f7a73d1cd953a26b8cbd52eab5f3487fe3
696,707
import os


def _insert_hash(filepath, file_hash):
    """Insert ``file_hash`` into ``filepath`` just before its extension.

    Args:
        filepath: str. Path where the hash should be inserted.
        file_hash: str. Hash to be inserted into the path.

    Returns:
        str. Path of the form ``stem.hash.ext``.
    """
    stem, extension = os.path.splitext(filepath)
    return ''.join([stem, '.', file_hash, extension])
2ba281b45bb39898d8cfc40fa326e556ed5021c6
696,708
import random
import string


def random_tag(k=8):
    """Return a random ``k``-character tag (uppercase letters and digits)
    for disambiguating filenames."""
    alphabet = string.ascii_uppercase + string.digits
    picks = [random.choice(alphabet) for _ in range(k)]
    return "".join(picks)
00ae606478b10a78bacb8cfe673554aebc738a89
696,709
def __split_sci(value):
    """Split a number in scientific notation into (coefficient, power).

    Private helper; currently works on scalars only.

    Parameters
    ----------
    value : numerical value

    Returns
    -------
    (coefficient, power) : tuple of float
    """
    coeff_text, _, exp_text = '{0:e}'.format(value).partition('e')
    return (float(coeff_text), float(exp_text))
09692f2238c9f1ccffa3d33bc1c99508bd7ad900
696,710
def create_metrics_extension_conf(az_resource_id, aad_url):
    """
    Create the metrics extension config.

    :param az_resource_id: Azure Resource ID for the VM
    :param aad_url: AAD auth url for the VM
    :return: the configuration as a JSON-formatted string
    """
    # The template below is a fixed JSON document; only azureResourceId
    # and aadAuthority are interpolated from the arguments, and the AAD
    # token is read at runtime from the MSIAuthToken environment
    # variable. Do not reformat the literal: it is emitted verbatim.
    conf_json = '''{ "timeToTerminateInMs": 4000, "configurationExpirationPeriodInMinutes": 1440, "configurationQueriesFrequencyInSec": 900, "configurationQueriesTimeoutInSec": 30, "maxAcceptedMetricAgeInSec": 1200, "maxDataEtwDelayInSec": 3, "maxPublicationAttemptsPerMinute": 5, "maxPublicationBytesPerMinute": 10000000, "maxPublicationMetricsPerMinute": 500000, "maxPublicationPackageSizeInBytes": 2500000, "maxRandomPublicationDelayInSec": 25, "metricsSerializationVersion": 4, "minGapBetweenPublicationAttemptsInSec": 5, "publicationTimeoutInSec": 30, "staleMonitoringAccountsPeriodInMinutes": 20, "internalMetricPublicationTimeoutInMinutes": 20, "dnsResolutionPeriodInSec": 180, "maxAggregationQueueSize": 500000, "initialAccountConfigurationLoadWaitPeriodInSec": 20, "etwMinBuffersPerCore": 2, "etwMaxBuffersPerCore": 16, "etwBufferSizeInKb": 1024, "internalQueueSizeManagementPeriodInSec": 900, "etwLateHeartbeatAllowedCycleCount": 24, "etwSampleRatio": 0, "maxAcceptedMetricFutureAgeInSec": 1200, "aggregatedMetricDiagnosticTracePeriod": 900, "aggregatedMetricDiagnosticTraceMaxSize": 100, "enableMetricMetadataPublication": true, "enableDimensionTrimming": true, "shutdownRequestedThreshold": 5, "internalMetricProductionLevel": 0, "maxPublicationWithoutResponseTimeoutInSec": 300, "maxConfigQueryWithoutResponseTimeoutInSec": 300, "maxThumbprintsPerAccountToLoad": 100, "maxPacketsToCaptureLocally": 0, "maxNumberOfRawEventsPerCycle": 1000000, "publicationSimulated": false, "maxAggregationTimeoutPerCycleInSec": 20, "maxRawEventInputQueueSize": 2000000, "publicationIntervalInSec": 60, "interningSwapPeriodInMin": 240, "interningClearPeriodInMin": 5, "enableParallelization": true, "enableDimensionSortingOnIngestion": true, "rawEtwEventProcessingParallelizationFactor": 1, 
"maxRandomConfigurationLoadingDelayInSec": 120, "aggregationProcessingParallelizationFactor": 1, "aggregationProcessingPerPartitionPeriodInSec": 20, "aggregationProcessingParallelizationVolumeThreshold": 500000, "useSharedHttpClients": true, "loadFromConfigurationCache": true, "restartByDateTimeUtc": "0001-01-01T00:00:00", "restartStableIdTarget": "", "enableIpV6": false, "disableCustomMetricAgeSupport": false, "globalPublicationCertificateThumbprint": "", "maxHllSerializationVersion": 2, "enableNodeOwnerMode": false, "performAdditionalAzureHostIpV6Checks": false, "compressMetricData": false, "publishMinMaxByDefault": true, "azureResourceId": "'''+ az_resource_id +'''", "aadAuthority": "'''+ aad_url +'''", "aadTokenEnvVariable": "MSIAuthToken" } '''
    return conf_json
08c1bd23fc021515664e8476307264179c0c9652
696,711
import pickle


def load_pickle(file_path):
    """
    Unpickle some data from a given path.

    :param file_path: Target file path.
    :return: data: The python object that was serialized and stored in disk.
    """
    # A context manager guarantees the handle is closed even when
    # pickle.load raises (the explicit open/close version leaked the
    # file descriptor on error).
    with open(file_path, 'rb') as pkl_file:
        return pickle.load(pkl_file)
2aa2d2fcdd408cedfce43c9cea05065904bd2c98
696,712
import itertools


def _sorted_kwargs_list(kwargs):
    """
    Returns a unique and deterministic ordered list from the given kwargs.

    The result interleaves keys and values in key-sorted order:
    ``[k1, v1, k2, v2, ...]``.
    """
    flattened = []
    for key, value in sorted(kwargs.items()):
        flattened.append(key)
        flattened.append(value)
    return flattened
e442044bf6ff1ec0d5308cd200272e4a8993174f
696,713
def dpd(td, t, roundit=True):
    """Compute dew point depression (t - td) in degrees C.

    :param td: dew point temperature in degrees C (array or scalar)
    :param t: dry bulb temperature in degrees C (array or scalar)
    :param roundit: round the result to one decimal place, default True
    :type roundit: boolean
    :return: dew point depression in degrees C, or None when either
        input is None

    Example: dpd(10., 15.) -> 5.0
    """
    if td is None or t is None:
        return None
    depression = t - td
    if not roundit:
        return depression
    # Scale-round-unscale keeps one decimal place and also works for
    # array inputs.
    return round(depression * 10.) / 10.
b4505fa9ec6ee3fc5ec07788e35ddc494ae839bb
696,714
def version(cmd_input):
    """Report the musedev API version implemented by this tool.

    Writes the literal "3" to the output stream, flushes it, and
    returns 0.
    """
    cmd_input.logger.info("Command: version")
    stream = cmd_input.outstream
    stream.write("3")
    stream.flush()
    return 0
670ad5d5b2af44ee7685e1ee6768c242492a4ede
696,715
def format_parties(parties):
    """
    Return the list of parties from the case title.

    :param parties: string containing the parties name
    :type parties: str
    :return: list of names
    :rtype: [str]
    """
    prefix = 'CASE OF '
    if parties.startswith(prefix):
        parties = parties[len(prefix):]
    if parties[-1] == ')':
        # Drop a trailing parenthesised qualifier, e.g. "(no. 2)".
        parties = parties.split('(')[0]
    return [name.strip() for name in parties.split(' v. ')]
b0a9040d2c8a5b69647f92550c590192470d5692
696,716
import csv


def read_csv_fieldnames(filename, separator, quote):
    """
    Inputs:
      filename - name of CSV file
      separator - character that separates fields
      quote - character used to optionally quote fields
    Ouput:
      A list of strings corresponding to the field names in the given
      CSV file. The field names are assumed to be the first row; an
      empty file yields [] (instead of raising IndexError).
    """
    with open(filename, "rt", newline='') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=separator, quotechar=quote)
        # Only the header row is needed, so pull a single row rather
        # than looping over the whole file and breaking.
        return next(csvreader, [])
ead8b5ff5ca11d47771cf793309a640e2b1410a1
696,717
import torch
import warnings


def properize_captions(captions, vocab, add_sos=True):
    """
    Normalize generated captions so that each one ends with a single
    <eos> token, and optionally prepend <sos>.

    :param captions: torch Tensor holding M x max_len integers
    :param vocab: vocabulary object exposing ``eos``, ``pad`` and
        ``sos`` token ids
    :param add_sos: if True, prepend an <sos> column to the result
    :return: LongTensor of shape M x max_len (M x (max_len + 1) when
        add_sos is True); mutates the CPU copies of the input rows
    """
    # ensure they end with eos.
    new_captions = []
    missed_eos = 0
    for caption in captions.cpu():
        # Positions of every <eos> occurrence in this caption.
        ending = torch.where(caption == vocab.eos)[0]
        if len(ending) >= 1:  # at least one <eos> symbol is found
            first_eos = ending[0]
            if first_eos < len(caption):
                # Blank out everything after the first <eos> with <pad>.
                caption[first_eos+1:] = vocab.pad
        else:
            # No <eos> at all: force one into the last slot and count it.
            missed_eos += 1
            caption[-1] = vocab.eos
        new_captions.append(caption)
    new_captions = torch.stack(new_captions)
    # Row indices that contain at least one <eos>; by construction this
    # must now be every row.
    dummy = torch.unique(torch.where(new_captions == vocab.eos)[0])
    assert len(dummy) == len(new_captions)  # assert all have an eos.
    if add_sos:
        # Prepend an <sos> column to every caption.
        sos = torch.LongTensor([vocab.sos] * len(new_captions)).view(-1, 1)
        new_captions = torch.cat([sos, new_captions], dim=1)
    if missed_eos > 0:
        warnings.warn('{} sentences without <eos> were generated.'.format(missed_eos))
    return new_captions
ac7278ec0c7b075a095dc8297959f440aee4401c
696,718
def resolve_negative_axis(ndims, axis):
    """
    Resolve all negative `axis` indices according to `ndims` into positive.

    Usage::

        resolve_negative_axis(4, [0, -1, -2])  # output: (0, 3, 2)

    Args:
        ndims (int): Number of total dimensions.
        axis (Iterable[int]): The axis indices.

    Returns:
        tuple[int]: The resolved positive axis indices.

    Raises:
        ValueError: If any index in `axis` is out of range, or if the
            resolved indices contain duplicates.
    """
    axis = tuple(int(a) for a in axis)
    resolved = []
    for idx in axis:
        if idx < 0:
            idx += ndims
        if idx < 0 or idx >= ndims:
            raise ValueError('`axis` out of range: {} vs ndims {}.'.
                             format(axis, ndims))
        resolved.append(idx)
    if len(resolved) != len(set(resolved)):
        raise ValueError('`axis` has duplicated elements after resolving '
                         'negative axis: ndims {}, axis {}.'.
                         format(ndims, axis))
    return tuple(resolved)
7f43943e20d66d6e9de8ea750fd1c2eb16764e69
696,719
def preprocess(train):
    """
    Replace punctuation, digits and newline characters with spaces in
    the given string, then split it into a list of words.

    Parameters:
        train -- The string that will be preprocessed

    Returns:
        train -- A list containing words
    """
    # One translate() pass replaces the chain of ~44 str.replace calls
    # (one full string scan each; the original list also contained "+"
    # twice). Behavior is unchanged: every listed character becomes a
    # space, and split() collapses runs of spaces.
    unwanted = ",.:\"'/\\*=-_)([]{}%+!@#$^&|;<>?`~0123456789\n"
    table = str.maketrans(unwanted, " " * len(unwanted))
    return train.translate(table).split()
544befcc0e74d43261679aa634cc4d677f1bada2
696,720
def number_of_lines(filename=""):
    """
    Count the number of lines in a text file.

    Args:
        filename (str): The name of the file

    Returns:
        The number of lines of the file
    """
    with open(filename, 'r', encoding='utf8') as handle:
        return sum(1 for _ in handle)
47539fc0280251f0334da9ce2ab2973d9f0cc63e
696,721
def split(w, e, s, n, top, bottom, nlon, nlat, nr, stack, stktop):
    """
    Subdivide the region [w, e] x [s, n] x [bottom, top] into
    nlon x nlat x nr cells and push each cell onto the stack.

    Each stack row receives (w, e, s, n, top, bottom) of one cell;
    returns the updated stack-top index.
    """
    dlon = (e - w)/nlon
    dlat = (n - s)/nlat
    dr = (top - bottom)/nr
    for ilon in range(nlon):
        for ilat in range(nlat):
            for ir in range(nr):
                stktop += 1
                row = stack[stktop]
                row[0] = w + ilon*dlon
                row[1] = w + (ilon + 1)*dlon
                row[2] = s + ilat*dlat
                row[3] = s + (ilat + 1)*dlat
                row[4] = bottom + (ir + 1)*dr
                row[5] = bottom + ir*dr
    return stktop
405454e7ed9c1cba9b54a81406ffb279a337050d
696,722
def build_average_on_duplicate_rows(df):
    """
    Collapse duplicate-index rows into their mean.

    If the dataframe has duplicate indexes, the average value of all
    rows sharing an index is computed and assigned to the first
    occurrence of that index; the remaining duplicates are dropped.
    The DataFrame should already be sorted before calling this function.

    :param pd.DataFame df: DataFrame with the data to process
    :return: pd.DataFame The processed DataFame
    """
    # Indexes that occur more than once, each listed a single time.
    duplicated_idx = df.index[df.index.duplicated()].unique()
    # Row-wise mean over every occurrence of each duplicated index.
    averages = {idx: df.loc[idx].values.mean(axis=0)
                for idx in duplicated_idx}
    # Keep only the first occurrence of every index.
    result = df[~df.index.duplicated(keep='first')].copy()
    # Overwrite the surviving rows with the computed means.
    for idx, mean_row in averages.items():
        result.loc[idx] = mean_row
    return result
fdca09314588a2e121420ba1547185869b61a66d
696,723
def to_bytes_literal(seq):
    """Render a byte sequence as a Python bytes literal that uses only
    hex escapes, e.g. ``b"\\x00\\xff"``."""
    hex_escapes = ("\\x{:02x}".format(byte) for byte in seq)
    return 'b"{}"'.format("".join(hex_escapes))
dadbc38fd86daf2b6dd618eee6c792bc2044d09c
696,724
def filter_cellular_barcodes_manual(matrix, cell_barcodes):
    """ Keep every barcode in the matrix that was explicitly listed as a
    cell barcode. """
    kept = list(set(cell_barcodes) & set(matrix.bcs))
    n_kept = len(kept)
    # lb/ub equal the point estimate because the selection is manual.
    metrics = {
        'filtered_bcs': n_kept,
        'filtered_bcs_lb': n_kept,
        'filtered_bcs_ub': n_kept,
        'max_filtered_bcs': 0,
        'filtered_bcs_var': 0,
        'filtered_bcs_cv': 0,
    }
    return kept, metrics, None
58a07034139fac1d189303203d6e849d5637e3fe
696,726
def line(etok):
    """String giving the starting line number of Etok, or '' when it
    carries no raw tokens."""
    raw = etok.raw
    if not raw:
        return ''
    return 'line={}.'.format(raw[0].lineno)
5b1160054db8b90fd27cc7557e1cee17b9f8f8e3
696,727
def get_ws(R, g, Ds, nu):
    """
    Settling velocity of sediment particles after Ferguson and Church
    (1982), using their coefficients for natural sands.

    Return
    ------------------
    ws : settling velocity of sediment particles [m/s]
    """
    C_1 = 18.0  # viscous-drag coefficient for natural sands
    C_2 = 1.0   # form-drag coefficient for natural sands
    numerator = R * g * Ds ** 2
    denominator = C_1 * nu + (0.75 * C_2 * R * g * Ds ** 3) ** 0.5
    return numerator / denominator
cf09276c3624b2ba86082c3399b805c36b0b67b8
696,728
def isinstance_safe(value, type_):
    """Like isinstance(), but returns False instead of raising when
    ``type_`` is not a valid classinfo argument."""
    try:
        result = isinstance(value, type_)
    except TypeError:
        # isinstance rejects some second arguments (e.g. plain ints).
        return False
    return result
57223307123efb564fe1cfd4089a4e056254c258
696,729
import collections


def collapseExpansions(expansions, numDims):
    """Scans through the given list of expansions (each assumed to pertain
    to a single 3D image), and combines any which cover the same image
    area, and cover adjacent volumes.

    :args expansions: A list of expansion slices - see
                      :func:`calcExpansions`.

    :args numDims:    Number of dimensions covered by each expansion,
                      not including the volume dimension (i.e. 3 for
                      a 4D image).

    :returns: A list of expansions, with equivalent expansions that
              cover adjacent images collapsed down.

    .. note:: For one expansion ``exp`` in the ``expansions`` list, this
              function assumes that the range at ``exp[numDims]``
              contains the image to which ``exp`` pertains (i.e.
              ``exp[numDims] == (vol, vol + 1)``).
    """
    if len(expansions) == 0:
        return []
    # Maps each spatial expansion (first numDims ranges) to a list of
    # (vlo, vhi) volume ranges it covers. OrderedDict preserves the
    # first-seen order of spatial expansions in the output.
    commonExpansions = collections.OrderedDict()
    # Sorting groups identical spatial parts and orders volumes, so
    # adjacent volumes arrive consecutively and merge greedily below.
    expansions = sorted(expansions)
    for exp in expansions:
        # Volume index of this expansion (see the note above).
        vol = exp[numDims][0]
        # Spatial part only, made hashable for use as a dict key.
        exp = tuple(exp[:numDims])
        commonExps = commonExpansions.get(exp, None)
        if commonExps is None:
            commonExps = []
            commonExpansions[exp] = commonExps
        for i, (vlo, vhi) in enumerate(commonExps):
            if vol >= vlo and vol < vhi:
                # Already covered by an existing range.
                break
            elif vol == vlo - 1:
                # Extend an existing range downwards.
                commonExps[i] = vol, vhi
                break
            elif vol == vhi:
                # Extend an existing range upwards.
                commonExps[i] = vlo, vol + 1
                break
        else:
            # for/else: no existing range matched - start a new one.
            commonExps.append((vol, vol + 1))
    # Re-assemble each (spatial part, volume range) pair into a full
    # expansion of the original shape.
    collapsed = []
    for exp, volRanges in commonExpansions.items():
        for vlo, vhi in volRanges:
            newExp = list(exp) + [(vlo, vhi)]
            collapsed.append(newExp)
    return collapsed
073923a29d065ee21e26ef234135b4d358ecd288
696,730
import os


def InChroot():
    """Returns True if currently in the chroot.

    The chroot is detected by the presence of the CROS_WORKON_SRCROOT
    environment variable (any value, including empty, counts).
    """
    return os.environ.get('CROS_WORKON_SRCROOT') is not None
47fee163cc76702e15c7769a2a25301c41170d9b
696,731
def div(n1, n2):
    """
    Divides n1 by n2

    >>> div(9, 3)
    3.0
    """
    quotient = n1 / n2
    return quotient
19419ea749c640fd7fb895e45bd2e06ad29bde32
696,732
import numpy


def r_squared(y, estimated):
    """
    Calculate the R-squared error term.

    Args:
        y: list with length N, representing the y-coords of N sample points
        estimated: a list of values estimated by the regression model
    Returns:
        a float for the R-squared error term
    """
    residuals = numpy.array(estimated) - numpy.array(y)
    mean_sq_error = (residuals ** 2).sum() / len(y)
    return 1 - mean_sq_error / numpy.var(y)
c0544c16ab6130a304716493e699289dc5d3f88f
696,733
def getInputUser(wordChoose: str):
    """
    Prompt until the user types an alphabetic word of the same length
    as ``wordChoose``, then return it.

    :param wordChoose: reference word fixing the required length
    :return: the accepted user input (str)
    """
    prompt = "Choissisez un mot de " + str(len(wordChoose)) + " lettre(s) : "
    guess = ""
    while not guess.isalpha() or len(guess) != len(wordChoose):
        guess = input(prompt)
    return guess
de2e7680792bad85f769fa36cf07f93678c2fe5f
696,735
def est_pair(n: int) -> bool:
    """
    Check whether a number is even.

    An even integer has its least-significant bit clear, so masking
    with 1 yields 0 exactly for even numbers.

    Parameters:
        n: {int} -- the number to check.

    Returns:
        {bool} -- True when n is even, False otherwise.

    Example:
        >>> est_pair(6)
        True
        >>> est_pair(11)
        False
    """
    lowest_bit = n & 1
    return lowest_bit == 0
e839a02e3a949885f33cc5f0bfbd5d955b8dfbfc
696,736
def INT_NPURE(am):
    """Number of spherical (pure) functions for an angular momentum.

    Mirrors the C macro ``#define INT_NPURE(am) (2*(am)+1)``, except the
    absolute value is taken so negative inputs are treated by magnitude.
    """
    magnitude = abs(am)
    return 2 * magnitude + 1
a13bef8d883a7b60c184320ed3457ad12c9febca
696,737
def bounds(a):
    """Return a tuple of slices spanning the full extent of each axis
    of the array."""
    return tuple(slice(0, extent) for extent in a.shape)
fa9c7c8ed51d5c10ecd4392a72c6efe379a9407b
696,738
def move1(state, b1, dest):
    """ Decompose moving b1 to dest into 'get' and 'put' subtasks. """
    get_task = ('get', b1)
    put_task = ('put', b1, dest)
    return [get_task, put_task]
c84a2d8246017fa94a73dd2e3408f7f05cf3573d
696,739
import logging
import os


def start():
    """ App Engine is starting a fresh instance: log version/instance
    ids from the environment and return an empty 200 response. """
    gae_version = os.environ.get("GAE_VERSION", "")
    gae_instance = os.environ.get("GAE_INSTANCE", "")
    logging.info(
        "Start, version {0}, instance {1}".format(gae_version, gae_instance)
    )
    return "", 200, {}
7f834f3c171d99ba2fd39e4bdac8cbba1b745d53
696,740
def exists(dict, key):
    """ Check if a key exists in a dict.

    Note: the first parameter is named ``dict`` (shadowing the builtin)
    to keep the public signature unchanged for keyword callers.

    :param dict: mapping to inspect
    :param key: key to look up
    :return: True if the key is present, else False
    """
    # `key in mapping` is the idiomatic O(1) membership test; going
    # through .keys() builds an intermediate view for no benefit.
    return key in dict
3dd531b8a13af2e8905f721e05756bd8184e24c4
696,741
def get_character_table():
    """Return Pocket Miku's default lyric-to-index character table.

    Maps each singable syllable (kana text) to its sound number. The
    sequential base table is extended with special sounds that occupy
    fixed slots starting at 0x1A and 0x78.
    """
    base = u'''あ い う え お か き く け こ が ぎ ぐ げ ご きゃ きゅ きょ ぎゃ ぎゅ ぎょ さ すぃ す せ そ ざ ずぃ ず ぜ ぞ しゃ し しゅ しぇ しょ じゃ じ じゅ じぇ じょ た てぃ とぅ て と だ でぃ どぅ で ど てゅ でゅ ちゃ ち ちゅ ちぇ ちょ つぁ つぃ つ つぇ つぉ な に ぬ ね の にゃ にゅ にょ は ひ ふ へ ほ ば び ぶ べ ぼ ぱ ぴ ぷ ぺ ぽ ひゃ ひゅ ひょ びゃ びゅ びょ ぴゃ ぴゅ ぴょ ふぁ ふぃ ふゅ ふぇ ふぉ ま み む め も みゃ みゅ みょ や ゆ よ ら り る れ ろ りゃ りゅ りょ わ うぃ うぇ うぉ ん'''.strip().split()
    character_table = {text: index for index, text in enumerate(base)}
    # Special sounds at fixed offsets outside the sequential range.
    character_table.update(
        {text: index for index, text in
         enumerate(u'づぁ づぃ づ づぇ づぉ'.split(), 0x1A)})
    character_table.update(
        {text: index for index, text in
         enumerate(u'ゐ ゑ を N\\ m N J n'.split(), 0x78)})
    return character_table
f037785cc90f3fadd4dac15e51534998aa4bfba1
696,742
def allowed_file(filename):
    """
    Returns true if file is a csv

    :param filename: File name input as a str
    :return: Boolean
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() == 'csv'
80a467c6cfc2372a212d797209cbd4979d5d2c96
696,743
def list_peak_finder(L, start=0, end=None):
    """
    Definition: A peak exists at n L[n] where L[n] >= L[n-1], L[n+1].
    If L[n] only has one neighbour, then it is sufficient that it is
    greater than that neighbour only.

    Returns the peak (the element value, not its index), found by a
    binary divide-and-conquer over the half-open range [start, end).

    Complexity:
    T(n) = time take for an operation on list of size n. Then:
    T(n) = T(n/2) + c
         = T(n/4) + c + c
         ...lg n times
         = T(1) + lg(n)*c
    so, T(n) = Th(lg n)
    """
    if end == None:
        end = len(L)
    size = end - start
    # Midpoint of the current window.
    n = start + (size >> 1)
    if size > 2:
        # Recurse towards a side whose neighbour is >= the midpoint:
        # that side is guaranteed to contain a peak.
        if L[n-1] >= L[n]:
            return list_peak_finder(L, start, n)
        if L[n+1] >= L[n]:
            return list_peak_finder(L, n+1, end)
    if size == 1:
        # Single element: n == start, fall through to return L[n].
        pass
    if size == 2:
        # Two elements: the larger of the pair is the peak.
        if L[n-1] >= L[n]:
            return L[n-1]
    # Either the midpoint beats both neighbours (size > 2), or the
    # window collapsed to a trivial case above.
    return L[n]
64e919d2095722b3b36b6c51898b40297c2723c4
696,745
def miller_rabin_isprime(a, i, n):
    """
    Miller-Rabin primality test
    returns a 1 if n is a prime

    usually i = n - 1

    Computes a**i mod n by recursive repeated squaring, while checking
    at every squaring step for a non-trivial square root of 1 mod n
    (which certifies compositeness). Witness a reports "probably prime"
    when the final result is 1.

    see http://en.wikipedia.org/wiki/Miller–Rabin_primality_test#Deterministic_variants_of_the_test
    for a values
    """
    # Base case: a**0 == 1.
    if i == 0:
        return 1
    # x = a**(i // 2) mod n; a 0 propagates a compositeness verdict up.
    x = miller_rabin_isprime(a, i // 2, n)
    if x == 0:
        return 0
    # Square the half-exponent result.
    y = (x * x) % n
    # y == 1 with x not in {1, n-1} means x is a non-trivial square
    # root of 1 mod n, so n is composite.
    if ((y == 1) and (x != 1) and (x != (n - 1))):
        return 0
    # Odd exponent: multiply in one extra factor of a.
    if (i % 2) != 0:
        y = (a * y) % n
    return y
47f54b9be3f8dcbaaae8b78caa2b81fa36f21d1e
696,746