content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import math


def resolveToJamoIndex(syllable):
    """Decompose a precomposed Hangul syllable into jamo indices.

    :param syllable: string whose first character is the syllable to decompose
    :return: list ``[choseong, jungseong, jongseong]`` of jamo indices,
        or ``None`` when the character is not in the Hangul syllable
        block (U+AC00..U+D7A3).
    """
    # Original used the JavaScript method ``charCodeAt`` (AttributeError in
    # Python) and an ``isValid`` helper that called ``int.isdigit`` (also an
    # AttributeError); both replaced with working equivalents.
    code = ord(syllable[0]) - 0xAC00
    # Reject characters outside the precomposed-syllable range.
    if code < 0 or code > 11171:
        return None
    choseong = code // 588          # 588 = 21 jungseong * 28 jongseong
    jungseong = (code // 28) % 21
    jongseong = code % 28
    return [choseong, jungseong, jongseong]
ac65b58e02a5190f087a34db3efb2656726a6c77
699,939
def page_body_class(context):
    """Return a CSS class named after the resolved URL, or "404".

    Any missing attribute along ``context.request.resolver_match.url_name``
    is treated as an unresolved URL.
    """
    try:
        url_name = context.request.resolver_match.url_name
    except AttributeError:
        return "404"
    return "url-{}".format(url_name)
ec998cc7c4710837944e72130a3e7e761290cdde
699,940
def csvfile_out(csvfile):
    """Build a bucket-writing callback over an open CSV file.

    Returns a function that writes connections from a
    {_ConnectionKey->{key->value}} dictionary (the keys are the
    properties of the connection), one comma-separated line per
    connection.  The passed-in csvfile should already have been
    initialized with init_csvfile.
    """
    def csv_cb(bucket_time, bucket):
        for conn_key, counters in bucket.items():
            fields = [
                bucket_time,
                conn_key.ip1, conn_key.port1,
                conn_key.ip2, conn_key.port2,
                conn_key.proto,
                counters['1to2Bytes'], counters['2to1Bytes'],
                counters['1to2Packets'], counters['2to1Packets'],
            ]
            csvfile.write(','.join(str(field) for field in fields) + "\n")
    return csv_cb
fc82fb038d6d70ef12f455dcb1bc923b3c02042b
699,942
def set_heads(doc, heads):
    """Attach UD-style head links to the tokens of *doc*.

    A head value of 0 marks the root (the token points at itself);
    any other value is a 1-based index into *doc*.  When the head
    values are out of range, *doc* is returned untouched.
    """
    if max(heads) > len(doc) or min(heads) < 0:
        return doc
    for head_idx, token in zip(heads, doc):
        token.head = token if head_idx == 0 else doc[head_idx - 1]
    return doc
bad84ae29c0f5000d02243c55350df0a2dc698f4
699,944
def sammenlign( kandidat, neste, koordinattoleranse = 1e-6):
    """Decide whether two road entries may be merged.

    They qualify when they share the same coordinate (within the
    tolerance), the same 'vegref' value and adjoining dates; when a
    road-link id is present, the link id and (rounded) link position
    must match as well.
    """
    dx = kandidat['geometry']['coordinates'][0] - neste['geometry']['coordinates'][0]
    dy = kandidat['geometry']['coordinates'][1] - neste['geometry']['coordinates'][1]
    # Guard clauses: bail out on the first mismatching criterion.
    if abs(dx) >= koordinattoleranse or abs(dy) >= koordinattoleranse:
        return False
    if kandidat['properties']['vegref'] != neste['properties']['vegref']:
        return False
    if kandidat['properties']['fradato'] != neste['properties']['tildato']:
        return False
    # Also compare road-link id and position when they exist.
    if 'veglenkeid' in kandidat['properties'].keys():
        return (kandidat['properties']['veglenkeid'] == neste['properties']['veglenkeid']
                and round(kandidat['properties']['veglenkeposisjon'], 8)
                == round(neste['properties']['veglenkeposisjon'], 8))
    return True
d23b7ca13177255c135ead3687443d8693841022
699,945
def tokenize(text):
    """Naive tokenizer: lower-case the text, then split on whitespace.

    Swap in something more sophisticated if needed.
    """
    lowered = text.lower()
    return lowered.split()
ce7269ebfe3446c70346cef38a526393accda8b8
699,946
def route_not_found(error):
    """Error handler: echo the error object back as the response.

    :param error: the framework-supplied error object
    :return: the same error object, unchanged
    """
    return error
83b1f52de8ed92e8b8a833ba85ce725cbb5d3757
699,947
def clean_mentions(line):
    """Neutralise anything that could resolve to a mention.

    A zero-width space is inserted after every '@' so chat clients no
    longer treat the text as a mention.
    """
    # split/join is equivalent to str.replace("@", "@\u200b").
    return "@\u200b".join(line.split("@"))
b23ae6f9f0d54c5c2d210cf9f8a14732acd5b532
699,948
import time


def ctime():
    """Return the current local time as a ``YYYY-mm-dd HH:MM:SS`` string."""
    # time.localtime() with no argument already uses the current time.
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
57dfa3f2c9e4c7dbff51e18cdc63011df37a7241
699,949
import ast


def is_valid_ast(tree):
    """Check that *tree* is a compilable AST.

    :return: ``(True, None)`` on success, else ``(False, exception)``.
    """
    try:
        ast.fix_missing_locations(tree)
        compile(tree, "<test-ast>", "exec")
    except (SyntaxError, ValueError, TypeError) as exc:
        return (False, exc)
    else:
        return (True, None)
ede0c703e463d9bfbe15831e03de474d669aba44
699,950
from typing import List


def get_characters_surrounding_index(password: str, index: int) -> List[str]:
    """Collect the characters adjacent to *index* in *password*.

    :param password: the string to inspect
    :param index: position whose neighbours are wanted
    :return: list ``[before, after]``; either entry is omitted when it
        does not exist (index at the start or end of the string).
    """
    neighbours = []
    if index > 0:
        neighbours.append(password[index - 1])
    # EAFP: simply try the following character and ignore a miss.
    try:
        neighbours.append(password[index + 1])
    except IndexError:
        pass
    return neighbours
c78610300092d46fcc9ed171279f0025188cb98c
699,951
def get_reference_data(p):
    """Summarise the bibliographic data of an article from an ADS query.

    Returns dict of 'author' (list of strings), 'title' (string), and
    'ref' (string giving journal, first page, and year).  Missing
    fields degrade gracefully to placeholder values.
    """
    # The original used five bare ``except:`` clauses; each is narrowed
    # to the exceptions the lookups can actually raise.
    data = {}
    data['author'] = getattr(p, 'author', 'Anon')
    data['title'] = getattr(p, 'title', 'Untitled')
    refstring = getattr(p, 'pub', 'Unknown')
    try:
        refstring += f' {p.volume}, {p.page[0]}'
    except (AttributeError, IndexError, TypeError):
        pass
    try:
        refstring += f' ({p.year})'
    except (AttributeError, TypeError):
        pass
    data['ref'] = refstring
    return data
f8f792d15bade96881e87c3a7e1047dddc125d15
699,954
from functools import reduce


def build_stream_function(*funcs):
    """Compose stream-processing functions into a single pipeline.

    Each function takes exactly one argument, a dict ``d``, and the
    composed pipeline applies them left to right (first listed runs
    first).

    :param funcs: the stage functions
    :return: the composed function
    """
    def chain_two(first, second):
        # Run *first*, then feed its result into *second*.
        def chained(d):
            return second(first(d))
        return chained
    return reduce(chain_two, funcs)
0f09c3f39e8a469f60748a60da5013cfb2cc3758
699,955
import os


def is_locked(filepath):
    """Probe whether *filepath* is locked by opening it in append mode.

    :return: ``False`` when the open succeeds, ``True`` when it fails
        with ``IOError``, and ``None`` when the file does not exist.
    """
    if not os.path.exists(filepath):
        print(filepath, " not found.")
        return None
    locked = None
    file_object = None
    try:
        print("Trying to open", filepath)
        # Small fixed-size buffer; we only need the open to succeed.
        buffer_size = 8
        file_object = open(filepath, 'a', buffer_size)
        if file_object:
            print(filepath, "is not locked.")
            locked = False
    except IOError:
        print("File is locked (unable to open in append mode).")
        locked = True
    finally:
        if file_object:
            file_object.close()
    return locked
fb3aeed3f350c19c94a004c95faa9dea12d91ee5
699,956
def _parse_config_args(args): """ Parse stub configuration arguments, which are strings of the form "KEY=VAL". `args` is a list of arguments from the command line. Any argument that does not match the "KEY=VAL" format will be logged and skipped. Returns a dictionary with the configuration keys and values. """ config_dict = dict() for config_str in args: try: components = config_str.split('=') if len(components) >= 2: config_dict[components[0]] = "=".join(components[1:]) except: # lint-amnesty, pylint: disable=bare-except print(f"Warning: could not interpret config value '{config_str}'") return config_dict
6295f95ae798445d94ab4575bbb88a57b03b5df4
699,957
def execute(channel, command):
    """Run *command* on the remote side and capture its PID.

    The command is backgrounded and its shell PID echoed as ``PID=<n>``
    so it can be parsed from the first line of stdout.

    :return: tuple ``(pid, stdin, stdout, stderr)``
    """
    full_command = command + '& pid=$!; echo PID=$pid'
    stdin, stdout, stderr = channel.exec_command(full_command)
    first_line = stdout.readline()
    pid = int(first_line.replace("PID=", ""))
    return pid, stdin, stdout, stderr
ecefbef4d4ed7b8e954f688006e2356b6fc710ac
699,958
def add_pylivetrader_imports(code: str) -> str:
    """Prepend the pylivetrader wildcard-import header to *code*."""
    header = '\nfrom pylivetrader.api import * \r\n\n'
    return header + code
8077a28dff60e00c2f44fe2aaf5a226a5ae15c7c
699,959
import re


def get_name(str: str):
    """Turn a CamelCase class name into space-separated words.

    All-uppercase names (acronyms) are returned unchanged.
    """
    if str.isupper():
        return str
    # Insert a space before every capital, then collapse whitespace.
    spaced = re.sub(r"([A-Z])", r" \1", str)
    return " ".join(spaced.split())
414d7634459f25549275447fc124a509429fc1b5
699,960
def parse_flowcell_id(flowcell_id):
    """
    Return flowcell id and any status encoded in the id.

    We stored the status information in the flowcell id name;
    this was dumb, but database schemas are hard to update.
    """
    fields = flowcell_id.split()
    fcid = fields[0] if len(fields) > 0 else None
    status = fields[1] if len(fields) > 1 else None
    return fcid, status
621745aba31ed906e823177d0eebc33a8231843f
699,961
def proc_sched_b(dict_sched_b):
    """Process Part1 Interest and Part2 Ordinary Dividends of Sched B.

    Each ``name=value`` entry is summed into the part totals; Part1
    also derives line i4 as i_2 minus i3.
    """
    part1 = dict_sched_b["Part1_Interest"]
    part1["i_2"] = sum(int(entry.split("=")[1].strip()) for entry in part1["i1"])
    part1["i3"] = 0
    part1["i4"] = part1["i_2"] - part1["i3"]
    part2 = dict_sched_b["Part2_Ordinary_Dividends"]
    part2["i_6"] = sum(int(entry.split("=")[1].strip()) for entry in part2["i5"])
    return dict_sched_b
66d07b785c90b91f64539d49617f8f0be0f31c12
699,962
import re


def what_lang(filename):
    """Guess the language a file is written in from its extension."""
    langs = [('c|cc|cpp|h', 'c/c++'),
             ('java', 'java'),
             ('sh', 'sh'),
             ('pl', 'perl'),
             ('rb', 'ruby'),
             ('py', 'python'),
             ('xml', 'xml'),
             ('txt|md', 'txt')]
    for ext_pattern, language in langs:
        if re.match(r'.+\.(' + ext_pattern + r')$', filename):
            return language
    return 'default'
02316266746bfc59cbdc3ad4b84a8be667158109
699,963
def get_text(xml, tag):
    """Return the stripped text of the first *tag* child of *xml*.

    :param xml: an ElementTree element to search under
    :param tag: tag name (or path) passed to ``find``
    :return: the stripped text, or ``None`` when the tag is absent or
        has no text content.
    """
    elem = xml.find(tag)
    # Guard ``elem.text`` too: an empty element (<c/>) has text None and
    # the original crashed with AttributeError on .strip().
    if elem is not None and elem.text is not None:
        return elem.text.strip()
    return None
ece7c28a98f8bf61a3d182a2109875b6a031dbaa
699,964
from typing import Dict
from typing import Any
from typing import Iterable
from typing import Hashable


def get_in(
    dictionary: Dict[Any, Any],
    keys: Iterable[Hashable],
    default: Any = None,
) -> Any:
    """Walk nested dictionaries along *keys* and return the final value.

    ``get_in(d, [X, Y])`` behaves like ``d[X][Y]`` except that any
    missing key yields *default* instead of raising.  Behaviour on
    non-dictionaries is undefined.
    """
    path = list(keys)
    # Descend through all but the last key, bailing out on a miss.
    for step in path[:-1]:
        try:
            dictionary = dictionary[step]
        except KeyError:
            return default
    return dictionary.get(path[-1], default)
af8d88f7f0a9f8af6f201e6d626392aec3f94864
699,965
import pathlib
import os


def get_default_settings_file()->str:
    """Return the path to the default settings file (``config.env``)
    located next to this module.

    Returns:
        str: path to settings file
    """
    # Resolve relative to this source file so the cwd does not matter.
    root = pathlib.Path(__file__).parent.absolute()
    return os.path.join(root, 'config.env')
14fb9005e9b67b2cd1581a04f87ff8df1400c126
699,966
def _unique_in_order(seq): """ Utility to preserver order while making a set of unique elements. Copied from Markus Jarderot's answer at https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-whilst-preserving-order Args: seq : sequence Returns: unique_list : list List of unique elements in their original order """ seen = set() seen_add = seen.add return [x for x in seq if not (x in seen or seen_add(x))]
e02624dee4275d79dbce8dd67b07429ba3828e86
699,967
def extra_node_ids(a, b):
    """
    Return the set of 'id' attributes present in *a* but not in *b*.

    Elements without an 'id' attribute are ignored (the original raised
    KeyError on the first such element).

    :param etree.ElementTree a: ElementTree representing first SVG document.
    :param etree.ElementTree b: ElementTree representing second SVG document.
    :return: set of id strings unique to *a*.
    """
    ids_a = {el.attrib['id'] for el in a.iter() if 'id' in el.attrib}
    ids_b = {el.attrib['id'] for el in b.iter() if 'id' in el.attrib}
    return ids_a - ids_b
94728ab3600df1ab31bd2c72e4712eb3540baa5a
699,968
from typing import Iterable


def get_score(bits: Iterable[int]) -> int:
    """Score a bit sequence: +1 for each "1", -1 for each "0"."""
    total = 0
    for bit in bits:
        total += 1 if bit != 0 else -1
    return total
440e831ca4d02be57a3e394d268b95f7af5fa22e
699,969
import random


def _pathway_feature_permutation(pathway_feature_tuples,
                                 permutation_max_iters):
    """Permute the pathways across features for one side in the network.

    Used in `permute_pathways_across_features`

    Parameters
    -----------
    pathway_feature_tuples : list(tup(str, int))
        a tuple list [(pathway, feature)] where the pathway, feature
        pairing indicates that a pathway was overrepresented in that
        feature
    permutation_max_iters : int
        specify the maximum number of iterations, limit the number of
        attempts we have to generate a permutation

    Returns
    -----------
    list(tup(str, int)), the list of pathway, feature pairings after
    the permutation, or None when no valid permutation was found
    """
    # Split the (pathway, feature) pairs into two parallel lists.
    pathways, features = [list(elements_at_position)
                          for elements_at_position
                          in zip(*pathway_feature_tuples)]
    original_pathways = pathways[:]
    random.shuffle(pathways)
    # Maps feature -> (start, end) slice of its finished block in `pathways`.
    feature_block_locations = {}
    i = 0
    while i < len(pathways):
        starting_index = i
        current_feature = features[i]
        # Pathways already assigned to this feature's block; a block must
        # never contain the same pathway twice.
        pathway_set = set()
        # input is grouped by feature, so we want to keep track of the start
        # and end of a given "block" of the same feature--this corresponds
        # to all the pathways overrepresented in that feature.
        while i < len(pathways) and features[i] == current_feature:
            # check the results of the permutation. if `pathway_set` does
            # not contain the current pathway, we are maintaining the
            # necessary invariants in our permutation thus far.
            if pathways[i] not in pathway_set:
                pathway_set.add(pathways[i])
            else:
                # Duplicate inside the block: repair it by swapping with a
                # randomly chosen pathway that keeps all blocks valid.
                k = 0
                random_pathway = None
                while True:
                    # select another random pathway from the list
                    # and get the feature to which it belongs
                    j = random.choice(range(0, len(pathways)))
                    random_pathway = pathways[j]
                    random_feature = features[j]
                    if (random_pathway != pathways[i] and
                            random_pathway not in pathway_set):
                        # if this is a feature we have not already seen,
                        # we are done.
                        if random_feature not in feature_block_locations:
                            break
                        # otherwise, look at the indices that correspond
                        # to that feature's block of pathways
                        feature_block_start, feature_block_end = \
                            feature_block_locations[random_feature]
                        pathway_block = pathways[feature_block_start:
                                                 feature_block_end]
                        # make sure that the current pathway is not in
                        # that block--ensures that we maintain the invariant
                        # after the swap
                        if pathways[i] not in pathway_block:
                            break
                    k += 1
                    if k > permutation_max_iters:
                        print("Permutation step: reached the maximum "
                              "number of iterations {0}.".format(
                                  permutation_max_iters))
                        return None
                # Perform the repairing swap.
                pathway_set.add(random_pathway)
                pathways[j] = pathways[i]
                pathways[i] = random_pathway
            i += 1
        ending_index = i
        feature_block_locations[current_feature] = (
            starting_index, ending_index)
    # An unchanged ordering is not an acceptable permutation.
    if original_pathways == pathways:
        return None
    return list(zip(pathways, features))
98f65a6bedddfd44122f024f007fc29358a6ca96
699,970
def get_cli_fname(lon, lat, scenario=0):
    """Get the climate file name for the given lon, lat, and scenario.

    Coordinates are rounded to two decimals first; relying on further
    rounding in the formatting is problematic, so the integer directory
    components are simply truncated by ``%i``.
    """
    west = 0 - round(lon, 2)
    north = round(lat, 2)
    return "/i/%s/cli/%03ix%03i/%06.2fx%06.2f.cli" % (
        scenario, west, north, west, north,
    )
2310bcfc10c2ae5b80d67bed147625e365388fff
699,971
from pathlib import Path

import click


def update_file(
    path: Path,
    text: str,
    encoding: str,
    verbose: bool,
    quiet: bool,
    test_run: bool,
    in_path: Path,
    git_repo,
) -> bool:
    """Create or rewrite *path* with *text* when its content differs.

    Reports progress via click; returns True when the file was created
    or changed (no writes happen when *test_run* is set).
    """
    if not path.exists():
        changed = True
        if not test_run:
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(text, encoding=encoding)
            if git_repo is not None:
                # this is required, to ensure file creations are picked up by pre-commit
                git_repo.index.add([str(path)], write=True)
                if verbose:
                    click.echo(f"Added to git index: {str(path)}")
    elif text != path.read_text(encoding=encoding):
        changed = True
        if not test_run:
            path.write_text(text, encoding=encoding)
    else:
        changed = False
    if changed and not quiet:
        click.secho(f"Compiled: {str(in_path)} -> {str(path)}", fg="blue")
    if not changed and verbose:
        click.echo(f"Already Exists: {str(in_path)} -> {str(path)}")
    return changed
546943db9bc02bf27915521c93122ccc3e25dbe8
699,972
import json


def get_stooges(stooge_file):
    """Load the regular and psychedelic stooges from a JSON file."""
    with open(stooge_file) as handle:
        return json.load(handle)
afe1fc38886626ab7754f0229fde0531d36421a5
699,973
import os
import ctypes
from os.path import join, dirname, getmtime, exists, expanduser


def compile_speedup():
    """Tries to compile/link the C version of this module

    Like it really makes a huge difference. With a little bit of luck
    this should *just work* for you.

    You need:

    - Python >= 2.5 for ctypes library
    - gcc (``sudo apt-get install gcc``)

    Returns a pair ``(rgb_to_xterm, xterm_to_rgb)`` of callables backed
    by the compiled shared library.  Raises OSError when gcc fails.
    """
    # library = join(dirname(__file__), '_xterm256.so')
    library = expanduser('~/.xterm256.so')
    sauce = join(dirname(__file__), '_xterm256.c')
    # Rebuild only when the .so is missing or older than the C source.
    if not exists(library) or getmtime(sauce) > getmtime(library):
        build = "gcc -fPIC -shared -o %s %s" % (library, sauce)
        # Output is discarded; a non-zero exit code signals the failure.
        if (os.system(build + " >/dev/null 2>&1") != 0):
            raise OSError("GCC error")
    xterm256_c = ctypes.cdll.LoadLibrary(library)
    xterm256_c.init()
    def xterm_to_rgb(xcolor):
        # The C function packs r/g/b into one int: 0xRRGGBB.
        res = xterm256_c.xterm_to_rgb_i(xcolor)
        return ((res >> 16) & 0xFF, (res >> 8) & 0xFF, res & 0xFF)
    return (xterm256_c.rgb_to_xterm, xterm_to_rgb)
c91a1d249a1d6d082c1dbadecc9f764a6b6c2ff3
699,974
import os


def badge_path(sysname):
    """Build the filesystem path of the badge PNG for *sysname*."""
    filename = sysname + '.png'
    return os.path.join('static', 'badges', filename)
68d6f3f16b49cb5457464818264eaf9becd132fa
699,975
def mandel_number(x: float, y: float, n: int = 100) -> int:
    """Return the mandel-number of point (x, y).

    This is the smallest index of the mandel sequence at which
    u_n^2 + v_n^2 > 4, capped at *n* when the sequence does not diverge.

    Assumptions:
    * the sequence diverges when u_n^2 + v_n^2 > 4

    :param x: x-coordinate of the point for which the Mandel number is computed
    :param y: y-coordinate of the point for which the Mandel number is computed
    :param n: upper bound to detect divergence
    :return: the mandel-number of point (x, y).

    :examples:
    >>> mandel_number(1, 0)
    3
    >>> mandel_number(0, 0, n = 10)
    10
    """
    # The original was a stub returning 0, contradicting its doctests.
    u = 0.0
    v = 0.0
    for i in range(n):
        if u * u + v * v > 4:
            return i
        # One Mandelbrot step: z -> z^2 + c with z = u + iv, c = x + iy.
        u, v = u * u - v * v + x, 2 * u * v + y
    return n
9f4e7c0d8713146c55a9e0253a00f4127ad4269d
699,976
def conv_repoids_to_list(repo_ids):
    """Convert a newline-separated repo-id string into a list.

    Empty lines are dropped; a falsy input yields an empty list.
    """
    if not repo_ids:
        return []
    return [repo_id for repo_id in repo_ids.split("\n") if repo_id != '']
6a76a8ae4f565ac27839478f068f9e9a13276263
699,977
import itertools def _select_iterables(elements): """expand tables into individual columns in the given list of column expressions. """ return itertools.chain.from_iterable( [c._select_iterable for c in elements] )
12ecce590ab7d599e62b1105f7a405263cb7022c
699,979
def roundAllFloats(lista, l):
    """Round the numeric tail of each row to 3 decimals.

    The first ``l + 1`` entries of each row are copied untouched; values
    greater than 100 are first rounded to the nearest ten.
    """
    rounded_rows = []
    for row in lista:
        new_row = row[:l + 1]
        for value in row[l + 1:]:
            if value > 100.0:
                value = round(value, -1)
            new_row.append(round(value, 3))
        rounded_rows.append(new_row)
    return rounded_rows
47707449128215bc2288fc1b033f05a74eef51f4
699,980
def rename(term):
    """
    Re-format feature terms after they've been formated by the vectorizer.

    Parameters:
    ----------
    term : str
        Mutilated term in string format.

    Returns
    -------
    str
        The normalised term.
    """
    term = term.upper()
    # InterPro / Pfam accessions only need upper-casing.
    for marker in ('IPR', 'PF'):
        if marker in term:
            return term
    if 'GO' in term:
        # Re-insert the colon the vectorizer stripped, dedup if present.
        return term.replace("GO", 'GO:').replace('::', ':')
    return term
ec7f963ea37a0057f9a5b92ed3f4d9fc37167d17
699,981
def _get_pos_from_key(key, char): """ Returns a list of the indices where char appears in the string. Pass in a list of only the pulses no residuals (ie capital letters) +1 is because the pulses are index from 1. """ return [i+1 for i, c in enumerate(key) if c == char]
6870266a92db59bf3f5dd9f69211e13297321e7c
699,982
import os
import sys


def get_sample_names(infiles, ext, reads):
    """Derive sample names from input files.

    Strips the file extension *ext* and then one of the two read
    suffixes in *reads*; files matching neither suffix are warned
    about, and the run aborts when no sample matches at all.
    """
    samples = set()
    for infile in infiles:
        base = os.path.basename(infile)[:-len(ext)]
        if base.endswith(reads[0]):
            samples.add(base[:-len(reads[0])])
        elif base.endswith(reads[1]):
            samples.add(base[:-len(reads[1])])
        else:
            sys.stderr.write("Warning! {} does not have {} as its name suffix. "
                             "Either change it or modify the 'reads' in the "
                             "config.yaml to your deired ones.\n".format(base, reads))
    if sorted(list(samples)) == []:
        sys.exit("Error! No sample has the right read suffix ({}). "
                 "Please modify them or update the config.yaml with "
                 "your desired suffix.".format(reads))
    return sorted(list(samples))
e2f67a30338bb28ea7961ca92ba8b4436a5997a0
699,983
def bpstr(ts, accum):
    """Make a string representation of this breakpoint and accumulation.

    The minutes are expressed as hundredths of an hour (truncated),
    matching the original ``%02i`` formatting.
    """
    hundredths = int(ts.minute / 60.0 * 100.0)
    return f"{ts.hour:02d}.{hundredths:02d} {accum:6.2f}"
bd2ae124b5ef094ea7927124b86f100749bf0405
699,984
def sigma_lambda_to_Sigma(sigma, l, eps2=0):
    """Map the (sigma, lambda) parameterisation to Sigma.

    Parameters
    ----------
    sigma : array, shape (m, k)
    l : array, shape (m, k)
        the lambda parameter
    eps2 : float, optional
        stabiliser added to ``l ** 2`` in the denominator

    Returns
    -------
    Sigma : array, shape (m, k)
        ``sigma**2 / (m * (l**2 + eps2))`` where ``m = len(l)``
    """
    m = len(l)
    denominator = m * (l ** 2 + eps2)
    return sigma ** 2 / denominator
808b970802938cd3f30187bea993cfe1ae1933c5
699,985
import os


def download_local(name: str, data_dir: str):
    """
    Get path to a previously-downloaded local version of the corpus
    (which may be an older version).

    :param name: name of Corpus
    :param data_dir: custom directory to look in instead of the default
        download location (``~/.convokit/downloads``); may be None
    :return: string path to local Corpus

    Raises FileNotFoundError when no local copy can be located.
    """
    custom_data_dir = data_dir
    data_dir = os.path.expanduser("~/.convokit/")
    #pkg_resources.resource_filename("convokit", "")
    if not os.path.exists(data_dir):
        raise FileNotFoundError("No convokit data directory found. No local corpus version available.")
    if not os.path.exists(os.path.join(data_dir, "downloads")):
        raise FileNotFoundError("Local convokit data directory found, but no downloads folder exists. No local corpus version available.")
    dataset_path = os.path.join(data_dir, "downloads", name)
    if custom_data_dir is not None:
        dataset_path = os.path.join(custom_data_dir, name)
    if not os.path.exists(os.path.dirname(dataset_path)):
        os.makedirs(os.path.dirname(dataset_path))
    dataset_path = os.path.realpath(dataset_path)
    # downloaded.txt records "name$#$path$#$version" lines for each
    # corpus that was fetched previously.
    downloadeds_path = os.path.join(data_dir, "downloads", "downloaded.txt")
    if not os.path.isfile(downloadeds_path):
        raise FileNotFoundError("downloaded.txt is missing.")
    with open(downloadeds_path, "r") as f:
        downloaded_lines = f.read().splitlines()
    downloaded = {}
    downloaded_paths = {}
    for l in downloaded_lines:
        dname, path, version = l.split("$#$")
        version = int(version)
        # NOTE(review): the guard tests `dname` but the assignment keys
        # on the tuple `(dname, path)`, so `dname not in downloaded` is
        # always True and the last line for a name wins regardless of
        # version -- confirm whether version comparison was intended.
        if dname not in downloaded or downloaded[dname] < version:
            downloaded[dname, path] = version
            downloaded_paths[dname] = path
            if custom_data_dir is None and name == dname:
                dataset_path = os.path.join(path, name)
    # print(list(downloaded.keys()))
    if (name, os.path.dirname(dataset_path)) not in downloaded:
        raise FileNotFoundError("Could not find corpus in local directory.")
    print("Dataset already exists at {}".format(dataset_path))
    dataset_path = os.path.join(downloaded_paths[name], name)
    return dataset_path
ce07f445961fb9f4878782db51c2bf2d15ce4897
699,986
import numpy


def one_body_basis_change(one_body_tensor, rotation_matrix):
    """Change the basis of a 1-body interaction tensor such as the 1-RDM.

    Computes M' = R^T.M.R where R is the rotation matrix, M is the
    1-body tensor and M' is the transformed 1-body tensor.

    Args:
        one_body_tensor: square array holding the 1-body interaction
            tensor (e.g. the 1-RDM).
        rotation_matrix: square array of shape n_qubits x n_qubits,
            assumed real and invertible.

    Returns:
        transformed_one_body_tensor: one_body_tensor in the rotated basis.
    """
    n_orbitals = rotation_matrix.shape[0]
    # If the operator acts on spin degrees of freedom, enlarge the
    # rotation matrix so it acts identically on both spin sectors.
    if one_body_tensor.shape[0] == 2 * n_orbitals:
        rotation_matrix = numpy.kron(rotation_matrix, numpy.eye(2))
    # R^T M R, written with plain matrix products instead of einsum.
    return rotation_matrix.T.dot(one_body_tensor).dot(rotation_matrix)
6082a7ca5620dd7e00a545e86867d85b254b4304
699,987
import math


def CalculateDistanceT92(info):
    """Compute the Tamura (1992) evolutionary distance and its variance.

    P,Q: transitions, transversions frequencies

    q: G+C content

    d = -2q(1 - q)loge(1 - P/[2q(1 - q)] - Q)
        -[1 -2q(1 -q)]loge(1 - 2Q)/2,(4.18)

    V(d) = [c12P + c32Q - (c1P + c3Q)2]/n,(4.19)

    where c1 = 1/(1 - P/[2q(1 - q)] - Q), c2 = 1/(1 - 2Q),
    c3 = 2q(1 - q)(c1 - c2) + c2, and q is the G+C content

    Note: result is undefined if the number of transversions is >= 0.5
    the G+C content is 0

    Returns (distance, variance); (-1, -1) flags an undefined result.

    NOTE(review): `info` is assumed to expose getGCContent(),
    mNTransitions, mNTransversions and mNAligned -- confirm against the
    alignment-info class used by callers.
    """
    gc = info.getGCContent()
    # if there are no GC or no AT pairs: result is undefined
    if gc == 0 or gc == 1:
        return -1, -1
    wg = 2.0 * gc * (1.0 - gc)
    P = float(info.mNTransitions) / info.mNAligned
    Q = float(info.mNTransversions) / info.mNAligned
    # Arguments of the logarithms must stay positive for d to exist.
    a1 = 1.0 - P / wg - Q
    if a1 <= 0:
        return -1, -1
    a2 = 1.0 - 2.0 * Q
    if a2 <= 0:
        return -1, -1
    # print a1, a2, wg, gc, "p=", P, "q=", Q, str(info)
    distance = -wg * math.log(a1) - 0.5 * (1.0 - wg) * math.log(a2)
    c1 = 1 / a1
    c2 = 1 / a2
    c3 = wg * (c1 - c2) + c2
    # Equation (4.19): delta-method variance of the distance estimate.
    variance = (
        c1 * c1 * P + c3 * c3 * Q - math.pow(c1 * P + c3 * Q, 2.0)) / info.mNAligned
    return distance, variance
90242b905283785524d6b96682abc854346b2d11
699,988
import requests
from datetime import datetime, timedelta


def update_data_covid_states(cursor):
    """
    Summary:
        Adds to the table "covid_states" daily Covid data for each
        Brazilian state.

        * First sets the API base URL and makes a request for
          yesterday's report.
        * Then loops over the JSON response, inserting one row per
          state into the "covid_states" table.

    Args:
        cursor (Database cursor): Cursor of the connection with the database
    """
    #! Define API URL and parameters and make the request
    # Base Api Url
    API_URL = "http://covid-api.com/api/reports"
    # Identifies yesterday's date and formats it to a format that can be used in the API query ("YYYY-mm-dd")
    date = str((datetime.today() - timedelta(days=1)))[0:10]
    # Makes a request in the API for the date and returns a JSON with information about all states from that date
    query = {"date": date, "q": "Brazil", "region_name": "Brazil", "iso": "BRA"}
    json = requests.request("GET", API_URL, params=query).json()["data"]
    #! Define which data of which line
    # NOTE(review): hard-codes 27 entries (26 states + DF); an API
    # response with fewer rows raises IndexError -- confirm upstream.
    for i in range(0, 27):
        state = json[i]["region"]["province"]
        confirmated = json[i]["confirmed"]
        deaths = json[i]["deaths"]
        last_update = json[i]["last_update"]
        confirmated_difference = json[i]["confirmed_diff"]
        deaths_difference = json[i]["deaths_diff"]
        #! Add the Covid data of the state
        # NOTE(review): the f-string interpolates API data straight into
        # SQL -- an injection risk; switch to a parameterized
        # cursor.execute(sql, params) form for this DB driver.
        cursor.execute(
            f"INSERT INTO covid_states VALUES ('{state}', '{date}', {confirmated}, {confirmated_difference}, {deaths}, {deaths_difference}, '{last_update}')")
    return None
6266390dd79275e1428ee9d432952d58efe8c743
699,989
def linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr):
    """Linearly interpolate the learning rate during warmup.

    Rises from *init_lr* at step 0 to *base_lr* at *warmup_steps*.
    """
    slope = (float(base_lr) - float(init_lr)) / float(warmup_steps)
    return float(init_lr) + slope * current_step
0ea43c8cf815d25d8caf4d3e5ee8f0f027c5cd41
699,990
import pickle


def unpickle(filename):
    """
    Parse CIFAR10 data.

    Return a dict containing {data, filenames, labels, batch_label}.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle, encoding='bytes')
48cb766df6ffc0e448d1c4937a5097bae83c8b78
699,991
def syntactic_roles_to_semantic_match(syntactical_sentence, voice_is_active=True):
    """Select which elements of a syntactical sentence match the verb,
    agent and patient, respectively.

    Args:
        syntactical_sentence: a list of tuples (symbol, attributes)
        voice_is_active(bool): when True the agent matches the first
            noun phrase and the patient the second; when False the
            roles of the two noun phrases are swapped.

    Returns:
        ``((verb, agent, patient), (verb_idx, agent_idx, patient_idx))``
        where any role absent from the sentence is None.
    """
    first_np = first_np_index = None
    second_np = second_np_index = None
    verb = verb_index = None
    for position, (symbol, attributes) in enumerate(syntactical_sentence):
        if symbol == 'NP':
            if first_np is None:
                first_np, first_np_index = (symbol, attributes), position
            elif second_np is None:
                second_np, second_np_index = (symbol, attributes), position
        elif symbol == "V" and verb is None:
            verb, verb_index = (symbol, attributes), position
    if voice_is_active:
        return (verb, first_np, second_np), \
            (verb_index, first_np_index, second_np_index)
    return (verb, second_np, first_np), \
        (verb_index, second_np_index, first_np_index)
84ade39bf48bcc0b065680ac2094da76bde0a5d5
699,993
import requests
import sys


def access_data_from_guardian():
    """Request COVID-19 news data from the Guardian REST API v1.

    **While there has been an api package for the Guardian, our package
    focuses on the news on the Guardian related to covid.  None of the
    code is copied/paraphrased from the existing package; any
    similarity is coincidence.

    First discovered in January 2020, we have suffered from COVID-19
    for almost a whole year, so it is of great urgency to continuously
    update our knowledge about the virus.  The API only returns 9
    results at a time for public users; contact the Guardian office
    for more.  Instead of exposing a personal api key, the "test" key
    is used here.

    ----------
    Parameters (baked into the request URL):
        key word q: covid
        api key: can acquire a key by registering an account

    Returns the parsed JSON response; exits the process on failure.
    """
    r = requests.get('https://content.guardianapis.com/search?from-date=2019-12-31&to-date=2020-12-01&q=covid&api-key=test')
    if r.status_code != 200:
        print("fail to access to API")
        # Aborts the whole process on any non-200 response.
        sys.exit()
    elif r.status_code == 200:
        print("successful!")
        data_json = r.json()
        return(data_json)
47d8374d34e2d06c91f13cca55cf2da4f85c7f2b
699,994
import subprocess


def is_valid_tar_gz(file_path: str):
    """Check archive integrity by running ``gunzip -t`` on the file.

    Returns True only when gunzip exits successfully; any failure
    (bad archive, missing file, missing gunzip binary) yields False.
    """
    try:
        exit_code = subprocess.call(['gunzip', '-t', file_path])
    except BaseException:
        return False
    return exit_code == 0
abb1495e213d297e8d687b4166de85192b3a3b40
699,995
def get_tool_study_max_java_heap_size():
    """
    Some of the tools allow you to specify the java heap size.  We want
    to ensure all of the tools use the same heap size (if they allow it
    to be specified), so the run_analysis scripts should use this
    method to retrieve the size.
    """
    heap_size = "4096m"
    return heap_size
8d180d76052bacdbc7350cc2efacc0a55acdb29e
699,996
import csv


def openCSVfile(filepath, delimiter = ","):
    """Read a CSV file and return its rows as a list of lists."""
    with open(filepath, "r") as handle:
        reader = csv.reader(handle, delimiter = delimiter)
        # Materialise before the file closes.
        return list(reader)
5d8beda891862281976ec48ea117d3c768a553a6
699,998
def find_direction(start, end):
    """Find the direction code (1-8) from *start* to *end*.

    5/1 are straight along the y axis, 3/7 along the x axis, and
    2/4/6/8 are the diagonals.
    """
    if start[0] == end[0]:
        return 5 if start[1] < end[1] else 1
    if start[1] == end[1]:
        return 3 if start[0] < end[0] else 7
    if start[0] < end[0]:
        return 4 if start[1] < end[1] else 2
    return 6 if start[1] < end[1] else 8
ff282de669832159d236cd5fe805b1832b990bb6
699,999
def load_lookup(data):
    """Build an MSOA-keyed lookup dict from a dataframe of output areas.

    Each row is flattened into a dict of the demographic and premises
    fields, keyed by its 'msoa' value.
    """
    fields = [
        'lad', 'region', 'population', 'area_km2', 'pop_density_km2',
        'geotype', 'households', 'prems_residential',
        'prems_residential_floor_area', 'prems_residential_footprint_area',
        'prems_non_residential', 'prems_non_residential_floor_area',
        'prems_non_residential_footprint_area',
    ]
    output = {}
    for idx, row in data.iterrows():
        output[row['msoa']] = {field: row[field] for field in fields}
    return output
77e3b88b1d4a270860b4ce328fd1e3d53f3af45a
700,000
def day_date(src):
    """
    Returns the date string for the given day.

    NOTE(review): despite the summary above, this returns the first
    ``h4`` *element* (an lxml node), not its text -- confirm callers
    expect a node, or append ``.text`` here.

    :param src: lxml element for one day's section (must support xpath)
    :return: the first ``<h4>`` child element
    """
    return src.xpath('./h4[1]')[0]
f71343c8f63941e0ab4045e2d94febf4aaebfa5b
700,001
def one(n = 10):
    """
    A strange function with an ambiguous name: accumulates 1/n exactly
    n times, so the result is ~1.0 up to float rounding (exactly 1.0
    when 1/n is a binary fraction).

    Parameters
    ----------
    :param n: int
        the number of points to test
    """
    # Removed the unused local `denom` and stopped shadowing the
    # builtin `sum`; the accumulation order is unchanged.
    total = 0.0
    for _ in range(n):
        total += 1 / n
    return total
2b8ce5aa198ad1139165fbabf02b89f2d9f7cf73
700,002
from typing import Tuple
import torch
from typing import List


def long_tensor_2d(shape: Tuple[int, int], fill_value: int = 0) -> torch.Tensor:
    """Return a new 2d torch.LongTensor with size according to shape,
    every entry set to fill_value.

    Built row by row with ``torch.jit.annotate`` so the function stays
    TorchScript-compatible.
    """
    rows = torch.jit.annotate(List[List[int]], [])
    for _row in range(shape[0]):
        row = torch.jit.annotate(List[int], [])
        for _col in range(shape[1]):
            row.append(fill_value)
        rows.append(row)
    return torch.tensor(rows, dtype=torch.long)
c1aaeb01058b9153c31e911f31eae6217f03eb64
700,003
def _pad_digits(text: str) -> str: """A str method with hacks to support better lexicographic ordering. The output strings are not intended to be human readable. The returned string will have digit-runs zero-padded up to at least 8 digits. That way, instead of 'a10' coming before 'a2', 'a000010' will come after 'a000002'. Also, the original length of each digit-run is appended after the zero-padded run. This is so that 'a0' continues to come before 'a00'. """ was_on_digits = False last_transition = 0 chunks = [] def handle_transition_at(k): chunk = text[last_transition:k] if was_on_digits: chunk = chunk.rjust(8, '0') + ':' + str(len(chunk)) chunks.append(chunk) for i in range(len(text)): on_digits = text[i].isdigit() if was_on_digits != on_digits: handle_transition_at(i) was_on_digits = on_digits last_transition = i handle_transition_at(len(text)) return ''.join(chunks)
7e842669747919a3bbc9fd40e45a4bfc7641cc3a
700,004
from typing import List


def get_missing_settings(settings_class) -> List[str]:
    """Used to validate required settings.

    Verifies that all attributes which don't start with ``_`` and aren't
    named in ``_optional_settings`` are not set to None.

    Args:
        settings_class: The global settings class to validate settings on.

    Returns:
        List of setting names that should not be ``None``. If the list is
        empty then all required settings are defined.
    """
    optional = getattr(settings_class, "_optional_settings", ())
    return [
        name
        for name in vars(settings_class)
        if not name.startswith("_")
        and name not in optional
        and getattr(settings_class, name) is None
    ]
efbb2dc3078fc5221e8ce327b078d42eff167d65
700,005
def _check_equal_list(iterator): """ Check that all elements in list are equal """ return len(set(iterator)) <= 1
94bd94a203819965d95105e7f978ecb496ce97bc
700,006
import hashlib
import os


def calc_firmware_sha(file):
    """Hash a firmware image as if zero-padded to a 64-byte boundary.

    Opens the firmware image, determines its size, rounds the size up to
    a multiple of 64 bytes and computes the SHA256 of the contents plus
    the zero padding (padding bytes are hashed, never written to disk).
    Progress is reported on stdout, matching the original behavior.

    :param file: path to the firmware image
    :return: tuple ``(padded_size, padding_bytes, sha256_digest)``
    """
    with open(file, 'rb') as firmware:
        # Size via seek-to-end, so the file is only read once below.
        firmware.seek(0, 2)
        size = firmware.tell()

        # Round up to the next 64-byte boundary.
        padding = size % 64
        if padding != 0:
            padding = 64 - padding
        size += padding

        firmware.seek(0, 0)
        sha256 = hashlib.sha256()
        for block in iter(lambda: firmware.read(4096), b""):
            sha256.update(block)
        # Fix: the original called firmware.close() inside the `with`
        # block; the context manager already closes the handle.

    if padding != 0:
        sha256.update(b'\0' * padding)
        print('Firmware (%s): %d bytes, will be padded to %d bytes.' %
              (os.path.basename(file), size - padding, size))
    else:
        print('Firmware file size: %d bytes.' % size)
    print('SHA: ' + sha256.hexdigest())
    return (size, padding, sha256.digest())
17e0ce8c6c8d2e8cdb5b9bc259087bc4677924ca
700,007
def save_plot(code, elem):
    """Converts matplotlib plots to tikz code.

    If elem has either the plt attribute (format: plt=width,height) or the
    attributes width=width and/or height=height, the figurewidth and
    -height are set accordingly. If none are given, a height of 4cm and a
    width of 6cm is used as default.

    Args:
        code: The matplotlib code.
        elem: The element.

    Returns:
        The code and some code to invoke matplotlib2tikz.
    """
    def _attr_or(name, fallback):
        # Mirrors the original try/except: only a missing key falls back.
        try:
            return elem.attributes[name]
        except KeyError:
            return fallback

    if 'plt' in elem.attributes:
        figurewidth, figureheight = elem.attributes['plt'].split(',')
    else:
        figureheight = _attr_or('height', '4cm')
        figurewidth = _attr_or('width', '6cm')
    return f"""import matplotlib
matplotlib.use('TkAgg')
{code}
from matplotlib2tikz import get_tikz_code
tikz = get_tikz_code(figureheight='{figureheight}', figurewidth='{figurewidth}') # noqa
print(tikz)"""
8d2f2ecb6b750eff98d1e17ce553f1613d411216
700,008
def find_layer(model, layer_class):
    """Recursively collect all layers in *model* that are instances of
    *layer_class*.

    Anything exposing a ``layers`` attribute (e.g. nested sub-models) is
    searched depth-first; matches are returned in traversal order.
    """
    found = []
    for candidate in model.layers:
        if isinstance(candidate, layer_class):
            found.append(candidate)
        elif hasattr(candidate, "layers"):
            # Descend into nested containers.
            found.extend(find_layer(candidate, layer_class))
    return found
d240d916f26e087edb7ccef8006b91b9c539bd76
700,009
import importlib


def connect(settings):
    """Connect to a database.

    Imports the DB-API driver module named by ``settings['driver']``
    (default ``sqlite3``) and calls its ``connect`` with the remaining
    entries as keyword arguments.

    Note: pops 'driver' from *settings*, mutating the caller's dict.
    """
    driver_name = settings.pop("driver", "sqlite3")
    driver = importlib.import_module(driver_name)
    return driver.connect(**settings)
cd186b3210b1ec012539dfc4a5306834dc9bf058
700,010
def getJ1939ProtocolDescription(protocol: int) -> str:
    """Map a J1939 protocol format number to a human-readable description.

    Feed the result of RP1210Config.getJ1939FormatsSupported() into this
    function to get a description of what the format means. Unknown
    numbers yield the "invalid" message.
    """
    descriptions = {
        1: "Variable J1939 baud rate. Select 125, 250, 500, 1000, or Auto.",
        2: "General default for J1939 baud rate (250k baud).",
        3: "Driver uses SampleLocation to calculate parameters.",
        4: "Baud formula derived from BOSCH CAN specification.",
        5: "Baud formula derived from Intel implementations.",
    }
    return descriptions.get(protocol, "Invalid J1939 protocol format selected.")
a52c48930d45c04f0570689620d2b01783f27e38
700,012
import json
import subprocess


def nvme_id_ctrl(device):
    """Identify an NVMe controller.

    Runs ``sudo nvme id-ctrl <device> -o json`` and returns the parsed
    JSON output. Raises subprocess.CalledProcessError if the command
    fails (check=True).
    """
    completed = subprocess.run(
        "sudo nvme id-ctrl {0} -o json".format(device),
        shell=True,
        check=True,
        text=True,
        capture_output=True,
    )
    return json.loads(completed.stdout)
40d739bb00b474cf27d0839fe421ec39a324c7ed
700,013
def in_region(pos, regions):
    """Find whether a position is included in a region.

    Parameters
    ----------
    pos : int
        DNA base position.
    regions : list of tuples
        List of (start, end) position integers.

    Returns
    -------
    bool
        True if the position is within any of the regions (bounds
        inclusive), False otherwise.

    Examples
    --------
    >>> in_region(1, [])
    False
    >>> in_region(10, [(3, 5), (9, 12)])
    True
    >>> in_region(10, [(3, 5), (11, 12)])
    False
    """
    return any(start <= pos <= end for start, end in regions)
07154584fe3fadf93f16bf858810e4484828eb31
700,014
import sys def _implementation_version(): """Return implementation version.""" return ''.join(map(str, sys.version_info[:2]))
ccc6150871b0efcf6cd9281c5297cd53483ab307
700,015
def _hue_process_transition_time(transition_seconds): """ Transition time is in 1/10th seconds and cannot exceed MAX_TRANSITION_TIME. """ # Max transition time for Hue is 900 seconds/15 minutes return min(9000, transition_seconds * 10)
69f822d2836fef91206e2845d6d3bbd7623c03fc
700,016
def zero_lpad(number, length):
    """Pad *number* with zeros on the LEFT to at least *length* characters.

    :param number: number to be padded
    :param length: minimum length of the result string
    :return: left-zero-padded string form of *number*
    """
    # Bug fix: the original PY3 branch used str.ljust, which pads on the
    # RIGHT and contradicts the function name, docstring, and the PY2
    # branch. The '0>' format spec left-pads on every supported Python
    # version, so the six.PY3 split (and the six dependency) is dropped.
    return '{number:0>{length}}'.format(number=number, length=length)
d8682249a7094e2c0de9ff015df3c1c7525b7427
700,017
def line0(x, a):
    """
    Straight line through origin: a*x

    Parameters
    ----------
    x : float or array_like of floats
        independent variable
    a : float
        slope of the line

    Returns
    -------
    float
        function value(s)
    """
    return x * a
247a9ac56418ec34089bab0d9a914c69eb216f31
700,021
import json


def to_json(msg):
    """Pretty-print a dict as a JSON string.

    Non-ASCII characters are emitted as-is (ensure_ascii=False) and
    nesting is indented by two spaces.
    """
    return json.dumps(msg, indent=2, ensure_ascii=False)
e10bf04ce54482f1892aa7a7452a7004024ea6d4
700,022
def get_min_and_max_velocity_values(velocities):
    """Return the minimum and maximum velocity values as a tuple.

    :param velocities: array-like exposing ``min()``/``max()`` methods
        (e.g. a numpy array)
    :return: tuple ``(min_value, max_value)``
    """
    lowest = velocities.min()
    highest = velocities.max()
    return (lowest, highest)
f9a2995bec07d129c34581d55a070a4004b8436c
700,023
def shape(a):
    """Return (rows, cols) of a list-of-rows matrix.

    The column count is taken from the first row; an empty matrix
    yields (0, 0).
    """
    num_rows = len(a)
    num_cols = len(a[0]) if num_rows else 0
    return num_rows, num_cols
79c32a5f09ecbea4849929a3f6f627068198953f
700,024
def read_txt_file(file, is_list):
    """
    @summary - read from a text file.
    @description - read a text file and return its contents either as a
        raw string or, if is_list is True, as a list produced by
        stripping trailing '\\n' characters and splitting on ';'.
    @author - mah60
    @param - file - string - file path of the text file to read.
    @param - is_list - boolean - if True return a list split on ';',
        otherwise the raw string.
    @return - str or list - the file contents (the original docstring
        incorrectly said None).
    """
    # `with` guarantees the handle is closed (the original leaked it);
    # the original's `mode == "r"` check was always true and is dropped.
    with open(file, "r") as file_read:
        contents = file_read.read()
    if is_list:
        return contents.rstrip('\n').split(';')
    return contents
cad2d80c107f165cb55b23cb71f88c228146e475
700,025
import inspect def _filter_module_all(path, root, children): """Filters module children based on the "__all__" arrtibute. Args: path: API to this symbol root: The object children: A list of (name, object) pairs. Returns: `children` filtered to respect __all__ """ del path if not (inspect.ismodule(root) and hasattr(root, '__all__')): return children module_all = set(root.__all__) children = [(name, value) for (name, value) in children if name in module_all] return children
9cbc86a6a0321722910fb2f48b8857d5d6488511
700,026
def contains_pattern(input: str, pattern: str) -> bool:
    """Check if the `input` contains all signals of the given `pattern`.

    Asserts that the input is at least as long as the pattern.
    """
    assert len(input) >= len(pattern)
    # Generator form instead of a materialized list; semantics unchanged.
    return all(signal in input for signal in pattern)
0fd2b5d35145fe21f855358c061995e04ad695a9
700,027
def _prune_dict_null_str(dictionary): """ Prune the "None" or emptry string values from dictionary items """ for key, value in list(dictionary.items()): if value is None or str(value) == "": del dictionary[key] if isinstance(value, dict): dictionary[key] = _prune_dict_null_str(dictionary[key]) return dictionary
c6e408f3b3a3d2bafc77fdb1c106bd5ea3a27dbb
700,028
from typing import Collection
from typing import Tuple
from typing import List


def decode_access_list(
    access_list: Collection[Tuple[bytes, Collection[bytes]]]
) -> List[Tuple[bytes, Tuple[int, ...]]]:
    """Decode an access list into friendly Python types.

    Each entry's storage slots are converted from big-endian bytes to
    ints; an empty or falsy access list yields [].
    """
    if not access_list:
        return []
    return [
        (address, tuple(int.from_bytes(slot, "big") for slot in slots))
        for address, slots in access_list
    ]
18d24f3a6f2b8c88ff47383e8586588a00d15905
700,029
import os
import re


def skey(a):
    """Complex sorting hack: elki > elki-core > elki-* > others.

    Returns a (rank, basename) tuple usable as a sort key.
    """
    name = os.path.basename(a)
    # Ordered from most to least specific prefix.
    priorities = (
        (r"^elki-[0-9]", -10),
        (r"^elki-core-", -5),
        (r"^elki-", -1),
    )
    for pattern, rank in priorities:
        if re.search(pattern, name):
            return (rank, name)
    return (0, name)
f5bc3aceaf018e699b1979fa4eba603a6ea89750
700,030
import os


def EnvArray():
    """Return the environment as a list of "KEY=VALUE" strings.

    Improvements over the original: os.environ.items() pairs each key
    with its own value in a single pass (instead of separate keys()/
    values() iterations), and a concrete list is returned rather than a
    lazy ``map`` object — backward compatible, since a list is iterable.
    """
    return [key + "=" + value for key, value in os.environ.items()]
c9b14dcb26a299db597ea35435de7eee2dca703e
700,031
def recursive_fibonacci(i, seq=None):
    """Fibonacci lookup (iterative form of the recursive original).

    Extends *seq* (defaults to [0, 1]; a caller-supplied list is mutated,
    as before) until it holds at least ``i`` entries, then returns
    ``(i, seq[i - 1], seq[:i])``.
    """
    if seq is None:
        seq = [0, 1]
    while i > len(seq):
        seq.append(seq[-1] + seq[-2])
    return i, seq[i - 1], seq[:i]
385cac4df9ca3a47ed2968a27601a07cd80e9908
700,032
def strToRange(s):
    """Convert a comma-separated string of integers into a list of ints."""
    return list(map(int, s.split(',')))
970a3a76d72187808aecafecbdcf35ca21e5f781
700,033
def nbest_oracle_eval(nbest, n=None):
    """Return the evaluation object of the best sentence in the nbest list."""
    oracle = nbest.oracle_hyp(n=n)
    return oracle.eval_
fe7641f6ccbaae7d85f620f4772e3a8b506880f5
700,034
def reaction_class_from_data(class_typ, class_spin, class_radrad, class_isc):
    """ Build a full-class description including the following useful
        descriptors of the reaction class:

            typ: type of reaction (e.g., abstraction, addition, migration)
            spin: whether the reaction occurs on a low- or high-spin state
            radrad: whether the reactants or products are two-or-more radicals

        :param class_typ: reaction type designation
        :type class_typ: str
        :param class_spin: spin type designation
        :type class_spin: str
        :param class_radrad: radical-radical type designation
        :type class_radrad: bool
        :param class_isc: intersystem-crossing designation
        :type class_isc: bool
        :rtype: (str, str, bool, bool)
    """
    classification = (class_typ, class_spin, class_radrad, class_isc)
    return classification
7ab3b7713c252e4dc3f2f9410d0021f24141a901
700,036
import pickle


def get_one_hot_encodings(filepath='../data/one-hot.pkl'):
    """Load the pickled one-hot encodings of the Quran verses, along
    with mappings of characters to ints.

    :param filepath: path to the pickled one-hot encoding file
    :return: the unpickled object
    """
    with open(filepath, 'rb') as pickle_file:
        return pickle.load(pickle_file)
f255ba44018ae1d38694ba12ad9e733ac4fb433f
700,037
def otherline_from_line(line_dict, filing_number, line_sequence, is_amended, filer_id):
    """Annotate a parsed filing line dict with filing-level metadata,
    in place, and return it.

    http://initd.org/psycopg/docs/extras.html#hstore-data-type
    """
    # Some lines have illegal transaction ids -- longer than 20 characters
    # -- so truncate them. Summary lines (F3S) carry no transaction id at
    # all, which is fine.
    if 'transaction_id' in line_dict:
        line_dict['transaction_id'] = line_dict['transaction_id'][:20]

    line_dict['superseded_by_amendment'] = is_amended
    line_dict['line_sequence'] = line_sequence
    line_dict['filing_number'] = filing_number
    line_dict['filer_committee_id_number'] = filer_id

    # Text records use rec_type instead of form.
    if 'rec_type' in line_dict:
        line_dict['form_type'] = line_dict['rec_type']
    return line_dict
2afe0981de5102d80307bdaa99fae0b42ef90c88
700,038
def classifier(density):
    """Classify rocks with secret algorithm.

    Raises ValueError for non-positive densities.
    """
    if density <= 0:
        raise ValueError('Density cannot be zero or negative.')
    # Highest threshold first so the first match wins.
    for cutoff, name in ((2750, 'granite'), (2400, 'sandstone')):
        if density >= cutoff:
            return name
    return 'not a rock'
17eaba4ec43effb7bdb90432fddd55c6f2bf163a
700,039
def get_label(Z, V):
    """
    Transforms Z_arr into the desired form. Also works if data set is not
    contiguous.

    Rewrites each merge row of Z in place so that its two entries become
    the (min, max) of the CURRENT labels of the original pair, then
    records a new label V + i for the second member of the pair.

    :param Z: 2-column array of merge pairs (linkage-style rows);
        modified in place and also returned. NOTE(review): assumes Z
        supports numpy-style 2d indexing and ``.shape`` -- confirm it is
        an ndarray (storing ints into a float array truncates is fine
        here since the values are whole numbers).
    :param V: number of initial labels; 0..V-1 start mapped to themselves.
    :return: the same array Z, relabeled in place.
    """
    xlim = Z.shape[0]
    # Identity mapping for the V initial labels; merged pairs are given
    # fresh labels V + i as rows are processed.
    dict_labels = dict(zip(range(V), range(V)))
    for i in range(xlim):
        Z1 = int(Z[i,0])
        Z2 = int(Z[i,1])
        Z[i,0] = min(dict_labels[Z1], dict_labels[Z2])
        Z[i,1] = max(dict_labels[Z1], dict_labels[Z2])
        # NOTE(review): only the SECOND member's label is remapped to the
        # new cluster id; Z1 keeps its previous mapping. Verify this
        # asymmetry is the intended relabeling scheme.
        dict_labels[Z2] = int(V + i)
    return Z
e51d21a7f3be8a99bc09e309b944ca0478eb8f69
700,040
def flatten(lst):
    """Flatten one level of nesting: concatenate the sublists of lst."""
    flat = []
    for sublist in lst:
        flat.extend(sublist)
    return flat
5835f05ca6b098c096fdb2bbface034a3c3bee26
700,041
import random


def extract_words(text, word_count):
    """
    Extract a list of words from a text in sequential order.

    :param text: source text, tokenized (a sequence of words)
    :param word_count: number of words to return
    :return: list
        list of consecutive words, starting at a random offset
    :raises RuntimeError: if word_count exceeds the text length
    """
    text_length = len(text)
    if word_count > text_length:
        raise RuntimeError('Cannot extract {} words from a text of {} words.'.format(word_count, text_length))

    # Determine start index. Bug fix: when word_count == text_length the
    # only valid start is 0; the original called randrange(0), which
    # raises ValueError on an empty range.
    max_range = text_length - word_count
    start_range = random.randrange(max_range) if max_range > 0 else 0
    return text[start_range:start_range + word_count]
f84f8b4148380d6c6e29dc0742e42481dda2d11a
700,042
import os def _find_in_path(path, file): """Find a file in a given path string.""" for p in path.split(os.pathsep): candidate = os.path.join(p, file) if (os.path.exists(os.path.join(p, file))): return candidate return False
297ab3d91aabca5a979e45ea2b1a2595f05fedd6
700,043
import sys


def callersContext():
    """Return the code object of the caller's caller.

    Frame 0 is this function, frame 1 is the function asking about its
    caller, frame 2 is that caller — the depth must stay at 2.
    """
    frame = sys._getframe(2)
    return frame.f_code
d849326202c0062da076f54c379fe5f35b4d35b5
700,044
def rgb_to_hex(color):
    """Convert an RGB triple to a hex color code.

    Args:
        color (list): [R, G, B] integer components (0-255).

    Returns:
        str: '#rrggbb' hex code for the RGB value.
    """
    red, green, blue = color
    return '#{:02x}{:02x}{:02x}'.format(red, green, blue)
9308fa029cb2bfd75495c92a2e145f3439e3b60b
700,045
def sort_queryset(queryset, request, allowed_sorts, default=None):
    """
    Sorts the queryset by one of allowed_sorts based on parameters 'sort'
    and 'dir' from request. Falls back to *default* (if given), else
    returns the queryset untouched.
    """
    requested = request.GET.get('sort', None)
    if requested in allowed_sorts:
        prefix = '-' if request.GET.get('dir', 'asc') == 'desc' else ''
        return queryset.order_by(prefix + requested)
    if default:
        return queryset.order_by(default)
    return queryset
7d4ef00e0d345d4636caaa9ca69ade0a09e33ea4
700,046
def extract_path_from_filepath(file_path):
    """
    Return the directory portion of a path, with trailing slash.

    ex: 'folder/to/file.txt' returns 'folder/to/'

    :param file_path: path string using '/' separators
    :return: directory part ending in '/', or '' when the path has no '/'
    """
    last_slash = file_path.rfind('/')
    # Bug fix: rfind returns -1 when there is no '/', which made the
    # original return the filename minus its last character plus '/'.
    if last_slash == -1:
        return ''
    return file_path[:last_slash] + '/'
7014ac6d4fa47edff3315f7e23688cfe2e28a820
700,047
def fname_to_string(fname):
    """Return the contents of the given file as a string.

    Parameters
    ----------
    fname : str
        absolute path to file.
    """
    with open(fname) as handle:
        contents = handle.read()
    return contents
f9a3f94dc4a63c27cadb5c5f9a41eaa942332937
700,048
import argparse


def parse_args():
    """
    Parse muCLIar command-line args.

    :return: argparse.Namespace with 'song' (required string) and a
        'config' boolean flag
    """
    parser = argparse.ArgumentParser(description='muCLIar - Music from CLI')
    parser.add_argument('-s', '--song', required=True, type=str,
                        help='Song name')
    parser.add_argument('-c', '--config', action='store_true')
    return parser.parse_args()
ee55696b68927a6c0ff05ab4536a57f7ece91108
700,049
def global_color_table(color_depth, palette):
    """
    Return a valid global color table.

    The global color table of a GIF image is a 1-d bytearray of the form
    [r1, g1, b1, r2, g2, b2, ...] with length equals to 2**n where n is
    the color depth of the image.

    ----------
    Parameters

    color_depth: color depth of the GIF.

    palette: a list of rgb colors of the format [r1, g1, b1, r2, g2, b2, ...].
        The number of colors must be greater than or equal to 2**n where
        n is the color depth. Redundant colors will be discarded.

    Raises ValueError when the palette cannot be converted to a bytearray
    or is shorter than 3 * 2**color_depth entries.
    """
    try:
        palette = bytearray(palette)
    except (TypeError, ValueError) as exc:
        # Fix: narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt etc.) and chain the original cause.
        raise ValueError('Cannot convert palette to bytearray.') from exc

    valid_length = 3 * (1 << color_depth)
    if len(palette) < valid_length:
        raise ValueError('Invalid palette length.')
    # Discard redundant colors beyond 2**color_depth entries.
    if len(palette) > valid_length:
        palette = palette[:valid_length]
    return palette
4fc8b0cad668724b0a6d5735f70dcb16b6b9b140
700,050