content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def numero_bande(x, larg_bande):
    """Return the index of the vertical band that contains *x*.

    The graph is assumed to be split into vertical bands of width
    ``larg_bande``, numbered 0..N from the left (N = total number of bands).
    """
    band_position = x / larg_bande
    return int(band_position)
4f68dc4d4b527e4afdaa2ea009c932a6fac455f5
18,960
def ask(question: str, default: str = "") -> str:
    """A simple interface for asking questions to the user.

    Three options are given:

    * question and no default -> plain input
    * question and a default value -> with no user input the default is returned
    * question and 'yes'/'no' default -> the user can type n, y, yes, no
      (case-insensitive) and 'yes' or 'no' is returned in any case

    Arguments:
        question (str): the question for the user

    Keyword Arguments:
        default (str): a default value (default: {""})

    Returns:
        str -- the user answer or the default (can be an empty string)
    """
    if default == 'yes':
        appendix = " [Y/n] "
    elif default == 'no':
        appendix = " [y/N] "
    elif default:
        appendix = " [{}] ".format(default)
    else:
        appendix = " "
    try:
        answer = input(question + appendix)
    except EOFError:
        # raise SystemExit directly: the exit() helper is only guaranteed to
        # exist when the `site` module is loaded.
        raise SystemExit("Stdin was closed. Exiting...")
    if default in ('yes', 'no'):
        # Bug fix: normalize y/n style answers so callers always receive
        # 'yes' or 'no' as the docstring promises (previously the raw
        # answer string leaked through unchanged).
        normalized = answer.strip().lower()
        if normalized in ('y', 'yes'):
            return 'yes'
        if normalized in ('n', 'no'):
            return 'no'
        return default
    return answer if answer else default
e0262592b32f893043b4bc48fd32e99e6b864732
18,961
import glob
import os


def list_file(path, extension):
    """Return the files directly under *path* whose names end in *extension*."""
    pattern = os.path.join(path, '*.' + extension)
    return glob.glob(pattern)
0dd0de44aaf6df9cea0bd63b1131af73a697e913
18,962
import math


def slice_length(n, s):
    """Return the circumradius of a regular polygon with ``n`` sides of length ``s``."""
    # central angle subtended by one side, in radians (equals 2*pi/n)
    central_angle = 360 * math.pi / (180 * n)
    chord_factor = (1 - math.cos(central_angle)) * 2
    return math.sqrt((s ** 2) / chord_factor)
353b18428fbcb0df5ae0084bc7c6c73b63bc15b2
18,963
def mock_check_version(*args, **kwargs):
    """Stub for KafkaProducer version probing so it never connects on init.

    Accepts and ignores any arguments; returns a fixed broker version tuple.
    """
    del args, kwargs  # present only to match the mocked signature
    return (0, 10)
6b989ed3e6226e17c38569c266c03bb94b3db0d3
18,965
def check_password_vs_blacklist(current_string, blacklist):
    """Return True when *current_string* matches an entry of *blacklist*.

    The string is whitespace-trimmed, then compared after stripping each of
    the three quote styles (double quote, single quote, backtick) in turn.

    Arguments:
        current_string {string} -- the string to be checked against blacklist
        blacklist {list} -- list of words defined within a given blacklist file

    Returns:
        [bool] -- whether current_string was found within the blacklist file
    """
    trimmed = current_string.strip()
    candidates = (trimmed.strip('\"'), trimmed.strip("'"), trimmed.strip('`'))
    return any(candidate in blacklist for candidate in candidates)
e11223cc0c8ffcb0d03e9df2dbfa9a71a87e2667
18,967
import argparse


def get_parser():
    """Build and return the argument parser for the command line interface."""
    parser = argparse.ArgumentParser(
        description="Generate contributors.md via the command line"
    )
    # positional: the target repository in :owner/:repo form
    parser.add_argument(
        "repo",
        metavar="REPO",
        type=str,
        help="the github repo.(:owner/:repo)",
    )
    parser.add_argument(
        "-p", "--path",
        type=str,
        default="CONTRIBUTORS.md",
        help="the output file path.(default CONTRIBUTORS.md)",
    )
    parser.add_argument(
        "-c", "--count",
        type=int,
        default=6,
        help="the columns count in one row.(default 6)",
    )
    parser.add_argument(
        "-v", "--version",
        action="store_true",
        help="displays the current version of gh-c",
    )
    return parser
1ca0a06b6949004b4e02ebf6695254ff7444ce0d
18,968
from typing import Dict
import re


def variable_expansion(word: str, environment: Dict[str, str]) -> str:
    """Substitute environment values for $NAME references in *word*.

    Unknown variables expand to the empty string. A variable name runs until
    the next '$', whitespace or quote character.
    """
    def substitute(match):
        return environment.get(match.group(1), '')

    return re.sub(r'\$([^$\s\'"]+)', substitute, word)
34bacdae86db49eeb75e92c41cabaedf1c221c79
18,970
def plusOneSum(arr):
    """Return the sum of the integers after adding 1 to each element."""
    return sum(value + 1 for value in arr)
a3297841007deec4bf4430bbb18ff6405a7657fa
18,972
def merge(*dicts):
    """Merge the given dictionaries into one; on duplicate keys the value
    from the later dictionary wins."""
    merged = dict()
    for mapping in dicts:
        merged.update(mapping)
    return merged
c9a3899407b36357c046bed594d5939bc7aab4b3
18,973
import math


def absolute_error(y, yhat):
    """Returns the maximal absolute error between y and yhat.

    :param y: true function values
    :param yhat: predicted function values

    Lower is better.

    >>> absolute_error([0,1,2,3], [0,0,1,1])
    2.0
    """
    deviations = (math.fabs(true - pred) for true, pred in zip(y, yhat))
    return float(max(deviations))
c1262e042d3895a9ba06c213b27a1d5cb23c96fb
18,974
def prefix_regex(exp):
    """Transform a regular expression into its postfix form.

    Follows the 'shunting-yard' algorithm:
    https://en.wikipedia.org/wiki/Shunting-yard_algorithm

    The *exp* parameter MUST make concatenation explicit with '.'
    Example: a.(a+b)*.b

    Args:
        exp (str): regular expression

    Returns:
        list: a list containing the postfix form of the expression
    """
    # Operator priorities; parentheses get the lowest rank so they never
    # get popped as operators.
    prio = {
        ')': 0,
        '(': 0,
        '*': 3,
        '.': 1,
        '+': 1,
        '?': 1
    }
    operators = ['*', '.', '+']
    # op_stack = deque()
    # out_stack = deque()
    op_stack = []
    out_stack = []
    print("thomson")
    for c in exp:
        # If the character read is neither an operator nor a parenthesis,
        # append it directly to the output stack.
        if c not in prio.keys():
            # print('1-', c)
            out_stack.append(c)
        # If the character is an operator
        elif c in operators:
            # print('2-', c)
            # 1 - while the top of the stack is an operator with greater or
            # equal priority, move it to the output.
            if len(op_stack) > 0:
                top = op_stack.pop()
                while top in operators and prio[top] >= prio[c]:
                    out_stack.append(top)
                    # Walk the stack as long as the top is an operator with
                    # higher priority than the character read.
                    # while (len(op_stack) > 0):
                    if (len(op_stack) == 0):
                        break
                    top = op_stack.pop()
                    if (top in operators and prio[top] < prio[c]) or top not in operators:
                        break
                # out_stack.append(top)
                # The last popped element was not sent to the output:
                # push it back, then push the current operator.
                # NOTE(review): some exit paths of the loop above appear to
                # drop `top` without re-pushing it — confirm against the
                # shunting-yard reference before relying on this function.
                if top in operators and prio[top] < prio[c]:
                    op_stack.append(top)
                    op_stack.append(c)
                if top == '(':
                    op_stack.append(top)
                    op_stack.append(c)
                if (len(op_stack) == 0):
                    op_stack.append(c)
            else:
                op_stack.append(c)
        # If the character read is a '('
        elif c == '(':
            # print('3-', c)
            op_stack.append(c)
        # If the character read is ')': pop every operator until a '(' is found
        elif c == ')':
            # print('4-', c)
            if (len(op_stack) > 0):
                top = op_stack.pop()
                if top in operators:
                    out_stack.append(top)
                while (len(op_stack) > 0):
                    top = op_stack.pop()
                    if top == '(':
                        break
                    out_stack.append(top)
            else:
                print('Mauvaise expression.')
    # Flush any operators still pending on the stack to the output.
    for op in op_stack:
        out_stack.append(op)
    return out_stack
1ca773deb16861468208508c914c33bbe2126f69
18,975
from pathlib import Path
import os


def junit_report_filename(suite: str) -> Path:
    """Compute the JUnit report filename for the specified test suite.

    See also `upload_test_report`. In CI, the filename will include the
    Buildkite job ID.

    Args:
        suite: The identifier for the test suite in Buildkite Test Analytics.

    Returns:
        Path of the form ``junit_<suite>[_<job id>].xml``.
    """
    filename = f"junit_{suite}"
    if "BUILDKITE_JOB_ID" in os.environ:
        filename += "_" + os.environ["BUILDKITE_JOB_ID"]
    # Bug fix: the computed name was previously discarded and a literal
    # placeholder string was returned instead.
    return Path(f"{filename}.xml")
c47314597294cc8b29fb8913ed3cbb114c9e92f0
18,976
def get_facetview_link(link, _id, version=None):
    """
    Return link to object_id in FacetView interface.

    :param link: str, base FacetView URL (trailing slash tolerated)
    :param _id: str, _id for elasticsearch document
    :param version: str, optional system version to filter on
    :return: constructed URL for facetview
    """
    if link.endswith("/"):
        link = link[:-1]
    # Bug fix: the last path segment used to be kept as a 1-element *list*,
    # so `"figaro" in origin` only matched the exact segment "figaro".
    # Compare against the segment string so e.g. "figaro-ui" also matches.
    origin = link.split("/")[-1]
    # Figaro deployments index jobs under 'job_id' rather than '_id'.
    term = "job_id" if "figaro" in origin else "_id"
    query_string = 'query_string="' + term + '%3A%5C"' + _id + '%5C""'
    if version is not None:
        query_string += '&system_version="' + version + '"'
    return "%s/?%s" % (link, query_string)
6fb7ff32cba075fd23d3c0e6e5514158f2e752c0
18,977
import re


def clean_wsj(txt, remove_fragments=True):
    """
    Prepares WSJ transcripts according to Hannun et al., 2014:
    https://arxiv.org/pdf/1408.2873.pdf

    It is assumed that data has already been processed with Kaldi's s5 recipe.
    A full overview of the wsj transcription guidelines is found here:
    https://catalog.ldc.upenn.edu/docs/LDC93S6A/dot_spec.html
    It is not fully exhaustive which may be due to transcriber variations/mistakes.

    The final character set will be:
    - letters: "a-z"
    - noise-token: "?"
    - apostrophe: "'"
    - hyphen: "-"
    - dot: "."
    - whitespace: " "

    Args:
        txt: Text to be normalized.
        remove_fragments: When True (default), word fragments such as
            "-publicans" are joined or stripped, and leftover hyphens and
            dots are removed.

    Returns:
        str: The normalized string.
    """
    txt = txt.lower()
    # Explanations for replacements:
    # - cursive apostrophe [`] should have been ' (very rare)
    # - double tilde [~~] seems to indicate silence during the full clip (very rare)
    # - noise-tag [<noise>] used to indicate noise (can be very subtle breathing between words)
    # - underscore [_] should have been a hyphen (one instance)
    # - pointy brackets [<...>] seems to indicate verbal insertion, but guidelines says deletion
    # - word enclosed in asterisk symbols [*hospital*] indicates mispronunciation, but will be spelled correctly though
    # - semi-colon [;] should have been a . in the abbreviation corp. (one instance)
    # - wrongful spelling of parentheses
    txt = txt.replace("`", "'").replace('~', '').replace('<noise>', '').replace('_', '-')
    txt = txt.replace('<', '').replace('>', '').replace('*', '').replace('corp;', 'corp.')
    txt = txt.replace('in-parenthesis', 'in-parentheses')
    # - word fragment in parenthesis [-(repub)lican] indicates missing fragment
    txt = re.sub("\([a-z'-]+\)", "", txt)
    # Everything in the remove list is vocalized punctuation, however, a few characters also have other uses:
    # - colon associated with word [securi:ty or a:] used to indicate lengthening
    # - prepended exclamation-point [!but] used for emphatic stress
    # These can, however, simply be removed anyway.
    remove = ['"', '(', ')', '{', '}', ',', '&', '/', ';', ':', '!']
    for char in remove:
        txt = txt.replace(char, '')
    # The following is also vocalized punctuation but can not simply be removed, as we sometimes want to keep these:
    # - hyphen/dash [-] when used to compound words and in the beginning/end of word fragments
    # - period [.] when used in acronyms and abbreviations
    # - single-quote ['] when used for contractions and possessive form
    txt = txt.replace('-dash', 'dash').replace('-hyphen', 'hyphen')
    txt = txt.replace('.period', 'period').replace('...ellipsis', 'ellipsis')
    txt = txt.replace("'single-quote", 'single-quote').replace('?question-mark', 'question-mark')
    if remove_fragments:
        # adjacent fragements are joined to one word
        txt = txt.replace('in- -communicado', 'incommunicado')
        txt = txt.replace('republi- -publicans', 'republicans')
        txt = txt.replace('gi- -vestigating', 'investigating')
        txt = txt.replace('exa- -cerbate', 'exacerbate')
        txt = txt.replace('rel- -linquish', 'relinquish')
        txt = txt.replace('cata- -lysmic', 'cataclysmic')
        txt = txt.replace('toro- -ronto', 'toronto')
        # all simple fragments are removed
        txt = re.sub(r"([a-z']+-( |$)|( |^)-[a-z']+)", "", txt)
        # should only be - between verbalized punctuation
        txt = txt.replace('-', '')
        # used in front of letters in acronyms and abbreviations
        txt = txt.replace('.', '')
    # whitespace normalization: convert whitespace sequences to a single whitespace
    txt = re.sub("\s+", " ", txt)
    return txt.strip()
e1e7c4c3f984f4656a17ffddd6c54481083ac094
18,978
def format_submission_id(fixedString, variablePartLength, submissionCounter):
    """Create the name of an IPD submission file.

    The name is the fixed prefix, a literal '1', then the counter
    zero-padded to *variablePartLength* digits.
    """
    counter_text = str(submissionCounter).zfill(int(variablePartLength))
    return fixedString + "1" + counter_text
0e9818b8149a3f1c9bb1822b945a71d14bbd5cb8
18,980
def bin_string_to_bytearray(binary_string: str) -> bytearray:
    """Convert a string of '0'/'1' characters into a bytearray.

    The string is right-padded with '0' bits up to a multiple of 8; each
    8-bit chunk then becomes one byte (most significant bit first).

    Parameters
    ----------
    binary_string: str
        The binary string used to build the bytearray

    Returns
    -------
    bytearray
        The generated bytearray
    """
    binary_string += '0' * ((8 - len(binary_string)) % 8)
    return bytearray(
        int(binary_string[offset:offset + 8], 2)
        for offset in range(0, len(binary_string), 8)
    )
5f1af3a46ee97ad23e3d0a6cb9ded9e1e8568a2b
18,981
import sys
import re


def filt_htm(content):
    """Collapse whitespace runs in HTML course data to single spaces.

    :param content: raw page content; bytes are decoded as UTF-8 on Python 3.
    :return: the cleaned text as str.
    """
    # Bug fix: `sys.version > '3'` compared the full free-form version
    # string lexically (breaks for Python 10+ and is fragile in general);
    # compare the version tuple instead. The isinstance guard also makes
    # str input safe, where .decode() used to raise AttributeError.
    if sys.version_info[0] >= 3 and isinstance(content, bytes):
        content = content.decode('utf-8')
    return re.sub(r'[\s\r\t\n]+', ' ', content)
6b71e924269cfae5569b28c453047f0ee3e13f2e
18,982
def _is_delayed(obj): """Check that object follows the ``class:neurtu.Delayed`` API """ return (hasattr(obj, 'compute') and hasattr(obj, 'get_tags') and callable(obj.compute) and callable(obj.get_tags))
7970b4dd90e9e83f43278860857e766f7fae7c81
18,985
def filter(pred):
    """Filter, for composition with generators that take coll as an argument."""
    def generator(coll):
        # lazily yield only the elements satisfying the predicate
        return (item for item in coll if pred(item))
    return generator
b55ea9aa572da29107f3cab061321682e9648b77
18,986
def truthy_string(s):
    """Determine whether *s* spells a truthy value (true/1/y/yes, any case)."""
    truthy_values = {'true', '1', 'y', 'yes'}
    return str(s).lower() in truthy_values
ee670b353de29165bd2922b53a13b0d4b3908d40
18,987
def expectedPrimerPair(l, r):
    """
    Inputs:
        left primer Segment
        right primer Segment
    Check if the 2 primers come from the same primer pair (field index 2).
    Returns: Boolean
    """
    left_pair_id = l[2]
    right_pair_id = r[2]
    return left_pair_id == right_pair_id
7960463d6579c65ff66282a71c3e3d235d01bcf5
18,991
def read_requirements(filename="requirements.in"):
    """
    Load list of dependent packages for the mesos package.

    :param filename: filename to load requirements from
    :type filename: str
    :rtype: list[str]
    """
    with open(filename) as requirements_file:
        # each line keeps its trailing newline, exactly like readlines()
        return list(requirements_file)
c0c5e5a92f6e2059803ee4f703a6a8b375b0d43e
18,992
def getDiff(aT, bT):
    """(Internal helper of the ``Dialog.get_dialog`` method.)

    Score how close the user's question is to a stored one: the number of
    (i, j) pairs with aT[i] == bT[j], normalised by len(aT).
    """
    matches = sum(1 for left in aT for right in bT if left == right)
    return matches / len(aT)
cc722a516048341d04cd206cec008381d678188c
18,993
def lookup(obj):
    """Return all the method and attribute names of *obj* (via ``dir``)."""
    names = dir(obj)
    return names
e26bae9a1eeb4460312963526e059268f3a61a35
18,994
def digest_model_data(model_resource):
    """Digest model meta-data into a plain dict.

    Extracts id/name/description, the distribution file name, and the
    created/modified timestamps from *model_resource*.

    NOTE(review): reaches into the private ``_store_metadata`` object —
    presumably a knowledge-graph/forge resource; verify the attribute
    names against the client library version in use.
    """
    model_data = {
        "id": model_resource.id,
        "name": model_resource.name,
        "description": model_resource.description,
        # name of the attached distribution (model file)
        "filename": model_resource.distribution.name,
        "created": model_resource._store_metadata._createdAt,
        "modified": model_resource._store_metadata._updatedAt
    }
    return model_data
fa39bf8eb5cd53d836861116f900b66013fcdb0e
18,995
import json


def parse(data_path, out_path, cpe_path, logger):
    """
    Parse output of scanning and map CMS names and versions to the CPE format

    :param data_path: path to the file with data acquired from scanning
    :param out_path: path to the file, where the output will be saved
    :param cpe_path: path to the file with information about services which
    this component detects, by default a data/cms.json can be used.
    :param logger: logger instance
    :return: Results (list of {"ip", "cpe"} dicts), or False when the input
        JSON could not be decoded.
    """
    # mapping of detected plugin name -> {'product': ..., 'versions': [...]}
    with open(cpe_path, 'r') as cpefile:
        cpe_dict = json.load(cpefile)
    with open(data_path, 'r') as datafile:
        try:
            data = json.load(datafile)
        except json.JSONDecodeError as e:
            logger.info(f'Empty result JSON, nothing to parse. Exiting...')
            return False
    # ip address -> set of "product:version" CPE fragments
    raw_results = {}
    with open(out_path, 'w') as outfile:
        for host in data:
            # NOTE(review): debug print left in place — consider logger.debug
            print(host)
            if "plugins" not in host or "IP" not in host["plugins"]:
                continue
            ip = host["plugins"]["IP"]['string'][0]
            ip_cpes = set()
            for key in host["plugins"]:
                if key in cpe_dict:
                    if 'version' in host["plugins"][key]:
                        # strip any "-suffix" (e.g. "1.2-beta" -> "1.2")
                        if "-" in host["plugins"][key]['version'][0]:
                            version = host["plugins"][key]['version'][0][:host["plugins"][key]['version'][0].find("-")]
                        else:
                            version = host["plugins"][key]['version'][0]
                        # only keep the version when it is a known one,
                        # otherwise fall back to the '*' wildcard
                        if version in cpe_dict[key]['versions']:
                            ip_cpes.add(cpe_dict[key]['product'] + ":" + version)
                        else:
                            ip_cpes.add(cpe_dict[key]['product'] + ":*")
                    else:
                        ip_cpes.add(cpe_dict[key]['product'] + ":*")
            if ip not in raw_results:
                raw_results[ip] = set()
            for cpe in ip_cpes:
                raw_results[ip].add(cpe)
        # flatten the per-ip sets into JSON-serialisable lists
        results = []
        for key, values in raw_results.items():
            host_data = {"ip": key, "cpe": (list(values))}
            results.append(host_data)
        json.dump({"data": results}, outfile)
    return results
a4978264013907665a7fe378a201dbfb30f58f2d
18,996
def table_key_url(table_name: str, partition_key: str, row_key: str) -> str:
    """Create the URL path addressing one entity of a table by its keys.

    Single quotes in either key are doubled, per OData key-literal escaping.
    """
    escaped_partition = partition_key.replace("'", "''")
    escaped_row = row_key.replace("'", "''")
    return f"{table_name}(PartitionKey='{escaped_partition}',RowKey='{escaped_row}')"
ce3c4e0639d8b2d78b754fdf6377fd2a693b703e
18,997
def reset_env(env):
    """Resets the pendulum in the safe area.

    Performs a normal ``env.reset()`` and then overwrites the wrapped
    environment's state with a uniform sample near upright: angle in
    [-0.1, 0.1], angular velocity in [-0.5, 0.5].

    :param env: gym-style wrapper; assumes ``env.env`` is the raw Pendulum
        environment — confirm against the gym version in use.
    :return: the observation for the newly-set state.
    """
    env.reset()
    # overwrite the post-reset state so every episode starts in the safe area
    env.env.state = env.np_random.uniform(low=[-0.1, -0.5], high=[0.1, 0.5])
    env.env.last_u = None  # no torque has been applied yet
    return env.env._get_obs()
55c987fb8bd9011d5fe16e70828bd4acac2b6be6
18,998
def square_digits(num):
    """Return the concatenated squares of every digit of the given number."""
    squared_digits = (str(int(digit) ** 2) for digit in str(num))
    return int(''.join(squared_digits))
cfa6349c13f8b9a23dd479d0bf90fd0517778953
18,999
import re


def get_test_args(args_line):
    """Return the list of arguments from the provided text line.

    An optional trailing '#' comment is dropped; the remainder is split on
    spaces that are not escaped with a backslash.
    """
    # partition() never raises, unlike split-and-unpack
    args_line = args_line.partition('#')[0]
    return re.split(r'(?<!\\) ', args_line.strip())
9129bf3d773165dc5bf0bc723b2a12b82a2c5517
19,002
def openingRing(op, p):
    """Makes a linear ring of the feature (opening).

    The ring is a closed rectangle (5 points, first == last) placed on one of
    the four walls. Walls 0/2 vary along the first coordinate, walls 1/3 along
    the second; walls 2/3 extend in the negative direction of that axis. The
    opening height is always applied on the third coordinate.
    """
    X, Y = op['origin'][0], op['origin'][1]
    width, height = op['size'][0], op['size'][1]
    wall = op['wall']
    if wall not in (0, 1, 2, 3):
        raise ValueError("The door is positioned on an unknown wall.")
    base = p[wall]
    axis = 0 if wall in (0, 2) else 1          # coordinate that varies with X/width
    sign = 1 if wall in (0, 1) else -1         # direction along that coordinate
    # rectangle corners, counter-clockwise, closed back to the start
    corners = [(0, 0), (0, height), (width, height), (width, 0), (0, 0)]
    points = []
    for du, dv in corners:
        coord = [base[0], base[1], base[2]]
        coord[axis] = base[axis] + sign * X + sign * du
        coord[2] = base[2] + Y + dv
        points.append("%s %s %s" % (coord[0], coord[1], coord[2]))
    return " ".join(points)
11063cbe3414a6d990e5d58ee7fff2cd694a9664
19,004
import pickle


def load_pickle(pickle_file_name):
    """Load and return the object stored in *pickle_file_name*."""
    with open(pickle_file_name, "rb") as handle:
        payload = pickle.load(handle)
    print("Pickle file loaded: {}".format(pickle_file_name))
    return payload
8bbf04e2c1a1859b89c2cede128381a6c7ebadc6
19,006
import random


def set_array(len_arr):
    """Generate a list of ``len_arr`` elements drawn uniformly from {1, 2, 3}."""
    return [random.randint(1, 3) for _ in range(len_arr)]
386184ad02c70713895ea8bba78212fb471d8a41
19,007
import os


def _run_program(cmd_to_run):
    """
    Run the provided command and return the corresponding return code.

    :param cmd_to_run: Command to run (passed verbatim to the system shell)
    :return: Return value — the raw ``os.system`` status (on Unix an encoded
        wait status, not just the exit code)

    NOTE(review): ``os.system`` runs the string through the shell, so any
    untrusted content in *cmd_to_run* is a shell-injection risk; prefer
    ``subprocess.run([...], shell=False)`` if callers can pass external data.
    """
    return os.system(cmd_to_run)
bf4b35e4e51001b682d3da7bf5fa5439916de4f5
19,008
def get_image_sizes(metadata):
    """Read the image-size index file and return ``{path: (width, height)}``.

    ``metadata.image_sizes`` names a CSV-like file with the structure::

        <path>,<w>,<h>
        path/to/image1.jpg,500,300
        path/to/image2.jpg,1000,600
    """
    image_sizes = {}
    with open(metadata.image_sizes) as sizes_file:
        for line in sizes_file:
            image_id, width_text, height_text = line.strip('\n').split(',')
            image_sizes[image_id] = (int(width_text), int(height_text))
    return image_sizes
a3910f22938aa0a635f5c3e424891dd7c76ce41a
19,009
import os


def lstdirNoExt(fileDir, ext=None):
    """Return the names (text before the first '.') of non-hidden files in
    *fileDir*, optionally restricted to entries ending in *ext*."""
    names = []
    for entry in os.listdir(fileDir):
        if entry.startswith('.'):
            continue  # skip hidden files
        if ext is not None and not entry.endswith(ext):
            continue
        names.append(entry.split('.')[0])
    return names
44e2b1fdc10ebbe33c50b0bfb46766e56413fc1c
19,010
from typing import Counter


def word_form_hapaxes(tokens):
    """Return the wordform hapaxes in *tokens*.

    A hapax is a wordform that appears exactly once. Counting is done with
    the standard-library Counter, so no NLP package is required.
    """
    frequencies = Counter(tokens)
    return [form for form, count in frequencies.items() if count == 1]
9ea3f56ebd2967baf427abe1c1604fb8d4063046
19,012
import argparse


def parse_command_line_arguments():
    """Parse command line arguments with argparse."""
    parser = argparse.ArgumentParser(
        description="Run skyscan data and model scripts.",
        epilog="For help with this program, contact John Speed at jmeyers@iqt.org.",
    )
    # Every option is an off-by-default boolean switch; declare them in one
    # table so each flag is registered identically.
    switches = [
        (("--prep",), "Prepare voxel51 dataset."),
        (("--upload_train",), "Upload train samples to labelbox."),
        (("--resume_upload_train",), "Resume uploading train samples to labelbox."),
        (("--upload_eval",), "Upload eval samples to labelbox."),
        (("--resume_upload_eval",), "Resume uploading eval samples to labelbox."),
        (("--download", "--download_from_labelbox"), "Download dataset from labelbox."),
        (("--train",), "Train a model."),
        (("--train_multi_class",), "Train a multi-class model."),
        (("--predict",), "Model prediction."),
        (("--evaluate",), "Model evaluation."),
        (("--normalize",), "Normalize plane data"),
        (("--predict_tiled",), "Tiled model prediction."),
        (("--build_multi_class_dataset",), "Build multi-class dataset."),
        (("--export_yolo_dataset",), "Export a YOLO v4 dataset for multi-class train & eval."),
        (("--export_model",), "Export a trained model."),
    ]
    for option_strings, help_text in switches:
        parser.add_argument(
            *option_strings,
            default=False,
            action="store_true",
            help=help_text,
        )
    return parser.parse_args()
2d9f1c321728cf1dcd7e27863ee30d55e55cfd2b
19,013
def get_vehicle_hours_by_type(gtfs, route_type):
    """
    Return the sum of vehicle hours in a particular day by route type.

    :param gtfs: GTFS object exposing day selection and SQL helpers
        (gtfspy-style API assumed — confirm).
    :param route_type: numeric GTFS route type to filter on.
    :return: total vehicle hours for that day and type, as a scalar.
    """
    # pick a representative date for the daily extract
    day = gtfs.get_suitable_date_for_daily_extract()
    # sum trip durations (deciseconds fields, divided down to hours) over all
    # trips of the requested route type on that date
    query = (" SELECT * , SUM(end_time_ds - start_time_ds)/3600 as vehicle_hours_type"
             " FROM"
             " (SELECT * FROM day_trips as q1"
             " INNER JOIN"
             " (SELECT route_I, type FROM routes) as q2"
             " ON q1.route_I = q2.route_I"
             " WHERE type = {route_type}"
             " AND date = '{day}')".format(day=day, route_type=route_type))
    df = gtfs.execute_custom_query_pandas(query)
    # the aggregate query yields a single row; .item() unwraps the scalar
    return df['vehicle_hours_type'].item()
061bf4ddb6b8d512caa5b3c6a3331a9e1643ecbd
19,014
def _get_1st_obj(ll): """given a list [of list ...] of something get the first non-list object """ if type(ll) is not list: return ll return _get_1st_obj(ll[0])
5fec16f80677f948e4670c7ad4a8a3fe7a6f6e39
19,018
def mimetype(serial):
    """Build a headers dict carrying the blaze serialization mimetype.

    *serial* is a serialization format object exposing a ``name`` attribute
    (e.g. 'msgpack' -> {'Content-Type': 'application/vnd.blaze+msgpack'}).
    """
    content_type = f'application/vnd.blaze+{serial.name}'
    return {'Content-Type': content_type}
9b99777c1968b881c4d3240dfad00ccc6f80ba54
19,019
import math


def vectorDistance(vector1, vector2):
    """Return the Euclidean distance between two 2-D vectors (``.x``/``.y``)."""
    dx = vector2.x - vector1.x
    dy = vector2.y - vector1.y
    return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))
61db7f6258b6704d44cf8d630b4dd33f13be8911
19,021
def tmp(tmpdir_factory):
    """Create a common temp directory and return it as a string path."""
    temp_dir = tmpdir_factory.mktemp('tmp_test')
    return str(temp_dir)
3901ede63f669752622f7273f876347218b0011c
19,022
def is_valid_jwt(jwt: str) -> bool:
    """
    Check your jwt.

    A structurally valid JWT has exactly three dot-separated segments
    (header.payload.signature); no decoding is attempted.

    Parameters
    ----------
    jwt: str
        jwt string.

    Returns
    -------
    bool
        True if jwt is valid, False else
    """
    segments = jwt.split(".")
    return len(segments) == 3
fc979b1f4b877b9ebbb9af132135b221e1239cca
19,023
def clean(G, volume_threshold):
    """
    Removes all edges in G with volume < volume_threshold.

    Operates on a copy; nodes left with no incident edges are dropped too.

    :param G: directed graph whose edges carry a 'volume' attribute
        (networkx-style DiGraph API assumed — confirm).
    :param volume_threshold: minimum volume an edge must have to survive.
    :return: the cleaned copy of G.
    """
    H = G.copy()
    # drop low-volume edges first...
    edges_to_remove = [e for e in H.edges if H.edges[e]['volume'] < volume_threshold]
    H.remove_edges_from(edges_to_remove)
    # ...then prune nodes that no longer touch any edge
    nodes_to_remove = [n for n in H.nodes if H.in_degree(n) + H.out_degree(n) == 0]
    H.remove_nodes_from(nodes_to_remove)
    return H
47b8eb2f2010828d78b3a0e3ab45fdc7beee812e
19,026
def count_linestokens(line):
    """Count the space-separated tokens in a sentence (punctuation included).

    :param line: the sentence
    :return: the token count
    """
    return len(line.split(' '))
d80868a3ab56fab5fe3d1cb04c8cee05cf7e7e5d
19,029
def tokenize(text):
    """Tokenize a passage of text: drop periods, then split on single spaces."""
    without_periods = text.replace('.', '')
    return without_periods.split(' ')
497be6a4b8a7fd6ef0cc715b6343d9eec42e70f9
19,030
from typing import Set
from typing import Tuple
import random


def prompt_x(independent: Set[str], y: str) -> Tuple[str, float]:
    """Prompt the user for the independent variable to look up

    >>> prompt_x({'year'}, 'slope')
    At what value do you want the slope?
    e.g. to find the slope when the year is 2000, type year=2000
    >? CO2=2000
    Invalid input! Please enter one of year
    # The grammar is awkward here because there is only
    # one valid input (year). When we call the function in main, we will always have more than one
    >? year=2000
    ('year', 2000.0)
    """
    # build a concrete example from one of the valid variable names
    example = random.choice(list(independent))
    print(f'At what value do you want the {y}?')
    x_str = input(f'e.g. to find the {y} when the {example} is 2000, type {example}=2000').lower()
    # NOTE(review): input without an '=' raises ValueError here — confirm intended.
    x, expected = x_str.split('=')
    # re-prompt until the variable name is one of the allowed ones
    while x not in independent:
        x_str = input('Invalid input! Please enter one of ' + ' | '.join(independent))
        x, expected = x_str.split('=')
    return (x, float(expected))
fbfaecc7353526ecf520a300e7e31fdf19cb33c1
19,032
def _get_cell(row, name, decimal=False): """Retrieves a cell from a row by name.""" for cell in row.cells: if cell.name == name: if decimal: return cell.decimal_value[0] return cell.string_value[0]
16e0c5475121a3d01fdef1ca2f16e3a6e30cccba
19,033
import sys


def ismatrice(mat):
    """Test whether *mat* looks like a csv matrice: an iterable of list rows
    whose cells are all float, int or string."""
    # must be iterable at all
    try:
        iter(mat)
    except TypeError:
        return False
    # every row must be a plain list
    for row in mat:
        if type(row) != list:
            return False
    # every cell must be a float, an int, or a string
    for row in mat:
        for cell in row:
            if type(cell) in (float, int):
                continue
            if sys.version_info[0] == 2:
                # Python 2 strings include unicode, hence basestring
                if not isinstance(cell, basestring):  # noqa: F821
                    return False
            else:
                if not isinstance(cell, str):
                    return False
    return True
cfcd2cca77b61c27432788c94eb32d590e0d8e53
19,034
def _parse_tensor_name(tname): """Adapt from TensorFlow source code """ components = tname.split(":") if len(components) == 2: try: output_index = int(components[1]) except ValueError: raise ValueError("invalid output index: {}".format(tname)) return (components[0], output_index) elif len(components) == 1: return (components[0], 0) else: raise ValueError("invalid tensor name: {}".format(tname))
a8530e5381024476ea819ae859553d768df3e00d
19,035
def _or_queries(queries): """Helper function that concatenates lazy Django queries""" result = queries.pop() for item in queries: result |= item return result
fd5502af810bd401fd44817f9c05ef1ab124f525
19,037
def is_label_definition(line):
    """Return whether the line is a LABEL node."""
    label_prefix = "LABEL "
    return line.startswith(label_prefix)
e508f0987204e01bc97ef52dda189171e0c7befb
19,038
import os


def normalize_path(filepath, basedir=None):
    """
    Normalizes a given file path.

    Backslashes are converted to the platform separator; relative paths are
    resolved against *basedir*.

    :param filepath: the path to normalize.
    :param basedir: base directory for relative paths; defaults to the
        current working directory *at call time*.

    Bug fix: the old default ``basedir=os.getcwd()`` was evaluated once at
    import time, so later ``chdir`` calls were silently ignored.
    """
    if basedir is None:
        basedir = os.getcwd()
    normalized = filepath.replace('\\', os.path.sep)
    if not os.path.isabs(normalized):
        normalized = os.path.normpath(os.path.join(basedir, normalized))
    return normalized
693547684055b08d4c962864c3dcb5a5b3dd4d2b
19,039
def nfa_word_acceptance(nfa: dict, word: list) -> bool:
    """
    Checks if a given word is accepted by a NFA.

    The word w is accepted if there exists at least one accepting run on w.

    :param dict nfa: input NFA;
    :param list word: list of symbols in nfa['alphabet'];
    :return: *(bool)*, True if the word is accepted, False otherwise.
    """
    current_states = set(nfa['initial_states'])
    for symbol in word:
        successors = set()
        for state in current_states:
            # missing transitions contribute no successors
            successors.update(nfa['transitions'].get((state, symbol), set()))
        if not successors:
            return False  # the run died: no state can consume this symbol
        current_states = successors
    if current_states.intersection(nfa['accepting_states']):
        return True
    return False
ca552fe4061f7c87fc0a9991a9626baca4b63fc6
19,041
def is_str(value: str) -> str:
    """Coerce *value* to ``str`` and return it.

    NOTE(review): despite the ``is_`` prefix this is a conversion, not a
    predicate — it never returns a bool. Verify callers expect the
    stringified value rather than a membership test.
    """
    return str(value)
1c92833fbba4cfa379ac03fabf75d913eea00587
19,043
def save_cast_int(int_str: str) -> int:
    """Cast *int_str* to int, falling back to 0 on failure.

    Helper so version numbers of prereleases (i.e. 3.8.0rc1) do not throw
    exceptions.

    Parameters
    ----------
    int_str : str
        String which should represent a number.

    Returns
    -------
    int
        Int representation of int_str, or 0 when it does not parse.
    """
    try:
        value = int(int_str)
    except ValueError:
        value = 0
    return value
5c8ad0c7f2d1aaa6997061e6a63c9ccf17e2a495
19,044
def get_longest_common_subseq(data, get_all_subseqs=False):
    """ Adapted from http://stackoverflow.com/a/28869690
    The get_all_subseqs parameter was added.
    :param data: a list of iterables
    :param get_all_subseqs: returns all the subsequences if True
    :return: - the longest common subsequence
             - None if the two sequences are equal
             - [] if there is no subsequence
             - True if possible_subseq == seq

    NOTE(review): despite the name, "subsequence" here means *contiguous*
    slice (substring) — the helpers only compare length-n windows.
    """
    def is_subseq(possible_subseq, seq):
        # a longer candidate can never fit
        if len(possible_subseq) > len(seq):
            return False
        def get_length_n_slices(n):
            # every contiguous window of length n in seq
            for i in range(len(seq) + 1 - n):
                yield seq[i:i + n]
        for slyce in get_length_n_slices(len(possible_subseq)):
            if slyce == possible_subseq:
                return True
        return False

    def is_subseq_of_any(find, data):
        # degenerate case: nothing to search and nothing to find
        if len(data) < 1 and len(find) < 1:
            return False
        for i in range(len(data)):
            if not is_subseq(find, data[i]):
                return False
        return True

    substr = []
    if len(data) > 1 and len(data[0]) > 0:
        # enumerate every slice of the first sequence and keep those common
        # to all sequences; the longest one wins (or all are collected)
        for i in range(len(data[0])):
            for j in range(len(data[0]) - i + 1):
                potential_subseq = data[0][i:i + j]
                if is_subseq_of_any(potential_subseq, data):
                    if not get_all_subseqs and j > len(substr):
                        substr = potential_subseq
                    if get_all_subseqs:
                        substr.append(potential_subseq)
    return substr
43f94f71c9839250729acdc6c06e7b8afa6b7087
19,047
def dt2str(dt):
    """Convert a datetime.timedelta to a short human readable string.

    Whole days win over hours; strictly more than one hour wins over
    minutes (exactly 3600 seconds still renders as '60M').
    """
    if dt.days:
        return '{}D'.format(dt.days)
    if dt.seconds > 3600:
        whole_hours = dt.seconds // 3600
        return '{}H'.format(whole_hours)
    whole_minutes = dt.seconds // 60
    return '{}M'.format(whole_minutes)
b87b17707cc647876f14da11c4bfabe95736afae
19,048
def sounds_like(query):
    """Match common query terms to the actual key mapping value in the URL dictionary"""
    # Canonical key -> aliases users commonly type for it.
    synonyms = {
        'docs': ['documentation', 'help', 'doxx.org'],
        'blog': ['things', 'tutorials', 'tuts'],
        'updates': ['twitter', 'feed', 'update', 'news', 'whatsnew'],
        'source': ['sourcecode', 'code', 'modules', 'repository'],
        'pr': ['packagerepo', 'package-repo', 'packages'],
        'template': ['templates'],
        'key': ['keys'],
        'archive': ['archives'],
        'changes': ['changelog'],
    }
    for canonical, aliases in synonyms.items():
        if query in aliases:
            return canonical
    # Unknown terms pass through unchanged.
    return query
ef17962ab7e0fe7e8144aa3599be5f85103163d3
19,049
import random


def randomfill(materials):
    """Build a fill function that ignores its point and picks a random
    material from *materials*. Repeating a material in the list increases
    its chance of being chosen."""
    def picker(point):
        return random.choice(materials)
    return picker
bfc4ab1ac8c66b7e051bdeb9ec3d5205c5c132ce
19,052
import os


def find_empty_dirs(parent_dir):
    """Returns a list of directories that are empty or contain empty directories.

    Positional arguments:
    parent_dir -- parent directory string to search.

    Returns:
    List of empty directories or directories that contain empty directories.
    Remove in order for successful execution.
    """
    # Map every directory under parent_dir to whether it directly holds files.
    dirs_to_remove = {r: bool(f) for r, d, f in os.walk(parent_dir)}  # First get all dirs available.
    dirs_to_remove.pop(parent_dir, None)  # If parent_dir is empty don't include it, just focus on subdirectories.
    # Deepest paths first: a directory that holds files must also protect all
    # of its ancestors from removal.
    for directory in sorted(dirs_to_remove.keys(), reverse=True):
        does_dir_have_files = dirs_to_remove.get(directory, False)  # Skip if dir has already been removed from dict.
        if not does_dir_have_files:
            continue
        # Directory has files. Remove entire directory tree from dirs_to_remove.
        dirs_to_remove.pop(directory)
        # Walk up to parent_dir, dropping every ancestor from the removal set.
        while directory != parent_dir:
            directory = os.path.split(directory)[0]
            dirs_to_remove.pop(directory, None)
    # Returned deepest-first so callers can rmdir in this exact order.
    return sorted(dirs_to_remove, reverse=True)
bce97997c63e7eeafa3bba68aaec0b99547bbf88
19,053
def _rho1(basis_states):
    """State ρ₁ from the "3states" functional.

    Weighted mixture of projectors onto the given basis states, with weight
    2(d-i)/(d(d+1)) for the i-th state (0-based i, unlike the paper).
    """
    d = len(basis_states)  # dimension of logical subspace
    terms = []
    for i, psi in enumerate(basis_states):
        weight = 2 * (d - i) / (d * (d + 1))
        terms.append(weight * psi * psi.dag())
    return sum(terms)
939f9835f011fcf3457f7a3cb0f63b446d2cd093
19,054
def str_to_bool(bool_as_string: str) -> bool:
    """
    A converter that converts a string representation of ``True`` into a
    boolean.

    The following (case insensitive) strings return ``True``:
    'true', 't', '1', 'yes', 'y'; everything else returns ``False``.

    :param bool_as_string: The string to be converted to a bool.
    :type bool_as_string: str
    :rtype: bool
    :raises TypeError: Raised when any type other than a string is passed.
    """
    if not isinstance(bool_as_string, str):
        raise TypeError("Only string types supported")
    truthy = {"true", "t", "1", "yes", "y"}
    return bool_as_string.lower() in truthy
62fef04039d66d3530e4cac42bf3c152d8f891e6
19,056
def all_neg(literals):
    """Prefix every literal with '!' and join them with single spaces.

    >>> all_neg(['x1', 'x2', 'x3'])
    '!x1 !x2 !x3'
    >>> all_neg(['x1'])
    '!x1'
    """
    joined = " !".join(literals)
    return "!" + joined
b6da0186d22dbda835344141e57df9c6afa94267
19,058
def split_conn_PFI(connection):
    """Return PFI input number of a connection string such as 'PFI0' as an
    integer, or raise ValueError if format is invalid"""
    try:
        _, suffix = connection.split('PFI', 1)
        return int(suffix)
    except (ValueError, IndexError):
        msg = "PFI connection string %s does not match format 'PFI<N>' for integer N"
        raise ValueError(msg % str(connection))
b0d22bf7c4bcaafe087d9d3a982d4b17f10cd017
19,059
def format_block(block: list) -> str:
    """Render a flat 9-element list as three newline-separated rows of three
    space-separated values, e.g. [1..9] ->
    '1 2 3\\n4 5 6\\n7 8 9'.
    """
    rows = []
    for start in range(0, 9, 3):
        rows.append(" ".join(str(v) for v in block[start:start + 3]))
    return "\n".join(rows)
10c69f39ab26451a5fa29aa3232c0f3ca932deb6
19,060
def check_convergence(early_stop_count, v_loss, best_loss, tr_ratio, best_tr_ratio, epochs_since_prev_convergence, model, args):
    """ Verify if a boosted component has converged.

    Side effects: mutates ``best_loss[c]`` / ``best_tr_ratio[c]`` in place
    when the current component improved.

    :param early_stop_count: epochs without improvement so far
    :param v_loss: current validation loss
    :param best_loss: per-component best validation loss (mutated)
    :param tr_ratio: current training ratio metric
    :param best_tr_ratio: per-component best training ratio (mutated)
    :param epochs_since_prev_convergence: epochs since the last component converged
    :param model: boosted model exposing ``component`` and ``all_trained``
    :param args: config with ``early_stopping_epochs``, ``epochs_per_component``,
        ``annealing_schedule``
    :return: (converged, model_improved, early_stop_count, best_loss, best_tr_ratio)
    """
    c = model.component
    # Components after the first (or a fully trained model) use the stricter path.
    first_component_trained = model.component > 0 or model.all_trained
    model_improved = v_loss < best_loss[c]
    early_stop_flag = False
    if first_component_trained and v_loss < best_loss[c]:
        # tried also checking: tr_ratio > best_tr_ratio[c]), but simpler is better
        # already trained more than one component, boosted component improved
        early_stop_count = 0
        best_loss[c] = v_loss
        best_tr_ratio[c] = tr_ratio
    elif not first_component_trained and v_loss < best_loss[c]:
        # training only the first component (for the first time), and it improved
        early_stop_count = 0
        best_loss[c] = v_loss
    elif args.early_stopping_epochs > 0:
        # model didn't improve, do we consider it converged yet?
        early_stop_count += 1
        early_stop_flag = early_stop_count > args.early_stopping_epochs

    # Lastly, we consider the model converged if a pre-set number of epochs have elapsed
    time_to_update = epochs_since_prev_convergence % args.epochs_per_component == 0

    # But, model must have exceeded the warmup period before "converging"
    past_warmup = (epochs_since_prev_convergence >= args.annealing_schedule) or model.all_trained

    converged = (early_stop_flag or time_to_update) and past_warmup
    return converged, model_improved, early_stop_count, best_loss, best_tr_ratio
1b7318f6f21a3e9ad9b1769d5a9ed62bd430f7ba
19,061
def num_diffs(state):
    """
    Takes a state and returns the number of differences between adjacent
    entries.

    num_diffs(str) -> int
    """
    return sum(1 for a, b in zip(state, state[1:]) if a != b)
8ad010412a66badfa1ebf142429b5fd9752c78ee
19,062
def bfs(graph, start, path=None):
    """Breadth-first traversal of *graph* from *start*.

    Bug fix: the original used a mutable default argument (``path=[]``),
    so visited vertices leaked between successive calls. ``None`` is now
    the default and a fresh list is created per call; callers that passed
    an explicit list are unaffected.

    :param graph: mapping vertex -> iterable of neighbour vertices
    :param start: vertex to start from
    :param path: optional list to accumulate the visit order into
    :return: list of vertices in BFS visit order
    """
    if path is None:
        path = []
    queue = [start]
    while queue:
        vertex = queue.pop(0)
        if vertex not in path:
            path.append(vertex)
            queue.extend(graph[vertex])
    return path
69655c3af6faca0fe4b3a2c208153b1d2ed69cab
19,066
def lmp_Kc(link_key):
    """ Derive Kc from link key """
    # NOTE(review): stub — always returns empty bytes regardless of
    # `link_key`. The actual Bluetooth LMP encryption-key (Kc) derivation
    # is not implemented here; confirm before relying on this.
    Kc = b''
    return Kc
1279505d7840ddc94592c7a28ed60c0f00bceec2
19,069
import argparse


def get_args():
    """Parse all the arguments.

    Returns:
        A list of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="FCN for motif location")
    # (flag, dest, default, help) for every string option.
    options = (
        ("-d", "data_dir", None, "A directory containing the training data."),
        ("-n", "name", None, "The name of a specified data."),
        ("-g", "gpu", '0', "choose gpu device."),
        ("-c", "checkpoint", './models/', "Where to save snapshots of the model."),
        ("-o", "outdir", './motifs/', "Where to save experimental results."),
    )
    for flag, dest, default, help_text in options:
        parser.add_argument(flag, dest=dest, type=str, default=default,
                            help=help_text)
    return parser.parse_args()
dbc707fa4119500e425fbc321ff0d793ffdc5dfa
19,071
def with_metaclass(meta, *bases): """Create a base class with a metaclass for py2 & py3 This code snippet is copied from six.""" # This requires a bit of explanation: the basic idea is to make a # dummy metaclass for one level of class instantiation that replaces # itself with the actual metaclass. Because of internal type checks # we also need to make sure that we downgrade the custom metaclass # for one level to something closer to type (that's why __call__ and # __init__ comes back from type etc.). class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass("temporary_class", None, {})
513f9e212bd8f689b09c74867876d51ab1ac544b
19,072
def format_time(seconds):
    """
    Format seconds into a human-readable time string.

    Only the units that are non-zero at the top are shown, e.g.
    '1h 1min 1.50s', '2min 0.00s', '5.00s'.

    :param seconds: seconds, in float format
    :return: formatted time string
    """
    hours = int(seconds // 3600)
    minutes = int(seconds % 3600 // 60)
    secs = seconds % 60
    if hours:
        return '{:d}h {:d}min {:.02f}s'.format(hours, minutes, secs)
    if minutes:
        return '{:d}min {:.02f}s'.format(minutes, secs)
    return '{:.02f}s'.format(secs)
8c8ebeb8074f4a2f2f8c0e69497ba9a02744fab0
19,073
import tempfile
import os


def bytes_to_h5file(modelBytes):
    """
    Dump HDF5 file content bytes to a local file named ``model.h5`` inside
    a fresh temporary directory.

    :return: path to the file
    """
    out_dir = tempfile.mkdtemp()
    out_path = os.path.join(out_dir, "model.h5")
    with open(out_path, "wb") as fout:
        fout.write(modelBytes)
    return out_path
04590cd51bcc3c3939718672a7e3b3cadc6938b3
19,074
import time


def to_timestamp(dt):
    """
    Replicate python3 datetime.timestamp() method
    (https://docs.python.org/3.3/library/datetime.html#datetime.datetime.timestamp)
    https://stackoverflow.com/a/30021134

    Interprets the naive datetime in local time via ``time.mktime``.
    """
    seconds = time.mktime(dt.timetuple())
    return int(seconds + dt.microsecond / 1e6)
e2acf3453e68f0d211cb8dde04ea75a8b0861801
19,075
def quat_negate(q):
    """Return ``-q``: the quaternion with every component negated."""
    negated = q * -1.0
    return negated
57ae0dc235d6b80f41abe6506c33b9a024a4bcee
19,076
import io
import os
import re


def find_version(file_path):
    """
    Find the version of pygccxml.

    Used by setup.py and the sphinx's conf.py.
    Inspired by https://packaging.python.org/single_source_version/

    Args:
        file_path (str): path to the file containing the version, resolved
            relative to this module's own directory (``__file__``).

    Returns:
        str: the version captured from a ``__version__ = '...'`` line.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    with io.open(
            os.path.join(
                os.path.dirname(__file__),
                os.path.normpath(file_path)),
            encoding="utf8") as fp:
        content = fp.read()
    # re.M so ^ anchors the assignment at the start of any line in the file.
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              content, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
20ad8c0b185a7f9f066463e40c11d7343c57cca6
19,078
def get_assertion_message(assertion_details, **format_options):
    """Get a detailed message about the failing matcher.

    Returns '' for falsy details; otherwise a framed block:
    a centered title line, the details, and a separator line, each
    newline-terminated. Recognised format_options: ``separator`` (default
    '-'), ``title`` (default ' DETAILS '), ``nb_separator`` (default 40).
    """
    if not assertion_details:
        return ""
    sep = format_options.get("separator", "-")
    title = format_options.get("title", " DETAILS ")
    width = format_options.get("nb_separator", 40)
    pad = sep * ((width - len(title)) // 2)
    header = pad + title + pad
    return "{}\n{}\n{}\n".format(header, str(assertion_details), sep * width)
0f9ff4a3e5bd8ab4aefee3339cf44c0a5590b15d
19,079
def simplify_selector(selector):
    """
    Simplify a box-selector dict from the OCR intermediate / capture model
    format into ``{"box-selector": [x, y, w, h]}`` with integer values.

    Returns None when the selector is falsy, has no state, is not a
    box-selector, is missing any of x/y/width/height, or when a value
    cannot be parsed as an integer.
    """
    if not selector:
        return None
    state = selector.get("state")
    if not state:
        return None
    selector_type = selector.get("type")
    if selector_type != "box-selector":
        return None
    coords = [
        state.get("x"),
        state.get("y"),
        state.get("width"),
        state.get("height"),
    ]
    if any(value is None for value in coords):
        return None
    try:
        return {selector_type: [int(value) for value in coords]}
    except ValueError:
        return None
03629ff6acc767324ed4ff8b5e64010be533813c
19,081
def _SplitAndPad(s):
    """Splits a string on newlines, and pads the lines to be the same width.

    The width is one more than the longest line, so every returned line ends
    with at least one space.
    """
    pieces = s.split('\n')
    width = 1 + max(len(piece) for piece in pieces)
    return [piece.ljust(width, ' ') for piece in pieces]
f51976a558c6c76b26d9cf6e9302ce9d31436aed
19,083
def get_location(encounter):
    """
    Extract the first location entry from a FHIR-style encounter dict.

    Bug fix: the original guarded the id extraction with
    ``'individual' in encounter['location'][0]`` but then read the
    ``'location'`` key of that entry — an apparent copy/paste from a
    participant/individual helper — so the id was always left empty.
    The guard now checks the key that is actually read.

    :param encounter: FHIR Encounter resource as a dict
    :return: dict with id, reference and display of the first location,
        or {} when the encounter has no location
    """
    if "location" in encounter:
        entry = encounter["location"][0]
        loc_id = ""
        if "location" in entry:
            # References look like 'Location/<id>'; take the id part.
            loc_id = entry["location"]["reference"].split("/")[1]
        return {
            "id": loc_id,
            "reference": entry["location"]["reference"],
            "display": entry["location"]["display"],
        }
    else:
        return {}
309aedb2fd8cb1a3570af09547447e602261a876
19,084
def check_language_presence(match) -> bool:
    """Checks if some group is present inside the match object"""
    try:
        match.group('lang')
    except IndexError:
        # No group named 'lang' exists in the pattern.
        return False
    return True
7a0fe185af00bbd222e7d3706ff822eb94ebba2b
19,085
def to_unicode(value):
    """
    Converts string to unicode:

    * Decodes value from utf-8 if it is a byte string
    * Otherwise just returns the same value

    Improvement: uses the builtin ``bytes`` instead of ``six.binary_type``
    (they are the same type on Python 3), removing this block's dependency
    on the third-party ``six`` package.
    """
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return value
1bca40318e9e311f5ad26cf7035cef656f14cb0e
19,086
def strip_balanced_edge_parens(s):
    """
    Return a string where a pair of balanced leading and trailing parenthesis
    is stripped.

    For instance:
    >>> strip_balanced_edge_parens('(This is a super string)')
    'This is a super string'
    >>> strip_balanced_edge_parens('(This is a super string')
    '(This is a super string'
    >>> strip_balanced_edge_parens('This is a super string)')
    'This is a super string)'
    >>> strip_balanced_edge_parens('(This is a super (string')
    '(This is a super (string'
    >>> strip_balanced_edge_parens('(This is a super (string)')
    '(This is a super (string)'
    """
    if not (s.startswith('(') and s.endswith(')')):
        return s
    inner = s[1:-1]
    # Only strip when the interior carries no parentheses of its own.
    if '(' in inner or ')' in inner:
        return s
    return inner
3e603fbd48ec9a107ab23a2c7a0282efb9f1ee36
19,087
def _decode_data(data):
    """For python3: if the data is bytes, decode it to str.

    Tries UTF-8 with undecodable bytes ignored; falls back to latin-1
    on a UnicodeDecodeError. Non-bytes input is returned unchanged.
    """
    if not isinstance(data, bytes):
        return data
    try:
        return data.decode("utf-8", "ignore")
    except UnicodeDecodeError:
        return data.decode("iso8859-1", )
ef50a76c46a95419454431d08ac0090efd80c308
19,088
def hex_to_rgb(hexcode):
    """Convert a hex colour code (with or without a '#' prefix) to an
    (r, g, b) tuple of ints, reading the last six hex digits."""
    red = int(hexcode[-6:-4], 16)
    green = int(hexcode[-4:-2], 16)
    blue = int(hexcode[-2:], 16)
    return (red, green, blue)
0bf09edac600dcf1e6e0bd527d3e48d702a98add
19,090
import time


def timestamp_to_date(timestamp=None, ft=None):
    """Format a unix timestamp (local time) as a date string.

    :param timestamp: seconds since the epoch; current UTC time when None
    :param ft: strftime format; defaults to '%Y-%m-%d'
    :return: formatted date string
    """
    fmt = '%Y-%m-%d' if ft is None else ft
    if timestamp is None:
        parsed = time.gmtime()
    else:
        # Round-trip through ctime to mirror the original behaviour
        # (interprets the timestamp in local time).
        parsed = time.strptime(time.ctime(timestamp), '%a %b %d %H:%M:%S %Y')
    return time.strftime(fmt, parsed)
050993997df41be1cf7cebe4b5c972d899d9aac9
19,093
from typing import List, Union


def add_n(obj: Union[int, List], n: int) -> Union[int, List]:
    """Return a new nested list where <n> is added to every item in <obj>.

    >>> add_n(10, 3)
    13
    >>> add_n([1, 2, [1, 2], 4], 10)
    [11, 12, [11, 12], 14]
    """
    if isinstance(obj, int):
        return obj + n
    return [add_n(item, n) for item in obj]
803d730e005aa81afcdeae48819aeb312b27e3f7
19,094
def add_type(job_json: dict) -> dict:
    """Get the type for a job based on its name and add it to the job dict.

    Job names look like ``user_stage_<NAME...>[_<RAM>]_<ID>``: the first two
    underscore fields and the trailing ID are dropped, and a trailing numeric
    RAM field is dropped too. The remainder, re-joined with underscores, is
    stored under the 'type' key. Mutates and returns *job_json*.
    """
    # Drop the leading user/stage fields and the trailing ID in one slice.
    parts = job_json["jobName"].split("_")[2:-1]
    # A numeric last field is a RAM amount, not part of the name.
    if parts[-1].isnumeric():
        parts = parts[:-1]
    job_json["type"] = "_".join(parts)
    return job_json
d0f361a80ed50e386805806d409608f878fb5adb
19,095
def adam(grad, state_sum, nodes, lr, device, only_gpu):
    """ calculate gradients according to adam

    NOTE(review): this is an Adagrad-style update (accumulated squared
    gradients, no first/second-moment decay) despite the name — confirm
    intent. Mutates `state_sum` in place via index_add_.

    :param grad: per-node gradient tensor; assumed 2-D (nodes x dim) given
        the `.mean(1)` / `.unsqueeze(1)` usage — TODO confirm
    :param state_sum: running sum of mean squared gradients, indexed by node
    :param nodes: node indices corresponding to rows of `grad`
    :param lr: learning rate
    :param device: device the returned update should live on
    :param only_gpu: when False, the accumulator is kept on CPU
    """
    grad_sum = (grad * grad).mean(1)
    if not only_gpu:
        # Accumulator lives on CPU unless everything is kept on GPU.
        grad_sum = grad_sum.cpu()
    state_sum.index_add_(0, nodes, grad_sum)  # cpu
    std = state_sum[nodes].to(device)  # gpu
    # sqrt_/add_ operate on the gathered copy, not on state_sum itself.
    std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
    grad = (lr * grad / std_values)  # gpu
    return grad
047a2d8050dbb31fbe53dcde7627c231ae0fca9a
19,096
def parse():
    """Read 'in.txt' and return its edge list.

    Each line has the form '<src> <-> <d1>,<d2>,...'; one (src, dest)
    tuple is produced per destination.
    """
    edges = []
    with open('in.txt') as f:
        for line in f:
            left, right = line.split(' <-> ')
            src = int(left)
            for dest in right.split(','):
                edges.append((src, int(dest)))
    return edges
c9a5da3031bafe6c52fd9aff02ad187dec86e941
19,097
from typing import List


def decoupage_mots(phrase: str) -> List[str]:
    """Precondition: phrase consists of words separated by spaces.

    Return the list of words of the phrase (splitting on single spaces,
    ignoring empty runs).
    """
    words: List[str] = []
    current: str = ""  # word being accumulated
    for ch in phrase:
        if ch != ' ':
            current = current + ch
        elif current:
            words.append(current)
            current = ""
    # Flush the final word, if any.
    if current:
        words.append(current)
    return words
58106293a8e8485979d3d4f3122f8bd06b314003
19,099
def AlgoGlouton(k, V, s):
    """Greedy jar-filling algorithm.

    Repeatedly picks the largest capacity in V that still fits the remaining
    volume (scanning stops at the first capacity exceeding the remainder,
    as in the original) and counts how often each capacity is used.

    :param k: number of capacities in the system
    :param V: list of the k jar capacities
    :param s: initial volume to distribute
    :return: (total number of jars used, per-capacity usage list)
    """
    uses = [0] * k
    remaining = s
    while remaining > 0:
        best = 0
        i = 0
        while i < k and V[i] <= remaining:
            if V[i] > V[best]:
                best = i
            i += 1
        remaining -= V[best]
        uses[best] += 1
    return sum(uses), uses
be8a846e58678a0b6566864131359997c79d5476
19,100
def splitdefines(txt):
    """Split source text into two lists: preprocessor '#' lines and the rest.

    :param txt: source code as a single string
    :return: (defines, code) — both lists of lines, original order kept
    """
    pre, body = [], []
    for line in txt.split("\n"):
        target = pre if line.startswith("#") else body
        target.append(line)
    return pre, body
bcd82feb293116f39cd1d514484847ad0d20bbe5
19,101
from collections import Counter


def maximum_points(table, buttons_count):
    """Count how many distinct key labels can always be pressed in full.

    Each cell of *table* is a string of key labels. At time t every key
    labelled t must be pressed simultaneously, and a player can press at
    most *buttons_count* keys at once; a label therefore scores one point
    only if its total number of occurrences is within that limit.

    Fix: imports ``Counter`` from ``collections`` instead of the
    deprecated ``typing.Counter`` alias (deprecated since Python 3.9);
    behaviour is unchanged.

    :param table: iterable of strings, concatenated and tallied per character
    :param buttons_count: maximum simultaneous presses
    :return: number of distinct labels with count <= buttons_count
    """
    occurrences = Counter(''.join(table))
    return sum(1 for count in occurrences.values() if count <= buttons_count)
22717b8a96667b131f9ef48c87ddfd6be5dbce71
19,102
def translate(sentences, dict_words, dict_files, size):
    """Translate numbers back to words in a list format for kitconc KIWC object.

    Bug fix: the original reused ``i`` both as the sentence counter and as
    the inner word-id loop variable, so the first tuple element was the last
    word id of the sentence instead of the sentence number. The counter is
    now kept separate (1-based, as the original incremented before use).

    :param sentences: iterables of (word_ids, position, ref, file_id)
    :param dict_words: mapping word id -> word string
    :param dict_files: mapping file key -> file id (inverted internally)
    :param size: context window size used to offset the stored position
    :return: list of concordance tuples
    """
    id_to_file = {v: k for k, v in dict_files.items()}  # reverse keys
    concordance = []
    for index, sent in enumerate(sentences, start=1):
        words = [dict_words[word_id] for word_id in sent[0]]
        concordance.append((index,
                            ' '.join(words),
                            id_to_file[sent[3]][0],
                            sent[1] - (size - 1),
                            str(sent[2]),
                            str(sent[3])))
    return concordance
db45ce59fe5f0b407d78e887cb454dc16b032116
19,105
def right_bisect(sorted_list, element):
    """Binary search for *element* in *sorted_list* from the right.

    Returns the index of the first entry greater than *element*, capped at
    len(sorted_list) - 1 (unlike ``bisect.bisect_right``, which can return
    len(sorted_list)).
    """
    lo, hi = 0, len(sorted_list) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if sorted_list[mid] <= element:
            lo = mid + 1
        else:
            hi = mid
    return hi
eb9085431feb90a102fa814c401861f954e8516b
19,106
import yaml


def load_settings(path: str):
    """Load a yaml settings file.

    Args:
        path (str): Path to the settings file.

    Returns:
        The parsed settings structure (as produced by yaml.FullLoader).
    """
    with open(path, "r") as settings_file:
        return yaml.load(settings_file, Loader=yaml.FullLoader)
9cb4b3e6418cdd82aa601e2cf16aa183383157ef
19,107