Columns:
    content: string, lengths 35 to 416k
    sha1: string, length 40
    id: int64, values 0 to 710k
from datetime import datetime


def get_timestamp():
    """
    Obtains current timestamp in standard format with milliseconds

    Examples:
    >>> get_timestamp()
    '2017-08-22 20:34:54,584'

    @retval string A consistent-length date and time string
    """
    return datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S,%f")[:-3]
5b3d1f8098233c09fc485d060b5903d2027df0c9
12,323
def _format_media_type(endpoint, version, suffix):
    """
    Formats a value for a cosmos Content-Type or Accept header key.

    :param endpoint: a cosmos endpoint, of the form 'x/y',
                     for example 'package/repo/add', 'service/start',
                     or 'package/error'
    :type endpoint: str
    :param version: The version of the request
    :type version: str
    :param suffix: The string that will be appended to endpoint type,
                   most commonly 'request' or 'response'
    :type suffix: str
    :return: a formatted value for a Content-Type or Accept header key
    :rtype: str
    """
    prefix = endpoint.replace('/', '.')
    separator = '-' if suffix else ''
    return ('application/vnd.dcos.{}{}{}'
            '+json;charset=utf-8;version={}').format(prefix, separator,
                                                     suffix, version)
993e178fc2a91490544936e019342e6ab8f928ce
12,325
def fill_result_from_objective_history(result, history):
    """
    Overwrite function values in the result object with the values recorded
    in the history.
    """
    # counters
    result.n_fval = history.n_fval
    result.n_grad = history.n_grad
    result.n_hess = history.n_hess
    result.n_res = history.n_res
    result.n_sres = history.n_sres

    # initial values
    result.x0 = history.x0
    result.fval0 = history.fval0

    # best found values
    result.x = history.x_min
    result.fval = history.fval_min

    # trace
    result.trace = history.trace

    return result
a2f1388c5d71a06f45369098039a3fa623a25735
12,326
import hashlib
import sqlite3


def hash_blocks_until(db, n):
    """Returns combined hash of all block hashes in db until block_height n"""
    sha224 = hashlib.sha224()
    with sqlite3.connect(db) as ledger_check:
        ledger_check.text_factory = str
        h3 = ledger_check.cursor()
        for row in h3.execute(
                "SELECT block_hash FROM transactions WHERE "
                "block_height > -{} AND block_height < {} "
                "ORDER BY block_height ASC".format(n, n)):
            sha224.update(str(row[0]).encode("utf-8"))
    return sha224.hexdigest()
6bb59b063e9512483783dab70322082247638f46
12,327
def Armijo_Rule(f_next, f_initial, c1, step_size, pg_initial):
    """
    :param f_next: New value of the function to be optimized wrt/ step size
    :param f_initial: Value of the function before line search for optimum step size
    :param c1: 0 < c1 < c2 < 1
    :param step_size: step size to be tested
    :param pg_initial: inner product of step direction, p, with the gradient
                       before stepping, g_initial
    :return: True if condition is satisfied
    """
    return f_next <= f_initial + c1 * step_size * pg_initial
90462eda94244c10afdf34e1ad042118d793c4fd
12,328
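Usage sketch for the snippet above: a backtracking line search built on the rule. All values here (the quadratic f, starting point, shrink factor) are illustrative, not from the dataset.

# Minimize f(x) = x^2 from x = 3 along the negative gradient direction.
f = lambda x: x ** 2
x, grad, direction = 3.0, 6.0, -6.0  # f'(3) = 6; descend along -grad
step, c1 = 1.0, 1e-4
pg = direction * grad                # inner product <p, g> = -36
while not Armijo_Rule(f(x + step * direction), f(x), c1, step, pg):
    step *= 0.5                      # shrink until sufficient decrease holds
print(step)                          # 0.5: x + 0.5 * direction lands at the minimum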
def get_version():
    """Return the version."""
    return "0.1"
318c8ee9c5ecfb8b603e7d756879947145fcc242
12,330
import os


def find_hcp_data():
    """
    Returns the HCP data path defined in the environment.
    """
    # os.getenv already returns None when the variable is unset,
    # so no exception handling is needed.
    return os.getenv('HCP_DATA')
77611b86852523cb117fecee74dd711b22b33463
12,331
import socket


def connect(host, port):
    """Connect to remote host."""
    # Create socket
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket.error as msg:
        return (None, msg)
    # Get remote IP
    try:
        addr = socket.gethostbyname(host)
    except socket.gaierror as msg:
        s.close()
        return (None, msg)
    # Connect
    try:
        s.connect((addr, port))
    except socket.error as msg:
        s.close()
        return (None, msg)
    return (s, None)
ab03e5551f0fbf92c6e2417635bc753d5b77eae2
12,332
import os


def allowed_to_access_dir(path):
    """
    Checks whether we have permission to access the specified directory.
    This includes read, write, and execution.

    :param path: the path to the directory
    :type path: string
    :return: whether access is allowed
    :rtype: boolean
    """
    try:
        os.makedirs(path, exist_ok=True)
    except OSError:
        print("error: can't write to " + path)
    return os.access(path, os.R_OK | os.W_OK | os.F_OK | os.X_OK)
763fa485d7c35ef5a4d283c1d0362d07c56b2179
12,334
import pprint


def pfomart(obj):
    """Pretty-format an object.

    :param obj: the object to format
    :return: the formatted string
    """
    return pprint.pformat(obj)
2f2c7d7df9de9bb65c72634e8ba9386365408445
12,335
from typing import Any
from typing import Type


def extend_class(base_cls: Any, cls: Any) -> Type:
    """Apply mixins"""
    base_cls_name = base_cls.__name__
    return type(base_cls_name, (cls, base_cls), {})
5ed297f9115ec3940991b3536c207da3f6b036d0
12,336
def journal_info():
    """Create an info dictionary for edit or create later."""
    info = {
        'title': 'testing',
        'body': 'testing_body',
        'creation_date': '2017-11-02'
    }
    return info
e596f42ec158b7635b248558c924e3af56ae33ea
12,337
import re


def rle2cells(rle_str: str) -> str:
    """Convert lifeform string in RLE encoding to PlainText

    Args:
        rle_str (str): single line of RLE commands

    Returns:
        str: valid PlainText-encoded lifeform
    """
    # drop the last part
    if "!" in rle_str:
        rle_str = rle_str[: rle_str.index("!")]
    else:
        raise ValueError('Incorrect input: no "!"')

    if not set(rle_str).issubset("0123456789bo$"):
        raise ValueError("Incorrect input: wrong character set")

    commands = re.findall("([0-9]*)(b|o|\\$)", rle_str)
    if len(commands) == 0:
        raise ValueError("Incorrect input: wrong pattern format")

    layout_string = ""
    parse_dict = {"b": ".", "o": "O", "$": "\n"}
    for com in commands:
        n = int(com[0]) if com[0] else 1
        layout_string += parse_dict[com[1]] * n

    return layout_string
e48ea40bd9032445e1aabe4753b7fbdcc62191ed
12,338
def format_list_of_floats(float_list, length_per_value=6, digit_after_point=4):
    """Returns a formatted string"""
    string = "["
    str_format = " {:" + str(length_per_value) + "." + str(digit_after_point) + "f},"
    for value in float_list:
        string += str_format.format(value)
    string += "]"
    return string
622d20132d7e9af19fc3a143306268e8381c457c
12,339
import argparse


def parse_args(args):
    """
    Parse script arguments.

    :return: Parsed args for assignment
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--build-directory', required=True,
                        help="Top level directory that stores all the cloned repositories.",
                        action='store')
    parser.add_argument('--debian-depth',
                        help="The depth in top level directory that you want"
                             " this program to look into to find debians.",
                        default=3, type=int, action='store')
    parser.add_argument('--bintray-credential', required=True,
                        help="bintray credential for CI services: <Credentials>",
                        action='store')
    parser.add_argument('--bintray-subject', required=True,
                        help="the Bintray subject, which is either a user or an organization",
                        action='store')
    parser.add_argument('--bintray-repo', required=True,
                        help="the Bintray repository name",
                        action='store')
    parser.add_argument('--bintray-component', help="such as: main",
                        action='store', default='main')
    parser.add_argument('--bintray-distribution', help="such as: trusty, xenial",
                        action='store', default='trusty')
    parser.add_argument('--bintray-architecture', help="such as: amd64, i386",
                        action='store', default='amd64')
    return parser.parse_args(args)
808801c29f6059997776222abb340581c1949bec
12,341
import torch


def __fan(mask, iter):
    """Fans out bitmask from input to output at `iter` stage of the tree

    See arrows in Fig. 1 (right) of
    Catrina, O. "Improved Primitives for Secure Multiparty Integer Computation"
    """
    multiplier = (1 << (2 ** iter + 1)) - 2
    if isinstance(mask, (int, float)) or torch.is_tensor(mask):
        return mask * multiplier
    # Otherwise assume BinarySharedTensor
    result = mask.clone()
    result._tensor *= multiplier
    return result
e98679e04ff4b4cab2f281bfed0085dfab7a6484
12,342
def pointcloud_normalization(ptc1):
    """Normalize pointcloud as out=(input-input.mean())/input.std()"""
    # Normalize each of the x, y, z columns independently; skip the division
    # when a column is constant (std == 0) to avoid dividing by zero.
    for i in range(3):
        col = ptc1[:, i]
        if col.std() == 0:
            ptc1[:, i] = col - col.mean()
        else:
            ptc1[:, i] = (col - col.mean()) / col.std()
    return ptc1
cca4f23d810a56a0f06115de67d2f2ec6feee918
12,344
def pow_sq(x, y):
    """
    Compute x^y without calling pow(), using exponentiation by squaring.
    """
    r = 1
    while y:
        if y & 1:
            r = r * x
        x = x * x
        y >>= 1
    return r
f6d82257ff909d9c890360dcd9408db0d742b13d
12,345
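A short check of the square-and-multiply loop above (illustrative):

# Each iteration squares the base and consumes one exponent bit, so 2^10
# costs ~log2(10) squarings instead of 9 multiplications.
assert pow_sq(2, 10) == 1024
assert pow_sq(3, 0) == 1
assert pow_sq(5, 7) == 5 ** 7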
def list_attributes(any_class):
    """List all public methods and attributes of a class
    sorted by definition order
    """
    return [x for x in any_class.__dict__.keys() if not x.startswith("_")]
fb3d8624b318a13b40f7b6f70482671943c37fc0
12,346
def get_counts_string(match):
    """
    Looks for keys matching top_events_X in matches, generated by
    get_top_counts, and returns a readable string about the various counts.
    """
    message = ''
    for key, counts in match.items():
        if key.startswith('top_events_'):
            message += '%s:\n' % (key[11:])
            # dict views cannot be sorted in place; sort a list copy instead
            top_events = sorted(counts.items(), key=lambda x: x[1], reverse=True)
            for term, count in top_events:
                message += '%s: %s\n' % (term, count)
            message += '\n'
    return message
28674902c488add7e5e50af7bcf1a555255a07f7
12,347
def colorizer(x, y):
    """
    Map x-y coordinates to a rgb color
    """
    r = min(1, 1 - y / 3)
    g = min(1, 1 + y / 3)
    b = 1 / 4 + x / 16
    return (r, g, b)
6b135728e02148af5368bc21bfb7d1bed90200d5
12,349
def get_point(values, pct):
    """
    Pass in array values and return the point at the specified top percent

    :param values: array: float
    :param pct: float, top percent
    :return: float
    """
    assert 0 < pct < 1, "percentage should be strictly between 0 and 1"
    values = sorted(values)
    return values[-int(len(values) * pct)]
11474d804e0845284a864a5cb8de23299999a5e7
12,351
import os

import yaml


def _get_smartstack_proxy_ports_from_file(root, file):
    """Given a root and file (as from os.walk), attempt to return the set of
    smartstack proxy port numbers (ints) from that file.

    Returns an empty set if the file defines no smartstack proxy_port.
    """
    ports = set()
    with open(os.path.join(root, file)) as f:
        data = yaml.safe_load(f)

    if file.endswith('service.yaml') and 'smartstack' in data:
        # Specifying this in service.yaml is old and deprecated and doesn't
        # support multiple namespaces.
        ports = {int(data['smartstack'].get('proxy_port', 0))}
    elif file.endswith('smartstack.yaml'):
        for namespace in data.keys():
            ports.add(data[namespace].get('proxy_port', 0))

    return ports
7456db23657a158f083247a4300fd891a0034b97
12,352
def prepare_for_table(data, machine_id):
    """We don't want to store certain fields in the DB, e.g. the _BOOT_ID or
    the __CURSOR, because they don't add any value in the central log index.
    """
    def defint(x):
        return 0 if x == '' else int(x)

    keep_and_convert = {
        'MESSAGE': str,
        'PRIORITY': defint,
        '__REALTIME_TIMESTAMP': defint,
        '_PID': defint,
        '_UID': defint,
        '_SYSTEMD_UNIT': str,
        'SYSLOG_IDENTIFIER': str,
        '_COMM': str,
    }
    result = dict((key, converter(data.get(key, '')))
                  for key, converter in keep_and_convert.items())
    result['MACHINE_ID'] = machine_id
    return data['__CURSOR'], result
54c75fac6206c0c5e6b8d44553853c0c624080fa
12,353
def check_columns(df):
    """
    Checks whether the dataframe contains the required columns.

    :param df: the data frame with track data.
    :return: whether or not df contains all the required columns.
        required columns: refName, start, end, name, score (is optional)
    :rtype: boolean
    """
    required = ["refName", "start", "end", "name"]
    return all(col in df for col in required)
3ddc53fc0e2caad74b3218f7f47134aed258cba9
12,354
def accuracy_inf_sol(inferred, cliques_solution):
    """
    'inferred' should be a set of vertices
    'cliques_solution' an iterable of all the solution cliques (as sets)
    """
    assert len(cliques_solution) != 0, "No solution provided!"
    max_overlap = 0
    best_clique_sol = cliques_solution[0]
    clique_size = len(cliques_solution[0])
    for cur_clique in cliques_solution:
        temp_inter = cur_clique.intersection(inferred)
        cur_overlap = len(temp_inter)
        if cur_overlap > max_overlap:
            max_overlap = cur_overlap
            best_clique_sol = cur_clique
    return max_overlap, clique_size, best_clique_sol
d2d38d7f3470520058f90699dab6b7eb59cbd5cf
12,355
def _ziprange(alist, ix):
    """
    returns zip of the list, and the one with ix added at the end
    and first element dropped

    Example
    -------
    ::

        alist = [2,4,7]
        _ziprange(alist, 10)

        --> zip([2,4,7], [4,7,10]) -> (2,4), (4,7), (7,10)
    """
    blist = alist.copy()
    blist.append(ix)
    del blist[0]
    return zip(alist, blist)
e816ad20e8c193487d8483cdfd3ee27d77fef814
12,356
import os


def iterFilePaths(root_path):
    """Walks through all files in all subdirectories of the given root_path
    and yields their full path."""
    # Join each filename with the directory it was found in (not root_path),
    # so files inside subdirectories get correct paths.
    return (os.path.join(dirpath, name)
            for dirpath, _, filenames in os.walk(root_path)
            for name in filenames)
cee890e16210967a75b0e0b7fc915b62b9202a2c
12,357
import re


def title_output_replace(input_title_output, metadata_dict, data_dict,
                         rel_channels_dict, is_title=False, custom_vars=None):
    """Substitute %VAR% variables with provided values and constants.

    Given a title (or output path) string template, replace %VAR%
    variables with the appropriate value or constant.

    Variables supported include:

        * %EXPERIMENT_ID% - Experiment ID, sourced from metadata_dict.
        * %INSTRUMENT_SAT% - Instrument/satellite ID, sourced from
          metadata_dict.
        * %CHANNEL% - Channel number, sourced from metadata_dict.
        * %RELCHANNEL% - Relative channel number, indirectly sourced from
          rel_channels_dict.
        * %FREQUENCY% - Frequency of the selected channel, sourced from the
          frequency field in data_dict.
        * %ASSIMILATION_STATUS% - Placeholder for the assimilation status,
          which is determined from the iuse field in data_dict (not done
          here).
        * %START_DATE% - Start date in YYYYMMDD format, sourced from
          metadata_dict.
        * %END_DATE% - End date in YYYYMMDD format, sourced from
          metadata_dict.
        * Additional custom %VARS% sourced from custom_vars, if specified.

    Args:
        input_title_output (str): The string with the title (or output
            path) template.
        metadata_dict (dict): The metadata dictionary containing data
            source information.
        data_dict (dict): The data dictionary to retrieve values from for
            certain %VAR% variables. See get_data() help (in data.py) for
            more information on its format.
        rel_channels_dict (dict): The relative channels dictionary to map
            relative channels to actual data channels. Its keys are the
            relative channels, and its values are the actual data channels.
            (In this case, the mapping is reversed later to allow easy
            conversion from data channel to relative channel.)
        is_title (bool): Boolean indicating whether the string template is
            a title or not. This affects how and what variables are
            replaced. By default, this is set to False.
        custom_vars (dict): Dictionary containing custom variables to be
            replaced. Its keys are %VAR% variables without the percent
            sign, and its values are what should take their places. For
            instance, given { "TESTVAR": "test123" }, the template
            "%TESTVAR%" should be replaced with "test123". By default,
            this is set to None - this argument is optional if there are
            no custom variables.

    Returns:
        str: A string with %VAR% variables replaced with the appropriate
        value or constant. Some %VAR% variables may not be replaced if
        they do not exist, or certain conditions are not met.
    """
    # Replace experiment ID variable
    input_title_output = input_title_output.replace(
        "%EXPERIMENT_ID%", metadata_dict["experiment_id"])

    # Capitalize %INSTRUMENT_SAT% if we're using it in a title.
    if is_title:
        input_title_output = input_title_output.replace(
            "%INSTRUMENT_SAT%", metadata_dict["instrument_sat"].upper())
    else:
        input_title_output = input_title_output.replace(
            "%INSTRUMENT_SAT%", metadata_dict["instrument_sat"])

    # Replace data channel variable
    input_title_output = input_title_output.replace(
        "%CHANNEL%", str(metadata_dict["channel"]))

    # Reverse the channel map
    # Original: rel_channel -> actual data channel
    # Inverted: actual data channel -> rel_channel
    rel_channels_inv_map = dict(
        zip(rel_channels_dict.values(), rel_channels_dict.keys()))
    input_title_output = input_title_output.replace(
        "%RELCHANNEL%",
        str(rel_channels_inv_map[metadata_dict["channel"]]))

    # Ensure that we have adequate data to determine frequency.
    # If we do, replace the frequency variable!
    if data_dict and "frequency" in data_dict:
        input_title_output = input_title_output.replace(
            "%FREQUENCY%", str(data_dict["frequency"]))

    # Replace assimilation status placeholder... only if it's a title.
    if is_title:
        input_title_output = input_title_output.replace(
            "%ASSIMILATION_STATUS%", " .......................")

    # Replace date variables
    input_title_output = input_title_output.replace(
        "%START_DATE%",
        str(metadata_dict['start_year']).zfill(4)
        + str(metadata_dict['start_month']).zfill(2)
        + str(metadata_dict['start_day']).zfill(2))
    input_title_output = input_title_output.replace(
        "%END_DATE%",
        str(metadata_dict['end_year']).zfill(4)
        + str(metadata_dict['end_month']).zfill(2)
        + str(metadata_dict['end_day']).zfill(2))

    # Custom vars
    if custom_vars:
        for custom_var in custom_vars:
            # Do a case insensitive replace
            replace_re = re.compile(re.escape('%' + custom_var + '%'),
                                    re.IGNORECASE)
            input_title_output = replace_re.sub(custom_vars[custom_var],
                                                input_title_output)

    return input_title_output
83102c1557643b67ce1c52f0c69154bcfac4ac72
12,358
def format_data(data, es_index):
    """
    Format data for bulk indexing into elasticsearch
    """
    unit = data["unit"]
    rate_unit = data["rateUnit"]
    egvs = data["egvs"]
    docs = []
    for record in egvs:
        record["unit"] = unit
        record["rate_unit"] = rate_unit
        record["@timestamp"] = record.pop("systemTime")
        record.pop("displayTime")
        record["realtime_value"] = record.pop("realtimeValue")
        record["smoothed_value"] = record.pop("smoothedValue")
        record["trend_rate"] = record.pop("trendRate")
        docs.append({"_index": es_index, "_type": "document", "_source": record})
    return docs
094af427daaf4922371e17fca70a2b8c8539d54c
12,359
from pathlib import Path
import subprocess
import os


def add_video_padding(video_path: Path) -> Path:
    """Adds audio padding to previously rendered videos.

    This ensures that there won't be a shift between the audio and video
    when rendering the final project.

    This function does not edit the original video. It creates a new file
    from the padded version of the provided file. `add_video_padding()`
    also **removes** the original video.

    Args:
        video_path (Path): The path towards an `mp4` which will be used
            to create a new padded video.

    Returns:
        Path: The path towards the padded video. Those paths follow this
        scheme:

            [videos-path]/padded_[id].mp4
    """
    # File stem as a format of `[name]_[id]`.
    video_id: str = video_path.stem.split("_")[1]
    output_path: Path = video_path.parent / Path(f"padded_{video_id}.mp4")

    subprocess.run(
        [
            "ffmpeg",
            "-i", f"{video_path}",
            "-af", "apad",
            "-c:v", "copy",
            "-safe", "0",
            "-shortest",
            "-avoid_negative_ts", "make_zero",
            "-fflags", "+genpts",
            f"{output_path}",
        ],
        check=True,
    )

    # Removing not padded video.
    os.remove(video_path)

    return output_path
e505286c11f4b791ed1202abb8d46c0d11b516d1
12,360
def paste_filename(search):
    """
    Function that will create a name for the files to be saved to
    using the search
    """
    # Removes any spaces
    cleaned_keyword = search.replace(' ', '_')
    # Adds 'videos.csv' at the end
    filename = cleaned_keyword + "_videos.csv"
    return filename
3279ef21e039b7a63a728a6d02714086a61f3e0e
12,361
def input_default(default_word='', tip_words='Please input words.'):
    """return default_word while input() returns blank"""
    input_data = input(tip_words + '(default: ' + default_word + ')\n')
    if input_data.strip() == '':
        print('Blank input. Using the default value: {0}'.format(default_word))
        return default_word
    else:
        return input_data
5873fd6b29f932c8f59262fb44749827eb9bcc29
12,362
import torch


def repeat_column(column: torch.Tensor, times: int) -> torch.Tensor:
    """
    Repeats the given column the given number of times.

    :param column: the column to repeat. Size [H].
    :param times: the number of repetitions = W.
    :return: the given column repeated the given number of times. Size [H, W].
    """
    return column.unsqueeze(1).repeat(1, times)
dfa955fbff0c4b87a7f5cef729a454eb99c93760
12,364
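A quick shape check for the snippet above (illustrative):

import torch

# A column of length 3 repeated 4 times yields a [3, 4] matrix
# whose columns are all equal to the input.
col = torch.tensor([1.0, 2.0, 3.0])
mat = repeat_column(col, 4)
assert mat.shape == (3, 4)
assert torch.equal(mat[:, 0], col)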
import unicodedata


def remove_punctuation(text):
    """Replaces all Unicode punctuation except dashes (Pd) with whitespaces.

    Potentially induces duplicate whitespaces.
    """
    text = ''.join(
        ' ' if unicodedata.category(c)[0] == 'P'
        and unicodedata.category(c)[1] != 'd'
        else c
        for c in text)
    return text
0bf4c1f9100051a09252bcd3f24a8a593dc02ce2
12,365
import time


def _strTogMonth(v):
    """Test gMonth value

    @param v: the literal string
    @return v
    @raise ValueError: invalid value
    """
    try:
        time.strptime("2001-" + v + "-01", "%Y-%m-%d")
        return v
    except ValueError:
        raise ValueError("Invalid gMonth %s" % v)
64962552ab9ae5df06f76f07c91c549927caa0dd
12,366
def parse_markers(f):
    """
    Parse markers from mrk file f. Each marker determines a time point and
    an according class-label of the movement that was imagined.

    Args:
        f (String) - an mrk file

    Returns:
        tuple of lists of ints - one list for the markers, one for the labels
    """
    mrks = list()
    y = list()
    with open(f) as f_m:
        for line in f_m:
            mrk, cls = line.strip('\n').split('\t')
            mrks.append(int(float(mrk)))
            y.append(int(float(cls)))
    return mrks, y
670ce43529a7aae4f4ed4341938a90eb6e714fb3
12,367
import random


def randomFlip(input_image, flip_prob=0.5):
    """flip the single image horizontally with probability flip_prob"""
    if random.random() <= flip_prob:
        output_image = input_image[:, ::-1, :]
        FLAG = 1
    else:
        output_image = input_image
        FLAG = 0
    return output_image, FLAG
93587e838439aea12ea84ba44556c8575b7e3759
12,368
def dp_key(relations):
    """
    generates a unique key for the dptable dictionary

    :param relations: set of relations
    :return: str
    """
    return '-'.join(sorted([r.name for r in relations]))
e415778193d5a5c90ba7574bccc7e82d0d95c2e8
12,369
def as_list(obj, tuple_to_list=False, if_none=NotImplemented):
    """
    This is useful to allow arguments that are theoretically lists, but tend
    to be single items in practice. As syntactic sugar, we allow passing in
    these single items in some cases. For example, multiple allowed statuses
    or zones might be passed into the below functions, but it tends to just
    be one:
    """
    if obj is None:
        if if_none == NotImplemented:
            # Here NotImplemented is a magic value indicating
            # no-special casing, we will return [None]
            pass
        elif type(if_none) == type and issubclass(if_none, Exception):
            # Note that issubclass raises a TypeError if a non-class is
            # passed in, thus we have to check the type first
            raise if_none("as_list received None as input")
        else:
            return if_none
    if not hasattr(obj, '__iter__') or (tuple_to_list and type(obj) is tuple):
        obj = [obj]
    return obj
d1331365111cf16919285575600f437e09cb0405
12,372
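A few illustrative calls showing the single-item sugar in the snippet above:

# Non-iterable scalars get wrapped, iterables (including strings) pass
# through unchanged, and None follows the if_none policy.
assert as_list(5) == [5]
assert as_list([1, 2]) == [1, 2]
assert as_list("ab") == "ab"          # strings are iterable, so no wrapping
assert as_list(None) == [None]
assert as_list(None, if_none=[]) == []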
def _format_command_stdout(stdout):
    """
    Formats the output from stdout returned from subprocess
    """
    lines, list_of_strs = stdout.splitlines(), list()
    for line in lines:
        list_of_strs.append(line.decode())
    return list_of_strs
69603c16f35f92cc4da7d2b3710ffa44f295d24d
12,373
def somme(*nombres):
    """Return the sum of the given numbers."""
    total = 0
    for nombre in nombres:
        total += nombre
    return total
683a8b48a59d538d9883f25cf5ab8f8f2bbf9232
12,374
def extract_sectors(pred_df, thres):
    """Extracts labels for sectors above a threshold

    Args:
        pred_df (df): predicted sector
        thres (float): probability threshold
    """
    long_df = (
        pred_df.reset_index(drop=False)
        .melt(id_vars="index", var_name="division", value_name="probability")
        .query(f"probability > {thres}")
    )
    out = long_df.groupby("index")["division"].apply(lambda x: list(x))
    return out
a772e965685ca8ac8cd6a52861c36e9cf7faf887
12,377
import bisect


def find_closest(numeric_list, query_number):
    """
    Given a list of numbers, and a single query number, find the number in
    the sorted list that is numerically closest to the query number. Uses
    list bisection, so the lookup itself is O(log n) (the initial sort is
    O(n log n)).
    """
    sorted_numeric_list = sorted(numeric_list)
    pos = bisect.bisect_left(sorted_numeric_list, query_number)
    if pos == 0:
        return sorted_numeric_list[0]
    if pos == len(sorted_numeric_list):
        return sorted_numeric_list[-1]
    before = sorted_numeric_list[pos - 1]
    after = sorted_numeric_list[pos]
    if after - query_number < query_number - before:
        return after
    else:
        return before
ae2dad58162f38f7c1e4d7149943488a96c3c8dc
12,378
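A few illustrative calls for the snippet above:

# Ties go to the smaller neighbor ("before" wins when distances are equal),
# and out-of-range queries clamp to the ends.
assert find_closest([10, 3, 7], 6) == 7
assert find_closest([10, 3, 7], 5) == 3   # tie between 3 and 7 resolves down
assert find_closest([10, 3, 7], 99) == 10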
def set_starting_position(poly, ggr=None):
    """Check and set starting position."""
    if not ggr:
        starting_position = "upperleftcorner"  # EnergyPlus default
    else:
        starting_position = ggr.Starting_Vertex_Position.lower()
    poly = poly.order_points(starting_position)
    return poly
0c04182e3bb23e817e551a5a361fba28f812aad4
12,379
import random


def rand(plan, num=1):
    """Shuffle to generate new perm"""
    def shuffle_tasks():
        perm = plan.perm[:]
        random.shuffle(perm)
        return perm
    return [shuffle_tasks() for _ in range(num)]
5c4318e4be1330f0a78eeec30665626eb5802934
12,381
def count_features_type(types, include_binary=False):
    """
    Counts two or three different types of features
    (binary (optional), categorical, continuous).

    :param types: list of types from get_type
    :returns: a tuple (binary (optional), categorical, continuous)
    """
    if include_binary:
        return (
            types.count('binary'),
            types.count('categorical'),
            types.count('continuous')
        )
    else:
        return (
            types.count('categorical'),
            types.count('continuous')
        )
4158122256c9a407f58987d278657e4a006e6a13
12,386
def is_pandas_module(fullname: str) -> bool:
    """Check if a fully qualified name is from the pandas module"""
    return fullname.startswith("pandas.")
0becdbfd7c1c4f5b7990cbc0466a6e45f25acb14
12,387
import os
import json


def set_json_app_argument(config_path, key, value):
    """Writes kv pair to json argument file

    Arguments:
        config_path {string} -- path to json config file,
            example: /var/run/appconfig/streaming_args.json
        key {string} -- the name of the argument to set
        value {any} -- the value of the argument to set
    """
    if not os.path.exists(config_path):
        # Create new file
        with open(config_path, 'w') as f:
            json.dump({}, f)

    # Read current config JSON (context managers close the file handles)
    with open(config_path) as f:
        json_data = json.load(f)

    # Set the new value for the argument.
    json_data[key] = value

    # Save the json file
    with open(config_path, 'w') as f:
        json.dump(json_data, f)

    return True
149a013c57db130161612cd3070ceb393ec1a6e7
12,388
def squeeze_first(inputs):
    """Remove the first dimension in case it is singleton."""
    if len(inputs) == 1:
        inputs = inputs[0]
    return inputs
c2c0cabc873baf88ce7673f2c8889fedce0f05da
12,390
def get_total_distance_of_path(path, table):
    """
    Calculates the total distance of an individual bee's path.
    Terminates at starting node to complete cycle.
    """
    # Creates a copy of path, puts head at end of list.
    # Zip lists to create pairs of neighbor coords,
    # will create a cycle that terminates at starting node.
    new_path = list(path)
    new_path.insert(len(path), path[0])
    new_path = new_path[1:len(new_path)]
    coordinates = zip(path, new_path)
    distance = sum([table[i[0]][i[1]] for i in coordinates])
    return round(distance, 3)
d6c655b7bbed075dc8323d5d60ee128b8693d792
12,391
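A tiny worked example for the snippet above (hypothetical 3-node distance table, not from the dataset):

# Symmetric distances; the tour 0 -> 1 -> 2 -> 0 sums 1.0 + 2.0 + 3.0 = 6.0.
table = [[0.0, 1.0, 3.0],
         [1.0, 0.0, 2.0],
         [3.0, 2.0, 0.0]]
assert get_total_distance_of_path([0, 1, 2], table) == 6.0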
from typing import Counter


def removeInsignificantNodes(dataframe, filename=None, csvSeparator=';'):
    """
    Removes nodes that occur only once, as they are just the supposed ends
    of given tube.

    Parameters
    ----------
    dataframe : pandas.DataFrame()
        Dataframe containing arranged data
    filename : str, optional
        Path of the file for data export, (default is None)
    csvSeparator : str, optional
        Separator for export to CSV files (default is ';')
    """
    nodes = dataframe['nodeID']
    counted = Counter(nodes)
    remove = [node for node in nodes if counted[node] < 2]
    removeIdx = [idx for idx in range(len(nodes)) if nodes[idx] in remove]
    df = dataframe.drop(removeIdx)
    df.index = range(len(df))
    if filename is not None:
        df.to_csv('output_significant_nodes_' + str(filename) + '.csv',
                  sep=csvSeparator)
    return df
0aa345d257aba84e67a73845a795f820de9190dd
12,393
def format_with_bold(s_format, data=None):
    """
    Returns the string with all placeholders preceded by '_b' replaced
    with a bold indicator value;

    :param: s_format: a string format; if contains '_b{}b_' this term
                      gets bolded.
    :param: data: a string or value, or a tuple/list/dict of them

    Note 1: '... _b{}; something {}b_ ...' is a valid format.
    Note 2: IndexError is raised only when the number of data items is
            less than the number of placeholders ({}).

    Example:
        # No error:
        fmt = 'What! _b{}b_; yes: _b{}b_; no: {}.'
        print(format_with_bold(fmt, ('Cat', 'dog', 3)))

        # IndexError:
        print(format_with_bold(fmt, ('Cat', 'dog')))
    """
    if data is None:
        raise TypeError('Missing data (is None).')
    if '{' not in s_format:
        raise TypeError('Missing format placeholders.')

    # Check for paired markers:
    if s_format.count('_b') != s_format.count('b_'):
        err_msg1 = "Bold indicators not paired. Expected '_b with b_'."
        raise LookupError(err_msg1)

    # Check for start bold marker:
    b1 = '_b'
    i = s_format.find(b1 + '{')

    # Check marker order: '_b' before 'b_':
    if i > s_format.find('}' + 'b_'):
        err_msg2 = "Starting bold indicator not found. Expected '_b before b_'."
        raise LookupError(err_msg2)

    while i != -1:
        # Check for trailing bold marker:
        b2 = 'b_'
        j = s_format.find('}' + b2)
        if j != -1:
            s_format = s_format.replace(b1, '\033[1m')
            s_format = s_format.replace(b2, '\033[0m')
        else:
            err_msg3 = "Trailing bold indicator not found. Expected '_b with b_'."
            raise LookupError(err_msg3)
        i = s_format.find(b1 + '{')

    # Now combine string with data:
    mismatch_err = 'Format placeholders != data items'

    if isinstance(data, (tuple, list)):
        if s_format.count('{}') != len(data):
            raise IndexError(mismatch_err)
        return s_format.format(*data)
    elif isinstance(data, dict):
        if '{}' not in s_format:
            # eg, assume keys given as indices: 'a is {a}, b is {b}'
            return s_format.format_map(data)
        else:
            if s_format.count('{}') != len(data.values()):
                raise IndexError(mismatch_err)
            return s_format.format(*data.values())
    else:
        if s_format.count('{}') != 1:
            raise IndexError(mismatch_err)
        return s_format.format(data)
07fd31eaeb5cfd4a39d256e3c2d48fe3a97a9564
12,394
def is_part_of_word(word_fragment, wordlist):
    """Returns True if word_fragment is the beginning of a word in wordlist.
    Returns False otherwise. Assumes word_fragment is a string."""
    return any(word.startswith(word_fragment) for word in wordlist)
54f572655fe7bb383cb00d732b57d85156b5f528
12,395
def render_output(data):
    """Print the formatted output for the list"""
    output = ['[Dataduct]: ']
    output.extend(data)
    return '\n'.join(output)
5e3bee31890f682eca6aa03128dbf8d51e2fe473
12,396
def get_parent_doc(__type: type, /) -> str | None:
    """Get the nearest parent documentation using the given
    :py:class:`type`'s mro.

    :return: The closest docstring for an object's class, None if not found.
    """
    doc = None
    for parent in __type.__mro__:
        doc = parent.__doc__
        if doc:
            break
    return doc
efe61d30a82e08ccdf5411ffc9feb4252fdb53e2
12,397
def drop_multiple_col(col_names_list, df):
    """
    AIM    -> Drop multiple columns based on their column names.
    INPUT  -> List of column names, df.
    OUTPUT -> updated df with dropped columns
    """
    df.drop(col_names_list, axis=1, inplace=True)
    return df
991144349d383b79e1510fa5c106226254f8329b
12,398
def replstring(string, i, j, repl):
    """
    Replace everything in string between and including indices i and j with repl

    >>> replstring("abc", 0, 0, "c")
    'cbc'
    >>> replstring("abc def LOL jkl", 8, 10, "ghi")
    'abc def ghi jkl'
    """
    # Convert to list since strings are immutable
    strlist = list(string)

    # Delete characters between given indices
    for k in range(j - i + 1):
        del strlist[i]  # i instead of k, since deleting an element makes list smaller

    # Insert new chars
    for l in range(len(repl)):
        strlist = strlist[:i + l] + [repl[l]] + strlist[i + l:]

    return "".join(strlist)
97eee8912a6c8fd9e29a5784af1f3853b714cf0b
12,399
import os
import shutil


def delete_directory(dirpath):
    """Delete a directory"""
    errors = []
    if not os.path.isdir(dirpath):
        errors.append('Directory does not exist')
    else:
        try:
            shutil.rmtree(dirpath, ignore_errors=False)
        except PermissionError:
            errors.append('Error: PermissionError')
        except Exception:
            errors.append('An error occurred while deleting the folder')
    return errors
d76fb2e29585cf6f27c3afa79fa2362e2056c15a
12,401
def cli(ctx, workflow_id):
    """Delete a workflow identified by `workflow_id`.

    Output:

        A message about the deletion

    .. warning::
        Deleting a workflow is irreversible - all workflow data will be
        permanently deleted.
    """
    return ctx.gi.workflows.delete_workflow(workflow_id)
9ee3aa82a9577f9b20574f821f4a9e226665740d
12,402
def drsclient(drs_client):
    """
    Mock drsclient
    """
    return drs_client
3aaf02188dc2193271a7999a4b3c763a6c3e435f
12,404
def int2bit(x, w=20):
    """
    Generates a binary representation of an integer number (as a tuple)

    >>> bits = int2bit(10, w=4)
    >>> bits
    (1, 0, 1, 0)
    >>> bit2int( bits )
    10
    """
    bits = []
    while x:
        bits.append(x % 2)
        x //= 2  # integer division; '/' would produce a float in Python 3
    # a bit of padding
    bits = bits + [0] * w
    bits = bits[:w]
    bits.reverse()
    return tuple(bits)
b65cd8f7c6232896eb2aef9f9d69b6ac4fd97bc6
12,405
def to_tuple(x):
    """Converts lists to tuples.

    For example::

        >>> from networkx.utils import to_tuple
        >>> a_list = [1, 2, [1, 4]]
        >>> to_tuple(a_list)
        (1, 2, (1, 4))
    """
    if not isinstance(x, (tuple, list)):
        return x
    return tuple(map(to_tuple, x))
29586512b336ae5079e991bb13c1ac904e5eefe9
12,408
def mean_residue_ellipticity(phi, n, c, l):
    """
    Calculate mean residue ellipticity (millideg cm2 / decimol) from
    ellipticity (mdeg)

    Args:
        phi (float): an ellipticity (milli deg)
        n (int): the number of residues
        c (float): the molar concentration of the polymer (mol/L)
        l (float): the length of the cuvette (cm)

    Returns:
        a mean residue ellipticity (deg cm2 decimol^{-1} residue^{-1})
    """
    return phi / (10 * l * n * c)
a51a90e3a12b921b2e12fb75160929d60652dcca
12,409
import re
from typing import Counter


def __chemical_elements(chemical):
    """Decomposes a chemical to its elements.

    Parameters
    ----------
    chemical : string
        The molecular formula of the given chemical compound given as a string.

    Returns
    -------
    dict
        Dictionary of the chemical's elemental components and their counts.
    """
    temp_primary_list = []
    compound_list = []
    simplified_compounds_list = []
    raw_element_list = []

    def decompose_elements(string):
        """Decompose string into list of components based on capital letters
        or parentheses."""
        temp_list = re.findall(r'(\(.*?\)\d+)|(\(.*?\))|([A-Z][^A-Z|(]*)', string)
        temp_list = [item for sublist in temp_list for item in sublist]
        temp_list = list(filter(None, temp_list))
        return temp_list

    # split major components of the given chemical
    primary_list = decompose_elements(chemical)

    # separate compounds from simple elements
    # (iterate over a copy so removing items doesn't skip elements)
    for component in primary_list[:]:
        if re.match(r'\(.*?\)\d+|\(.*?\)', component):
            compound_list.append(component)
            primary_list.remove(component)

    # simplify the compounds
    for compound in compound_list:
        trim = re.findall(r'\)\d+', compound)
        if trim:
            length = len(trim[0])
            units = trim[0][1:]
        else:
            length = 1
            units = 1
        simplified_compound = compound[1:]
        simplified_compound = simplified_compound[:-length]
        for i in range(int(units)):
            simplified_compounds_list.append(simplified_compound)

    # decompose compounds
    for compound in simplified_compounds_list:
        temp_list = decompose_elements(compound)
        temp_primary_list = temp_primary_list + temp_list

    # merge initial list with decomposed compounds
    primary_list = primary_list + temp_primary_list

    # break down multiple atoms (e.g. Al2 = Al + Al)
    for element in primary_list:
        trim = re.findall(r'\d+', element)
        if trim:
            length = len(trim[0])
            units = trim[0]
            simplified_element = element[:-length]
        else:
            units = 1
            simplified_element = element
        for i in range(int(units)):
            raw_element_list.append(simplified_element)

    return Counter(raw_element_list)
8297aa4997b6f708e7e5c2b2e181f85531a4f146
12,410
def increase_patch_version(old_version):
    """
    :param old_version: 2.0.1
    :return: 2.0.2.dev
    """
    return "{}.{}.{}.dev".format(
        old_version.major,
        old_version.minor,
        old_version.micro + 1
    )
313adf2a3e862e123c79b16a437a7a9018f8203a
12,411
import re


def convert(input):
    """Find all defines in the compiler generated assembly and convert them
    to #define pragmas"""
    asm_define_re = re.compile(r'">>(\w+) (?:\$|#)([-0-9]+) (?:\$|#)(0|1)<<"')
    asm_defines = asm_define_re.findall(input)
    if not asm_defines:
        raise RuntimeError("Failed to find any asm defines in the input")

    # Convert the found constants to #define pragmas.
    # In case the C++ compiler decides to reorder the AsmDefinesFor_${name}
    # functions, we don't want the order of the .h file to change from one
    # compilation to another. Sorting ensures deterministic order of the
    # #defines.
    output = []
    for name, value, negative_value in sorted(asm_defines):
        value = int(value)
        if value < 0 and negative_value == "0":
            # Overflow - uint64_t constant was pretty printed as negative
            # value. Python will use arbitrary precision arithmetic.
            value += 2 ** 64
        output.append("#define {0} {1:#x}".format(name, value))
    return "\n".join(output)
ad67532f8497f09655a09598d7babbfa89808b12
12,413
from typing import List
from typing import Tuple
import random


def generate_round_robin_matches(bots: List[str]) -> List[Tuple[str, str]]:
    """
    Returns a list of pairs of bots that should play against each other for
    a round robin.
    """
    # This makes the list of matches consistent over multiple calls,
    # e.g. the --list option will always show the same order.
    random.seed(bots[0] + bots[-1])
    # Create all possible pairs of bots with bots from the given list
    matches = []
    count = len(bots)
    for i in range(count):
        for j in range(i + 1, count):
            matches.append((bots[i], bots[j]))
    random.shuffle(matches)
    return matches
1d29e36613210d8d1198fe5943c035c0e435b8dc
12,414
def bin2dec(string_num):
    """Turn binary into decimal."""
    return str(int(string_num, 2))
5c8ba774f1a749947e64a00c86e6cb4054b44d97
12,415
def get_config_dict(robustness_tests, base_config_dict):
    """
    Combines robustness_test and train_config_dict into a single config_dict.

    Args:
        robustness_tests (dict): robustness test config dict
        base_config_dict (dict): train/data/eval/model/hyperparam config dict

    Returns:
        config_dict (dict): config dict
    """
    config_dict = {}
    if robustness_tests is not None:
        if base_config_dict is not None:
            config_dict = {**robustness_tests, **base_config_dict}
        else:
            config_dict = robustness_tests
    else:
        if base_config_dict is not None:
            config_dict = base_config_dict
    return config_dict
593a2307849fa27b895f18d8b9eacd679eeab04a
12,416
def distance_vector_between(point_1, point_2):
    """Compute and return the vector distance between two points."""
    return [point_2[0] - point_1[0], point_2[1] - point_1[1]]
397d3191cc4c214bb0d4b474db2efe7c63e8a10f
12,417
def print_consensus(genomes):
    """
    print consensus sequences for each genome and sample
    """
    # generate consensus sequences
    cons = {}  # cons[genome][sample][contig] = consensus
    for genome, contigs in list(genomes.items()):
        cons[genome] = {}
        for contig, samples in list(contigs.items()):
            for sample, stats in list(samples.items()):
                if sample not in cons[genome]:
                    cons[genome][sample] = {}
                seq = cons[genome][sample][contig] = []
                for pos, ps in enumerate(stats['bp_stats'], 1):
                    ref, consensus = ps['ref'], ps['consensus'][0]
                    if consensus == 'n/a':
                        consensus = ref.lower()
                    seq.append(consensus)
    # print consensus sequences
    for genome, samples in cons.items():
        for sample, contigs in samples.items():
            fn = '%s.%s.consensus.fa' % (genome, sample)
            with open(fn, 'w') as f:
                for contig, seq in contigs.items():
                    print('>%s' % (contig), file=f)
                    print(''.join(seq), file=f)
    return cons
0b49c5709af71f017e9114ac403ff86b41fe0478
12,418
def var_Y(y_vector):
    """
    Args:
        y_vector: np array of numeric values

    Output:
        variance as float
    """
    m = y_vector.mean()
    diff_v = 0
    for i in range(len(y_vector)):
        diff_v += (y_vector[i] - m) ** 2
    res = diff_v / len(y_vector)
    return res
80847d75ffac23b43e75e1727caab910b7856dbc
12,419
def StoreEnclosedBoundaries(Spaces, WallInfo, OpeningsDict):
    """
    Add the id's of included space boundaries to their related space
    boundaries
    """
    for sp in Spaces:
        for b in sp.Boundaries:
            if b.RelatedBuildingElement in WallInfo.keys() and \
                    (len(b.GapsProfile) > 0 or len(b.Profile) > 4):
                for o in OpeningsDict.values():
                    if o[2][0] in b.Id:
                        for b2 in sp.Boundaries:
                            if o[1][0] in b2.Id:
                                b.IncludedBoundariesIds.append(b2.Id)
    return Spaces
f75b2ac6a502d23a4622d83dabf53586feb0ec0e
12,420
def split_names(data, names, fosa_types, drop):
    """Separates facility type prefix from facility name

    inputs:
        data: data frame containing source data from IASO
        names: column name to split
        fosa_types: list of facility types
        drop: list of prefixes indicating row should be dropped from data

    outputs:
        data frame with name column separated into fosa type and fosa name
        columns
    """
    type_pattern = '|'.join(fosa_types)
    data.loc[:, "fosa_type"] = data.loc[:, names].str.extract(
        '(' + type_pattern + ')', expand=True)
    data.loc[:, "fosa_name"] = data.loc[:, names].str.replace(type_pattern, "")
    data = data[~(data[names].isin(drop))]
    data = data[~(data.fosa_name.isin(drop))]
    return data
4842756abd8b332d22548382f56a7c03d43609c2
12,421
def _NamesNotIn(names, mapping):
    """Returns a list of the values in |names| that are not in |mapping|."""
    return [name for name in names if name not in mapping]
ec91dcb6e29b0a9c1aa66f04e1b61d715ded3266
12,422
import re


def natural_sort(l):
    """
    Takes in a list of strings and returns the list sorted in "natural" order.
    (e.g. [test1, test10, test11, test2, test20] ->
          [test1, test2, test10, test11, test20])

    Source: https://stackoverflow.com/questions/4836710/is-there-a-built-in-function-for-string-natural-sort

    Parameters
    ----------
    l : list of str
        Unsorted list of strings

    Returns
    -------
    sorted_l : list of str
        Sorted list of strings
    """
    convert = lambda text: int(text) if text.isdigit() else text.lower()
    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    return sorted(l, key=alphanum_key)
7db22ee5f75703f52b25eecd888847eb35590e65
12,423
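A quick demonstration of the snippet above, matching its docstring example:

# Numeric runs compare as integers, so "test2" sorts before "test10".
files = ["test10", "test2", "test1", "test20", "test11"]
assert natural_sort(files) == ["test1", "test2", "test10", "test11", "test20"]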
def get_extensions(list_of_files):
    """Function takes a list of Path file objects and adds the file
    extension/suffix to a set. The set of extensions is returned."""
    extensions = set()
    for file in list_of_files:
        if len(file.suffix) < 6:
            extensions.add((file.suffix).lstrip('.'))
    return extensions
654b893c148535a99dd112dc3711ea9005ae96b7
12,424
def org_rm_payload(org_default_payload):
    """Provide an organization payload for removing a member."""
    rm_payload = org_default_payload
    rm_payload["action"] = "member_removed"
    return rm_payload
e976396f6fe5de1073f2235aac36b300f129867a
12,427
def bin2complement2(binary):
    """Convert a binary string into its two's complement

    Args:
        binary (str): a string of '0'/'1' characters

    Returns:
        str: the two's complement as a binary string
    """
    # Invert every bit, then add one.
    binmap = map(lambda i: '1' if i == '0' else '0', list(binary))
    binstr = "".join(list(binmap))
    intFromBin = int(binstr, 2)
    intToSumUp = int('00000001', 2)
    finalBin = bin(intFromBin + intToSumUp)
    return str(finalBin).replace('0b', '')
6614ce4bad3c9f15d5d9d25c1a695b8c0bd10158
12,428
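Two illustrative calls for the snippet above:

# The two's complement of 0110 (6) is 1010 (-6 in four bits).
assert bin2complement2('0110') == '1010'
assert bin2complement2('0001') == '1111'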
def most_recent_verification(photo_id_verifications, sso_id_verifications,
                             manual_id_verifications, most_recent_key):
    """
    Return the most recent verification given querysets for photo, sso and
    manual verifications.

    This function creates a map of the latest verification of all types and
    then returns the most recent one using the max of the map values.

    Arguments:
        photo_id_verifications: Queryset containing photo verifications
        sso_id_verifications: Queryset containing sso verifications
        manual_id_verifications: Queryset containing manual verifications
        most_recent_key: Either 'updated_at' or 'created_at'

    Returns:
        The most recent verification.
    """
    photo_id_verification = photo_id_verifications and photo_id_verifications.first()
    sso_id_verification = sso_id_verifications and sso_id_verifications.first()
    manual_id_verification = manual_id_verifications and manual_id_verifications.first()

    verifications = [photo_id_verification, sso_id_verification, manual_id_verification]

    verifications_map = {
        verification: getattr(verification, most_recent_key)
        for verification in verifications
        if getattr(verification, most_recent_key, False)
    }
    return max(verifications_map, key=lambda k: verifications_map[k]) if verifications_map else None
a1272eb45a03ed6c9627852692591deb5ba45c33
12,429
def SingleStr2Num(ch):
    """Convert a single hex character to its numeric value

    Example:
        F --> 15
    """
    # Hex digits in value order; unknown characters map to 0, matching
    # the original branch-per-character behavior.
    idx = "0123456789abcdef".find(str(ch).lower())
    return idx if idx != -1 else 0
70d0e3bccc476410d235d8b37127dd122e59cac9
12,430
def get_progress_rate(k, c_species, v_reactants):
    """Returns the progress rate for a reaction of the form:
    va*A + vb*B --> vc*C.

    INPUTS
    =======
    k: float
        Reaction rate coefficient
    c_species: 1D list of floats
        Concentration of all species
    v_reactants: 1D list of floats
        Stoichiometric coefficients of reactants

    RETURNS
    ========
    w: float
        progress rate of this reaction

    NOTES
    =====
    PRE:
        - k, each entry of c_species and v_reactants have numeric type
        - c_species and v_reactants have the same length
    POST:
        - k, c_species and v_reactants are not changed by this function
        - raises a ValueError if k <= 0
        - raises an Exception if c_species and v_reactants have different
          length
        - returns the progress rate w for the reaction

    EXAMPLES
    =========
    >>> get_progress_rate(10, [1.0, 2.0, 3.0], [2.0, 1.0, 0.0])
    20.0
    """
    if k <= 0:
        raise ValueError('k must be positive.')
    if len(c_species) != len(v_reactants):
        raise Exception('List c_species and list v_reactants must have same length.')
    w = k
    for c, v in zip(c_species, v_reactants):
        w *= pow(c, v)
    return w
6baaaa07fe0814dbc50516b29ba55087c4ec23fd
12,431
def progress_bar(perc, width=30):
    """
    Gets a progress bar for printing.

    :param perc: The percent completed.
    :param width: The entire width of the bar.
    :return: The progress bar string.
    """
    assert width > 10
    width -= 3
    prog = int(perc * width)
    bar = "[" + "=" * prog + (">" if perc < 1 else "=") + "." * (width - prog) + "]"
    return bar
5e796310d8f58d0a4f3e07e357bcca417e0fb3a2
12,432
def gather_feat(feat, ind):
    """
    Gather features along the spatial dimension.

    feat = [b, h*w, c]
    ind  = [b, k]
    Expand ind to [b, k, c] and gather from [b, h*w, c].
    """
    dim = feat.size(2)
    ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
    feat = feat.gather(1, ind)
    return feat
8404d20fa96b16e2209a2ab83223b9fbf2f73dd2
12,434
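A shape sketch for the snippet above (illustrative): picking two spatial positions per batch element.

import torch

feat = torch.arange(24, dtype=torch.float32).reshape(1, 6, 4)  # [b=1, h*w=6, c=4]
ind = torch.tensor([[0, 5]])                                   # [b=1, k=2]
out = gather_feat(feat, ind)                                   # [b=1, k=2, c=4]
assert out.shape == (1, 2, 4)
assert torch.equal(out[0, 1], feat[0, 5])  # row k picks spatial position ind[b, k]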
import json


def get_key(filepath, key, default=None):
    """
    Opens the file and fetches the value at said key

    :param str filepath: The path to the file
    :param str key: The key to fetch
    :param default: The value to return if no key is found
    :return: The value at the key (or the default value)
    """
    with open(filepath, "r") as f:
        file_content = json.load(f)
    return file_content.get(key, default)
9f720f33373ceec9a9a1a4b46c0e9257d3a55787
12,435
def format_time(input_datetime):
    """
    Formats the datetime according to the FIX spec.

    Args:
        input_datetime: a datetime

    Returns:
        A string that can be sent in a FIX message.
    """
    return input_datetime.strftime("%Y%m%d-%H:%M:%S")
8ea7476d60e2f7fcd73ddedd43b886e4df5afdf6
12,436
import dill


def general_pack(*args) -> bytes:
    """Converts all args into a bytes string."""
    return dill.dumps(args)
40ff3c6263294d9874a5129baff404d74824e56c
12,437
def get_model(mdict, Tvals, i, logg, metal, vsini, alpha=None, mode='same'):
    """Get the model with the requested parameters

    Parameters:
    ===========
    - mdict: 5x-nested dictionary, such as generated by MakeModelDicts
        The model dictionary
    - Tvals: List of floats
        A list of temperatures in the first level of mdict
    - i: integer
        The index of the requested temperature within the Tvals list
    - logg, metal, vsini, alpha: float
        The parameter you want. These index into mdict.
    - mode: How to get the model. valid options:
        - 'same': Get the model with the exact requested parameters.
        - 'lower': Get model with the exact values of everything except
          temperature (find the next lowest temperature)
        - 'upper': Get model with the exact values of everything except
          temperature (find the next highest temperature)
    """
    if mode == 'same':
        if alpha is None:
            return mdict[Tvals[i]][logg][metal][vsini]
        else:
            return mdict[Tvals[i]][logg][metal][alpha][vsini]
    elif mode == 'lower':
        idx = i - 1
        idx = max(0, idx)
        idx = min(len(Tvals), idx)
        while True:
            if idx == 0 or idx == len(Tvals) - 1:
                return get_model(mdict, Tvals, idx, logg, metal, vsini,
                                 alpha, mode='same'), idx
            try:
                return get_model(mdict, Tvals, idx, logg, metal, vsini,
                                 alpha, mode='same'), idx
            except KeyError:
                idx -= 1
    elif mode == 'upper':
        idx = i + 1
        idx = max(0, idx)
        idx = min(len(Tvals) - 1, idx)
        while True:
            if idx == 0 or idx == len(Tvals) - 1:
                return get_model(mdict, Tvals, idx, logg, metal, vsini,
                                 alpha, mode='same'), idx
            try:
                return get_model(mdict, Tvals, idx, logg, metal, vsini,
                                 alpha, mode='same'), idx
            except KeyError:
                idx += 1
7b12d29efa7005547744eb9ec60074a7eb49de15
12,438
import os


def getExtension(dirname):
    """get extension of a path"""
    # os.path.splitext returns ('name', '.ext'); take the suffix.
    return os.path.splitext(dirname)[1]
7f8b1c0d1509ca03be43c6fcd86876e8365f46a2
12,439
def autre_joueur(pikominos_joueur: list, pikominos_autre_joueurs: list,
                 need_pikominos: int):
    """
    Take a pikominos from another player and add it to the current player.

    :param pikominos_joueur: list of the player's pikominos
    :param pikominos_autre_joueurs: list of the other players' pikominos
    :param need_pikominos: the pikominos the player wants to take
    :return: pikominos_joueur:list and pikominos_autre_joueurs:list
    """
    # add the pikominos to the player
    pikominos_joueur.append(need_pikominos)
    # remove the last element of the pikominos_autre_joueurs list
    pikominos_autre_joueurs.pop()
    return pikominos_joueur, pikominos_autre_joueurs
d99ce592db7f25fdc4fcbf1e52eb7ac7f408b05b
12,440
def commonprefix(l):
    """Return the common prefix of a list of strings."""
    if not l:
        return ''
    prefix = l[0]
    for s in l[1:]:
        # Trim the candidate first so indexing into s stays in bounds
        # when s is shorter than the current prefix.
        prefix = prefix[:len(s)]
        for i, c in enumerate(prefix):
            if c != s[i]:
                prefix = s[:i]
                break
    return prefix
235636c207c89a7128295fb5aa5b0cba732f50a1
12,441
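A few illustrative calls for the snippet above:

assert commonprefix(["flower", "flow", "flight"]) == "fl"
assert commonprefix(["abc", "ab"]) == "ab"   # a shorter later string is handled
assert commonprefix([]) == ''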
def mock_profile(func):
    """
    Mock decorator that is used when the PROFILER is not set
    """
    def wrapper(*arg, **kwargs):
        """
        Mock wrapper function
        """
        return func(*arg, **kwargs)
    return wrapper
05b25c97f50a6dab4fe0c8d0eb39ee173bafdc01
12,442
def get_qtypes(dataset_name, part):
    """Return list of question-types for a particular TriviaQA-CP dataset"""
    if dataset_name not in {"location", "person"}:
        raise ValueError("Unknown dataset %s" % dataset_name)
    if part not in {"train", "dev", "test"}:
        raise ValueError("Unknown part %s" % part)
    is_biased = part in {"train", "dev"}
    is_location = dataset_name == "location"
    if is_biased and is_location:
        return ["person", "other"]
    elif not is_biased and is_location:
        return ["location"]
    elif is_biased and not is_location:
        return ["location", "other"]
    elif not is_biased and not is_location:
        return ["person"]
    else:
        raise RuntimeError()
0aa1a186ebf4fcfe5820ecbb697d8cb166114310
12,443
import re


def get_numbers_from_file(path, skip_lines=2):
    """
    Function to read a file line by line and extract numbers.

    Parameters
    ----------
    path: str
        Path to the file including the filename.
    skip_lines: int
        Number of lines to skip at the beginning of the file.

    Returns
    -------
    lst: list
        A list with separated entries for found numbers.
    """
    with open(path, "r") as data_file:
        lst = []
        for string in data_file:
            line = re.findall(
                r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?",
                string
            )
            lst.append(line)
        del lst[0:skip_lines]
    return lst
64026a6c5cf9aa16076a3c8872663ab7996c1add
12,445
def describe_list_indices(full_list):
    """
    Describe the indices of the given list.

    Parameters
    ----------
    full_list : list
        The list of items to order.

    Returns
    -------
    unique_elements : list
        A list of the unique elements of the list, in the order in which
        they first appear.
    element_indices : dict
        A dictionary of lists for each unique element, giving all the
        indices in which they appear in the original list.
    """
    unique_elements = []
    element_indices = {}

    for i in range(len(full_list)):
        item = full_list[i]
        # new item
        if item not in unique_elements:
            unique_elements.append(item)
            element_indices[item] = [i]
        # previously seen item
        else:
            element_indices[item].append(i)
    return unique_elements, element_indices
664bcfd63dd0d5d5114ce24a4c7b2850b61364c5
12,447
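A small worked example for the snippet above:

# 'a' first appears at index 0 and again at index 2.
uniq, idx = describe_list_indices(['a', 'b', 'a', 'c'])
assert uniq == ['a', 'b', 'c']
assert idx == {'a': [0, 2], 'b': [1], 'c': [3]}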
def load_metadata_txt(file_path):
    """
    Load distortion coefficients from a text file.

    Parameters
    ----------
    file_path : str
        Path to a file.

    Returns
    -------
    tuple of floats and list
        Tuple of (xcenter, ycenter, list_fact).
    """
    if "\\" in file_path:
        raise ValueError(
            "Please use a file path following the Unix convention")
    with open(file_path, 'r') as f:
        x = f.read().splitlines()
        list_data = []
        for i in x:
            list_data.append(float(i.split()[-1]))
    xcenter = list_data[0]
    ycenter = list_data[1]
    list_fact = list_data[2:]
    return xcenter, ycenter, list_fact
d1220c39fd9e69b76b1aa6f3e73cd9eef708c451
12,448
from typing import OrderedDict


def gen_top(service_uri, no_pages, num_mem, label=None):
    """
    Generate the top level collection page.

    :param service_uri: base uri for the AS paged site.
    :param no_pages: number of pages in the collection.
    :param num_mem: total number of members.
    :param label: optional label for the collection.
    :return: dict
    """
    top = OrderedDict()
    top['@context'] = [
        "http://iiif.io/api/presentation/2/context.json",
        "https://www.w3.org/ns/activitystreams"
    ]
    top['id'] = service_uri
    top['type'] = 'OrderedCollection'
    if label:
        top['label'] = label
    top['total'] = num_mem
    top['first'] = {'id': service_uri + str(1), 'type': 'OrderedCollectionPage'}
    top['last'] = {'id': service_uri + str(no_pages), 'type': 'OrderedCollectionPage'}
    return top
14b330e0ad57b08462b3854eba2771e859344df2
12,449
def reverse_words(text):
    """
    Complete the function that accepts a string parameter, and reverses each
    word in the string. All spaces in the string should be retained.

    :param text: string of words.
    :return: all words in the string reversed.
    """
    return " ".join(x[::-1] for x in text.split(" "))
350b2e60cff7fb4fcfb6f71fd2459d1d5a5d4558
12,450
def _observables_plots():
    """
    Metadata for observables plots.
    """
    def id_parts_plots(obs):
        return [(obs, species, dict(label=label)) for species, label in [
            ('pion', r'$\pi$'),
            ('kaon', '$K$'),
            ('proton', '$p$')
        ]]

    return [
        dict(
            title='Yields',
            ylabel=r'$dE_T/d\eta\ [\mathrm{GeV}]$',
            ylim=(0, 900),
            height_ratio=1,
            subplots=[
                ('dET_deta', None, dict(scale=1)),
            ]
        ),
    ]
dd991a5c5f1b6e155892431387a5839f28dca11a
12,451