content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os
import json
import collections


def parse_rc_json():
    """Read and parse the ``.yasirc.json`` configuration file.

    A file in the current directory takes precedence over one in the
    user's home directory.  Returns a ``defaultdict(dict)`` built from
    the parsed JSON (empty when no config file exists).
    """
    fname = '.yasirc.json'
    path = os.path.expanduser('~/' + fname)
    if os.path.exists(fname):
        # Local config wins over the home-directory one.
        path = os.path.abspath(fname)
    elif not os.path.exists(path):
        path = ''
    raw = ''
    if path:
        with open(path) as f:
            raw = f.read()
    parsed = json.loads(raw) if raw else {}
    return collections.defaultdict(dict, parsed)
c09085724d846faf8c58aeca971b6ee8516a77d5
49,526
from typing import Dict
from typing import Any
from typing import List


async def get_sorted_agenda_items(
    agenda_items: Dict[int, Dict[str, Any]]
) -> List[Dict[str, Any]]:
    """Return all agenda items sorted by weight, ties broken by id.

    A stable sort by id first and then by weight yields a deterministic
    order even when several items share the same weight.
    """
    by_id = sorted(agenda_items.values(), key=lambda item: item["id"])
    return sorted(by_id, key=lambda item: item["weight"])
d661488d7e508e86a974d98ec558856f5810a3e4
49,527
import os


def get_file_system_root_path():
    """Return the root path of the local file system.

    Normally "/cvmfs"; a site may relocate it (e.g. on an HPC) by
    exporting ATLAS_SW_BASE.

    :return: path (string)
    """
    return os.environ.get('ATLAS_SW_BASE') if 'ATLAS_SW_BASE' in os.environ else '/cvmfs'
0d35402af4ef032836ae6d21bf9f0f328a87bb96
49,528
def _melt_keep_index(df, value_name="value"): """ Fully melt a dataframe keeping index, setting new index as all but `value` """ id_vars = df.index.names return ( df.reset_index() .melt(id_vars=id_vars, value_name=value_name) .set_index([*id_vars, df.columns.name]) )
94bfc7f663d62db2c36f15bc9f09a078cc978174
49,529
from sys import version_info


def removesuffix(string: str, suffix: str) -> str:
    """Return *string* with the given *suffix* removed if present.

    If the string ends with a non-empty suffix, return
    ``string[:-len(suffix)]``; otherwise return the original string.
    Delegates to ``str.removesuffix`` on Python 3.9+.
    """
    if version_info[0:2] >= (3, 9):
        return string.removesuffix(suffix)
    # Guard against an empty suffix: without it, string[:-0] would
    # wrongly evaluate to "" on the pre-3.9 path.
    elif suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
a0e0d231fb93bef00204d2c860e2a8c5a5be90e4
49,530
import os


def get_basename(directory):
    """Get the last part of a directory name.

    If the name is C:/goo/foo, this will return foo.

    Args:
        directory (str): A directory path.

    Returns:
        str: The last part of the directory path, or ``None`` when the
        input is falsy.
    """
    return os.path.basename(directory) if directory else None
d28023df90c16e5c665c875a8e2cc08e554c9ce9
49,532
def findall_text(node, path):
    """Collect the ``.text`` of every element under *node* matching *path*."""
    texts = []
    for element in node.findall(path):
        texts.append(element.text)
    return texts
2a40b2442e50e58a64320539153e27acfde15a8f
49,533
import struct


def add(text: str) -> str:
    """Encode astral-plane characters as UTF-16 surrogate pairs.

    This makes indices match how most platforms measure string length
    when formatting texts using offset-based entities.

    Args:
        text: The text to add surrogate pairs to.

    Returns:
        The text with surrogate pairs.
    """
    pieces = []
    for ch in text:
        if 0x10000 <= ord(ch) <= 0x10FFFF:
            # Split the character into its UTF-16 high/low surrogates.
            hi, lo = struct.unpack("<HH", ch.encode("utf-16le"))
            pieces.append(chr(hi) + chr(lo))
        else:
            pieces.append(ch)
    return "".join(pieces)
ffcaa7216686d2da09fae65719ed1a244a755f94
49,534
import torch


def tsn_sample(num_tokens, num_samples, training):
    """Sample ``num_samples`` indexes out of ``num_tokens`` (TSN-style).

    Requires num_tokens >= num_samples.

    args:
        num_tokens: int, num of total tokens
        num_samples: int, num of sampled tokens
        training: bool — when True, jitter each sample inside its segment
    returns:
        indexes: long tensor of sampled positions
    """
    if num_samples == 1:
        return torch.tensor([0], dtype=torch.long)
    ticks = torch.arange(0, num_samples).to(torch.float)
    base = torch.floor((num_tokens - 1) * ticks / (num_samples - 1))
    if not training:
        return base.to(torch.long)
    gaps = base[1:] - base[:-1]
    base[:-1] += torch.rand((gaps.size()[0],)) * gaps
    return torch.floor(base).to(torch.long)
8821aa111e17c231a8ffc61afd43c9fd71e3efcc
49,535
def getCacheThumbName(path):
    """Returns a thumb cache filename.

    :param path: string or unicode -- path to file

    Example::

        thumb = xbmc.getCacheThumbName('f:\\videos\\movie.avi')
    """
    # Stub of the Kodi/xbmc API: always returns the empty string.
    return ""
f3b467d542842ab011bbbf22cee86399cf80d1e6
49,537
import torch


def mae(predictions, actuals):
    """Compute the mean absolute error between two tensors.

    - predictions: predictions from model.forward(...) (Tensor)
    - actuals: actual values (labels) (Tensor)

    @returns: mean(|predictions - actuals|) as a numpy scalar
    """
    abs_errors = torch.abs(actuals - predictions)
    return torch.mean(abs_errors).detach().numpy()
00cb15d83a06427947bcabb85e48ef4ffa0d2677
49,539
def split_path(path):
    """//foo/bar/baz -> ['foo', 'bar', 'baz']"""
    # filter(None, ...) drops the empty segments produced by repeated
    # or leading/trailing slashes.
    return list(filter(None, path.split('/')))
13376639f9597d598c2b69a844e232a7c059fc3a
49,540
import torch


def exp_and_normalize(features, dim=0):
    """Exponentiate and normalize along *dim* — i.e. a softmax."""
    return torch.nn.functional.softmax(features, dim=dim)
f16f0da2c3c66dddfc17411d722b867d307fceb3
49,541
def s2b(s):
    """Portably convert a string to bytes.

    Python 3 ``socket.send``/``recv`` require bytes.
    """
    return bytes(s, "utf-8")
92a2b6301858c80d856d80e1200f00b17e2e2327
49,542
from typing import Optional


def distribute_about_center(index: int, size: int, max_loc: float = 1.0,
                            max_size: Optional[int] = None):
    """Place item *index* of a *size*-item collection on [0, max_loc], centered.

    :param index: The 0-based index of the item in its collection.
    :param size: The number of items in the item's collection.
    :param max_loc: The maximum location of an item.
    :param max_size: The maximum number of items that can appear in any
        collection; use it when all collections should share one spacing.
        Leave as None to give each collection its own spacing.
    """
    effective = size if max_size is None else max_size
    step = max_loc / (effective - 1) if effective > 1 else max_loc
    start = max_loc / 2 - (size - 1) * step / 2
    return start + index * step
4dfae63fc3b7639c08f37ece62e8afad352f8d23
49,543
def parse_post_age(text):
    """ map 'posted 10 days ago' => '10' """
    if 'hours' in text:
        # Anything measured in hours counts as one day.
        return '1'
    return ''.join(ch for ch in text if ch.isdigit())
c8cf5233e5ed8700f05d0d1419d8d8f2903e5f2a
49,544
def restore_frametime(func):
    """Decorator for quick test mode, restores frametime after test"""
    def func_wrapper(self):
        try:
            func(self)
        finally:
            # Quick-test runs tweak the device frametime; put it back
            # even when the wrapped test raises.
            if self.quick_test:
                self.device.frametime_us = self.frametime_us
    return func_wrapper
35d2eb6ae2934818c9c6a2d7de657724c7517b49
49,546
def createNewCluster(cluster, Z):
    """Pick the member of *cluster* to split off into a new cluster.

    Returns the position (within *cluster*) of the element whose summed
    distance ``Z[i][j]`` to every other member is largest.
    """
    best_sum = 0
    best_position = 0
    for position, i in enumerate(cluster):
        total = sum(Z[i][j] for j in cluster)
        if total > best_sum:
            best_sum = total
            best_position = position
    return best_position
3abf164105f560393ec72283a3dc243aac8177cd
49,547
import fnmatch def _matches(file, glob): """ check if file matches a glob""" return fnmatch.fnmatch(file, glob)
5a104720f76910bcb280350c440d4bea8d156b81
49,548
def box_area(boxes):
    """Compute the area of each [x1, y1, x2, y2] box.

    Args:
        boxes(np.ndarray): [N, 4]
    return:
        [N]
    """
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return widths * heights
0af9e6e9fb9e3b2924ec38b77e6cf024773e92cc
49,549
async def test_coroutine_frame(awaitable):
    """
    >>> class Awaitable(object):
    ...     def __await__(self):
    ...         return iter([2])

    >>> coro = test_coroutine_frame(Awaitable())
    >>> import types
    >>> isinstance(coro.cr_frame, types.FrameType) or coro.cr_frame
    True
    >>> coro.cr_frame is coro.cr_frame  # assert that it's cached
    True
    >>> coro.cr_frame.f_code is not None
    True
    >>> code_obj = coro.cr_frame.f_code
    >>> code_obj.co_argcount
    1
    >>> code_obj.co_varnames
    ('awaitable', 'b')
    >>> next(coro.__await__())  # avoid "not awaited" warning
    2
    """
    # Bind the awaited result to a local so the doctest above can assert
    # on co_varnames ('awaitable', 'b'); do not rename or inline.
    b = await awaitable
    return b
f8a81fc37362fbbb948d8587663e1b176063f5af
49,551
from datetime import datetime
import re


def get_date_from_string(date_string):
    """Transform the specified date string into a naive datetime object.

    The colon inside the UTC offset (e.g. "+01:00" -> "+0100") is
    removed first because %z on older Pythons does not accept it.
    """
    squeezed = re.sub(r"(\+\d{1,2})(:)(\d{1,2})", r"\1\3", date_string)
    parsed = datetime.strptime(squeezed, '%Y-%m-%dT%H:%M:%S%z')
    return parsed.replace(tzinfo=None)
f6e6d713cecef451b226e57ddae550fccc91cae8
49,553
import re


def clean_attr_name(attr_name: str) -> str:
    """Remove invalid characters from the attribute name."""
    if not attr_name:
        return ""
    # Normalize: trim, spaces -> underscores, lowercase.
    cleaned = attr_name.strip().replace(" ", "_").lower()
    # Drop anything that is not alphanumeric or an underscore.
    cleaned = re.sub("[^0-9a-zA-Z_]", "", cleaned)
    # A valid identifier starts with a letter or underscore.
    return re.sub("^[^a-zA-Z_]+", "", cleaned)
122db806354d98916021f11918338771d8d7b7ac
49,554
def shieldsio_markdown_badge_generator(badge_label=None, badge_hover=None,
                                       badge_link=None, metric='count',
                                       badge_color=None,
                                       badge_style='flat-square'):
    """Generates badge generator for a badge hosted on shields.io.

    Params:
        badge_label - label displayed in the left box of the badge.
        badge_hover - hover text used in the Markdown markup.
        badge_link - the link of the badge (in the markup).
        metric - JSON property whose value fills the right box.
        badge_color - the color of the badge.
        badge_style - badge style: 'plastic', 'flat', 'flat-square'.
    """
    def _get_badge_url(data_url):
        pieces = ['https://img.shields.io/badge/dynamic/json?']
        # Order matters: it mirrors the query-string layout produced
        # by the original dict iteration.
        for param, value in (('label', badge_label), ('query', metric),
                             ('url', data_url), ('color', badge_color),
                             ('style', badge_style)):
            if value is not None:
                pieces.append('&' + '{key}={value}'.format(key=param, value=value))
        return ''.join(pieces)

    def _get_markup(hover, badge_url):
        return '[![{hover}]({badge_url})]({badge_link})'.format(
            hover=badge_hover or hover, badge_url=badge_url,
            badge_link=badge_link)

    def _generator(value):
        url = _get_badge_url(value['data_url'])
        return {
            'provider': 'https://shields.io',
            'url': url,
            'markup': _get_markup(hover=value['label'], badge_url=url),
        }

    return _generator
2401eb53f0725b3594274b4b258e3dfe2c04b2d5
49,555
import sys
import importlib


def load_python_module(sys_paths, import_name):
    """Import the module *import_name* after extending ``sys.path``.

    Paths are inserted at position 1 (iterated in reverse so relative
    order is preserved), skipping ones already present.  The import
    itself only has effect the first time; later calls return the
    cached module.
    """
    assert isinstance(sys_paths, (tuple, list)), \
        "%s is not a list or tuple" % sys_paths
    for candidate in reversed(sys_paths):
        if candidate not in sys.path:
            sys.path.insert(1, candidate)
    return importlib.import_module(import_name)
e1162ce9598eb53466041d9bd22cbe32caa16779
49,557
def mps_complex_conjugate(mps):
    """Will take complex conjugate of every entry of every tensor in mps,
    and append label_suffix to every label"""
    # NOTE(review): the docstring mentions ``label_suffix`` but no such
    # parameter exists and no labels are touched — confirm against the
    # original API.
    new_mps = mps.copy()
    for x in new_mps.data:
        # NOTE(review): the return value of ``conjugate()`` is discarded;
        # this only works if the tensor type conjugates itself in place
        # (numpy's ndarray.conjugate() does NOT — it returns a new
        # array). Verify the element type before relying on this.
        x.conjugate()
    return new_mps
808b6ef5e6cf403d9954ab1f7b71228d8decc14b
49,558
def _strip_message(message, index): """Splits an array at the given index""" if index > len(message): raise ValueError('Index Not Applicable | {} {}'.format(message, index)) strip = message[:index] leftover = message[index:] return strip, leftover
1f57d21fe42d36c552873e473edea0602929c654
49,559
def get_seg_map(seg_range, seg_seq):
    """Map chromosome positions to sequence offsets.

    return: e.g. {123456: 0, 123457: 1, ...} for range "123456-...".
    """
    start, end = (int(bound) for bound in seg_range.split('-'))
    positions = range(start, end + 1)
    return dict(zip(positions, range(len(seg_seq))))
0397c81d28b761e4644f006d64906ec5a43d9e26
49,562
def calculateBlockPlacementCost(boardSize, desiredSpace):
    """Determine the coin cost to purchase a block placement at the
    selected position.

    Cost scales with how central the space is.  Note: placement cost
    has a minimum of 1!
    """
    scaled = ((boardSize - 1 - desiredSpace.x) * desiredSpace.x *
              (boardSize - 1 - desiredSpace.y) * desiredSpace.y * 10 /
              ((boardSize - 1)**4 / 16)) // 2
    # max(), not min(): min() would *cap* the cost at 1, whereas the
    # documented contract is a minimum cost of 1.
    return max(scaled, 1)
f6e100644d1288d74916e61543a9cad0f57ed2c7
49,563
def dostime_to_timetuple(dostime):
    """Convert a RAR archive member DOS time to a Python time tuple."""
    date_part = (dostime >> 16) & 0xffff
    time_part = dostime & 0xffff
    return (
        1980 + (date_part >> 9),   # year: 7 bits, offset from 1980
        (date_part >> 5) & 0xf,    # month: 4 bits
        date_part & 0x1f,          # day: 5 bits
        time_part >> 11,           # hour: 5 bits
        (time_part >> 5) & 0x3f,   # minute: 6 bits
        2 * (time_part & 0x1f),    # second: stored at 2s resolution
    )
d5f76a1cfff5cd723de0dcb48b323b863b4aea21
49,564
def convert_twos_compliment(bin_str):
    """Convert a binary digit string to its two's-complement integer value."""
    width = len(bin_str)
    unsigned = int(bin_str, 2)
    # A leading '1' means the sign bit is set: subtract 2**width.
    return unsigned if bin_str[0] == '0' else unsigned - (1 << width)
b5b62eb0f74aecd35de880d11470855e189fd713
49,567
import re


def clean_text(text):
    """Strip whitespace, CJK punctuation and angle-bracket tags from *text*."""
    return re.sub(r'[? 、;!,。“”?.~…,$\r\n《》——]|(<.*>)', '', text.strip())
b5da8949a30ccf1216508b94b1088c0a3dd8f547
49,568
import subprocess
import os
import sys


def run(command, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        timeout=None, copy_local_env=False, **kwargs):
    """
    Cross platform compatible subprocess with CompletedProcess return.

    No formatting or encoding is performed on the output of subprocess,
    so it's output will appear the same on each version / interpreter
    as before.

    .. code:: python

        reusables.run('echo "hello world!', shell=True)
        # CPython 3.6
        # CompletedProcess(args='echo "hello world!', returncode=0,
        #                  stdout=b'"hello world!\\r\\n', stderr=b'')
        #
        # PyPy 5.4 (Python 2.7.10)
        # CompletedProcess(args='echo "hello world!', returncode=0L,
        #                  stdout='"hello world!\\r\\n')

    Timeout is only usable in Python 3.X, as it was not implemented before
    then, a NotImplementedError will be raised if specified on 2.x version
    of Python.

    :param command: command to run, str if shell=True otherwise must be list
    :param input: send something `communicate`
    :param stdout: PIPE or None
    :param stderr: PIPE or None
    :param timeout: max time to wait for command to complete
    :param copy_local_env: Use all current ENV vars in the subprocess as well
    :param kwargs: additional arguments to pass to Popen
    :return: CompletedProcess class
    """
    if copy_local_env:
        # Copy local env first and overwrite with anything manually specified
        env = os.environ.copy()
        env.update(kwargs.get('env', {}))
    else:
        env = kwargs.get('env')

    if sys.version_info >= (3, 5):
        # Python 3.5+ ships subprocess.run natively; delegate to it.
        return subprocess.run(command, input=input, stdout=stdout,
                              stderr=stderr, timeout=timeout, env=env,
                              **kwargs)

    # Created here instead of root level as it should never need to be
    # manually created or referenced
    class CompletedProcess(object):
        """A backwards compatible near clone of subprocess.CompletedProcess"""

        def __init__(self, args, returncode, stdout=None, stderr=None):
            self.args = args
            self.returncode = returncode
            self.stdout = stdout
            self.stderr = stderr

        def __repr__(self):
            args = ['args={0!r}'.format(self.args),
                    'returncode={0!r}'.format(self.returncode),
                    'stdout={0!r}'.format(self.stdout) if self.stdout else '',
                    'stderr={0!r}'.format(self.stderr) if self.stderr else '']
            return "{0}({1})".format(type(self).__name__,
                                     ', '.join(filter(None, args)))

        def check_returncode(self):
            if self.returncode:
                # NOTE(review): ``python_version`` is not defined in this
                # block — presumably a module-level constant elsewhere in
                # the file; confirm.
                if python_version < (2, 7):
                    # 2.6's CalledProcessError has no output argument.
                    raise subprocess.CalledProcessError(self.returncode,
                                                        self.args)
                raise subprocess.CalledProcessError(self.returncode,
                                                    self.args,
                                                    self.stdout)

    proc = subprocess.Popen(command, stdout=stdout, stderr=stderr,
                            env=env, **kwargs)
    # NOTE(review): ``PY3`` is not defined in this block either — assumed
    # to be a module-level bool; verify in the full module.
    if PY3:
        out, err = proc.communicate(input=input, timeout=timeout)
    else:
        if timeout:
            raise NotImplementedError("Timeout is only available on Python 3")
        out, err = proc.communicate(input=input)
    return CompletedProcess(command, proc.returncode, out, err)
e80337535bdd94d868686e47645ad348e7668ed7
49,569
import os
import tempfile


def created_temp_dir():
    """Create (if needed) and return the fallback archive dir in temp.

    Used when none of the configured archive directories exist.
    """
    target = os.path.join(tempfile.gettempdir(), "zipget")
    if not os.path.isdir(target):
        print("Warning: none of archive dirs existed, creating", target)
        os.makedirs(target)
    return target
1c63eecd6bcd2effca8daa729349f364b4b2f748
49,570
import random


def random_choice(bucket):
    """Safely get a random choice from a list.

    Parameters:
        bucket (list): A list to randomly choose from.

    Returns:
        str: A random element, or "" when the list is empty (instead of
        raising IndexError).
    """
    return random.choice(bucket) if bucket else ""
2b781ede8fff9c2f455eebab9bf64cdf01717bee
49,572
import torch


def mul(input, other, *args, **kwargs):
    """Multiply each element of ``input`` with ``other`` element-wise.

    Thin tree-tensor wrapper around ``torch.mul``; works on plain
    tensors as well as tree tensors.

    Examples::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.mul(
        ...     ttorch.tensor([1, 2, 3]),
        ...     ttorch.tensor([3, 5, 11]),
        ... )
        tensor([ 3, 10, 33])
    """
    return torch.mul(input, other, *args, **kwargs)
027789f0ef85cb68e374994065a4eb19f815eb52
49,573
def add_entry_list_strings(list_string, entry):
    """Append *entry* followed by a newline marker to the list, in place."""
    list_string.extend([entry, '\n'])
    return list_string
49e5beafa3748adfae397c8f44d5ef6abcdaef78
49,574
def doc_template_user(m):
    """ Decorator to mark method `m` as using docstring template

    If ``klass`` is the enclosing class of `m`, and ``klass`` is decorated
    with ``doc_templater``, then the docstring for `m` will be given by
    ``klass._doc_templates[m.__name__] % klass._doc_dict``
    """
    setattr(m, '_uses_doc_template', True)
    return m
a8a079b5315438242108351553584c86ee82895f
49,575
def interval_intersection_width(a, b, c, d):
    """Width of the overlap of real intervals [a, b] and [c, d] (0 if none)."""
    overlap = min(b, d) - max(a, c)
    return overlap if overlap > 0 else 0
fc913c1d2510e9141ab40f4d56f4ce952f3d7d04
49,577
import csv


def loadData(path: str):
    """Load Markov-graph generation data from a CSV file.

    Expected column order:
      - nodeName (string): node identifier
      - p (float): probability of P winning
      - q (float): probability of Q winning
      - pWinsNode (string): node reached when P wins
      - qWinsNode (string): node reached when Q wins
    """
    data = {}
    with open(path, "r") as csvFile:
        reader = csv.reader(csvFile, delimiter=",")
        for lineNumber, row in enumerate(reader, start=1):
            if lineNumber == 1:
                continue  # skip the header row
            data[row[0]] = {
                "probP": row[1],
                "probQ": row[2],
                # Empty cells become None (terminal transitions).
                "nodeP": row[3] if row[3] != "" else None,
                "nodeQ": row[4] if row[4] != "" else None,
            }
    return data
e30e84a4cf4eb199b97de2f02fdd0116ca851209
49,580
import torch


def query_index(question, embedding_model, tokenizer, wiki_dataset, kb_index,
                n_results=10, max_length=128, min_passage_length=20,
                device="cpu"):
    """Embed a question and retrieve supporting wiki passages via FAISS.

    :param question: natural-language query string
    :param embedding_model: model exposing ``embed_questions`` (project type)
    :param tokenizer: HF-style tokenizer used to encode the question
    :param wiki_dataset: indexable dataset whose rows carry "passage_text"
    :param kb_index: FAISS-style index with a ``search`` method
    :param n_results: number of passages requested (2x are fetched)
    :param max_length: tokenizer truncation/padding length
    :param min_passage_length: unused in this function — presumably a
        downstream filter; TODO confirm
    :param device: torch device for the embedding model
    :return: retrieved passages joined by " <P> " (prefixed with "<P> ")
    """
    embedding_model.to(device)
    # embed the question
    tokenized_question = tokenizer([question], max_length=max_length,
                                   padding="max_length", truncation=True,
                                   return_tensors='pt')
    with torch.no_grad():
        embedded_question = embedding_model.embed_questions(
            tokenized_question["input_ids"].to(device),
            tokenized_question["attention_mask"].to(device))
    # now put on the cpu as numpy so we can do faiss. default, it should be
    # on the cpu already
    embedded_question = embedded_question.cpu().numpy()
    # now query the index, using faiss. getting more than we need to make
    # sure the text is long enough
    D, I = kb_index.search(embedded_question, 2 * n_results)
    # get the results of the query
    all_wikidata = [wiki_dataset[int(k)] for k in I[0]]
    all_passages = "<P> " + " <P> ".join([p["passage_text"] for p in all_wikidata])
    return all_passages
92166d714ec492a59397bd8d28367d22e009d01c
49,582
def filter_dict(data, keys):
    """Filter dict :data by given :keys"""
    return {key: data[key] for key in keys if key in data}
80cc8a7226c8b7929588191a48a5d3f1ce07bbbd
49,584
def remove_dice_from_roll(roll, dice_to_remove):
    """Remove matched dice from a roll once a combination is scored.

    Raises ValueError when a value to remove is absent from the roll.
    Returns a new roll dict re-indexed from 0.
    """
    remaining = list(roll.values())
    for eyes in dice_to_remove:
        try:
            remaining.remove(eyes)
        except ValueError:
            raise ValueError("Value not in list and therefore can not be removed.")
    return dict(enumerate(remaining))
c50a747e2eb6f2a619700ed4be83be4f602b59d7
49,585
def format_schema_errors(e):
    """Format FlaskJsonSchema validation errors"""
    messages = [validation_err.message for validation_err in e.errors]
    return {"error": e.message, "errors": messages}
9197f83adc03d031681ed097dbc96206dbdd8ac9
49,586
def find_e_int(path, csvfile):
    """Get the dispersion component of the interaction energy (per ion
    pair) for each configuration.

    This value, when temperature corrected, is the enthalpy of
    interaction.  Returns ``(dispersion, electrostatic)``.
    """
    disp_contribution = 0.0
    elec = 0.0
    # Drop a leading "./" so paths match the csv's first column.
    config = path[2:] if path[0] == '.' else path
    with open(csvfile, 'r') as f:
        for line in f.readlines()[1:]:
            fields = line.split(',')
            if fields[0] != config:  # filepath is the first column of csv
                continue
            if len(fields) in (15, 16):
                # neutral species included
                disp_contribution = float(fields[6])
                elec = float(fields[4])
            else:
                # just ionic clusters - check index - total mp2 is elec
                disp_contribution = 0.0
                elec = float(fields[8])
    return disp_contribution, elec
c0b3fe7cf931c245ef38803d7b040911b15b4ab1
49,588
def fmt_unary_op(vm, arg=None, repr=repr):
    """Format the repr() of the top of the evaluation stack as " (...)".

    In a RETURN_VALUE callback the value may already have been popped —
    if the stack is empty we raise instead of formatting garbage.
    """
    if not len(vm.frame.stack):
        raise vm.PyVMError("Empty stack in unary op")
    return " (%s)" % (repr(vm.top()),)
9376a9dfa3cf1c50ebbea50c36f8166126445dc6
49,589
def calc_inertialshapefactor(IA, IB, IC):
    """Shape descriptor based on the principal moments of inertia (PMI).

    Cannot be calculated for planar surfaces.
    """
    return IB / (IA * IC)
00ccfc826d4e88248f60f6dcddc6f5123a4bb44f
49,590
import hashlib def _make_stmt_name(func): """ Create the prepared query name which is a string up-to 63 characters made up of - an underscore - the md5 hash of the <module.function> name - an underscore - up-to 29 characters of the function name """ m = hashlib.md5() func_name = func.__name__ fully_qualified_name = f"{func.__module__}.{func.__name__}" m.update(bytes(fully_qualified_name, encoding="utf-8")) stmt_name = "_" stmt_name += m.hexdigest() stmt_name += "_" + func_name[:29] return stmt_name
bad6f2530b7dbae2a814ab6eba030e6f0040a58f
49,592
import subprocess


def deletesnapshots(snapshots):
    """Delete every Time Machine local snapshot in the given list.

    Args:
        snapshots (list): snapshot names understood by ``tmutil``.

    returns:
        True if successful
    """
    total = len(snapshots)
    for position, name in enumerate(snapshots, start=1):
        print("Removing snapshot {:d} of {:d}".format(position, total))
        subprocess.check_output(['tmutil', 'deletelocalsnapshots', name])
    return True
82ab2481750fb4efad2accd696694f69ad8655b2
49,594
def extract_headers(response):
    """Extract relevant headers from a response into a dictionary.

    Byte keys/values are decoded as UTF-8; volatile headers (Date,
    Content-Length, Etag, Last-Modified) are dropped.
    """
    result = {}
    for key in response.headers.keys():
        if isinstance(key, bytes):
            key = key.decode('utf-8')
        value = response.headers.get(key)
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        if key in ('Date', 'Content-Length', 'Etag', 'Last-Modified'):
            continue
        result[key] = value
    return result
fde2f4d9c4e449f95c57fcc20dae3d003ec6bc9b
49,596
def find_max_occupancy_node(dir_list):
    """ Find node with maximum occupancy.

    :param dir_list: list of directories for each node.
    :return: index of the node with the most directories (first wins on
        ties); 0 for an empty list.
    """
    best_number = 0
    best_length = 0
    for number, dirs in enumerate(dir_list):
        if len(dirs) > best_length:
            best_length = len(dirs)
            best_number = number
    return best_number
fad18eb1698d2f7188688f33ad3d27c17cac5734
49,598
from typing import Any
from typing import Union


def issubclass_(
    cls: Any,
    types: Union[type[Any], tuple[type[Any], ...]],
) -> bool:
    """Like `issubclass`, but do not raise error if value is not `type`."""
    if not isinstance(cls, type):
        return False
    return issubclass(cls, types)
f300d79f12a74ac549b6e7d66e1d865449b0eca9
49,599
import argparse


def parse_arguments():
    """Build the command-line parser and return the known arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", "-i", type=str, required=True,
                        help="Path of the input directory.")
    parser.add_argument("--output_dir", "-o", type=str, required=True,
                        help="Path of the output directory.")
    parser.add_argument("--model_name_or_path", "-m", type=str, required=True,
                        help="Path to pre-trained model or shortcut name.")
    parser.add_argument("--cache_dir", "-c", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3.")
    parser.add_argument("--batch_size", "-b", default=64, type=int,
                        help="Batch size per GPU/CPU.")
    parser.add_argument("--dataparallelmodel", "-p", action='store_true',
                        help="Use full capacity of GPUs parallel embedding.")
    # Ignore unrecognized options instead of erroring out.
    arguments, _ = parser.parse_known_args()
    return arguments
a25fc9923c6c8f02784b4a247ed807852896e5aa
49,602
def is_valid_sequence(s):
    """ (str) -> bool

    The parameter is a potential DNA sequence. Return True if and only
    if the DNA sequence is valid (that is, it contains no characters
    other than 'A', 'T', 'C' and 'G').

    >>> is_valid_sequence("ATTCCGGGA")
    True
    >>> is_valid_sequence("SALIMAAAA")
    False
    """
    # Validity depends only on the alphabet.  The previous version also
    # required all four nucleotides to be present, which wrongly
    # rejected valid sequences such as "AAA".
    return all(ch in "ATCG" for ch in s)
265cf40f972c889040ab6a0d5d21680471c97f29
49,603
def scriptClear(*args, **kwargs):
    """ Clears a Nuke script and resets all the root knobs to user defined
    knob defaults. To reset to compiled in defaults only pass in
    resetToCompiledDefaults=True.
    """
    # API stub: the real implementation lives in the host application.
    return None
7e567ce674ee24247f0207418718ec636a915e9c
49,604
def find_difference_sum_of_squares_and_square_of_sum(num: int) -> int:
    """Difference between the square of the sum and the sum of squares
    of the first *num* natural numbers.

    Args:
        num (int): First natural numbers.

    Returns:
        int: Difference.
    """
    naturals = range(1, num + 1)
    total = sum(naturals)
    return total * total - sum(n * n for n in naturals)
3f0d5fbc75451f8c883d0aedc3b5a91cbffb1a77
49,606
import os
import pwd


def is_effective_user(user_id_or_name):
    """Returns True if user_id_or_name is effective user (id/name)."""
    euid = os.geteuid()
    if str(user_id_or_name) == str(euid):
        return True
    # Fall back to comparing against the account name for the euid.
    return user_id_or_name == pwd.getpwuid(euid).pw_name
19e502e12b2cc8b102cd0f5acfbe5d064cd8fed9
49,607
def hello(name):
    """ Function that returns a greeting for whatever name you enter.

    Usage:
    >>> hello('Emiel')
    'Hello, Emiel!'
    """
    return "Hello, " + name + "!"
ffbdf2ee0869b3fd20f0cb85eff8eae748e30d28
49,609
def merge(source, destination):
    """ Merges 2 dicts recursively.

    If leaves are lists, they are extended.  If scalar leaves differ,
    they are joined into an "old,new" string.  Keys missing from
    *destination* are copied over (the previous version raised
    KeyError in that case).
    """
    for key, value in source.items():
        if isinstance(value, dict):
            # get node or create one
            node = destination.setdefault(key, {})
            merge(value, node)
        elif isinstance(value, list):
            if key in destination:
                # Extend in place so external references stay valid.
                destination[key] += value
            else:
                destination[key] = list(value)
        elif key not in destination:
            destination[key] = value
        elif destination[key] != value:
            destination[key] = f'{destination[key]},{value}'
        else:
            destination[key] = value
    return destination
7f771e83c5f89fa641fa79f863f4382ac5cc0ead
49,610
def getSimCol(bird):
    """
    Convert data stored as BIRD and TISSUE objects into lists of columns.

    The index (i) is important, because it allows the correct randomly
    generated carotenoid concentration to be selected from a list of
    possible values on a per bird basis. See simNutrients function in
    ANDfunctions_SimulateData.py file for details.
    """
    # Running index into each nutrient's list of simulated values.
    i = 0
    col_birdid = []
    col_sex = []
    col_treatment = []
    col_bodymass = []
    col_tissuetype = []
    col_mt = []
    # nutrient name -> column of concentrations, one per (bird, tissue)
    cols_carot = {}
    for bird_id, bird_obj in bird.items():
        for tissue_type, tissue_obj in bird_obj.tissues.items():
            col_birdid.append(bird_id)
            col_sex.append(bird_obj.sex)
            col_treatment.append(bird_obj.treatment)
            col_bodymass.append(bird_obj.bodymass)
            col_tissuetype.append(tissue_type)
            # NOTE(review): assumes tissue_obj is a sequence whose first
            # element carries mass_total — confirm against the TISSUE class.
            col_mt.append(tissue_obj[0].mass_total)
            for tissue, nutrients in bird_obj.carot_conc_ind.items():
                for nutrient_type, carot_conc in nutrients.items():
                    if nutrient_type in cols_carot:
                        cols_carot[nutrient_type].append(carot_conc[i])
                    else:
                        value = []
                        value.append(carot_conc[i])
                        cols_carot[nutrient_type] = value
            # NOTE(review): the original source's indentation was lost;
            # this increment is placed per tissue iteration — verify
            # against the original file whether it should instead be
            # per bird, as the docstring's "per bird basis" suggests.
            i += 1
    return col_birdid, col_sex, col_treatment, col_bodymass, col_tissuetype, col_mt, cols_carot
bff2a168afb02ef81ed3f86d310f215c43a41ef3
49,611
def Fix_float_error_for_df(data_frame, var_name, var_value_list, eps=1e-8,
                           abs_eps=True):
    """Snap near-equal float values in column *var_name* to exact targets.

    After this fix it becomes possible to use ``==`` conditions on float
    values.  ``abs_eps`` selects an absolute tolerance; otherwise the
    tolerance is relative to each target value.
    """
    fixed = data_frame.copy()
    for target in var_value_list:
        tol = eps if abs_eps else target * eps
        col = fixed[var_name]
        mask = (col < target + tol) & (col > target - tol)
        fixed.loc[mask, var_name] = target
    return fixed
f1a364c4fcb220d9b6a8fa33263044155be5dbb2
49,612
from typing import Tuple


def color_interpolate(color1: Tuple[int, int, int],
                      color2: Tuple[int, int, int], w: float) -> str:
    """ Nice, gamma corrected interpolation

    Channels are linearized with gamma 2.2, blended linearly by *w*,
    then re-encoded; plain linear RGB blending would darken midtones.
    """
    channels = []
    for a, b in zip(color1, color2):
        lin_a = a ** 2.2
        lin_b = b ** 2.2
        blended = lin_a + w * (lin_b - lin_a)
        channels.append(int(blended ** (1 / 2.2)))
    r, g, b = channels
    return f"#{r:02x}{g:02x}{b:02x}"
2d37c0303290f64170b9c188174b95367b00463a
49,613
import hashlib


def hash_bucket(elemento, num_canastas=10):
    """Return the bucket for *elemento*: its SHA-256 hash modulo the
    number of buckets."""
    digest = hashlib.sha256(elemento.encode()).hexdigest()
    return int(digest, 16) % num_canastas
69f6eae88a5064ae6a500d4f59436cacea5224b8
49,614
import re


def filter_content(content):
    """Filter content to make sure it can be built into json object"""
    normalized = (content.strip("'")
                  .replace("\n", "\\n")
                  .replace("\r", "")
                  .replace("\"", "'"))
    # Replace every non-ASCII character with a space.
    return re.sub(r'[^\x00-\x7f]', r' ', normalized)
267be82061ddc3614e8eb340a0eab065dd2a7b23
49,615
from typing import Optional
from typing import List


def transform(dict_: dict, typed_dict: dict,
              substring_to_type: Optional[List] = None) -> dict:
    """ Convert values in input dictionary to typed values in object.

    Allows for recursive populating of dictionary types if provided in
    substring_to_type kwarg. The format is as follows:

    substring_to_type
        substring: Text - The prepended string to filter parameter names by
        field: Text - The field that will be filled in dict_
        type: TypedDict - The typeddict that will be created as field
    """
    if substring_to_type:
        for item in substring_to_type:
            # Initialize substring values to convert into dictionary type
            substring = item['substring']
            field = item['field']
            type_ = item['type']
            # Remove the *substring* from matching keys.  The previous
            # version used str.strip(substring), which strips any
            # characters from the substring's character set off both
            # ends (e.g. "pre_expr" -> "xp"), not the substring itself.
            sub_dict = {
                key.replace(substring, "", 1): dict_.pop(key)
                for key in set(dict_.keys()) if substring in key
            }
            # Transform the sub-dictionary into the provided type.
            dict_[field] = transform(sub_dict, type_)
    fields = typed_dict.__annotations__
    return {name: fields[name](value)
            for name, value in dict_.items() if name in fields.keys()}
0de491b2efec21870beb6acf0c8a164da2fcc7c8
49,616
def shave(inputs, div=64, **kwargs):
    """Crop spatial borders so height/width are multiples of *div*.

    Assumes channels-last layout: spatial dims are axes -3 and -2.
    """
    div = int(div)
    height, width = inputs.shape[-3:-1]
    return inputs[..., :height - height % div, :width - width % div, :]
35c000344c839b3129bd09ccdb1ff1396cb55e4b
49,617
import subprocess
import os


def _extract_frames(video, video_root='', frame_root='', tmpl='%06d.jpg'):
    """Extract frames from video using call to ffmpeg.

    Frames are rescaled to a width of 320 px (height keeps aspect ratio),
    sampled at 25 fps, and written under ``frame_root/<video stem>/``
    using the ``tmpl`` filename pattern.

    Returns the ``subprocess.CompletedProcess`` from the ffmpeg call.
    """
    print(f'extracting frames from {video}')
    # BUG FIX: the original used video.rstrip('.mp4'), which strips any
    # trailing run of the characters '.', 'm', 'p', '4' (e.g.
    # 'clip4.mp4' -> 'clip'), not just the extension.  Remove the exact
    # '.mp4' suffix instead.
    stem = video[:-len('.mp4')] if video.endswith('.mp4') else video
    return subprocess.run(
        [
            'ffmpeg',
            '-i', os.path.join(video_root, video),
            '-vf', 'scale=320:-1,fps=25',
            os.path.join(frame_root, stem, tmpl),
        ]
    )
d08342d963d5d9a50f90b2842f46c5140c0fd337
49,619
def int_to_bin_nopad(n):
    """Return the big-endian binary representation of ``n`` as a list of
    bits (ints 0/1), with no padding."""
    return [int(digit) for digit in format(n, 'b')]
1ad1b03e0ca61019261eacc69de0f91a6304a165
49,620
def cost_req_met(card, opponent_cost):
    """Check if a card meets cost requirements.

    A card with a (truthy) intrigue value matches when the cost gap to the
    opponent is strictly positive and an exact multiple of its intrigue;
    a card without intrigue must match the opponent's cost exactly.
    """
    gap = opponent_cost - card.cost
    if card.intrigue:
        return gap > 0 and gap % card.intrigue == 0
    return gap == 0
8a6dfc891b338a4dadcc412b08ad1b16fdc14dce
49,621
def twoSum(nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    # Map each value to the *last* index where it occurs (mirrors the
    # original two-pass behaviour, including its duplicate handling).
    last_index = {value: idx for idx, value in enumerate(nums)}
    for idx, value in enumerate(nums):
        partner = last_index.get(target - value)
        if partner is not None and partner != idx:
            return [idx, partner]
    return [-1, -1]
39e41fd9caa5ec0f19747b8e7356c5c44b2d915f
49,623
import torch
from typing import Optional


def squash(inputs: torch.Tensor, dim: Optional[int] = -1) -> torch.Tensor:
    """Apply the `squash` non-linearity along ``dim``.

    Rescales each vector so its norm lies in [0, 1): short vectors shrink
    toward zero while long vectors approach unit length.

    Args:
        inputs (T): tensor to squash
        dim (int, optional): dimension along which the vector norm is
            taken. Defaults to -1

    Returns:
        T: squashed tensor of the same shape as ``inputs``
    """
    sq_norm = inputs.pow(2).sum(dim=dim, keepdim=True)
    scale = sq_norm / (1 + sq_norm)
    # Small epsilon keeps the division stable for near-zero vectors.
    unit = inputs / (sq_norm.sqrt() + 1e-8)
    return scale * unit
91ee55ecedf0a8fac4098c37d9352775db85cc3c
49,625
def build_id(Z: int, A: int, state: str = "") -> int:
    """
    Builds a canonical nuclide id from atomic number, atomic mass, and
    energy state.

    Parameters
    ----------
    Z : int
        Atomic number.
    A : int
        Atomic mass.
    state : str
        energy state.

    Returns
    -------
    int
        Canonical nuclide id.

    Examples
    --------
    >>> rd.utils.build_id(1,2)
    10020000
    >>> rd.utils.build_id(28,56,'m')
    280560001
    """
    # Ground state -> 0, metastable 'm' -> 1, 'n' -> 2.
    state_codes = {"": 0, "m": 1, "n": 2}
    if state not in state_codes:
        raise ValueError(state + " is not a valid energy state.")
    return Z * 10000000 + A * 10000 + state_codes[state]
6686ac4841e6272ff7a683b4d2b267f95e1615be
49,626
def power(a, b=2):
    """Return ``a`` raised to the exponent ``b``; squares by default."""
    return pow(a, b)
c9f6e6eeb1867ddf6d5492ea6261840858de7acd
49,630
def read_expected_result(filename):
    """ Get the expected result from the given file.

    The expected outcome is the % message following after
    %Expected outcome:

    :param filename: The name of the file
    :return: The expected result.strip().
    :rtype: str
    """
    collected = ""
    with open(filename) as source:
        in_expected_block = False
        for raw_line in source:
            line = raw_line.strip()
            if line.startswith('%Expected outcome:'):
                in_expected_block = True
            elif in_expected_block:
                if line.lower().startswith('% error'):
                    # An error marker short-circuits: return its payload.
                    return line[len('% error'):].strip()
                elif line.startswith('%'):
                    collected = collected + "\n" + line[1:]
                else:
                    # First non-comment line ends the expected block.
                    in_expected_block = False
    return collected.strip()
98522127ecfeb9a32b9bf429eab35fccd90cc849
49,631
def check_for_dead_tasks(net, graph):
    """
    We compute a list of dead tasks. A dead task is a task which does not
    appear in the Minimal Coverability Graph

    :param net: Petri Net representation of PM4Py
    :param graph: Minimal coverability graph. NetworkX MultiDiGraph object.
    :return: list of dead tasks
    """
    # Start from every labelled (visible) transition of the net ...
    candidates = [t for t in net.transitions if t.label is not None]
    # ... and drop each one that appears on some edge of the graph.
    for _source, targets in graph.edges()._adjdict.items():
        for _target, edge_data in targets.items():
            for _key, attributes in edge_data.items():
                if attributes['transition'] in candidates:
                    candidates.remove(attributes['transition'])
    return candidates
03a204b7d1a963216dabb617a49d4a87d7cdf8aa
49,632
from bs4 import BeautifulSoup


def parse_sandwatch_gui(soup: BeautifulSoup) -> str:
    """Parse the Sandwatch GUI result out of a parsed HTML page.

    Reads the first ``div`` carrying the class ``form-group col-lg-12``
    and returns its text content with surrounding whitespace removed.
    Raises ``IndexError`` when no such div is present.
    """
    result_container = soup.find_all('div', attrs={"class": 'form-group col-lg-12'})[0]
    return result_container.text.strip()
a12592899750768a65bf57052af806de9dad366b
49,637
import torch


def __train_with_batch__(args, loss, num_peds_per_batch, pred_traj_len_per_batch,
                         full_loss_list, full_num_peds_per_batch_list,
                         full_pred_traj_len_per_batch_list, optimizer):
    """
    Train a single batch, returning the complete loss values and their
    "weights" using the existing lists from previous batches.

    :param args: configuration arguments (uses ``args.loss_no_len``)
    :param loss: Tensor of shape (num_batches). Loss per (sub-)batch.
    :param num_peds_per_batch: Tensor of shape (num_batches). Number of
        pedestrians per batch, used to weight the loss average.
    :param pred_traj_len_per_batch: Tensor of shape (num_batches). Average
        prediction length per batch, for the weighted loss average.
    :param full_loss_list: running loss list from previous batches.
    :param full_num_peds_per_batch_list: running pedestrian-count list.
    :param full_pred_traj_len_per_batch_list: running prediction-length list.
    :param optimizer: optimizer used to perform the training step.
    :return: the loss, pedestrian-count and prediction-length lists with
        this batch's data appended.
    """
    # Weighted average of per-batch losses. Note that autograd needs a
    # scalar output to backpropagate, hence the dot products.
    if args.loss_no_len:
        # Weight only by the number of pedestrians per batch.
        weights = num_peds_per_batch / torch.sum(num_peds_per_batch)
    else:
        # Weight by pedestrians times average prediction length.
        combined = num_peds_per_batch * pred_traj_len_per_batch
        weights = combined / torch.dot(num_peds_per_batch, pred_traj_len_per_batch)
    loss_train = torch.dot(loss, weights)
    # Backpropagate, apply the update, then clear gradients for the next batch.
    loss_train.backward()
    optimizer.step()
    optimizer.zero_grad()
    # Append this batch's statistics to the running lists.
    full_loss_list = torch.cat((full_loss_list, loss.detach()), dim=0)
    full_num_peds_per_batch_list = torch.cat(
        (full_num_peds_per_batch_list, num_peds_per_batch), dim=0)
    full_pred_traj_len_per_batch_list = torch.cat(
        (full_pred_traj_len_per_batch_list, pred_traj_len_per_batch), dim=0)
    return full_loss_list, full_num_peds_per_batch_list, full_pred_traj_len_per_batch_list
97e0b458c64439f610149eb13f26dc3284216978
49,640
def make_field(name, _values, **kwargs):
    """ specialization of make_parameters for parameters that define fields
    (aka color inputs). In this case the values is a list of name, type
    pairs where types must be one of 'rgb', 'lut', 'depth', 'value', or
    'luminance' May also be given an set of valueRanges, which have min
    and max values for named 'value' type color selections. """
    # BUG FIX: in Python 3, dict.keys()/.values() return views, which are
    # not subscriptable -- values[0] below raised TypeError. Materialize
    # both as lists (also keeps the returned properties JSON-friendly).
    values = list(_values.keys())
    img_types = list(_values.values())
    valid_itypes = ['rgb', 'lut', 'depth', 'value', 'luminance', 'normals']
    for itype in img_types:
        if itype not in valid_itypes:
            raise RuntimeError(
                "Invalid typechoice, must be one of %s" % str(valid_itypes))
    default = kwargs['default'] if 'default' in kwargs else values[0]
    if default not in values:
        raise RuntimeError("Invalid default, must be one of %s" % str(values))
    typechoice = 'hidden'
    label = kwargs['label'] if 'label' in kwargs else name
    properties = dict()
    properties['type'] = typechoice
    properties['label'] = label
    properties['values'] = values
    properties['default'] = default
    properties['types'] = img_types
    if 'valueRanges' in kwargs:
        properties['valueRanges'] = kwargs['valueRanges']
    return properties
f5eb045d17b75e4fb315a43c1b5611a82827fda9
49,642
import hashlib


def generate_sha1_password(user_id, original_password):
    """Generate a SHA-1 hashed password from a user id and a raw password.

    The digest is computed over the string ``"<user_id>:<password>"``.

    :param user_id: user id
    :param original_password: raw password
    :return: hex-encoded SHA-1 digest
    """
    salted = f'{user_id}:{original_password}'
    return hashlib.sha1(salted.encode('utf-8')).hexdigest()
758701c2fc79950efbb52685888705e4dc1429ce
49,643
def multiply_by_two(val):
    """ Double a number.

    Parameters
    ----------
    val: number
        The number to double.

    Returns
    -------
    The number times two.
    """
    return 2 * val
c4b2e5b4fd547b41955b1039dcc11b324dd345dd
49,644
import os


def _check_os_path(path: str) -> str:
    """Normalize a file path.

    :param path: original file path
    :return: normalized file path
    """
    normalized = os.path.normpath(path)
    return normalized
c9504f641a23b87042fa9ba73ff0a37a85a1d1f6
49,645
def _mn_str_(self, l=3, v=0.0):
    """Print MINUIT information:
    >>> m = ...
    >>> print m
    """
    # The underlying self.mnprin(l, v) call is disabled; only a newline
    # is returned so the string conversion stays quiet.
    # self.mnprin(l, v)
    return '\n'
0781f99c2c4a16df8601fb7d90c7c70f49727332
49,646
def find_nearest_points_to_point(point, pt_graph, max_dist):
    """Find all points in ``pt_graph`` within ``max_dist`` of ``point``.

    :param point: reference point (must expose ``distance_to``)
    :param pt_graph: iterable of nodes, each with ``point`` and ``index``
    :param max_dist: maximum (exclusive) distance to qualify
    :return: list of indices of the nearby points
    """
    return [node.index for node in pt_graph
            if point.distance_to(node.point) < max_dist]
dc9e3bd12a744bb191acd1c02e8af94e13d1e922
49,648
def _check_normal(**__):
    """Normal (plain) event check: always succeeds.

    Returns a ``(success, event, extra)`` triple where the event is a
    fresh object whose ``event_type`` is 0 and extra is an empty dict.
    All keyword arguments are accepted and ignored.
    """
    class NormalEvent:
        # 0 marks the plain / default activity type.
        event_type = 0

    return True, NormalEvent(), {}
ad6893116e05281177b4bca55cd2340ca34f9a27
49,649
def makeQuery(kwargs):
    """ Make query element of URI from a supplied dictionary.

    Returns ``"?k1=v1&k2=v2"`` for a non-empty mapping and ``""`` for an
    empty or falsy one.  Keys and values are joined verbatim (no URL
    escaping is performed).
    """
    # BUG FIX: dict.iteritems() was removed in Python 3 and raised
    # AttributeError on every non-empty call; items() is the portable
    # spelling with identical behaviour.
    if not kwargs:
        return ""
    return "?" + "&".join(k + "=" + v for k, v in kwargs.items())
1bd1d95a196be41b121d1dcf49583e8cefaea695
49,653
def select_difficulty():
    """
    Ask the user for a difficulty range.

    Prompts for a minimum and a maximum rating (expected 800..3500) and
    returns them as a two-element list.

    :return: difficulty_level[]
    """
    print("\nEnter the Range of difficulty between 800 to 3500: ")
    lower = int(input("Min: "))
    upper = int(input("Max: "))
    return [lower, upper]
9b1f52f4fec38b857f1344e750530d764fdc6744
49,654
import torch


def class_accuracy(output, target, topk=1):
    """Compute the per-class precision@k for classes 0 and 1.

    ``output`` holds per-class scores of shape (batch, num_classes) and
    ``target`` the ground-truth labels of shape (batch,).  Returns a list
    with the accuracy percentage for class 0 and class 1; a class with no
    samples in ``target`` yields 0.
    """
    _scores, predictions = output.topk(topk, 1, True, True)
    predictions = predictions.t().squeeze(0)
    accuracies = []
    for label in range(2):
        sample_idx = torch.where(target == label)
        total = len(sample_idx[0])
        if total == 0:
            # No samples of this class present -> report 0 rather than
            # dividing by zero (the original swallowed the exception).
            accuracies.append(0)
            continue
        hits = torch.where(predictions[sample_idx] == label)
        accuracies.append(len(hits[0]) * 100.0 / total)
    return accuracies
325af8ce3d64741495e1edf7c58535be1006a913
49,655
import os


def FilterTestsByExpectationsFile(tests_to_run, expected_output_dir):
    """Filter tests against a file.

    Open up the appropriate TestExpectations file and prune the expected
    failures from the tests that will be run.  This is a simple version of
    the WebKit layout-test-expectations parser: each non-empty line is
    '<test file> <expectations>', and a test whose expectations mention
    FAILURE is removed from the run set.

    Args:
      tests_to_run: List of tests to run.
      expected_output_dir: Path to TestExpectations.txt

    Raises:
      ParseError: Expectations file was invalid (a line had no space).

    Returns:
      List of tests to run (order unspecified).
    """
    class ParseError(Exception):
        pass

    tests_to_run_set = set(tests_to_run)
    expectations_file_name = os.path.join(expected_output_dir,
                                          'TestExpectations.txt')
    if not os.path.exists(expectations_file_name):
        # If the file doesn't exist, there is no filtering to be done.
        return tests_to_run
    # BUG FIX: use a context manager so the file handle is closed even when
    # ParseError is raised part-way through (the original leaked it then).
    with open(expectations_file_name, 'r') as expectations_file:
        for line in expectations_file:
            line = line.strip()
            if not line:
                continue
            space_pos = line.find(' ')
            if space_pos == -1:
                raise ParseError
            test_name = os.path.splitext(line[:space_pos])[0]
            expected_results = line[space_pos + 1:]
            if expected_results.upper().find('FAILURE') != -1:
                # Expected to fail: drop it from the set of tests to run.
                tests_to_run_set.discard(test_name)
    return list(tests_to_run_set)
02149fb4822641b974b3d7725dfb9eca287fd915
49,657
import logging


def str_to_float(value: str) -> float:
    """
    Return float from string or raise/log error.

    :param value: The string value.
    :return: The float value.
    :raises ValueError: if the string is not a valid float literal.
    """
    try:
        return float(value)
    except ValueError as e:
        # Log before re-raising so callers get both the record and the error.
        logging.error(f"Cannot convert string value to float: {value=}")
        raise e
152be429a66cbf4062ef9b288e0d71d89b2f7352
49,658
def empty_cells(state):
    """Collect the coordinates of every empty (zero) cell on the board.

    :param state: the state of the current board
    :return: a list of [row, col] pairs, one per empty cell
    """
    return [[row_idx, col_idx]
            for row_idx, row in enumerate(state)
            for col_idx, value in enumerate(row)
            if value == 0]
b3e3944d9dd3c699ff2bfbffbd66407e1bda7d4a
49,659
import socket


def is_ipv6_address(string_ip):
    """Return True if ``string_ip`` parses as an IPv6 address.

    Examples:
    >>> [is_ipv6_address(ip) for ip in ('::1', '2001:db8:85a3::370:7334', '1234:'*7+'1234')]
    [True, True, True]
    >>> [is_ipv6_address(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]
    [False, False]
    """
    try:
        socket.inet_pton(socket.AF_INET6, string_ip)
        return True
    except socket.error:
        return False
b022c1cba4f27341f9a7b019d4ae79b2ae68d303
49,660
def eccentric_anomaly_from_mean_anomaly(M, e, tol=1E-10, maxiter=128,
                                        method='Newton1'):
    """
    Parameters
    ----------
    M : quantity_like [angle]
        Mean anomaly.
    e : numeric
        Eccentricity.
    tol : numeric, optional
        Numerical tolerance used in iteratively solving for eccentric
        anomaly.
    maxiter : int, optional
        Maximum number of iterations when iteratively solving for
        eccentric anomaly.
    method : str, optional
        The method to use for iterative root-finding for the eccentric
        anomaly. Options are: ``'Newton1'`` and ``'Householder3'``.

    Returns
    -------
    E : numeric [radian]
        Eccentric anomaly.

    Raises
    ------
    ValueError
        If ``method`` does not name a known solver.
    """
    # SECURITY/CLARITY FIX: the original built the solver name into a
    # string and eval()'d it, which would execute arbitrary code if
    # `method` ever came from untrusted input and surfaced typos as an
    # opaque NameError.  Look the solver up explicitly instead.
    func_name = 'cy_eccentric_anomaly_from_mean_anomaly_{0}'.format(method)
    try:
        func = globals()[func_name]
    except KeyError:
        raise ValueError(
            "Unknown eccentric anomaly method '{0}'".format(method))
    return func(M, e, tol, maxiter)
caeff3bbb173acac13935ed7a0350732fa7dc19a
49,661
def compute_best_test_losses(data, k, total_queries):
    """
    Given full data from a completed nas algorithm, output the test error
    of the best architecture after every multiple of k
    """
    checkpoints = []
    for query in range(k, total_queries + k, k):
        # Best (lowest) final-column test loss among the first `query` runs.
        best = min(d[-1] for d in data[:query])
        checkpoints.append((query, best))
    return checkpoints
024dc22fa93c6577193da484d6de499be4ecbf00
49,662
def isStarted(q, quest_name):
    """Return True if the named quest is started (enabled and past step 0)."""
    quest = q[quest_name]
    return quest["state"] == "enabled" and quest["step_lvl"] > 0
b92c944359e2726ee7dee88a5d9ab885d75edb78
49,663
def translation(rng):
    """Generate a 2-D translation with each coordinate uniform in [-2, 2).

    Deliberately samples beyond the unit box so shapes can move outside it.
    """
    dx, dy = rng.uniform(low=-2, high=2, size=2)
    return (dx, dy)
a62997a31ba73fe6f4484d01337f01d5702a57a1
49,664
def to_mac(addr: bytes) -> str:
    """Return a colon-separated, upper-case hex MAC address string.

    ``addr`` is the raw hardware address (e.g. 6 bytes); iterating a
    bytes object yields ints, which ``{:02X}`` formats as two hex digits.
    BUG FIX: the original annotation said ``str`` -- passing a str would
    raise ``ValueError`` because 'X' is not a valid format code for
    single characters, so the parameter is annotated as ``bytes``.
    """
    return ':'.join(f'{octet:02X}' for octet in addr)
b1e239cdcc740e5fde34e2edad54ec4d4ccb5b62
49,665
import inspect
import functools


def varargin(f):
    """Decorator that lets ``f`` silently ignore unexpected keyword args.

    Usage:
        @varargin
        def my_f(x):
            # ...
    behaves like ``def my_f(x, **kwargs)`` except that the unexpected
    keyword arguments are dropped instead of being exposed to the body.
    If ``f`` already declares ``**kwargs``, calls are forwarded untouched.

    Arguments:
        f (function): Original function.
    """
    params = inspect.signature(f).parameters.values()
    accepted = {p.name for p in params}  # names f actually declares
    takes_var_kw = any(p.kind == inspect.Parameter.VAR_KEYWORD
                       for p in params)

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not takes_var_kw:
            # Drop keyword arguments f does not declare.
            kwargs = {k: v for k, v in kwargs.items() if k in accepted}
        return f(*args, **kwargs)

    return wrapper
136658a58f0f9acab41d2969fdbec4e35e6b0de7
49,667
import random


def rolls_until_six(die):
    """Roll ``die`` repeatedly until a 6 appears and count the rolls.

    ``die`` is a sequence of face values.  If 6 is not among them, return
    the string '6 is not a possible value of this die' instead of looping
    forever.
    """
    if 6 not in die:
        return '6 is not a possible value of this die'
    rolls = 1
    while random.choice(die) != 6:
        rolls += 1
    return rolls
b762c12eda0c20a45b22f84c7ca6776d0706c26e
49,668
def return_compatible_representation(*args):
    """Return representation of spaces on same grid.

    If no space is barycentric the inputs are returned unchanged (as the
    args tuple); otherwise every space is converted to its barycentric
    representation and the converted list is returned.
    """
    if not any(space.is_barycentric for space in args):
        return args
    converted = [space.barycentric_representation() for space in args]
    if not all(converted):
        raise ValueError("Not all spaces have a valid barycentric representation.")
    return converted
9454f1977b720ba0a1b43805b3b9ed84168288e5
49,669
import os


def modelconfigfile(modelfile):
    """Derive the file name of a model-specific config file.

    The model file's extension is replaced with '.vars'.
    """
    base, _ext = os.path.splitext(modelfile)
    return base + '.vars'
de3b6d63cc61d0348e6e23d7581e8c763a523cf1
49,670