content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def num_arrows(soln):
    """Return the number of arrow instructions in the solution.

    Walks every waldo of every reactor and counts instruction-map entries
    whose arrow component is present.
    """
    total = 0
    for reactor in soln.reactors:
        for waldo in reactor.waldos:
            for arrow, _ in waldo.instr_map.values():
                if arrow is not None:
                    total += 1
    return total
5695a83ced47725c15b77b0f53fa89a762b86609
104,162
import types


def className(obj, addPrefix=False):
    """Return a human-readable class/module name for *obj*.

    Strings are returned as-is (their origin is unknown, so no prefix is
    ever added).  Classes and modules use their ``__name__``, optionally
    prefixed with "Class " / "Module ".  Any other object reports the name
    of its class.
    """
    prefix = ""
    if isinstance(obj, str):
        class_str = obj
    elif isinstance(obj, type):
        class_str = obj.__name__
        if addPrefix:
            prefix = "Class "
    elif isinstance(obj, types.ModuleType):
        class_str = obj.__name__
        if addPrefix:
            prefix = "Module "
    else:
        try:
            class_str = obj.__class__.__name__
        except AttributeError:
            # Exotic objects without __class__: fall back to type() repr.
            class_str = str(type(obj))
    return prefix + class_str
a5a9d7fb0be0b01700a4f9d02d6532e758d662bd
104,163
def gen_topline_employment_change(shortened_dv_list):
    """Return the topline employment change for the current and previous
    months, scaled from thousands to units and rounded.

    Index 11 is the current month, 10 the prior, 9 the one before that.
    """
    def month_delta(i):
        # Month-over-month difference, converted from thousands.
        return round((shortened_dv_list[i] - shortened_dv_list[i - 1]) * 1000)

    return month_delta(11), month_delta(10)
5afea158ffc75558cd4d615dacdf7e9945189741
104,167
def _create_url_rule(func): """Creates the URL rule for a function. :param function func: The MicronMethod to create the route for. :returns: The URL rule: "/<function name>" """ name = func.__name__.split('.')[-1] return '/' + name
8982e5a18ce07326d9f118bbcbc2252b21cdc5dd
104,172
def sampling_map(data, n_req, n_step):
    """Build a list of 3-tuples ``(sequence_index, start, end)`` describing
    samples of length *n_req* taken every *n_step* positions from each
    sequence in *data*.  Each tuple can be used to index a sequence and
    slice a window out of it.
    """
    return [
        (seq_idx, start, start + n_req)
        for seq_idx, sequence in enumerate(data)
        for start in range(0, len(sequence) - n_req, n_step)
    ]
5fba1b7bd86f4674c98d550eba38d71232da8034
104,180
import itertools


def leading(predicate, iterable, start=0):
    """Return the number of leading elements (after skipping *start* items)
    for which *predicate* is true.

    :param predicate: Predicate function of the form ``f(x) -> bool``.
    :param iterable: Iterable sequence.
    :param start: Number of items to skip before counting begins.
    """
    count = 0
    for item in itertools.islice(iterable, start, None):
        if not predicate(item):
            break
        count += 1
    return count
01f0ac8aa3d7361a61bc8186ba3774f0d9b9dd1d
104,182
import re


def get_host_from_url(url_path):
    """Given a smrtlink job url, return host (e.g., 'http://smrtlink-alpha'),
    keeping only scheme + host and downgrading https to http."""
    scheme_and_host = ':'.join(url_path.split(':')[:2])
    return re.sub(r'https', 'http', scheme_and_host)
21a9a5c83546bfb14c8fa437177825a8c01f374f
104,183
def collocation_order(request):
    """Pytest fixture: expose the parametrized order of the collocation method."""
    return getattr(request, "param")
35bed240a6d505d4c469b9de54559305b0db3daa
104,186
def array(n):
    """Return an array declaration (":" per dimension) for rank *n*."""
    return ", ".join(":" for _ in range(n))
946d9b3cfcbe5eda64545dae8d6ae96ab3483737
104,188
def get_dev_input4() -> list:
    """Build a fixed 16x16 maze (list of lists of chars) for development."""
    form1 = (
        """oooooo+--+oooooo
o+--+o|oo|o+--+o
o|oo+-+oo+-+oo|o
o++oooooooooo++o
oo+-+o+--+o+-+oo
oooo|o|oo|o|oooo
o+--+o+--+o+--+o
++oooooooooooo++
|oooooooooooooo|
+--------------+
oooooooooooooooo
o+-+oo+--+xx+-+o
++o|oo|oo|oo|o++
|oo+--+oo+--+oo|
|oooooooooooooo|
+--------------+""")
    # One row per line, one cell per character.
    return [list(row) for row in form1.splitlines()]
40a7e1220dc23cc5ff0c85b50ca9eab01e7904aa
104,193
def create_db_links(txt_tuple_iter, link_format):
    """Format DB records as HTML links to their detail pages.

    :param txt_tuple_iter: iterable of tuples (or the string 'not in DB');
        each tuple holds display text and a DB key at positions given by
        *link_format*.
    :param link_format: ``(detail_page, (txt_position, key_position))``.
    :return: comma-separated string of <a> links, or 'not in DB' verbatim.
    """
    if txt_tuple_iter == 'not in DB':
        return 'not in DB'
    detail_page, (txt_pos, key_pos) = link_format
    links = [
        '<a target="_blank" href="/%s/%s">%s</a>' % (detail_page, info[key_pos], info[txt_pos])
        for info in txt_tuple_iter
    ]
    return ', '.join(links)
4cb393382aa6bf21481e1cd2a60d2746188811f9
104,195
def GetPersistentDeviceList(file_name):
    """Returns a list of devices.

    Args:
        file_name: the file name containing one device serial per line.
    Returns:
        List of device serial numbers that were on the bot.
    """
    with open(file_name) as device_file:
        contents = device_file.read()
    return contents.splitlines()
aa68d4a4f7eaea79f74e0c2ca6e44e49fab3aae9
104,198
def is_list_of(seq, check_type):
    """Return True iff *seq* is a list and every element is an instance of
    *check_type* (vacuously true for an empty list)."""
    return isinstance(seq, list) and all(isinstance(item, check_type) for item in seq)
443191f33cbee519af3770721063da5763ebfd4b
104,199
from warnings import warn


def warning(*args, **kwargs):
    """Issue a warning via :func:`warnings.warn`.

    Parameters
    ----------
    \\*args : \\*
        Arguments forwarded to ``warn``.
    \\*\\*kwargs : \\*\\*
        Keyword arguments forwarded to ``warn``.

    Returns
    -------
    bool
        Always ``True`` (definition success).
    """
    warn(*args, **kwargs)
    return True
3133ebdbaabb212f62b9118e81bc245bd7d6fb30
104,203
def get_time_seconds(string):
    """Convert a Slurm-compatible time string ([[HH:]MM:]SS) to seconds."""
    parts = string.split(":")
    # Left-pad with zero fields until we have hours:minutes:seconds.
    while len(parts) < 3:
        parts.insert(0, "00")
    total = 0
    for factor, field in zip((3600, 60, 1), parts):
        total += factor * int(field)
    return total
fedbb7b4666f2396028b221342da68bc3fce5878
104,207
def date2epoch(dt):
    """Convert datetime object(s) to epoch time.

    Parameters
    ----------
    dt : datetime.datetime or list of datetime.datetime
        Single or list of datetime object(s).

    Returns
    -------
    list of float
        Epoch times (seconds since 1970-01-01 00:00:00); always a list,
        even for a single input.
    """
    items = dt if isinstance(dt, list) else [dt]
    return [item.timestamp() for item in items]
1d13af15d856ca9d8f45fcda5f37d7a7eaff284a
104,210
def BooleanFromString(s):
    """Interpret *s* as a boolean and return its value.

    Raise ValueError if it's not something we can interpret as true or false.
    """
    lowered = s.lower()
    if lowered in {"true", "t", "1", "on", "yes", "y"}:
        return True
    if lowered in {"false", "f", "0", "off", "no", "n"}:
        return False
    # Note: the message intentionally shows the lower-cased form, matching
    # the historical behavior.
    raise ValueError("'%s' not a valid boolean" % (lowered,))
0b1bf77f113da032ea6677f2f56e610b2384cf8a
104,211
def print_dataframe_memory_usage(df): """ df: a Pandas.DataFrame such as curr_active_pipeline.sess.spikes_df Usage: from pyphocorehelpers.print_helpers import print_dataframe_memory_usage print_dataframe_memory_usage(curr_active_pipeline.sess.spikes_df) >> prints >>: ======== print_dataframe_memory_usage(df): ======== Index 0.00 MB t 7.12 MB t_seconds 7.12 MB t_rel_seconds 7.12 MB shank 3.56 MB cluster 3.56 MB aclu 3.56 MB qclu 3.56 MB x 7.12 MB y 7.12 MB speed 7.12 MB traj 3.56 MB lap 3.56 MB maze_relative_lap 3.56 MB maze_id 3.56 MB cell_type 35.58 MB flat_spike_idx 3.56 MB x_loaded 7.12 MB y_loaded 7.12 MB lin_pos 7.12 MB fragile_linear_neuron_IDX 3.56 MB PBE_id 7.12 MB dtype: object ============================ Dataframe Total: 142.303 MB """ print(f'======== print_dataframe_memory_usage(df): ========') curr_datatypes = df.dtypes each_columns_usage_bytes = df.memory_usage(deep=True) # memory usage in bytes. Returns a Pandas.Series with the dataframe's column name as the index and a value in bytes. # each_columns_usage.index curr_column_names = each_columns_usage_bytes.index each_columns_usage_MB = each_columns_usage_bytes.apply(lambda x: x/(1024*1024)) # each_columns_usage_MB each_columns_usage_MB_string = each_columns_usage_MB.apply(lambda x: f'{x:.2f} MB') # Round to 2 decimal places (the nearest 0.01 MB) print(f'{each_columns_usage_MB_string}') # Index 0.00 MB # t 7.12 MB # t_seconds 7.12 MB # t_rel_seconds 7.12 MB # shank 3.56 MB # cluster 3.56 MB # aclu 3.56 MB # qclu 3.56 MB # x 7.12 MB # y 7.12 MB # speed 7.12 MB # traj 3.56 MB # lap 3.56 MB # maze_relative_lap 3.56 MB # maze_id 3.56 MB # cell_type 35.58 MB # flat_spike_idx 3.56 MB # x_loaded 7.12 MB # y_loaded 7.12 MB # lin_pos 7.12 MB # fragile_linear_neuron_IDX 3.56 MB # PBE_id 7.12 MB total_df_usage_MB = each_columns_usage_MB.sum() total_df_usage_MB_string = f'Dataframe Total: {total_df_usage_MB:.3f} MB' # round the total to 3 decimal places. 
print(f'============================\n{total_df_usage_MB_string}') return total_df_usage_MB
d74f8579a89ec8ef17ecf3a10f1c17bb66269201
104,214
import requests
from bs4 import BeautifulSoup


def getLinks(url):
    """Fetch *url* with a browser-like User-Agent and return the response
    body parsed as a BeautifulSoup object.

    Returns None (after printing a message) if the HTTP request fails.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36"}
    try:
        html = requests.get(url, headers=headers)
    except requests.RequestException:
        # Previously a bare ``except`` which also swallowed
        # KeyboardInterrupt/SystemExit; only network/HTTP failures
        # should be treated as a "get links" error.
        print("Get links error!")
        return
    soup = BeautifulSoup(html.text, "lxml")
    return soup
018ccede3109942d51f6f59cedc87da0efc59f93
104,215
def image_size(data):
    """
    Parameters
    ----------
    data : numpy.ndarray
        Image data, indexed as ``data[row, column, ...]``.

    Returns
    -------
    tuple (int, int)
        ``(data.shape[0], data.shape[1])``.  For a row-major image array
        this is ``(height, width)`` — the previous docstring claimed
        "width and height", which contradicted the returned order.
    """
    image_shape = data.shape
    return (image_shape[0], image_shape[1])
011c8a3385fc04c407f8b76b48cc7833f58d7df6
104,217
import json


def decode_json(s):
    """Leniently parse JSON that incorrectly uses the '\\' escape character
    (e.g. ``\\&quot;`` / ``\\&#39;``) by stripping ``\\&`` sequences first.

    See http://stackoverflow.com/questions/7921164/syntax-error-when-parsing-json-string
    JSON online verification: http://jsonlint.com/

    Returns the decoded object, or None for empty/unparseable input.
    """
    if not s:
        return None
    cleaned = s.replace(r'\&', '')
    try:
        return json.loads(cleaned)
    except ValueError:
        return None
c49838cd18aea94647e67ba91ce4496f20322051
104,222
from typing import Optional
import torch


def get_spectrogram(
    waveform,
    *,
    n_fft: int = 2048,
    hop_length: Optional[int] = None,
    win_length: Optional[int] = None,
    window: Optional[torch.Tensor] = None,
    center: bool = True,
    pad_mode: str = "reflect",
    power: Optional[float] = None,
):
    """Generate a spectrogram of the given Tensor.

    Args:
        n_fft: Number of FFT bins.
        hop_length: Sliding-window stride; defaults to ``n_fft // 4``.
        win_length: Window/STFT filter size; defaults to ``n_fft``.
        window: Window function; defaults to a Hann window on the
            waveform's device.
        center: Pad the input sequence if True (see ``torch.stft``).
        pad_mode: Padding method used when *center* is True.
        power: If ``None``, return the raw complex spectrogram; otherwise
            return ``|spec| ** power``.
    """
    # ``or`` (not ``is None``) preserves the historical treatment of 0 as
    # "use the default".
    hop_length = hop_length or n_fft // 4
    win_length = win_length or n_fft
    if window is None:
        window = torch.hann_window(win_length, device=waveform.device)
    spec = torch.stft(
        waveform,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        center=center,
        window=window,
        pad_mode=pad_mode,
        return_complex=True,
    )
    if power is None:
        return spec
    return spec.abs() ** power
17ee54cd5e909c72f99a97648c3d911f1a1b7508
104,225
def get_cloud_directive(key, task_directives, cloud_args_key='cloud_args'):
    """Fetch a directive nested one level under ``cloud_args``.

    :param key: str Directive key
    :param task_directives: dict The dictionary of task directives
    :param cloud_args_key: str Key for the first level
    :return: the requested directive value, or None if absent
    """
    cloud_args = task_directives.get(cloud_args_key, dict())
    return cloud_args.get(key)
70d97d8d3f7edb6c1a3eebebd0fc50adbdc4b42a
104,232
def hex_colour(color: int) -> str:
    """Convert an integer colour value to its zero-padded '#RRGGBB' hex form."""
    return "#" + format(color, "0>6X")
422c84cad5721445c1ce38afa4f9c2e241f9a4a3
104,233
def title_or_url(node):
    """Return the node's ``display_name`` when present and non-empty,
    otherwise the string form of its ``location`` url."""
    title = getattr(node, 'display_name', None)
    return title if title else str(node.location)
7a2a90dc683a77490ed005b66d2666151421f940
104,234
def remap_to_range(x, x_min, x_max, out_min, out_max):
    """Linearly map *x* from [x_min, x_max] onto [out_min, out_max],
    clamping values outside the input range to the output endpoints."""
    if x < x_min:
        return out_min
    if x > x_max:
        return out_max
    fraction = (x - x_min) / (x_max - x_min)
    return out_min + fraction * (out_max - out_min)
0e6c7a755ea3dc4651fbf5abf2cc313b33be7db1
104,241
import random
import string


def get_testdir(length):
    """Return a random lowercase ASCII string of *length* characters,
    used as the name of a testing subdirectory."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
08aa9fa3767792006ef7ad3bfc421546756e2d27
104,242
import queue


def read_file(file, file_encoding='utf-8'):
    """Read a file line-by-line into a FIFO queue.

    Note: the file is expected to be UTF-8 without a BOM.

    :param file: path of the file to read
    :param file_encoding: text encoding of the file
    :return: queue.Queue with one entry per line (newline included)
    """
    lines = queue.Queue(maxsize=0)
    with open(file, encoding=file_encoding) as handle:
        for line in handle:
            lines.put(line)
    return lines
2225f2c130147945579896197699a47b5aee2103
104,243
def cm2inch(value: float) -> float:
    """Convert a length from centimetres to inches (1 inch = 2.54 cm)."""
    CM_PER_INCH = 2.54
    return value / CM_PER_INCH
5ac8f72210ab3b19ebd3171d6f8badf0042db691
104,248
def forward_box(box):
    """Increase the question's box level by one, but only while below 4.

    Parameters
    ----------
    box: int
        Current question box level.

    Returns
    -------
    int
        The updated box level (unchanged when already >= 4).
    """
    return box + 1 if box < 4 else box
ae0de5b0821e8bde81063471f1f3768022d1856e
104,249
def _number_to_3digits(number): """ Transform a number smaller than 1000 (0-999) to a string representation with three characters (000, 001, ..., 021, ..., 089, ..., 123, ..., 999). """ # Make sure the value we transform is under 1000 and is positive. mod_number = number % 1000 if mod_number < 10: return "00" + str(mod_number) elif mod_number < 100: return "0" + str(mod_number) else: return str(mod_number)
fa5e6f76114ca89ea5ee954b46e32f6cf71cd60f
104,250
def filter_raw_data(df, min_entities=1, max_entities=10000000):
    """Drop sentences whose count of labels other than 'O' is below
    *min_entities* or at/above *max_entities*.

    Args:
        df: pd.DataFrame; the third column (``df.iloc[:, 2]``) holds the
            per-sentence label sequences.
        min_entities: minimum number of non-'O' labels (inclusive).
        max_entities: maximum number of non-'O' labels (exclusive — rows
            with exactly this many are dropped, matching the original
            ``>=`` comparison).

    Returns:
        The filtered pd.DataFrame.
    """
    drop_positions = []
    for position, label_seq in enumerate(df.iloc[:, 2]):
        entity_count = sum(1 for label in label_seq if label != 'O')
        if entity_count < min_entities or entity_count >= max_entities:
            # NOTE(review): positions are passed to df.drop, which drops by
            # index *label*; this assumes a default RangeIndex — same as before.
            drop_positions.append(position)
    return df.drop(drop_positions, axis=0)
ce91d1ee0464408b930c347eba131c1c29dd93c4
104,255
import difflib


def get_book_id(book_title, metadata):
    """Return the ``best_book_id`` of the metadata row whose title is the
    closest fuzzy match to *book_title*."""
    existing_titles = list(metadata['title'].values)
    best_match = difflib.get_close_matches(book_title, existing_titles)[0]
    matching_rows = metadata[metadata['title'] == best_match]
    return matching_rows['best_book_id'].values[0]
c2a860960fc34c11fc759856852b60dc31779740
104,256
def convert_condition(cond):
    """Convert a ':'-separated condition from auto-libm-test-out to C form."""
    def to_c(clause):
        # arg_fmt(...) clauses keep their dashes; all others are made
        # identifier-safe by replacing '-' with '_'.
        if not clause.startswith('arg_fmt('):
            clause = clause.replace('-', '_')
        return 'TEST_COND_' + clause

    return '(%s)' % ' && '.join(to_c(c) for c in cond.split(':'))
96361f0a3e6f3f9c84aca64d140ad82cbb1c2676
104,257
def top_markets_by_property(property_data_frame, number_of_markets, dimension):
    """Return the top *number_of_markets* values of *dimension*, ranked by
    number of observations.

    I: dataframe, the number of markets to return, the grouping column name
    O: list of the top markets sorted by number of observations
    """
    markets = property_data_frame.groupby([dimension]).count().reset_index()
    markets = markets.copy()
    # DataFrame.sort() was removed in pandas 0.20; sort_values is the
    # modern equivalent.  The sort key stays the third column, as before.
    markets = markets.sort_values(markets.columns[2], ascending=False)
    top_markets = markets.head(number_of_markets)
    return top_markets[dimension].tolist()
6799d9ce3cb343a0827aa23733ca44345420aeb0
104,262
def PlaceHolders(sql_args):
    """Return a comma-separated list of %s placeholders, one per arg."""
    return ','.join(['%s' for _ in sql_args])
bd24cf36514e23cbe7d5ca3d0981c38930aa6b4a
104,264
import math


def round_up_to(x: int, base: int) -> int:
    """Round ``x`` up to the nearest multiple of ``base``.

    Implemented with pure integer ceiling-division: the previous
    ``math.ceil(x / base)`` routed through a float and silently lost
    precision for |x| >= 2**53.  ``-(-x // base)`` equals ceil(x / base)
    exactly for integer x and positive base.
    """
    return -(-x // base) * base
b3001b6575df26c3aabcf02d5bb27129c2c5a364
104,265
def toBytes(s):
    """
    Method aimed to convert a string in type bytes
    @ In, s, any, value to be converted
    @ Out, response, bytes (for str/bytes input) or the original value
    """
    # Replaces ``type(s) == type("")`` comparisons with isinstance, and
    # drops the dead Python-2 'unicode' branch (``bytes(str)`` without an
    # encoding would have raised TypeError anyway).
    if isinstance(s, bytes):
        return bytes(s)
    if isinstance(s, str):
        return s.encode()
    # Non-string values pass through unchanged, as before.
    return s
fb5253027efc0f98fc8da803a4982b49fd9af533
104,266
from pathlib import Path
import yaml


def is_app_based_tracing_intervention(intervention=None, intervention_conf=None):
    """Determine whether the intervention requires a phone app.

    Args:
        intervention (str): name of the intervention matching a YAML file in
            `configs/simulation/intervention`. Default None.
        intervention_conf (dict): an experimental configuration. Default None.

    Returns:
        (bool): True if an app is required (i.e. RISK_MODEL is non-empty).
    """
    assert intervention is not None or intervention_conf is not None, "Expects non-None intervention_conf when internvention is None. "
    if intervention_conf is not None:
        return intervention_conf['RISK_MODEL'] != ""
    if isinstance(intervention, dict):
        # e.g. a transformer intervention given as {name: {weights, rec levels}}
        intervention = next(iter(intervention.keys()))
    config_dir = Path(__file__).resolve().parent.parent / "configs/simulation/intervention"
    intervention_yaml_file = config_dir / f"{intervention}.yaml"
    # Any transformer variant shares the single transformer.yaml config.
    if "transformer" in intervention_yaml_file.name:
        intervention_yaml_file = config_dir / "transformer.yaml"
    with open(intervention_yaml_file, "r") as f:
        conf = yaml.safe_load(f)
    return conf['RISK_MODEL'] != ""
beb00bab70bca479742536b630164240d9e9f17e
104,267
def _calculate_positives_negatives(target_details): """ Takes expected and actual target values, generating true and false positives and negatives, including the actual correct # of positive and negative values. """ true_positive = 0 true_negative = 0 false_negative = 0 false_positive = 0 actual_positive = 0 actual_negative = 0 for idx in range(len(target_details)): predicted_target = target_details[idx]["predicted_target"] expected_target = target_details[idx]["expected_target"] if expected_target == 1: actual_positive = actual_positive + 1 else: actual_negative = actual_negative + 1 if predicted_target == 1 and expected_target == 1: true_positive = true_positive + 1 elif predicted_target == 0 and expected_target == 0: true_negative = true_negative + 1 elif predicted_target == 1 and expected_target == 0: false_positive = false_positive + 1 elif predicted_target == 0 and expected_target == 1: false_negative = false_negative + 1 return { "true_positive": float(true_positive), "false_positive": float(false_positive), "actual_positive": float(actual_positive), "true_negative": float(true_negative), "false_negative": float(false_negative), "actual_negative": float(actual_negative), }
43314d34e98c4e9fa959426666f17d1a7d71af44
104,269
import torch


def get_device(gpu_list=None):
    """Get a Pytorch device for the first GPU index in *gpu_list*.

    If *gpu_list* is an empty list, return the CPU device instead.  If it
    is None (the default), enumerate the available GPU indices and use the
    first — in that case, having no GPUs raises an IndexError (documented
    behavior, intentionally preserved).

    Returns a pair ``(device, is_gpu)``.
    """
    if gpu_list is None:
        # Deliberately do NOT fall back to CPU here: an empty enumeration
        # falls through to gpu_list[0] and raises IndexError, as before.
        gpu_list = list(range(torch.cuda.device_count()))
    elif not gpu_list:
        return torch.device('cpu'), False
    return torch.device('cuda:{}'.format(gpu_list[0])), True
d183d3b807b38f47cc83ae00397bdcd924f6e0dd
104,272
def CreateSQSQueueSubscription(*, session, queuearn, topicarn):
    """Subscribe an SQS queue to an SNS topic.

    :param session: Session to use for AWS access
    :type session: boto3.session.Session
    :param queuearn: ARN of the queue
    :type queuearn: str
    :param topicarn: ARN of the SNS topic to subscribe to
    :type topicarn: str
    :return: A subscription object confirming the subscription
    :rtype: boto3.sns.Subscription
    """
    sns_connection = session.connect_to("sns")
    subscription_collection_cls = session.get_collection("sns", "SubscriptionCollection")
    collection = subscription_collection_cls(connection=sns_connection)
    # The queue endpoint must confirm the subscription before delivery starts.
    return collection.create(topic_arn=topicarn, protocol="sqs",
                             notification_endpoint=queuearn)
122ce9623bb760f56e9e3f9de8dc911f208d2357
104,274
from pathlib import Path


def find_db_files(s_db_path: str):
    """Recursively find all .csv files under a directory.

    Args:
        s_db_path: A string with the data path to the database folder.

    Returns:
        A list of resolved Paths to the .csv files found.
    """
    return [
        entry.resolve()
        for entry in Path(s_db_path).glob("**/*")
        if entry.suffix in [".csv"]
    ]
408748ed200a5e03c51f8b46669b3bb62e0586f5
104,287
def _to_flat_dict_key(keys): """Converts a list of nested keys to flat keys used in state.as_dict(). Args: keys: List of keys from outmost to innermost. Returns: Corresponding flat dictionary for the given list of keys. """ return '/' + '/'.join(keys)
271fe3fe89a83a1160f8c9203984ec01ffdf4358
104,289
def _get_trailing_whitespace(marker, s): """Return the whitespace content trailing the given 'marker' in string 's', up to and including a newline. """ suffix = '' start = s.index(marker) + len(marker) i = start while i < len(s): if s[i] in ' \t': suffix += s[i] elif s[i] in '\r\n': suffix += s[i] if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n': suffix += s[i+1] break else: break i += 1 return suffix
5a5e5d88accc105334c7cec74a8dbcd51c03e184
104,298
import codecs


def normalise_encoding_name(option_name, encoding):
    """
    Map an encoding name to one of the canonical names 'default', 'ascii'
    or 'utf8' where possible; otherwise return it unchanged.

    >>> normalise_encoding_name('c_string_encoding', 'ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'AsCIi')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'us-ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'utF-8')
    'utf8'
    >>> normalise_encoding_name('c_string_encoding', 'deFAuLT')
    'default'
    >>> normalise_encoding_name('c_string_encoding', 'SeriousLyNoSuch--Encoding')
    'SeriousLyNoSuch--Encoding'
    """
    if not encoding:
        return ''
    lowered = encoding.lower()
    if lowered in ('default', 'ascii', 'utf8'):
        return lowered
    try:
        decoder = codecs.getdecoder(encoding)
    except LookupError:
        # Unknown to this Python build -- may still exist at runtime.
        return encoding
    # Aliases of ascii/utf8 normalise to the canonical short name.
    for canonical in ('ascii', 'utf8'):
        if codecs.getdecoder(canonical) == decoder:
            return canonical
    return encoding
150f0c7b754ebfd74e3ec719f16c1d1102cb1af7
104,300
def _nevergrad_ask_tell(optimizer, ob_func, no_bias=False): """Exposes the Nevergrad Optimizer ask and tell interface Parameters ---------- optimizer: nevergrad.Optimizer Nevergrad Optimizer instance to perform optimization routine ob_func: nevergrad.MultiobjectiveFunction Nevergrad MultiobjectiveFunction instance to be optimized no_bias: bool, optional Whether or nor to calculate hyper-volume from objective function value and return to optimizer Returns ------- x: nevergrad.Parameter Parameter values determining input point to be calculated value: float Output value calculated from objective function """ # Ask the optimizer for a new value x = optimizer.ask() # Calculate the optimizer objective score values value = ob_func.multiobjective_function(*x.args) # Update the objective function with the new value and # compute the hyper-volume. If no_bias is enforced, then # do not report any information to both optimizer or # objective function if no_bias: volume = 0 else: volume = ob_func.compute_aggregate_loss( value, *x.args, **x.kwargs ) # Tell hyper-volume information to the optimizer optimizer.tell(x, volume) # Return reference to both input and output values return x, value
e75e8828bf14176ae267ab628213f733478a3ffc
104,307
def mock_endpoint_factory(mock_endpoint):
    """Fixture yielding a factory that configures the shared mock USB
    endpoint to report a given address and returns it."""
    def make_endpoint(address):
        # Reconfigure the single shared mock for the requested address.
        mock_endpoint.getAddress.return_value = address
        return mock_endpoint
    return make_endpoint
c21a60887a1f4f3daef10a3c3c918e081921dcfa
104,308
def node_offsets_formatter(node):
    """Return a terminal node's original character offsets as 'start:end'."""
    start = node.start_token.start_char
    end = node.end_token.end_char
    return "%d:%d" % (start, end)
23ca0c602597080e0368e3b24364ab38b8cf24c2
104,309
def root_to_list(root):
    """Flatten the etree rooted at *root* into a depth-first ordered list.

    Arguments:
        root [element]: root element to be converted to a list.
    Returns:
        [list]: element objects in depth-first order (root first).
    """
    return [element for element in root.iter()]
a6861ba73352582cba2ef8d79528df840bf13609
104,312
from typing import IO
from typing import Dict
from typing import List
import csv


def parse_package_to_repos_file(input_file: IO[str]) -> Dict[str, List[str]]:
    """Parse a CSV file mapping package names to repositories.

    :param IO[str] input_file: CSV file with a `package` column and an
        `all_repos` column, the latter holding a comma-separated string of
        Github repositories containing an AndroidManifest.xml for that
        package.
    :returns Dict[str, List[str]]: package name -> list of repository names.
    """
    mapping: Dict[str, List[str]] = {}
    for row in csv.DictReader(input_file):
        mapping[row['package']] = row['all_repos'].split(',')
    return mapping
25b36a3d965aa00fa6428fc9045ed5c99bb8e253
104,316
def create_station_id(loc_name, vs30, z1p0=None, z2p5=None):
    """Build a station id '<loc>_<vs30>' (plus '_<z1p0>_<z2p5>' when both
    Z values are given), with '.' characters encoded as 'p'."""
    def dot_to_p(value):
        return str(value).replace('.', 'p')

    station_id = f"{loc_name}_{dot_to_p(vs30)}"
    if z1p0 is not None and z2p5 is not None:
        station_id += f"_{dot_to_p(z1p0)}_{dot_to_p(z2p5)}"
    return station_id
b833f6abfb39843cc136346deaa9762f1dee4bbb
104,319
def safe_delete_key(dictionary, key):
    """
    Safely delete a key from a dictionary only if it exists.

    :param dictionary: the dictionary
    :param key: the key to delete
    :return: the (possibly mutated) dictionary
    """
    # KeyError: key absent; TypeError: key unhashable.  The previous bare
    # ``except`` also swallowed KeyboardInterrupt/SystemExit, which should
    # propagate.
    try:
        del dictionary[key]
    except (KeyError, TypeError):
        pass
    return dictionary
bc58b2a23281634291eb2b2587a550f0a7da206a
104,320
import hashlib


def create_hashname(id1, id2, bins, bins_col, direction, ops):
    """Generate a deterministic md5 hex digest over the argument tuple,
    used as a cached filename.

    :param id1:
    :param id2:
    :param bins:
    :param bins_col:
    :param direction:
    :param ops:
    :return: 32-character hex digest string
    """
    name = '_'.join(str(part) for part in (id1, id2, bins, bins_col, direction, ops))
    return hashlib.md5(name.encode('utf-8')).hexdigest()
c896b71b1cc513307e0c5a57c6d6450332d70a16
104,322
def strip(input_string):
    """Strip surrounding whitespace, then a trailing ';', then surrounding
    double and single quotes (in that order)."""
    result = input_string.strip()
    result = result.rstrip(";")
    result = result.strip('"')
    return result.strip("'")
8278395cd8bcfc5fe700dfe593957410e79d174f
104,325
import re


def parse_regexp(regexp, string, json_key_list):
    """Match *regexp* against *string* and return the captured groups as a
    dict.

    :param regexp: regular expression with capture groups
    :param string: string to match
    :param json_key_list: one key per capture group; falsy entries get an
        auto-generated "GROUP_<n>" key instead
    :return: dict of key -> captured text (empty when there is no match)
    """
    result = dict()
    match = re.match(regexp, string)
    if not match:
        return result
    key_iter = iter(json_key_list)
    fallback_idx = 0
    for group_val in match.groups():
        json_key = next(key_iter)
        if json_key:
            result[json_key] = group_val
        else:
            result[f"GROUP_{fallback_idx}"] = group_val
            fallback_idx += 1
    return result
efc05390504c48bf8068f0c44a33b5acfa1c5fb7
104,326
def add_frame(img, c, b=40):
    """Return a copy of *img* with a frame of colour *c* and thickness *b*
    pixels painted along all four edges (the input array is not modified)."""
    framed = img.copy()
    framed[:b, :] = c   # top
    framed[-b:, :] = c  # bottom
    framed[:, :b] = c   # left
    framed[:, -b:] = c  # right
    return framed
63b00fd6e47347baf49c28a38b74829aef158b98
104,330
import json


def annotation_to_GeckoJSON(annotation, distances={}, colors={}):
    """Serialize a speaker annotation into Gecko's JSON schema (v3.1).

    Parameters
    ----------
    annotation: `pyannote.core.Annotation`
        Speaker identification/diarization annotation.
    distances: `dict`, optional
        In-cluster distances between speech features
        (see `get_distances_per_speaker`).
    colors: `dict`, optional
        speaker id -> consistent display color.

    Returns
    -------
    gecko_json: dict
        JSON-serializable dict following
        https://github.com/gong-io/gecko/blob/master/samples/demo.json;
        write it out with json.dump.
    """
    monologues = []
    for segment, track, label in annotation.itertracks(yield_label=True):
        monologues.append({
            "speaker": {
                "id": label,
                "color": colors.get(label),
                "distance": distances.get(label, {}).get(segment),
                "non_id": [],
                "annotators": 0
            },
            "start": segment.start,
            "end": segment.end
        })
    return {"schemaVersion": "3.1", "monologues": monologues}
2df1a0937d8b2aa5957b06b2237b7cfe7ed9454b
104,331
def binaire_vers_decimal(bits):
    """Convert a sequence of bits (most-significant first) to its integer value.

    :param bits: the bit array (elements 0 or 1)
    :return: the integer represented by the binary value
    """
    value = 0
    for bit in bits:
        value = (value << 1) | bit
    return value
e837f35161b631634c62852adbf03b8b18065635
104,332
def bbox_parse(annotation, gt_bboxes, gt_labels, gt_bboxes_ignore, cat2label):
    """Parse one ground-truth box annotation, appending it in place.

    The three list arguments are mutated (hence no meaningful return
    payload): a normal box goes to *gt_bboxes* with its numerical label in
    *gt_labels*; a crowd box goes to *gt_bboxes_ignore*.

    Args:
        annotation (dict): annotation with 'bbox' ([x, y, w, h]), 'area',
            'iscrowd', 'category_id' and optional 'ignore' keys.
        gt_bboxes (list): accumulates `[x1, y1, x2, y2]` boxes.
        gt_labels (list): accumulates numerical labels for gt_bboxes.
        gt_bboxes_ignore (list): accumulates crowd boxes to be ignored.
        cat2label (dict): dataset category id -> numerical label.

    Returns:
        bool: False when the annotation is ignored or degenerate
        (area <= 0 or side < 1), True otherwise.
    """
    assert len(gt_bboxes) == len(gt_labels), \
        "The length of gt_bboxes and gt_labels must match."
    if annotation.get('ignore', False):
        return False
    x1, y1, w, h = annotation['bbox']
    if annotation['area'] <= 0 or w < 1 or h < 1:
        return False
    # Convert xywh to inclusive xyxy coordinates.
    xyxy = [x1, y1, x1 + w - 1, y1 + h - 1]
    if annotation['iscrowd']:
        gt_bboxes_ignore.append(xyxy)
        return True
    gt_bboxes.append(xyxy)
    gt_labels.append(cat2label[annotation['category_id']])
    return True
51e1cd9ced94c8e976d651caa26825eb3df0ee40
104,333
from bs4 import BeautifulSoup


def remove_html_tag(text):
    """
    Strip HTML tags from the text to clean the data.

    :param text: (str) The text document
    :return: (str) The document's visible text, free of HTML tags
    """
    return BeautifulSoup(text, 'html.parser').get_text()
81e8babc7b2dbfc39052cbb096071f75997fff62
104,334
def path_3d(quad_path, ax3d, planned_path=None, waypoints=None, quad_lw=1.0, path_size=3.5, wp_size=6.0):
    """ Plots the path of a quad on a matplotlib axes.

    Only use if you want to modify the look of the plots, otherwise use
    plot_path_3d to generate plots.

    Args:
        quad_path (numpy.ndarray): A 2D numpy.ndarray of x,y,z quad positions
        ax3d (mplot3d axes3d): The mplot3d axes3d object the path's are plotted onto.
        planned_path (numpy.ndarray): A 2D numpy.ndarray of x,y,z target positions
        waypoints (numpy.ndarray): A 2D numpy.ndarray of x,y,z positions
        quad_lw (float): The float linewidth for the quads path
        path_size (float): The float markersize for the planned path markers
        wp_size (float): The float markersize for the waypoint markers

    Returns:
        ax3d (mplot3d axes3d): The mplot3d axes3d with path's plotted onto it.
    """
    # Planned path: red circle markers (the `color` kwarg overrides the
    # green implied by the 'go' format string).
    if planned_path is not None:
        ax3d.plot(planned_path[:,0], planned_path[:,1], planned_path[:,2], 'go', color='r', markersize=path_size, markeredgecolor='r')
    # Waypoints: blue hexagon markers.
    if waypoints is not None:
        ax3d.plot(waypoints[:,0], waypoints[:,1], waypoints[:,2], 'gH', color='b', markersize=wp_size, markeredgecolor='b')
    # The flown quad path itself: a plain black line.
    ax3d.plot(quad_path[:,0], quad_path[:,1], quad_path[:,2], color='k', linewidth=quad_lw)
    ax3d.tick_params(labelsize=8)
    ax3d.set_xlabel('X', weight='bold')
    ax3d.set_ylabel('Y', weight='bold')
    ax3d.set_zlabel('Z', weight='bold')
    return ax3d
75cc67013a752779baa71b1a2b68540b453cc351
104,337
from typing import Any
import types
import functools


def is_callable(func: Any) -> bool:
    """Return ``True`` if ``func`` is a function-like callable.

    Recognizes plain and builtin functions, bound methods, and
    ``functools.partial`` objects (but not arbitrary callables such as
    classes or objects defining ``__call__``).

    :param func: Function object
    :return: ``True`` if function
    """
    function_types = (
        types.FunctionType,
        types.BuiltinFunctionType,
        types.MethodType,
        functools.partial,
    )
    # noinspection PyTypeChecker
    return isinstance(func, function_types)
7eaa9f439c3464df44cd82d63a4f0f9e9d6ad928
104,345
def pull_field(cursor, field):
    """Return a dict mapping `accession` to the value of *field*.

    :param cursor: an open DB-API cursor over a SEQUENCES table
    :param field: name of the column to pull alongside `accession`
    :raises ValueError: if *field* contains a backtick (guards the
        quoted-identifier interpolation below)
    :return: dict of {accession: field value}
    """
    # Column names cannot be bound with `?` placeholders — binding would
    # SELECT the literal string instead of the column's values — so the
    # identifier is backtick-quoted and interpolated directly.
    if "`" in field:
        raise ValueError(f"Invalid field name: {field!r}")
    query = f"SELECT `accession`, `{field}` FROM SEQUENCES;"
    result = cursor.execute(query)
    field_dict = dict(result.fetchall())
    return field_dict
3eb477fe3399e1d61ca6f6ce0b3fb9ac1bff2a21
104,349
def lazyprop(fn):
    """
    Instead of having to implement the "if hasattr blah blah" code for lazy
    loading, just write the function that returns the value and decorate it
    with lazyprop!  See example below.
    Taken from https://github.com/sorin/lazyprop.

    The computed value is cached on the instance under ``_lazy_<name>``;
    deleting the property clears the cache, and setting it stores a value
    directly (bypassing the computation).

    :param fn: The @property method (function) to implement lazy loading on
    :return: a decorated lazy loading property

    >>> class Test(object):
    ...     @lazyprop
    ...     def a(self):
    ...         print('generating "a"')
    ...         return list(range(5))
    >>> t = Test()
    >>> t.__dict__
    {}
    >>> t.a
    generating "a"
    [0, 1, 2, 3, 4]
    >>> t.__dict__
    {'_lazy_a': [0, 1, 2, 3, 4]}
    >>> t.a
    [0, 1, 2, 3, 4]
    >>> del t.a
    >>> t.a
    generating "a"
    [0, 1, 2, 3, 4]
    """
    # Cache attribute name, derived once from the wrapped function's name.
    attr_name = '_lazy_' + fn.__name__

    @property
    def _lazyprop(self):
        # Compute on first access only; subsequent reads hit the cache.
        if not hasattr(self, attr_name):
            setattr(self, attr_name, fn(self))
        return getattr(self, attr_name)

    @_lazyprop.deleter
    def _lazyprop(self):
        # Drop the cache so the next read recomputes.
        if hasattr(self, attr_name):
            delattr(self, attr_name)

    @_lazyprop.setter
    def _lazyprop(self, value):
        # Explicit assignment overrides the lazily-computed value.
        setattr(self, attr_name, value)

    return _lazyprop
a38bf3c2f4b9833a24137fc1d103973abbc040d8
104,350
def create_filters(filter_dict):
    """Convert a dict into a list of boto3 filter dicts.

    Each (key, value) pair becomes ``{'Name': key, 'Values': value}``.

    :param filter_dict: mapping of filter name -> list of filter values
    :return: list of boto3-style filter dicts
    """
    return [{'Name': name, 'Values': values}
            for name, values in filter_dict.items()]
6f734b2be22fa7789783351e3e75b1f64072c0ed
104,353
from pathlib import Path
import logging


def check_arg_output_plot(output_file: str, create_parent_dir: bool = True) -> bool:
    """Validate that *output_file* is an svg/png path with a usable parent dir.

    Accepted extensions are svg and png (a single suffix only). If the
    parent directory of the output file does not exist, it is either
    created or the check fails.

    :param output_file: the output file
    :param create_parent_dir: create the output file's parent folder in case
        it does not exist
    :raises ValueError: on an unexpected suffix, or when the parent
        directory is missing and may not be created
    :return: True when all checks pass
    """
    target = Path(output_file)
    # Guard clause: reject anything that is not exactly one .svg/.png suffix.
    if target.suffixes not in (['.svg'], ['.png']):
        raise ValueError(f"Error! Unexpected value '{target.suffixes}' for output plot.")
    parent = target.resolve().parent
    if not parent.is_dir():
        if not create_parent_dir:
            raise ValueError(f"Error! Output_dir could not be found at '{parent}'.")
        logging.warning(f"Output_dir could not be found at '{parent}', attempting to create it.")
        parent.mkdir(parents=True, exist_ok=True)
    return True
39ca0976dedf96d8bed6e863b83e5081d45e0ed0
104,354
def filing_1550126(get_fixture):
    """Fixture: path to the 1550126.fec test file."""
    fixture_name = "1550126.fec"
    return get_fixture(fixture_name)
5e28bbe2553d4b14596b0de8ced0f4935a36b387
104,355
import fnmatch


def GetTestsFromDevice(runner):
  """Get a list of tests from a device, excluding disabled tests.

  Args:
    runner: a TestRunner.

  Returns:
    A list of test names that match no pattern in the runner's
    disabled-test list.
  """
  # The executable/apk needs to be copied before we can call GetAllTests.
  runner.test_package.StripAndCopyExecutable()
  all_tests = runner.test_package.GetAllTests()
  disabled_patterns = runner.GetDisabledTests()
  # Keep only tests that match none of the disabled fnmatch-style patterns.
  # A list comprehension is used instead of the old py2-style `filter(...)`
  # call, which under Python 3 would hand callers a one-shot lazy iterator
  # rather than the list this function was written to return.
  return [test for test in all_tests
          if not any(fnmatch.fnmatch(test, pattern)
                     for pattern in disabled_patterns)]
89c575bfca9b03983354be84df00100da49ab41e
104,356
def square(x):
    """
    Return the square of a value.

    Parameters
    ----------
    x : a number (int, float, complex...) or numpy array

    Returns
    -------
    The value squared, .. math:: x^2

    Example
    -------
    >>> square(5)
    25
    """
    squared = x ** 2
    return squared
2298d40f69c1ca70f225fc0c2ae9947b854cfeeb
104,362
def filter_non_src_files(files):
    """Keep only filenames whose first four characters are "Inst".

    These are the pymod dicts for TestComponent; all other files are
    dropped.

    :param files: iterable of filename strings
    :return: list of the matching filenames, in input order
    """
    return [name for name in files if name[:4] == "Inst"]
d94310dc144c107bc99e17e964db9c26663cf3de
104,366
def is_valid_directed_joint_degree(in_degrees, out_degrees, nkk):
    """ Checks whether the given directed joint degree input is realizable

    Parameters
    ----------
    in_degrees :  list of integers
        in degree sequence contains the in degrees of nodes.
    out_degrees : list of integers
        out degree sequence contains the out degrees of nodes.
    nkk  :  dictionary of dictionary of integers
        directed joint degree dictionary. for nodes of out degree k (first
        level of dict) and nodes of in degree l (second level of dict)
        describes the number of edges.

    Returns
    -------
    boolean
        returns true if given input is realizable, else returns false.

    Notes
    -----
    Here is the list of conditions that the inputs (in/out degree sequences,
    nkk) need to satisfy for simple directed graph realizability:

    - Condition 0: in_degrees and out_degrees have the same length
    - Condition 1: nkk[k][l]  is integer for all k,l
    - Condition 2: sum(nkk[k])/k = number of nodes with partition id k, is an
                   integer and matching degree sequence
    - Condition 3: number of edges and non-chords between k and l cannot
                   exceed maximum possible number of edges

    References
    ----------
    [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka,
        "Construction of Directed 2K Graphs". In Proc. of KDD 2017.
    """
    V = {}  # number of nodes with in/out degree.
    forbidden = {}
    if len(in_degrees) != len(out_degrees):  # condition 0
        return False
    for idx in range(0, len(in_degrees)):
        i = in_degrees[idx]
        o = out_degrees[idx]
        # Tally nodes by in-degree (second key 0) and out-degree (second
        # key 1).
        V[(i, 0)] = V.get((i, 0), 0) + 1
        V[(o, 1)] = V.get((o, 1), 0) + 1
        # NOTE(review): forbidden[(o, i)] appears to count nodes whose own
        # (out, in) degree pair would require a self-loop to use the full
        # k->l capacity; it is subtracted from capacity in condition 3.
        # Confirm against [1].
        forbidden[(o, i)] = forbidden.get((o, i), 0) + 1

    S = {}  # number of edges going from in/out degree nodes.
    for k in nkk:
        for l in nkk[k]:
            val = nkk[k][l]
            if not float(val).is_integer():  # condition 1
                return False
            if val > 0:
                # Accumulate the edge endpoints touching out-degree-k nodes
                # and in-degree-l nodes.
                S[(k, 1)] = S.get((k, 1), 0) + val
                S[(l, 0)] = S.get((l, 0), 0) + val
                # condition 3
                if val + forbidden.get((k, l), 0) > V[(k, 1)] * V[(l, 0)]:
                    return False

    for s in S:
        if not float(S[s]) / s[0] == V[s]:  # condition 2
            return False

    # if all conditions above have been satisfied then the input nkk is
    # realizable as a simple graph.
    return True
c144816f286894e6e4df9adb107d02be630576f2
104,367
def try_read_until(fp, pattern, chunk_size=16384):
    """
    Perform buffered read of `fp` until pattern, and return all data
    accumulated and ends with pattern.
    If read failed (no pattern found), file position is backtracked.
    Else: file cursor will be at position right after pattern.

    :param fp: binary file-like object supporting read/tell/seek
    :param pattern: non-empty bytes delimiter to scan for
    :param chunk_size: read granularity in bytes (raised to len(pattern)
        when smaller)
    :returns: None, if EOF reached, otherwise -- the data block (bytes)
        with data+pattern.
    """
    assert len(pattern) > 0 and isinstance(pattern, bytes)
    # Never read less than the pattern length per chunk, or a match could
    # span more than two chunks and be missed.
    chunk_size = max(chunk_size, len(pattern))
    buff = []
    tail = b''
    data = b''
    old_pos = fp.tell()
    while True:
        chunk = fp.read(chunk_size)
        if not chunk:
            break
        # Search the previous chunk's tail joined with this chunk so a
        # pattern straddling the chunk boundary is still found.
        split_pos = (tail + chunk).find(pattern)
        if split_pos >= 0:
            split_pos += len(pattern)  # Include pattern into result
            # Append only this chunk's portion; the tail bytes are already
            # in buff from the previous iteration.
            buff.append( chunk[:split_pos-len(tail)] )
            data = b''.join(buff)
            break
        else:
            tail = chunk[-len(pattern):]  # Memorize last len(pattern) positions to match at next iteration
            buff.append( chunk )
    # Reposition: just past the pattern on success, or back to the start on
    # failure/EOF (data is empty then, so this is a pure backtrack).
    fp.seek(old_pos + len(data))
    return data or None
1135827dba5f6cbb1fb29f410a99a6dfc2c3f794
104,372
def cbany(callback, iterable):
    """Return True if callback(item) is truthy for any item of the iterable.

    Returns False for an empty iterable. Evaluation short-circuits at the
    first truthy result.

    :param callback: callable
    :param iterable: Sequence
    :returns: bool
    """
    return any(callback(item) for item in iterable)
95e7ed255eb00ead6652188d65771903f3c4ab72
104,373
def groups(hand, numwildcards=0):
    """Find pairs, threes/fours/fives-of-a-kind in a hand of card values.

    Inputs: list of non-wildcard card values plus a wildcard count.
    Values are 2..10, with 11 for Jack, 12 for Queen, 13 for King, 14 for
    Ace. The hand can be any length (i.e. it works for seven card games).

    Output: list of (count, value) tuples, biggest groups first (ties
    broken by higher value), e.g. [(3, 14), (2, 11)] is a full house,
    Aces over Jacks. Counts are capped at five (there is no seven of a
    kind), and wildcards are spent on the largest/highest groups first.
    The input list is not mutated.

    >>> groups([11,14,11,14,14])
    [(3, 14), (2, 11)]
    >>> groups([7, 9, 10, 9, 7])
    [(2, 9), (2, 7)]
    >>> groups([11,14,11,14], 1)
    [(3, 14), (2, 11)]
    >>> groups([9,9,9,9,8], 2)
    [(5, 9), (2, 8)]
    >>> groups([], 7)
    [(5, 14), (2, 13)]
    """
    # Tally every possible value, then order by (count, value) descending
    # so wildcards boost the biggest/highest groups first.
    tallies = sorted(((hand.count(value), value) for value in range(2, 15)),
                     reverse=True)
    wild = numwildcards
    found = []
    for count, value in tallies:
        boosted = min(5, count + wild)   # spend wildcards, capped at five
        wild -= boosted - count          # wildcards remaining
        if boosted > 1:
            found.append((boosted, value))
    return found
d9b80b39d91aaa521bd092704a70922a757af88f
104,375
def find_factorial(number: int) -> int:
    """Return the factorial of *number*.

    :param number: non-negative integer
    :raises ValueError: for a negative number
    :return: number!
    """
    if number < 0:
        raise ValueError("Negative number")
    result = 1
    for factor in range(2, number + 1):
        result *= factor
    return result
6071ac6909c2161a068694c0da85172198b3266f
104,378
def mfacebookToBasic(url):
    """Rewrite a Facebook URL to use the mbasic.facebook.com host.

    URLs that contain neither known Facebook host are returned unchanged.
    """
    for host in ("m.facebook.com", "www.facebook.com"):
        if host in url:
            return url.replace(host, "mbasic.facebook.com")
    return url
26b55c23048bd8febe6ef93f15b0c4bbfd434e36
104,381
import curses


def keypending( scr ):
    """ Return True if a key is already buffered (getch would return a
    real key immediately).

    Peeks with getch and pushes any real key back with curses.ungetch so
    the stream is left untouched.
    """
    pending = scr.getch()
    if pending >= 0:
        curses.ungetch(pending)
    return pending >= 0
435b8f83717405f12843d1e54c02b3431a94a8ca
104,387
def app_model_or_none(raw_model):
    """Transform *raw_model* to its application model.

    Returns None when *raw_model* is None.
    """
    if raw_model is None:
        return None
    return raw_model.get_app_model()
fdaa53876e4d0eba6260b4d8b5ecf90c5e8a33aa
104,390
def get_offer_address(html_parser):
    """Return the offer address from a parsed page, or None if absent.

    :param html_parser: a BeautifulSoup object
    :rtype: string
    :return: The offer address
    """
    node = html_parser.find(class_="address-text")
    try:
        return node.text
    except AttributeError:
        # No matching element on the page.
        return None
f8de5c7d30ed78016ebb246454c64e5b37913c28
104,394
def get_destination_from_obj(destination):
    """Resolve *destination* (a destination, event, or tour) to a destination.

    Events and tours expose ``first_destination``; for them the ordered
    first related destination is returned (which may be None). Anything
    else is returned as-is.
    """
    try:
        # Tour/event with related destination(s): use the ordered first.
        return destination.first_destination
    except AttributeError:
        # Not an event or tour.
        return destination
025bee5615793aad61e81d5723bd3df43d43115d
104,399
import requests


def get_url(url):
    """Fetch *url* and return its body together with the HTTP status.

    :return: (content, status_code) on a 200 response,
        (None, status_code) otherwise.
    """
    response = requests.get(url, headers={"User-Agent": "dftools"})
    if response.status_code != requests.codes.ok:
        return None, response.status_code
    return response.content, response.status_code
6a67f273277605979fb114031760e07253dbd565
104,404
import functools
import math


def get_factors(n):
    """return all factors of an integer

    Parameters
    ----------
    n: int
        integer to factorize

    Returns
    -------
    factors: list
        Sorted list of all factors of *n*
    """
    # For odd n only odd divisors are possible, so probe every other value.
    stride = 2 if n % 2 else 1
    divisors = set()
    # math.isqrt gives the exact integer square root; int(math.sqrt(n))
    # can be off by one for large n because of float rounding.
    for candidate in range(1, math.isqrt(n) + 1, stride):
        if n % candidate == 0:
            divisors.add(candidate)
            divisors.add(n // candidate)
    return sorted(divisors)
67ca23f94bf17ff2321ad654c1d4bb6eb6feb977
104,406
import time


def formatDatetime(date):
    """ Return the millisecond timestamp of a concrete date.

    Args:
        date: A datetime object (interpreted in local time).

    Returns:
        An integer: seconds since the epoch times 1000.
    """
    seconds = time.mktime(date.timetuple())
    return int(seconds) * 1000
e22bd2f63776c96c3bccad3eb38fccc9eb780aa6
104,408
def power(n, r):
    """ Return n raised to the power r. """
    return pow(n, r)
d1ed5d802a5975ed100e40f18c739fd39653beaf
104,413
import inspect


def parameters(only=None, exclude=None, ignore='self'):
    """Returns a dictionary of the calling functions
       parameter names and values.

       The optional arguments can be used to filter the result:

           only           use this to only return parameters
                          from this list of names.

           exclude        use this to return every parameter
                          *except* those included in this list
                          of names.

           ignore         use this inside methods to ignore
                          the calling object's name. For
                          convenience, it ignores 'self' by
                          default.

       NOTE(review): if a list is passed for ``exclude``, ``ignore`` is
       appended to it, mutating the caller's list — confirm whether callers
       rely on that.
    """
    # inspect.getargvalues returns (args, varargs, varkw, locals); the last
    # element — named `defaults` here — is actually the caller frame's
    # locals mapping, i.e. the *current* values of its parameters.
    args, varargs, varkw, defaults = \
        inspect.getargvalues(inspect.stack()[1][0])
    if only is None:
        # Default to every declared positional parameter, plus any keys
        # passed through the caller's **kwargs (merged into the mapping).
        only = args[:]
        if varkw:
            only.extend(defaults[varkw].keys())
            defaults.update(defaults[varkw])
    if exclude is None:
        exclude = []
    exclude.append(ignore)
    return dict([(attrname, defaults[attrname])
                 for attrname in only if attrname not in exclude])
d582fb95d03accc5216c1fd78c95ab2af733f128
104,417
import re def _parse_ffmpg_results(stderr): """ Extract number of channels and sample rate from the given FFMPEG STDERR output line. :param stderr: STDERR output line to parse. :returns: Parsed n_channels and sample_rate values. """ # Setup default value. n_channels = 0 sample_rate = 0 # Find samplerate match = re.search(r'(\d+) hz', stderr) if match: sample_rate = int(match.group(1)) # Channel count. match = re.search(r'hz, ([^,]+),', stderr) if match: mode = match.group(1) if mode == 'stereo': n_channels = 2 else: match = re.match(r'(\d+) ', mode) n_channels = match and int(match.group(1)) or 1 return n_channels, sample_rate
4dc49194e5a07bc3914c5d8c2decdb65ee348cd0
104,418
def weighted_choice(weights, prng):
    """Samples an index from a discrete distribution.

    Parameters
    ----------
    weights : list
        A list of floats that identifies the distribution.
    prng : numpy.random.RandomState
        A pseudorandom number generator object.

    Returns
    -------
    int
        The sampled index into *weights*.
    """
    threshold = prng.rand() * sum(weights)
    cumulative = 0.0
    for index, weight in enumerate(weights):
        cumulative += weight
        if threshold < cumulative:
            return index
    # Float round-off can leave the threshold just past the total;
    # fall back to the last index in that case.
    return len(weights) - 1
0ff2dc3c669f128a97d4bca1d34f36f1d5bb6482
104,421
def beta(mu, phi):
    """Convert Beta-distribution parameters from (mean, precision) form to
    the standard (alpha, beta) shape parameters.

    :param mu: mean of the distribution
    :param phi: precision parameter
    :return: (alpha, beta) tuple
    """
    alpha = mu * phi
    beta_param = (1 - mu) * phi
    return alpha, beta_param
5948b130455276f783969387a059ffc283bf6753
104,422
def get_request_ips(request):
    """
    Get the chain of client and proxy IP addresses from the request as a
    nonempty list, where the closest IP in the chain is last. Each IP
    vouches only for the IP before it.

    This works best if all proxies conform the to the X-Forwarded-For
    header spec, including whatever reverse proxy (such as nginx) is
    directly in front of the app, if any. (X-Real-IP and similar are not
    supported at this time.)
    """
    chain = list(request.headers.getlist('X-Forwarded-For'))
    chain.append(request.remote_addr)
    return chain
8aafda8ea790e7d6eea01f08e128cf1f70310dd1
104,423
def protege_data(datas_str, sens):
    """ Used to crypt/decrypt data before saving locally.
    Override if security is needed.

    str -> bytes when crypting; bytes -> str when decrypting.

    :param datas_str: When crypting, str. when decrypting bytes
    :param sens: True to crypt, False to decrypt
    """
    if sens:
        return datas_str.encode("utf8")
    return datas_str.decode("utf8")
b7e8bad20ca6825dd68552a435920c45cd582a30
104,425
from typing import List


def string_of_single_to_list_of_ints(input_string: str) -> List[int]:
    """Parse each character of *input_string* as an integer.

    :param input_string: string of single digits, e.g. "123"
    :return: list of the parsed integers, e.g. [1, 2, 3]
    """
    return [int(char) for char in input_string]
a6c9de29cce4c96ea86f77a7f17aa330960ad77f
104,428
def canUnlockAll(boxes):
    """
    - boxes is a list of lists
    - A key with the same number as a box opens that box
    - You can assume all keys will be positive integers
    - The first box boxes[0] is unlocked
    - Return True if all boxes can be opened, else return False

    Implemented as an iterative depth-first search over reachable boxes.
    Unlike the previous version, the input list is left untouched (the old
    code overwrote visited entries with None, and also shadowed the
    function name with a local variable).
    """
    if not boxes:
        # No boxes at all: nothing can be "all opened".
        return False
    total = len(boxes)
    unlocked = {0}          # box 0 starts unlocked
    pending = [0]           # boxes whose keys we still have to collect
    while pending:
        box = pending.pop()
        for key in boxes[box]:
            # Keys pointing outside the box range are ignored.
            if key < total and key not in unlocked:
                unlocked.add(key)
                pending.append(key)
    return len(unlocked) == total
439d5694a442342626d960fce716fce930512b28
104,433
def defaults_override(dictionary, overrides):
    """Copy every key/value from *overrides* into *dictionary*, in place.

    Existing keys are overwritten; new keys are appended.

    :return: the (mutated) *dictionary*
    """
    dictionary.update(overrides)
    return dictionary
9d98ebd347527f57bf3f03435673abb8de825af0
104,435
def report_exit(combined_test_report):
    """The exit code of this script is based on the following:
        0: All tests have status "pass", or only non-dynamic tests have
           status "silentfail".
       31: At least one test has status "fail" or "timeout".
    Note: A test can be considered dynamic if its name contains a ":"
    character."""
    failing = ("fail", "timeout")
    if any(test.status in failing for test in combined_test_report.test_infos):
        return 31
    return 0
f3fa40a42c8f6b61f8d2729abdb0e10321cc50f0
104,436
def generate_resource_url(
    url: str, resource_type: str = "", resource_id: str = "", version: str = "v1"
) -> str:
    """
    Build ``"{url}/{resource_type}/{resource_id}"``.

    NOTE(review): ``version`` is accepted but currently not included in the
    generated URL — confirm whether it should appear in the path.
    """
    return "/".join((url, resource_type, resource_id))
350a0c07813153107f2d103cca5e3df6bc197f68
104,440
import math


def Gamma2(z: float) -> float:
    """Gamma Function approximation, Gergő Nemes' version.

    Gamma(n) == fact(n-1)

    >>> import math
    >>> round(Gamma2(2),1)
    1.0
    >>> round(Gamma2(3),1)
    2.0
    >>> round(Gamma2(4),1)
    6.0
    >>> round(Gamma2(5),1)
    24.0
    >>> round(Gamma2(.5), 7)  # Not quite right
    1.7630962
    >>> round(math.sqrt(math.pi), 7)
    1.7724539
    """
    # Nemes' closed form: sqrt(2*pi/z) * ((z + 1/(12z - 1/(10z))) / e)^z
    prefactor = math.sqrt(2*math.pi/z)
    base = (z+(1/(12*z-(1/(10*z)))))/math.e
    return prefactor * base**z
a998aa7e8188a61e23fac36a2cc8e243e8878925
104,450
def generate_default_hostname(instance_id):
    """Default hostname generator: the instance id rendered as a string.

    :param instance_id: instance reference/identifier
    :return: str hostname
    """
    hostname = str(instance_id)
    return hostname
006f6ea820d91a5ad1c5fb07ce7c0a44ee1644d7
104,453
import hashlib


def get_hash(value):
    """ Return the SHA-256 digest of *value* (bytes) as a 64-character hex
    string (representing 32 bytes). """
    digest = hashlib.sha256(value)
    return digest.hexdigest()
ced272a574c339c713d1354d665d225367784188
104,455
def strip_domain_strings_wrapper(subset):
    """Build a cleaner that removes results/subset/domain marker text.

    :param subset: the subset-name substring to strip from labels
    :return: a function stripping markers, '*' padding and surrounding
        underscores from a label string
    """
    # Replacement order matters: longer/earlier markers are removed first,
    # matching the original chained-replace behavior.
    markers = ('results_', subset, 'domain_frac_', 'domain_appears_',
               'code_appears')

    def strip_domain_strings(text):
        """Strip the marker substrings from *text*."""
        cleaned = text.strip('*')
        for marker in markers:
            cleaned = cleaned.replace(marker, '')
        return cleaned.strip('_')

    return strip_domain_strings
9055f51ad7023735dbbc12a7034ecf7be78fa16c
104,462