content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def cuboid_volume(height, width, depth):
    """Return the volume of a cuboid given its three edge lengths."""
    return height * width * depth
19193fc0212f6d2c7982fffbeb9c0e77c93ef78b
107,209
import codecs
import json
import errno


def read_json(fp):
    """Read a JSON file into a dict.

    Returns an empty dict if the file does not exist, is a directory, or
    the path is invalid; any other I/O error is re-raised.  (The previous
    docstring claimed None was returned for a missing file, but the code
    has always returned an empty dict.)

    Parameters
    ----------
    fp : str
        Path of the JSON file.

    Returns
    -------
    dict
    """
    content = dict()
    try:
        with codecs.open(fp, 'r', encoding='utf-8') as f:
            content = json.load(f)
    except IOError as e:
        # Swallow only the "expected" failures; surface anything else.
        if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
            raise
    return content
f6967dd81462ab36807e0eab00d2b4f5d89acbd5
107,210
def getQuarter(side, point):
    """Map a 2-D point to the quarter (0-3) of the square path it lies in.

    The plane is split into four quarters by the two extended diagonals of
    the square; each quarter corresponds to one edge of the square.  The
    quarter IDs follow the order in which the robot traverses the edges.

    side: length of one side of the square, in meters.
    point: (x, y) tuple.
    returns 0, 1, 2 or 3 depending on the quarter containing the point.
    """
    x, y = point[0], point[1]
    # Which side of the bottom-left to top-right diagonal?
    above_pos_diag = y - x > 0
    # Which side of the top-left to bottom-right diagonal?
    below_neg_diag = x + y - side < 0
    if above_pos_diag and below_neg_diag:
        return 0
    if above_pos_diag:
        return 3
    if below_neg_diag:
        return 1
    return 2
7bcbcb5981b30fbba0c5e625ad17af5727f0ce68
107,216
def normalize_header_name(header_name):
    """Normalize a header name: strip surrounding whitespace, lower-case it.

    :param header_name: the header name to normalize (may be None)
    :type header_name: str
    :return: the normalized header name, or None when given None
    :rtype: str
    """
    if header_name is None:
        return None
    return header_name.strip().lower()
a438f4378831b9ce70badb801a1f52ccca774b86
107,220
def image_course_object_factory(image_id, course_id):
    """Cook up a fake image/course JSON-style dict from the given ids."""
    return {'image_id': image_id, 'course_id': course_id}
7a91119ba61232f66545153c92b811f4df12095e
107,222
import re
import yaml


def get_yaml(note, key):
    """Find a YAML property in a note and return its value.

    Only the first occurrence of the property is returned.  ``key`` may be
    a plain string (matched as a line prefix) or a compiled regex (matched
    with search).  Returns None when no matching property is found.

    Fix: type(x) == T comparisons replaced with isinstance, so str
    subclasses are handled correctly.
    """
    body = note.body.split('\n')
    i = 0
    while i < len(body):
        line = body[i]
        matched = ((isinstance(key, str) and line.startswith(key))
                   or (isinstance(key, re.Pattern) and key.search(line)))
        if matched:
            yaml_content = line
            # Pull in the indented continuation lines of the property.
            while i + 1 < len(body) and (body[i + 1].startswith(' ')
                                         or body[i + 1].startswith('\t')):
                yaml_content += '\n' + body[i + 1]
                i += 1
            return yaml.safe_load(yaml_content)[key]
        i += 1
    return None
6e10c61609c61b4b7cc3a07aa296570c28377a13
107,226
def non_shared_get_K(Kp: int, C: int, num_params: int) -> int:
    """Inverse of non_shared_get_Kp: recover K, the number of mixtures."""
    return Kp // (C * num_params)
37e9859fccc2703adbe8ca48caba33de5ab39f40
107,231
def _str_to_num(s): """Convert a string token to a number: either int or float.""" try: r = int(s) except ValueError: #try float r = float(s) return r
f3754f2c0184391d6b19cd523fab36c80220292f
107,235
import math
import random


def random_word_fix(tokens, vocab_range, mask, default_num=10):
    """Mask a fixed number of random tokens for the LM task (BERT-style).

    :param tokens: list of int, tokenized sentence (modified in place).
    :param vocab_range: unused here; kept for interface compatibility.
    :param mask: the mask token id.
    :param default_num: the number of masked positions is
        ceil(default_num * 0.15) — NOTE(review): this is independent of
        the sentence length, which looks deliberate given the "_fix"
        suffix, but confirm against callers.
    :return: (tokens, labels) where labels holds the original id at each
        masked position and -1 elsewhere.
    """
    total_len = len(tokens)
    n_masked = math.ceil(default_num * 0.15)
    chosen = random.sample([_ for _ in range(total_len)], n_masked)
    labels = [-1] * total_len
    for pos in chosen:
        labels[pos] = tokens[pos]
        tokens[pos] = mask
    return tokens, labels
fc6450dc58d2b4bd63f8260a9e63b43ed945b1bd
107,238
def is_sorted(t):
    """Return True if the list is in ascending order, else False."""
    return t == sorted(t)
c4ae143807463c07c3c1c09868494d8727ba69af
107,239
from pathlib import Path


def get_acme_tab_file_path() -> Path:
    """Return the full path of the acme.tab file next to this module."""
    module_dir = Path(__file__).parent
    return module_dir / "acme.tab"
86c8fc0a474e859edb3078ee9678b3bdbbb40473
107,242
def binary_search(data, instructions, control):
    """Binary search driven by control characters (day05-style).

    - ``data`` is the list to search
    - ``instructions`` is a string of control characters, consumed one
      per recursion step
    - ``control`` is two characters: the first selects the lower half,
      the second the upper half
    """
    if len(control) != 2:
        raise ValueError("Invalid control set")
    if len(instructions) > 0 and not any(c in instructions for c in control):
        raise ValueError("Instruction not in control set")
    if len(data) == 1:
        return data[0]
    mid = len(data) // 2
    step, rest = instructions[0], instructions[1:]
    if step == control[0]:
        # Continue in the first half.
        return binary_search(data[:mid], rest, control)
    if step == control[1]:
        # Continue in the last half.
        return binary_search(data[mid:], rest, control)
aaa95622c53865a097c98114876152c729297e7a
107,247
def get_most_common_performance(T):
    """Accuracy obtained by always guessing the most common goal in T."""
    counts = {}
    for _, goal in T:
        counts[goal] = counts.get(goal, 0) + 1
    return max(counts.values()) / float(len(T))
e522ba78c2c6f920fdcb8207f7a2f67b843c55b8
107,248
def is_cand_finished(cand, max_length, eos_id):
    """True when a candidate is done generating: it reached the maximum
    length or its last decoded id is the EOS id."""
    return len(cand) >= max_length or cand.last_decoded_id == eos_id
89ddbcb7578a879bd5c36435ae54a798d25c07bf
107,249
def _MaxPoolGradShape(op):
    """Shape function for the MaxPoolGrad op.

    The gradient has the same shape as the op's first (rank-4) input.
    """
    original_input = op.inputs[0]
    return [original_input.get_shape().with_rank(4)]
853fcd620a871dbda1ecf7a69a700da8638aface
107,255
def is_edge(obj, shape):
    """
    Check whether a 2-D object touches the edge of the array.

    Parameters
    ----------
    obj : tuple(slice, slice)
        Pair of slices (e.g. from scipy.ndimage.measurements.find_objects)
    shape : tuple(int, int)
        Array shape.

    Returns
    -------
    b : boolean
        True if the object touches any edge of the array, else False.
    """
    rows, cols = obj
    touches_start = rows.start == 0 or cols.start == 0
    touches_stop = rows.stop == shape[0] or cols.stop == shape[1]
    return touches_start or touches_stop
ffd85db0c87fff9f86874d2f343c07ed07b19be1
107,262
def wait(obj, timeout=None):
    """
    Block until *obj* gets notified with #notify() or #notify_all().

    Can only be used on #synchronized() objects.

    # Arguments
    obj (Synchronizable): An object that can be synchronized.
    timeout (number, None): Seconds to wait for the notification before
      returning.  #None (or no value) waits indefinitely.
    """
    condition = obj.synchronizable_condition
    if timeout is None:
        return condition.wait()
    return condition.wait(timeout)
566c2317633856cae890432a49cd254dbd0ed98b
107,270
def evaluate(x, poly):
    """Evaluate a polynomial at x using Horner's scheme.

    :param x: Argument at which to evaluate
    :param poly: The polynomial coefficients, lowest order to highest
    :return: The result of evaluating the polynomial at x
    """
    # Iterative Horner: identical arithmetic to the recursive
    # x * evaluate(x, rest) + poly[0] formulation.
    result = 0
    for coefficient in reversed(poly):
        result = x * result + coefficient
    return result
4542861d0ebe38627355022834d5e23e1c5f917e
107,272
def format_user(user):
    """Format user data for token generation.

    Args:
        user (dict): User dictionary

    Returns:
        Object: Formatted user dictionary
    """
    # NOTE(review): 'firstname' is all lower-case while 'lastName' is
    # camelCase — looks unintentional, but consumers may depend on the
    # exact keys, so it is preserved as-is.
    return {
        'id': user['id'],
        'firstname': user['firstName'],
        'lastName': user['lastName'],
        'username': user['username'],
        'email': user['email'],
        'verified': user['verified'],
    }
07accb326a67f017a921da9f4049729856578c99
107,274
def wer_summary(details_by_utterance):
    """
    Compute summary statistics (WER, SER, edit counts) from the output of
    wer_details_by_utterance.

    Arguments
    ---------
    details_by_utterance : list
        See the output of wer_details_by_utterance

    Returns
    -------
    dict
        Keys: "WER", "SER", "num_edits", "num_scored_tokens",
        "num_erraneous_sents", "num_scored_sents", "num_absent_sents",
        "num_ref_sents", "insertions", "deletions", "substitutions".

    NOTE: Some cases lead to ambiguity over the number of insertions,
    deletions and substitutions.  We aim to replicate Kaldi compute_wer
    numbers.
    """
    insertions = deletions = substitutions = 0
    total_edits = 0
    scored_tokens = 0
    scored_sents = 0
    erraneous_sents = 0
    absent_sents = 0
    ref_sents = 0
    for details in details_by_utterance:
        ref_sents += 1
        if details["scored"]:
            scored_sents += 1
            scored_tokens += details["num_ref_tokens"]
            insertions += details["insertions"]
            deletions += details["deletions"]
            substitutions += details["substitutions"]
            total_edits += details["num_edits"]
            if details["num_edits"] > 0:
                erraneous_sents += 1
        if details["hyp_absent"]:
            absent_sents += 1
    return {
        "WER": 100.0 * total_edits / scored_tokens,
        "SER": 100.0 * erraneous_sents / scored_sents,
        "num_edits": total_edits,
        "num_scored_tokens": scored_tokens,
        "num_erraneous_sents": erraneous_sents,
        "num_scored_sents": scored_sents,
        "num_absent_sents": absent_sents,
        "num_ref_sents": ref_sents,
        "insertions": insertions,
        "deletions": deletions,
        "substitutions": substitutions,
    }
4857f96a70f0d794d70cd08653dcfcd3676b1971
107,277
def get_table3_coeff(A_A):
    """Get the Table-3 coefficients used to compute "other" primary
    energy consumption.

    Args:
        A_A(float): total floor area (m2)

    Returns:
        tuple: coefficients (a_SV, b_SV)
    """
    a_row = (33, 38, 33)
    b_row = (129, -21, 579)
    # Column selection by floor-area band: <30, [30,120), >=120.
    if A_A < 30:
        col = 0
    elif A_A < 120:
        col = 1
    else:
        col = 2
    return a_row[col], b_row[col]
736f4d5579449ba84193d8a997e9fe2800b007b6
107,280
def get_number_rows(setting, ship_height, alien_height):
    """Calculate how many rows of aliens fit on screen."""
    free_space_y = setting.WindowHeight - (3 * alien_height) - ship_height
    return int(free_space_y / (2 * alien_height))
f5e3e9037ccd0ccad46b57f0e496ddb2064b0168
107,284
def make_timestamp_from_safe_file_name(file_name):
    """Helper: Returns a datestore timestamp string from a safe file name.

    e.g. 'backup_2008_08_10_at_14_10_08_827815'
      -> '2008-08-10 14:10:08.827815'
    """
    parts = file_name.split('_')
    date_part = '-'.join(parts[1:4])
    time_part = ':'.join(parts[5:8])
    return date_part + ' ' + time_part + '.' + parts[8]
69062464d6505929a1a6014122f8b8cf1b8de2ab
107,286
def get_amplicon_id(primer, index=3, delimiter='_', idloc=1):
    """Extract the amplicon id from a name of the form "virus_id_location"."""
    field = str(primer[index])
    return field.split(delimiter)[idloc]
4bcebbc5ef1278fc099c91d67196035ed42166af
107,288
import json


def success(value):
    """Wrap *value* in a JSON-encoded success message."""
    payload = {"type": "SUCCESS", "value": value}
    return json.dumps(payload)
50cfbf41738d1ae36d641ac228020630725bc10e
107,294
def column(rows, name):
    """Return the values of the given column across all rows."""
    return [row[name] for row in rows]
15f28806543c1465b343b394a844561ff19c0738
107,300
def to_codes(s):
    """Return the list of code points of all characters in the string."""
    return list(map(ord, s))
8afd3b40ed886812be959f377f8de0feb503708b
107,308
def do_citation_related_item(ref):
    """Decide whether to create a related_item for a citation.

    Only "data" citations with at least one identifier (doi, accession,
    pmid or uri) qualify.
    """
    if not (ref.publication_type and ref.publication_type == "data"):
        return False
    return bool(ref.doi or ref.accession or ref.pmid or ref.uri)
f5cb87a965d65dd2ff042a2fa94ad76e13033528
107,313
from datetime import datetime


def now_as_local() -> datetime:
    """Return the current time as a timezone-aware local datetime."""
    return datetime.now().astimezone()
39a02c8e7be56161ed67fe8440146559d0a2ba1f
107,315
def calculate_bool_stats(df, groupby_col, bool_col='bool_of_effective_complaints',
                         count_col='id_llamado'):
    """
    Given a data frame with a group column, a boolean column and a value
    column, calculate stats by group:
    - Total value (bool0 + bool1)
    - Rate of bool1 over total for each group
    - Percentage of bool1 over the complete df
    - Cumulative percentage over the complete df

    Parameters
    ----------
    df: pandas.DataFrame
        Table schema
    groupby_col: string
        Column name to group by
    bool_col: string
        Boolean column
    count_col: string
        Numeric column over which stats are calculated

    Return
    -------
    pandas.DataFrame
        Stats table
    """
    # Unique counts per (group, bool) pair, then pivot booleans to columns.
    pair_counts = (df.groupby([groupby_col, bool_col])[count_col]
                     .nunique().to_frame().reset_index())
    stats = (pair_counts.pivot(index=groupby_col, columns=bool_col,
                               values=count_col)
                        .fillna(0).reset_index()
                        .rename(index=str,
                                columns={0: 'n_bool_0', 1: 'n_bool_1'}))
    stats.columns.name = None
    stats['n_total'] = stats['n_bool_1'] + stats['n_bool_0']
    stats['rate'] = stats['n_bool_1'] / stats['n_total'] * 100
    stats['percentage'] = stats['n_bool_1'] / stats['n_bool_1'].sum() * 100
    stats = stats.sort_values('percentage', ascending=False)
    stats['cum_percentage'] = stats['percentage'].cumsum()
    return stats
4c59f3876817e2d31095989c4241a38b3b587026
107,317
def get_campaign_id(campaign_name, campaigns):
    """Get campaign id from campaign name.

    Args:
        campaign_name (string): Full campaign name.
        campaigns (dict): Campaign id as key, campaign name as value.

    Raises:
        LookupError: Campaign name is not found in campaigns.

    Returns:
        Campaign id corresponding to the campaign name provided.
    """
    for cid, cname in campaigns.items():
        if cname == campaign_name:
            return cid
    raise LookupError(f'Campaign name "{campaign_name}" not found.')
8f606423f3785f55c4b44de144be865dc2dfe830
107,320
def info_extract(symbol_info):
    """Extract the (name, address, size) triplets to be written to the A2L.

    :param symbol_info: iterable of symbol dicts
    :return: list of [name, address, size] lists
    """
    return [[sym['name'], sym['address'], sym['size']] for sym in symbol_info]
f33d38ba17c6844c0d47defc827fed1cab566c84
107,322
def versioned(fname):
    """Convert a folder name into a version tuple ('1.2.13' -> (1, 2, 13)).

    Names that are not dotted integers fall back to (0,).
    """
    try:
        return tuple(int(part) for part in fname.split('.'))
    except ValueError:
        return (0,)
da20e8b0860ff8b7b54c1d43b400850e8d6a0d98
107,323
import re


def find_string_in_grouping(groups, pattern):
    """
    Depth-first search for a string inside a nested list structure of
    strings.

    :param groups: Strings grouped by lists with no bound on nesting depth.
    :param pattern: str; regex searched against each string.
    :return: list[int]; full index path of the first match, or None.
    """
    for position, element in enumerate(groups):
        assert isinstance(element, (list, str))
        if isinstance(element, str):
            if re.search(pattern, element):
                return [position]
        else:
            sub_path = find_string_in_grouping(element, pattern)
            if sub_path:
                return [position] + sub_path
    return None
b77a808e90da0ca79f87b97b4c4ef0c2fb6f9c4f
107,325
def load_configs(config_file):
    """
    Load the training configurations.

    :param config_file: mapping with 'data', 'parameters', 'bert_model',
        'input' and 'output' sections
    :return: a dict with the training arguments
    :raises ValueError: when a setting has the wrong type
    """
    # (section, key, required type, exact original error message) —
    # checked in the original's order so the first failure raises the
    # same error.
    checks = [
        ('data', 'max_features', int, 'max_features must be an int'),
        ('data', 'max_labels', int, 'max_labels must be an int'),
        ('parameters', 'model_type', str, 'model_type must be a str'),
        ('parameters', 'epochs', int, 'epochs must be an integer'),
        ('parameters', 'num_topics', list, 'num_topics must be list of integers'),
        ('parameters', 'runs', int, 'runs must be and integer'),
        ('bert_model', 'training', str, 'training must be an str'),
        ('bert_model', 'testing', str, 'testing must be an str'),
        ('input', 'dataset_path', str, 'dataset_path must be an str'),
        ('output', 'save_path', str, 'save_path must be an str'),
        ('output', 'model_output', str, 'model_output must be an str'),
    ]
    arg_dict = {}
    for section, key, expected_type, message in checks:
        value = config_file.get(section)[key]
        if not isinstance(value, expected_type):
            raise ValueError(message)
        arg_dict[key] = value
    return arg_dict
60368c009d7de538165386e92834f48a7a70c2e1
107,329
def format_aprs_frame(frame):
    """
    Format an APRS frame-as-dict into an APRS frame-as-string.

    :param frame: APRS frame-as-dict
    :type frame: dict
    :return: APRS frame-as-string.
    :rtype: str
    """
    out = frame['source'] + '>' + frame['destination']
    if frame['path']:
        out = out + ',' + frame['path']
    return out + ':' + frame['text']
92740c4b5fd2cc3d5906c427688474831677c250
107,331
def array_offset_geo(full_geo, x_offset, y_offset):
    """Return a geotransform offset from full_geo by whole cells.

    Args:
        full_geo: gdal geotransform (6-tuple) to offset
        x_offset: number of cells to move in the x direction
        y_offset: number of cells to move in the y direction

    Returns:
        gdal geotransform shifted by the specified number of x/y cells
    """
    shifted = list(full_geo)
    # Origin moves by (cells * cell size) along each axis.
    shifted[0] = shifted[0] + x_offset * shifted[1]
    shifted[3] = shifted[3] + y_offset * shifted[5]
    return tuple(shifted)
aa02bdfb7c293e76acd5445bf46688499b8f18ce
107,333
import struct def _readFloat(data): """Tries to interpret the next 4 bytes of the data as a 32-bit float. """ if(len(data)<4): print("Error: too few bytes for float", data, len(data)) rest = data float = 0 else: float = struct.unpack(">f", data[0:4])[0] rest = data[4:] return (float, rest)
a5749392ccca1fc191cf05c6a545fff3600a2a23
107,336
def map_layer_output_name_with_index(model_layers):
    """Map each layer's unique output name to its index in the layer list,
    so layers can be accessed by index instead of name.

    :param model_layers: List of layers of the model
    :return: dict mapping every layer output name to its index
    """
    return {layer.output.name: idx for idx, layer in enumerate(model_layers)}
b312401ef24f459d10942c4f7f4c85813de9183e
107,339
def is_full_path(file):
    """Return True if *file* looks like an absolute path.

    Handles POSIX-style ('/...'), UNC-style ('\\...') and Windows
    drive-letter ('C:\\...') paths.
    """
    if file.startswith(("\\", "/")):
        return True
    # str slicing never raises, so the original bare try/except around
    # this check was dead code (and bare `except` is a hazard); removed.
    return file[1:3] == ":\\"
a4e70a679880e75d9d982b244d6a196c017e546e
107,340
def scpd_filter(scan, key=None):
    """Filter an scpd_scan result to entries whose serviceType contains
    *key*; with no (or a falsy) key the scan is returned unfiltered."""
    if not key:
        return scan
    return [entry for entry in scan if key in entry[0]]
2952f0786ace1eb2d0aeea9dc079b7267358abda
107,345
def parse_txt(file):
    """Read a text file and return its entire contents."""
    with open(file) as handle:
        return handle.read()
109949cb817c6d04646c38e0439b199e58a41cea
107,346
def flatten_list(items, seqtypes=(list, tuple), in_place=True):
    """Flatten an irregular nested sequence.

    Works generally but may be slower than a flattener that can assume a
    regular structure.  Based on https://stackoverflow.com/a/10824086

    Parameters
    ----------
    items : iterable
        The irregular sequence to flatten.
    seqtypes : iterable of types (optional)
        Types to flatten. Default is (list, tuple).
    in_place : boolean (optional)
        Toggle in-place flattening. Default is True.

    Returns
    -------
    list
        Flattened list.

    Examples
    --------
    >>> flatten_list([[[1, 2, 3], [4, 5]], 6])
    [1, 2, 3, 4, 5, 6]
    """
    if not in_place:
        items = items[:]
    for idx, _ in enumerate(items):
        # Keep splicing nested sequences open at position idx until a
        # non-sequence (or the end of the list) is reached there.
        while idx < len(items) and isinstance(items[idx], seqtypes):
            items[idx:idx + 1] = items[idx]
    return items
91bd7a903a44edd5d1ccc04abfa7cc86e6f0cf29
107,352
def isValid(text):
    """
    Returns True if the input is related to music.

    Arguments:
    text -- user-input, typically transcribed speech
    """
    keywords = (u"听歌", u"音乐", u"播放", u"我想听",
                u"唱歌", u"唱首歌", u"歌单", u"榜单")
    return any(keyword in text for keyword in keywords)
c988c61990d394f965baac0b6f1b0b297c7e20c4
107,354
def remove_smallest(numbers):
    """Remove the first occurrence of the smallest value from a list."""
    if numbers == []:
        return numbers
    numbers.pop(numbers.index(min(numbers)))
    return numbers
743750b232ccd1a4ed0a7dddf50b15ce22f205b7
107,357
import sympy


def order_from_anf(anf):
    """
    Count the minimum order of a feedback shift register.

    Works by stringifying the symbols in the expression and finding the
    greatest index.

    Parameters
    ----------
    anf : algebraic normal form
        A SymPy expression describing the algebraic normal form of a
        feedback shift register.  Must use integer symbols named `x_k`,
        where `k=0, 1, ...`.

    Returns
    -------
    n : integer
        The minimum order of `anf`.

    Examples
    --------
    >>> x0, x1, x2 = sympy.symbols('x_0 x_1 x_2', integer=True)
    >>> order_from_anf(x0 + x1)
    2
    >>> order_from_anf(x0 + x2)
    3
    """
    order = 0
    for term in anf.args:
        if isinstance(term, sympy.Symbol):
            # A symbol x_k contributes order k + 1.
            candidate = int(str(term).split('_')[-1]) + 1
        else:
            candidate = order_from_anf(term)
        order = max(order, candidate)
    return order
8d8ce2baa17421b8a2eb0da4cb1d5ee80ca5d468
107,361
def input_with_default(prompt, default=None):
    """
    Prompts for a text answer with an optional default choice.

    :param prompt: question to be displayed to user
    :param default: default choice
    :return: user-provided answer, or the default; None if no answer was
        given and no default was provided.
    :rtype: Union[str,None]
    """
    value = input(f'{prompt} ({default}):')
    if value:
        return str(value)
    # Previously str(default) was returned unconditionally, which yielded
    # the literal string "None" when no default was provided — the
    # docstring promises None in that case.
    return str(default) if default is not None else None
58f37a7572d999b4c3045d3991539e03b56e4a76
107,362
def translate_method(input_method):
    """Remove whitespace/miscellaneous characters to smooth out method names.

    Args:
        input_method (str): The name of the method to adjust.

    Returns:
        str: The cleaned method name.
    """
    # Lower-case first, then drop '-', '_' and spaces in a single pass.
    removal_table = str.maketrans('', '', '-_ ')
    return input_method.lower().translate(removal_table)
19163fe91912fbf75e1353f60bac8d98df01e34b
107,364
def local_options(webdriver):
    """
    Build chrome_options for the local environment from the given
    webdriver module.

    Returns a list [chrome_options, driver_path].
    """
    options = webdriver.ChromeOptions()
    options.add_argument("--window-size=1280x1696")
    options.add_argument("--disable-dev-shm-usage")
    driver_path = "/Users/micha/Documents/GitHub/fitness_app/chromedriver"
    return [options, driver_path]
d1dfbea766838ee0491e03ed21187d5bc3c186dc
107,365
def is_akamai_domain(domain):
    """Is the provided domain within akamai.net?"""
    akamai_suffix = ".akamai.net"
    return domain.endswith(akamai_suffix)
d68af4119614f470738ead3b15db085eaeeda359
107,370
def fill_config_template(config_temp, model_info):
    """Write the model information into the model config template."""
    replacements = {
        "$MODELNAME$": model_info.name,
        "$AUTHORNAME$": model_info.author,
        "$EMAILADDRESS$": model_info.email,
        "$DESCRIPTION$": model_info.description,
    }
    content = config_temp
    for placeholder, value in replacements.items():
        content = content.replace(placeholder, value)
    return content
251a7922104c85ed777e1b0e5474aa1c5c43bd78
107,371
def from_paletton_hue_to_rgbvs(hue, color_wheel):
    """
    Generate an (r, g, b, v, s) tuple for the given hue (0 <= hue <= 359)
    by linear interpolation between the two nearest 15-degree wheel
    entries.

    >>> from .constants import COLOR_WHEEL
    >>> from_paletton_hue_to_rgbvs(240, COLOR_WHEEL)
    (0, 51, 204, 1.0, 0.8)
    """
    def interpolate(a, b, fraction):
        return a + round((b - a) * fraction)

    hue = round(hue % 360)
    remainder = hue % 15
    fraction = remainder / 15.
    lower_hue = hue - remainder
    upper_hue = (lower_hue + 15) % 360
    lower_set = color_wheel[lower_hue]
    upper_set = color_wheel[upper_hue]
    rgbv = tuple(interpolate(a, b, fraction)
                 for a, b in zip(lower_set, upper_set))
    # The last wheel component is a percentage; report it as a fraction,
    # and v is always 1.0.
    return rgbv[:-1] + (1., rgbv[-1] / 100)
69dd2925fb89712603c19a4fd44c7469b03358c6
107,372
def split_image(image, size):
    """Split an image into an m x n grid of sub-images.

    :param image: PIL Image object
    :param size: (rows, cols) of the grid
    :return: list of sub-images, row by row
    """
    rows, cols = size
    tile_w = int(image.size[0] / cols)
    tile_h = int(image.size[1] / rows)
    tiles = []
    # Crop row by row, then column by column; the coordinate origin is
    # the top-left corner of the image.
    for r in range(rows):
        for c in range(cols):
            box = (c * tile_w, r * tile_h, (c + 1) * tile_w, (r + 1) * tile_h)
            tiles.append(image.crop(box))
    return tiles
ca36bba7e050b4f8d645001173a2fb8debdcc908
107,375
import hashlib


def hash_md5(file):
    """Return the md5 hex digest of *file*.

    :param file: file-like object exposing a chunks() iterator
        (e.g. a Django UploadedFile)
    :return: md5 hex digest string
    """
    digest = hashlib.md5()
    for chunk in file.chunks():
        digest.update(chunk)
    return digest.hexdigest()
4475e89adb963c7298eb8ab9066421c082d0e7db
107,377
def twosComp(intVal, numBits):
    """
    Compute the two's complement signed value of an unsigned integer.

    Parameters
    ----------
    intVal: An unsigned integer value
    numBits: Bit length of the input integer (register)

    Returns
    -------
    signedIntVal: Signed two's complement integer
    """
    sign_bit = 1 << (numBits - 1)
    if intVal & sign_bit:
        return intVal - (1 << numBits)
    return intVal
ec4b32b9ce0e82b9e1d8839d75e4e6eadb078889
107,380
def my_test_func(*args, **kwargs):
    """A stand-in filter/test function: accepts anything, always True."""
    return True
153a2f1b6fdf6cfdf2b5129c601423f57c99da7f
107,383
import re


def check_community(name) -> bool:
    """Perform basic validation on a community name.

    Valid names look like 'hive-[123]NNNNN' with 5-7 trailing digits.
    """
    # Guard against None/empty and non-string inputs before the regex;
    # the regex itself enforces the prefix, digit count and [123] rule.
    if not name or not isinstance(name, str):
        return False
    return bool(re.match(r'^hive-[123]\d{4,6}$', name))
f9f936a0a85299cef6decfd61d84f0f5ef0cf23b
107,384
import hashlib


def derivate_key(shared_secret, *key_derivation_parameters):
    """
    Key derivation function of IEEE P1363.

    :arg shared_secret: shared secret in string format
    :arg key_derivation_parameters: list of possible key derivation parameters in string format
    :type shared_secret: str
    :type key_derivation_parameters: list[str]
    :rtype : str
    """
    value_to_hash = shared_secret + ''.join(key_derivation_parameters)
    digest = hashlib.sha512()
    # hashlib.update() requires bytes in Python 3; the original passed a
    # str and raised TypeError.
    digest.update(value_to_hash.encode('utf-8'))
    return digest.hexdigest()
7b721db3759c4f3a30f8c069d885cb4f4f77798c
107,389
def get_username() -> str:
    """Repeatedly prompt for a username until a valid one is entered.

    A valid username is non-empty, at most 16 characters, and made up of
    only alphanumeric characters and underscores.
    """
    while True:
        username: str = input("Enter a username: ")
        if username == "":
            print("The username cannot be blank")
        elif len(username) > 16:
            print("The username must be 16 characters or less")
        elif not username.strip("_").isalnum():
            # strip("_") also rejects all-underscore names, since
            # "".isalnum() is False.
            print(
                "The username must be made up of only alpha numeric characters and underscores"
            )
        else:
            return username
e6284257a28d68a97cdcff5dabddebe39272ee60
107,390
def read_bed(bed):
    """Read a BED12 file into a dict:
    {name: (chrom, chromStart, chromEnd, blockSizes)}."""
    bed_data = {}
    with open(bed, "r") as handle:
        for line in handle:
            fields = line[:-1].split("\t")
            chrom = fields[0]
            chrom_start = int(fields[1])
            chrom_end = int(fields[2])
            name = fields[3]
            # blockSizes is a trailing-comma separated list in column 11.
            block_sizes = [int(s) for s in fields[10].split(',') if s != '']
            bed_data[name] = (chrom, chrom_start, chrom_end, block_sizes)
    return bed_data
20087910a4b404bd4d7a6500371761a64ddc4a0a
107,391
from typing import List def _parse_author_affil_back_propagate(author_list: List[List[str]], back_prop: int) -> List[List[str]]: """Back propagate author affiliation. Take the author list structure generated by parse_author_affil_split(..) and propagate affiliation information backwards to preceeding author entries where none was give. Stop before entry $back_prop to avoid adding affiliation information to collaboration names. given, eg: a.b.first, c.d.second (affil) implies a.b.first (affil), c.d.second (affil) and in more complex cases: a.b.first, c.d.second (1), e.f.third, g.h.forth (2,3) implies a.b.first (1), c.d.second (1), e.f.third (2,3), g.h.forth (2,3) """ last_affil: List[str] = [] for x in range(len(author_list) - 1, max(back_prop - 1, -1), -1): author_entry = author_list[x] if len(author_entry) > 3: # author has affiliation,store last_affil = author_entry elif last_affil: # author doesn't have affil but later one did => copy author_entry.extend(last_affil[3:]) return author_list
445d326d66f3d2ec65ef6d526eecefba9d1f2b62
107,392
from collections import defaultdict


def gen_frequencies(count_list):
    """
    Take a list of residue contact counts (see output of `gen_counts`) and
    compute total counts and frequencies.

    Example:
        clist = [
            (4, {("A1", "R4"): 4, ("A1", "C5"): 3}),
            (3, {("A1", "R4"): 2})
        ]
        gen_frequencies(clist)
        # Returns: (7, {("A1", "R4"): (6, 0.857), ("A1", "C5"): (3, 0.429)})

    Parameters
    ----------
    count_list: list of (int, dict of (str, str): int)
        List with individual frame counts and dictionaries mapping residue
        pairs to frame-counts

    Return
    ------
    (int, dict of (str, str): (int, float))
        Total framecount and mapping of residue ID pairs to the number of
        frames in which they contact and the frequency
    """
    pair_totals = defaultdict(int)
    total_frames = 0
    for frame_count, pair_counts in count_list:
        total_frames += frame_count
        for pair, count in pair_counts.items():
            pair_totals[pair] += count
    frequencies = {pair: (count, float(count) / total_frames)
                   for pair, count in pair_totals.items()}
    return total_frames, frequencies
18689547935223206fbbfb5ee0668df5f46c5189
107,396
def color_scale(x):
    """Bin a quantitative river prediction rating into a named category.

    Args:
        x: (float) quantitative prediction rating

    Returns:
        str for the associated bin
    """
    if x == -1.:
        return 'unknown'
    if 0 <= x < .66:
        return 'optimal'
    if .66 <= x < .99:
        return 'fair'
    return 'not_recommended'
c0765d8ac2d19fcfd265d96213ba6f3339ec3aba
107,398
from pathlib import Path


def wikipedia_language(filepath):
    """Extract the language code from a scraped-Wikipedia file path.

    Assumes a path such as ``/some/folder/xyz.wikipedia.org/some/file`` and
    returns the language code ``xyz`` taken from the hostname component.

    Raises ``ValueError`` if no path component ends in ``.wikipedia.org``.
    """
    host = next(
        (component for component in Path(filepath).parts
         if component.endswith(".wikipedia.org")),
        None,
    )
    if host is None:
        raise ValueError(f"{filepath} has no Wikipedia language information")
    # Language code is the first dotted label of the hostname.
    return host.split(".")[0]
7e06a4553658df04eed3c04825a2ecfd33a73465
107,404
def str_to_type(x):
    """Try to convert a string to a Python int, float, or bool.

    There are probably more general ways to do this, but ints, floats,
    and bools are good enough for our purposes.  Strings that match none
    of those types are returned unchanged.

    >>> str_to_type("3.0")
    3.0
    >>> str_to_type("3")
    3
    >>> str_to_type("True")
    True
    >>> str_to_type("false")
    False
    >>> str_to_type("a_string")
    'a_string'
    """
    # Only accept an int when the round-trip is exact, so e.g. "03" is not
    # silently rewritten to 3.
    try:
        if str(int(x)) == x:
            return int(x)
    except ValueError:
        pass
    try:
        return float(x)
    except ValueError:
        pass
    if x.lower() == "true":
        return True
    if x.lower() == "false":
        return False
    return x
256331bea279bbbe1c85049e71aae862dd74f78d
107,406
from typing import Tuple


def line_nos(txt: str, offsets: Tuple[int, int]) -> Tuple[int, int]:
    """Determine which lines to include based on the raw text and offsets.

    Args:
        txt: Raw file contents.
        offsets: Tuple containing (top-offset, bottom-offset).

    Returns:
        Tuple containing (start-line, end-line).  A zero top-offset yields
        start-line 1 (lines are 1-based).
    """
    top, bottom = offsets
    total_lines = txt.count('\n') + 1
    start = top if top else 1
    return start, total_lines - bottom
0f0ac5f612525866ae0440089714f51e96e8b4e8
107,413
def modify_handlers(handlers):
    """Hook for adjusting the set of web request handlers.

    This implementation returns the handlers unchanged; it exists as an
    extension point where entries may be removed or added.

    Args:
        handlers: List of (path_regex, webapp.RequestHandler) instances
            configured for this application.

    Returns:
        The (possibly modified) list of handlers — here, the input list as-is.
    """
    return handlers
b29cb63137b7c99aee8e40a21061f7b9defff4d0
107,414
def fixedToFloat(value, precisionBits):
    """Convert a fixed-point number to a float.

    Args:
        value (int): Number in fixed-point format.
        precisionBits (int): Number of fractional (precision) bits.

    Returns:
        Floating point value.

    Examples::

        >>> import math
        >>> f = fixedToFloat(-10139, precisionBits=14)
        >>> math.isclose(f, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    return value / scale
f85cbd137fd3951b05437131748830820574bcff
107,418
def parseEnsemblLabels(intree):
    """
    Convert a Dendropy tree obtained from Ensembl (in NHX format) by
    labelling internal nodes as 'duplication' or 'speciation' events.

    Returns a new tree (leaves the input tree unchanged).

    :param intree: a tree as Dendropy object, with NHX ``D``/``DD``
        annotations on its internal nodes
    :return: a relabelled clone of ``intree``
    """
    # Clone at depth=1 so the caller's tree object is not mutated.
    t = intree.clone(depth=1)
    for n in t.internal_nodes():
        # NHX flags: D == duplication, DD == dubious duplication; either
        # marks the node as a duplication event, everything else is a
        # speciation event.
        if n.annotations['D'].value == 'Y' or n.annotations['DD'].value == 'Y':
            n.label = 'duplication'
        else:
            n.label = 'speciation'
    t.seed_node.label = None
    # if the tree is unrooted with root node of degree 2, turn into trifurcation
    # while keeping the correct state
    cn = t.seed_node.child_nodes()
    if not t.is_rooted and len(cn)==2:
        # Pick the non-leaf child (second child if the first is a leaf),
        # collapse its edge into the root, and carry its event label over
        # onto the root so no label is lost.
        i = 1 if cn[0].is_leaf() else 0
        tmp = cn[i].label
        cn[i].edge.collapse()
        t.seed_node.label = tmp
    return(t)
7f91b1e5cdc5dce018a1b5b8594055dbbad6bab0
107,419
def append_useflags(useflags):
    """Build a handler that appends a set of useflags to existing useflags.

    Useflags that shadow prior use flags cause the prior flag to be removed
    (e.g. appending '-foo' to 'foo' removes 'foo').

    Usage:
        new_config = base_config.derive(useflags=append_useflags(['foo', '-bar'])

    Args:
        useflags: List of string useflags to append.
    """
    assert isinstance(useflags, (list, set))
    # For each appended flag, compute its opposite-sign form; those are the
    # flags that must be evicted from the existing set.
    shadowed = set()
    for flag in useflags:
        shadowed.add(flag[1:] if flag.startswith('-') else '-' + flag)

    def handler(old_useflags):
        merged = set(old_useflags or [])
        merged.update(useflags)
        merged -= shadowed
        return sorted(merged)

    return handler
d947d6420807fff09d9ed9ea0e318a70f452dd22
107,427
from random import expovariate


def generate_random_times(lam=1.0, a=2.0, tmax=1000.0):
    """Generate two Poisson processes with rates lam and a*lam.

    Fix: the original body contained ``lam = 1.0`` which silently discarded
    the caller's ``lam`` argument; both processes always ran at rates
    (1.0, a).  The parameter is now honoured.

    Parameters
    ----------
    lam : float (optional)
        Rate of Poisson process 1.
    a : float (optional)
        Rate of second Poisson process will be a*lam.
    tmax : float (optional)
        Maximum time of the simulated processes.

    Returns
    -------
    times : dict mapping process id (1 or 2) to list of float
        Event times of each process, strictly increasing and < tmax.
    """
    rates = {1: lam, 2: a * lam}
    times = {pid: [] for pid in rates}
    for pid, rate in rates.items():
        # Exponential inter-arrival times give a Poisson process.
        t = expovariate(rate)
        while t < tmax:
            times[pid].append(t)
            t += expovariate(rate)
    return times
417fa50839c01ad2c55374c70ee8b70f3b30da6e
107,428
def safemod(a, b):
    """Compute ``a % b``, yielding NaN instead of raising on division by zero.

    The behavior is equivalent to NumPy with the 'ignore' error setting.
    """
    try:
        result = a % b
    except ZeroDivisionError:
        result = float('nan')
    return result
e6a1ba58bfbc5545231179a7ff1ec31631cf35e8
107,441
import torch import math def rand_sphere(size): """ Randomly sample on the sphere uniformly See http://corysimon.github.io/articles/uniformdistn-on-sphere/ Our convention is 0 < theta < pi ; -pi < phi < pi :param size: torch.size or list of ints, size of returned tensors :return: theta, phi """ theta = torch.acos(1 - 2 * torch.rand(size)) phi = 2 * math.pi * torch.rand(size) - math.pi return theta, phi
fe8a54da47c13b736e5b46839969b9899bbc6769
107,442
import hashlib


def sha256fromfile(abs_file_path: str) -> str:
    """Create a SHA-256 hex digest of the file at the given path.

    The file is read in 128 KiB chunks so arbitrarily large files can be
    digested without loading them fully into memory.

    Parameters
    ----------
    abs_file_path : str
        The absolute path to the file to digest

    Returns
    -------
    str
        The hexadecimal SHA-256 digest of the file contents

    Raises
    ------
    FileNotFoundError
        If abs_file_path is not a file
    """
    digest = hashlib.sha256()
    chunk_size = 128 * 1024
    with open(abs_file_path, 'rb', buffering=0) as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
de0c60666cd30462211ed888fa541ca88273cedc
107,443
def task_4_build_auth_headers_for_the_request(
        amzdate, payload_hash, algorithm, credential_scope, signed_headers,
        signature, access_key, security_token):
    """
    ************* TASK 4: ADD SIGNING INFORMATION TO THE REQUEST ***********
    The signing information can be carried either in a query string value or
    in a header named Authorization; this function uses the header form.

    Returns a dict with all the headers required to sign the request.
    """
    # The request can include any headers, but MUST include "host",
    # "x-amz-date", and (for this scenario) "Authorization". "host" and
    # "x-amz-date" must be included in the canonical_headers and
    # signed_headers. Python note: the 'host' header is added automatically
    # by the 'requests' library.
    authorization_header = (
        f"{algorithm} Credential={access_key}/{credential_scope}, "
        f"SignedHeaders={signed_headers}, Signature={signature}"
    )
    return {
        'Authorization': authorization_header,
        'x-amz-date': amzdate,
        'x-amz-security-token': security_token,
        'x-amz-content-sha256': payload_hash,
    }
3d84aae21493c44960c0fa75ab488522981fb78b
107,445
def get_game_type_from_season_type(game):
    """
    Determines game type (as used in storage directories) from season type
    (e.g RS or PO) as stored in game definition.  Unknown season types map
    to 0.
    """
    season_to_game_type = {
        'RS': 1,   # regular season
        'PO': 3,   # playoffs
        'MSC': 4,
    }
    return season_to_game_type.get(game['season_type'], 0)
c0de5585d3127ce8baaa6404ab8b3ee252154f1c
107,451
def CheckChange(input_api, output_api):
    """Checks that changes to client_variations.proto are mirrored."""
    cwd = input_api.PresubmitLocalPath()
    # Collect the paths of touched files relative to this presubmit's dir.
    touched = set()
    for path in input_api.AbsoluteLocalPaths():
        if path.startswith(cwd):
            touched.add(input_api.os_path.relpath(path, cwd))

    proto_changed = 'client_variations.proto' in touched
    parser_changed = 'devtools/client_variations_parser.js' in touched
    if proto_changed and not parser_changed:
        return [output_api.PresubmitPromptWarning(
            'client_variations.proto was changed. Does the JS parser at '
            'devtools/client_variations_parser.js need to be updated as well?')]
    return []
712c33fbb79b0a4d453cc51e6497ddac21df12b3
107,452
import re


def fix_whitespace(code: str) -> str:
    """Perform basic whitespace post-processing.

    This corrects a couple of formatting issues that Jinja templates may
    struggle with (particularly blank line count, which is tough to get
    consistently right when ``if`` or ``for`` are involved).

    Args:
        code (str): A string of code to be formatted.

    Returns:
        str: Formatted code, always ending in exactly one newline.
    """
    fixes = (
        # Remove trailing whitespace from any line.
        (r'[ ]+\n', '\n'),
        # Ensure at most two blank lines before top level definitions.
        (r'\s+\n\s*\n\s*\n(class|def|@|#|_)', r'\n\n\n\1'),
        # Ensure at most one line before nested definitions.
        (r'\s+\n\s*\n(( )+)(\w|_|@|#)', r'\n\n\1\3'),
    )
    for pattern, replacement in fixes:
        code = re.sub(pattern, replacement, code)
    # All files shall end in one and exactly one line break.
    return f'{code.rstrip()}\n'
36fc6844e15892378a17bd9103e619d860f5f359
107,456
def _get_localized_fn(path, root_dir): """ Return absolute `path` relative to `root_dir`. When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``, returned path will be ``/xex/somefile.txt``. Args: path (str): Absolute path beginning in `root_dir`. root_dir (str): Absolute path containing `path` argument. Returns: str: Local `path` when `root_dir` is considered as root of FS. """ local_fn = path if path.startswith(root_dir): local_fn = path.replace(root_dir, "", 1) if not local_fn.startswith("/"): return "/" + local_fn return local_fn
d3869d3882b8df62e48ac2c0296663f54df8680c
107,468
import binascii


def decode_pubkey(serialized_pubkey, encoding_format='hex'):
    """Decode a provided public key from the requested format.

    Args:
        serialized_pubkey (str): The encoded public key
        encoding_format (str): The format of the provided encoded public key.
            Must be 'hex'.

    Returns:
        bytes: The native bytes representation of the public key

    Raises:
        ValueError: If encoding_format is not a recognized format.
    """
    if encoding_format != 'hex':
        raise ValueError("Unrecognized pubkey encoding format")
    return binascii.unhexlify(serialized_pubkey)
034fb8064039a8aa0a980fa570ec882914bb718b
107,469
def build_oxo_payload(id_list: list, target_list: list, distance: int) -> dict:
    """
    Build the payload dict for a POST request to OxO for finding xrefs.

    :param id_list: List of IDs with which to find xrefs using OxO
    :param target_list: List of ontology datasources to include
    :param distance: Number of steps to take through xrefs to find mappings
    :return: dict containing payload to be used in POST request with OxO
    """
    return {
        "ids": id_list,
        "mappingTarget": target_list,
        "distance": distance,
    }
18284e87c82e136c22c652e4b130618836dd9654
107,474
import torch


def get_intersection(interval_1, interval_2):
    """Determine the element-wise intersection of two batches of intervals.

    Args:
        interval_1 [*shape, 2]: intervals as (left, right) pairs
        interval_2 [*shape, 2]

    Returns:
        intersection [*shape, 2]: (max of lefts, min of rights), filled with
        NaN wherever the two intervals do not overlap.
    """
    # Extract endpoints
    left_1, right_1 = interval_1[..., 0], interval_1[..., 1]
    left_2, right_2 = interval_2[..., 0], interval_2[..., 1]
    shape = interval_1.shape[:-1]

    # Candidate intersection for every element (valid only where they overlap)
    intersection = torch.stack(
        [torch.max(left_1, left_2), torch.min(right_1, right_2)], dim=-1)

    # Two intervals overlap iff each one starts no later than the other ends.
    # Bug fix: the previous test `(right_1 < left_2) | (left_1 < right_2)`
    # wrongly kept a bogus intersection when interval_1 lay entirely to the
    # left of interval_2, and treated touching intervals asymmetrically.
    overlaps = (left_1 <= right_2) & (left_2 <= right_1)
    intersection[~overlaps[..., None].expand(*[*shape, 2])] = float("nan")
    return intersection
213d047003e5db0fe5aa115a309d5ace178c8fb2
107,480
import pickle def _load_pkl(from_file): """Load the Python object from a file with Pickle. """ with open(from_file, "rb") as f: return pickle.load(f)
b6a49a55d5edec141310fd7d74c3339c8600c858
107,481
import time


def date_as_string(value):
    """Format a ``time.struct_time`` date field as 'YYYY/MM/DD:HH:MM:SS'."""
    fmt = '%Y/%m/%d:%H:%M:%S'
    return time.strftime(fmt, value)
c85b0c315ca29bf5569cc3385ba7901481a83225
107,484
def list_to_str(lst):
    """Convert a list of values into a space-separated string."""
    return ' '.join(map(str, lst))
fbc69eb913b69032d36e4c4d0cafc67b204317bf
107,487
def get_fastq_read_ids(ref_path):
    """Extract the set of read ids from a FASTQ file.

    Bug fix: header lines with no description used ``line.split(' ')[0]``,
    which kept the trailing newline inside the read id; splitting on any
    whitespace drops it.

    NOTE(review): this naive parser treats every line starting with '@' as a
    header, so a quality line that happens to begin with '@' would be
    misread — confirm inputs if that matters.

    :param ref_path: path to the FASTQ file
    :return: set of read id strings (without the leading '@')
    """
    read_ids = set()
    with open(ref_path, 'r') as ref:
        for line in ref:
            if line.startswith('@'):  # i.e. if line is header
                # First whitespace-delimited token, minus the '@' marker.
                read_id = line.split()[0].replace('@', '')
                read_ids.add(read_id)
    return read_ids
85e4564c6df617e22dd0b76519cdbad57aefced7
107,492
def find_tile(player, name):
    """
    Find an adjacent tile that is connected to the current tile by name

    :param text_game_maker.player.player.Player player: player object
    :param str name: name of adjacent tile to search for
    :return: adjacent matching tile. If no matching tiles are found, None is\
        returned
    :rtype: text_game_maker.tile.tile.Tile
    """
    matches = (
        tile for tile in player.current.iterate_directions()
        if tile and (name in tile.name)
    )
    return next(matches, None)
970ead769e5bb5388b406352c92bdc1b46f69e05
107,498
def dependency_graph(page, *provided_dependencies):
    """Create a dependency graph of the form

    {page: set(page.dependencies[0:i]),
     page.dependencies[0]: set(page.dependencies[0][0:j]
     ...
     page.dependencies[i][j][...][n]: set(page.dependencies[i][j][...][n][0:z]),
     ...}

    Any optional provided_dependencies will be included as if they were
    dependencies, without affecting the value of each keyed page.
    """
    # Some HasCreate's can claim generic Base's w/o dependencies.
    deps = set(getattr(page, 'dependencies', []))
    graph = {page: deps}
    # Recurse into both real and provided dependencies; the latter are not
    # recorded under `page` itself.
    for dependency in deps.union(provided_dependencies):
        graph.update(dependency_graph(dependency))
    return graph
0efd8e1c231ec320d511ff731eed203490228242
107,499
def Load(value, u=None):
    """Implement `Load`.

    Returns *value* unchanged; the optional *u* argument is accepted but
    unused here.
    """
    return value
da766be6ae787470de936b25db43cf6d3bcffc2b
107,509
def check_structure_acyclic(structure):
    """
    Return True if the directed graph ``structure`` contains NO cycle.

    ``structure`` must be represented as a dictionary mapping vertices to
    iterables of neighbouring vertices.

    Note: the previous docstring (copied from a cycle-*detection* helper)
    had the summary and both doctest examples inverted; the code has always
    returned True for acyclic graphs.

    For example:

    >>> check_structure_acyclic({1: (2,), 2: (3,), 3: (1,)})
    False
    >>> check_structure_acyclic({1: (2,), 2: (3,), 3: (4,)})
    True
    """
    path = set()
    visited = set()

    def visit(vertex):
        # A vertex already fully explored cannot start a new cycle.
        if vertex in visited:
            return False
        visited.add(vertex)
        path.add(vertex)
        for neighbour in structure.get(vertex, ()):
            # Revisiting a vertex on the current DFS path means a cycle.
            if neighbour in path or visit(neighbour):
                return True
        path.remove(vertex)
        return False

    return not any(visit(v) for v in structure)
1bfa384f4a9bb4fdd8c3aa00eaae8dffb5e50352
107,510
import shlex


def escape( *args ):
    """
    Returns the arguments joined as a single shell command string.
    Each argument has any shell special characters escaped; an empty
    argument is rendered as '""' so it survives as a distinct word.

    Note: this previously used ``pipes.quote``.  The ``pipes`` module was
    deprecated (PEP 594) and removed in Python 3.13; ``shlex.quote`` is its
    documented replacement and produces the same quoting.
    """
    cmd = ''
    for s in args:
        if cmd:
            cmd += ' '
        if not s:
            cmd += '""'
        else:
            cmd += shlex.quote(s)
    return cmd
9524c95255fc1644ed0ad035074d5d68b4b5d9d9
107,518
def mass_transfer_coefficient_HNS(Sh, Dc, diameter):
    """
    Return the mass transfer coefficient [m/s].
    source : (HNS-MS)

    Parameters
    ----------
    Sh : Average Sherwood number []
    Dc : Diffusion coefficient at 25 °C [m²/s]
    diameter : Diameter of the slick or the droplet [m]
    """
    return Sh * Dc / diameter
dc2a3fa91bf9d9aacb7fef652e4753fe5af6943b
107,520
import glob


def glob_files(globname):
    """Expand a list of glob patterns into a flat list of matching files.

    Every pattern must match at least one file — raising on an empty match
    reduces the risk of input typos.

    Fixes: the previous version globbed the filesystem twice per pattern
    (wasted work, plus a race between the two calls), and built the
    exception message as a tuple instead of a string.

    Parameters
    ----------
    globname : list of str
        Glob patterns to expand.

    Returns
    -------
    list of str
        Matched paths, in pattern order.

    Raises
    ------
    Exception
        When a pattern matches no file.
    """
    file_list = []
    for pattern in globname:
        matching_files = glob.glob(pattern)
        if not matching_files:
            raise Exception('Could not match this file: %s' % pattern)
        # Reuse the already-computed match instead of globbing again.
        file_list += matching_files
    return file_list
5da4ca086d3c51bc76314b007d7ae3de8e4b459d
107,522
import click


def ask_question(question, input_type, default=None, hide_input=False):
    """Presents the user with a prompt with a default return value and a type.

    Args:
        question (str): the text that the user will be prompted.
        input_type (type): the type of the input data.
        default (object): default value for the question, optional.
        hide_input (bool): whether the input should be hidden, eg. when
            asking for a password.

    Returns:
        object: The value (type of input_type) entered by the user.
    """
    # Bug fix: the previous `if default:` test dropped falsy-but-valid
    # defaults such as 0, False or "" — only None means "no default".
    if default is not None:
        return click.prompt(
            question, type=input_type, default=default, hide_input=hide_input)
    return click.prompt(question, type=input_type, hide_input=hide_input)
84f82bce3d6d8276462ba880272b4c544a529e3d
107,526
def _successful(response): """ Returns whether a response was considered successful. If no body is available or the 'meta' dict in the response envelope doesn't contain a 'code' value, checks the HTTP response code instead. :param requests.Response response: a response object :returns: (boolean) True if successful """ code = response.status_code try: code = response.json()['meta']['code'] except Exception: pass return code in (200, 201, 202)
e086073f6dc43b7df4e4211250f649f1c86d58cf
107,531
def inverse_lerp(a: float, b: float, c: float) -> float:
    """
    Inverse function to `lerp`: calculates t such that lerp(a, b, t) = c.
    (will divide by zero if a = b)
    """
    numerator = c - a
    denominator = b - a
    return numerator / denominator
6f164c9e14b40c8acb5dbedc90b4a65e52614080
107,532
def build_edge_topic_prefix(device_id: str, module_id: str) -> str:
    """
    Helper function to build the prefix that is common to all topics.

    :param str device_id: The device_id for the device or module.
    :param str module_id: (optional) The module_id for the module. Set to
        `None` to build a prefix for a device.
    :return: The topic prefix, including the trailing slash (`/`)
    """
    if module_id:
        return f"$iothub/{device_id}/{module_id}/"
    return f"$iothub/{device_id}/"
05fe493967b1064dc21c0f4280297a35102f55a1
107,534
def standard_error(sample_size, successes):
    """
    Calculates the standard error of a sample proportion.

    Formula: σp = sqrt [ p(1 - p) / n ], with
    p = proportion of successes in sample (successes / sample size).

    :param sample_size: the size of the sample
    :param successes: the number of successes in the given sample
    :return: the standard error of the sample proportion -> σp
    """
    proportion = successes / sample_size
    variance = proportion * (1 - proportion) / sample_size
    return variance ** 0.5
77cbde0689dec5e2432043362337b925c5ea7296
107,536
def pk_equals(first, second):
    """
    Helper function to check whether two models have equal ``pk`` attributes.
    """
    first_pk = first.pk
    second_pk = second.pk
    return first_pk == second_pk
1596b87073e9dbb14fee1dba921f328704c8e6e2
107,537
def curb_gpred_spans(dmrs_xml, max_tokens=3):
    """
    Remove general predicate node token alignments if a general predicate
    node spans more than max_tokens.  This prevents general predicate nodes
    from dominating rule extraction.

    :param dmrs_xml: Input DMRS XML
    :param max_tokens: Maximum number of allowed tokens before the entire
        general predicate node span is removed
    :return: Modified DMRS (the same object, mutated in place)
    """
    for node in dmrs_xml:
        if node.tag != 'node':
            continue

        # A node is a general predicate iff it has a 'gpred' child.
        if not any(child.tag == 'gpred' for child in node):
            continue

        # Clear the alignment when it covers too many tokens.
        alignment = node.attrib.get('tokalign')
        if len(alignment.split(' ')) > max_tokens:
            node.attrib['tokalign'] = ''

    return dmrs_xml
45d80423a0604ca503e8f2ae730b9b5ca0c3e1e1
107,538