content — string, lengths 35 to 416k
sha1 — string, lengths 40 to 40
id — int64, 0 to 710k
import operator

def sort_similarities(total_similarities):
    """ Sort the similarities by score """
    # dict.iteritems() is Python 2 only; items() works in Python 3
    return sorted(total_similarities.items(), key=operator.itemgetter(1))
fe4df2e71a3f97b7f041a93b62932d1604d8a7b6
16,366
def probs2marginals(combined_qc, probs):
    """
    Given a probability distribution corresponding to a given combined circuit
    (made up of two equal sized circuits combined in parallel), this function
    returns the two marginals for each subcircuit.
    """
    num_qubits = int(combined_qc.num_qubits / 2)
    marginals = [{}, {}]
    for string in probs:
        substrings = [string[0:num_qubits], string[num_qubits::]]
        for j, substring in enumerate(substrings):
            if substring in marginals[j]:
                marginals[j][substring] += probs[string]
            else:
                marginals[j][substring] = probs[string]
    return marginals
0e14a69e47070c704e8b25cfb29e532fe87f6fc6
16,367
def SplitCoordinates(xyz_str):
    """
    Split a list of coordinate strings into a list of float triples.

    xyz_str: (list of str) a list of "x,y,z" coordinate strings
    """
    xyz = []
    for i in range(len(xyz_str)):
        x_, y_, z_ = xyz_str[i].split(",")
        xyz.append([float(x_), float(y_), float(z_)])
    return xyz
808a3c73783d8f36a2ecd70d128fca49fc7f7e1e
16,368
def build_biosample_params(qparams, biosample_id=None):
    """Fills the `biosample` part with the request data"""
    biosample_params = {}
    if biosample_id is not None:
        biosample_params['id'] = biosample_id
    return biosample_params
ef85dcfa6f28d85a1263f860e4e77a7e795cc89f
16,369
import re

def judge_cancel(question):
    """
    Detect whether the question expresses a "cancel" intent.

    :param question: input sentence
    :return: True if a negation word and an action word co-occur in the
        same clause, False otherwise
    """
    neg_word, pos_word = ['不要', '不想', '取消', '不需要', '不用'], ['办理', '需要', '开通']
    sentence_blocks = re.split(',|\.|:|,|。', question)
    for block in sentence_blocks:
        for n in neg_word:
            if n in block:
                for p in pos_word:
                    if p in block:
                        return True
    return False
4f4a9bf3e3a6c927558ed1f1b2f5b6f84a831051
16,371
def vectorOFRightDirection(OFvect, FOE):
    """Returns True if OF vector is pointing away from the FOE, False otherwise."""
    # Get points of optical flow vector
    a1, b1, c1, d1 = OFvect
    # If left side of FOE
    if a1 <= FOE[0]:
        if c1 <= a1:
            return False
    # If right side of FOE
    else:
        if c1 >= a1:
            return False
    return True
febe984f172ffbcf6557bbad0829d52e83c94dd7
16,372
import math

def create_lp_util_map(
        c_wavelength, connection_pairs, demand_matrix, gb_links,
        gb_wavelengths_edge_map, gb_paths, gb_flows):
    """ extracts the utilization map from gurobi vars """
    util_map = dict()
    for i, j in gb_links:
        n_wavelengths = math.ceil(gb_wavelengths_edge_map[i, j].X)
        cap = n_wavelengths * c_wavelength
        flow_sum_edge = 0
        for start, end in connection_pairs:
            flow_sum_edge += gb_paths[start, end, i, j].X \
                * gb_flows[start, end].X * demand_matrix[start, end]
        if cap > 0:
            util = flow_sum_edge / cap
        else:
            util = 0
        util_map[i, j] = util
    return util_map
f462afa58ea7d5c4c113135fbb6c45a52a355af8
16,373
def get_best_model(comparison_result, metric="fmeasure", reverse=False):
    """
    Returns the model row in dataframe which has the best performance on a given metric

    :param comparison_result: a dataframe containing evaluation info about various models
    :param metric: metric on which a comparison is to be made, defaults to "fmeasure"
    :param reverse: if True, lower scores are considered better
    :return: a dataframe row corresponding to the model which has the best score on the given metric
    """
    if reverse:
        model_row = comparison_result[
            comparison_result[metric] == comparison_result[metric].min()
        ]
    else:
        model_row = comparison_result[
            comparison_result[metric] == comparison_result[metric].max()
        ]
    return model_row.iloc[0]
fd4deb629386c537b59552528e99ee27391d7aa3
16,374
def generate_potential_boxes_for_coord(box_width_height, coord):
    """
    Assumption 1: box_width_height is an array of dictionaries, each of the form
        {"Width": positive integer, "Height": positive integer}
    Assumption 2: coord is a dictionary of the form
        {"x": centre of box x coordinate, "y": centre of box y coordinate}
    """
    potential_boxes = []
    for box_dim in box_width_height:
        potential_boxes.append({
            "x1": coord["x"] - int(box_dim["Width"] / 2),
            "y1": coord["y"] - int(box_dim["Height"] / 2),
            "x2": coord["x"] + int(box_dim["Width"] / 2),
            "y2": coord["y"] + int(box_dim["Height"] / 2)
        })
    return potential_boxes
27751df4582f93012819e23132a96d962027ba84
16,375
import re

def is_timeline_speed_request(text):
    """ Returns true if the specified text requests the home timeline speed. """
    tlspeed_re = '流速'
    return re.search(tlspeed_re, text) is not None
9c14ed5f095043dd02aa8fc254a45e702925eeef
16,376
def set_domain_shift(domain_shifts, env_name):
    """ Set the domain shift """
    domain_shifts = [int(i) for i in domain_shifts.split('-')]
    if len(domain_shifts) == 5:
        if env_name == 'hotel' or 'env1' in env_name:
            alpha_e = domain_shifts[0]
        elif env_name == 'univ' or 'env2' in env_name:
            alpha_e = domain_shifts[1]
        elif env_name == 'zara1' or 'env3' in env_name:
            alpha_e = domain_shifts[2]
        elif env_name == 'zara2' or 'env4' in env_name:
            alpha_e = domain_shifts[3]
        elif env_name == 'eth' or 'env5' in env_name:
            alpha_e = domain_shifts[4]
        else:
            raise ValueError('Unknown environment!')
    elif len(domain_shifts) == 1:
        alpha_e = domain_shifts[0]
    else:
        raise ValueError('Specify a domain shift for each of the 5 environments, or 1 for all.')
    return alpha_e
c430ffdd9b1ecf2f7c2c5e580fe6e80764d46d9a
16,378
def wprota(self, thxy="", thyz="", thzx="", **kwargs):
    """Rotates the working plane.

    APDL Command: WPROTA

    Parameters
    ----------
    thxy
        First rotation about the working plane Z axis (positive X toward Y).

    thyz
        Second rotation about working plane X axis (positive Y toward Z).

    thzx
        Third rotation about working plane Y axis (positive Z toward X).

    Notes
    -----
    The specified angles (in degrees) are relative to the orientation of the
    working plane.

    This command is valid in any processor.
    """
    command = f"WPROTA,{thxy},{thyz},{thzx}"
    return self.run(command, **kwargs)
ef0dd188ad663d190f85490d9229d28bdb5b3fce
16,379
import random

def passwordCreation():
    """ Create a new 10-character password of alternating digits and letters """
    # alphabet was missing 'x'/'X' in the original; restored here
    abcd = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    newPass = ""
    for i in range(0, 5):
        # randint(0, 10) could return the two-digit value 10; cap at 9
        newPass += str(random.randint(0, 9))
        newPass += str(abcd[random.randint(0, len(abcd) - 1)])
    return newPass
7366dd2cb688c5399abf3cbd4497d65e1c4839b7
16,380
def may_have_copyright_notice(filename):
    """Check whether the given file seems to have a copyright notice.

    The filename is relative to the root directory.
    This function assumes that the current working directory is that
    root directory.

    The algorithm is fairly crude, meaning that it might return
    some false positives.  I do not think it will return any false
    negatives...  We might improve this function to handle more
    complex cases later...
    """
    # For now, it may have a copyright notice if we find the word
    # "Copyright" at the (reasonable) start of the given file, say
    # 50 lines...
    MAX_LINES = 50

    # We don't really know what encoding each file might be following,
    # so just open the file as a byte stream. We only need to search
    # for a pattern that should be the same regardless of encoding,
    # so that should be good enough.
    with open(filename, 'rb') as fd:
        lineno = 1
        for line in fd:
            if b'Copyright' in line:
                return True
            lineno += 1
            if lineno > MAX_LINES:
                return False
    return False
26fa0f34eebf5017d92c2ba2c0d72b95860075dc
16,381
from typing import List
import itertools

def generate_combinations(urls: List[str], params: List[dict]) -> List[tuple]:
    """
    Combine urls and parameter dictionaries in tuples to pass to the
    download method.

    Args:
        - urls (List[str]): list of urls
        - params (List[dict]): list of parameter dictionaries

    Returns:
        List[tuple]: list of tuples in the format (url, parameters)
    """
    # assert len(urls) == len(params)
    return list(itertools.product(urls, params))
4e24e85a49a685c810b3d4089886abac7c785bfb
16,383
import re

def has_month_data(href):
    """
    Deal with href node

    :param href: href
    :type href: str
    :return: href result
    :rtype: str
    """
    return href and re.compile("monthdata.php").search(href)
b768648069fa2cd8e62d26821e3085e27139a8c5
16,384
from typing import Union
from typing import Tuple
from typing import List
from typing import Any

def deep_tuple_to_list(inp: Union[Tuple, List]) -> List[Any]:
    """Transform a nested tuple to a nested list.

    Parameters
    ----------
    inp: Union[Tuple, List]
        The input object.

    Returns
    -------
    ret: List[Any]
        The nested list.
    """
    return list(map(deep_tuple_to_list, inp)) if isinstance(inp, (list, tuple)) else inp
770b4f653b734ecd2caf1d6b56ac090e4ffe3208
16,387
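A short usage sketch for deep_tuple_to_list above; the nested input values are illustrative:

nested = (1, (2, 3), [4, (5,)])
# tuples at every nesting level become lists
print(deep_tuple_to_list(nested))  # [1, [2, 3], [4, [5]]]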
from pathlib import Path
from typing import Dict
import json

def make_temporary_features_json(
    make_temporary_folders_and_files: Path,
    test_input_json: Dict,
    test_input_json_filename: str,
) -> Path:
    """Make a temporary features JSON file."""
    # Define the file path for the temporary features JSON file, then write its contents
    temp_json_path = make_temporary_folders_and_files.joinpath(test_input_json_filename)
    temp_json_path.touch()
    with open(temp_json_path, "w", encoding="utf-8") as f:
        json.dump(test_input_json, f)
    return temp_json_path
5e7274e33d5ca6cda29798c6e80f782129788a9b
16,388
def word16be(data):
    """Return data[0] and data[1] as a 16-bit big-endian word."""
    # Big-endian: the first byte is the most significant
    # (the original shifted data[1], which is little-endian order)
    return (data[0] << 8) | data[1]
4f9c05993980ed95ffdd6fe26389a9025c41f7c9
16,389
import os
import imp

def import_as(module, name):
    """
    Imports the specified module (from our local directory) as the
    specified name, returning the loaded module object.
    """
    dir = os.path.split(__file__)[0]
    return imp.load_module(name, *imp.find_module(module, [dir]))
22c256ac9337ccaa063266d4b4719e3ef8a12496
16,390
def makes_twenty(n1, n2):
    """
    Given two integers, return True if the sum of the integers is 20
    or if one of the integers is 20. If not, return False.

    :param n1: int
    :param n2: int
    :return: bool

    makes_twenty(20,10) --> True
    makes_twenty(12,8)  --> True
    makes_twenty(2,3)   --> False
    """
    return n1 == 20 or n2 == 20 or (n1 + n2) == 20
6a5ebc86a14c8ce95060bdd557b8b9c627fd1d0d
16,391
def is_latitude_norther_than(lat_a, lat_b):
    """ Check if lat_a is further north than lat_b """
    is_latitude_a_negative = lat_a < 0
    is_latitude_b_negative = lat_b < 0
    same_sign = is_latitude_a_negative == is_latitude_b_negative
    if not is_latitude_a_negative and is_latitude_b_negative:
        return True
    if is_latitude_a_negative and not is_latitude_b_negative:
        return False
    if same_sign:
        return lat_a > lat_b
    return lat_a < lat_b
290c0a4f298290bb375170c6748515b7a7218d95
16,392
def replace(cache, pointer, refer_bit, page, frame_num):
    """
    Second-chance replacement policy: loop over the cache; if the reference
    bit of the entry is 0, replace it and return the next pointer; if the
    bit is 1, the entry gets a second chance and its bit is reset to 0.

    :param cache:
    :param pointer:
    :param refer_bit:
    :param page:
    :param frame_num:
    :return:
    """
    while True:
        if refer_bit[pointer] == 0:
            cache[pointer] = page
            pointer = (pointer + 1) % frame_num
            return pointer
        else:
            refer_bit[pointer] = 0
            pointer = (pointer + 1) % frame_num
a09ca594d4dc9dbc287383258a72697bec441643
16,393
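A minimal driver for the second-chance replace above, assuming the caller maintains the cache, reference bits, and clock pointer; the values are illustrative:

cache = ['A', 'B', 'C']   # three frames, all occupied
refer_bit = [1, 0, 1]     # 'A' and 'C' were recently referenced
pointer = 0
# 'A' gets a second chance (bit cleared), then 'B' is evicted for page 'D'
pointer = replace(cache, pointer, refer_bit, 'D', 3)
print(cache, refer_bit, pointer)  # ['A', 'D', 'C'] [0, 0, 1] 2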
from typing import Tuple
from typing import List

def get_metadata(description: str) -> Tuple[str, List[str], str]:
    """Returns a tuple of (description, tags, category) from a docstring.

    Metadata should be of the form key: value, e.g. category: Category Name
    """
    tags: List[str] = []
    category: str = ""
    lines: List[str] = description.split("\n")
    description_trimmed: List[str] = []
    for index, line in enumerate(lines):
        line_stripped: str = line.strip().lower()
        if line_stripped.startswith("tags:"):
            tags = line[line.find(":") + 1 :].split(",")
            tags = list(map(lambda t: t.strip(), tags))
            continue
        elif line_stripped.startswith("category:"):
            category = line[line.find(":") + 1 :].strip()
            continue
        else:
            description_trimmed.append(line)
    return "\n".join(description_trimmed), tags, category
21e28d7a3c802d2513f2bf8eaeccc215070a2e24
16,394
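For reference, a small example of get_metadata in action; the docstring content is made up:

doc = "Fetches a page.\ntags: http, client\ncategory: Networking"
desc, tags, category = get_metadata(doc)
print(desc)      # Fetches a page.
print(tags)      # ['http', 'client']
print(category)  # Networking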
def preprocess_location(location):
    """ get rid of possible spaces around farm location string """
    locations = location.split(',')
    res = ""
    for location in locations:
        res = res + location.strip() + ","
    res = res[:-1]
    return res
a9060b8f83deba86da7fa27526f188981b0e1953
16,397
def get_repo_dicts(r):
    """Return a set of dicts representing the most popular repositories."""
    response_dict = r.json()
    repo_dicts = response_dict['items']
    return repo_dicts
822a0dc0674b2eda5210fdd88257d894c3dc902c
16,398
def compare_version(version_req, current_version):
    """
    Performs version comparison to report if host is already at required version

    Args:
        version_req: Version that we want to be at
        current_version: Version that host is currently at

    Returns:
        True if current_version >= version_req
        False otherwise

    Raises:
        ValueError: if version is in unsupported format
    """
    if current_version is None:
        return False
    if version_req is None:
        return True
    if current_version == version_req:
        return True

    # Split by .
    current_vers = current_version.split(".")
    req_vers = version_req.split(".")

    # Will loop checking values of each sub-part, so as to cope with
    # comparing 2.1.1 to 2.2; will loop over whichever is shorter
    num_loops = len(current_vers)
    if len(req_vers) < num_loops:
        num_loops = len(req_vers)

    # Now go through each index
    for index in range(num_loops):
        if int(current_vers[index]) < int(req_vers[index]):
            # Current is less than required, so return False
            return False
        elif int(current_vers[index]) > int(req_vers[index]):
            # Current is greater than required, so return True
            return True
        # else we are at the same value, so go on to the next index

    # So far we are at the same version, but that might mean we have
    # compared 2.1.1 with 2.1, so still need more checks
    if len(current_vers) > len(req_vers):
        # We were the same until we stopped checking, but current has more
        # values than required, e.g. 2.1.1 compared to 2.1, so return True
        return True
    elif len(req_vers) > len(current_vers):
        # We were the same until we stopped checking, but required has more
        # values than current, e.g. 2.1 compared to 2.1.1, so return False
        return False
    else:
        # We must be an exact match!
        return True
9edd7513742c478ec9e8b4ae3644d7ada4bde25b
16,399
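A few illustrative checks for compare_version above:

print(compare_version("2.1", "2.1.1"))  # True  (2.1.1 >= 2.1)
print(compare_version("2.2", "2.1.1"))  # False (2.1.1 < 2.2)
print(compare_version(None, "1.0"))     # True  (no requirement)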
def _partition(array, low, high):
    """Choose the first element of `array`, put to the left of it all
    elements which are smaller, and to the right all elements which are
    bigger than it.

    Return the final position of this element.
    """
    if low >= high:
        return

    i = low + 1
    j = high
    while i <= j:
        if array[i] <= array[low]:
            i += 1
        else:
            array[i], array[j] = array[j], array[i]
            j -= 1
    array[low], array[j] = array[j], array[low]
    return j
89f23c2e0c14c9a516e8b895e888936773fbfe94
16,400
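The partition above is the core step of a quicksort; a hypothetical recursive driver built on it might look like this (the helper name _quicksort is an assumption, not part of the original):

def _quicksort(array, low, high):
    # sort array[low..high] in place using _partition
    if low >= high:
        return
    p = _partition(array, low, high)
    _quicksort(array, low, p - 1)
    _quicksort(array, p + 1, high)

data = [5, 2, 9, 1, 5]
_quicksort(data, 0, len(data) - 1)
print(data)  # [1, 2, 5, 5, 9]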
def norm(items: list[float]) -> list[float]:
    """Normalize all items to (0, 1) by min-max normalization, with min
    fixed at 0 because 0s will degrade the prioritization algorithm.
    """
    n = max(items)
    return [i / n for i in items] if n > 0 else [0.0] * len(items)
0b5d8b9cf400ebced8fa69027db658be33e42cc8
16,401
def db_to_power(decibel):
    """ Returns power value from a decibel value. """
    return 10**(decibel / 10)
5188d8d646f2421546cf894ad45d2bdd30d97e53
16,402
import requests
from bs4 import BeautifulSoup

def downloader(url):
    """ Download the idioms and save them """
    response = requests.get(url)
    if response.status_code != 200:
        print(f'{url} is failed!')
        return
    print(f'{url} is parsing')
    html = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
    table = html.find_all('table')[-2]
    prefix = 'http://www.zd9999.com'
    words = [prefix + a.get('href') for a in table.find_all('a')]
    res = []
    for i in range(0, len(words)):
        response = requests.get(words[i])
        print(f'{[words[i]]} is parsing')
        if response.status_code != 200:
            print(f'{words[i]} is failed!')
            continue
        wordhtml = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
        explanation = wordhtml.find_all('table')[-3].find_all('tr')
        res.append({
            'word': explanation[0].text.strip(),
            'pinyin': explanation[1].find_all('tr')[0].find_all('td')[1].text.strip(),
            'explanation': explanation[1].find_all('tr')[1].find_all('td')[1].text.strip(),
            'derivation': explanation[1].find_all('tr')[2].find_all('td')[1].text.strip(),
            'example': explanation[1].find_all('tr')[3].find_all('td')[1].text.strip()})
    return res
1807b4de06fe5f5904fb17960fe618cc6bc0d988
16,403
def _get_rid_for_name(entries, name):
    """Get the rid of the entry with the given name."""
    for entry in entries.values():
        if entry['name'] == name:
            return entry['rid']
    raise ValueError(u'No entry with name {} found in entries {}'.format(name, entries))
0791f3185404ca59417fd9196467c8aef69c429c
16,404
import uuid

def get_uuid(s):
    """Return UUID for the string passed in."""
    return str(uuid.uuid5(uuid.NAMESPACE_OID, str(s)))
f034e235ff3e673152216fbb84f9fc0ca85cfc41
16,405
import click

def detect_config_version(config):
    """Return version of an slo-generator config based on the format.

    Args:
        config (dict): slo-generator configuration.

    Returns:
        str: SLO config version.
    """
    if not isinstance(config, dict):
        click.secho(
            'Config does not correspond to any known SLO config versions.',
            fg='red')
        return None
    api_version = config.get('apiVersion', '')
    kind = config.get('kind', '')
    if not kind:  # old v1 format
        return 'v1'
    return api_version.split('/')[-1]
26cb4d7ae7eba981e456dc8f9201df719f720896
16,407
def o_to_matsubara_idx_b(o):
    """
    Convert index in "o" convention to bosonic Matsubara index

    Parameters
    ----------
    o : 2*n

    Returns
    -------
    n
    """
    assert o % 2 == 0
    return int(o / 2)
347313ac016033360910d94e19c7d3ef8bc3f7e3
16,408
import torch

def circus_loss(z, a=1., k=2.1):
    """Make the system follow an elongated circus-like shape with curve a and length k"""
    x, y = z[..., :1], z[..., 1:]
    a1 = torch.sqrt((x + a)**2 + y**2)
    a2 = torch.sqrt((x - a)**2 + y**2)
    return torch.abs(a1 * a2 - k).mean()
58a316294f9cabdf6fe4be42813dd6a3feb7c637
16,409
def get_verbose_details(account):
    """
    Return details for verbose output.

    :param safe.model.Account account: Account for which to get details
    :return: Sequence of 2-tuples with details for the UI
    :rtype: :func:`tuple` of 2-tuples ``(field name, info)`` where both
        items are strings
    """
    questions = 'no questions'
    questions_count = account.question_query.count()
    if questions_count > 0:
        questions = '%i question' % questions_count
        if questions_count > 1:
            questions += 's'
    codes = 'no backup codes'
    codes_count = account.code_query.count()
    if codes_count > 0:
        codes = '%i backup code' % codes_count
        if codes_count > 1:
            codes += 's'
        used_codes = 'none used'
        used_codes_count = account.code_query.filter_by(used=True).count()
        if used_codes_count > 0:
            used_codes = '%i used' % used_codes_count
        codes += ' (%s)' % used_codes
    security = '%s, %s' % (questions, codes)
    fmt = 'password is %s, question is %s'
    password_policy = '<not set>'
    if account.password_policy:
        password_policy = '"%s"' % account.password_policy.name
    question_policy = '<not set>'
    if account.question_policy:
        question_policy = '"%s"' % account.question_policy.name
    policies = fmt % (password_policy, question_policy)
    return (('security', security), ('policies', policies))
94e86a0a3753d67fd7a21908b8e8e1dba401c9b8
16,410
def _get_warmup_factor_at_iter(method: str, curr_iter: int, warmup_iters: int,
                               warmup_factor: float) -> float:
    """Return the learning rate warmup factor at a specific iteration.

    Parameters
    ----------
    method: str
        Warmup method; either "constant" or "linear".
    curr_iter: int
        Iteration at which to calculate the warmup factor.
    warmup_iters: int
        The length of the warmup phase.
    warmup_factor: float
        The base warmup factor (the meaning changes according to the
        method used).

    Returns
    -------
    float: The effective warmup factor at the given iteration.
    """
    if curr_iter >= warmup_iters:
        return 1.0
    if method == "constant":
        return warmup_factor
    if method == "linear":
        alpha = curr_iter / warmup_iters
        return warmup_factor * (1 - alpha) + alpha
    raise ValueError(f"Unknown warmup method: {method}")
5d53b32746450189eeca116f0599dbe00c05d82b
16,411
def pierce(t: float) -> float:
    """Calculates Pierce.

    Args:
        t (float): Air temperature [K].

    Returns:
        float: Pierce.
    """
    return 0.0658 * t**3 - 53.7558 * t**2 + 14703.8127 * t - 1345485.0465
94dd4ccf4de79ba9f91aa37578b68c1b6d462c05
16,412
def instrument_trial_pairs(df):
    """ Extract a list of all unique instrument/trial pairs. """
    df_iter = df.groupby(['instrument', 'trial']).size().reset_index()
    return [(r['instrument'], r['trial']) for _, r in df_iter.iterrows()]
600e2a96e2bd64f3dc1128fbdbb881b3b1790719
16,413
def gen_headervaluestr_from_headervaluedict(headervaluedict):
    """Generate Email header value from a dict.

    :return: Email header value in the form: "k=v; k=v;..."
    :rtype: str
    """
    # NOTE: next line makes header attrs non-deterministic
    # return "; ".join(["=".join([k, v]) for k, v in headervaluedict.items()])
    return "; ".join(["=".join([i, headervaluedict[i]])
                      for i in ['addr', 'prefer-encrypt', 'keydata']
                      if i in headervaluedict.keys()])
a3350b6aca15183552716ea7626cadb54dd8c784
16,415
def compare(pipeline1, pipeline2):
    """
    Compare if two dataset pipelines are the same.

    Args:
        pipeline1 (Dataset): a dataset pipeline.
        pipeline2 (Dataset): a dataset pipeline.

    Returns:
        Whether pipeline1 is equal to pipeline2.

    Examples:
        >>> pipeline1 = ds.MnistDataset(mnist_dataset_dir, 100)
        >>> pipeline2 = ds.Cifar10Dataset(cifar_dataset_dir, 100)
        >>> ds.compare(pipeline1, pipeline2)
    """
    return pipeline1.to_json() == pipeline2.to_json()
dacd93eabc63f6b51502f437fa625a14314e0740
16,416
import random

def spanning_tree_maze_generator(X, Y):
    """
    Generate a maze that has no loop
    """
    assert X == Y, "only support square maps"
    pad = False
    if X % 2 == 0:
        pad = True
        X, Y = X - 1, Y - 1
    visited = set([])
    maze = [[(' ' if x % 2 == 0 and y % 2 == 0 else '#')
             for x in range(X)] for y in range(Y)]
    edges = set([])
    # integer division; plain `/` here was a Python 2 idiom
    x, y = (X + 1) // 2, (Y + 1) // 2

    def dfs(cur):
        visited.add(cur)
        ## do not make moves outside
        moves = [(-1, 0), (1, 0), (0, 1), (0, -1)]
        random.shuffle(moves)
        for m in moves:
            next = (cur[0] + m[0], cur[1] + m[1])
            if not next in visited and next[0] >= 0 \
                    and next[0] < x and next[1] >= 0 and next[1] < y:
                edges.add((cur, next))
                dfs(next)

    ## always start from (0, 0)
    dfs((0, 0))
    ## render the maze
    for e in edges:
        mid_x = e[0][0] + e[1][0]
        mid_y = e[0][1] + e[1][1]
        maze[mid_y][mid_x] = ' '
    if pad:
        # for even sizes, we pad the maze
        maze.append([' ' if i % 2 == 0 else '#' for i in range(X)])
        for i, m in enumerate(maze):
            m.append(' ' if i % 2 == 0 else '#')
    return maze
3fa7b3e97dd2bdf65f6e6321b19f4a97cfff50a1
16,417
def vec_subvec(a, r):
    """
    Extracts a sub-vector from a given vector

    Parameters
    ----------
    a: list[]
        A vector of scalar values
    r: tuple
        A pair (ps, pe) indicating the start and end points of the
        sub-vector dimension. If ps > pe then the sub-vector will be
        reversed. Values must be positive.

    Returns
    -------
    list[]
        The sub-vector
    """
    assert len(r) == 2
    assert min(r) >= 0
    step = 1 if r[0] <= r[1] else -1
    # `r[0] > r[1] == 0` chains to `r[0] > r[1] and r[1] == 0`;
    # when reversing down to index 0 the slice end must be omitted
    return (a[r[0]:: step] if r[0] > r[1] == 0
            else a[r[0]: r[1] + step: step])
d704f50c6269bf5a593ccdba5cdce67542a462d7
16,420
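A couple of illustrative calls to vec_subvec above:

v = [10, 20, 30, 40, 50]
print(vec_subvec(v, (1, 3)))  # [20, 30, 40]
print(vec_subvec(v, (3, 0)))  # [40, 30, 20, 10] (reversed down to index 0)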
import re

def detectMetadataFormat(xmldoc):
    """ Detect the format of the metadata in `xmldoc`. """
    root = xmldoc
    if re.search("eml$", root.tag):
        return "eml"
    elif re.search("Dryad", root.tag):
        return "dryad"
    elif re.search("metadata", root.tag):
        return "fgdc"
    else:
        return "unknown"
6534f0f9c4bb3b2905be6a924ae8ceb0ce39ab1b
16,422
def astz(dt, tz):
    """
    Given a datetime object and a timezone object, return a new
    datetime object that represents the same moment, but written in
    terms of the new timezone.

    :param dt: a datetime object
    :param tz: a timezone object
    :return: a datetime object
    """
    # See http://pythonhosted.org/pytz/ for why this is
    # not as trivial as it might first appear.
    return tz.normalize(dt.astimezone(tz))
af98a3e9e6fccc21f09302b5f7e655149904e196
16,423
def image_generator_from_dataframe(dataframe,
                                   img_size,
                                   batch_size,
                                   cls_labels,
                                   datagen,
                                   color_mode='rgb'):
    """
    Creates a generator that loads images from information in a pandas dataframe.

    The dataframe must have at least two columns:
    - "filenames" with the absolute path to the file
    - "cls" with the class label of each image (text)

    Images will be preprocessed using an ImageDataGenerator, resized to a
    fixed shape and converted to grayscale if desired.

    :param dataframe: Pandas dataframe with the image information
    :param img_size: Shape to resize the images to, e.g. (128, 128)
    :param batch_size: Size of the generator batch
    :param cls_labels: List containing each class label
    :param datagen: The ImageDataGenerator for preprocessing
    :param color_mode: 'rgb' or 'grayscale' to produce 3 or 1 channel images, respectively
    :return:
    """
    return datagen.flow_from_dataframe(
        dataframe,
        x_col="filenames",
        y_col="cls",
        classes=cls_labels,
        target_size=img_size,
        batch_size=batch_size,
        color_mode=color_mode,
        interpolation='bilinear',
        class_mode='categorical')
d208b1e6ba36df1cee9d35c718431c110ab08304
16,425
def get_object_class(configvalue):
    """ Formats the objectclass line from the config into a list """
    objclass = configvalue.split('|')
    return objclass
3cb4c9370d5e711fcadbbd4d0583b2967e9ebe6d
16,426
import os

def split_pkg(pkg):
    """Split a subdir + conda package into parts.

    From @isuruf and @CJ-Wright originally.

    Parameters
    ----------
    pkg : str
        The conda package (e.g. `linux-64/python-3.7.6-py37djfa_0.tar.bz2`)

    Returns
    -------
    plat : str
        The platform (e.g., `linux-64`).
    name : str
        The package name (e.g., `python`).
    ver : str
        The version (e.g., `3.7.6`).
    build : str
        The build string (e.g., `py37djfa_0`)
    """
    if not pkg.endswith(".tar.bz2"):
        raise RuntimeError("Can only process packages that end in .tar.bz2")
    pkg = pkg[:-8]
    plat, pkg_name = pkg.split(os.path.sep)
    name_ver, build = pkg_name.rsplit("-", 1)
    name, ver = name_ver.rsplit("-", 1)
    return plat, name, ver, build
52fa50f8e87226a685294eb2259859594f6f90af
16,428
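A quick check of split_pkg with the example from its own docstring; note it splits on the OS path separator, so this behaves as shown on Linux:

plat, name, ver, build = split_pkg("linux-64/python-3.7.6-py37djfa_0.tar.bz2")
print(plat, name, ver, build)  # linux-64 python 3.7.6 py37djfa_0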
def sol(arr, n, k):
    """
    Keep traversing right, updating start and end while arr[i] <= k.
    Set the flag if k is included between start and end.
    Finally, count the length if the flag is set.
    """
    start = None
    end = None
    l = 0
    t = 0
    maxK = False
    for i in range(n):
        if arr[i] > k:
            t += l
            start = None
            maxK = False
            l = 0
        else:
            if start is None:
                start = i
            end = i
            if arr[i] == k:
                maxK = True
            if maxK:
                l = end - start + 1
    t += l  # The last subarray needs to be counted as well
    return t
3e5da8c1b31595cafe4c42d6b7ea408e01086769
16,429
def stereo_to_mono(data):
    """ Mix a two-channel (stereo) signal down to mono, as the name says. """
    if data.ndim == 2:
        data_mono = ((data[:, 0] + data[:, 1]) / 2).astype(data.dtype)
        return data_mono
    else:
        print("Signal is already mono.")
        return data
0d8c785bcd9f26b93f79bb34664428284da04ab0
16,430
def find_default_dataset_and_split_names(datasets,
                                         default_dataset_name=None,
                                         default_split_name=None,
                                         train_split_name=None):
    """
    Return a good choice of dataset name and split name, possibly not the train split.

    Args:
        datasets: the datasets
        default_dataset_name: a possible dataset name. If `None`, find a
            suitable dataset; if not, the dataset must be present
        default_split_name: a possible split name. If `None`, find a suitable
            split; if not, the split must be present. If `train_split_name`
            is specified, the selected split name will be different from
            `train_split_name`
        train_split_name: if not `None`, exclude the train split

    Returns:
        a tuple (dataset_name, split_name)
    """
    if default_dataset_name is None:
        default_dataset_name = next(iter(datasets))
    else:
        if default_dataset_name not in datasets:
            return None, None

    if default_split_name is None:
        available_splits = datasets[default_dataset_name].keys()
        for split_name in available_splits:
            if split_name != train_split_name:
                default_split_name = split_name
                break
    else:
        if default_split_name not in datasets[default_dataset_name]:
            return None, None

    return default_dataset_name, default_split_name
6a1c844109afb2fcd3fd9f85bc966377fa7b7bc2
16,432
def getLCA(root, a, b, v):
    """
    The method assumes that both keys are present in the binary tree.
    If one key is present and the other is absent, it returns the present
    key as LCA (ideally it should return NULL). To overcome this we use
    findLCA(), which is a wrapper over this function.
    """
    if root is None:
        return None
    if root.data == a:
        v[0] = True
        return root
    if root.data == b:
        v[1] = True
        return root
    llca = getLCA(root.left, a, b, v)
    rlca = getLCA(root.right, a, b, v)
    if llca and rlca:
        return root
    if not llca:
        return rlca
    if not rlca:
        return llca
a621574e7da50871579df9ab8acfffdb66dbdaf9
16,433
def pearsoncc(x, y):
    """ Compute Pearson Correlation Coefficient. """
    x = (x - x.mean(0)) / x.std(0)
    y = (y - y.mean(0)) / y.std(0)
    return (x * y).mean()
145471d2007feaef0c285312b645d07e6922d4c2
16,435
def build_stack_tags(stack):
    """Build a common set of tags to attach to a stack."""
    return [{'Key': t[0], 'Value': t[1]} for t in stack.tags.items()]
6963fdbc724f546f66839900a45b48a803950f91
16,436
def robot_go_to_coffee_machine(agents, self_state, self_name):
    """
    This action has neither effects nor preconditions. It only represents
    the robot leaving the room (for the example).
    """
    return agents
44bfb0e9ebed3085a30bf5bbc86aaf0d77ecef26
16,437
def get_projects_by_4(p):
    """
    The frontend displays a list of projects in 4 columns. This function
    splits the list of the projects visible by the user in chunks of size 4
    and returns it.
    """
    # Split the list of visible projects by chunks of size 4
    projects = sorted([e['id'] for e in p['projects']])
    n = 4  # split projects in chunks of size 4
    projects_by_4 = [projects[i * n:(i + 1) * n]
                     for i in range((len(projects) + n - 1) // n)]
    return projects_by_4
c44e45c96a0d0869f0b8c2e6b61779feda59e4ee
16,438
def my_pow(x: float, n: int) -> float:
    """
    Offer 16: raise a number to an integer power (fast exponentiation).
    """
    if x == 0:
        return 0
    res, flag = 1, True
    if n < 0:
        flag = False
        n = -n
    while n:
        if n & 1:
            res *= x
        n = n >> 1
        x = x * x
    return res if flag else 1 / res
8ee19f64cedc3307f5f495d09a24ce513688f201
16,439
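Fast exponentiation halves the exponent each step by squaring the base; a couple of illustrative calls to my_pow:

print(my_pow(2.0, 10))  # 1024.0
print(my_pow(2.0, -2))  # 0.25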
def sdetectBatch(sess, image_tensor, tensor_dict, images):
    """
    Detects objects on an already-configured session and tensor dict with a
    set of images
    """
    output = sess.run(tensor_dict, feed_dict={image_tensor: images})
    return output
a0d289df27dbfe8c20553d0362e9c6f0e5d5c1c4
16,440
def _domain_object_metadata(domain_object):
    """Return mapping of domain metadata key to value.

    Args:
        domain_object: Workspace domain object.

    Returns:
        dict.
    """
    meta = {"object": domain_object}
    meta["name"] = getattr(meta["object"], "name")
    meta["description"] = getattr(meta["object"], "description")
    meta["owner"] = getattr(meta["object"], "owner")
    meta["is_coded_value"] = getattr(meta["object"], "domainType") == "CodedValue"
    meta["is_range"] = getattr(meta["object"], "domainType") == "Range"
    # meta["merge_policy"] = getattr(meta["object"], "mergePolicy")
    # meta["split_policy"] = getattr(meta["object"], "splitPolicy")
    meta["code_description_map"] = getattr(meta["object"], "codedValues", {})
    meta["range"] = getattr(meta["object"], "range", [])
    meta["type"] = getattr(meta["object"], "type")
    return meta
d6afb9b1d651e50f970a7ae87e89cf1c721473af
16,441
def add_NA_indicator_variables(df, inplace=False):
    """
    Add indicator variables for each column to indicate missingness.
    """
    df_ = df if inplace else df.copy()
    for i, c in enumerate(df_.columns):
        x = df_[c].isna()
        if x.any():
            df_.insert(i + 1, '{}_NA'.format(c), x)
    return df_
834b72f4df820d520cc2e2c1fb3605ad846a9f2f
16,443
def maybe_ansi(text: str, level: int, use_ansi: bool = True):
    """ Adds an ANSI highlight corresponding to the level, if enabled """
    return f"\u001b[{(level % 6) + 31}m{text}\u001b[0m" if use_ansi else text
0242e7f18ded1af6c3424b1a10c61f0bd5e3f39f
16,446
def generate_restricted_queryset():
    """Generate a function to return a restricted queryset compatible with
    the internal permissions system."""
    def get_queryset(queryset, info):
        return queryset.restrict(info.context.user, "view")

    return get_queryset
a64583ddbb97bcc1eaa20a70b8dc8b077bc8ffed
16,447
import hashlib

def gen_server_hash(server_id, shared_secret, public_key):
    """Generates the server hash for use in authentication.

    Parameters
    ----------
    server_id : :class:`str`
        The server id found in :class:`~.EncryptionRequestPacket`.
    shared_secret
        The shared secret gotten from :func:`gen_shared_secret`.
    public_key
        The public key found in :class:`~.EncryptionRequestPacket`.

    Returns
    -------
    :class:`str`
        The server hash.
    """
    h = hashlib.sha1()
    h.update(server_id.encode("ascii"))
    h.update(shared_secret)
    h.update(public_key)
    return f"{int.from_bytes(h.digest(), byteorder='big', signed=True):x}"
f6294f68fa94a92fca1e1942d280a07535ce7abb
16,448
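A sketch of calling gen_server_hash above; the byte strings are placeholders standing in for a real shared secret and public key:

secret = b"\x00" * 16               # placeholder shared secret
pubkey = b"placeholder-public-key"  # placeholder public key bytes
# prints the SHA-1 digest rendered as a signed big-endian hex string
print(gen_server_hash("", secret, pubkey))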
import re

def parse(color):
    """
    color @string : A string like 'rgb({:d},{:d},{:d})' is expected
    """
    red, green, blue = map(int, re.findall(r'\d+', color))
    return f"#{red:02x}{green:02x}{blue:02x}"
4f195dc1312296b6361fd886e0d2a92dbd6edcf2
16,449
import base64

def PngFile_to_Base64(file_name):
    """Converts a png file to a base64-encoded string"""
    with open(file_name, "rb") as in_file:
        encoded = base64.b64encode(in_file.read()).decode()
    return encoded
018aed2f85584ce4c585236afb58996b6952e852
16,450
import os

def normalize_path(optional_path=None):
    """Return a cleaned-up version of a given filesystem path, or None.

    Converts the path to the operating system's native conventions, and
    removes redundancies like `.`.

    The return value will be `None`, an absolute path, or a relative path,
    same as the argument.  But it will have redundant path separators,
    unnecessary detours through parent directories, and use of the current
    directory "." removed.
    """
    if optional_path is None:
        return None
    else:
        path = os.path.normpath(optional_path)
        path = path.replace('/', os.path.sep)
        path = path.replace('\\', os.path.sep)
        return path
11146448dda7d062f511ad5e8aaec0761a7ae971
16,451
def dicecoeff_precount(e1, e2, count):
    """
    Dice coefficient measures the similarity of two bit patterns

    :param e1: bitarray1
    :param e2: bitarray2
    :param count: float bitcount1 + bitcount2
    :return: real 0-1 similarity measure
    """
    if count == 0:
        return 0
    return 2 * (e1 & e2).count() / count
d35658e5d369b7c36ee422d5b30980236c7112fb
16,452
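dicecoeff_precount expects objects supporting bitwise AND and .count(); the third-party bitarray package fits, as a sketch (assuming it is installed):

from bitarray import bitarray

e1 = bitarray('1101')
e2 = bitarray('1011')
count = e1.count() + e2.count()           # 3 + 3 = 6
print(dicecoeff_precount(e1, e2, count))  # 2 * 2 / 6 = 0.666...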
from pathlib import Path

def _path(dirname):
    """Always use absolute paths, easier to control when working with
    FSL / Freesurfer"""
    if dirname is None:
        return None
    else:
        return Path(dirname).resolve()
32225853615e3f964f486fa04aa6b9f2d08e3ab1
16,454
def find_tts(prices):
    """Returns a list containing the buy day, sell day and resulting profit
    - finds the best days on which to buy and sell."""
    buyday = 0
    sellday = 0
    profit = 0
    for x in range(len(prices)):
        for y in range(x + 1, len(prices)):
            if prices[x] < prices[y]:
                if prices[y] - prices[x] > profit:
                    profit = prices[y] - prices[x]
                    buyday = x
                    sellday = y
    return [buyday, sellday, profit]
efa5bcf672d58b0f1a0562afb33fe9881668dd2b
16,456
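An illustrative run of find_tts above (it scans all buy/sell pairs, so it is O(n^2)):

prices = [7, 1, 5, 3, 6, 4]
print(find_tts(prices))  # [1, 4, 5]: buy day 1 at 1, sell day 4 at 6, profit 5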
def int_or_zero(v):
    """ Convert object to int, falling back to 0 """
    if isinstance(v, str):
        v = v.strip()
    try:
        return int(v)
    except (ValueError, TypeError):
        return 0
52b65c33974b044c601d287ae2eee0ec65857586
16,458
import click

def ipa_password_prompt(ctx, param, value):
    """IPA admin password prompt"""
    return value or click.prompt('IPA admin password ', hide_input=True)
9c3971e738593a125e6bf334aacf5b81a6ec1895
16,459
import argparse

def parse_cmd():
    """ Build the command line arguments parser for the inferring task. """
    parser = argparse.ArgumentParser(
        description="Globally Normalized Reader in PaddlePaddle.")
    parser.add_argument(
        "--model_path",
        required=True,
        type=str,
        help="Path of the trained model to evaluate.",
        default="")
    parser.add_argument(
        "--data_dir",
        type=str,
        required=True,
        help="Path of the training and testing data.",
        default="")
    parser.add_argument(
        "--batch_size",
        type=int,
        required=False,
        help="The batch size for inferring.",
        default=1)
    parser.add_argument(
        "--use_gpu",
        type=int,
        required=False,
        help="Whether to run the inferring on GPU.",
        default=0)
    parser.add_argument(
        "--trainer_count",
        type=int,
        required=False,
        help=("The thread number used in inferring. When set "
              "use_gpu=True, the trainer_count cannot exceed "
              "the gpu device number in your computer."),
        default=1)
    return parser.parse_args()
f9edba184f21d96f78c9f24ce5d85d8e8bfcfe10
16,461
def optional_dependency_graph(page, *provided_dependencies):
    """Creates a dependency graph for a page, including all dependencies
    and optional_dependencies.

    Any optional provided_dependencies will be included as if they were
    dependencies, without affecting the value of each keyed page.
    """
    graph = {}
    dependencies = set(getattr(page, 'dependencies', []) +
                       getattr(page, 'optional_dependencies', []))
    graph[page] = dependencies
    for dependency in dependencies | set(provided_dependencies):
        graph.update(optional_dependency_graph(dependency))
    return graph
97783871265f5cfa995d7b9cfe2e59e2b7ba2ef1
16,463
def get_clusters_as_list(df):
    """Return a list of lists of Event

    Args:
        df (DataFrame): see get_dataframe()

    Returns:
        list of list of Event: list of event clusters
    """
    return df.groupby('label')['event'].apply(list).values.tolist()
b49a8efeaef0506659a483cb9cb8431d284557a5
16,464
def get_compact_promiscuous_df(enzyme_df):
    """
    input: enzyme dataframe (dataframe)
    output: promiscuous enzyme dataframe (dataframe)
    """
    promiscuous_df = enzyme_df[[len(rxn) > 1 for rxn in enzyme_df['reaction']]]
    compact_promiscuous_df = promiscuous_df[['entry', 'reaction', 'product', 'substrate']]
    return compact_promiscuous_df
60f2133ee43fcd6a41376dd8e81681393128a6c7
16,465
def saml_assertion_to_ldap_style_name(assertion_attributes):
    """
    Return string, approximating a NOAA LDAP-style name for SAML user

    Keyword Parameters:
    assertion_attributes  -- Dict, representing SAML assertion attributes
        for a logged in user

    >>> test_attributes = {'mail': ['Pat.Ng@noaa.gov']}
    >>> saml_assertion_to_ldap_style_name(test_attributes)
    'uid=pat.ng,ou=People,o=noaa.gov'
    """
    # Adapt SAML user email assertion, into a LDAP-style name
    user_name, user_domain = assertion_attributes['mail'].pop().split('@')
    generated_ldap_id = 'uid={},ou=People,o={}'.format(user_name.lower(), user_domain)
    return generated_ldap_id
c21c1f461e5ad06721417fb51284e82b7b5128d7
16,466
import pathlib

def _get_institution(path: pathlib.Path) -> str:
    """Returns the institution. As per docstring, this is index -3."""
    return str(path).split('/')[-3]
4c42e40bc19dbe7c7e45f5984a6ff5cf940d4fd7
16,467
def calc_per_difference(dev_per_dict: dict) -> dict:
    """
    Calculates the difference between the 'speak' test-set PER and the
    training-dev sets. This difference is a measure of data mismatch.
    """
    per_diff_dict = dict()
    for name, per in dev_per_dict.items():
        if not name == 'speak':
            diff_name = name + "-speak"
            per_diff_dict[diff_name] = dev_per_dict.get('speak', 0.0) \
                - dev_per_dict.get(name, 0.0)
    return per_diff_dict
25f1b331e9b8ba0346422000a6cf3e68061dd8a2
16,468
import os.path
import getpass

def read_config(cfg="~/.config/instacron/config"):
    """Read the config.

    Create a config file at `cfg` with the following information and structure:
        my_user_name
        my_difficult_password
    """
    _cfg = os.path.expanduser(cfg)
    try:
        with open(_cfg, "r") as f:
            user, pw = [s.replace("\n", "") for s in f.readlines()]
    except Exception:
        print(f"\nReading config file `{cfg}` didn't work")
        user = input("Enter username and hit enter\n")
        pw = getpass.getpass("Enter password and hit enter\n")
        save_config = input(f"Save to config file `{cfg}` (y/N)? ").lower() == "y"
        if save_config:
            os.makedirs(os.path.dirname(_cfg), exist_ok=True)
            with open(_cfg, "w") as f:
                f.write(f"{user}\n{pw}")
    return {"username": user, "password": pw}
50e326f6bfa8c4d28b2268bacdd7441131255bee
16,469
def fdiv_loss(convex_conjugate):
    """General template for :math:`f`-divergence losses given convex conjugate.

    Args:
        convex_conjugate: The convex conjugate of the function, :math:`f`.
    """
    def loss(batch, weights=(1, 1)):
        r"""Args:
            batch: pair of minibatches drawn from each sample
            weights: Provides an alternative means of reweighting minibatches.
                See `hybrid estimation <user_guide.rst#hybrid-estimation>`__
                for details."""
        input1, input2 = batch
        batch_loss = (
            convex_conjugate(input2).mean() * weights[1]
            - input1.mean() * weights[0]
        )
        # print(batch_loss)
        return batch_loss

    return loss
434a9ef9e433db5f00a887669de02ba1dbdffc77
16,473
def hash_diff(old_hash, new_hash):
    """
    Returns the keys in new_hash that have different values in old_hash.
    Also returns keys that are in new_hash but not in old_hash.

    Keyword arguments:
    old_hash -- the dictionary of hashes for the old directory
    new_hash -- the dictionary of hashes for the new directory
    """
    paths_changed = []
    new_paths = []
    for key, value in new_hash.items():
        if key in old_hash:
            if value != old_hash[key]:
                paths_changed.append(key)
        else:
            new_paths.append(key)
    return (paths_changed, new_paths)
7c9c650c64371385843f8f7604eaa07209e9149f
16,474
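A small illustration of hash_diff above, with made-up file hashes:

old = {'a.txt': 'h1', 'b.txt': 'h2'}
new = {'a.txt': 'h1', 'b.txt': 'hX', 'c.txt': 'h3'}
print(hash_diff(old, new))  # (['b.txt'], ['c.txt'])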
def compare(sample_list, first, last):
    """
    Find tuples with each unique first element. For tuples with equal first
    elements, keep the one with the largest last element.

    Parameters
    ----------
    sample_list : list
        A list of several tuples.
    first : integer
        The first index of each tuple.
    last : integer
        The last index of each tuple.

    Returns
    -------
    res : list
        A list of the selected tuples.
    """
    op = []
    for m in range(len(sample_list)):
        li = [sample_list[m]]
        for n in range(len(sample_list)):
            if (sample_list[m][first] == sample_list[n][first] and
                    sample_list[m][last] != sample_list[n][last]):
                li.append(sample_list[n])
        # sort on the `last` index (the original hardcoded index 2 here)
        op.append(sorted(li, key=lambda dd: dd[last], reverse=True)[0])
    res = list(set(op))
    return res
8ab35ded6e883a4b90864535af73031ba8233af2
16,475
def search_001(data):
    """
    Search result parsing:

    zvideo            # video           https://www.zhihu.com/zvideo/1386442207516278784
    wiki_box          # topic           https://api.zhihu.com/topics/19551455
    search_result     # search results
        article       https://api.zhihu.com/articles/362094095
        answer        https://api.zhihu.com/answers/2051939405
        videoanswer   https://www.zhihu.com/question/20008942/answer/1981804371  # rare, ignored
    relevant_query    # related searches
        https://www.zhihu.com/search?q=%E6%96%B0%E6%B5%AA%E5%BE%AE%E5%8D%9A&utm_content=search_relatedsearch&type=content
    search_club       // not suitable for crawling
    knowledge_result  // not suitable for crawling
        e-book        https://www.zhihu.com/market/pub/119584018/manuscript/1071910337044873216
        Q&A with ads  https://www.zhihu.com/question/312456927/answer/1172171000
    knowledge_ad      // not suitable for crawling
        paid column   https://www.zhihu.com/remix/albums/1171945943569555456/tracks/1171947062626668544/content?km_channel=search&origin_label=search
                      https://www.zhihu.com/market/specials/1124754111790456832
        e-book        https://www.zhihu.com/pub/book/119571280
        Live lecture  https://www.zhihu.com/lives/913829452423774208

    Link conversion:
    # articles
        https://api.zhihu.com/articles/367168940 -> https://zhuanlan.zhihu.com/p/367168940
    # zvideo
        https://www.zhihu.com/zvideo/1363956955149594625
    # answer
        https://api.zhihu.com/answers/1969291425
        https://api.zhihu.com/questions/341407139
        -> https://www.zhihu.com/question/341407139/answer/1969291425
    # relevant_query -- stored directly as a list, e.g.
        ['谈谈当代中国如何突破美国的核心技术封锁', '中核战略规划研究总院']
    # wiki_box -- information is complete, no further request needed
        https://api.zhihu.com/topics/19551455
    """
    results = {"article": [], "relevant_query": [], "topic": [], "zvideo": [], "answer": []}
    for d in data:
        type = d.get('type', '')
        # search_result
        if type == 'search_result':
            object = d.get('object', {})
            object_type = object.get('type', '')
            # article
            if object_type == 'article':
                url = object.get('url', '')
                p_id = url.split('/')[-1]
                u = "https://zhuanlan.zhihu.com/p/{}".format(p_id)
                results['article'].append(u)
            # answer
            if object_type == 'answer':
                url = object.get('url', '')  # https://api.zhihu.com/answers/1721963692
                question_url = object.get('question', {}).get('url', '')  # https://api.zhihu.com/questions/442751850
                answer_id = url.split('/')[-1]
                question_id = question_url.split('/')[-1]
                u = "https://www.zhihu.com/question/{}/answer/{}".format(question_id, answer_id)
                results['answer'].append(u)
            # topic
            if object_type == 'topic':
                results['topic'].append(object)
        # wiki_box (topic)
        if type == 'wiki_box':
            object = d.get('object', {})
            object_type = object.get('type', '')
            if object_type == 'wiki_box':
                results['topic'].append(object)
        # relevant_query
        if type == 'relevant_query':
            query_list = d.get('query_list', {})
            query_list = [i.get('query') for i in query_list]
            results['relevant_query'] = query_list
        # zvideo
        if type == 'zvideo':
            object = d.get('object', {})
            zvideo_id = object.get('zvideo_id', '')
            u = "https://www.zhihu.com/zvideo/{}".format(zvideo_id)
            results['zvideo'].append(u)
    return results
5024f3ee270245c643669373ae9822432cbb6c57
16,476
def ask_int(question, n_trials, max_attempts=10, count=1, special=False):
    """
    Ask for an integer user input.

    Parameters
    ----------
    question : str
        the statement and/or question that needs to be answered
    n_trials : int
        the upper bound for a valid integer selection
    max_attempts : int
        the maximum number of tries a user has before breaking
    count : int
        the user attempt number
    special : bool
        if True, accept an arbitrary float value instead of a selection

    Returns
    -------
    answer : int or float
        the user's answer, or `None` if the number of attempts exceeds
        the allowed number
    """
    while count < max_attempts:
        answer = input(question)
        try:
            if special:
                try:
                    value = float(answer)
                except ValueError:
                    print('ERROR: please try again ')
                else:
                    return value
            elif float(answer).is_integer() and not special:
                if int(answer) == 0:
                    special = True
                    question = 'What is your value for numax? '
                elif int(answer) >= 1 and int(answer) <= n_trials:
                    return int(answer)
                else:
                    print('ERROR: please select an integer between 1 and %d \n or 0 to provide a different value\n' % n_trials)
            else:
                print("ERROR: the selection must match one of the integer values \n")
        except ValueError:
            print("ERROR: not a valid response \n")
        count += 1
    return None
5ef5d817c98db2316410753dbae56d401999bfff
16,477
def str2ints(stat_id_str: str) -> list:
    """Convert stat_id string e.g. 'account_id:tank_id' to a list of ints"""
    return [int(x) for x in stat_id_str.split(':')]
15def7276ac9cfea86a5a8010b95de189f7750d5
16,479
import numpy as np

def docov(x, w):
    """
    Calculates the weighted covariance matrix centered on each atom.
    The original centred covariance (Todeschini et al. 2013) is weighted
    according to the atomic partial charges (normalized absolute values).

    :param x (n_at x 3): molecular 3D coordinate matrix
    :param w (n_at x 1): molecular property to consider
    :returns cov (n_at x n_at): weighted atomic centred covariance

    Francesca Grisoni, 12/2016, v. alpha, ETH Zurich
    """
    n, p = x.shape  # dimensions
    cov = {}  # pre-allocation
    samp_v = np.zeros((p, p))  # init
    type_w = 1  # if 1, it normalizes according to the total sum of weights

    # normalizes partial charges
    if type_w == 2:
        den = (n - 1)
    else:
        den = sum(abs(w))
    if den == 0:  # `is 0` compared identity, not value; fixed to `==`
        den = n - 1
    w_abs = abs(w) / den

    for i in range(n):
        for j in range(p):
            for k in range(p):
                cvhere = 0
                for s in range(n):
                    cvhere += w_abs[s] * (x[s, j] - x[i, j]) * (x[s, k] - x[i, k])
                samp_v[j, k] = cvhere
        cov[i, 1] = samp_v
        samp_v = np.zeros((p, p))  # re-init
    return cov
dc8404987cb3291d0738740a74c447a72eb9667e
16,480
import textwrap

def dedent_all(f):
    """Dedents all constant strings in the decorated function.

    DO NOT use this with functions containing f-strings.
    """
    consts = list(f.__code__.co_consts)
    for i, const in enumerate(consts):
        if type(const) is str:
            consts[i] = textwrap.dedent(const)
    f.__code__ = f.__code__.replace(co_consts=tuple(consts))
    return f
dbab89f626c47819ced988accb3530b7cc35bdc7
16,481
def replace_space(value, replace_string):
    """Basically the inverse of space replace

    :param value:
    :param replace_string:
    :return:
    """
    return value.replace(' ', replace_string)
b27a6edbb087605badefca2de3c9b7fab4510627
16,482
def get_category(line1):
    """Collects breach category from the line.

    Args:
        line1 (str): 1st line of data block

    Returns:
        str: breach category
    """
    line1 = line1.split(' ')
    return line1[-2]
49da3f6efef3ed72dd8ba43795d297d04b8c20c8
16,483
import random

def generate_random_offset(background_shape: tuple, object_shape: tuple) -> tuple:
    """
    Generate a safe random offset for the background.

    :param background_shape: tuple
    :param object_shape: tuple
    :return: tuple - offset in x, y
    """
    b_height, b_width = background_shape
    o_height, o_width = object_shape
    random_x = random.randrange(0, b_width - o_width, 1)
    random_y = random.randrange(0, b_height - o_height, 1)
    return random_x, random_y
c3d015f3be7add5ee1a472e8c73f0a32abca898e
16,485
def get_ad_sublist(adlist, names):
    """
    Select a sublist of AstroData instances from the input list using a
    list of filename strings. Any filenames that don't exist in the
    AstroData list are just ignored.
    """
    outlist = []
    for ad in adlist:
        if ad.filename in names:
            outlist.append(ad)
    return outlist
61af4ebb3c4c7cd93af1576a6906c612aae5872d
16,486
import os

def get_cluster_filename(filename_prefix, k):
    """
    Computes the filename of the cluster file.

    @param filename_prefix: experiment name and text type (e.g. cause_title)
    @param k: k determines the number of clusters
    @return: the filename
    """
    return os.path.join("preprocessed_context_compatibility",
                        '{}.k{}.pkl'.format(filename_prefix, k))
56b4ff1df14c53502357fb816caa901aa973b3d3
16,488
import json

def store_username():
    """Store a new username"""
    filename = "username.json"
    username = input("Enter your username: ")
    with open(filename, 'w') as f:
        json.dump(username, f)
    return username
1e0688d5a1d074ffb368ba97a5fc63bbb74cbaf5
16,489
def unaligned_words(f_words, e_words, biphrases):
    """Find unaligned words

    :param f_words: source words
    :param e_words: target words
    :param biphrases: list of phrase pairs (check `minimal_biphrases`)
    :returns: set of unaligned source words, set of unaligned target words
    """
    fs = set()
    es = set()
    for fp, ep in biphrases:
        fs.update(fp)
        es.update(ep)
    return frozenset(range(len(f_words))) - fs, frozenset(range(len(e_words))) - es
dbee429e9f72b17d3e3ba311fdd4c17d2938bca1
16,490
def toTf(f):
    """
    :param f: input pose
    :type f: :class:`PyKDL.Frame`

    Return a tuple (position, quaternion) for the pose.
    """
    return ((f.p[0], f.p[1], f.p[2]), f.M.GetQuaternion())
3fe386803804b1c27919c47f1a1cf4a59b20b1ed
16,491
def template_xml() -> str:
    """Returns an XML declaration with version and encoding attributes"""
    return '<?xml version="1.0" encoding="UTF-8"?>'
f7775700be1e39b319257269d0089ba7c782dc8a
16,492
def predict_model(dataset, model):
    """
    Method to predict reliability of dataset using the provided model

    :param dataset: dataset whose reliability is to be predicted
    :param model: model to be used to predict reliability
    :return: the reliabilities of the dataset
    """
    for drop_column in ["is_reliable", "vic_x", "vix_y", "latitude", "longitude"]:
        if drop_column in dataset.columns:
            dataset = dataset.drop(columns=[drop_column])
    return model.predict(dataset)
081f2fa73663c46b9f0018ba037acd0ce1d2d086
16,493
def nobrackets(v):
    """ Remove brackets """
    return v.replace('[', '').replace(']', '')
807dafa83a743a94ca81666e979857ba8481eab9
16,494
import sys

def current_py_version():
    """Return the Python version under which this script is being run."""
    return "%d.%d" % (sys.version_info.major, sys.version_info.minor)
c1219d8af3178561e1f4f87202173fd629652313
16,495