content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def eligible_parties(zweitstimmen_bundesgebiet, direktmandate):
    """Determine which parties reach the Bundestag in a federal state.

    Input:
        zweitstimmen_bundesgebiet (DataFrame): votes by party bundesgebiet
        direktmandate (DataFrame): By party the number of Direktmandate
    Output:
        eligible_parties (list): all parties that are eligible for BT seats
    """
    # Normalize raw vote counts to shares (column-wise division by the
    # column totals), so they can be compared against the 5% threshold.
    zweitstimmen_bundesgebiet = zweitstimmen_bundesgebiet.div(
        zweitstimmen_bundesgebiet.sum(axis=0)
    )
    # Move the index into a regular column so it can be selected below.
    # NOTE(review): assumes the party index becomes a column named
    # "Partei" after reset_index — confirm against the caller's DataFrame.
    zweitstimmen_bundesgebiet.reset_index(inplace=True)
    # Parties with enough Direktmandate qualify regardless of the 5% hurdle.
    # NOTE(review): German electoral law grants this with at least 3 direct
    # mandates; `> 3` requires 4 — confirm whether `>= 3` was intended.
    eligible_direktmandate = direktmandate[direktmandate > 3].index.tolist()
    # Parties clearing the federal 5% hurdle on the vote share.
    eligible_huerde = (
        zweitstimmen_bundesgebiet[zweitstimmen_bundesgebiet["Bundesgebiet"] > 0.05]
        .loc[:, "Partei"]
        .tolist()
    )
    # Union of both criteria, de-duplicated while preserving order.
    eligible_parties = list(dict.fromkeys(eligible_direktmandate + eligible_huerde))
    return eligible_parties
0ec0c16f0e5fc5d149ff12dbf4baa4d13699986d
45,452
def get_slope(x, y):
    """Estimate a slope from the endpoints of two sequences.

    Uses only the first and last samples:
    (y[-1] - y[0]) / (x[-1] - x[0]).
    """
    rise = y[-1] - y[0]
    run = x[-1] - x[0]
    return rise / run
92f00bd246e27dae51552dbe546ae108abc17e40
45,453
def s3_get_bucket_versioning(s3_obj, bucketname, s3_client=None):
    """
    Boto3 client based Get Bucket Versioning function

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        s3_client: Any s3 client resource

    Returns:
        dict : GetBucketVersioning response
    """
    # Prefer an explicitly supplied client; otherwise fall back to the
    # client attached to the MCG/OBC object.
    client = s3_client if s3_client else s3_obj.s3_client
    return client.get_bucket_versioning(Bucket=bucketname)
58017d4a414929d06e4da4c8cac83e6a02ef9975
45,454
import pathlib
import os


def get_next_name(file_path: str) -> str:
    """
    Get next available name to download file.

    Parameters
    ----------
    file_path: str
        Absolute path of the file for which next available name to be
        generated.

    Returns
    -------
    str
        Absolute path of the next available name for the file.
    """
    path = pathlib.Path(file_path)
    suffix = "".join(path.suffixes)
    template = os.path.join("{0}", "{1}-copy{2}{3}")
    # Bump the counter until "<stem>-copy<counter><suffixes>" is unused.
    counter = 1
    candidate = template.format(path.parent, path.stem, counter, suffix)
    while os.path.isfile(candidate):
        counter += 1
        candidate = template.format(path.parent, path.stem, counter, suffix)
    return candidate
d662b86469bbef9c8af3eda8cc14404b98041589
45,455
def req_file(filename):
    """
    We're using a requirements.txt file so that pyup.io can use this for
    security checks

    :param filename: path to the requirements file
    :return: list of stripped requirement lines (comment lines removed)
    """
    # FIX: the original opened the file without closing it; a context
    # manager guarantees the handle is released.
    with open(filename) as handle:
        content = handle.readlines()
    # Drop comment lines and trailing whitespace/newlines.
    return [line.strip() for line in content if not line.startswith("#")]
ea4b9574b129ebd7aa6e87f3089f9be5da0e0d9c
45,456
def do_full_save(experiment_result):
    """Decide whether an Experiment should be fully saved.

    Returns True when the final out-of-fold ROC-AUC score recorded in
    `experiment_result` is above 0.75, False otherwise. `experiment_result`
    is the Experiment's result dictionary — the content that gets saved as
    the Experiment's "description" file; see
    :attr:`hyperparameter_hunter.recorders.DescriptionRecorder.result`.
    """
    oof_evaluation = experiment_result["final_evaluations"]["oof"]
    return oof_evaluation["roc_auc_score"] > 0.75
a81c0225c910eb88a75b949dc7f1146b3ec4d768
45,458
from typing import Union
from typing import List


def append_forward_slash_path(
    paths: Union[List[str], str]
) -> Union[List[str], str, None]:
    """
    Returns the input string(s), in the same format as they were passed in,
    with a minimum of one forward slash at the end, given that no forward
    slash exists at the end.

    :param paths: one or more paths to add a forward slash to.
    :return:
    """
    if paths is None:
        return None
    if isinstance(paths, str):
        # Single path: append the slash only when it is missing.
        return paths if paths[-1] == "/" else paths + "/"
    # A list of paths: normalize every element the same way.
    return [p if p[-1] == "/" else p + "/" for p in paths]
e5726067818674457bd5da1b39ba7e47dc91c4d5
45,459
def MK_BM_calc(item1, item2):
    """
    Calculate Informedness (BM), Markedness (MK), and Individual
    classification success index (ICSI).

    :param item1: item1 in expression
    :type item1: float
    :param item2: item2 in expression
    :type item2: float
    :return: MK and BM as float (the string "None" when the inputs are not
        numeric)
    """
    try:
        return item1 + item2 - 1
    except TypeError:
        # Non-numeric input: return the library's "None" string sentinel.
        return "None"
0a24268c905a03ce904794ae72fbed057ad67c61
45,462
import copy


def merge_config(base_config, new_config, in_place=False):
    """Recursively merge ``new_config`` into ``base_config``.

    Nested dicts are merged key by key; any other value in ``new_config``
    overwrites the corresponding ``base_config`` entry.

    Args:
        base_config: dict to merge into, or None.
        new_config: dict of overrides, or None.
        in_place: when False (default), ``base_config`` is deep-copied so
            the caller's dict is not mutated.

    Returns:
        The merged configuration dict (or the non-None input when the
        other is None).

    Raises:
        AssertionError: if a dict value would be overwritten by a non-dict.
    """
    if base_config is None:
        return new_config
    if not in_place:
        base_config = copy.deepcopy(base_config)
    if new_config is None:
        return base_config
    # Merge a configuration file.
    # BUG FIX: dict.iteritems() is Python 2 only; on Python 3 it raises
    # AttributeError. Use items() instead.
    for key, new_value in new_config.items():
        base_value = base_config.get(key)
        if isinstance(base_value, dict):
            assert isinstance(new_value, dict), \
                "New value must be dict: {} - {}".format(key, new_value)
            # Recurse; the defensive copy (if any) was already made above.
            value = merge_config(base_value, new_value, in_place=True)
        else:
            # Overwrite.
            value = new_value
        base_config[key] = value
    return base_config
9c26679c72559a82c46e99b61b1b5f8395258e9d
45,463
def div_growth_rateGm(curr_earn, cap_emp, dt):
    """
    Calculates the growth rate of a dividend using the dividend growth rate
    valuation model where dividend is paid yearly.

    parameters:
    -----------
    curr_earn = current earnings
    cap_emp = capital employed
    dt = current dividend
    """
    # Return on investment and earnings-retention ratio.
    return_on_investment = curr_earn / cap_emp
    retention_ratio = (curr_earn - dt) / curr_earn
    # Growth rate expressed as a percentage, rounded to 4 decimals.
    growth_pct = return_on_investment * retention_ratio * 100
    return round(growth_pct, 4)
f81bb60fe4c0f903ad308d444c578e1447bbb598
45,464
import re


def cigarToLen(cigar):
    """
    Calculate read (query) sequence length from a CIGAR string.

    Sums the spans of all operations that consume the query sequence.

    BUG FIX: the original split on ``[A-Z]`` only, so the ``=`` and ``X``
    operators (sequence match/mismatch, which consume the query exactly
    like ``M``) corrupted the parse and were never counted. It also
    shadowed the ``len`` builtin.
    """
    total = 0
    # Each CIGAR element is <length><operator>.
    for span, op in re.findall(r'(\d+)([MIDNSHP=X])', cigar):
        # M/I/S/=/X consume the query; D/N/H/P do not.
        if op in "MIS=X":
            total += int(span)
    return total
fa7c2b0633a349cc3295519bffcff9965c6ae704
45,466
import torch


def mask_out(tensor, start_ind, end_ind, value, dim=1):
    """ Set the elements before start_ind and after end_ind (both inclusive)
    to the value. Returns the (mutated) tensor and the boolean mask used. """
    if dim != 1:
        raise NotImplementedError
    batch_size, time = tensor.shape[0], tensor.shape[1]
    # Build a (batch, time) grid of time indices. This allocates on every
    # call, but profiling showed it is cheap relative to the rest.
    time_grid = torch.arange(
        time, device=tensor.device, dtype=start_ind.dtype
    ).expand(batch_size, -1)
    # True outside the open interval (start, end), i.e. where we overwrite.
    mask = (time_grid <= start_ind[:, None]) | (time_grid >= end_ind[:, None])
    tensor[mask] = value
    return tensor, mask
1326220320679c32d728ce727d174e00065eaa0a
45,467
import os import json def _read_to_json(file: str) -> dict: """ read json file to json data """ file_path = os.path.join(os.getcwd(), file) print(file_path) if not os.path.exists(file_path): raise FileNotFoundError(f'file <{file}> not found') with open(file_path, 'r+', encoding='utf-8') as f: return json.load(f)
538d1d6a4eecf3bc9cf3ba6fbf28c16584a600d4
45,469
def getFileType(fileName):
    """Classify a file as "video", "image", or "sound" by its extension."""
    # rindex finds the last dot; +1 skips the dot itself.
    extension = fileName[fileName.rindex(".") + 1:].lower()
    if extension in ("mp4", "mov", "avi"):
        return "video"
    if extension in ("jpg", "png", "jpeg", "gif"):
        return "image"
    # Anything else is assumed to be an audio file.
    return "sound"
87976cfeca7d5b70ab44b59830dabe37abc4ba52
45,470
def interaction_strength(idx: int, mol_map: dict, acceptor_exp: float) -> float:
    """Calculate interaction strength for atom with index `idx`."""
    entry = mol_map[idx]
    strength = entry["sa"]
    lone_pairs = entry["num_lp"]
    # Atoms without lone pairs cannot act as acceptors.
    if lone_pairs == 0:
        return 0.0
    return strength * (lone_pairs ** acceptor_exp)
382b69e56002a24691fdc9e8b6943eeeeee7293a
45,471
import math


def project_z(r, x, y):
    """Project an x,y pair onto a sphere of radius r OR a hyperbolic sheet
    if we are away from the center of the sphere.
    """
    dist = math.sqrt(x * x + y * y)
    # sqrt(1/2)*r is the boundary between the spherical cap and the
    # hyperbolic sheet.
    if dist < r * 0.70710678118654752440:
        # Inside sphere
        return math.sqrt(r * r - dist * dist)
    # On hyperbola
    t = r / 1.41421356237309504880
    return t * t / dist
07c284fe3f26655a940b0f4438cd1f328eb3db57
45,474
import re


def parse_auth_header(data: str):
    """ Parse an auth header (without a prefix scheme).

    Returns (params, rest): the key/value pairs parsed so far and whatever
    trailing text could not be parsed.
    """
    params = {}
    # key = token-or-quoted-string, with optional surrounding whitespace.
    pattern = r'([^\s,"=]*)\s*=\s*([^\s,"]+|"[^"\\]*(?:\\.[^"\\]*)*")\s*'
    while True:
        match = re.match(pattern, data)
        if match is None:
            break
        # Store the value with any quote characters dropped.
        params[match.group(1)] = match.group(2).replace('"', '')
        data = data[match.end():]
        # There must be a comma now or done
        if not data.startswith(","):
            break
        data = data[1:].lstrip()
    return params, data
27e6590554ee1321146d81d9da0ee55cf8329f1c
45,475
import torch


def rmv(A, b):
    """Tensorized matrix vector multiplication of rightmost dimensions."""
    # Promote b to a column matrix, multiply, then drop the trailing dim.
    column = b.unsqueeze(-1)
    return torch.matmul(A, column).squeeze(-1)
965a29255afd1cf9b102b49ab1b24cd4b3cbfd98
45,476
from json import JSONDecodeError, loads
from typing import Any


def decode_attrs(attrs: Any) -> Any:
    """Try to decode attributes as json if possible, otherwise return the
    attrs as they are

    Parameters
    ----------
    attrs: Any
        an object containing attributes

    Returns
    ----------
    Any
        either the attributes as json or the attributes as received
    """
    try:
        # BUG FIX: the decoded value was assigned but never returned, so
        # the success path implicitly returned None.
        return loads(attrs)
    except (JSONDecodeError, TypeError):
        # Not valid JSON (or not str/bytes at all): pass through unchanged.
        return attrs
e49f344769b75f3d37a0ce86d84ac55f681f8156
45,477
def calculate_score(hazard_list, dictionary):
    """This function will take the hazard codes from a list and grab the
    respective hazard scores from a dictionary
    """
    scores = []
    for code in hazard_list:
        # Codes extracted from JSON sometimes keep a trailing colon; drop it.
        normalized = code[:-1] if code.endswith(':') else code
        # Scan the code table and collect the score of every match.
        for key in dictionary['Code']:
            if normalized == dictionary['Code'][key]:
                scores.append(dictionary['Hazard Score'][key])
    return scores
e0fe7d995ca263f47624929c84f1a87a506257ed
45,478
def get_all_ips(instance, network_client):
    """ Returns the private and public ip addresses of an Azure instances """
    # List of (private_ip, public_ip) pairs; public_ip may be None.
    output = []
    for interface in instance.network_profile.network_interfaces:
        # Interface name: last path component of the interface resource id.
        if_name=" ".join(interface.id.split('/')[-1:])
        # Resource group: 5th path component of the resource id.
        # NOTE(review): "".join of a single character-indexable element —
        # assumes split('/')[4] is the resource group name; confirm.
        rg="".join(interface.id.split('/')[4])
        try:
            thing=network_client.network_interfaces.get(rg, if_name).ip_configurations
            for x in thing:
                private_ip=x.private_ip_address
                public_ip=None
                """print('\nifdata: ',rg,if_name,x.private_ip_address,x.public_ip_address)"""
                """ Have to extract public IP from public IP class structure...if present """
                if x.public_ip_address is not None:
                    public_ip_id=x.public_ip_address.id
                    public_ip_name=" ".join(public_ip_id.split('/')[-1:])
                    try:
                        public_ip_return = network_client.public_ip_addresses.get(rg, public_ip_name)
                        public_ip=public_ip_return.ip_address
                    except:
                        # NOTE(review): bare except silently hides API errors;
                        # consider catching the specific SDK exception.
                        """ Ignore the exception. return no additional values """
                        pass
                temp_pair = (private_ip, public_ip)
                output.append(temp_pair)
        except Exception as e:
            # Interface lookup failed: skip this interface entirely.
            """ Ignore the exception. return no additional values """
            pass
    return output
80b5880586bae4d4238d5b194a74e235cd07a14e
45,479
def verify(self, case='', level='', **kwargs):
    """Enter the verification run mode.

    .. note::
       This command is only valid at the ``/BEGIN`` level, obtained with
       ``mapdl.finish()``.

    Parameters
    ----------
    case : str, optional
        Optional title of the verification manual file.  Also accepts
        ``'OFF'`` to disable the verification run mode.
    level : int, optional
        Verification level ranging from 1 to 6 defaulting to 4.

    Returns
    -------
    The MAPDL response to the ``/VERIFY`` command.

    Examples
    --------
    Enter the verification routine with the default option.

    >>> mapdl.finish()
    >>> mapdl.verify('VM1')
    '*** VERIFICATION RUN - CASE VM1 *** OPTION= 4'
    """
    command = f'/VERIFY,{case},{level}'
    return self.run(command, **kwargs)
0b25f240a6bc3bb551613727c32524ebc663f5ee
45,480
def checksum(input):
    """Reduce a bit sequence to its checksum.

    Consider each pair: 11, 00, 10, 11, 01, 00. These are same, same,
    different, same, different, same, producing 110101. The resulting
    string has length 6, which is even, so we repeat the process. The pairs
    are 11 (same), 01 (different), 01 (different). This produces the
    checksum 100, which has an odd length, so we stop.
    """
    # Pair adjacent elements; equal pairs yield 1, unequal pairs yield 0.
    reduced = [int(a == b) for a, b in zip(input[::2], input[1::2])]
    if len(reduced) % 2 == 0:
        # Still even: keep reducing recursively.
        return checksum(reduced)
    return reduced
4b22e3cac0434cd399d4f1bcfec82fff1c059b43
45,481
def get_expected_first_bin(begin, freq):
    """Get the first bin of a given frequency based on the begin ts of a
    timeseries query.

    Determine the first bin in the series based on the begin timestamp in
    the timeseries request.

    Bin count math will round down to last bin but timerange queries will
    return the next bin. That is, given a 30 second bin, a begin timestamp
    of 15 seconds past the minute will yield a bin calc of on the minute,
    but the time range query will return 30 seconds past the minute as the
    first result.

    A begin timestamp falling directly on a bin will return that bin.

    BUG FIX: ``(begin/freq)*freq`` is true division on Python 3, so ``bin``
    always equalled ``begin`` and the rounding never happened; floor
    division restores the intended behavior.
    """
    bin = (begin // freq) * freq
    if bin < begin:
        # begin falls inside the bin: the query returns the next bin.
        return bin + freq
    elif bin == begin:
        return bin
    else:
        # Shouldn't happen
        raise RuntimeError
2bdf681bcb83fb90036748a86fbd1cc2e84b9941
45,482
import os


def get_test_category(test_file):
    """Get a category for the specified test using its path and name.

    Args:
        test_file (str): the test python file

    Returns:
        str: concatenation of the test path and base filename joined by
            dashes
    """
    directory, filename = os.path.split(test_file)
    names = []
    for part in (directory, filename):
        # basename of the dir yields its last component; splitext drops
        # the ".py" extension from the filename.
        names.append(os.path.splitext(os.path.basename(part))[0])
    return "-".join(names)
efe9bde27faf791fcb658cc8b58724ecf4addb53
45,485
import argparse
import re


def contains_any_copyright(comments: list, args: argparse.Namespace) -> bool:
    """ Return True if any comment contain the word "copyright" """
    for comment in comments:
        # Only consider comments within the first `max_lines` lines.
        if comment.line_number() > args.max_lines:
            continue
        if re.search(r'copyright', comment.text(), re.IGNORECASE):
            return True
    return False
954235764556414587cc544d481d022931f00e69
45,486
import unicodedata import re def _key_from_url(url: str) -> str: """ Convert a URL str to one valid for use as a file name or dict key. URL Protocols are removed entirely. The returned string will have characters in the set [a-zA-Z.-_]. Parameters ---------- url : str A URL string Returns ------- str A filename-safe string Example ------- >>> url = _key_from_url('http://test.alyx.internationalbrainlab.org/') 'test.alyx.internationalbrainlab.org' """ url = unicodedata.normalize('NFKC', url) # Ensure ASCII url = re.sub('^https?://', '', url).strip('/') # Remove protocol and trialing slashes url = re.sub(r'[^.\w\s-]', '_', url.lower()) # Convert non word chars to underscore return re.sub(r'[-\s]+', '-', url)
3e9f85fe38e68966f00f8d2a1fb0b3752d9efb75
45,487
def mix_wave(src, dst):
    """Mix two wave body into one.

    The shorter body is mixed sample-by-sample into the longer one, which
    is mutated in place and returned (so the output keeps the longer
    length).
    """
    if len(src) > len(dst):
        # output should be longer
        dst, src = src, dst
    for i, s_val in enumerate(src):
        d_val = dst[i]
        if s_val < 128 and d_val < 128:
            # Both below midpoint: multiply-style mix.
            mixed = s_val * d_val / 128
        else:
            # Otherwise use the complementary mixing formula.
            mixed = 2 * (s_val + d_val) - s_val * d_val / 128 - 256
        dst[i] = int(mixed)
    return dst
174dabee1122e6a207fa5e72c5634c85b03a6378
45,488
def calculate_slasher_snapshot_difference(client, snapshot_old, snapshot_new):
    """
    Calculates the difference between two slasher snapshots

    Parameters
    ----------
    client : ``Client``
        The respective client.
    snapshot_old : `None` or `tuple` of (`dict` of (`int`, `list` of `tuple` \
            (`bool`, ``SlasherApplicationCommand``)) items, `None` or `set` of ``ComponentCommand``)
        An old snapshot taken.
    snapshot_new : `None` or `tuple` of (`dict` of (`int`, `list` of `tuple` \
            (`bool`, ``SlasherApplicationCommand``)) items, `None` or `set` of ``ComponentCommand``)
        A new snapshot.

    Returns
    -------
    snapshot_difference : `None` or `tuple` (`tuple` (`set` of ``SlasherApplicationCommand``, `set` of \
            ``SlasherApplicationCommand``), `tuple` (`None` or `set` of ``ComponentCommand``, `None` or \
            `set` of ``ComponentCommand``)
        The difference between the two snapshots.
    """
    # Neither side captured anything: nothing to diff.
    if (snapshot_old is None) and (snapshot_new is None):
        return None
    # Unpack each snapshot into its application-command and
    # component-command halves, treating a missing snapshot as two Nones.
    if snapshot_old is None:
        application_command_snapshot_old = None
        component_command_snapshot_old = None
    else:
        application_command_snapshot_old, component_command_snapshot_old = snapshot_old
    if snapshot_new is None:
        application_command_snapshot_new = None
        component_command_snapshot_new = None
    else:
        application_command_snapshot_new, component_command_snapshot_new = snapshot_new
    # --- Application command difference, computed per guild ---
    if (application_command_snapshot_old is not None) or (application_command_snapshot_new is not None):
        added_application_commands = []
        removed_application_commands = []
        # Union of guild ids present in either snapshot.
        guild_ids = set()
        if (application_command_snapshot_old is not None):
            guild_ids.update(application_command_snapshot_old.keys())
        if (application_command_snapshot_new is not None):
            guild_ids.update(application_command_snapshot_new.keys())
        for guild_id in guild_ids:
            local_added_application_commands = []
            local_removed_application_commands = []
            # Apply the new snapshot's changes first.
            if (application_command_snapshot_new is not None):
                try:
                    new_changes = application_command_snapshot_new[guild_id]
                except KeyError:
                    pass
                else:
                    for added, command in new_changes:
                        if added:
                            local_added_application_commands.append(command)
                        else:
                            # NOTE(review): `.remove` on a list that is empty
                            # at this point raises ValueError; `.append` to
                            # the removed list looks intended — confirm.
                            local_removed_application_commands.remove(command)
            # Then cancel out changes that were already in the old snapshot.
            if (application_command_snapshot_old is not None):
                try:
                    old_changes = application_command_snapshot_old[guild_id]
                except KeyError:
                    pass
                else:
                    for added, command in old_changes:
                        if added:
                            # Previously added: either cancels a new add, or
                            # counts as removed now.
                            try:
                                local_added_application_commands.remove(command)
                            except ValueError:
                                local_removed_application_commands.append(command)
                        else:
                            # Previously removed: either cancels a new
                            # removal, or counts as added now.
                            try:
                                local_removed_application_commands.remove(command)
                            except ValueError:
                                local_added_application_commands.append(command)
            added_application_commands.extend(local_added_application_commands)
            removed_application_commands.extend(local_removed_application_commands)
        # Normalize empty lists to None so the final comparison is uniform.
        if (not added_application_commands):
            added_application_commands = None
        if (not removed_application_commands):
            removed_application_commands = None
        if (added_application_commands is None) and (removed_application_commands is None):
            application_command_difference = None
        else:
            # A live client with a known application triggers a re-sync of
            # its slasher before reporting the difference.
            if client.running and client.application.id:
                slasher = getattr(client, 'slasher', None)
                if (slasher is not None):
                    slasher.sync()
            application_command_difference = added_application_commands, removed_application_commands
    else:
        application_command_difference = None
    # --- Component command difference (plain set arithmetic) ---
    if (component_command_snapshot_old is None) or (component_command_snapshot_new is None):
        # One side missing: the present side is wholly added/removed.
        removed_component_commands = component_command_snapshot_old
        added_component_commands = component_command_snapshot_new
    else:
        removed_component_commands = component_command_snapshot_old-component_command_snapshot_new
        added_component_commands = component_command_snapshot_new-component_command_snapshot_old
    if (not removed_component_commands):
        removed_component_commands = None
    if (not added_component_commands):
        added_component_commands = None
    if (added_component_commands is None) and (removed_component_commands is None):
        component_command_difference = None
    else:
        component_command_difference = (removed_component_commands, added_component_commands)
    # Collapse to None when neither half changed.
    if (application_command_difference is None) and (component_command_difference is None):
        snapshot_difference = None
    else:
        snapshot_difference = (application_command_difference, component_command_difference)
    return snapshot_difference
8e16f3943bcf7651496a7e6e6a2c2e7f59c6fb37
45,490
import argparse


def parse_args() -> str:
    """Returns Memwatch log filename."""
    # Build the CLI parser with one optional --log argument.
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--log', help='Memwatch log filename',
                        default='memwatch.log')
    args = parser.parse_args()
    # Defensive: fall back to '' if the attribute is somehow missing.
    log_name = ''
    try:
        log_name = args.log
    except AttributeError:
        pass
    return log_name
7c5c6d695626fa5c1607b3c38f6626b12754afe9
45,491
def render_item(type_, obj, autogen_context):
    """Apply custom rendering for selected items."""
    # Ensure generated migrations can reference sqlalchemy_utils types.
    autogen_context.imports.add("import sqlalchemy_utils")
    # Defer to Alembic's default rendering for everything.
    return False
757448cfda4cee9d144d071686ef47b95b6fdc32
45,492
def generate_link(resources):
    """
    Generates a link in the CoRE Link Format (RFC6690).

    :param resources: Array of resources that should translated into links.
        Resources are dict, containing a path property and a parameters
        property. Path is a string and parameters is a dict, containing the
        parameters and their values.
    """
    links = []
    for resource in resources:
        parts = ["<" + resource["path"] + ">"]
        # Each parameter is rendered as ;name=value after the path.
        if "parameters" in resource:
            for name, value in resource["parameters"].items():
                parts.append(";" + str(name) + "=" + str(value))
        links.append("".join(parts))
    # Links are comma-separated per the CoRE Link Format.
    return ",".join(links)
add8c2a96e1a575b0f51a145761f4bae639f7dae
45,493
def _is_paired(fastq, fastq2, single_end): """Determines the workflow based on file inputs. Args: """ if fastq and fastq2: paired_end = True interleaved = False elif single_end: paired_end = False interleaved = False else: paired_end = True interleaved = True return paired_end, interleaved
44526810e9c41294f4529baa846eced362b1a2f8
45,494
import re


def multi_delimiter_split(string, delimiters='', split_whitespace=True, remove_whitespace=True):
    """Split a string on several delimiters at once.

    Args:
        string: text to split.
        delimiters: delimiter characters, e.g. ';,'.
        split_whitespace: also treat whitespace as a delimiter
            (default True).
        remove_whitespace: strip extra whitespace around each delimiter
            (default True).

    Returns:
        list of the split fragments.
    """
    whitespace_part = r'\s' if split_whitespace else ''
    pattern = '[{0}{1}]'.format(delimiters, whitespace_part)
    if remove_whitespace:
        # Let the delimiter swallow surrounding whitespace too.
        pattern = r'\s*' + pattern + r'\s*'
    return re.split(pattern, string)
6178b25b03e3200af70efff5da74c8f45bb58b87
45,495
import numpy
from operator import add


def getMSE(dtModel, data):
    """
    Return mean squared error (MSE) of DecisionTreeModel on the given
    RDD[LabeledPoint].
    """
    # Within-partition accumulator: add the squared (prediction, label) gap.
    seqOp = (lambda acc, x: acc + numpy.square(x[0] - x[1]))
    # Predict on the features RDD; labels are extracted separately so the
    # two RDDs can be zipped back together pairwise.
    predictions = dtModel.predict(data.map(lambda x: x.features))
    truth = data.map(lambda p: p.label)
    # Sum of squared errors: seqOp within partitions, `add` across them.
    trainMSE = predictions.zip(truth).aggregate(0, seqOp, add)
    # Guard against an empty RDD to avoid division by zero.
    if data.count() == 0:
        return 0
    # `0.0 +` forces float division (Python 2 style).
    return trainMSE / (0.0 + data.count())
3f10c24f640cbf4321edfba3ee6b351e74775874
45,496
def my_range(start, stop, step, include_start=True, include_end=True):
    """Generate an arithmetic range that can force-include its endpoints.

    Builds [start, start+step, ...] up to `stop`, then optionally forces
    `stop` to be the last element (even when the step would have skipped
    it) and optionally drops `start`.

    Args:
        start : first value of the interval.
        stop : last value of the interval.
        step : distance between consecutive values.
        include_start : keep the first element (default True).
        include_end : force `stop` as the final element (default True).

    Returns:
        A list with the interval.
    """
    values = []
    current = start
    while current <= stop:
        values.append(current)
        # Recompute from start each time to avoid float accumulation error.
        current = start + len(values) * step
    if include_end:
        # Force `stop` as the final element when the step overshot it.
        if values[-1] != stop:
            values.append(stop)
    else:
        if values[-1] == stop:
            values = values[:-1]
    if not include_start:
        values = values[1:]
    return values
f99107c488d58bb1d3c964bef496be9edf784bc0
45,498
import re
import sys


def check_system_build(config, download_os):
    """Check system build and return release data."""
    releases = download_os['releases']
    # Without an explicit build in the config, default to the first release.
    if 'system_build' not in config.keys():
        release = releases[0]
        print(f"Using first available {download_os['name']} build: {release['label']}")
        return release
    # Get system build from config
    wanted = config['system_build']
    # Find a release whose label regex-matches the requested build.
    for release in releases:
        if re.match(wanted, release['label'], re.I):
            return release
    # Make sure release is valid
    listing = "\n".join([f" + {r['label']}" for r in releases])
    sys.exit(f"Value for 'system_build' is not valid, must regex match one of:\n{listing}")
92396f8117478f22d6682552d33d7129ce68abeb
45,500
from typing import Tuple


def summarize_location(location: Tuple[str, str, str, str, str, str]) -> Tuple[str, str, str]:
    """
    Get manuscript location and summarize for usage in citavi

    Args:
        location (tuple): metadata of manuscript's location

    Returns:
        tuple: summary of metadata of manuscript's location
    """
    # Normalize: empty fields become "", non-empty fields get ", " appended
    # so they can be concatenated and the trailing separator trimmed later.
    fields = []
    for field in location:
        fields.append(field + ", " if field else "")
    try:
        # Settlement: city (index 1) before country (index 0), minus ", ".
        settlement = (fields[1] + fields[0])[:-2]
    except:
        settlement = "unknown"
    try:
        # Archive: indices 2-4 concatenated, minus the trailing ", ".
        archive = (fields[2] + fields[3] + fields[4])[:-2]
    except:
        archive = "unknown"
    signature = fields[5][:-2]
    return settlement, archive, signature
fa6ed40a4ddc4510ea919773d9b68458c5edc738
45,501
def sumIntegerSeries(start=1, stop=100, difference=1):
    """Sum an arithmetic series in closed form.

    Based on the classic Gauss trick for summing a series without doing
    the middle-work, from pg 78/79 of "The Art of the Infinite" by Robert
    and Ellen Kaplan: the sum of `stop` terms beginning at `start` with
    common difference `difference` is n*(2a + (n-1)d)/2.

    Famous "schoolboy in class" example:
        sumIntegerSeries(1, 100, 1) == 1 + 2 + ... + 100 == 5050
    """
    term_count = stop
    # n * (2a + (n-1)d) / 2
    total = term_count * (2 * start + (term_count - 1) * difference) / 2
    return total
635c6e1a9a327d8e4a84482e23d6f48cbe52c872
45,502
def ldd_to_ddl(ldd):
    """Transpose a list of dicts of dicts into a dict of dicts of lists.

    Args: list of dicts of dicts
    Returns: dict of dicts of lists (each metric's values collected across
    the list, in order)
    """
    first = ldd[0]
    transposed = {}
    for placer_name in first.keys():
        metrics = {}
        for metric_name in first[placer_name]:
            # Gather this metric's value from every entry, in order.
            metrics[metric_name] = [entry[placer_name][metric_name] for entry in ldd]
        transposed[placer_name] = metrics
    return transposed
44dae6ade09b50e057c20ccfcc4e4136253f91fc
45,503
def join_audio(audio1, audio2):
    """Concatenate two audio sample sequences.

    >>> join_audio([1, 4], [2, 5, 3, 6])
    [1, 4, 2, 5, 3, 6]
    """
    combined = audio1 + audio2
    return combined
86146ac415bba96b57dbea7a1fb69f1ecbc12253
45,504
def route_format(t_route_dest):
    """Shorten destinations to save space."""
    # Known long destination names mapped to compact display forms; any
    # unknown destination passes through unchanged. Add more substitutions
    # here to save display space.
    substitutions = {
        "TO GRANVILLE": "GRANVILLE",
        "COMM'L-BDWAY STN": "COM-BW STN",
    }
    return substitutions.get(t_route_dest, t_route_dest)
17af8f25dc17c82292a23255676d4584bf44b89e
45,506
def reduce_commutators(element, commutators):
    """Push each generator left past larger ones using commutator relations.

    `element` is a tuple of generator indices; `commutators` maps an
    ordered pair (a, i) with a < i to a list of (generator, count) terms
    that are inserted when an `a` is moved past an `i`.

    >>> reduce_commutators((0, 1), {(0, 1): [(2, 1)], (0, 2): [], (1, 2): [] })
    ((2, 1, 0), True)
    >>> reduce_commutators((1, 0, 1), {(0, 1): [(2, 1)], (0, 2): [], (1, 2): [] })
    ((2, 1, 1, 0), True)
    >>> reduce_commutators((1, 2, 1, 0), {(0, 1): [(2, 1)], (0, 2): [], (1, 2): [] })
    ((2, 1, 1, 0), True)
    """
    elem = list(element)
    changed = False
    # Process generators smallest-first; each pass rebuilds `elem`.
    for a in sorted(list(set(element))):
        count = 0  # pending run length of generator `a`
        n = []     # rebuilt word
        for i in elem:
            if i == a:
                # Extend the current run of `a`s.
                count = count + 1
            else:
                if count > 0:
                    if a < i:
                        # Commute one `a` past `i`: emit count-1 `a`s, then
                        # the commutator terms for (a, i), then `i`, and
                        # carry a single `a` forward in `count`.
                        for j in range(0, count-1):
                            n.append(a)
                        changed = True
                        k = (a, i)
                        for g, c in commutators[k]:
                            for f in range(0, c):
                                n.append(g)
                        n.append(i)
                        count = 1
                    else:
                        # `a` already precedes a smaller generator: flush
                        # the run unchanged.
                        for j in range(0, count):
                            n.append(a)
                        n.append(i)
                        count = 0
                else:
                    n.append(i)
                    count = 0
        # Flush any trailing run of `a`s.
        for j in range(0, count):
            n.append(a)
        elem = n
    return tuple(elem), changed
b60ddf0bfa8ce2fb8de165d423b839f880f7ec71
45,508
import pickle


def load_pkl(file):
    """Loads data from pickle

    Parameters
    ----------
    file : str
        Path to the pickle file.

    Returns
    -------
    data
        The unpickled object.
    """
    # FIX: use a context manager so the handle is closed even when
    # pickle.load raises (the original left it to the garbage collector).
    with open(file, 'rb') as handle:
        return pickle.load(handle)
ff7a9cefa4231c9dee6030b8fd14b72e55a11548
45,510
def get_rce_bpd(rce):
    """
    Gets the BPD pointed-to by the given RemoteCustomEvent
    """
    # This is actually a bit weird 'cause the path components are exported
    # with bracket-based indexes as if they were top-level vars even though
    # they're not, so pull each index out of the variable name by hand.
    components = {}
    for (name, value) in rce['ProviderDefinitionPathName'].items():
        if not value or value == '':
            continue
        if not name.startswith('PathComponentNames['):
            continue
        index = int(name[19:-1])
        components[index] = value.strip('"')
    # Join components in index order: BPD entries get a ':' separator,
    # everything else a '.'.
    pieces = []
    for index in sorted(components):
        component = components[index]
        if not pieces:
            pieces.append(component)
        elif component.startswith(('AIBehaviorProviderDefinition_', 'BehaviorProviderDefinition_')):
            pieces.append(':{}'.format(component))
        else:
            pieces.append('.{}'.format(component))
    return ''.join(pieces)
924290ffedd98bbaea2926a23b04124fad8027e8
45,511
def encode_pos(i, j):
    """Encodes a pair (i, j) as a scalar position on the board."""
    # Row-major index on a 3-column board.
    row_offset = 3 * i
    return row_offset + j
583d2e8370edc5801760f1c59c0c3aadad61876f
45,512
def division(a, b):
    """Divide a by b, re-raising division-by-zero with a readable message."""
    try:
        return a / b
    except ZeroDivisionError as err:
        # Wrap the original error text into the (Russian) user message.
        message = f'Получили ошибку деления на ноль: {err}'
        raise ZeroDivisionError(message)
cf8e3e9b18a02e2524772f261abafc18040a046f
45,513
import random


def key_gen(key_size, max_flips_per_node, num_high_and_med):
    """
    num_high_and_med: the total number of high- and medium-degree vertices

    returns a lexicographically sorted list of 2-tuples, with the indices
    in the tuples in increasing order
    """
    # Feasibility guard: there must be strictly more index slots than the
    # key needs (or exactly enough in the max_flips_per_node == 1 case).
    assert (max_flips_per_node == 1 and num_high_and_med >= key_size*2) or \
        num_high_and_med*max_flips_per_node > key_size*2
    # if max_flips_per_node > 1 and num_high_and_med*max_flips_per_node == key_size*2,
    # the function could hang depending on the sampling order
    #sample pairs one by one until we have key_size of them
    key = set()
    incidencies = dict()  # per-index usage count, capped at max_flips_per_node
    while len(key) < key_size:
        ind1 = random.randint(0, num_high_and_med-1)
        ind2 = random.randint(0, num_high_and_med-1)
        #fix index order to avoid duplicates
        if ind2 < ind1:
            ind1, ind2 = ind2, ind1
        # Reject self-pairs, duplicates, and pairs whose endpoints are
        # already used max_flips_per_node times.
        valid_pair = True
        if ind1 == ind2:
            valid_pair = False
        if (ind1, ind2) in key:
            valid_pair = False
        if ind1 in incidencies and incidencies[ind1] == max_flips_per_node:
            valid_pair = False
        if ind2 in incidencies and incidencies[ind2] == max_flips_per_node:
            valid_pair = False
        if valid_pair:
            key.add((ind1, ind2))
            for ind in ind1, ind2:
                if ind in incidencies:
                    incidencies[ind] += 1
                else:
                    incidencies[ind] = 1
    res = list(key)
    res.sort()
    return res
d81ac36b2658d2cfdd9e0da3ab39e97d60a3503f
45,514
import jinja2
import os


def generate_content(input_dict):
    """
    Use jinja2 template to generate connector content
    """
    # Templates live next to this module; trim_blocks keeps output tidy.
    template_dir = os.path.dirname(__file__)
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True,
    )
    template = env.get_template('connector.template')
    return template.render(input_dict)
8f37880b583785183de7be05c6019a1b7e346c0f
45,515
import json


def save(model, prefix):
    """save model for future inspection and continuous training

    Writes the model architecture to ``<prefix>.json`` and the weights to
    ``<prefix>.h5``, then returns the model unchanged.
    """
    model_file = prefix + ".json"
    weight_file = prefix + ".h5"
    # FIX: the original passed open(...) straight into json.dump, leaving
    # the handle unclosed; a context manager guarantees the flush/close.
    # NOTE: to_json() already returns a JSON string, so json.dump encodes
    # it a second time — kept for compatibility with existing loaders.
    with open(model_file, "w") as handle:
        json.dump(model.to_json(), handle)
    model.save_weights(weight_file)
    print("Model saved.")
    return model
fd7d3be1f6b8760ae943bead50697ce3e3a330f7
45,516
def peek(env):
    """Peek at the env's board without leaving the env open.

    Resets the env to obtain the board, closes it, and returns the board
    dimensions, the board itself, and the available moves.
    """
    _, _, board, moves = env.reset()
    env.close()
    rows, cols = board.shape
    return rows, cols, board, moves
90507357bf05bd5c0ed8c4eeb99033937b0de460
45,517
def switch(*pairs):
    """Helper function for cond-logic in a Python lambda expression.

    Part of a poor-man's functional programming suite used to define
    several pfainspector commands as one-liners.

    :type pairs: callables
    :param pairs: sequence of predicate1, consequent1, predicate2,
        consequent2, ..., alternate; the predicates will each be called in
        turn until one returns ``True``, and then the corresponding
        consequent will be called *or* the alternate will be called if none
        of the predicates return ``True``
    """
    # Need an odd number of arguments and at least one predicate pair.
    if len(pairs) % 2 != 1 or len(pairs) < 3:
        raise TypeError
    predicates = pairs[:-1][::2]
    consequents = pairs[:-1][1::2]
    for predicate, consequent in zip(predicates, consequents):
        # Lazily evaluate callables so un-taken branches never run.
        result = predicate() if callable(predicate) else predicate
        if result:
            return consequent() if callable(consequent) else consequent
    alternate = pairs[-1]
    return alternate() if callable(alternate) else alternate
1ff3c23e22e316a0959a46da8dda6bfdbe274e86
45,518
import os
from pathlib import Path


def create_numbered_dirs(parent_dir: str, start: int, end: int):
    """
    Creates folders from start to end (both inclusive) at the specified
    parent directory (absolute path). If a directory to add already exists,
    it skips adding it and moves on to the next directory to add
    """
    created = []
    for number in range(start, end + 1):
        target = os.path.join(parent_dir, str(number))
        try:
            Path(target).mkdir()
        except FileExistsError:
            # Already present: report and move on without recording it.
            print(f'Cannot add directory {target} as it already exists. Skipping...')
        else:
            created.append(target)
            print(f'Added directory {target} ...')
    return created
7f50201f8bad89f50e19b4ff349fb314739cda39
45,520
import os
import tempfile

def build_path(selected, selected_dir, default_dir, outdir):
    """Resolve an output directory from a selection flag and hints.

    If ``selected`` is falsy, return ``(False, None)``. Otherwise:

    * ``selected_dir`` set to a path string: returned directly when
      absolute, or joined onto ``outdir`` when relative;
    * ``selected_dir`` is ``None`` (or the string "none"): the
      ``default_dir`` joined onto ``outdir``;
    * ``selected_dir`` is ``False`` (or the string "false"): a fresh
      temporary directory.

    Always returns a tuple ``(path_or_False, tempdir_or_None)``; the
    second element is only set when a temporary directory was created.
    """
    if not selected:
        return (False, None)
    if isinstance(selected_dir, str):
        # String spellings of the sentinels are normalized first.
        lowered = selected_dir.lower()
        if lowered == 'false':
            selected_dir = False
        elif lowered == 'none':
            selected_dir = None
        elif os.path.isabs(selected_dir):
            return (selected_dir, None)
        else:
            return (os.path.join(outdir, selected_dir), None)
    if selected_dir is None:
        return (os.path.join(outdir, default_dir), None)
    if selected_dir is False:
        tmp = tempfile.mkdtemp()
        return (tmp, tmp)
    raise ValueError('selected_dir must be False, None, or a string')
1f16d17ee6808c19a68a007b78ed9851d9fb6539
45,522
def pct_to_value(data, d_pct):
    """Convert percentages to absolute values.

    For each key present in both mappings, compute
    ``data[key]/100 * d_pct[key]``. Keys missing from ``d_pct`` are
    dropped. If either mapping is empty/falsy, ``data`` is returned
    unchanged.
    """
    if not data or not d_pct:
        return data
    return {
        key: (float(base) / 100.0) * float(d_pct[key])
        for key, base in data.items()
        if key in d_pct
    }
3c5b1f75f484b1f767bc3acf6cce5bac4b136608
45,523
def arr_to_d(dic, arr, K):
    """Store ``arr[j]`` under key ``K[j]`` in ``dic``.

    Mutates ``dic`` in place and returns it. ``arr`` must be at least
    as long as ``K``.
    """
    for position, key in enumerate(K):
        dic[key] = arr[position]
    return dic
720d4a77aa73358309b24e989c911391dc058093
45,526
def concat_string(target, msg=None, delimiter="", last=""):
    """Concatenate a series of strings onto the end of *target*.

    *delimiter* is inserted between consecutive items of *msg*;
    *last* is appended after the final item.

    Args:
        target: string to append to.
        msg: sequence of strings to append (default: no items).
        delimiter: optional filler between items.
        last: suffix appended after the final item.

    Returns:
        The concatenated string; *target* unchanged when *msg* is empty.
    """
    # None default avoids the mutable-default pitfall of ``msg=[]``;
    # the empty-sequence guard avoids the IndexError the original
    # raised on ``msg[-1]``.
    if not msg:
        return target
    return target + delimiter.join(msg) + last
00a175d167a82427244ab805269b32677676d292
45,527
def parse_commenttree(commentlist, comment):
    """Collect all enabled descendants of *comment*, depth-first.

    Usage:
        {% parse_commenttree article_comments comment as childcomments %}
    """
    collected = []

    def walk(parent):
        # Each child is recorded before its own subtree (pre-order).
        for child in commentlist.filter(parent_comment=parent, is_enable=True):
            collected.append(child)
            walk(child)

    walk(comment)
    return collected
b935f6ac3643637d314e566192a91fcdcf7e3d8a
45,528
def search_trie(trie, token, token_i=0):
    """Search the character-trie for paths matching *token*.

    A ``"*"`` entry matches any remainder; ``"$"`` marks an exact
    end-of-token match. Returns the stored payload, or ``[]`` when
    nothing matches.
    """
    if "*" in trie:
        return trie["*"]
    if token_i == len(token):
        return trie["$"] if "$" in trie else []
    ch = token[token_i]
    if ch not in trie:
        return []
    return search_trie(trie[ch], token, token_i + 1)
46da1627299c2318f8adad59515c8d5fad4e821d
45,530
def is_image_file(s):
    """Return True when the string is tagged as an image ('img:' prefix)."""
    return s[:4] == 'img:'
68081021527dbff9d2da7235c9bf454d54a7fd68
45,531
def regexGetGroups(matches, groupsMapping: dict):
    """Map captured regex groups to their configured names.

    :param matches: a regex match object
    :param groupsMapping: ``{groupNumber: groupName}``
    :return: ``{groupName: captured string}`` for every mapped group
        number that exists in the match
    """
    results_dict = {}
    total_groups = len(matches.groups())
    for group_number in range(1, total_groups + 1):
        if group_number in groupsMapping:
            results_dict[groupsMapping[group_number]] = matches.group(group_number)
    return results_dict
83416c5a14fb75623ac35032a6266ac688091c06
45,532
def _node_matches(parse_tag, ppdb_tag): """ Match tags between Stanford parse and PPDB """ # print "Match? \t", parse_tag, ppdb_tag if parse_tag=="ST" and ppdb_tag.startswith('S'): return True # don't worry about final full stop if parse_tag.startswith(ppdb_tag): return True # mainly verbs: ignore verb info added to tag return parse_tag == ppdb_tag
770e1bd5c1ed899016055e6b15d68d6960d8a803
45,533
def pureDependency(dependency: str) -> str:
    """Get the bare name of a package requirement.

    Strips any version specifier (``==``, ``>=``, ``<=``, ``>``, ``<``,
    ``~=``, ``!=``, ``=``) from the requirement string.

    Parameters
    ----------
    dependency : str
        package requirement, e.g. ``'package==1.2.3'``

    Returns
    -------
    str
        the package name without the version

    >>> pureDependency('package==1.2.3')
    'package'
    """
    # Cut at the first operator character; unlike the original chain of
    # str.split calls this also handles '!=' (which previously left a
    # trailing '!').
    for index, char in enumerate(dependency):
        if char in "=<>~!":
            return dependency[:index]
    return dependency
1891c0291b84e10ff35369c0cc740eaa6ab9d4ff
45,534
def multiply_numbers(num1, numb2):
    """Return the product of the two numbers."""
    product = num1 * numb2
    return product
041afaa677d6cc827abff9440101a13e2e987304
45,536
from pathlib import Path
import sys

def GetScriptDir():
    """Get the directory where the PinPlay Python scripts are located.

    @return script directory (as a string, fully resolved)
    """
    script_path = Path(sys.argv[0])
    # strict=True raises if the parent directory does not exist.
    return str(script_path.parent.resolve(strict=True))
a35ef1b656daf5d753f3ce75b5ed01b4f4bd2610
45,538
import torch

def get_random_tensor(size, dtype, use_cuda):
    """Returns a random tensor of given type and size

    Args:
        size (int): Tensor size (number of elements)
        dtype (:obj:`torch.dtype`): One of `torch.float16` and `torch.float32`
        use_cuda (bool): Return CUDA tensor

    Returns:
        torch.Tensor of uniform random values, cast to *dtype*.
    """
    # Generate in the default dtype first, then cast — matches the
    # original numeric behavior of rand().to(dtype).
    result = torch.rand(size).to(dtype=dtype)
    return result.cuda() if use_cuda else result
da17a8e33b9ccf9a5bf9bcc86104a1577a8f38a9
45,540
import os
import re

def get_special_paths(dirname):
    """Return absolute paths of all "special" files in *dirname*.

    A special file has a dunder-style name fragment, e.g. ``__init__``.
    """
    special_pattern = re.compile(r'_{2}\w+_{2}')
    return [
        os.path.abspath(os.path.join(dirname, entry))
        for entry in os.listdir(dirname)
        if special_pattern.search(entry)
    ]
9f5fb35da24ed895662b235249b2df36c7d68b80
45,541
import torch

def sub2ind(shape, rows, cols):
    """
    A PyTorch implementation of MATLAB's "sub2ind" function

    Parameters
    ----------
    shape : torch.Size | list | tuple
        shape of the 2D matrix
    rows : torch.Tensor (n,)
        row subscripts
    cols : torch.Tensor (n,)
        column subscripts

    Returns
    -------
    index : torch.Tensor (n,)
        linear (row-major) indices
    """
    # checks (kept as asserts to preserve the original contract)
    assert isinstance(shape, (tuple, list))
    assert isinstance(rows, torch.Tensor) and len(rows.shape) == 1
    assert isinstance(cols, torch.Tensor) and len(cols.shape) == 1
    assert len(rows) == len(cols)
    assert torch.all(rows < shape[0]) and torch.all(cols < shape[1])
    if not len(shape) == 2:
        raise NotImplementedError('only implemented for 2D case.')

    # Row-major linear index computed arithmetically; the original
    # materialized a full shape[0]*shape[1] arange matrix just to
    # index into it — same result, O(1) memory instead of O(m*n).
    return rows.long() * shape[1] + cols.long()
8b89b58824b1c80327082afb74b4486816345d62
45,542
def get_variable_config(var_config, var):
    """Retrieve configuration info for variable *var* as defined in the
    main configuration file.

    Optional keys ('obs scale factor', 'regrid to', 'regrid method')
    default to None when absent.
    """
    optional = var_config.get
    return {
        'var names': var_config['var names'],
        'input resolution': var_config['freq'],
        'units': var_config['units'],
        'scale factor': var_config['scale factor'],
        'obs scale factor': optional('obs scale factor'),
        'deacc': var_config['accumulated'],
        'regrid': optional('regrid to'),
        'rgr method': optional('regrid method'),
    }
c7a2a7136584402460fc97f34f3c37f21475ab98
45,543
def parsiraj_strelice_BKG(strelice):
    """Read a context-free grammar written in standard arrow notation.

    Each line has the form ``variable -> rhs1 | rhs2 | ...`` (possible
    right-hand sides). ``ε`` may be used for the empty production but
    is optional. The left-hand variable of the first line is the start
    symbol. Symbols on each right-hand side must be space-separated.

    Returns ``(variables, terminals, productions, start)``.
    """
    variables, symbols, productions = set(), set(), set()
    start = None
    for line in strelice.strip().splitlines():
        # Ensure the arrow is whitespace-delimited so split() finds it.
        line = line.replace('->', ' -> ', 1)
        head, _arrow, tail = line.split(None, 2)
        variables.add(head)
        if start is None:
            start = head
        for alternative in tail.split('|'):
            rhs = tuple(alternative.split())
            if rhs == ('ε',):
                rhs = ()
            productions.add((head,) + rhs)
            symbols.update(rhs)
    return variables, symbols - variables, productions, start
593462a0d55419465a05f1cc98f4bb4f5005e043
45,545
import numpy
def update_hull(hull,newx,newhx,newhpx,domain,isDomainFinite):
    """update_hull: update the hull with a new function evaluation
    Input:
       hull - the current hull (see setup_hull for a definition)
       newx - a new abcissa
       newhx - h(newx)
       newhpx - hp(newx)
       domain - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?
    Output:
       newhull
    History:
       2009-05-21 - Written - Bovy (NYU)
    """
    # hull layout (inferred from the appends at the end of this function;
    # confirm against setup_hull):
    #   hull[0]=cu normalization constant, hull[1]=abscissae x,
    #   hull[2]=h(x), hull[3]=h'(x), hull[4]=z tangent-intersection points,
    #   hull[5]=normalized cumulative sums, hull[6]=hu envelope values at z
    #BOVY: Perhaps add a check that newx is sufficiently far from any existing point
    #Find where newx fits in with the other xs
    if newx > hull[1][-1]:
        # newx lies beyond the current right-most abscissa: append the new
        # point and compute one new tangent intersection with the old last
        # segment.
        newxs= numpy.append(hull[1],newx)
        newhxs= numpy.append(hull[2],newhx)
        newhpxs= numpy.append(hull[3],newhpx)
        #new z
        newz= ( newhx - hull[2][-1] - newx*newhpx + hull[1][-1]*hull[3][-1])/( hull[3][-1] - newhpx)
        newzs= numpy.append(hull[4],newz)
        #New hu
        newhu= hull[3][-1]*(newz-hull[1][-1]) + hull[2][-1]
        newhus= numpy.append(hull[6],newhu)
    else:
        # newx lies inside (or left of) the current abscissae: find the
        # insertion position by linear scan.
        indx= 0
        while newx > hull[1][indx]:
            indx=indx+1
        newxs= numpy.insert(hull[1],indx,newx)
        newhxs= numpy.insert(hull[2],indx,newhx)
        newhpxs= numpy.insert(hull[3],indx,newhpx)
        #Replace old z with new zs
        if newx < hull[1][0]:
            # New left-most point: one new intersection with the old first
            # segment, prepended.
            newz= (hull[2][0]-newhx-hull[1][0]*hull[3][0]+newx*newhpx)/(newhpx-hull[3][0])
            newzs= numpy.insert(hull[4],0,newz)
            #Also add the new hu
            newhu= newhpx*(newz-newx)+newhx
            newhus= numpy.insert(hull[6],0,newhu)
        else:
            # Interior point: the old intersection z_{indx-1} is replaced by
            # two new ones (left and right of newx).
            newz1= (newhx-hull[2][indx-1] - newx*newhpx+hull[1][indx-1]*hull[3][indx-1])/(hull[3][indx-1]-newhpx)
            newz2= (hull[2][indx]-newhx - hull[1][indx]*hull[3][indx]+newx*newhpx)/(newhpx-hull[3][indx])
            #Insert newz1 and replace z_old
            newzs= numpy.insert(hull[4],indx-1,newz1)
            newzs[indx]= newz2
            #Update the hus
            newhu1= hull[3][indx-1]*(newz1-hull[1][indx-1])+hull[2][indx-1]
            newhu2= newhpx*(newz2-newx)+newhx
            newhus= numpy.insert(hull[6],indx-1,newhu1)
            newhus[indx]= newhu2
    #Recalculate the cumulative sum
    # Each newscum[j] is the integral of the exponentiated envelope over
    # one segment; the first/last segments depend on whether the domain
    # has a finite lower/upper limit.
    nx= len(newxs)
    newscum= numpy.zeros(nx-1)
    if isDomainFinite[0]:
        newscum[0]= 1./newhpxs[0]*(numpy.exp(newhus[0])-numpy.exp(
            newhpxs[0]*(domain[0]-newxs[0])+newhxs[0]))
    else:
        newscum[0]= 1./newhpxs[0]*numpy.exp(newhus[0])
    if nx > 2:
        for jj in range(nx-2):
            if newhpxs[jj+1] == 0.:
                # Zero slope: segment integral reduces to width * exp(h).
                newscum[jj+1]= (newzs[jj+1]-newzs[jj])*numpy.exp(newhxs[jj+1])
            else:
                newscum[jj+1]=1./newhpxs[jj+1]*(numpy.exp(newhus[jj+1])-numpy.exp(newhus[jj]))
    if isDomainFinite[1]:
        newcu=1./newhpxs[nx-1]*(numpy.exp(newhpxs[nx-1]*(
            domain[1]-newxs[nx-1])+newhxs[nx-1]) - numpy.exp(newhus[nx-2]))
    else:
        newcu=- 1./newhpxs[nx-1]*numpy.exp(newhus[nx-2])
    newcu= newcu+numpy.sum(newscum)
    # Normalize the cumulative sums so hull[5] is a CDF over segments.
    newscum= numpy.cumsum(newscum)/newcu
    newhull=[]
    newhull.append(newcu)
    newhull.append(newxs)
    newhull.append(newhxs)
    newhull.append(newhpxs)
    newhull.append(newzs)
    newhull.append(newscum)
    newhull.append(newhus)
    return newhull
5c40150731ead1abe0e551cc4c924ec61bcb689b
45,546
def check_overlap(bbox1, bbox2):
    """Check whether two boxes overlap.

    Args:
        bbox1: [x1, y1, x2, y2]
        bbox2: [x1, y1, x2, y2]

    Returns:
        bool

    NOTE(review): the docstring of the original claimed support for
    2-element [z1, z2] inputs, but the first comparison indexes
    element 2 — confirm callers always pass 4-element boxes.
    """
    separated_on_x = bbox1[0] > bbox2[2] or bbox2[0] > bbox1[2]
    if separated_on_x:
        return False
    if len(bbox1) > 2:
        separated_on_y = bbox1[1] > bbox2[3] or bbox2[1] > bbox1[3]
        if separated_on_y:
            return False
    return True
2f39989661d421327b4a82da6e9b2fa4ae550575
45,547
def _parse_force_block(lines): """ Parse the block of total forces from the OUTCAR file :param lines: A list of lines containing lines including the TOTAL-FORCE block :returns: A tuple of position and forces """ forces = [] positions = [] istart = len(lines) for idx, line in enumerate(lines): if 'TOTAL-FORCE (eV/Angst)' in line: istart = idx elif idx > istart + 1: if not line.startswith(' -----'): # Still in the block values = list(map(float, line.split())) positions.append(values[:3]) forces.append(values[3:]) else: # Reached the end of the block break return positions, forces
37d9e488097749d4617364e23b296acee1d9bca5
45,548
def use_atomics(loopy_opts):
    """Convenience method to detect whether atomics should be used for
    double-precision floating point operations.

    Useful in that we need to apply atomic modifiers to some
    instructions, but _not_ the sequential specializer.

    Parameters
    ----------
    loopy_opts: :class:`loopy_utils.loopy_opts`
        The loopy options used to create this kernel.

    Returns
    -------
    use_atomics: bool
        Whether an atomic specializer would be returned by
        :meth:`get_deep_specializer`
    """
    is_deep = loopy_opts.depth
    return is_deep and loopy_opts.use_atomic_doubles
6121904e2b5c4d5fb227b1102e0434e73105b64a
45,550
import ast

def filter_block(node_list):
    """Remove no-op code (``pass``), or any code after an unconditional
    jump (``return``, ``break``, ``continue``, ``raise``).

    Returns the original list unchanged when nothing was filtered.
    """
    if len(node_list) == 1:
        return node_list
    terminators = (ast.Return, ast.Break, ast.Continue, ast.Raise)
    filtered = []
    for node in node_list:
        # Exact type checks (not isinstance) preserve the original
        # behavior for any AST subclasses.
        if type(node) is ast.Pass:
            continue
        filtered.append(node)
        if type(node) in terminators:
            break
    return node_list if len(filtered) == len(node_list) else filtered
b88d3e4966e162d3e23e56e622ff47c63165b7e6
45,551
def getownattr(cls, attrib_name):
    """
    Return the value of `cls.<attrib_name>` only if it is defined in the
    class itself (and not inherited). If the attribute is missing or is
    merely inherited, an `AttributeError` is raised.

    >>> class A(object):
    ...     a = 1
    >>>
    >>> class B(A):
    ...     pass
    >>>
    >>> getownattr(A, 'a')
    1
    >>> getownattr(A, 'unknown')
    Traceback (most recent call last):
    ...
    AttributeError: type object 'A' has no attribute 'unknown'
    >>> getownattr(B, 'a')
    Traceback (most recent call last):
    ...
    AttributeError: type object 'B' has no directly defined attribute 'a'
    """
    value = getattr(cls, attrib_name)
    # If any base class holds the identical object, the attribute is
    # inherited rather than defined on cls itself.
    inherited = any(
        getattr(base, attrib_name, None) is value for base in cls.__mro__[1:]
    )
    if inherited:
        raise AttributeError(
            "type object %r has no directly defined attribute %r"
            % (cls.__name__, attrib_name)
        )
    return value
b59acbba4f75492fe52562443b7ca679691e7e10
45,552
def _raw_to_int(raw_data): """Converting list of raw hex values as strings to integers.""" return [int(x, 16) for x in raw_data]
e8ae4784e142bcfa3ba8d7b013871986a1b5173a
45,553
from pathlib import Path
import json

def load_json(filepath: Path) -> dict:
    """Load a JSON file and return the parsed object.

    Args:
        filepath (Path): path to the JSON file.

    Returns:
        dict: object deserialized from the file.
    """
    with open(filepath) as handle:
        return json.load(handle)
5cc66b27a6335e29a540b98b3f29ed79cbbb7777
45,555
def bdev_rbd_register_cluster(client, name, user=None, config_param=None, config_file=None, key_file=None):
    """Create a Rados Cluster object of the Ceph RBD backend.

    Args:
        name: name of Rados Cluster
        user: Ceph user name (optional)
        config_param: map of config keys to values (optional)
        config_file: file path of Ceph configuration file (optional)
        key_file: file path of Ceph key file (optional)

    Returns:
        Name of registered Rados Cluster object.
    """
    params = {'name': name}
    optional = {
        'user_id': user,
        'config_param': config_param,
        'config_file': config_file,
        'key_file': key_file,
    }
    # Only forward options that were actually supplied.
    params.update({key: value for key, value in optional.items() if value is not None})
    return client.call('bdev_rbd_register_cluster', params)
82c43cb070298bd983c9bf74cccfff6ddfeddd31
45,557
import torch

def str_dtype_to_torch_dtype(dtype: str) -> torch.dtype:
    """Converts a string representation of a dtype to the corresponding PyTorch dtype."""
    # Dispatch table instead of an if/elif chain.
    known = {
        "int32": torch.int32,
        "int64": torch.int64,
        "float32": torch.float32,
        "float64": torch.float64,
    }
    try:
        return known[dtype]
    except KeyError:
        raise ValueError(f"Unsupported dtype: {dtype}") from None
ddf64bb7fba63ff0395e08a199fa431cd8750972
45,558
def SAMflags(x):
    """
    Explains a SAM flag.

    :param x: flag
    :returns: complete SAM flag explanation, one string per flag bit
        in ascending bit order
    """
    # (bit, text when set, text when clear) for each SAM flag bit —
    # replaces twelve copy-pasted if/else branches with one table.
    flag_bits = [
        (1, "Read paired", "Read unpaired"),
        (2, "Read mapped in proper pair", "Read not mapped in proper pair"),
        (4, "Read unmapped", "Read mapped"),
        (8, "Mate unmapped", "Mate mapped"),
        (16, "Read reverse strand", "Read direct strand"),
        (32, "Mate reverse strand", "Mate direct strand"),
        (64, "First in pair", "Second in pair"),
        (128, "Second in pair", "First in pair"),
        (256, "Not primary alignment", "Primary alignment"),
        (512, "Read fails platform/vendor quality checks",
              "Read passes platform/vendor quality checks"),
        (1024, "Read is PCR or optical duplicate",
               "Read is not PCR or optical duplicate"),
        (2048, "Supplementary alignment", "Not supplementary alignment"),
    ]
    flags = []
    for bit, set_text, clear_text in flag_bits:
        if x & bit:
            flags.append("1: " + set_text)
        else:
            flags.append("0: " + clear_text)
    return flags
e3d2c1942eac66acd4735cd4590a1905351cbc24
45,559
def bytes_xor(byte_seq1, byte_seq2):
    """
    (bytes, bytes) -> (bytes)

    Do bit level XOR of two byte arrays.

    :param byte_seq1: byte sequence (bytes).
    :param byte_seq2: byte sequence (bytes).
    :return: XOR of the two byte sequences (bytes).
    """
    assert len(byte_seq1) == len(byte_seq2), "Bytes must be of the same length."
    # Build the result in a single pass; the original allocated a
    # 1-byte bytes object per position and joined them, and also
    # rebound the parameter names inside the loop.
    return bytes(a ^ b for a, b in zip(byte_seq1, byte_seq2))
539ca3707c6c07fbd64691a4b317d0d6eb8acef4
45,561
import csv
def get_dataset(inputfile):
    """Return a csv.DictReader over *inputfile* (semicolon-delimited).

    :param inputfile: path to the CSV file; the first row is used as
        the field names.
    :returns: a ``csv.DictReader`` yielding one dict per data row.

    NOTE(review): the handle from ``open()`` is never closed explicitly;
    it stays open for the lifetime of the returned reader and is only
    reclaimed by garbage collection. Closing it here would break
    iteration, so consider having callers manage the file object or
    returning a fully materialized list instead.
    """
    handle = csv.DictReader(open(inputfile, "r"), delimiter=";")
    return handle
64fc0a706e4576fb67ed3c070478ec30fdfe59b8
45,562
import os
def normpath(d):
    """Normalize *d* into an absolute, canonical path."""
    collapsed = os.path.normpath(d)
    return os.path.abspath(collapsed)
ed8abb4f14ac6974dcf68f7ec4de8f26f2e74ea4
45,563
def abbreviation(lng):
    """Return a two-letter abbreviation for a language name.

    'Türkçe' maps to 'tr'; any other name yields its first two
    letters, lowercased.

    lng (str): Language name.
    """
    return 'tr' if lng == 'Türkçe' else lng[:2].lower()
106b5deb6db691a2cb8c5cb63046e3b5acde3dfb
45,565
def dBm2W(W):
    """Converts an arbitrary power `W` in dBm to W."""
    exponent = (W - 3) / 10
    return 10 ** exponent
278f43aac26f5e38ef9ab4e73acb6496dedcb0f7
45,566
import torch
def distortion_to_3d_conversion(x, y, distortion_func, params):
    """Model image distortion as a warp of the image manifold.

    Applies the displacement produced by *distortion_func* to (x, y)
    and returns the warped coordinates with a zero z component.
    """
    offset_x, offset_y = distortion_func(x, y, params)
    z = torch.zeros_like(x)
    return x + offset_x, y + offset_y, z
68a6fb7c9f849b44a280521e0f83f4f54eb8cfa1
45,567
import os
import re
import platform

def normalize(exception):
    """Normalize exception output for reproducible test cases."""
    # os.name is always a non-empty string, so this block always runs;
    # the substitutions must stay in this order (path slashes before
    # the generic hex rewrite).
    if os.name:
        # Forward-slash any backslashes inside 'File "...py"' references.
        exception = re.sub(
            r'File[^"]+"[^"]+\.py[^"]*"',
            lambda match: match.group().replace("\\", "/"),
            exception,
        )
        # Canonicalize line endings.
        exception = re.sub(r"(\r\n|\r|\n)", "\n", exception)
        # Strip machine-specific path prefix for the usersite fixture.
        exception = re.sub(r'"[^"]*usersite/lib.py"', '"usersite/lib.py"', exception)
        # Replace volatile object addresses.
        exception = re.sub(r"\b0x[0-9a-fA-F]+\b", "0xDEADBEEF", exception)
    if platform.python_implementation() == "PyPy":
        # PyPy prints builtin methods differently from CPython.
        pypy_fixups = [
            ("<function str.isdigit at 0xDEADBEEF>",
             "<method 'isdigit' of 'str' objects>"),
            ("<function coroutine.send at 0xDEADBEEF>",
             "<method 'send' of 'coroutine' objects>"),
            ("<function NoneType.__bool__ at 0xDEADBEEF>",
             "<slot wrapper '__bool__' of 'NoneType' objects>"),
        ]
        for before, after in pypy_fixups:
            exception = exception.replace(before, after)
    return exception
78244ebd2dd29d50df00f0dbc7449d47cef57771
45,568
def buildTADkey(gwas_snp):
    """
    gwas_snp - a single row subset of the TAD-GWAS input file

    output - the lookup key into the TAD gene dictionary,
    i.e. 'chr<Chromosome>:<TAD_ID>:<TADStart>-<TADEnd>'
    """
    chromosome = gwas_snp['chrom'].replace('chr', '')
    tad_start = int(gwas_snp['TADStart'])
    tad_end = int(gwas_snp['TADEnd'])
    tad_id = int(gwas_snp['TADidx'])
    return 'chr{}:{}:{}-{}'.format(chromosome, tad_id, tad_start, tad_end)
9226406335dcbf5e54fef8de57c6ec1c6b150528
45,569
import json
def json_of_response(res):
    """Decode the JSON body from a response object's raw data."""
    body = res.data.decode('utf8')
    return json.loads(body)
8c53a8a283994cf8b16e9d759d6fcdaa35731b04
45,570
def get_frame(epoch, step_size, frame_number):
    """Crop an epoch to the time window for one animation frame.

    Args:
        epoch: mne.epochs.Epochs
            Epoch to crop
        step_size: int
            Number of time frames per step
        frame_number: int
            Current frame number

    Returns:
        mne.epochs.Epochs: cropped copy of the epoch for this frame
    """
    times = epoch.times
    last_index = len(times) - 1
    start = frame_number * step_size
    # Clamp the window end so we never index past the final sample.
    stop = min((frame_number + 1) * step_size, last_index)
    return epoch.copy().crop(times[start], times[stop], include_tmax=True)
23c3b730eaf4ac369ff91e2a16f92fc18f4209a5
45,571
def get_tokens(text_element):
    """Get the tokens annotated by the NewsReader pipeline.

    @param text_element: the <text> element of a NAF document produced
        by the NewsReader pipeline; each child carries 'sent', 'id',
        'offset' and 'length' attributes plus the token text.
    @return: (tokens, tokens_to_offset) — tokens is a list of dicts,
        tokens_to_offset maps token text to a list of
        (start, end) character offsets.
    """
    tokens = []
    tokens_to_offset = {}
    for element in text_element:
        sentence_num = int(element.attrib["sent"])
        token_id = element.attrib["id"]
        start = int(element.attrib["offset"])
        # end offset is inclusive.
        end = start + int(element.attrib["length"]) - 1
        text = element.text
        tokens.append({
            "token": text,
            "id": token_id,
            "sentence_num": sentence_num,
            "char_start_offset": start,
            "char_end_offset": end,
        })
        tokens_to_offset.setdefault(text, []).append((start, end))
    return tokens, tokens_to_offset
b54f8b633ba150dd90803c8a66a1c15ecd628872
45,572
def full_request_url(base, text, wildcards=None):
    """
    Build a full request URL from the API URL and endpoint.

    Any wildcard key found in *text* is replaced with its configured
    value.

    Args:
        base: API base URL.
        text: endpoint path, possibly containing wildcard keys.
        wildcards: mapping of placeholder -> replacement value
            (default: no replacements).

    Returns:
        The concatenated URL string.
    """
    # None default instead of a mutable {} default argument; iterate
    # items() rather than re-looking up each key.
    for key, value in (wildcards or {}).items():
        text = text.replace(key, str(value))
    return str(base) + str(text)
217d921666a0cfa9ddd3fad09469085425398182
45,573
def collides_with_existing_words(word, line, column, direction, grid): """ Returns whether the given word collides with an existing one. """ for k, letter in enumerate(list(word)): if direction == "E": # Collisions if grid[line][column+k] != 0 and grid[line][column+k] != letter: return True if direction == "S": # Collisions if grid[line+k][column] != 0 and grid[line+k][column] != letter: return True return False
0e8863f725e29b81d9123f29be343cc55f339840
45,574
import requests

def getGitHub(user: str, repo: str, request="content", path="", branch="master"):
    """
    Obtain metadata from GitHub.

    Parameters
    ----------
    user : str
    repo : str
    request : str, optional
        Choose from "content", "date", "version". Default is "content".
    path : str, optional
        Default is "".
    branch : str, optional
        Default is "master".

    Returns
    -------
    str
        Raw-content URL for "content", last-commit ISO date for "date",
        last-commit sha for "version"; None for any other request.
    """
    raw_content = (
        "https://raw.githubusercontent.com/{user}/{repo}/{branch}/{path}".format(
            user=user, repo=repo, branch=branch, path=path
        )
    )
    # The raw-content URL needs no API round trip, so only hit the
    # commits API when commit metadata is actually requested (the
    # original always fetched commits, wasting a network call and
    # GitHub rate-limit quota).
    if request == "content":
        return raw_content
    api = (
        "https://api.github.com/repos/{user}/{repo}/commits?sha=master&path={path}"
        .format(user=user, repo=repo, path=path)
    )
    commits = requests.get(api).json()
    if request == "date":
        return commits[0]["commit"]["author"]["date"]
    if request == "version":
        return commits[0]["sha"]
1c278bd86bd30d9188ba95174ae3961fd8cd1aac
45,575
def process_data(batch_size, max_seq_len, tokenizer):
    """process input cloze sentences

    Builds a demo batch for a BERT-style masked-LM: every '_' in the
    hard-coded input sentences becomes a '[MASK]' token to predict.

    Args:
        batch_size: the batch is padded up to this size by repeating
            the first example.
        max_seq_len: sequences are truncated/padded to this length.
        tokenizer: object exposing ``tokenize(text)`` and
            ``convert_tokens_to_ids(tokens)`` (BERT-style tokenizer).

    Returns:
        (input_ids, input_mask, input_text, to_predict), where
        ``to_predict[i]`` lists the masked token positions of
        sentence i (including the leading '[CLS]' offset).
    """
    input_text = ["What a _ day !", "Effective transformer is _ fast !"]
    # tokenize, wrapping each sentence in [CLS] ... [SEP]
    raw_tokens = [['[CLS]'] + tokenizer.tokenize(text) + ['[SEP]'] for text in input_text]
    # mask blanks to fill, remembering their positions
    to_predict = [[] for _ in range(len(input_text))]
    for i in range(len(raw_tokens)):
        for j in range(len(raw_tokens[i])):
            if raw_tokens[i][j] == '_':
                raw_tokens[i][j] = '[MASK]'
                to_predict[i].append(j)
    # padding: truncate to max_seq_len, build the attention mask from the
    # real length, then pad the tokens with [PAD]
    # NOTE(review): truncation happens after mask positions are recorded,
    # so to_predict could reference positions beyond max_seq_len if a
    # blank falls past the cut — confirm inputs always fit.
    input_tokens = [tokens[:max_seq_len] for tokens in raw_tokens]
    input_mask = [[1] * len(tokens) + [0] * (max_seq_len - len(tokens)) for tokens in input_tokens]
    input_tokens = [tokens + ['[PAD]'] * (max_seq_len - len(tokens)) for tokens in input_tokens]
    # tokens to ids
    input_ids = [tokenizer.convert_tokens_to_ids(tokens) for tokens in input_tokens]
    # pad the batch up to batch_size by repeating the first example
    input_ids += [input_ids[0] for _ in range((batch_size - len(input_ids)))]
    input_mask += [input_mask[0] for _ in range((batch_size - len(input_mask)))]
    return input_ids, input_mask, input_text, to_predict
064817163f2c4d764d5aa3f8fd093082e68409f3
45,578
import hmac
import hashlib

def HMAC_MD5(key, data):
    """
    @summary: classic HMAC algorithm with MD5 sum
    @param key: {bytes} secret key
    @param data: {bytes} message to authenticate
    """
    mac = hmac.new(key, data, hashlib.md5)
    return mac.digest()
601595073554175e21caac49dc160357ac976e8e
45,579
def _verify_classifiers(classifiers, valid_classifiers): """Check classifiers against a set of known classifiers""" invalid = classifiers - valid_classifiers return ["Unrecognised classifier: {!r}".format(c) for c in sorted(invalid)]
64259f25b769361ddafcb83e63845de0d052c88c
45,580
def extract_csv_row(filename: str, row: int) -> str:
    """Extracts a selected line from the csv file.

    Args:
        filename: A path to the file.
        row: The 1-based row number to extract.

    Returns:
        The row from the csv file as a string, without surrounding
        newlines.
    """
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    # Slice-then-index preserved from the original (so out-of-range
    # behavior is unchanged).
    return lines[row - 1:row][0].strip('\n')
19f72a462e676675c192f3611d3bb46a8aecc887
45,582
def remove_empty_buckets(json_data):
    """Remove empty buckets in place and return *json_data*.

    A bucket is empty when its 'dataset' holds exactly one entry whose
    'point' list is empty.
    """
    if 'bucket' not in json_data:
        return json_data
    buckets = json_data['bucket']
    # Walk backwards so deletions don't shift pending indices.
    for position in range(len(buckets) - 1, -1, -1):
        datasets = buckets[position]['dataset']
        if len(datasets) == 1 and datasets[0]['point'] == []:
            del buckets[position]
    return json_data
2f1ac471a616da2ba0376ff0cffc2196228d084b
45,583