Dataset schema: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
from typing import Dict
from datetime import datetime

def inject_now() -> Dict:
    """Inject current datetime into request context."""
    return dict(request_datetime=datetime.now())
e186d4b478a5da5bcdf83e2e34cd381de8d039be
697,860
def _make_eof_producer(producer):
    """
    Send a special EOF byte sequence to terminate the request.

    Args:
        producer (Callable[[str, socket.socket], None]): Request producer.

    Returns:
        Callable[[str, socket.socket], None]: Request producer that sends
        'END-OF-FILE' at end of request.
    """
    def new_producer(audio_input, sock):
        producer(audio_input, sock)
        sock.sendall(b'END-OF-FILE')
    return new_producer
9c035b1f790cab96c9cffc91e6dcf8a13017a4cc
697,861
def in_key_rd_dicts(key_rd_dict, key_rd_dicts):
    """Return True if key_rd_dict is contained in the list of key_rd_dicts."""
    k = next(iter(key_rd_dict))  # dict.keys() is not indexable in Python 3
    v = key_rd_dict[k]
    for rd_dict in key_rd_dicts:  # renamed to avoid shadowing the parameter
        for key, val in rd_dict.items():
            if key == k and val == v:
                return True
    return False
04c691a1a78834c7d660ae3036e97ea0e78393de
697,862
def medical_covered_by_1(responses, derived):
    """Return whether the children are covered under Claimant 1's plan."""
    if responses.get('medical_coverage_available', 'NO') == 'YES':
        return 'My plan' in responses.get('whose_plan_is_coverage_under', '')
    return False
a5116dbe07fa97e95a349acfe926b228a6829488
697,863
import re

def split_text(text, pattern=r";|。|;|,|,"):
    """Split text by pattern.

    Args:
        text (str): text
        pattern (regexp, optional): expression. Defaults to r";|。|;|,|,".

    Returns:
        list: text split by pattern

    Examples:
        >>> s = String()
        >>> text = "收快递的时候最怕收不到货,所以购物的时候一定要把地址写清楚,这样才会精准的送到你手里,我告诉大家以后怎么写:“本宇宙-拉尼凯亚超星系团-室女座星系团-本星系群-银河系-猎户臂-太阳系-第三行星-地球-亚洲板块-中国-xxx-xxx-xxx”这样可以保证不会送到其他宇宙去"
        >>> s.split_text(text=text)
        ['收快递的时候最怕收不到货', '所以购物的时候一定要把地址写清楚', '这样才会精准的送到你手里', '我告诉大家以后怎么写:“本宇宙-拉尼凯亚超星系团-室女座星系团-本星系群-银河系-猎户臂-太阳系-第三行星-地球-亚洲板块-中国-xxx-xxx-xxx”这样可以保证不会送到其他宇宙去']
    """
    txts = re.split(pattern, text)
    return txts
080b63c947f5b9aca749e2ec4981f64c09e2ad43
697,864
def mixedomatic(cls):
    """Mixed-in class decorator."""
    classinit = cls.__dict__.get('__init__')  # Possibly None.

    # Define an __init__ function for the class.
    def __init__(self, *args, **kwargs):
        # Call the __init__ functions of all the bases.
        for base in cls.__bases__:
            base.__init__(self, *args, **kwargs)
        # Also call any __init__ function that was in the class.
        if classinit:
            classinit(self, *args, **kwargs)

    # Make the local function the class's __init__.
    setattr(cls, '__init__', __init__)
    return cls
3891a748a5cd6ee5e31c1441a7645343c0862135
697,865
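A short usage sketch for the `mixedomatic` decorator above; the `Base1`/`Base2`/`Combined` classes are hypothetical:

class Base1:
    def __init__(self, *args, **kwargs):
        self.a = 1

class Base2:
    def __init__(self, *args, **kwargs):
        self.b = 2

@mixedomatic
class Combined(Base1, Base2):
    def __init__(self, *args, **kwargs):
        # Runs after both base __init__ methods have been called.
        self.c = self.a + self.b

obj = Combined()
assert (obj.a, obj.b, obj.c) == (1, 2, 3)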
def sphere_sre(solution):
    """
    Variant of the sphere function. Dimensions except the first 10 ones
    have limited impact on the function value.
    """
    bias = 0.2
    x = solution.get_x()
    x1 = x[:10]
    x2 = x[10:]
    value1 = sum([(i - bias) * (i - bias) for i in x1])
    value2 = 1 / len(x) * sum([(i - bias) * (i - bias) for i in x2])
    return value1 + value2
38987c77a6586a0bfab1d94cc4a1511e9418349f
697,867
from typing import Union
from datetime import datetime

import pytz

def localnow(tz: Union[pytz.BaseTzInfo, str] = "US/Central") -> datetime:
    """
    Get the current datetime as a localized datetime object with timezone
    information.

    Keyword Arguments:
        tz {Union[pytz.BaseTzInfo, str]} -- localize datetime to this
            timezone (default: "US/Central")

    Returns:
        datetime -- localized datetime with tzinfo
    """
    if isinstance(tz, str):
        tz = pytz.timezone(tz)
    return datetime.now().astimezone(tz)
3dd9132c4adebacf5d348ec34523584642f7140b
697,868
def get_increment(msg_list):
    """
    :param msg_list: list of on/off midi signals
    :return: minimum time of all signals
    """
    result = float('inf')
    for msg in msg_list:
        if msg.time != 0.0 and msg.time < result:
            result = msg.time
    return result
5b41d4a26b91c6c98d191867f0a968b1a389ba2b
697,869
import timeit

def tic():
    """
    Default timer.

    Example:
        t = tic()
        ... code
        elapsed = toc(t)
        print('{0}: {1:.4f}ms'.format(message, elapsed))
    """
    global lastticstamp
    t = timeit.default_timer()
    lastticstamp = t
    return t
7b68cde350e931d9e8f479151ed3c142298a6366
697,870
from typing import List, Tuple

import torch

def collate_fn(batch: List[torch.Tensor]) -> Tuple[Tuple[torch.Tensor]]:
    """Collate a batch by transposing it: a list of per-sample tuples
    becomes a tuple of per-field tuples.

    Args:
        batch (List[torch.Tensor]): samples to collate.

    Returns:
        Tuple[Tuple[torch.Tensor]]: the transposed batch.
    """
    return tuple(zip(*batch))
798d12485f5e1b258b2bd3fe7137bfe1fe352a1b
697,871
import torch

def threshold_mask(weights, threshold):
    """Create a threshold mask for the provided parameter tensor using
    magnitude thresholding.

    Arguments:
        weights: a parameter tensor which should be pruned.
        threshold: the pruning threshold.
    Returns:
        prune_mask: The pruning mask.
    """
    return torch.gt(torch.abs(weights), threshold).type(weights.type())
43ca3f018f047e2ac46a83c36c368798b69e104a
697,874
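A minimal usage sketch for the `threshold_mask` function above; the weight values are made up:

import torch

w = torch.tensor([-0.9, 0.1, 0.5, -0.05])
mask = threshold_mask(w, threshold=0.3)
# Only elements with |w| > 0.3 survive pruning:
# mask == tensor([1., 0., 1., 0.])
pruned = w * mask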
def sigmoid(attrs, in_xlayers):
    """Return sigmoid registration information (shape)."""
    assert len(in_xlayers) == 1
    shape = in_xlayers[0].shapes[:]
    return {'shape': shape}
5637c532ed9eda6ec14abac0cf5e360a47c7333b
697,875
def get_answer(file_answer):
    """Read the answer file and return the first ';'-separated field of each line."""
    with open(file_answer, 'r') as f:
        return [x.split(';')[0].rstrip() for x in f.readlines()]
0acfa654a328a5d4ac3a7c5cd291577be52b707b
697,876
def string_is_yes(string, default=None):
    """
    Mapping of a given string to a boolean.

    If it is empty or None (evaluates to False), and `default` is set,
    `default` is returned.
    If the lowercase of the string is any of ['y', '1', 'yes', 'true', 'ja'],
    it will return `True`. Else it will return `False`.

    :param string: The input
    :type  string: str

    :param default: default result for empty input
    :type  default: None | bool

    :return: result (True/False/default)
    :rtype: bool
    """
    if not string and default is not None:
        return default
    # end if
    return string.lower() in ['y', '1', 'yes', 'true', 'ja']
68482f78f8d4464891f14c19953e1a4785fe0811
697,877
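A few illustrative calls for the `string_is_yes` function above:

assert string_is_yes("Yes") is True
assert string_is_yes("nope") is False
assert string_is_yes("", default=True) is True  # empty input falls back to default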
import argparse

def default_tool_argparser(description, example_parameters):
    """Create default parser for single tools."""
    epilog = '\n'
    for k, v in sorted(example_parameters.items()):
        epilog += ' ' + k + '\n'
    p = argparse.ArgumentParser(
        description=description,
        add_help=False,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=('available values for examples (exkey):' + epilog))
    return p
8a90854d432f134047d402ac9639790ac0c8a15e
697,878
import os

def get_files_by_ext(dirname, *exts):
    """Returns list of paths to files with extensions in *exts.

    - dirname - path to directory
    - *exts - file extensions to filter on
    """
    fnames = [f for f in os.listdir(dirname) if os.path.splitext(f)[-1] in exts]
    return [os.path.join(dirname, f) for f in fnames]
7b6179a448517bc8558bf9f87b128c3a79f37f48
697,879
def look_and_say(string):
    """
    Expand a string of digits according to the look-and-say process in the
    problem.

    Keyword arguments:
    string --- a string to be processed
    """
    result = ""
    last = ""
    count = 0
    for digit in string:
        if digit != last:
            if count:
                result += str(count) + last
            last = digit
            count = 1
        else:
            count += 1
    if count:
        result += str(count) + last
    return result
cd8fcce76302cdb1c9af10d9481559c1eb098e43
697,880
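A quick check of the `look_and_say` function above on the classic sequence:

assert look_and_say("1") == "11"        # one 1
assert look_and_say("11") == "21"       # two 1s
assert look_and_say("21") == "1211"     # one 2, one 1
assert look_and_say("1211") == "111221" # one 1, one 2, two 1s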
import torch

def rel_to_rect(bb, sz_norm=None):
    """Inverts the effect of rect_to_rel. See above."""
    sz = torch.exp(bb[..., 2:])
    if sz_norm is None:
        c = bb[..., :2] * sz
    else:
        c = bb[..., :2] * sz_norm
    tl = c - 0.5 * sz
    return torch.cat((tl, sz), dim=-1)
ed2c31d1add58b1d5af843159eab4fa20471f76d
697,881
def arrays_avg(values_array, weights_array=None):
    """
    Computes the mean of the elements of the array.

    Parameters
    ----------
    values_array : array.
        The numbers used to calculate the mean.
    weights_array : array, optional, default None.
        Used to calculate the weighted average, indicates the weight of
        each element in the array (values_array).

    Returns
    -------
    result : float.
        The mean of the array elements.
    """
    n = len(values_array)
    if weights_array is None:
        weights_array = [1] * n
    elif len(weights_array) != n:
        raise ValueError(
            "values_array and weights_array must have the same number of rows"
        )
    result = 0
    for i, j in zip(values_array, weights_array):
        result += i * j
    # Divide by the sum of the weights (equal to n in the unweighted case)
    # so the weighted case is a proper weighted mean.
    return result / sum(weights_array)
2f24da412c548c1da53338f2f66d57d18473f798
697,882
import argparse

def check_rate(arg):
    """Confirm rate is in the [0.25, 4.0] interval."""
    rate = float(arg)
    if not 0.25 <= rate <= 4.0:
        msg = (
            "TTS rate only supported for floats in the range 0.25 (slow) to "
            "4.0 (fast). Default is 1.0."
        )
        raise argparse.ArgumentTypeError(msg)
    return rate
90919858dfd40319e60436b229652cef14a7b1ba
697,883
import re

def identify_all_potential_shifts(mut_desc, chain_sequence, allowed_shifts):
    """Identify all potential shifts."""
    md_re = r'(?P<wt>[A-Z])(?P<pos>[0-9]+)(?P<mut>[A-Z\(\)]+)$'
    saved_shifts = []
    for shift in allowed_shifts:
        proposed_shift = shift
        for mut_desc_single in mut_desc.split('+'):
            search = re.search(md_re, mut_desc_single)
            if search is None:
                return None
            WT = search.group('wt')
            num = int(search.group('pos')) - 1  # 0 index
            mut = search.group('mut')
            if (num + proposed_shift >= len(chain_sequence)
                    or num + proposed_shift < 0
                    or chain_sequence[num + proposed_shift] != WT):
                proposed_shift = 0
                break
        if proposed_shift != 0:  # found a shift that works
            saved_shifts.append(proposed_shift)
    return saved_shifts
4938a498ed696e590892f701a393d74e5d1786b9
697,884
def issue_dictionary_from(column, issue) -> dict:
    """Map issue to dictionary with field names."""
    return {
        "project column": column.name,
        "issue title": issue.title,
        "issue description": f"{issue.body}\n\n---\n\n{issue.html_url}",
        "labels": ";".join(
            f"'{label.name}'" for label in issue.original_labels
        ),
    }
eae9e6cdf7ea3d715ad2e1dc8c31672c8c53762e
697,885
def age_bin(age, labels, bins):
    """
    Return a label for a given age and bin.

    Argument notes:
        age -- int
        labels -- list of strings
        bins -- list of tuples, with the first tuple value being the
            inclusive lower limit, and the higher tuple value being the
            exclusive upper limit
    """
    for x in range(len(bins)):
        if bins[x][0] <= age < bins[x][1]:
            return labels[x]
9caccc667b55f66824bcf8802161384590cc2a08
697,886
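An illustrative call for the `age_bin` function above; the bin edges and labels are made up:

labels = ["child", "adult", "senior"]
bins = [(0, 18), (18, 65), (65, 130)]
assert age_bin(30, labels, bins) == "adult"
assert age_bin(65, labels, bins) == "senior"  # lower limit is inclusive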
import argparse

def parse_arguments():
    """
    Parses the command line arguments and returns the namespace with those
    arguments.

    :type return: Namespace object
    :param return: The Namespace object containing the values of all
        supported command-line arguments.
    """
    arg_parse = argparse.ArgumentParser()
    arg_parse.add_argument("-d", "--dir", default="./grurnn",
                           help="Directory for storing logs.")
    arg_parse.add_argument("-f", "--filename", default="grurnn.log",
                           help="Name of the log file to use.")
    arg_parse.add_argument("-e", "--epochs", default=10, type=int,
                           help="Number of epochs for which to train the RNN.")
    arg_parse.add_argument("-m", "--max", default=None, type=int,
                           help="The maximum number of examples to train on.")
    arg_parse.add_argument("-p", "--patience", default=100000, type=int,
                           help="Number of examples to train before evaluating"
                                " loss.")
    arg_parse.add_argument("-t", "--test", action="store_true",
                           help="Treat run as test, do not save models")
    arg_parse.add_argument("-l", "--learn_rate", default=0.005, type=float,
                           help="The learning rate to be used in training.")
    arg_parse.add_argument("-o", "--model", default=None,
                           help="Previously trained model to load on init.")
    arg_parse.add_argument("-a", "--anneal", type=float, default=0.00001,
                           help="Sets the minimum possible learning rate.")
    arg_parse.add_argument("-s", "--dataset", default="./datasets/stories.pkl",
                           help="The path to the dataset to be used in"
                                " training.")
    arg_parse.add_argument("-r", "--truncate", type=int, default=100,
                           help="The backpropagate truncate value.")
    arg_parse.add_argument("-i", "--hidden_size", type=int, default=100,
                           help="The size of the hidden layers in the RNN.")
    arg_parse.add_argument("-b", "--embed_size", type=int, default=100,
                           help="The size of the embedding layer in the RNN.")
    return arg_parse.parse_args()
2b3a8b32d430417c18845e89ec096a8c0ac6365e
697,887
def list_intersect(first, second):
    """
    Returns elements found in first that are also in second.

    :param first:
    :param second:
    :return:
    """
    second = set(second)
    return [item for item in first if item in second]
74cf1fc791a57c299f7fcf2369c105d68bda772e
697,888
def process_covid_csv_data(covid_csv_data: list) -> tuple:
    """Function takes a list of data from an argument called covid_csv_data,
    and returns three variables: the number of cases in the last 7 days, the
    current number of hospital cases and the cumulative number of deaths, as
    contained in the given csv file."""
    last_7_days_cases = 0  # initiate variables
    total_hospitalised = 0
    total_deaths = 0
    deaths_updated = False  # flags to find the newest data
    current_hospitalised_updated = False
    for i in range(1, len(covid_csv_data)):  # skip names of columns
        day = covid_csv_data[i]
        day = day.split(",")
        if 2 < i < 10:  # ignore first and second day that are incomplete
            day_cases = int(day[-1])
            last_7_days_cases += day_cases
        try:
            day_hospitalised = int(day[5])
        except ValueError:  # No data for this day
            day_hospitalised = False
        if day_hospitalised and (not current_hospitalised_updated):
            current_hospitalised_updated = True  # unflag flag
            total_hospitalised = day_hospitalised
        try:
            cum_death = int(day[4])
        except ValueError:  # Date has not yet been updated
            cum_death = False
        if cum_death and (not deaths_updated):
            deaths_updated = True  # unflag flag
            total_deaths = cum_death
        if i >= 10 and deaths_updated and current_hospitalised_updated:
            # check whether all information has been found
            break
    return last_7_days_cases, total_hospitalised, total_deaths
1fd7937656d25072aaba9a0cbf0ab16919f91b38
697,889
import io
import base64

def image_file_to_b64(image_file: io.BytesIO) -> bytes:
    """
    Encodes an image file as Base64.

    To obtain the stringified Base64 version of the image, you can convert
    the output like so:

    ````python
    image_file_to_b64(my_image_file).decode()
    ````

    Arguments:
        image_file: The BytesIO file object to be converted.

    Returns:
        Bytes representation of the Base64 encoded image.
    """
    return base64.b64encode(image_file.getvalue())
2606f21fca825ea22c06f6372576110c64dc511c
697,890
def search_theme(plot):
    """
    >>> search_theme(['get up', 'discussion'])
    ['loss', 'loss']
    """
    plot = ['loss', 'loss']
    return plot
9eb2104ba2f31a0386eccb13a3b1ad15a74f7fe6
697,891
import json

def get_pretty_print(json_object):
    """Pretty-print a JSON-serializable object."""
    return json.dumps(json_object, sort_keys=True, indent=4,
                      separators=(',', ': '))
2539d3572c260e4b4fe2061d4e3f6112d7ee8dd5
697,892
def base41_encode(input):
    """Encode an array of bytes to a Base41 string.

    input is the array of bytes to encode.

    The encoded string is returned. If input has an odd number of bytes,
    a TypeError is raised.
    """
    rslt = ""
    i = 0
    while i + 1 < len(input):
        x = input[i] + 256 * input[i + 1]
        rslt += chr((x % 41) + 41)
        x //= 41
        rslt += chr((x % 41) + 41) + chr((x // 41) + 41)
        i += 2
    if i != len(input):
        raise TypeError("Invalid input length for Base41 encoding")
    return rslt
46ef331084a9932d8bd7ccf574dd9f5afa9face2
697,893
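A sanity check for the `base41_encode` function above; the expected output was computed by hand from the little-endian base-41 digits:

data = bytes([1, 0])        # one 16-bit word: 1 + 256 * 0 = 1
# 1 has base-41 digits (1, 0, 0), which map to chr(1 + 41),
# chr(0 + 41), chr(0 + 41) = '*', ')', ')'
assert base41_encode(data) == "*))"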
from typing import Dict, Tuple

def read_multiscale_params(cfg: Dict[str, dict]) -> Tuple[int, int]:
    """
    Returns the multiscale parameters.

    :param cfg: configuration
    :type cfg: dict
    :return:
        - num_scales: number of scales
        - scale_factor: factor by which each coarser layer is downsampled
    :rtype: tuple(int, int)
    """
    if "multiscale" in cfg:
        # Multiscale processing in conf; `multiscale` is assumed to be
        # imported from the surrounding package.
        multiscale_ = multiscale.AbstractMultiscale(**cfg["multiscale"])  # type: ignore
        num_scales = multiscale_.cfg["num_scales"]
        scale_factor = multiscale_.cfg["scale_factor"]
    else:
        # No multiscale selected
        num_scales = 1
        scale_factor = 1
    return num_scales, scale_factor
8021cc90c4a4740854a92a7d56524d7037e01988
697,894
import time

def timeMS():
    """Gives a count of milliseconds from 1.1.1970.

    Returns:
        int: Milliseconds from 1.1.1970
    """
    return round(time.time() * 1000)
c42a220499c528ecde869e2c74dc383a84c71ea4
697,897
from typing import Any, Dict
import os

def file_parser(file_path: str, key_col: int, key_type: Any, val_col: int,
                val_type: Any, delimiter: str = ',',
                rows_to_skip: int = 0) -> Dict[Any, Any]:
    """Parses a text file and returns a data dictionary of the form
    d[key] = val.

    Args:
        file_path: str path to file
        key_col: index of column containing keys
        key_type: all keys will be cast to this type
        val_col: index of column containing values
        val_type: all values will be cast to this type
        delimiter: separator between columns (defaults to comma)
        rows_to_skip: skip this number of rows at the beginning (defaults to 0)

    Returns:
        Dict[key_type, val_type]: dictionary of parsed data
    """
    assert os.path.isfile(file_path), '{} not found'.format(file_path)
    data = dict()
    with open(file_path) as f:
        i = 0
        for line in f:
            if i < rows_to_skip:
                i += 1
                continue
            split_line = line.rstrip().split(sep=delimiter)
            data[key_type(split_line[key_col])] = val_type(split_line[val_col])
    return data
a37eb14d427fa51e386397aa183228946b2f3bd6
697,898
import torch

def predict(data_loader, encoder, dfc, device='cpu', encoder_type='vae'):
    """
    Args:
        data_loader: iterable of data loaders to evaluate
        encoder: encoder network
        dfc: clustering head applied to the encoded features
        device: device to run inference on
        encoder_type: 'vae' encoders return a tuple, so only the first
            element is used; other encoders return the features directly

    Returns:
        feature, label
    """
    features = []
    labels = []
    encoder.eval()
    dfc.eval()
    with torch.no_grad():
        for loader in data_loader:
            for idx, (img, label) in enumerate(loader):
                img = img.to(device)
                feat = dfc(encoder(img)[0]) if encoder_type == 'vae' else dfc(encoder(img))
                features.append(feat.detach())
                labels.append(label)
    return torch.cat(features).max(1)[1], torch.cat(labels).long()
1369ebb5d24a14371da6af5765b741cff2d42d35
697,899
def get_image_type(data):
    """Return a tuple of (content type, extension) for the image data."""
    # JPEG files start with the bytes FF D8; compare against a bytes
    # literal so the check works on Python 3 byte strings.
    if data[:2] == b"\xff\xd8":
        return ("image/jpeg", ".jpg")
    return ("image/unknown", ".bin")
1677cb375a7b405b8a229f1246bdb27ed36dc654
697,900
def parse_conjugations(group):
    """
    Parse conjugations in a word group.

    :param group: string of a word group
    :return: list of parsed conjugations
    """
    return list(
        map(
            lambda x: x.split('/')[0].strip(),
            group.split(' – ')
        )
    )
0a1eca118a1f194a51a889a0fe30fe72488d6ada
697,901
import tempfile
import os
import subprocess

def make_csr_from_key(private_key):
    """Create a certificate signing request (CSR) from the given private key."""
    (csr_fd, csr_file) = tempfile.mkstemp()
    os.close(csr_fd)
    # Write private key to temporary file
    (key_fd, key_file) = tempfile.mkstemp()
    os.write(key_fd, private_key)
    os.close(key_fd)
    csr_request_args = ['/usr/bin/openssl', 'req', '-new',
                        '-key', key_file,
                        '-out', csr_file,
                        '-batch']
    subprocess.call(csr_request_args)
    os.unlink(key_file)
    return private_key, csr_file
645b89f18d920481926d1988c75f9f9660905342
697,902
def read_thresholds(path):
    """Read the p-value thresholds for each tumour type."""
    thresholds = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            line = line.split('\t')
            tumour = line[0]
            threshold = float(line[1])
            thresholds[tumour] = threshold
    return thresholds
2615cd3b38e4ce1d80baacd06cf955895f0984ee
697,904
def _one_contains_other(s1, s2):
    """Whether one set contains the other."""
    return min(len(s1), len(s2)) == len(s1 & s2)
f7313cc0cbee823e39c30da22455e228162555be
697,905
import os

def write_mets(http_response, package_uuid, subdir):
    """Given a http response containing our METS data, create the path we
    want to store our METS at, and then stream the response into a file.
    """
    mets_file = "METS.{}.xml".format(package_uuid)
    download_file = os.path.join(subdir, mets_file)
    with open(download_file, "wb") as file:
        file.write(http_response.content)
    return download_file
f13236f7524486c7ad3be5fbcd07daedf4bf5bd4
697,906
def linear_regression_line(mb):
    """
    Given the output of the ``linear_regression()`` function, or provided
    with a tuple of ``(m, b)``, where ``m`` is the slope and ``b`` is the
    intercept, ``linear_regression_line()`` returns a function that
    calculates y values based on given x values.

    Args:
        mb: A list or tuple of [m, b] or (m, b) where m is the slope and
            b is the y intercept.

    Returns:
        A function that accepts ints, floats, lists, or tuples of x values
        and returns y values.

    Examples:
        >>> linear_regression_line(linear_regression([0, 1], [0, 1]))(1)
        1.0
        >>> linear_regression_line(linear_regression([1,3,5,7,9], [10,11,12,13,14]))([1, 2, 3])
        [10.0, 10.5, 11.0]
        >>> linear_regression_line([.5, 9.5])([1, 2, 3])
        [10.0, 10.5, 11.0]
        >>> linear_regression_line(9.5)  # doctest: +ELLIPSIS
        Traceback (most recent call last):
            ...
        TypeError: linear_regression_line() expects a list or tuple of (slope, intercept)...
        >>> linear_regression_line([2, 3, 4])
        Traceback (most recent call last):
            ...
        ValueError: The list or tuple containing the slope and intercept needs to be of length = 2.
    """
    if type(mb) not in [list, tuple]:
        raise TypeError('linear_regression_line() expects a list or tuple of '
                        '(slope, intercept) or [slope, intercept] form.')
    if len(mb) != 2:
        raise ValueError('The list or tuple containing the slope and intercept '
                         'needs to be of length = 2.')
    m = mb[0]
    b = mb[1]

    def line_function(x):
        """Function created and returned by linear_regression_line()."""
        # if int or float, return one value
        if type(x) in [int, float]:
            return (x * m) + b
        # otherwise
        elif type(x) in [list, tuple]:
            y_values = []
            for ii in x:
                y_values.append((ii * m) + b)
            return y_values

    return line_function
dbc61ffafe603e8e6ec21f87debc170e3b6eb289
697,907
def values(df, field, **kwargs):
    """
    Return values from a specific field of the data frame within a given
    time extent.

    Args:
        df (pd.DataFrame): The exported TAP tool data
        field (str): The field representing the column name

    Returns:
        np.ndarray: An array of the time-specified values
    """
    return df[field].values
6416c18ba3891a133c9ed0d487ca4b82d7a5291d
697,908
import os

def get_parent_dir(current_file):
    """Get the parent directory path."""
    current_dir = os.path.dirname(current_file)
    parent_dir = os.path.dirname(current_dir)
    return parent_dir
f1fc33dc0fbd6b358b672d695cd0ad89f9bb9b3e
697,909
import copy

def return_prim_graph(dag, innodes, outnodes):
    """Remove input and output nodes from a graph."""
    G_primitive = copy.deepcopy(dag)
    nonprims = innodes + outnodes
    for node in nonprims:
        if node in dag.nodes():
            G_primitive.remove_node(node)
    return G_primitive
e59fcfcb5796739cc940f6052e56aa5a4837fd47
697,910
import re

def convert_doctoc(html):
    """
    Convert doctoc to confluence macro.

    :param html: html string
    :return: modified html string
    """
    toc_tag = '''<p>
    <ac:structured-macro ac:name="toc">
      <ac:parameter ac:name="printable">true</ac:parameter>
      <ac:parameter ac:name="style">disc</ac:parameter>
      <ac:parameter ac:name="maxLevel">7</ac:parameter>
      <ac:parameter ac:name="minLevel">1</ac:parameter>
      <ac:parameter ac:name="type">list</ac:parameter>
      <ac:parameter ac:name="outline">clear</ac:parameter>
      <ac:parameter ac:name="include">.*</ac:parameter>
    </ac:structured-macro>
    </p>'''
    html = re.sub(r'\<\!\-\- START doctoc.*END doctoc \-\-\>', toc_tag, html,
                  flags=re.DOTALL)
    return html
b2a52126d6a234894dcf6f98f0d399c309587a7e
697,911
def flip_case(phrase, to_swap):
    """Flip [to_swap] case each time it appears in phrase.

    >>> flip_case('Aaaahhh', 'a')
    'aAAAhhh'

    >>> flip_case('Aaaahhh', 'A')
    'aAAAhhh'

    >>> flip_case('Aaaahhh', 'h')
    'AaaaHHH'
    """
    return ''.join(
        letter.lower()
        if letter.lower() == to_swap.lower() and letter.isupper()
        else (letter.upper() if letter.lower() == to_swap.lower() else letter)
        for letter in phrase
    )
ab3a18249e882bb0e8fda35a903048a20bd667e6
697,912
def apply_class_label_replacement(X, Y, replacement_method):
    """
    Replace class labels using the replacement method.

    :param X: data features
    :type X: numpy.Array()
    :param Y: data labels
    :type Y: numpy.Array()
    :param replacement_method: Method to update targets
    :type replacement_method: method
    """
    return (X, replacement_method(Y, set(Y)))
eac01d31ff63e39d8c203803ae14c08365ec20a5
697,913
import torch

def nt_xent_loss(out_1, out_2, temperature):
    """Loss used in SimCLR."""
    out = torch.cat([out_1, out_2], dim=0)
    n_samples = len(out)

    # Full similarity matrix
    cov = torch.mm(out, out.t().contiguous())
    sim = torch.exp(cov / temperature)

    # Negative similarity
    mask = ~torch.eye(n_samples, device=sim.device).bool()
    neg = sim.masked_select(mask).view(n_samples, -1).sum(dim=-1)

    # Positive similarity
    pos = torch.exp(torch.sum(out_1 * out_2, dim=-1) / temperature)
    pos = torch.cat([pos, pos], dim=0)

    loss = -torch.log(pos / neg).mean()
    return loss
6634e42f540835f5a45c4812e54455d0328b6ec2
697,914
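A minimal smoke test for the `nt_xent_loss` function above, using random normalized embeddings in place of real augmented-view projections:

import torch

torch.manual_seed(0)
z1 = torch.nn.functional.normalize(torch.randn(8, 16), dim=-1)
z2 = torch.nn.functional.normalize(torch.randn(8, 16), dim=-1)
loss = nt_xent_loss(z1, z2, temperature=0.5)
print(loss)  # a positive scalar; lower when z1 and z2 agree pairwise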
def fromScopus(scopus_author):
    """Fetch all publications associated with a Scopus author.

    Parameters
    ----------
    scopus_author : AuthorRetrieval
        Scopus author retrieval object
        (scopus.author_retrieval.AuthorRetrieval)

    Returns
    -------
    bibs : list
        List of Scopus search publications (scopus.scopus_search.Document)
    """
    bibs = scopus_author.get_documents()
    return bibs
88aeca0f28af9ca3256e77f6e77207ffb51cefa0
697,915
def W(H) -> int:
    """Default image width (equal to height)."""
    return H
4b3fdaaba9f5100096ca1ebbc22442de21b34ad5
697,917
def org_commits_by_day(df_raw):
    """Returns all commits with a count and a date index by day."""
    odf_day = df_raw.resample('D').sum()
    return odf_day
bb003b8ce1a0a28e632cfc78ffa3483359259e49
697,918
def make_queue_name(mt_namespace, handler_name):
    """
    Declare a new queue name in the channel. Depends on the queue "type":
    whether it receives an event or a command.

    :param mt_namespace: string with Mass Transit namespace
    :param handler_name: string with queue type. MUST be 'command' or 'event'
    :return: new unique queue name for channel
    """
    return '{}.{}'.format(mt_namespace, handler_name)
f74a7be8bf46a56f760b087fe47c9eda58d9f51e
697,919
import ipaddress
import sys

def calculate_ip_addressing(input_net, man_address, man_next_hop):
    """
    Calculate IP addressing (address, neighbor, default route) on the
    specified interface.

    This function is AFI agnostic, just feed it ipaddress objects.

    :param input_net: the IPv4/IPv6 network to use
    :param man_address: optional override for the host address
    :param man_next_hop: optional override for default route
    :return: tuple of (local_address, neighbor, next_hop, prefixlen)
    """
    net = ipaddress.ip_network(input_net)
    if net.prefixlen == (net.max_prefixlen - 1):
        address = net[0]
        neighbor = net[1]
        next_hop = net[1]
    else:
        address = net[1]
        neighbor = net[2]
        next_hop = net[2]

    # override default options
    if man_address:
        if ipaddress.ip_address(man_address) not in net:
            print("local address {} not in network {}".format(man_address, net),
                  file=sys.stderr)
            sys.exit(1)
        address = ipaddress.ip_address(man_address)
    if man_next_hop:
        if ipaddress.ip_address(man_next_hop) not in net:
            print("next-hop address {} not in network {}".format(man_next_hop, net),
                  file=sys.stderr)
            sys.exit(1)
        next_hop = ipaddress.ip_address(man_next_hop)

    # sanity checks
    if next_hop == address:
        print("default route next-hop address ({}) can not be the same as the "
              "local address ({})".format(next_hop, address), file=sys.stderr)
        sys.exit(1)

    print("network: {} using address: {}".format(net, address))
    return str(address), str(neighbor), str(next_hop), net.prefixlen
dec5d929049f34fe8135bff51f562615e9465838
697,920
def _fix_cookiecutter_jinja_var(value, replace='cookiecutter.'):
    """Remove the 'cookiecutter.' string from 'cookiecutter.varname' jinja
    strings.

    Can be used to remove different substrings as well by passing a string
    to the optional ``replace`` parameter.

    :param value: The string value within which to replace text
    :type value: str
    :param replace: The string to be removed from the ``value`` input,
        defaults to 'cookiecutter.'
    :type replace: str
    :return: Returns the input value with the ``replace`` string removed if
        ``value`` is of type str, otherwise it just returns the ``value``
        input
    """
    if type(value) is str:
        return value.replace(replace, "")
    else:
        return value
fc04d36a368fe7dd8c21a8dd3abe46f60d4d3d5e
697,921
import subprocess

def sync_call(*args, cwd, **kwargs):
    """Helper function which enforces the cwd argument."""
    return subprocess.call(*args, cwd=cwd, **kwargs)
9e3670413d9a6c763a234f7d6e4bea36bfc439cb
697,922
def flatten_dict(row, keys=[('title',), ('street', 'city', 'postalCode')]):
    """Flatten a dict by concatenating string values of matching keys.

    Args:
        row (dict): Data to be flattened
    Returns:
        flat (str): Concatenated data.
    """
    flat = ''  # The output data
    for ks in keys:
        # If any keys are present, join the values
        if not any(k in row for k in ks):
            continue
        flat = '\n'.join(row[k] for k in ks if k in row)
        break
    # Ensures that a key has been found, otherwise you'll need to provide
    # more keys
    assert len(flat) > 0
    return flat
b4bafb0ec19a19d4223d7e9a94e3b8630cad1066
697,923
import os

def program_installed(app):
    """Check if the given app is installed in the user's PATH."""
    path = os.environ['PATH']
    paths = path.split(os.pathsep)
    for dir in paths:
        if os.path.isdir(dir):
            if os.path.isfile(os.path.join(dir, app)):
                return True
    return False
9783927deb266407e89c0e446759d049824f7897
697,924
def sam_flag_check(query, subject):
    """
    Check two numbers: whether query (for filtering) is in subject or not.

    Convert to binary mode:
    q: 0000010 (2)
    s: 1011011 (91)
    q in s
    range: 0 - 2048 (SAM flag)
    """
    def to_bin(n):
        return '{0:012b}'.format(n)

    # convert to binary mode; int() instead of eval() for numeric strings
    q = to_bin(int(query))
    s = to_bin(int(subject))
    # check q, s bit by bit from the least significant end
    flag = True
    for j, k in zip(q[::-1], s[::-1]):
        if not j == '1':
            continue
        if int(j) - int(k) > 0:
            flag = False
            break
    return flag
939109ad1c1668327cb4af4fcada818cea4653f4
697,925
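An illustrative check for the `sam_flag_check` function above: flag 2 (bit 1) is contained in flag 91 (binary 1011011), but flag 4 (bit 2) is not:

assert sam_flag_check("2", "91") is True
assert sam_flag_check("4", "91") is False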
import string

def get_range_to_here_hashes(repo, start):
    """Return a list of strings corresponding to commits from 'start' to here.

    The list begins with the revision closest to but not including 'start'.
    Raise a ValueError if any of the returned values are not valid
    hexadecimal.

    :repo: a callable supporting git commands, e.g. repo("status")
    :start: a reference that log will understand
    :returns: a list of strings corresponding to commits from 'start' to here

    """
    hashes = repo("log", start + "..", "--format=%H").split()
    if not all(c in string.hexdigits for s in hashes for c in s):
        raise ValueError(
            "phlgit_log__getRangeToHereHashes() invalid hashes\n" +
            str(hashes))
    hashes.reverse()
    return hashes
41a8795f489d1646ba48366c791308a6d1435b3e
697,926
import os

def get_all_lobster_files(directory, file_type='COOPCAR.lobster'):
    """
    Get all DOSCAR and CONTCAR file paths.

    Parameters
    ----------
    directory : str
        root directory to look for DOSCAR files
    file_type : str
        file type for which to search and return file path

    Returns
    -------
    lobster_files : list[str]
        list of paths to lobster files of type file_type
    """
    lobster_directories = [os.path.join(r, subdirectory)
                           for r, d, f in os.walk(directory)
                           for subdirectory in d
                           if file_type in os.listdir(os.path.join(r, subdirectory))]
    lobster_files = [os.path.join(d, file_type) for d in lobster_directories]
    return lobster_files
a5be225108408b4812f42bf89df8d84f46334ce8
697,927
import re

def check_token(token):
    """Checks if the given token is a valid UUID."""
    valid = re.compile(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-"
                       r"[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
    return valid.match(token)
be675b2fa04aac96b006ab94d8c0f65d8c02132f
697,929
from typing import List

def lshift(array: List) -> List:
    """
    Example
    -------
    >>> lshift([0, 1, 2, 3, 4])
    [1, 2, 3, 4, 0]

    >>> lshift([0])
    [0]

    >>> lshift([])
    []
    """
    return (array[1:] + [array[0]]) if len(array) > 1 else array
dbd9e23120eb136fb9beb02fda027174e75905da
697,930
import requests
from bs4 import BeautifulSoup

def url_to_soup(url):
    """url -> soup"""
    html = requests.get(url)
    return BeautifulSoup(html.text, 'html.parser')
8b3e6a67c2123ca134d581631321cb8a8daf132a
697,931
def is_vowel(char):
    """Check if the character is a vowel."""
    return char in ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
97f747d96e08cd15fe7a1b742b36f5ff6cc2f3fc
697,932
def gs_block(men, women, pref, blocked):
    """
    Gale-Shapley algorithm, modified to exclude unacceptable matches.

    Inputs:
        men (list of men's names)
        women (list of women's names)
        pref (dictionary of preferences mapping names to list of preferred
            names in sorted order)
        blocked (list of (man, woman) tuples that are unacceptable matches)

    Output: dictionary of stable matches
    """
    rank = {}
    for w in women:
        rank[w] = {}
        i = 1
        for m in pref[w]:
            rank[w][m] = i
            i += 1
    # print(rank)
    prefpointer = {}
    for m in men:
        prefpointer[m] = 0
    freemen = set(men)
    numPartners = len(men)
    Start = {}
    Finished = {}
    listBlocked = list(blocked)
    # print(listBlocked)
    while freemen:
        m = freemen.pop()
        w = pref[m][prefpointer[m]]
        # print(m + ' ' + w)
        prefpointer[m] += 1
        if (m, w) not in listBlocked and w not in Start:
            Start[w] = m
        elif (m, w) not in listBlocked and not Finished:
            mprime = Start[w]
            if rank[w][m] < rank[w][mprime]:
                Start[w] = m
                freemen.add(mprime)
            else:
                freemen.add(m)
        else:  # blocked couple
            freemen.add(m)
            if prefpointer[m] == numPartners and w not in Start:
                prefpointer[m] = 0
                while prefpointer[m] < numPartners:
                    w = pref[m][prefpointer[m]]
                    if (m, w) not in listBlocked:
                        mprime = Start[w]
                        Start[w] = m
                        Finished[w] = m
                        freemen.pop()
                        freemen.add(mprime)
                    prefpointer[m] += 1
    return Start
ea69e56604cac25d8f5c9d957157dbd2d9ad1b27
697,933
from typing import Iterable, List

def filter_wikipathways_files(file_names: Iterable[str]) -> List[str]:
    """Filter out files that don't have the '.ttl' extension or don't start
    with 'WP'."""
    return [
        file_name
        for file_name in file_names
        if file_name.startswith('WP') and file_name.endswith('.ttl')
    ]
abac5be763309986b951d574a72c0f2652b2a981
697,934
def attr_dict(cls: type, data: dict):
    """
    Removes keys from the passed dict ``data`` which don't exist on ``cls``
    (thus would get rejected as kwargs), then create and return an instance
    of ``cls``, passing the filtered data as keyword args.

    Ensures that any keys in your dictionary which don't exist on ``cls``
    are automatically filtered out, instead of causing an error due to
    unexpected keyword arguments.

    Example::

        >>> data = dict(timestamp="2019-01-01Z00:00", producer='eosio', block_num=1234, example='hello')
        >>> my_block = attr_dict(EOSBlock, data)

    :param cls:
    :param data:
    :return:
    """
    if hasattr(cls, '__attrs_attrs__'):
        cls_keys = [atr.name for atr in cls.__attrs_attrs__]
    else:
        cls_keys = [k for k in cls.__dict__.keys() if k[0] != '_']
    clean_data = {x: y for x, y in data.items() if x in cls_keys}
    return cls(**clean_data)
c764cda2ad772de8b239fe34d39ef3ac92e658e5
697,935
import glob

def _find_my_yamls(myc_folder, specific_files=None):
    """Load all .yaml files in a folder or a specific list of yaml files."""
    if specific_files:
        yaml_list = specific_files.replace(" ", "").split(",")
        all_files = [myc_folder + yaml for yaml in yaml_list]
    else:
        all_files = glob.glob(myc_folder + "/*.yaml")
    return all_files
badb9f2165470516c7dd9c5ba2a7e7c1c76d7913
697,936
from typing import List

def sbd_3_bitwise(ids: List):
    """
    Solution: Use bitwise XOR on a single variable for each element.

    Complexity:
        Time: O(n)
        Space: O(1)
    """
    unique_id = 0
    for item in ids:
        unique_id ^= item
    return unique_id
d964fe78b2d1cf8de9e4d1cc165ea03a38eb6387
697,937
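A quick check of the `sbd_3_bitwise` function above: every ID that appears twice cancels itself out under XOR, leaving the unique one:

assert sbd_3_bitwise([3, 7, 3, 9, 9]) == 7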
def process_spatial(geo):
    """
    Process a spatial bounding box so it can be added to the spatial
    metadata.

    Parameters
    ----------
    geo : list
        [minLon, maxLon, minLat, maxLat]

    Returns
    -------
    polygon : dict(list(list))
        Dictionary following GeoJSON polygon format
    """
    polygon = {
        "type": "Polygon",
        "coordinates": [
            [
                [geo[0], geo[2]],
                [geo[0], geo[3]],
                [geo[1], geo[3]],
                [geo[1], geo[2]]
            ]
        ]}
    return polygon
4ff7dea7d7dcefd5b6bfb1605dd50345fc379527
697,938
def normalized(string):
    """Normalize a base32 encoded string."""
    return string.upper().replace("-", "")
1998b41d9a27c78b5519983a880a1aa573a6e4ea
697,939
def list_bank_cards(account_id):  # noqa: E501
    """Return all bank cards for an account.

    Return all cards for the specified account  # noqa: E501

    :param account_id: Id of account
    :type account_id: str

    :rtype: List[BankCard]
    """
    return 'do some magic!'
6268d6a3584fa38cda3ae2294daaebb736f695d0
697,940
def int32(x):
    """Force conversion of x to 32-bit signed integer."""
    x = int(x)
    maxint = int(2**31 - 1)
    minint = int(-2**31)
    if x > maxint:
        x = maxint
    if x < minint:
        x = minint
    return x
854338b7fd748ba5ddbbbd8c697288c6331995c4
697,941
def video_no_found(error):
    """No such video exists."""
    return {'message': 'video does not exist'}, 404
ec4b3149e1c8f8c7d02e0ae7926cc41cc7e6ad29
697,942
def get_type(transaction):
    """
    :return: the type of the transaction
    """
    return transaction['type']
4f66830f7c1e3bdc5d6b2c4ccc5db493014b9e5f
697,943
def find_max_str(smiles: str) -> str:
    """
    Choose from a multi-component SMILES string the longest component.
    """
    smiles = max(smiles.split("."), key=len)
    return smiles
8b3f495492e471c7aee5fa856745179ecb1e8f8f
697,944
from typing import Any, List, Optional
import random

def _subsample_array(
    arr: List[Any],
    max_values: int,
    can_modify_incoming_array: bool = True,
    rand: Optional[random.Random] = None,
) -> List[Any]:
    """Shuffles the array and returns `max_values` number of elements."""
    if not can_modify_incoming_array:
        arr = arr[:]
    if rand is not None:
        rand.shuffle(arr)
    else:
        random.shuffle(arr)
    return arr[:max_values]
379d056fefbc7b4a828a56dd9999545683a46a30
697,945
def format_bytes(n):
    """Format bytes as text.

    Copied from dask to avoid dependency.
    """
    if n > 1e15:
        return "%0.2f PB" % (n / 1e15)
    if n > 1e12:
        return "%0.2f TB" % (n / 1e12)
    if n > 1e9:
        return "%0.2f GB" % (n / 1e9)
    if n > 1e6:
        return "%0.2f MB" % (n / 1e6)
    if n > 1e3:
        return "%0.2f kB" % (n / 1000)
    return "%d B" % n
f82a135f7f5308a04bf53bd3b8d1a05ef5d96de0
697,946
import os

def pipe(cmd):
    """Runs a command in a subprocess.

    cmd: string Unix command

    Returns (res, stat), the output of the subprocess and the exit status.
    """
    # Note: os.popen is deprecated now, which means we are supposed to stop
    # using it and start using the subprocess module. But for simple cases,
    # I find subprocess more complicated than necessary. So I am going to
    # keep using os.popen until they take it away.
    fp = os.popen(cmd)
    res = fp.read()
    stat = fp.close()
    assert stat is None
    return res, stat
4c31602dbe882422f2e9b107c379c1edf5096489
697,947
def semideviation(r):
    """
    Returns the semi-deviation (negative deviation) of r.
    r must be a Series or DataFrame.
    """
    is_negative = r < 0
    return r[is_negative].std(ddof=0)
78eb4092f2c7ed5439ef4ee44b5ddef898bd53fa
697,948
def compute_several_statistics(X, Args):
    """
    A function to compute and stack the results of several statistics on
    data X.

    Inputs:
        * X = the data
        * Args = list consisting of statistics_list and statistics_args

    Outputs:
        * a list corresponding to the value of each statistic on the data X.
    """
    statistics_list, statistics_args = Args
    λ = []
    for i_statistic, statistic in enumerate(statistics_list):
        λ.append(statistic(X, statistics_args[i_statistic]))
    return λ
7cefa8a25ef22e85271a56339cfb43b796c74933
697,949
import os

def replace_file_name(filename: str, replace_with: str) -> str:
    """
    Replace file name with a fixed string, preserving its extension.
    Result would look like `<replace_with>.<extension>`.

    :param filename: str
    :param replace_with: str
    :return: str
    """
    _, file_extension = os.path.splitext(filename)
    return f"{replace_with}.{file_extension[1:]}"
68caaf94ab921a862ba3b3f50735e5c96101aec8
697,950
def get_p_val(rank_table):
    """
    Compute empirical p-values from a rank table with 'rank' and 'out_of'
    columns.
    """
    r = 1 - rank_table["rank"] * 1. / (rank_table["out_of"] + 1)
    # Series.sort() was removed in modern pandas; sort_values() returns
    # the sorted Series instead of sorting in place.
    r = r.sort_values()
    r.name = "p_value"
    return r
387b819f1a028e42bedd301e2324c0b25c159ca8
697,952
def _change_galician_cc_token_before_subtree(sentence, token):
    """Determine the token directly preceding a subtree in a sentence.

    Args:
        sentence (`Sentence`): The sentence.
        token (`Token`): The root token of the subtree.

    Returns:
        str: The ID of the token directly preceding the root token and all
        of its dependents.
    """
    tokens = [token.id]
    a = True
    while a:
        a = False
        for t in sentence:
            if t.head in tokens and t.id not in tokens:
                tokens.append(t.id)
                a = True
    tokens = sorted(tokens)
    return str(int(tokens[0]) - 1)
21eebf0ba9ec02af4a7c65e8631a95e83b68372e
697,954
import torch

def get_jacobian(inputs, func, **kwargs):
    """Return the Jacobian matrix of size m x n of a function f: R^n -> R^m."""
    size = inputs.size()
    n = size[0]
    # Input dim
    if len(size) > 1:
        dim_input = size[1]
    else:
        dim_input = 1
    test = func(inputs, **kwargs)
    # Output dim
    if len(test.size()) > 1:
        dim_output = test.size()[1]
    else:
        dim_output = 1
    J = torch.zeros((n, dim_output, dim_input))
    # Loop over all batch samples
    for i in range(n):
        x = inputs[i, ...].squeeze()
        x = x.repeat(dim_output, 1)
        x.requires_grad_(True)
        z = func(x, **kwargs)
        if dim_output == 1:
            z.backward([torch.ones_like(z)])
        else:
            z.backward(torch.eye(dim_output))
        J[i, :, :] = x.grad.data
    return J
5ff764a11512ed3813956c7549562761b430092f
697,955
def turn(p1, p2, p3):
    """
    0 if the points are colinear
    1 if the points define a left-turn
    -1 if the points define a right-turn
    """
    # Compute the z-coordinate of the vectorial product p1p2 x p1p3
    z = (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])
    return 0 if z == 0 else int(z / abs(z))
2b8dccbe0111d9cd655f4a6a8bc2131b73f3a3ee
697,956
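A few orientation checks for the `turn` function above:

assert turn((0, 0), (1, 0), (1, 1)) == 1    # left turn (counter-clockwise)
assert turn((0, 0), (1, 0), (1, -1)) == -1  # right turn (clockwise)
assert turn((0, 0), (1, 0), (2, 0)) == 0    # colinear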
def find(node):
    """Find current canonical representative equivalent to node.

    Adjust the parent pointer of each node along the way to the root to
    point directly at the root for inverse-Ackermann-fast access.
    """
    if node.parent is None:
        return node
    root = node
    while root.parent is not None:
        root = root.parent
    parent = node
    while parent.parent is not root:
        grandparent = parent.parent
        parent.parent = root
        parent = grandparent
    return root
c9827af585c2bcd284640213bb92ae951659abf8
697,957
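A minimal sketch of using the `find` function above as the find half of a union-find structure; the `Node` class is hypothetical, with `parent` as the only required attribute:

class Node:
    def __init__(self):
        self.parent = None  # None marks a canonical representative

a, b, c = Node(), Node(), Node()
b.parent = a  # union: b joins a's set
c.parent = b  # union: c joins the set via b

assert find(c) is a
assert c.parent is a  # path compression rewired c directly to the root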
from typing import Optional, Union
from pathlib import Path
import logging
import os

def init_worker_env(
    gobbli_dir: Optional[Path] = None,
    log_level: Union[int, str] = logging.WARNING,
) -> logging.Logger:
    """
    Initialize environment on a ray worker.

    Args:
        gobbli_dir: Used as the value of the GOBBLI_DIR environment
            variable; determines where gobbli data is stored on the
            worker's filesystem.
        log_level: Level for logging coming from the worker.
    """
    if gobbli_dir is not None:
        os.environ["GOBBLI_DIR"] = str(gobbli_dir)
    logging.basicConfig(level=log_level)

    # Provide a logger for our workers. Workers should only log using
    # loggers they've created to avoid logger pickling, which generally
    # doesn't work.
    # https://stackoverflow.com/questions/55272066/how-can-i-use-the-python-logging-in-ray
    return logging.getLogger(__name__)
62309256e5430759ea70c2bfb649cecc58369b95
697,958
from os.path import join

def get_h5_file_name(cfg):
    """Return the HDF5 file name based on the configuration file."""
    return join(cfg['output']['filepath'],
                f"{cfg['output']['fileprefix']}.{cfg['output']['type']}")
70a6fef2198e3a5c70e4a176c1078c105dd38d04
697,959
def worker_number(worker_id):
    """Get the current worker number."""
    id_as_str = "".join(ch for ch in worker_id if ch.isdigit())
    if len(id_as_str) == 0:
        return 0
    return int(id_as_str)
3cd6ca7e665fefda9427476e39de5ea23f843f72
697,960
import string
import re

def rm_non_textual_punct(dfcol):
    """Aggressively remove almost all punctuation (except .,?:;- )."""
    text_punct = '.,?:;-'
    # this also removes + <> $ %
    nontext_punct = [char for char in string.punctuation if char not in text_punct]
    nontext_punct = re.escape(''.join(nontext_punct))
    return dfcol.str.replace(f'[{nontext_punct}]', '', regex=True)
ff9dfc87ec0015b4a0d0ca5d83db3b116e476470
697,963
def get_reference_value_from_spec(openapi_spec: dict, reference_path: str) -> dict:
    """Follows the reference path passed in and returns the object at the
    end of the path.

    Args:
        openapi_spec (dict): The openapi.json specification object
        reference_path (str): a path formatted as "#/foo/bar/baz"

    Returns:
        dict: The object found by following the path
    """
    path_elements = reference_path.split("/")
    reference_val = openapi_spec
    for elem in path_elements:
        if elem == "#":
            continue
        else:
            reference_val = reference_val.get(elem, {})
    return reference_val
c32962a3b008825de4c5108d8d1a3ad5db179b17
697,964
def hide_keys(dictionary, keys_to_hide, new_value="xxx"):
    """
    Return a copy of the given dictionary in which the values of the
    specified keys are replaced by the new_value word (or 'xxx').

    :param dictionary: a dictionary
    :param keys_to_hide: keys to hide in the output dictionary
    :param new_value: replacement string for keys to hide
    :return: the new dictionary with hidden items
    """
    _new_dict = {}
    for key, value in dictionary.items():
        _new_dict[key] = new_value if key in keys_to_hide else value
    return _new_dict
d6b8bc7958c637212899c048a9958ac3d2755894
697,965
def get_seq(_seq, _start, _end):
    """
    :param _seq:
    :param _start: !!! should be SSR start position on genome (begin from 1, not 0)
    :param _end: !!! should be SSR end position on genome (begin from 1, not 0)
    :return:
    """
    if _start <= 200:
        return _seq.seq[0: _end + 200]
    elif _end >= len(_seq.seq) - 200:
        return _seq.seq[_start - 201:]
    else:
        return _seq.seq[_start - 201: _end + 200]
c42db6e6a50fa46c30e948ac6f007d5c92e07708
697,967
import random

def active_sample(network, size, evaluate):
    """Sample from affected genes as null model."""
    # list() so this also works with networkx NodeViews, which
    # random.sample cannot index directly.
    sample = random.sample(list(network.nodes()), size)
    subnet = network.subgraph(sample)
    result = evaluate(subnet)
    return result
d5f7ecf87e6b72b5930fe066e13fd593626fd352
697,968
def get_other_dims(da, dims_exclude):
    """
    Returns all dimensions in provided dataset excluding dims_exclude.

    | Author: Dougie Squire
    | Date: 22/04/2018

    Parameters
    ----------
    da : xarray DataArray
        Array to retrieve dimensions from
    dims_exclude : str or sequence of str
        Dimensions to exclude

    Returns
    -------
    dims : str or sequence of str
        Dimensions of input array, excluding dims_exclude

    Examples
    --------
    >>> A = xr.DataArray(np.random.normal(size=(3,2)),
    ...                  coords=[('x', np.arange(3)), ('y', np.arange(2))])
    >>> doppyo.utils.get_other_dims(A, 'y')
    'x'
    """
    dims = da.dims

    if dims_exclude is None:
        other_dims = dims
    else:
        if isinstance(dims, str):
            dims = [dims]
        if isinstance(dims_exclude, str):
            dims_exclude = [dims_exclude]
        other_dims = set(dims) - set(dims_exclude)

    return tuple([o for o in dims if o in other_dims])
506a8b7193ea95480c83e3d69a31774f77d4fbcc
697,969
def standard_split(text, row_length):
    """text = abcdefghijklmnopqrstuvwxyz and row_length = 5

    abcde
    fghij
    klmno
    pqrst
    uvwxy
    z

    returns ['afkpuz', 'bglqv', 'chmrw', 'dinsx', 'ejoty']
    """
    output = []
    text_length = len(text)
    # Takes output column by index in turn, taking e.g. the 0th, 5th, 10th ...
    # char for the 0th column, then the 1st, 6th, 11th ... char for the 1st
    # column etc.
    for num in range(row_length):
        count = num
        output.append([])
        while count < text_length:
            output[-1].append(text[count])
            count += row_length
    # Join each column into a string so the result matches the documented
    # example.
    return [''.join(column) for column in output]
3a4732433b58777dcf08529b98b90f890e680a29
697,970
def median(x):
    """Return the median of a list of values."""
    m, r = divmod(len(x), 2)
    if r:
        return sorted(x)[m]
    return sum(sorted(x)[m - 1:m + 1]) / 2
337584e40c61bd198496355e38f4a8d4da9310fa
697,971
def _get_args(tp):
    """Backport of typing.get_args for Python 3.6."""
    return getattr(tp, "__args__", ())
f265cb6d8d85601d9ee6f20be8f78c8d9819c920
697,972