content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _compute_annot_reliability_score(tf_row, entities, freq_matrix):
    """Score how likely a negative sample belongs to the positive space.

    The score is the mean of (freq in pos) - (freq in neg) over the
    entities occurring in the text, looked up in ``freq_matrix``.

    :return: the mean frequency score, or 0 when no entity occurs.
    """
    n_entities = sum(tf_row)
    if not n_entities:
        return 0
    return sum(tf_row * freq_matrix[entities]) / n_entities
87d6ca52a35582d7611d75ac957197e815543f57
32,887
def between(bound_min, bound_max, value):
    """Return True if *value* is between *bound_min* and *bound_max*, inclusive."""
    # Chained comparison is the idiomatic, short-circuiting form of
    # `bound_min <= value and value <= bound_max`.
    return bound_min <= value <= bound_max
2233fd7670c27a5e75b96822cf047842efb4add0
32,889
def diff_to_step(min, max, step, value):
    """Return 0 if *value* lies on the step grid anchored at *min*, otherwise
    how much *value* should change to reach the nearest step.

    NOTE(review): `min`/`max` shadow builtins; both while-loops contain an
    unconditional `break` so they run at most once — presumably intentional
    single-step adjustment, but confirm. `round_by` crashes for values whose
    str() has no '.', and the function implicitly returns None when the
    floats compare equal after conversion — verify against callers.
    """
    # Number of decimals of `value`; used to avoid long floating-point tails.
    round_by = len(str(value).split('.')[1])#round the value to avoid many decimal ponit 1 stuff in result
    # Degenerate configurations: no usable range/step, or already at min.
    if ( min == max and (min != None or min == '') ) or step == None or step == '' or min == value:
        return 0
    if min == None or min == '':
        min = 0
    # Best-effort numeric coercion; non-numeric inputs fall through unchanged.
    try:
        min = float(min)
        step = float(step)
        value = float(value)
    except:
        pass
    if min < value:
        while min < value:
            value = round(value - step, round_by)
            if min == value:
                return 0
            break
        return round((value + step) - min, round_by)
    elif min > value:
        while value < min:
            value = round(value + step, round_by)
            if min == value:
                return 0
            break
        return round(min - (value - step), round_by)
84186a99b1e11785eacbfa8c5ab42b97b6fd954d
32,890
import string


def preprocess_incoming(str_data) -> str:
    """Lower-case incoming text and strip ASCII punctuation for inference.

    Non-string inputs are returned unchanged.
    """
    try:
        table = str.maketrans("", "", string.punctuation)
        return str_data.translate(table).lower()
    except Exception:  # pylint: disable=broad-except # noqa: B902
        return str_data
a944870fef0b404a762fd5acdc3131780d27f089
32,891
import re


def isGoogle(url):
    """Check whether *url* hides data in a form resembling a Google search.

    Note: since this is regular expression matching, it will not catch cases
    where jibberish is inserted into or appended onto urls. For instance,
    '...?q=the+@asdf' would pass because only 'the+a' is matched.

    :param url: the url to check
    :return: True when the url matches the Google-search pattern.
    """
    # Raw string and escaped dots: previously the unescaped '.' matched any
    # character (e.g. 'wwwXgoogleXcom' would have passed).
    pattern = r'http://www\.google\.com/search\?q=(?P<query>[a-zA-Z0-9+]+)'
    return re.match(pattern, url) is not None
f2527ef692d31f35ba6195b74fe1c1a7a142950d
32,892
def read_item_dict(item_file):
    """Read a source/rir dictionary from a tab-separated source/rir file.

    Each line is ``key<TAB>item1<TAB>item2...``; entries are grouped by the
    first path component of the first item.
    """
    item_dict = {}
    with open(item_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.rstrip('\n').split('\t')
            key, items = fields[0], fields[1:]
            subfolder = items[0].split('/')[0]
            item_dict.setdefault(subfolder, {})[key] = items
    return item_dict
a98acd115f9724838108c6ee62bba20cd5c27e6d
32,893
def _swift_module_search_path_map_fn(module):
    """Map a module struct to the directory containing its `.swiftmodule`.

    Intended as a mapping function for modules passed into `Args.add_all`.

    Args:
        module: The module structure (as returned by
            `swift_common.create_module`) from a `SwiftInfo` provider.

    Returns:
        The dirname of the module's `.swiftmodule` file, or None when the
        module carries no Swift info.
    """
    swift_info = module.swift
    if not swift_info:
        return None
    return swift_info.swiftmodule.dirname
f2b568879711a2caf12f615e3c9972044cff9e13
32,894
import gzip


def _get_open_function_from_extension(filename, kind="yaml"):
    """Select the opener for *filename*.

    Returns `gzip.open` for ``.<kind>.gz`` files and the builtin `open` for
    ``.<kind>`` files; raises ValueError for anything else.
    """
    suffix = ".{}".format(kind)
    if filename.endswith(suffix + ".gz"):
        return gzip.open
    if filename.endswith(suffix):
        return open
    raise ValueError("Invalid filename. Should be .{} or .{}.gz".format(kind, kind))
61229ec708b78632533e1adc4e491a5f5f169380
32,895
def normalize_framework(framework: str) -> str:
    """Normalise a framework name string to lower case."""
    return framework.lower()
64d6bfc623d5e56d37c7ab64fd8d7165b93a7aef
32,896
import re


def __modify_name_remove(file_name, string, position):
    """Remove *string* from the base name of a file.

    :param file_name: the file name to modify.
    :param string: the literal text to remove.
    :param position: 'any' removes every occurrence; 'prefix'/'suffix' remove
        it only at the start/end. Any other value yields ''.
    :return: the modified name.
    """
    # re.escape keeps regex metacharacters in `string` (e.g. '.', '+') from
    # being interpreted as a pattern — previously 'file+name' style inputs
    # could raise re.error or match the wrong text.
    if position == "any":
        return file_name.replace(string, "")
    if position == "prefix":
        return re.sub("^" + re.escape(string), "", file_name)
    if position == "suffix":
        return re.sub(re.escape(string) + "$", "", file_name)
    return ""
9dfd5236d34450409f01974cd27f490eed125b3f
32,897
def lazy_property(func):
    """Decorator turning a method into a lazily-computed, cached property.

    The wrapped function runs once on first access; the result is stored on
    the instance under ``_lazy_<name>`` and returned directly afterwards.

    Example::

        @dataclass
        class Circle:
            x: float
            y: float
            r: float

            @lazy_property
            def area(self):
                print("computing")
                return 3.14 * self.r * self.r

    Only the first access triggers the computation:

    >>> cir = Circle(0, 1, 1)
    >>> cir.area
    computing
    3.14
    >>> cir.area
    3.14
    """
    cache_attr = "_lazy_" + func.__name__

    @property
    def _lazy_property(self):
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, func(self))
        return getattr(self, cache_attr)

    return _lazy_property
a82d07465b91c1334760b581f2e4b9e605b1790f
32,899
def cross_product(base, a, b):
    """2-D cross product of vectors (a - base) and (b - base).

    Point base, Point a, Point b — each needs `.x`/`.y` attributes.
    """
    ax, ay = a.x - base.x, a.y - base.y
    bx, by = b.x - base.x, b.y - base.y
    return ax * by - bx * ay
2ba3e77c351df276c08bc73b22f0ecb50a76d166
32,901
def agda(prelude: str, includes: list[str], library: str):
    """Build the command tuple that starts an Agda REPL."""
    options = {
        'prelude': prelude,
        'includes': includes,
        'library': library,
    }
    return ('repl', options)
8bd46c8341c3a6775d09b40ea81e373abdda4232
32,902
from typing import Sequence


def joiner(values: Sequence, join_str: str = ", ", last_str: str = ", "):
    """Join values with a distinct separator before the final value.

    Args:
        values (Sequence): Values to join.
        join_str (str, optional): Separator between value 0 and the
            penultimate value. Defaults to ", ".
        last_str (str, optional): Separator joining the last value to the
            rest. Defaults to ", ".

    Returns:
        The joined string; '' for an empty sequence, or the single element
        itself (unchanged) for a one-element sequence.
    """
    if not values:
        # Previously raised IndexError on `values[-1]` for empty input.
        return ""
    if len(values) == 1:
        return values[0]
    head = join_str.join(str(v) for v in values[:-1])
    return head + last_str + str(values[-1])
c657cf4fcb93226e5d8b726519ba47d2a268017a
32,903
def compareimages(struct, oimg, imdict, keylist):
    """Group *oimg* with images whose header values match *struct*.

    Builds the list of header values for ``keylist``; if an entry in
    ``imdict`` already has the same values, the image name is appended to
    it, otherwise a new entry keyed by ``oimg`` is created.

    :return: the (mutated) ``imdict``.
    """
    # Collect the header parameter values, '' for missing/unreadable keys.
    klist = []
    for key in keylist:
        try:
            klist.append(str(struct[0].header[key]).strip())
        except:
            klist.append('')

    # Append to an existing entry with identical header values, if any.
    for name in imdict.keys():
        if imdict[name][0] == klist:
            imdict[name].append(oimg)
            return imdict

    # First image, or no match found: start a new entry.
    imdict[oimg] = [klist, oimg]
    return imdict
f10fbbd52981fe2e63fe0659223647d8d054c436
32,904
def binary(x):
    """Convert the decimal string *x* into an 8-bit bitstring.

    Negative inputs are encoded as 8-bit two's complement.
    """
    if x.startswith("-"):
        magnitude = format(int(x[1:]), "08b")
        # Two's complement: invert every bit, then add one.
        inverted = "".join("1" if bit == "0" else "0" for bit in magnitude)
        return bin(int(inverted, 2) + 1)[2:]
    return format(int(x), "08b")
1b27503b5089bd468c58346f68dc8cb402cae8ae
32,907
import subprocess


def exec_cmd(cmd, env=None, stdin=None, timeout=None):
    """Execute CLI command

    :param cmd: Program and arguments
    :type cmd: [str]
    :param env: Environment variables
    :type env: dict | None
    :param stdin: File to use for stdin
    :type stdin: file
    :param timeout: The timeout for the process to terminate.
    :type timeout: int
    :raises: subprocess.TimeoutExpired when the timeout is reached before
        the process finished.
    :returns: A tuple with the returncode, stdout and stderr
    :rtype: (int, bytes, bytes)
    """
    print('CMD: {!r}'.format(cmd))
    process = subprocess.Popen(
        cmd,
        stdin=stdin,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env)
    try:
        streams = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        # The child process is not killed if the timeout expires, so in order
        # to cleanup properly a well-behaved application should kill the child
        # process and finish communication.
        # https://docs.python.org/3.5/library/subprocess.html#subprocess.Popen.communicate
        process.kill()
        stdout, stderr = process.communicate()
        print('STDOUT: {}'.format(stdout.decode('utf-8')))
        print('STDERR: {}'.format(stderr.decode('utf-8')))
        raise
    # This is needed to get rid of '\r' from Windows's lines endings.
    stdout, stderr = [stream.replace(b'\r', b'').decode('utf-8')
                      for stream in streams]
    # We should always print the stdout and stderr
    print('STDOUT: {}'.format(stdout))
    print('STDERR: {}'.format(stderr))
    return (process.returncode, stdout, stderr)
b477696ab6f9c05510d302358615c8b843051a48
32,908
def shift_tokens_left(input_ids, pad_token_id):
    """Shift input ids one token to the left.

    Drops the first token of each row and fills the freed last position
    with ``pad_token_id``. Returns a new tensor; the input is untouched.
    """
    shifted = input_ids.clone()
    shifted[:, :-1] = input_ids[:, 1:]
    shifted[:, -1] = pad_token_id
    return shifted
e139bb3a573e66e49994854742945466a4a5aa39
32,909
def create_layer_set(width=7000, height=7000): """Generate layer set data for xml file""" # I think this sets up the canvas? layer_set = f""" <t2_layer_set oid="3" width="{width:.1f}" height="{height:.1f}" transform="matrix(1.0,0.0,0.0,1.0,0.0,0.0)" title="Top Level" links="" layer_width="{width:.1f}" layer_height="{height:.1f}" rot_x="0.0" rot_y="0.0" rot_z="0.0" snapshots_quality="true" snapshots_mode="Full" color_cues="true" area_color_cues="true" avoid_color_cue_colors="false" n_layers_color_cue="0" paint_arrows="true" paint_tags="true" paint_edge_confidence_boxes="true" prepaint="false" preload_ahead="0" >""" return layer_set
542090a46c7165f31b237ece4c006abfecdef759
32,910
def format_array(array):
    """Format an array nicely for templating.

    Removes the square brackets and commas: values in a row are separated by
    tabs, rows by newlines.

    Args:
        array (:obj:`np.ndarray`): The array to format (2-D: rows of values).

    Returns:
        formatted (str): The formatted array, with no trailing newline.
    """
    # Generalized from a fixed three-column template: rows of any width work.
    return "\n".join(
        "\t".join(format(value) for value in row) for row in array)
50cf084e207a8cbabcaf0e7eb9f3aea2742f8f91
32,911
def defocus_to_image_displacement(W020, fno, wavelength=None):
    """Compute image displacement from wavefront defocus (0-P).

    Parameters
    ----------
    W020 : `float` or `numpy.ndarray`
        wavefront defocus; units of waves when *wavelength* is given,
        otherwise units of length
    fno : `float`
        f/# of the lens or system
    wavelength : `float`, optional
        wavelength of light; when None, W020 takes units of length

    Returns
    -------
    `float`
        image displacement caused by the defocus OPD
    """
    displacement = 8 * fno**2 * W020
    if wavelength is not None:
        displacement = displacement * wavelength
    return displacement
3e4099d76dc4e4ae005397eb55ce1f3451808df9
32,912
def getWeightsFromIds(weights, kernelIds):
    """Get the weights corresponding to the provided indices.

    .. note:: Kernel indices are assumed to start at 1 instead of 0, because
        sparse matrices are used to represent kernel ids and would drop
        zero-based ids; 1 is subtracted before indexing.

    :param np.ndarray weights: Array of weight values.
    :param np.ndarray kernelIds: Array of 1-based indices.
    :return: Array of weights.
    :rtype: np.ndarray
    """
    zero_based = kernelIds - 1
    return weights[zero_based]
e350b88a95988978d6c3827b26db39c594561317
32,914
import os
import re


def _make_relative(reference_file, path_root, paths):
    """Rewrite paths under *path_root* relative to *reference_file*'s folder.

    Any path matching ``path_root`` (treated as a regular expression anchored
    at the start) is made relative to the directory holding the reference
    file and prefixed with ``$ORIGIN``; other paths pass through unchanged.

    Args:
        reference_file (str): file from which the reference directory is
            computed
        path_root (str): root (regex) of the relative paths
        paths: paths to be examined

    Returns:
        List of (possibly) relativized paths
    """
    anchor_dir = os.path.dirname(reference_file)
    root_pattern = re.compile(path_root)
    result = []
    for candidate in paths:
        if root_pattern.match(candidate):
            rel = os.path.relpath(candidate, start=anchor_dir)
            candidate = os.path.join('$ORIGIN', rel)
        result.append(candidate)
    return result
066134ef0ac41a451b7d839091ec728a50d2a2d2
32,915
def remove_empty_left_end(mps):
    """
    Removes any extra empty indices on the first site of the mps
    """
    # NOTE(review): despite the name ("left end"), this operates on mps[-1],
    # i.e. the last tensor — confirm whether the mps is stored reversed.
    nright = len(mps[-1].legs[2])
    if nright > 1:
        mps[-1].unmerge_ind(2)
        # Walk the freshly unmerged indices from the back so each removal
        # does not shift the positions of indices not yet visited.
        for i in reversed(range(len(mps[-1].legs[0])+len(mps[-1].legs[1]),len(mps[-1].legs[0])+len(mps[-1].legs[1])+nright-1)):
            mps[-1] = mps[-1].remove_empty_ind(i)
    return mps
04c6da5fbdecfdaf1657a17d9d0a550b92c6ec29
32,916
def PackageFromFilename(fname: str) -> str:
    """! Derive the package name (enclosing folder) from a file path.

    @param fname (str): name of file (with path) to extract the package name
        (enclosing folder) from.
    @return (str) the extracted package name, or '' when the path has no
        folder component.
    """
    if '/' not in fname:
        # No folder means no package for us.
        return ''
    folder = fname.rsplit('/', 1)[0]       # drop the filename component
    if '/' in folder:
        folder = folder.rsplit('/', 1)[1]  # keep only the innermost folder
    return folder
f4475948b03e604c10579a4b2a2c2dedc5d9e373
32,919
def config_file(tmpdir_factory):
    """Creates a sample looker.ini file and returns its path.

    NOTE(review): this is a pytest fixture body (takes `tmpdir_factory`);
    the ini content below exercises several section shapes — full config,
    legacy section, bare-minimum, empty, and quoted values.
    """
    filename = tmpdir_factory.mktemp("settings").join("looker.ini")
    filename.write(
        """
[Looker]
# Base URL for API. Do not include /api/* in the url
base_url=https://host1.looker.com:19999
# API 3 client id
client_id=your_API3_client_id
# API 3 client secret
client_secret=your_API3_client_secret
# Set to false if testing locally against self-signed certs. Otherwise leave True
verify_ssl=True

[OLD_API]
base_url=https://host2.looker.com:19999
client_id=your_API3_client_id
client_secret=your_API3_client_secret
verify_ssl=

[BARE_MINIMUM]
base_url=https://host3.looker.com:19999/

[BARE]
# Empty section

[BARE_MIN_NO_VALUES]
base_url=""

[QUOTED_CONFIG_VARS]
base_url="https://host4.looker.com:19999"
verify_ssl='false'
"""
    )
    return filename
978488a8371435b8b2ca48115e32df6d0e389bda
32,921
def closest_intersection(points1, points2):
    """Return the index of intersection point between ``points1`` and ``points2``.

        points1[idx1] *           * points1[idx2]
                        \\       /
                          *   <--- intersection point
                        /       \\
        points2[idx1] *           * points2[idx2]

    Parameters
    ----------
    points1, points2 : list of int

    Returns
    -------
    int

    Notes
    -----
    NOTE(review): implicitly returns None when points2 never exceeds
    points1; and when the crossing happens at index 0, ``idx - 1`` is -1
    and wraps to the *last* element — confirm both are intended by callers.
    """
    for (idx, (p1, p2)) in enumerate(zip(points1, points2)):
        if p2 > p1:
            # First index where the second curve rises above the first.
            idx1 = idx - 1
            idx2 = idx
            # Pick whichever side of the crossing the curves are closest on.
            diff1 = abs(points1[idx1] - points2[idx1])
            diff2 = abs(points1[idx2] - points2[idx2])
            if diff1 <= diff2:
                return idx
            else:
                return idx + 1
2f469073f604ca2d77a6d360f73e5134482e9724
32,925
import ast


def get_names(expression):
    """Collect the set of identifier names appearing in *expression*.

    :param expression: a parsed ``ast`` node (e.g. from ``ast.parse``).
    :return: set of names found in ``ast.Name`` nodes.
    """
    return {node.id for node in ast.walk(expression)
            if type(node) is ast.Name}
66f64a944b825a5be4a23c72260914df02d8180b
32,926
import os


def read_bin_file(filename: str):
    """Read and return the binary contents of *filename*.

    :param filename: path of the file to read.
    :raises FileNotFoundError: if *filename* does not name an existing file.
    :return: the file's bytes.
    """
    if not os.path.isfile(filename):
        # Include the offending path in the error instead of raising an
        # empty, uninformative FileNotFoundError().
        raise FileNotFoundError(filename)
    with open(filename, "rb") as fd:
        return fd.read()
4a1c746307f25665b0afa4f16c4d5ba80cd62be2
32,928
def _GenerateGradleProperties():
    """Returns the data for gradle.properties as a string."""
    return (
        '# Generated by //build/android/gradle/generate_gradle.py\n'
        '\n'
        '# Tells Gradle to show warnings during project sync.\n'
        'org.gradle.warning.mode=all\n'
    )
8a9b5962d047a45d96060c9c8ee7f1b5e89f5d9f
32,929
import re


def parse_original(original_file):
    """Parse original wikitable content into a dictionary, keyed by atoms.

    Rows are separated by '|-' lines; the first cell of a row (an
    ``{{ALB|...}}`` template) supplies the key, subsequent cells become the
    list of values. Duplicate keys are merged with a '*duplicate*' marker
    between the two value lists.
    """
    words = {}
    key = None
    pending = []
    # <nowiki> tags are optional. Meant to extract the text inside the
    # template.
    pat = re.compile(r'\| \{\{ALB\|(?:<nowiki>)?([^<>]*)(?:</nowiki>)?\}\}')
    pat2 = re.compile(r'\| ?(.*)')
    for line in original_file:
        line = line.rstrip()
        if line == '|-':
            # Row boundary: flush the accumulated row, merging duplicates.
            if key is not None:
                if key in words:
                    pending = words[key] + ["*duplicate*"] + pending
                words[key] = pending
            key = None
            pending = []
            continue
        if key is None:
            # First cell of the row carries the key template.
            match = pat.fullmatch(line)
            if not match:
                raise ValueError("Couldn't match " + line)
            key = match[1]
        else:
            match = pat2.fullmatch(line)
            if not match:
                raise ValueError("Couldn't match " + line)
            pending.append(match[1])
    # Flush the final row (files need not end with a '|-' separator).
    if key is not None:
        if key in words:
            pending = words[key] + ["*duplicate*"] + pending
        words[key] = pending
    return words
04e46f5e945042065ca7be82110650b162c99084
32,931
import torch


def nsgan_g_loss(D_out_fake, weight: float, **kwargs):
    """Non-saturating generator criterion from Goodfellow et al. 2014.

    :param D_out_fake: dict whose "score" entry holds discriminator logits
        for generated samples.
    :param weight: multiplier applied to the per-sample loss.
    :return: tuple of (flattened weighted loss tensor, dict of log values).
    """
    score = D_out_fake["score"]
    loss = torch.nn.functional.softplus(-score)
    log_values = {"g_loss": loss.mean()}
    return loss.view(-1) * weight, log_values
ba1046bbb9e4f354a04e86eab76f236076ae990f
32,934
def calc_duration(blocks):
    """Calculate the duration of an event or block.

    Determine the maximum duration of the provided events.

    :param blocks: a single event/block or a list of them; each has a
        ``type`` attribute selecting which timing fields contribute.
    :return: the maximum duration across the provided blocks.
    """
    # Bug fix: `blocks is not list` compared identity against the `list`
    # type object, so it was True even for actual lists — list inputs were
    # wrapped again and crashed with AttributeError inside the loop.
    if not isinstance(blocks, list):
        blocks = [blocks]
    duration = 0
    for block in blocks:
        if block.type == 'delay':
            duration = max(duration, block.delay)
        elif block.type == 'rf':
            duration = max(duration,
                           block.t[-1] + block.dead_time + block.ringdown_time)
        elif block.type == 'grad':
            duration = max(duration, block.t[-1])
        elif block.type == 'adc':
            duration = max(duration,
                           block.delay + block.num_samples * block.dwell
                           + block.dead_time)
        elif block.type == 'trap':
            duration = max(duration,
                           block.rise_time + block.flat_time + block.fall_time)
    return duration
64d73d954c7dc4b1acacce977d69857f4a2dd6cb
32,935
import turtle


def create_turtle(c, s):
    """Create and configure a turtle.

    :param c: turtle's pen colour
    :param s: turtle's pen size
    :return: the fully created ``turtle.Turtle`` object
    """
    pen = turtle.Turtle()
    pen.pencolor(c)
    pen.pensize(s)
    return pen
3d9d35133a0c8a9c29f9a6a0fa6ff8b101930a08
32,937
def clean_response(inp): """ Remove arbitrary characters from the response string """ # Remove all alert characters. inp = inp.replace('', '') return inp
c5c3b8ab7c4ce3470eba9ec1f3d8ad0765065041
32,938
def doble_queso(pizza):
    """ (list of str) -> list of str

    Add cheese ('queso') at the start and end of the pizza when missing.

    >>> doble_queso(['queso', "jamon"])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(["jamon", 'queso'])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(["jamon"])
    ['queso', 'jamon', 'queso']
    >>> doble_queso(['queso', "jamon", 'queso'])
    ['queso', 'jamon', 'queso']

    :param pizza: list of str, the pizza to top up
    :return: the pizza with cheese at both ends (input is not mutated)
    """
    topped = pizza.copy()
    if topped[0] != 'queso':
        topped.insert(0, 'queso')
    if topped[-1] != 'queso':
        topped.append('queso')
    return topped
a8ea9d6b63989e616f00fd9684053fcdffdb1d9d
32,939
def dump_property(name, access):
    """Format the output of demangle_property() for use in var_dump().

    access '' means public, '*' protected, anything else private (with the
    declaring class name embedded).
    """
    if access == '':
        return '"%s"' % name
    if access == '*':
        return '"%s":protected' % name
    return '"%s":"%s":private' % (name, access)
4cff221ae43cff7fff22f5dfdfb024a2a9d58483
32,940
def normalise_dict(value_dict):
    """Normalise dict values so they sum to one, preserving their ratios.

    :param value_dict: dict
        dictionary whose (numeric) values will be adjusted
    :return: dict
        same-shaped dictionary with values scaled by the original total
    """
    # Hoist the total: the original recomputed sum(values) once per key,
    # making the comprehension O(n^2) for no benefit.
    total = sum(value_dict.values())
    return {key: value / total for key, value in value_dict.items()}
e7b7f59c4243f72468af0f6f57185da070c10e52
32,941
from typing import Iterable
import hashlib


def hash_sample_ids(sample_names: Iterable[str]) -> str:
    """Derive a deterministic 32-character hash from a set of sample names.

    :param sample_names: set of strings (none may contain spaces, since a
        space is used as the join separator)
    :return: a string hash
    """
    for name in sample_names:
        assert ' ' not in name, name
    joined = ' '.join(sorted(sample_names))
    return hashlib.sha256(joined.encode()).hexdigest()[:32]
33709efe686c86f71da7230da1b93bfeaa1b7024
32,942
def parse_substring(allele, pred, max_len=None):
    """Take the leading run of characters of *allele* satisfying *pred*.

    At most *max_len* characters are examined when it is given.

    :return: tuple of (matched prefix, remaining suffix).
    """
    limit = len(allele) if max_len is None else min(max_len, len(allele))
    taken = 0
    while taken < limit and pred(allele[taken]):
        taken += 1
    return allele[:taken], allele[taken:]
2d3cd3a26f397e28660e5cd116943c3b619e50a8
32,945
def preprocess(words):
    """Return *words* lower-cased with punctuation stripped from each token."""
    punct_str = '!"#$%&\'()*+,-./:;<=>/?@[\\]^_`{|}~«» '
    tokens = (token.strip(punct_str).lower() for token in words.split())
    return ' '.join(tokens)
24827adac7438189d78f3451a34e61a42cecb81f
32,947
def terminate_instances_dialog(context, request, batch_terminate_form=None):
    """Render context for the batch-terminate instances dialog."""
    return {'batch_terminate_form': batch_terminate_form}
14dbd07280735f2181348d4d8ad8d4c6c0958a63
32,949
import math


def distance(start, end):
    """Calculate the Euclidean distance between two (x, y) points."""
    x0, y0 = start
    x1, y1 = end
    # math.hypot is the idiomatic form and avoids intermediate
    # overflow/underflow compared to sqrt(dx*dx + dy*dy).
    return math.hypot(x1 - x0, y1 - y0)
4b9ef2b58686259a8de29b77cdba9b27753f2332
32,950
def strpbrk(cpu_context, func_name, func_args):
    """Locate characters in string.

    Emulates C ``strpbrk``: returns a pointer to the first occurrence in
    str1 of any of the characters that are part of str2, or 0 (NULL) when
    there are no matches.
    """
    str1_ptr, str2_ptr = func_args
    haystack = cpu_context.memory.read_data(str1_ptr)
    accept = cpu_context.memory.read_data(str2_ptr)
    match_offset = next(
        (i for i, ch in enumerate(haystack) if ch in accept), None)
    if match_offset is None:
        return 0
    return str1_ptr + match_offset
c069193d22c9d1e94f45aa77c46c9046a1239a96
32,951
def _ray_remote(function, params):
    """Run *function* on a ray worker (see ray documentation).

    :param function: function to be executed remotely.
    :type function: callable
    :param params: Parameters of the run.
    :type params: dict
    :return: whatever *function* returns (a ray object when remote).
    """
    return function(params)
0aae675af23be189b8e15504ccde41e095f4b4d6
32,952
import random


def get_random_color():
    """Pick a random colour name from a fixed palette."""
    palette = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
    return random.choice(palette)
bd6c352030ecbe705dc8c32d8e7f7a6ea925286f
32,953
def get_clean_paragraph(count):
    """Create a clean (empty) paragraph dict.

    Returns
    -------
    dict
        A dict with empty title and content plus the given count.
    """
    return dict(title="", content="", count=count)
e6b16c6d9065e428547d14116db1451feaca3668
32,954
def loss_fn(model, batch):
    """loss_fn as required by do_train.

    Runs *model* on the batch's image and annotations and returns the result.
    """
    image = batch['image']
    annotations = batch['annotations']
    return model(image, annotations)
ac5f99a6cea610594dffd5fdf56c4c685acf5241
32,955
def _xmlobj_to_jsonobj_flat(inxmlobj, inpreffix='', infields=[], inmaxlevel=0, inuseattrs=True, inskipfirsttag=False): """ Получение одной плоской записи из тега Пример XML: <parent1><parent2><item1>123</item1></parent2><parent21>456</parent21></parent1> Результат при передаче тега parent1: {"parent1_parent2_item1": "123", "parent1_parent21": "456"} :param inxmlobj: XML-тег :type inxmlobj: bs4.element.Tag :param inpreffix: Строка префикса для json поля :type inpreffix: str :param infields: Список наименований полей которые должны быть в результате. Пустой список - все поля :type infields: list :param inmaxlevel: Максимальное кол-во погружения в xml :type inmaxlevel: int :param inuseattrs: Выводить аттрибуты или нет :type inuseattrs: bool :param inskipfirsttag: Убрать из начала ключа словаря имя искомого тега :type inskipfirsttag: bool :return: Плоский словарь с полями из имен тегов через _ :rtype: dict """ # Получаем json из одного тега в плоском виде data = {} def get_json_rec(inxmlobj, inpreffix, level): if inxmlobj.findChildren(recursive=False): if inmaxlevel == 0 or inmaxlevel >= level: for item in inxmlobj.findChildren(recursive=False): get_json_rec(item, inpreffix + '_'+item.name, level+1) else: if inpreffix not in data: # Добавлять только если данных нет if not infields or inpreffix in infields: # Добавлять только если поле есть в infields key = inpreffix.lstrip(' _') data[key] = inxmlobj.text if inuseattrs and inxmlobj.attrs: for attr in inxmlobj.attrs: key = '{}_attr_{}'.format(inpreffix, attr).lstrip(' _') data[key] = inxmlobj.attrs[attr] if inskipfirsttag: get_json_rec(inxmlobj, '', 1) else: get_json_rec(inxmlobj, inpreffix + inxmlobj.name, 1) return data
40a4f5ab0dce2f4d063206e8c8925faff585506e
32,956
import torch


def gelu(input_tensor):
    """Gaussian Error Linear Unit — a smoother version of the RELU.

    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        input_tensor: float Tensor to perform activation.

    Returns:
        `input_tensor` with the GELU activation applied.
    """
    gaussian_cdf = 0.5 * (1.0 + torch.erf(input_tensor / (2.0 ** 0.5)))
    return input_tensor * gaussian_cdf
9f4bb523d287314ff4fc68385ea2778f4726deb2
32,957
def _calculate_wide_optimal_results(optimal_results, scoring, estimator_type):
    """Calculate optimal results in wide format.

    Pivots the long-format results to one column per oversampler and fills
    the ``Metric`` column according to ``scoring``: a list of scorers keeps
    the per-row metric names (with the 'mean_test_' prefix stripped), a
    single scorer name is assigned directly, and None falls back to the
    estimator's default metric.
    """
    wide_optimal_results = optimal_results.pivot_table(
        index=['Dataset', 'Classifier', 'Metric'],
        columns=['Oversampler'],
        values='Score').reset_index()
    wide_optimal_results.columns.rename(None, inplace=True)
    if isinstance(scoring, list):
        wide_optimal_results['Metric'].replace(
            'mean_test_', '', regex=True, inplace=True)
    elif isinstance(scoring, str):
        # Bug fix: this branch originally re-tested `isinstance(scoring,
        # list)` and was unreachable; a single scorer name is assigned here.
        wide_optimal_results['Metric'] = scoring
    else:
        wide_optimal_results['Metric'] = (
            'accuracy' if estimator_type == 'classifier' else 'r2')
    return wide_optimal_results
487d4502a1188dab1fe4f33478f95a17dce2346d
32,959
def _query_for_log(query: bytes) -> str:
    """Collapse a psycopg2 query into a nicely loggable one-line string.

    Decodes the bytes, replaces newlines with spaces and squeezes runs of
    whitespace into single spaces.

    :param query: Query ran by psycopg2
    :return: Cleaned up string representing the query
    """
    decoded = query.decode().replace('\n', ' ')
    return ' '.join(decoded.split())
047480bb6ad41621fb7f92ffa6fedfb03cfa4f6b
32,962
import subprocess


def get_or_create_heroku_config(config_name, config_value, environment_name):
    """Ensure a Heroku config variable is set, creating it when missing.

    Returns 0 when the variable already has a value, otherwise the return
    code of the ``heroku config:set`` invocation.
    """
    probe = subprocess.run(
        ["heroku", "config:get", config_name, "--remote", environment_name],
        capture_output=True,
        encoding="utf-8",
    )
    if probe.stdout.strip():
        return 0
    setter = subprocess.run(
        ["heroku", "config:set", f"{config_name}={config_value}",
         "--remote", environment_name]
    )
    return setter.returncode
431a4977a950d45285c450c93026a41b87c578c3
32,964
def brent( t: float, x1: float, y1: float, x2: float, y2: float, x3: float, y3: float, x4: float, y4: float, ) -> float: """ Estimates the root using Brent's method. If it can, inverse quadratic interpolation is used. Otherwise, secant interpolation is used. If the result is outside of the current bracket, or if convergence is too slow, then bisection is used instead. """ # Check if bisection was previously used. flag = t == 0.5 # Try inverse quadratic interpolation. if y2 != y3: # Numerically stable formulas. al = (x3 - x2) / (x1 - x2) a = y2 / (y1 - y2) b = y3 / (y1 - y3) c = y2 / (y3 - y2) d = y1 / (y3 - y1) t = a*b + c*d*al # Default to linear interpolation. else: t = y2 / (y2 - y1) # Check if bisection should be used. if ( # Inverse quadratic interpolation out of bounds. not 0 < t < 1 # Bisection was previously used # but x is going to be closer to x2 than x1. or flag and t >= 0.5 # Interpolation was previously used # but the convergence was too slow. or not flag and t * abs(x1 - x2) >= 0.5 * abs(x3 - x4) ): t = 0.5 return t
6bae547313b3e3aa5720f5957859e3f727ba136e
32,965
def check_target_dir(args):
    """Validate (and possibly create) the target directory.

    Considers the force flag: a non-empty existing directory is only
    accepted when force is given; a missing target is created (with parents
    when force is set).

    :param args: Namespace of user provided arguments
    :type args: argparse.Namespace
    :raises FileExistsError: If the target directory is not empty and the
        force flag is not provided, or if the target exists as a file.
    :return: True if the program should proceed
    :rtype: bool
    """
    target = args.target
    if not target.exists():
        target.mkdir(parents=args.force)
        return True
    if target.is_dir():
        if args.force or not any(target.iterdir()):
            return True
        raise FileExistsError(
            f"Target directory provided ['{target}'] is not empty.")
    raise FileExistsError(f"Target directory provided ['{target}'] is a file.")
ccea02791bdf3b0817449c30fe831b54545004ee
32,968
def commit_to_url(full_commit_hash: str) -> str:
    """Provide a remote URL (eg. for GitHub) for a given commit ID.

    Args:
        full_commit_hash (str): commit ID

    Returns:
        str: URL on the hacksoc.org repository
    """
    base = "https://github.com/hacksoc/hacksoc.org/commit/"
    return base + full_commit_hash
f621ac8f3db23d3ce8fa780b0e355be55a4897a7
32,971
def readClassificationLine(class_line):
    """Parse one classification line from a detection file.

    Returns [] for blank lines, otherwise [confidence (float), label] taken
    from whitespace-separated fields 3 and 4.
    """
    if not class_line.strip():
        return []
    tokens = class_line.split()
    return [float(tokens[3]), tokens[4]]
f14365e73dfafc4d1de02b51896d2a1c1acde00e
32,973
import re


def add_scheme(url):
    """Return a well-formed URL, prepending 'http://' when no scheme exists.

    Any 'www.' substring is stripped from the URL first.

    :param url: A string containing a URL.
    :return: A string containing a well-formed URL.
    """
    url = url.replace("www.", "")
    if re.match('(?:http|ftp|https)://', url):
        return url
    return 'http://{}'.format(url)
d9ccd09f6a66ccbebb98fc7cc4d1c85c24dca6f2
32,974
def source_shorthand(source):
    """Expand source aliases (e.g. 4u ==> 4u1820).

    Unknown sources are returned unchanged.
    """
    aliases = {'4u': '4u1820', 'gs': 'gs1826'}
    return aliases.get(source, source)
11d372edd97dbf03181473812ced177722c14487
32,975
def update(input_ch, answer, dashed):
    """Update hangman progress after a right guess (string manipulation).

    :param input_ch: str, the alphabet character guessed this round
    :param answer: str, the final answer
    :param dashed: str, the progress so far ('-' marks unsolved positions)
    :return: str, the updated progress
    """
    revealed = []
    for i, ch in enumerate(answer):
        if ch == input_ch:
            # Newly guessed position.
            revealed.append(input_ch)
        elif dashed[i] != '-':
            # Keep what was already solved.
            revealed.append(dashed[i])
        else:
            revealed.append('-')
    return ''.join(revealed)
5e2528fff524d6a4082b04998c9cda68380531b3
32,976
def read_DDTpos(inhdr):
    """Read reference wavelength and DDT-estimated position from DDTLREF
    and DDT[X|Y]P keywords.

    Will raise KeyError if keywords are not available, or if the predicted
    position fails the sanity check (|x| and |y| must be below 7).
    """
    try:
        lddt = inhdr['DDTLREF']   # Ref. wavelength [A]
        xddt = inhdr['DDTXP']     # Predicted position [spx]
        yddt = inhdr['DDTYP']
    except KeyError as err:
        raise KeyError("File has no DDT-related keywords (%s)" % err)

    # Sanity check on the predicted position.
    if abs(xddt) >= 7 or abs(yddt) >= 7:
        raise KeyError(
            "Invalid DDT position: %.2f x %.2f is outside FoV" % (xddt, yddt))

    return lddt, xddt, yddt
39cce130305440f14db8e5378ae4e66b8e124b20
32,979
from pathlib import Path


def get_project_root() -> Path:
    """Return the path to the project root (the top-level dat_analysis
    folder which contains src etc)."""
    # Three directory levels above this module.
    return Path(__file__).parents[2]
546015b3555ff7ace6109e064dd94ae7db9491de
32,980
def AR_6(dul):
    """Association release AR-6.

    On receiving P-DATA-TF during attempted association release request,
    send a P-DATA indication and remain in the same state.

    State-event triggers: Sta7 + Evt10

    Parameters
    ----------
    dul : dul.DULServiceProvider
        The DICOM Upper Layer Service instance for the local AE

    Returns
    -------
    str
        ``'Sta7'``, the next state of the state machine
    """
    # Issue P-DATA indication to the user.
    dul.to_user_queue.put(dul.primitive)
    return 'Sta7'
03f8464e6483421bddd5d570829506362f14092c
32,981
def normaliseFrequency(f, timespan):
    """Convert a frequency in the units of the data into normalised frequency.

    Useful for computing input arguments in {high|low}Pass().

    Inputs:
    f         (float) Frequency in, e.g., Hz
    timespan  (float) Range of data in, e.g., seconds: the time interval
              between the last and first data points in your set.
    """
    return f / float(timespan)
becf2a5753ef8b1fe9fb220d6b2b2f5288f71906
32,982
import random


def erzeuge_zufall(laenge, sonderzeichen=3):
    """Return a random string of length ``laenge``.

    Characters are drawn in rotation from the character classes below;
    ``sonderzeichen`` < 4 restricts the rotation to the first classes only.
    """
    klassen = ['abcdefghijkmnopqrstuvwxyz',
               'ABCDEFGHJKLMNPQRSTUVWXYZ',
               '23456789_-',
               '@.%&+!$?/()#*']
    zeichen = []
    for pos in range(laenge):
        # random.sample(..., 1) keeps the original draw behaviour.
        zeichen.append(random.sample(klassen[pos % sonderzeichen], 1)[0])
    return ''.join(zeichen)
9c50d75338b2502c296e29059c8a37ca8d7b61c1
32,984
def get_votes(conn, track_id):
    """
    Fetch the vote count for ``track_id`` from the votedata table.

    :param conn: an open database connection
    :param track_id: id of the track to look up
    :return: the matching (votes,) row, or None if the track is unknown
    """
    query = """
        SELECT votes
        FROM votedata
        WHERE track_id = ?;
    """
    cursor = conn.cursor()
    cursor.execute(query, (track_id,))
    return cursor.fetchone()
4f6e572f329c3226811d4073fbe22ce56a62633b
32,987
def upper():
    """Build a converter that upper-cases its input.

    The returned callable passes falsy values (None, '') through unchanged.
    """
    def convert(value):
        if not value:
            return value
        return value.upper()
    return convert
d5390aaa6b8fb1bdbad783a2f4b083c673dda416
32,988
from pathlib import Path


def mtimes(file_list):
    """Return a dict mapping each file in ``file_list`` to its modification
    time (st_mtime)."""
    return {name: Path(name).stat().st_mtime for name in file_list}
d8f99989bb09a048582f1816d6a4a59604e5af68
32,989
def factor_sum(n): """ Given a positive integer n, returns the sum of the digits of the sum of the distinct factors of n, where a FACTOR of n is an integer that divides evenly into n. For example, if n is 28, this function returns 11, because: -- the distinct factors of n are: 1 2 4 7 14 28 -- and the sum of those numbers is 1 + 2 + 4 + 7 + 14 + 28, which is 56 -- and the sum of the digits of 56 is 11, so this function returns 11 when n is 28. As another example, if n is 25, this function returns 4, because: -- the distinct factors of n are: 1 5 25 -- and the sum of those numbers is 1 + 5 + 25, which is 31 -- and the sum of the digits of 31 is 4, so this function returns 4 when n is 28. *** ASK FOR AN EXPLANATION IF YOU DO NOT UNDERSTAND THE ABOVE. *** """ ########################################################################### # This function is PURPOSELY implemented INCORRECTLY (it just returns 0). # DO NOT IMPLEMENT factor_sum. Just leave it as it is (returning 0). ########################################################################### return 0 ########################################################################### # DO NOT modify the above line of code! ###########################################################################
973cb460956baf10d9d07dda4ddff82542df99ac
32,990
import re


def get_map_seed(instructions):
    """Extract the map seed from a (bytes) instructions blob.

    :param instructions: raw instruction bytes to scan
    :return: the seed as an int, or None when no seed is present
    """
    found = re.search(rb'\x00.*? (\-?[0-9]+)\x00.*?\.rms', instructions)
    if found is None:
        return None
    return int(found.group(1))
22d8ecf25ee50b2ee1e3cc358d1441c277ebe569
32,992
def mixin_with_subclass(module, mix_class):
    """Create a subclass of both ``mix_class`` and ``type(module)``, reusing
    all the data from the ``module`` object."""
    BaseType = type(module)

    class SubClass(mix_class, BaseType):
        def __init__(self, module):
            assert isinstance(module, BaseType)
            # Adopt the wrapped object's state by copying its dict directly.
            self.__dict__ = module.__dict__.copy()

    return SubClass(module)
67cd292efba2aa19085dafb0f66c820351c20a62
32,993
import inspect


def function_path(fn):
    """Return the fully-qualified name of ``fn``: module.submodule.name,
    or just the bare name when no defining module can be determined."""
    module = inspect.getmodule(fn)
    if module is None:
        return fn.__name__
    return '{}.{}'.format(module.__name__, fn.__name__)
bc83c45e01c4f7b6789b80d727a06c4f31522fce
32,996
def m1m2_to_nu(m1, m2):
    """Return the symmetric mass ratio nu = m1*m2 / (m1 + m2)**2."""
    total_mass = m1 + m2
    return (m1 * m2) / total_mass ** 2
a9b42f6c5360d190403c6c680cbf4406f2df7150
32,997
def make_fetch_args():
    """Fixed helper function for ensuring consistency between point and HUC
    queries.

    Returns the coverage ids alongside the matching summary-decade windows.
    """
    baseline = "iem_cru_2km_taspr_seasonal_baseline_stats"
    projected = "iem_ar5_2km_taspr_seasonal"
    cov_ids = [baseline, projected, projected, projected]
    summary_decades = [None, (3, 5), (6, 8), None]
    return cov_ids, summary_decades
bcd8305242c31eb4d00e1bd33bfd2d2a5a79d91f
32,998
def select_trajectory(mdsys):
    """Extract and return the trajectory from the simulation data object."""
    return mdsys.trajectory
8c493883389f8479a8ff7d3c9e4c857713d5627b
33,000
import copy def _shift_left(row): """Performs what happen at row level when you swipe left in Threes Adding next tile does not happen at this level. This is the fundamental operation of the board. All other behaviors are based on this one. """ row_copy = copy.copy(row) for i in range(1, len(row_copy)): # Move tile left if the left space is empty if row_copy[i-1] == 0: row_copy[i-1], row_copy[i] = row_copy[i], row_copy[i-1] # Merge left, if the two tiles are the same, and divisible by 3 elif row_copy[i-1] == row_copy[i] and row_copy[i] % 3 == 0: row_copy[i-1] *= 2 row_copy[i] = 0 # Merge left, if two tiles adds up to 3 elif row_copy[i-1] + row_copy[i] == 3: row_copy[i-1] = 3 row_copy[i] = 0 return row_copy, 0 if row == row_copy else 1
c2aaad5f53626268acdbe054fb07df4274e772dc
33,001
import requests


def get_repo_public_key(username: str, repo_name: str, token: str) -> dict:
    """
    Get the public key for a repository via the GitHub API.

    At least for private repos, a personal access token (PAT) with the repo
    scope is required.

    :param username: The users github username
    :param repo_name: The repositories name
    :param token: The PAT of the user with repo scope
    :return: A dict containing the public key and its ID
    """
    url = f'https://api.github.com/repos/{username}/{repo_name}/actions/secrets/public-key'
    response = requests.get(url, headers={'Authorization': f'token {token}'})
    return response.json()
e237df2ae549a39cb86551fbd9fafb52bd491aad
33,002
from typing import List


def flatten_once(lst: List) -> List:
    """Flatten ``lst`` by exactly one level of nesting."""
    flattened: List = []
    for inner in lst:
        flattened.extend(inner)
    return flattened
5b094b6c313cae15f2ce0a56d0783d4f723164ab
33,003
def isZeroArea(newFace):
    """Return True if newFace has only two incident edges (so it can be
    removed), i.e. walking two half-edges from the outer edge returns to it."""
    return newFace.outer.next.next is newFace.outer
fcde4ee89f6f41e0f12f768cd36e119c998b093c
33,004
import numpy def _split_params(items: numpy.ndarray): """Split the item matrix parameters into columns. :param items: an item matrix with four columns representing four parameters. :returns: a 4-tuple with each column in a different slot.""" return items[:, 0], items[:, 1], items[:, 2], items[:, 3]
d38105f39cd42e30ad449f7d180fcdff14f92301
33,008
def parse_WhatModulesVTKOutput(str_to_parse):
    """
    Parse the string looking for any VTK modules.

    :param str_to_parse: The string to parse
    :return: The VTK modules found.
    """
    # Lines containing any of these tokens (or empty lines) are chatter,
    # not module names.
    skip_tokens = ("find_package", ")", "Your", "All")
    modules = []
    for line in str_to_parse.split('\n'):
        if line == "" or any(tok in line for tok in skip_tokens):
            continue
        modules.append(line.strip())
    return modules
8c14a24b7c8ad0ca65822ddcdf80badccc22f3a5
33,009
def reconcile(current, desired):
    """Return the pair (to_add, to_remove): the elements that must be added
    to / removed from ``current`` to turn it into ``desired``.
    """
    return desired - current, current - desired
0fc5ad379b26543162457573a22c357d49f1b757
33,010
import os


def remove_file(path):
    """
    Remove the file at ``path``.

    returns: int -- 0 on success, 1 when ``path`` is empty/falsy or the
    file does not exist
    """
    if not path:
        return 1
    try:
        os.remove(path)
    except FileNotFoundError:
        return 1
    return 0
17da08c3176ba204bbfda5516fb7870665a61e73
33,011
import os
import logging


def unpack_files(basedir, txt):
    """
    Unpack a custom tar-like format in which multiple file paths can be
    specified, separated by '^^^ ' marker lines.

    See the file template/_dynamic_files.jinja2 for an example of this.

    :param basedir: directory under which the listed paths are created
    :param txt: the packed text; each '^^^ path' line starts a new output
        file and subsequent lines are written into it (with a trailing
        newline appended to each)
    :return: list of paths of the files that were written
    :raises Exception: if non-empty content appears before the first marker
    """
    MARKER = '^^^ '
    f = None
    tgts = []
    # try/finally ensures the current handle is closed even if makedirs,
    # open or write raises mid-way (the original leaked it on error).
    try:
        for line in txt.split("\n"):
            if line.startswith(MARKER):
                # Marker line: close the previous file and start a new one.
                path = os.path.join(basedir, line.replace(MARKER, ""))
                os.makedirs(os.path.dirname(path), exist_ok=True)
                if f is not None:
                    f.close()
                f = open(path, "w")
                tgts.append(path)
                logging.info(' Unpacking into: {}'.format(path))
            elif f is not None:
                f.write(line + "\n")
            elif line == "":
                # Blank lines before the first marker are tolerated.
                continue
            else:
                raise Exception('File marker "{}" required in "{}"'.format(MARKER, line))
    finally:
        if f is not None:
            f.close()
    return tgts
77a96a145bdc77f4d5c5acd461044d8ba378fdad
33,012
def facade_versions(name, versions):
    """Return a facade description in the format expected by the connection
    facades inspection.

    :param name: name of the facade (a trailing 'Facade' suffix is stripped)
    :param versions: versions to support by the facade
    """
    suffix = 'Facade'
    if name.endswith(suffix):
        name = name[:-len(suffix)]
    return {name: {'versions': versions}}
4378df3da64453ee8bd6f278cee260e877fc2cdd
33,014
def read_multiple(reader, batch_size, count, clip_last=True):
    """
    Stack data from ``reader`` for multi-devices.

    Each dict yielded by ``reader`` must hold ``batch_size * count`` items
    per non-None key; it is split into ``count`` dicts of ``batch_size``
    items each and yielded as a list.  Dicts with a wrong length are
    reported and skipped.

    :param reader: callable returning an iterable of data dicts
    :param batch_size: number of instances per device
    :param count: number of devices to split for
    :param clip_last: kept for interface compatibility (unused here)
    :return: a generator function yielding lists of ``count`` dicts
    """
    def __impl__():
        # one time read batch_size * count data for rnn
        for data in reader():
            inst_num_per_part = batch_size
            len_check = True
            for k in data.keys():
                if data[k] is not None:
                    if len(data[k]) != batch_size * count:
                        len_check = False
                        # str() is required here -- concatenating the raw
                        # value (often a list) would raise TypeError, which
                        # masked the original length error.
                        print("data check error!!, data=" + str(data[k]) +
                              ", k=" + str(k))
                        break
            if len_check:
                res = []
                for i in range(count):
                    split_data = {}
                    for k in data.keys():
                        if data[k] is not None:
                            split_data[k] = data[k][inst_num_per_part * i:
                                                    inst_num_per_part * (i + 1)]
                    res.append(split_data)
                yield res
    return __impl__
6e8c1de6f4d673bd2898f0c59480c3f944fa3a34
33,015
def _from_rgb(rgb): """translates an rgb tuple of int to a tkinter friendly color code """ return "#%02x%02x%02x" % rgb
464cab5007935824b8b7af28e97cc882ec0653ee
33,016
def is_enrollment_row(tag):
    """
    True if the tag is an enrollment row: a <tr> with exactly two <td>
    cells whose first cell reads 'Enroll'.

    :param tag: the tag to check
    :return: true if the tag is an enrollment row
    """
    if tag.name != 'tr':
        return False
    cells = tag.find_all('td')
    if len(cells) != 2:
        return False
    return cells[0].get_text() == 'Enroll'
d110c63e9ff2556e455c188fa7cc6754f3d6bc60
33,017
def Series_info(series: list) -> dict:
    """Summarise a numeric series.

    Args:
        series (list): array of series values (length >= 3)

    Returns:
        dict: the third term, third-last term, total, and the series itself
    """
    return {
        "Third Term": series[2],
        "Third Last Term": series[-3],
        "Sum Of The Series": sum(series),
        "Series": series,
    }
fa2abb17edec8441b37fb8bb1e6e39093d5750b2
33,018
def to_spine_case(original: str) -> str:
    """Convert this_thing to this-thing."""
    return original.replace("_", "-").lower()
baf37fca4790018a4cbff77ed263da9cd44b6641
33,020
def uniq_list(in_list):
    """
    Return a copy of ``in_list`` with duplicates removed, preserving the
    order of first occurrence.

    :param in_list: Input list
    :return: List containing unique items.
    """
    # dict.fromkeys keeps insertion order (Python 3.7+) while deduplicating.
    return list(dict.fromkeys(in_list))
a3d69b3d76ae4f62ae0ea3faa6a6ff59d1dcde63
33,021
def to_set(labels_list):
    """Given a list of label lists (from annotations), return the set of
    unique labels.

    Parameters
    ----------
    labels_list : list of lists, i.e. labels from annotations

    Returns
    -------
    labelset

    Examples
    --------
    >>> labels_list = [voc.annot.labels for voc in vds.voc_list]
    >>> labelset = to_set(labels_list)
    """
    labelset = set()
    for labels in labels_list:
        labelset.update(labels)
    return labelset
f63e7ad1d3a88c25478a590c13399c04fd763c03
33,023
def get_metadata_actor(something):
    """
    Try to get actor/agent information from a piece of metadata.

    :param something: candidate metadata; only mappings are inspected
    :return: the 'agent' value when ``something`` is a dict (or a dict
        subclass), otherwise 'unknown'
    """
    fail = 'unknown'
    # isinstance, not an exact type() comparison, so dict subclasses such
    # as OrderedDict are accepted too.
    if isinstance(something, dict):
        return something.get('agent', fail)
    return fail
1a5b47e60432e3371ad37e1e9e1ca80d1bfe3e58
33,024
import os def _get_shade_auth(): """Return shade credentials""" options = dict( auth_type="None", auth=dict(endpoint="http://localhost:6385/",) ) if os.environ.get('OS_AUTH_URL'): options['auth_type'] = "password" options['auth'] = dict( username=os.getenv('OS_USERNAME', ""), password=os.getenv('OS_PASSWORD', ""), auth_url=os.getenv('OS_AUTH_URL', ""), project_name=os.getenv('OS_PROJECT_NAME', ""), domain_id=os.getenv('OS_USER_DOMAIN_NAME', ""), ) return options
ed936f6c707303c13ec5c042950874c117be21ca
33,025
def filter_stories(stories, triggerlist):
    """
    Takes in a list of NewsStory instances.

    Returns: a list of only the stories for which a trigger in triggerlist
    fires.  Each qualifying story appears exactly once, in its original
    order, even when several triggers fire on it (the original appended
    one duplicate per firing trigger).
    """
    return [story for story in stories
            if any(trigger.evaluate(story) for trigger in triggerlist)]
a3c1a810ba4a6e67fb8d8a11299150da5d891c20
33,028
def get_unity_quota_tree_parameters():
    """Return the ansible argument-spec required by the quota tree module
    on Unity."""
    opt_str = dict(required=False, type='str')
    opt_int = dict(required=False, type='int')
    return dict(
        filesystem_id=dict(opt_str),
        filesystem_name=dict(opt_str),
        state=dict(required=True, type='str', choices=['present', 'absent']),
        hard_limit=dict(opt_int),
        soft_limit=dict(opt_int),
        cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
        tree_quota_id=dict(opt_str),
        nas_server_name=dict(opt_str),
        nas_server_id=dict(opt_str),
        path=dict(opt_str),
        description=dict(opt_str),
    )
91601f121f1a6f483895d81a9f2003a3f4cda503
33,029
def decode_imsi(imsi64):
    """
    Convert the compacted uint back to a string; bits 1-2 encode how many
    leading zeros were stripped from the digits.

    Args:
        imsi64 - compacted representation of imsi with padding at end
    Returns:
        imsi string in the form IMSI00101...
    """
    padding = (imsi64 >> 1) & 0x3
    digits = imsi64 >> 3
    return 'IMSI{}{}'.format('0' * padding, digits)
8652b678aee5c43a68e8d3fc17590e1cac77863a
33,030
def check_peptide(peptide: str, AAs: set) -> bool:
    """
    Check whether the peptide contains only legal amino-acid letters.

    Args:
        peptide (str): peptide sequence.
        AAs (set): the set of legal amino acids. See alphapept.constants.AAs
    Returns:
        bool: True if all upper-case letters in the peptide are a subset of
        AAs, otherwise False (lower-case characters are ignored)
    """
    uppercase_letters = {ch for ch in peptide if ch.isupper()}
    return uppercase_letters.issubset(AAs)
20509525ab8d5a8cc25c305a6e55475236785ddf
33,034
def _validate_structure(structure): """Validates the structure of the given observables collection. The collection must either be a dict, or a (list or tuple) of dicts. Args: structure: A candidate collection of observables. Returns: A boolean that is `True` if `structure` is either a list or a tuple, or `False` otherwise. Raises: ValueError: If `structure` is neither a dict nor a (list or tuple) of dicts. """ is_nested = isinstance(structure, (list, tuple)) if is_nested: is_valid = all(isinstance(obj, dict) for obj in structure) else: is_valid = isinstance(structure, dict) if not is_valid: raise ValueError( '`observables` should be a dict, or a (list or tuple) of dicts' ': got {}'.format(structure)) return is_nested
e65f1b0e5a21282cf6e08a80a76ef5dd9a2176be
33,035
def assemble_api_cmd(url, cmd):
    """Join a base url and a command, inserting a '/' only when the url
    does not already end with one."""
    separator = '' if url.endswith('/') else '/'
    return url + separator + cmd
210b57f093990b9c8d44d87cce4efad82bc22813
33,036