content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def tokenized_dataset(dataset, tokenizer):
    """Tokenize each sentence together with its entity pair.

    The subject and object entities are joined with a ``[SEP]`` marker and
    passed to the tokenizer as the first segment, with the original sentence
    as the second segment.

    Args:
        dataset: mapping with ``subject_entity``, ``object_entity`` and
            ``sentence`` columns.
        tokenizer: tokenizer callable (HuggingFace-style interface assumed —
            confirm against the caller).

    Returns:
        The tokenizer output (padded/truncated "pt" tensors, max length 128).
    """
    entity_pairs = [
        subj + '[SEP]' + obj
        for subj, obj in zip(dataset['subject_entity'], dataset['object_entity'])
    ]
    return tokenizer(
        entity_pairs,
        list(dataset['sentence']),
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=128,
        add_special_tokens=True,
    )
33697b168a2d465c476d982e98968aab884c183d
47,503
def barycentricCoordinatesOfE2(P, A, B, C):
    """Return the barycentric coordinates (u, v, w) of point P in triangle ABC.

    Implementation from Christer Ericson's book "Real-Time Collision
    Detection".  A degenerate (zero-area) triangle causes a ZeroDivisionError.
    """
    # return barycentricCoordinatesOf(P, A, B, C, areaFunc=triangleSignedAreaE2)
    ab = B - A
    ac = C - A
    ap = P - A
    d_ab_ab = ab.dot(ab)
    d_ab_ac = ab.dot(ac)
    d_ac_ac = ac.dot(ac)
    d_ap_ab = ap.dot(ab)
    d_ap_ac = ap.dot(ac)
    # Cramer's rule on the 2x2 system; multiply by the reciprocal once
    # instead of dividing twice.
    denom = d_ab_ab * d_ac_ac - d_ab_ac * d_ab_ac
    inv_denom = 1 / denom
    v = (d_ac_ac * d_ap_ab - d_ab_ac * d_ap_ac) * inv_denom
    w = (d_ab_ab * d_ap_ac - d_ab_ac * d_ap_ab) * inv_denom
    u = 1 - v - w
    return u, v, w
23d39c4a6eaa754f06b1599b5ce8238c28042e3a
47,504
def parse_unit(units):
    """Flatten a MatML ``Units`` node into a compact string.

    Each unit with name ``u`` and power ``p`` contributes ``u`` followed by
    ``p`` (either part omitted when absent), and consecutive units are joined
    with ``.`` — e.g. ``kg.m-3``.

    :param units: a single unit dict or a list of unit dicts.
    :returns: the combined unit string.
    """
    # Normalise a bare dict to a one-element list.
    unit_list = units if isinstance(units, list) else [units]
    result = ''
    for unit in unit_list:
        name = unit.get('Name')
        power = unit.get('@power')
        # Separator only once something has already been emitted.
        if result != '':
            result += '.'
        if name:
            result += name
        if power:
            result += str(power)
    return result
903b2bcffa8c0fa93c41903afe2860b5d0bd765f
47,505
def are_all_0(lists, index):
    """Check whether every sub-list holds 0 at the given index.

    :param list lists: a list of lists to inspect.
    :param int index: the position to check in each sub-list.
    :returns: True when all values at ``index`` are 0, False as soon as one
        of them is not 0.
    """
    return all(sub[index] == 0 for sub in lists)
1fe4f8777618eed459907b2995170691be639e5b
47,506
def modify(boxes, modifier_fns):
    """Apply modifier functions to bounding boxes, in place.

    Args:
        boxes (dict or list): ``{"image_id": [box, box, ...], ...}`` or a
            flat list of bounding boxes
        modifier_fns (list): modifier functions applied in order; when one
            returns ``None`` the box is removed and the remaining modifiers
            are skipped for it

    Returns:
        (dict or list): the same container, after modification

    Warning:
        These modifier functions will mutate your bounding boxes and some of
        them can even remove bounding boxes. If you want to keep a copy of
        your original values, you should pass a copy of your bounding box
        dictionary:

        >>> import copy
        >>> import brambox.boxes as bbb
        >>>
        >>> new_boxes = bbb.modify(copy.deepcopy(boxes), [modfier_fns, ...])
    """
    def _apply_in_place(box_list):
        # Walk backwards so deleting an entry does not shift unvisited indices.
        for idx in reversed(range(len(box_list))):
            for modifier in modifier_fns:
                box_list[idx] = modifier(box_list[idx])
                if box_list[idx] is None:
                    del box_list[idx]
                    break

    if isinstance(boxes, dict):
        for image_boxes in boxes.values():
            _apply_in_place(image_boxes)
    else:
        _apply_in_place(boxes)
    return boxes
386cb12b0b985a3702d0fe6a3dc7a38e712c7dc1
47,508
def remap_keys(key_func, d):
    """Return a new dictionary whose keys are ``key_func`` applied to the
    old dictionary's keys (values unchanged)."""
    return {key_func(key): value for key, value in d.items()}
be7125b7bab735522e684d766c75b4745a8c11b3
47,510
import re def calculatedNormalisedDataForLines(lines): """ Get normalised data for the lines of the file. This function is intended as an example. With the help of the function the velocity data of the file are normalized to the absolute value of 1 to be able to measure the profile later with a individual current factor. The parser for the line content is developed as an example for both HUDDSCOL.txt and NYCCCOL.txt. The decimal separator is a dot and the column separator is a tab. The data structure required by the measureProfile() method is returned. The data is structured like the following example: [{"time": 0, "value": 0.1},{"time": 1, "value": 0.4},{"time": 2, "value": 0.3}] The structure is an array with a dictionary for each step. The dictionary has two keys: time: The time point of the value. value: The value, what the value is whether voltage, current or other is specified in the measureProfile() method. :param lines: Array with the data lines of the file as string. :returns: Explained data structure. """ maxValue = 0 normalisedData = [] seperatorRegex = re.compile(r"([0-9,.]+)[\W]+([0-9,.]+)") for line in lines: linematch = seperatorRegex.match(line) if linematch != None: data = dict() data["time"] = float(linematch.group(1)) value = float(linematch.group(2)) if abs(value) > abs(maxValue): maxValue = value data["value"] = value normalisedData.append(data) for data in normalisedData: """ Normalisation to the biggest Value from -1 to 1. """ data["value"] = data["value"] / abs(maxValue) return normalisedData
adbde503fa0152da6ffcd7eaec985f21636d4f6e
47,511
def velocity_diff_squared(particles, field_particles):
    """Squared velocity difference between every particle and every field
    particle.

    :argument field_particles: the external field consists of these (i.e.
        differences are taken relative to the field particles)

    >>> from amuse.datamodel import Particles
    >>> field_particles = Particles(2)
    >>> field_particles.vx = [0.0, 2.0] | units.m
    >>> field_particles.vy = [0.0, 0.0] | units.m
    >>> field_particles.vz = [0.0, 0.0] | units.m
    >>> particles = Particles(3)
    >>> particles.vx = [1.0, 3.0, 4] | units.m
    >>> particles.vy = [0.0, 0.0, 0.0] | units.m
    >>> particles.vz = [0.0, 0.0, 0.0] | units.m
    >>> velocity_diff_squared(particles, field_particles)
    quantity<[[1.0, 1.0], [9.0, 1.0], [16.0, 4.0]] m**2>
    """
    count = len(particles)
    n_dims = particles.velocity.shape[-1]
    # Insert a broadcast axis so each particle pairs with every field particle.
    column = particles.velocity.reshape([count, 1, n_dims])
    delta = column - field_particles.velocity
    return (delta ** 2).sum(-1)
57b153b89ee8ec258b3dbc07800cc27a14e36837
47,513
import itertools


def serialize(*args, **kwargs) -> str:
    """Serialize function arguments into a comma-separated call string.

    Positional arguments are rendered with ``repr``; keyword arguments as
    ``name=repr(value)`` pairs, in order.
    """
    rendered_args = (repr(arg) for arg in args)
    rendered_kwargs = (f"{name}={value!r}" for name, value in kwargs.items())
    return ", ".join(itertools.chain(rendered_args, rendered_kwargs))
67a3ad2fd93977d43aadec4369f0421e53a01884
47,514
from typing import List
import subprocess


def checkForExternalPrerequisites() -> List[str]:
    """Check that the external command-line tools are installed.

    Returns:
        A list of the names of missing prerequisites (empty when all are
        available).
    """
    # Tool name to report -> probe command; deduplicates the two copy-pasted
    # try blocks of the original.
    probes = {
        "xvfb": ["xvfb-run", "--help"],
        "xdotool": ["xdotool", "--help"],
    }
    missing = []
    for name, command in probes.items():
        try:
            subprocess.run(command, capture_output=True, check=True)
        except (OSError, subprocess.CalledProcessError):
            # OSError: binary not found / not executable.
            # CalledProcessError: present but exited non-zero (check=True).
            # Narrowed from the original blanket `except Exception as e`
            # (which also left `e` unused).
            missing.append(name)
    return missing
24fe4d0c2ce927b9552483e9954395309e0df81c
47,515
import pandas as pd


def scheduler_progress_df(d):
    """Convert a status response into a DataFrame of total progress.

    Consumes the dictionary from the ``status.json`` route; unknown keys are
    ignored.

    Examples
    --------
    >>> d = {"ready": 5, "in-memory": 30, "waiting": 20,
    ...      "tasks": 70, "failed": 9,
    ...      "processing": 6,
    ...      "other-keys-are-fine-too": ''}
    >>> scheduler_progress_df(d)  # doctest: +SKIP
    """
    status = d.copy()
    status['total'] = status.pop('tasks')
    order = ['waiting', 'ready', 'failed', 'processing', 'in-memory', 'total']
    counts = pd.Series({key: status[key] for key in order},
                       index=order, name='Count')
    df = pd.DataFrame(counts)
    if status['total']:
        # Scale each count to a bar of '+' signs out of 40 characters,
        # left-justified (padded with spaces on the right).
        bar_widths = (40 * df.Count / status['total']).astype(int)
        df['Progress'] = bar_widths.apply(
            lambda width: '%-40s' % (width * '+').rstrip(' '))
    else:
        df['Progress'] = 0
    df.index.name = 'Tasks'
    return df
4cd65b93850094d8e138e30ee89b1a22a59afd6c
47,517
import math


def solve(n, m):
    """Count the permutations that start with 1.

    Since only sequences starting with 1 are wanted, this computes the
    number of arrangements of the remaining m - 1 + n items, i.e. the
    binomial coefficient C(m - 1 + n, n), modulo 1e9 + 7.
    """
    mod = 10 ** 9 + 7
    rest = m - 1
    numerator = math.factorial(rest + n) % mod
    # Fermat's little theorem: den^(mod-2) is the modular inverse of den.
    # pow with a modulus keeps the intermediate numbers small.
    denominator = math.factorial(n) * math.factorial(rest)
    inverse = pow(denominator, mod - 2, mod)
    return numerator * inverse % mod
9c0eb44e09fd91a8ba77062bc0d543e0aff9ebf2
47,522
def cached(func):
    """Memoize a function's first result.

    The wrapped function is invoked once; every later call returns the
    stored result regardless of arguments (this single-result design matches
    the original).

    Fix: the original used ``ret is None`` as the "not yet computed" flag,
    so a function legitimately returning None was re-invoked on every call.
    A private sentinel distinguishes "never called" from a None result.
    """
    _missing = object()  # sentinel: "no result cached yet"
    ret = _missing

    def call_or_cache(*args, **kwargs):
        nonlocal ret
        if ret is _missing:
            ret = func(*args, **kwargs)
        return ret

    return call_or_cache
9a62a211e0aa49dc8394cc32614ec68cab4399f1
47,523
def is_time_invariant(ds):
    """Test if the dataset is time-invariant (has no time coordinate)

    Args:
        ds (xarray.Dataset or xarray.DataArray): Data

    Return:
        bool : True if no 'time' coordinate detected, False otherwise
    """
    # Membership test directly on the coords mapping — the original
    # materialized list(ds.coords.keys()) first, which is needless work.
    return 'time' not in ds.coords
06254dc660171f34f911ab4e99b9d14e7a789751
47,524
def pygame_rrdt_sampler_paint(sampler):
    """Visualisation function for rrdt sampler

    :param sampler: sampler to be visualised
    """

    def probability_to_shade(value, upper, lower):
        """Map a probability into a grey level of roughly [40, 220]."""
        span = upper - lower
        if span == 0:
            span = 1  # prevent division by zero
        return 220 - 180 * (1 - (value - lower) / span)

    if sampler._last_prob is None:
        return
    highest = sampler._last_prob.max()
    lowest = sampler._last_prob.min()
    for idx, particle in enumerate(sampler.p_manager.particles):
        # NOTE(review): the layer is re-filled on every iteration, which
        # erases circles drawn for earlier particles — this matches the
        # original code exactly; confirm it is intended.
        sampler.particles_layer.fill((255, 128, 255, 0))
        # get a transition from green to red
        shade = probability_to_shade(sampler._last_prob[idx], highest, lowest)
        shade = max(min(255, shade), 50)
        sampler.args.env.draw_circle(
            pos=particle.pos,
            colour=(shade, shade, 0),
            radius=4,
            layer=sampler.particles_layer,
        )
    sampler.args.env.window.blit(sampler.particles_layer, (0, 0))
af0babdcb01e5edd6755b7ab0ee1f79ae2966fa9
47,525
def merge_dict(d1, d2):
    """Merge two dictionaries, i.e. ``{**d1, **d2}`` in Python 3.5 onwards;
    on key clashes the value from ``d2`` wins."""
    merged = d1.copy()
    merged.update(d2)
    return merged
26c1e1700873c40bec46f5513c7eb5dbc0595325
47,526
import sys


def py25_format(template):
    """Helper for testing under Python 2.5.

    On 2.5 the ``{name}`` placeholders are rewritten to the old
    ``%(name)s`` style; on 2.6+ the template is returned untouched.
    """
    if sys.version_info >= (2, 6):
        return template
    return template.replace("{", "%(").replace("}", ")s")
71a4be4cf3ebd79e5a67b050293e4cd05c5af273
47,527
def only_one(arr):
    """Return the sole element of ``arr``.

    Many JSON RPC calls return an array; often we only expect a single
    entry.

    Raises:
        ValueError: if ``arr`` does not contain exactly one element.
        (The original used ``assert``, which is silently stripped under
        ``python -O`` — input validation must raise instead.)
    """
    if len(arr) != 1:
        raise ValueError(f"expected exactly one entry, got {len(arr)}")
    return arr[0]
7740a4b812e36a1ee3f9e1db7088ef4a87437bff
47,528
def table_all_row(list_rows):
    """Render a markdown table listing files with their functions.

    Args:
        list_rows: iterable of pre-formatted markdown row strings.

    Returns:
        str: the markdown header followed by one line per row.
    """
    header = '''
| file | type | name | List functions |
| ------- | --- | --- | -------------- |
'''
    return header + ''.join(f'{row}\n' for row in list_rows)
c4d9a5a17b54ae9ac160ccd80c554935ff6fa38d
47,529
def crystal_search(crystals, histogram_type):
    """Group crystal values by centering type for a given histogram type.

    Parameters
    ----------
    crystals : list
        A list of crystal dicts.
    histogram_type : unicode str (on py3)
        Type of histogram e.g. 'a', 'gamma'.

    Returns
    -------
    dict
        Maps each centering type to the list of that type's values.
    """
    grouped = {}
    for crystal in crystals:
        grouped.setdefault(crystal['centering'], []).append(
            crystal[histogram_type])
    return grouped
406ff81a3865a594e43cb95e5e92330617af48df
47,531
def mean(ls):
    """Return the arithmetic mean of a list as a float.

    Raises ZeroDivisionError for an empty list.
    """
    total = float(sum(ls))
    return total / len(ls)
213632c6b905317175dbecbbf4f175392451af2e
47,532
def is_iscsi_uid(uid):
    """Validate the iSCSI initiator format.

    :param uid: format like iqn.yyyy-mm.naming-authority:unique

    NOTE(review): only the ``iqn`` prefix is checked — the
    yyyy-mm/naming-authority structure is not verified.
    """
    return uid.startswith('iqn')
8670c7970e1ee5e077de3de02f2fb754fa4352aa
47,533
# Groups of characters considered equivalent: each Arabic base letter
# together with its Unicode presentation forms (isolated/final/medial/
# initial, where they exist).  The original listed every pair of every
# group explicitly (~470 tuples) and rebuilt that set on EVERY call;
# here the complete-graph pairs are generated once at import time.
# NOTE(review): the hamza variants ("\u0623", "\u0625") were present but
# commented out in the original table, so they stay excluded here.
_EQUIVALENCE_GROUPS = (
    ("\u0627", "\uFE8D", "\uFE8E"),                      # alef
    ("\u0628", "\uFE8F", "\uFE90", "\uFE92", "\uFE91"),  # beh
    ("\u062A", "\uFE95", "\uFE96", "\uFE98", "\uFE97"),  # teh
    ("\u062B", "\uFE99", "\uFE9A", "\uFE9C", "\uFE9B"),  # theh
    ("\u062C", "\uFE9D", "\uFE9E", "\uFEA0", "\uFE9F"),  # jeem
    ("\u062D", "\uFEA1", "\uFEA2", "\uFEA4", "\uFEA3"),  # hah
    ("\u062E", "\uFEA5", "\uFEA6", "\uFEA8", "\uFEA7"),  # khah
    ("\u062F", "\uFEA9", "\uFEAA"),                      # dal
    ("\u0630", "\uFEAB", "\uFEAC"),                      # thal
    ("\u0631", "\uFEAD", "\uFEAE"),                      # reh
    ("\u0632", "\uFEAF", "\uFEB0"),                      # zain
    ("\u0633", "\uFEB1", "\uFEB2", "\uFEB4", "\uFEB3"),  # seen
    ("\u0634", "\uFEB5", "\uFEB6", "\uFEB8", "\uFEB7"),  # sheen
    ("\u0635", "\uFEB9", "\uFEBA", "\uFEBC", "\uFEBB"),  # sad
    ("\u0636", "\uFEBD", "\uFEBE", "\uFEC0", "\uFEBF"),  # dad
    ("\u0637", "\uFEC1", "\uFEC2", "\uFEC4", "\uFEC3"),  # tah
    ("\u0638", "\uFEC5", "\uFEC6", "\uFEC8", "\uFEC7"),  # zah
    ("\u0639", "\uFEC9", "\uFECA", "\uFECC", "\uFECB"),  # ain
    ("\u063A", "\uFECD", "\uFECE", "\uFED0", "\uFECF"),  # ghain
    ("\u0641", "\uFED1", "\uFED2", "\uFED4", "\uFED3"),  # feh
    ("\u0642", "\uFED5", "\uFED6", "\uFED8", "\uFED7"),  # qaf
    ("\u0643", "\uFED9", "\uFEDA", "\uFEDC", "\uFEDB"),  # kaf
    ("\u0644", "\uFEDD", "\uFEDE", "\uFEE0", "\uFEDF"),  # lam
    ("\u0645", "\uFEE1", "\uFEE2", "\uFEE4", "\uFEE3"),  # meem
    ("\u0646", "\uFEE5", "\uFEE6", "\uFEE8", "\uFEE7"),  # noon
    ("\u0647", "\uFEE9", "\uFEEA", "\uFEEC", "\uFEEB"),  # heh
    ("\u0648", "\uFEED", "\uFEEE"),                      # waw
    ("\u064A", "\uFEF1", "\uFEF2", "\uFEF4", "\uFEF3"),  # yeh
    ("\u0622", "\uFE81", "\uFE82"),                      # alef with madda
    ("\u0629", "\uFE93", "\uFE94"),                      # teh marbuta
    ("\u0649", "\uFEEF", "\uFEF0"),                      # alef maksura
)

# Unordered pairs, so a single membership test covers both argument orders.
_EQUIVALENT = frozenset(
    frozenset((a, b))
    for group in _EQUIVALENCE_GROUPS
    for i, a in enumerate(group)
    for b in group[i + 1:]
)


def equal(l1, l2):
    """
    Parameters
    ----------
    l1 : unicode char
        A character in unicode.
    l2 : unicode char
        A character in unicode.

    Returns
    -------
    boolean
        True if l1 equals or is equivalent to l2.
    """
    return l1 == l2 or frozenset((l1, l2)) in _EQUIVALENT
ce274031c895f03f4a34244305fb19caca8b5846
47,537
from typing import cast
from typing import Iterable
import itertools


def _test_for_equality_nestedly_and_block_implicit_bool_conversion(
    o1: object, o2: object
) -> bool:
    """Compare two objects — or sequences, recursively — for equality while
    blocking implicit conversion of values to bools.

    >>> import methodfinder
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion(1,1)
    True
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion(1,2)
    False
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion([1,2,3],[1,2,3])
    True
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion([1,2,3],[2,1,3])
    False
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion(1,True)
    False"""
    try:
        # If both arguments are iterable, compare element-wise.  Only the
        # first 100 elements are examined — methodfinder users never pass
        # more.  Non-iterables raise TypeError here and fall to the except.
        left = itertools.islice(cast(Iterable[object], o1), 100)
        right = itertools.islice(cast(Iterable[object], o2), 100)
        return all(
            _test_for_equality_nestedly_and_block_implicit_bool_conversion(a, b)
            for a, b in itertools.zip_longest(left, right)
        )
    except:  # noqa: E722 — deliberately broad: besides TypeError for
        # non-iterables, this also absorbs the RecursionError produced when
        # single-character strings iterate into themselves, so keep it bare.
        # The type check first suppresses implicit bool conversion (1 == True
        # would otherwise pass), which returns way too many useless results
        # for the purpose of methodfinder.
        return (type(o1) == type(o2)) and (o1 == o2)
351fb24ec20f8967559ecfff54d1608213c04f8b
47,539
def _get_duration_in_seconds(selected_duration):
    """Convert an "N hours" / "N minutes" string to seconds.

    Args:
        selected_duration (string): a number followed by a unit,
            e.g. "3 hours" or "2 minutes".

    Returns:
        int: duration in seconds.  An unrecognised unit leaves the number
        unscaled (it is presumably already in seconds — confirm with callers).
    """
    amount, unit = selected_duration.split(' ')
    scale = {'hours': 3600, 'minutes': 60}.get(unit, 1)
    return int(amount) * scale
0f17f3f4ed678dfb9fdf0d4ed819cb2308311981
47,541
async def handle_async_reader(reader, writer):
    """Drain an asynchronous reader to EOF, echoing every chunk to ``writer``
    while also accumulating the bytes read.

    Args:
        reader: a byte reader with an async ``read`` method
        writer: a byte writer (callable) that logs each chunk read

    Returns:
        bytes: all accumulated bytes read from the reader
    """
    collected = b""
    # read() returning an empty chunk signals EOF.
    while chunk := await reader.read(1024):
        collected += chunk
        writer(chunk)
    return collected
91be72adb83f51594fe4a3668e4cae83f9346d04
47,542
def check_sentence_quality(left_match_right):
    """Decide whether an NKJP match makes a good example sentence.

    Args:
        left_match_right (tuple): three strings — the left side of the NKJP
            match, the match itself (in ``[[baseform|match]]`` form) and the
            right side

    Returns:
        int: 0 for bad quality, 1 for good quality
    """
    sentence = ''.join(left_match_right[:3])
    words = sentence.split()

    # Too large a share of upper-case characters.
    allowed_uppercase_proportion = 0.1
    upper_chars = sum(1 for ch in sentence if ch.isupper())
    if upper_chars / len(sentence) > allowed_uppercase_proportion:
        return 0
    # Too many titlecase words.
    allowed_titlecase_proportion = 0.4
    title_words = sum(1 for word in words if word[0].isupper())
    if title_words / len(words) > allowed_titlecase_proportion:
        return 0
    # Sentence length outside the useful window.
    if not 60 <= len(sentence) <= 200:
        return 0
    # Too many newlines (most likely a list).
    if sentence.count('\n') > 3:
        return 0
    return 1
7a50e860d251e2f2ed2fd1f1c7e7ea406a7a4043
47,543
def cropImage(img, windowSize):
    """Center a crop window of ``windowSize`` on the supplied image.

    :param img: image exposing ``get_rect()`` (e.g. a pygame Surface)
    :param windowSize: (width, height) of the crop window
    :returns: tuple (x1, y1, width, height) describing the crop Rect
    """
    center_x, center_y = img.get_rect().center
    width, height = windowSize[0], windowSize[1]
    # Upper-left corner such that the window is centered on the image center.
    left = center_x - width / 2
    top = center_y - height / 2
    return (left, top, width, height)
b35b0dd20dac86703dfe3b258ce7388d7201b31b
47,544
def toggle_legend_collapse(_, is_open):
    """Open or close legend view.

    :param _: Toggle legend btn was clicked (value unused)
    :param is_open: Current visibility of legend
    :return: New visibility for legend; opposite of ``is_open``
    :rtype: bool
    """
    return not is_open
50aeccdeb79dde4a6941a94cdd05ee77494cd1b9
47,546
def multiples(m, n):
    """Build a list of the first ``m`` multiples of the number ``n``.

    :param m: a positive integer count.
    :param n: the number whose multiples are wanted.
    :return: the list [n, 2n, ..., m*n].
    """
    return [n * factor for factor in range(1, m + 1)]
3a182d95dafa1d56ce120ff1d04f108f9d9c5e37
47,547
import re


def get_version_string(init_file):
    """Read the ``__version__`` string from an init file.

    :param init_file: path of the file to scan.
    :raises RuntimeError: when no ``__version__`` assignment is found.
    """
    with open(init_file, 'r') as fp:
        content = fp.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
    if match is None:
        raise RuntimeError('Unable to find version string in %s.' % (init_file))
    return match.group(1)
22a9faa0d6686106de7d5cb736762859cc9c483a
47,549
def has_primary_repo_remote(repo, verbose=False):
    """Check whether the repo has a primary remote.

    NOTE(review): this is a stub — it reports False unconditionally,
    ignoring both arguments.
    """
    return False
ccb0e366c2ac7e984e6d1c90e6e22801dbea2a57
47,550
import base64


def addResult(rDB, passFail, tcName, desc, expect, seen, tag, identifier=None):
    """Append a new result record to the in-memory results DB.

    Variable   | Description
    -----------+------------------------------------------------------------
    rDB        | memory-cached results DB variable (list of dictionaries)
    passFail   | int = 0 (fail) or = 1 (pass)
    tcName     | string of type ABC_nn, where ABC is a test indicator and
               | nn is the priority
    expect     | results that the TC was expecting
    seen       | results that were observed by the TC
    tag        | additional information to the TC
    identifier | optional label (e.g. lets run.py/checker.py tag a finding
               | that feedback.py will pick up)

    NOTE: the JSON results file name is given by internal variable rDBFile.
    Returns the (mutated) rDB list.
    """
    def _b64(text):
        # Results are stored base64-encoded so arbitrary content is JSON-safe.
        return str(base64.b64encode(text.encode("utf-8")), "utf-8")

    record = {
        "tc": tcName,
        "msg": desc,
        "expect": _b64(expect),
        "seen": _b64(seen),
        "tag": tag,
        "result": "OK" if passFail == 1 else "FAIL",
    }
    if identifier is not None:
        record["identifier"] = identifier
    rDB.append(record)
    return rDB
db5f256e22223f297a97d5fcf1b5340485551de8
47,551
import re


def compute(sample: str, substr: str) -> list[int]:
    """Find every (possibly overlapping) occurrence of ``substr`` in ``sample``.

    Positions are 1-based: the position of a symbol is the total number of
    symbols to its left, including itself — e.g. the occurrences of 'U' in
    "AUGCUUCAGAAAGGUCUUACG" are at 2, 5, 6, 15, 17, and 18.

    :param sample: the string to search in.
    :param substr: a string no longer than ``sample``.
    :return: 1-based start positions of each occurrence of ``substr``.
    """
    # A zero-width lookahead lets finditer report overlapping matches.
    return [match.start() + 1 for match in re.finditer(f'(?={substr})', sample)]
ed3a157fa74e953e56d2ed1d13e0b757da23c135
47,552
# Cache shared across calls so the recursion is memoised for real.
_fib_cache = {}


def fib(n):
    """fib(number) -> number

    Return the Fibonacci value of ``n`` (with fib(n) == 1 for n <= 2).

    >>> fib(8) -> 21
    >>> fib(6) -> 8
    >>> fib(0) -> 1

    Fix: the original created its memo dict (``y = {}``) locally on every
    call, so the cache was always empty and the runtime stayed exponential;
    the cache now lives at module level and persists between calls.
    """
    if n in _fib_cache:
        return _fib_cache[n]
    value = 1 if n <= 2 else fib(n - 1) + fib(n - 2)
    _fib_cache[n] = value
    return value
a8bc29e4631ef71483cd46b43b6681aff48b9aa5
47,553
def V_terms(Q_x, Q_u, Q_xx, Q_ux, Q_uu, K, k):
    """Quadratic approximation of the value function.

    NOTE(review): the operands must support ``@`` and ``.T`` (e.g. numpy
    arrays); the mixed use of transposes only shapes-out consistently when
    Q_x, Q_u and k are 1-D vectors — confirm against the caller.  The
    expressions below are kept exactly as in the original.
    """
    # Linear term of the value expansion.
    V_x = Q_x.T + Q_u.T @ K + k @ Q_ux + k.T @ Q_uu @ K
    # Quadratic (Hessian) term of the value expansion.
    V_xx = Q_xx + K.T @ Q_ux + Q_ux.T @ K + K.T @ Q_uu @ K
    return V_x, V_xx
8b357d9e9b3827f43d7d215a7683e1a67927c220
47,554
import math


def softmax(y_pred: dict):
    """Normalise a dictionary of predicted scores into probabilities, in-place.

    Shifts by the maximum before exponentiating for numerical stability.

    Parameters
    ----------
    y_pred

    Returns the same (mutated) dict; an empty dict is returned untouched.
    """
    if not y_pred:
        return y_pred
    peak = max(y_pred.values())
    # Exponentiate (shifted by the max) while accumulating the partition sum.
    z = 0.0
    for label in y_pred:
        y_pred[label] = math.exp(y_pred[label] - peak)
        z += y_pred[label]
    for label in y_pred:
        y_pred[label] /= z
    return y_pred
c77d81c3d88116e84e517b045471846ea40e5fde
47,555
def onRequest(event):
    """Dispatch an Engage Digital source-SDK request.

    ``event`` is a parsed dict like::

        {
            'pathParameters': {'action': action},
            'queryStringParameters': dict(request.args),
            'body': body if _.predicates.is_dict(body) else json.loads(body or '{}'),
            'headers': dict(request.headers)
        }

    Check https://github.com/ringcentral/engage-digital-source-sdk/wiki
    for more info.
    """
    body = event['body']
    action = body['action']
    print('body:', body)
    print('action:', action)

    list_actions = {'threads.list', 'private_messages.list', 'messages.list'}
    show_actions = {'threads.show', 'private_messages.show', 'messages.show'}
    if action == 'implementation.info':
        result = {
            'objects': {
                'messages': ['create', 'show', 'list'],
                'private_messages': ['create', 'show', 'list'],
                'threads': ['create', 'show', 'list']
            },
            'options': []
        }
    elif action in list_actions:
        result = []
    elif action in show_actions:
        result = ''
    else:
        result = {}
    print('result', result)
    return result
120edca6b5a579d53e27d38849abf3a3f43e00f7
47,556
def block_combine(arr, nrows, ncols):
    """Combine a list of blocks (m * n) into an nrows * ncols 2D matrix.

    Arguments:
        arr {3D np.array} -- A list of blocks in the format:
            arr[# of block][block row size][block column size]
        nrows {int} -- The target row size after combination.
        ncols {int} -- The target column size after combination.

    Returns:
        2D np.array -- Combined matrix.

    Raise:
        ValueError -- The size of `arr` is not equal to `nrows * ncols`.
    """
    if arr.size != nrows * ncols:
        raise ValueError(f'The size of arr ({arr.size}) should be equal to '
                         f'nrows * ncols ({nrows} * {ncols})')
    _, blk_rows, blk_cols = arr.shape
    # Lay the blocks out on a (grid_rows, grid_cols) grid, then interleave
    # the per-block axes back into full rows and columns.
    grid = arr.reshape(nrows // blk_rows, -1, blk_rows, blk_cols)
    return grid.swapaxes(1, 2).reshape(nrows, ncols)
e6330606ea63eb16faee305d646b60b8327785ac
47,557
def return_cash(cash):
    """Fake deposit-slot hook: pretend to return the cash the user requested
    with a withdrawal transaction.

    Returns True for any truthy amount, False otherwise.
    """
    return bool(cash)
ec654161d6fcd0ab82ca945fc5ecc78dd3513f21
47,560
def getCentroids(layer):
    """Calculate the centroid of every polygon of a map.

    Returns a dictionary mapping each area index to the coordinates of that
    area's centroid.  For computational efficiency it's recommended to store
    the results on the layer database using the addVariable layer function.

    :param layer: layer with the areas to be calculated
    :type layer: Layer Object

    Users must call this function through a layer object as in the following
    example.

    **Example**

    >>> import clusterpy
    >>> china = clusterpy.importArcData("clusterpy/data_examples/china")
    >>> china.getCentroids()
    """
    polygons = layer.areas
    geometricAreas = layer.getGeometricAreas()
    centroids = {}
    for a, area in enumerate(polygons):
        # Signed area accumulated over the rings of this polygon (shoelace).
        areaAcum = 0
        areaXAcum = 0
        areaYAcum = 0
        for ring in area:
            ringAcum = 0
            ringXAcum = 0
            ringYAcum = 0
            for p, p1 in enumerate(ring):
                # Wrap around to close the ring on the last edge.
                p2 = ring[0] if p == len(ring) - 1 else ring[p + 1]
                cross = p1[0] * p2[1] - p1[1] * p2[0]
                ringAcum += cross
                ringXAcum += (p1[0] + p2[0]) * cross
                ringYAcum += (p1[1] + p2[1]) * cross
            areaAcum += ringAcum / float(2)
            # Weight this ring's contribution by its share of the polygon's
            # geometric area.
            # NOTE(review): areaAcum here is cumulative across rings, exactly
            # as in the original — confirm that is intended for multi-ring
            # polygons.
            ringPercent = abs(areaAcum) / float(geometricAreas[a])
            areaXAcum += ringPercent * ringXAcum / float(6 * areaAcum)
            areaYAcum += ringPercent * ringYAcum / float(6 * areaAcum)
        centroids[a] = [areaXAcum, areaYAcum]
    return centroids
5409ff6e379cf22e2fb85b0e6c2a57b63feba011
47,563
from typing import Tuple
import torch


def truncated_normal_fill(shape: Tuple[int], mean: float = 0, std: float = 1,
                          limit: float = 2) -> torch.Tensor:
    """Sample a tensor from a truncated normal distribution.

    Draws 8 candidates per element from N(0, 1), keeps the first candidate
    that falls inside (-limit, limit), then scales by ``std`` and shifts by
    ``mean``.

    NOTE(review): if none of the 8 candidates is in range, the first
    candidate is used unconditionally, so out-of-range values are possible
    (vanishingly unlikely for limit=2).
    """
    candidates_per_cell = 8
    draws = torch.empty(shape + (candidates_per_cell,)).normal_()
    in_range = (draws < limit) & (draws > -limit)
    # Index of the first in-range candidate for every element.
    _, first_ok = in_range.max(-1, keepdim=True)
    picked = draws.gather(-1, first_ok).squeeze(-1)
    return picked.mul_(std).add_(mean)
6c11583ee3114be9bf754958ef88e69010085264
47,564
import hashlib


def hash_code(source):
    """Return the hexadecimal SHA-256 digest of ``source`` (a str)."""
    return hashlib.sha256(source.encode()).hexdigest()
83a14b41d5401e8fcf097cfe624e0ebfca4f559a
47,565
import torch


def get_token_distances(trigger_spans, arg_spans):
    """Token distance between every trigger span and every argument span.

    For each (trigger, arg) pair the distance is
    ``max(trigger_start - arg_end, arg_start - trigger_end)``, i.e. the gap
    between the spans whichever side the argument lies on.

    Shapes (from the unsqueeze/broadcast pattern):
    trigger_spans (batch, num_triggers, 2), arg_spans (batch, num_args, 2);
    result (batch, num_triggers, num_args).
    """
    # Broadcast triggers along dim 2 and arguments along dim 1.
    trigger_starts = trigger_spans[:, :, 0].unsqueeze(2)
    trigger_ends = trigger_spans[:, :, 1].unsqueeze(2)
    arg_starts = arg_spans[:, :, 0].unsqueeze(1)
    arg_ends = arg_spans[:, :, 1].unsqueeze(1)
    gaps = torch.stack([trigger_starts - arg_ends,
                        arg_starts - trigger_ends], dim=-1)
    # Keep only the distances; the argmax (which side the arg is on) is
    # discarded, matching the original.
    token_distances, _ = gaps.max(dim=-1)
    return token_distances
aebc08a4aefe13c6faa2ea92920db8266aca4d58
47,566
def trailing(target="close", days=40, over_under="under"):
    """Build a feature: is the target trailing under or over within a window.

    | Name: trailling\\_\\ **over\\_under**\\ \\_\\ **days**\\ \\_of\\_\\ **target**

    :param target: Data column to use for the calculation, defaults to "close"
    :type target: str
    :param days: Size of the window in days, defaults to 40
    :type days: int

    NOTE(review): the rolling predicate always tests "last value strictly
    greater than every earlier value in the window" regardless of
    ``over_under``; that parameter only affects the column name — confirm
    this is intended.  The "trailling" spelling in the column name is kept
    byte-for-byte for compatibility.
    """

    def _predicate_factory():
        def _is_running_extreme(window):
            return all(window[-1] > window[:-1])
        return _is_running_extreme

    def return_function(data):
        column_name = f"trailling_{over_under}_{days}_of_{target}"
        if column_name not in data.columns:
            data[column_name] = (
                data[target]
                .rolling(window=days, min_periods=1)
                .apply(_predicate_factory(), raw=True)
            )
        return data[column_name].copy()

    return return_function
cb2b55e23db328913623729f27ca5281bbb13ae0
47,567
def get_interactions_stats(S_edgelist, embedding, target_adjacency):
    """Statistics over the target-graph edges available per source edge.

    For every source edge (u, v), counts the target edges connecting the chain
    of u to the chain of v; raises ValueError if a source edge has none.

    Args:
        S_edgelist (iterable): label pairs — the edges of the source graph.
        embedding (dict): {source_var: {target_var, ...}, ...}.
        target_adjacency (dict/networkx.Graph): {target_var: neighbours, ...}.

    Returns:
        tuple: (max, min, total, average, standard deviation) of the
        per-edge interaction counts (avg/std are taken over len(embedding)).
    """
    counts = {}
    for u, v in S_edgelist:
        edges = {(s, t) for s in embedding[u] for t in embedding[v]
                 if s in target_adjacency[t]}
        if not edges:
            raise ValueError("no edges in target graph between source variables {}, {}".format(u, v))
        counts[(u, v)] = len(edges)

    N = len(embedding)
    total = sum(counts.values())
    max_inters = max(counts.values(), default=0)
    min_inters = min(counts.values(), default=float('inf'))
    avg_inters = total / N
    std_dev = math.sqrt(sum((c - avg_inters) ** 2 for c in counts.values()) / N)
    return max_inters, min_inters, total, avg_inters, std_dev
69b6f818b25a2cb164cf3e2d6f24a80fc7a73efc
47,568
def replace_None_zero(data, col_kwargs):
    """Return *data* with every missing value filled with 0 (col_kwargs unused)."""
    return data.fillna(value=0)
9035e117168f1415563cafe97a8eda17a2f34f0d
47,569
def fklist(self, kpoi="", lab="", **kwargs):
    """List the forces at keypoints (APDL command ``FKLIST``).

    Parameters
    ----------
    kpoi
        Keypoint to list forces at. ALL (default) lists all selected
        keypoints [KSEL]; a component name may also be substituted.
    lab
        Force label to list (defaults to ALL); see DOFSEL for labels.

    Notes
    -----
    Listing applies to the selected keypoints [KSEL] and selected force
    labels [DOFSEL]. Valid in any processor.
    """
    return self.run(f"FKLIST,{kpoi},{lab}", **kwargs)
dfcb4547520d8840763ba11fd4f993ffee70279d
47,570
def taken(diff):
    """Return the total number of microseconds in timedelta *diff*, as a
    non-negative int.

    Bug fix: days and seconds were previously scaled by 1_000 (milliseconds)
    instead of 1_000_000, so the result was off by a factor of ~1000 and did
    not match the documented unit.
    """
    total = (diff.days * 86_400 + diff.seconds) * 1_000_000 + diff.microseconds
    return abs(total)
64e8022bb0a80fcccc1a755fffb79121e61cad17
47,571
def remove_rare(sentences: List[List[str]]) -> List[List[str]]:
    """
    Drop words that occur at most once across all *sentences*.

    Parameters
    ----------
    sentences: List of tokenized sentences.
    """
    frequency: Counter = Counter(tok for sent in sentences for tok in sent)
    return [[tok for tok in sent if frequency[tok] > 1] for sent in sentences]
1af60b7bb0393abf99db02abf6f4fea9d9529c15
47,572
def _general_direction(model_rxn1, model_rxn2): """ picks the more general of the two directions from reactions passed in """ r1d = model_rxn1.get_direction() r2d = model_rxn2.get_direction() if r1d == r2d: return r1d else: return '<=>'
362ee4a6f033323328f869ef6e5650cc9fef9fa3
47,574
def get_file_structure(base_folder, cameras, extension='.MP4'):
    """Return, per camera, the sorted list of clip paths whose filenames
    contain *extension* (substring match, as in the original)."""
    structure = []
    for camera in cameras:
        cam_dir = os.path.abspath(os.path.join(base_folder, camera))
        clips = [os.path.join(cam_dir, name)
                 for name in sorted(os.listdir(cam_dir))
                 if extension in name]
        structure.append(clips)
    return structure
85751ac268b45a3cdffa5499ad5f04ed8fd7c0d1
47,575
def get_test_dict():
    """Return a sample Cromwell backend configuration dict (no "include" lines)."""
    local_provider = {
        'actor-factory': 'cromwell.backend.impl.sfs.config.ConfigBackendLifecycleActorFactory',
        'config': {
            'default-runtime-attributes': {'docker': 'ubuntu:latest'},
            'root': '/mnt/data/scratch/leepc12/caper_out',
        },
    }
    return {
        'backend': {
            'default': 'gcp',
            'providers': {'Local': local_provider},
        }
    }
44711f57db5275507bfecf3b0a2381e5ece2140a
47,577
def probe_command_availability(cmd):
    """Try to run 'cmd' in a subprocess and return availability.

    'cmd' should be provided in the format expected by subprocess: a string,
    or a list of strings if multiple arguments.

    Raises RuntimeError if the called process crashes (eg, via Ctrl+C).

    Returns:
        command_available, stdout, stderr
        stdout and stderr will be '' if command was not available.

    Fixes: the bare ``except:`` around communicate() is narrowed to
    ``BaseException`` (still covering KeyboardInterrupt, per the docstring)
    and the original exception is chained onto the RuntimeError instead of
    being discarded.
    """
    try:
        # Fails here (OSError/FileNotFoundError) if the command does not exist.
        pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError:
        return False, '', ''

    try:
        stdout, stderr = pipe.communicate()
    except BaseException as exc:  # includes KeyboardInterrupt (Ctrl+C)
        raise RuntimeError("process crashed") from exc

    # Terminate in case the process is somehow still alive; already-exited
    # processes may raise OSError, which is harmless here.
    try:
        pipe.terminate()
    except OSError:
        pass
    return True, stdout, stderr
3580c53b160f10045a12c61b03963aa2e445c64d
47,578
def del_non_numeric(dataframe):
    """Drop rows of *dataframe* whose "Maximum" entry is of a numeric type
    (float/float64/int/int64), in place, and return the dataframe.

    Bug fix: the original compared ``type(x)`` against *strings*
    ('float', 'int', ...), which can never match, so the function silently
    did nothing; it also dropped rows while iterating the frame. The type's
    name is now compared, and indices are collected first and dropped once.

    NOTE(review): the original docstring said "deletes the non-numeric rows",
    which contradicts the branch as written (drop when numeric); this fix
    follows the written condition — confirm intended polarity with callers.

    :param dataframe: dataframe with a "Maximum" column of mixed types.
    :return: the same dataframe with numeric-"Maximum" rows removed.
    """
    numeric_type_names = ('float', 'float64', 'int', 'int64')
    to_drop = [idx for idx, value in dataframe["Maximum"].items()
               if type(value).__name__ in numeric_type_names]
    dataframe.drop(to_drop, inplace=True)
    return dataframe
729675b4cfa77b39d8fb591ef0af655421abe6d7
47,580
def get_reciprocal_weighting_function():
    """
    Return a function weighting its input by raising it to the -1 power; see
    :ref:`selection-strategy`.

    Useful where fitness should be minimized: lower scores receive higher
    selection probabilities.

    :returns: a function that returns ``1/input``

    .. note::
        The returned function raises ZeroDivisionError for a fitness of 0.
    """
    def reciprocal(score):
        return 1 / score
    return reciprocal
65eced160111ee757ba64c16b25a41638471a09c
47,581
def _getTableKeys(table): """Return an array of the keys for a given table""" keys = None if table == "Users": keys = [ "Id", "Reputation", "CreationDate", "DisplayName", "LastAccessDate", "WebsiteUrl", "Location", "AboutMe", "Views", "UpVotes", "DownVotes", "ProfileImageUrl", "Age", "AccountId", ] elif table == "Badges": keys = ["Id", "UserId", "Name", "Date"] elif table == "PostLinks": keys = ["Id", "CreationDate", "PostId", "RelatedPostId", "LinkTypeId"] elif table == "Comments": keys = ["Id", "PostId", "Score", "Text", "CreationDate", "UserId"] elif table == "Votes": keys = ["Id", "PostId", "VoteTypeId", "UserId", "CreationDate", "BountyAmount"] elif table == "Posts": keys = [ "Id", "PostTypeId", "AcceptedAnswerId", "ParentId", "CreationDate", "Score", "ViewCount", "Body", "OwnerUserId", "LastEditorUserId", "LastEditorDisplayName", "LastEditDate", "LastActivityDate", "Title", "Tags", "AnswerCount", "CommentCount", "FavoriteCount", "ClosedDate", "CommunityOwnedDate", ] elif table == "Tags": keys = ["Id", "TagName", "Count", "ExcerptPostId", "WikiPostId"] elif table == "PostHistory": keys = [ "Id", "PostHistoryTypeId", "PostId", "RevisionGUID", "CreationDate", "UserId", "Text", ] elif table == "Comments": keys = ["Id", "PostId", "Score", "Text", "CreationDate", "UserId"] return keys
d645b013e40407c6d537100a7e55338d25244cca
47,582
def get_subset(df, age, gender):
    """
    Return a copy of *df* filtered by gender and/or age group.

    :param dataframe df: marathon split times per individual
    :param str age: age group ('all' disables the filter)
    :param str gender: gender category ('all' disables the filter)
    :return dataframe: copy containing only the selected rows
    """
    filter_gender = gender != 'all'
    filter_age = age != 'all'
    if filter_gender and filter_age:
        # NB: combined branch compares age_range against `age` unconverted,
        # mirroring the original behaviour exactly.
        picked = df.loc[(df['gender'] == str(gender)) & (df['age_range'] == age)]
    elif filter_gender:
        picked = df.loc[df['gender'] == str(gender)]
    elif filter_age:
        picked = df.loc[df['age_range'] == str(age)]
    else:
        picked = df
    return picked.copy()
18fe30992dba71c329d95297847a56827f8c52c7
47,583
def sample_and_rank_results(cdf_results_list, ranked_results, random_seed=None):
    """Rank results by repeated CDF sampling.

    Appends a randomly chosen result to *ranked_results*, removes every entry
    sharing its URL from the pool, and recurses until the pool is empty.
    Note the seed is (re)applied on every recursive level, which is what makes
    a fixed *random_seed* yield a reproducible ranking.

    :param cdf_results_list: output of annotations.build_cdf_sampling_list(results)
    :param ranked_results: list accumulating results, usually initialized with []
    :param random_seed: value to guarantee consistent ranking
    :return: ranked list of results
    """
    if random_seed is not None:
        random.seed(a=random_seed)
    pool = copy.copy(cdf_results_list)  # shallow copy: avoid mutating the caller's list
    chosen = random.choice(pool)
    ranked_results.append(chosen)
    chosen_url = chosen['URL']
    remaining = [entry for entry in pool if entry['URL'] != chosen_url]
    if not remaining:
        return ranked_results
    return sample_and_rank_results(remaining, ranked_results, random_seed)
d3624cf3daee407fae2d242d342e3da1311ca34d
47,584
def read_table(filename, usecols=(0, 1), sep='\t', comment='#',
               encoding='utf-8', skip=0):
    """Parse a delimited data file into a dict.

    Bug fix: the *encoding* argument was accepted but never passed to
    ``open()``; it is now honored.

    Parameters
    ----------
    filename: string
        Full path to file.
    usecols: tuple, default (0, 1)
        Two column indices; the first becomes the (lower-cased) key, the
        second the value.
    sep : string, default '\\t'
        Field delimiter.
    comment : str, default '#'
        Lines starting with this character are skipped entirely.
    encoding : string, default 'utf-8'
        Text encoding used to read the file.
    skip: int, default 0
        Number of lines to skip at the beginning of the file.

    Returns
    -------
    dict mapping lower-cased key column to value column (trailing newline
    stripped), one entry per non-comment line.
    """
    d = {}
    with open(filename, 'r', encoding=encoding) as f:
        for _ in range(skip):
            next(f)
        for line in f:
            if line.startswith(comment):
                continue
            columns = line.split(sep)
            key = columns[usecols[0]].lower()
            value = columns[usecols[1]].rstrip('\n')
            d[key] = value
    return d
81e70a1db8530940d73cf8242b791c3cab473b9c
47,585
def time_it(func, *args, **kwargs):
    """Call func(*args, **kwargs), print its wall-clock duration, return its result."""
    started = time.time()
    result = func(*args, **kwargs)
    elapsed = time.time() - started
    print(f'{func.__name__} t: {elapsed:.{8}f} s')
    return result
07023c77f29ca03171ac8f725e41212f605bcdaf
47,586
def is_isogram(word):
    """Return True iff no alphabetic character repeats in *word*
    (case-insensitive; non-letters may repeat freely)."""
    seen = set()
    for ch in word:
        low = ch.lower()
        if low.isalpha() and low in seen:
            return False
        seen.add(low)
    return True
7890439112c46dd9d2a3eb803ec9ce22dedb6797
47,587
def retimePadding(frame, retime, size):
    """Offset *frame* by *retime* and return it zero-padded to *size* digits."""
    shifted = int(frame) + int(retime)
    return f"{shifted:0{int(size)}d}"
728e63aa4730b4a7d1510e53c74c821a5511d130
47,590
def define_limit_offset(request):
    """Extract (limit, offset) ints from request.args, defaulting to (12, 0).

    Fix: the bare ``except:`` is narrowed to the exceptions the int
    conversion / dict lookup can actually raise, so unrelated errors
    (e.g. KeyboardInterrupt) are no longer swallowed.
    """
    limit, offset = 12, 0  # defaults when args are absent or malformed
    if request.args:
        try:
            limit = int(request.args['limit'])
            offset = int(request.args['offset'])
        except (KeyError, ValueError, TypeError):
            limit, offset = 12, 0
    return (limit, offset)
31ef7fbc70ec67c0d646024c580b591238cfecbf
47,591
def comment_cleaner(dirty_text):
    """Clean raw survey free-text: strip HTML fragments, bare entities,
    'nan' placeholders, special characters and digits. Returns None for
    None input (as before).

    Bug fixes: the intermediate cleaned string was previously discarded —
    the final regex pass ran on the raw input — and the HTML patterns were
    passed to ``str.replace`` as literal text (e.g. "(<br>)") so they never
    matched. The removals now actually apply and feed into the regex pass.
    """
    if dirty_text is None:
        return None
    text = dirty_text.replace('<br>', '')          # remove line-break tags
    text = re.sub(r'<a.*?>.*?</a>', '', text)      # remove <a> tags with content
    for entity in ('&amp', '&gt', '&lt'):          # remove bare HTML entities
        text = text.replace(entity, '')
    text = re.sub(r'\bnan\b', '', text)            # remove NA placeholders (whole word)
    semi_clean_text = " ".join(
        re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", text).split())
    return " ".join(re.sub(r"\d+", "", semi_clean_text).split())  # remove numbers
b54ceb211d830b48686c8df1f4a71aa940fc42e0
47,592
def denormalize_dict(source, lower_site_req, upper_site_req, separator="."):
    """Denormalize one level of dotted keys: "k.k1": v -> {"k": {"k1": v}}.

    Also stamps 'lower_site_req' and 'upper_site_req' into the result.

    Bug fix: ``key.split(separator)`` was unpacked into exactly two names, so
    a key containing more than one separator raised ValueError; splitting
    with maxsplit=1 keeps the documented single-level behaviour
    ("a.b.c" -> {"a": {"b.c": v}}).
    """
    result = {}
    for key, value in source.items():
        if separator in key:
            top, rest = key.split(separator, 1)
            result.setdefault(top, {})[rest] = value
        else:
            result[key] = value
    result['lower_site_req'] = lower_site_req
    result['upper_site_req'] = upper_site_req
    return result
f1ec52640b2684bb31e4154ba333dfe3d29f71db
47,593
def _dot(fqdn): """ Append a dot to a fully qualified domain name. DNS and Designate expect FQDNs to end with a dot, but human's conventionally don't do that. """ return '{0}.'.format(fqdn)
53ee7c41dab6b88523a68fd1ee387031c0314eb1
47,594
def _stringListEncoderHelper( n, maxLenS ): """ helper method for string encoding for list iterator """ maxLenS[0]= max( maxLenS[0], len( n ) ) return n.encode( "ascii", "ignore" )
63d9375a5a2a6fea7e8a6a8b8142dab8730b28a8
47,595
def create_input_lists(bids_dir: str):
    """Build the parallel-processing input lists for the patient-wise analysis.

    Walks *bids_dir* and keeps skull-stripped, N4-bias-field-corrected mask
    volumes: gzipped files whose name contains 'sub' and 'N4bfc_brain_mask',
    located under a directory containing 'N4_bias_field_corrected'.

    Args:
        bids_dir (str): path to the BIDS dataset folder.

    Returns:
        (all_subdirs, all_files): parent directories and filenames of the
        matching volumes (both guaranteed non-empty).
    """
    sub_pattern = re.compile(r'sub')
    parent_dirs = []   # type: list
    filenames = []     # type: list
    for dirpath, _dirs, files in os.walk(bids_dir):
        for filename in files:
            extension = os.path.splitext(filename)[-1].lower()
            wanted = (sub_pattern.search(filename)
                      and extension in '.gz'   # substring test, as in the original
                      and "N4bfc_brain_mask" in filename
                      and "N4_bias_field_corrected" in dirpath)
            if wanted:
                parent_dirs.append(dirpath)
                filenames.append(filename)
    assert parent_dirs and filenames, "Input lists must be non-empty"
    return parent_dirs, filenames
75a3c917599e94f2e5f60efa620dff53bdfdf211
47,596
def intersect1D(min1, max1, min2, max2):
    """
    Return whether two 1-dimensional ranges overlap (exclusive at the ends).

    :param int/float min1:
    :param int/float max1:
    :param int/float min2:
    :param int/float max2:
    :return: Overlapping state
    :rtype: bool
    """
    disjoint = max2 <= min1 or max1 <= min2
    return not disjoint
2ad6b9926614b3785aab2a28ee2afb088170d2b9
47,597
def generateDisorder(fastas, outDir, vsl2):
    """Run the VSL2 predictor over protein sequences to produce disorder files.

    For each (name, sequence) pair, writes ``<name>.txt`` in the working
    directory and shells out to VSL2, redirecting its output to
    ``<outDir>/<name>.dis`` (skipped when that file already exists).

    Parameters
    ----------
    fastas : iterable of (name, sequence) pairs in fasta order.
    outDir : directory receiving the ``.dis`` prediction files (created
        if missing).
    vsl2 : path to the VSL2 jar
        (http://www.dabi.temple.edu/disprot/download/VSL2.tar.gz).

    Returns
    -------
    str: *outDir*, now containing the prediction files.
    """
    if not os.path.exists(outDir):
        os.mkdir(outDir)
    for entry in fastas:
        name = re.sub(r'\|', '', entry[0])
        sequence = entry[1]
        with open(name + '.txt', 'w') as handle:
            handle.write(sequence + '\n')
        out_path = outDir + '/' + name + '.dis'
        command = 'java -jar ' + vsl2 + ' -s:' + name + '.txt >' + out_path
        if not os.path.exists(out_path):
            os.system(command)
    return outDir
fb1ad560f0182e000d32666599275c9183b4ee1d
47,598
def hex_from_evenr(newsystem, coord):
    """Convert an even-r offset hex coordinate to a cube coordinate in
    *newsystem*.

    Bug fix: the y component was computed as ``-x + z``, which breaks the
    cube-coordinate invariant ``x + y + z == 0`` (the sum came out as 2*z);
    it must be ``-x - z``.
    """
    x = coord.x - (coord.y + (coord.y & 1)) // 2  # even-r column -> cube x
    z = -coord.y
    y = -x - z  # enforce x + y + z == 0
    return newsystem.coord(x=x, y=y, z=z)
ca74da9b2ed80ce21b0d58049fbd3f801add922e
47,599
from typing import Optional def _format_key(key: str, prefix: Optional[str] = None) -> str: """Prepend prefix is necessary.""" if prefix is None: return key return f"{prefix}.{key}"
14720afa7e6825d07696c12ab1c689e6485cda1a
47,600
def add(g, start, end):
    """
    Add an undirected edge to adjacency list *g* and return it.

    @type: g, graph (adjacency list: list of neighbour lists)
    @param: g, adjacency list
    @type: start, integer
    @param: start, start vertex point for edge
    @type: end, integer
    @param: end, end vertex point for edge
    """
    for a, b in ((start, end), (end, start)):
        g[a].append(b)
    return g
30032f20717034dce57b1e5adfb09e45ce2614be
47,601
def file_size(file, unit):
    """
    Format ``file.size`` (bytes) in the requested unit (KB, MB or GB),
    rounded to two decimals; any other unit leaves the raw byte count.

    Adapted from:
    https://thispointer.com/python-get-file-size-in-kb-mb-or-gb-human-readable-format/
    """
    divisor = {'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3}.get(unit)
    size = file.size if divisor is None else file.size / divisor
    return f'{round(size, 2)} {unit}'
cab5c01470489c126470c0c5179bb3da8d30072b
47,602
def warp_points(points, homographies, device='cpu'):
    """ Warp a list of points with the given homography.

    Arguments:
        points: Tensor of N points, shape (N, 2(x, y)).
        homographies: single (3, 3) homography or a batch (B, 3, 3).
        device: device the intermediate tensors are placed on.

    Returns: a Tensor of shape (N, 2) or (B, N, 2(x, y)) (depending on
        whether the homography is batched) containing the new coordinates
        of the warped points.
    """
    # A single (3, 3) homography is promoted to a batch of one; remember this
    # so the batch dimension can be stripped again before returning.
    no_batches = len(homographies.shape) == 2
    homographies = homographies.unsqueeze(0) if no_batches else homographies
    # homographies = homographies.unsqueeze(0) if len(homographies.shape) == 2 else homographies
    batch_size = homographies.shape[0]
    # Lift points to homogeneous coordinates: (N, 2) -> (N, 3) with a 1 column.
    points = torch.cat((points.float(), torch.ones((points.shape[0], 1)).to(device)), dim=1)
    points = points.to(device)
    # Stack the batch of 3x3 matrices into one (B*3, 3) matrix so a single
    # matmul warps the points under every homography at once.
    homographies = homographies.view(batch_size*3,3)
    # warped_points = homographies*points
    # points = points.double()
    warped_points = homographies@points.transpose(0,1)
    # warped_points = np.tensordot(homographies, points.transpose(), axes=([2], [0]))
    # Reshape back to (B, 3, N) then (B, N, 3) and divide by the homogeneous
    # coordinate to normalize the points.
    warped_points = warped_points.view([batch_size, 3, -1])
    warped_points = warped_points.transpose(2, 1)
    warped_points = warped_points[:, :, :2] / warped_points[:, :, 2:]
    return warped_points[0,:,:] if no_batches else warped_points
779be021b06eb3b00e4ae154f2ba203f4be706cd
47,603
def degrees_as_hex(angle_degrees, arcseconds_decimal_places=2):
    """ Takes degrees, returns sexagesimal ("hex") representation
    "+DD:MM:SS[.ff]". TESTS OK 2020-10-24.

    :param angle_degrees: any angle as degrees. [float]
    :param arcseconds_decimal_places: dec. places at end of hex string (no period if zero). [int]
    :return: same angle in hex notation, with proper sign. [string]
        NOTE(review): docstring previously said "unbounded", but the code
        below wraps a carry past 360 degrees back to 0 exactly once.
    """
    # Sign is taken from the input; all arithmetic below is on the magnitude.
    if angle_degrees < 0:
        sign = "-"
    else:
        sign = "+"
    abs_degrees = abs(angle_degrees)
    arcseconds_decimal_places = int(max(0, arcseconds_decimal_places))  # ensure int and non-negative.
    total_arcseconds = abs_degrees * 3600
    int_degrees = int(total_arcseconds // 3600)
    remaining_arcseconds = total_arcseconds - 3600 * int_degrees
    int_arcminutes = int(remaining_arcseconds // 60)
    remaining_arcseconds -= 60 * int_arcminutes
    # Split whole vs fractional arcseconds; the fraction is rounded to the
    # requested number of decimal places as an integer (e.g. 0.256 -> 26 at 2 dp).
    if arcseconds_decimal_places > 0:
        arcseconds, fract_arcseconds = divmod(remaining_arcseconds, 1)
        int_fract_arcseconds = int(round(fract_arcseconds * 10 ** arcseconds_decimal_places))
    else:
        arcseconds, fract_arcseconds, int_fract_arcseconds = round(remaining_arcseconds), 0, 0
    int_arcseconds = int(arcseconds)
    # Carry chain: rounding the fraction can overflow into arcseconds, which
    # can overflow into arcminutes, then degrees, then wrap past 360.
    if arcseconds_decimal_places > 0:
        if int_fract_arcseconds >= 10 ** arcseconds_decimal_places:
            int_fract_arcseconds -= 10 ** arcseconds_decimal_places
            int_arcseconds += 1
    if int_arcseconds >= 60:
        int_arcseconds -= 60
        int_arcminutes += 1
    if int_arcminutes >= 60:
        int_arcminutes -= 60
        int_degrees += 1
    if int_degrees >= 360:
        int_degrees -= 360
    # Build "+DD:MM:SS.ff" (fraction zero-padded to the requested width), or
    # "+DD:MM:SS" when no decimal places were requested.
    if arcseconds_decimal_places > 0:
        format_string = '{0}{1:02d}:{2:02d}:{3:02d}.{4:0' + str(int(arcseconds_decimal_places)) + 'd}'
    else:
        format_string = '{0}{1:02d}:{2:02d}:{3:02d}'
    hex_string = format_string.format(sign, int(int_degrees), int(int_arcminutes), int_arcseconds, int_fract_arcseconds)
    return hex_string
87c362b3504ddcee16a07c7a7ac4c4e9db2cb17f
47,605
def get_topic_name_by_arn(topic_arn):
    """Return the topic name — the sixth ':'-separated field — of an SNS
    topic ARN (e.g. "arn:aws:sns:region:account:name" -> "name").

    Bug fix: the character-counting loop appended the terminating ':' itself
    to the name when the ARN contained a sixth colon, because the character
    was appended before the colon counter was incremented. Splitting on ':'
    avoids that; ARNs with fewer than six fields still yield ''.
    """
    parts = topic_arn.split(':')
    return parts[5] if len(parts) > 5 else ''
48768ece1a2662d056c441917fb613e88551f5d1
47,607
def read_mappings_from_dict(index_mapping):
    """
    Read event_class and event_type mappings from a python dict.

    Each of index_mapping['classes'] and index_mapping['types'] is a list of
    3-field rows; the rows are transposed into three parallel column lists.
    """
    def transpose(rows):
        columns = [[], [], []]
        for row in rows:
            columns[0].append(row[0])
            columns[1].append(row[1])
            columns[2].append(row[2])
        return columns

    return transpose(index_mapping['classes']), transpose(index_mapping['types'])
cdf2706fed3cdf5786cc238e090401eeae99a8c2
47,608
def most_similar(indices, values, similarity):
    """Return (score, rank, index) of the candidate maximizing *similarity*.

    Short-circuits as soon as a perfect score of 1 is found; with no
    candidates, returns (None, None, None).
    """
    best = (None, None, None)
    for rank, index in enumerate(indices):
        score = similarity(values[index])
        if score == 1:
            return score, rank, index
        if best[0] is None or score > best[0]:
            best = (score, rank, index)
    return best
4fc45c9e5b8a9df128c125d21a879b3f4e40702c
47,609
def top_p_filter(
    logits: torch.Tensor,
    top_p: float,
    min_tokens_to_keep: int,
    is_probs: bool = False
) -> torch.Tensor:
    """Helper function for nucleus sampling decoding, aka. top-p decoding.

    Masks every token outside the smallest set whose cumulative softmax
    probability exceeds *top_p*.

    :param logits: scores per token; assumes shape (batch, vocab) — the
        scatter below uses dim=1. TODO confirm with callers.
    :param top_p: cumulative-probability threshold in (0, 1].
    :param min_tokens_to_keep: never mask more than vocab - this many tokens.
    :param is_probs: if True, *logits* are probabilities and masked entries
        become 0.0 instead of -inf.
    :return: *logits* with out-of-nucleus entries masked.
    """
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
    # True where the cumulative mass already exceeded top_p (candidates to drop).
    sorted_indices_to_remove = cumulative_probs > top_p
    if min_tokens_to_keep > 1:
        # keep at least min tokens
        sorted_indices_to_remove[..., :min_tokens_to_keep - 1] = 0
    # Shift the indices to the right to keep also the first token above the threshold
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0
    # scatter sorted tensors to original indexing
    indices_to_remove = sorted_indices_to_remove.scatter(
        1, sorted_indices, sorted_indices_to_remove
    )
    if is_probs:
        scores = logits.masked_fill(indices_to_remove, 0.0)
    else:
        scores = logits.masked_fill(indices_to_remove, -float("Inf"))
    return scores
7e54e5cc87afa4eb90ca7316e48b947f9647f210
47,610
def standard_formats(workbook):
    """Add the standard cell formats to an xlsxwriter *workbook* and return
    them keyed by name — the formats used throughout the spreadsheet.

    NOTE(review): *workbook* is assumed to be an ``xlsxwriter.Workbook``
    (only ``add_format`` is called on it) — confirm with callers.
    """
    # darkgray = '#413839'
    # optima_blue = '#18C1FF'
    atomica_blue = "#98E0FA"
    optional_orange = "#FFA500"
    BG_COLOR = atomica_blue       # background of editable (unlocked) cells
    OPT_COLOR = optional_orange   # background flagging optional entries
    BORDER_COLOR = "white"
    formats = {}
    # Locked formats
    formats["bold"] = workbook.add_format({"bold": 1})
    formats["center"] = workbook.add_format({"align": "center"})
    formats["center_bold"] = workbook.add_format({"bold": 1, "align": "center"})
    # Row/column title formats, indexed by alignment then wrap flag ("T"/"F").
    formats["rc_title"] = {}
    formats["rc_title"]["right"] = {}
    formats["rc_title"]["right"]["T"] = workbook.add_format({"bold": 1, "align": "right", "text_wrap": True})
    formats["rc_title"]["right"]["F"] = workbook.add_format({"bold": 1, "align": "right", "text_wrap": False})
    formats["rc_title"]["left"] = {}
    formats["rc_title"]["left"]["T"] = workbook.add_format({"bold": 1, "align": "left", "text_wrap": True})
    formats["rc_title"]["left"]["F"] = workbook.add_format({"bold": 1, "align": "left", "text_wrap": False})
    # Unlocked formats
    formats["unlocked"] = workbook.add_format({"locked": 0, "bg_color": BG_COLOR, "border": 1, "border_color": BORDER_COLOR})
    formats["center_unlocked"] = workbook.add_format({"align": "center", "locked": 0, "bg_color": BG_COLOR, "border": 1, "border_color": BORDER_COLOR})
    formats["general"] = workbook.add_format({"locked": 0, "num_format": 0x00, "bg_color": BG_COLOR, "border": 1, "border_color": BORDER_COLOR})
    # Conditional formats
    formats["unlocked_boolean_true"] = workbook.add_format({"bg_color": OPT_COLOR})
    formats["unlocked_boolean_false"] = workbook.add_format({"bg_color": BG_COLOR})
    formats["not_required"] = workbook.add_format({"bg_color": "#EEEEEE", "border": 1, "border_color": "#CCCCCC"})
    formats["white_bg"] = workbook.add_format({"bg_color": "#FFFFFF", "border": 1, "border_color": "#CCCCCC"})
    # Hatched with diagonal lines - this represents a cell whose value will not
    # be used in the model run (e.g., an assumption that also has
    # time-specific points)
    formats["ignored"] = workbook.add_format({"pattern": 14})
    formats["warning"] = workbook.add_format({"bg_color": "#FF0000"})
    formats["ignored_warning"] = workbook.add_format({"pattern": 14, "bg_color": "#FF0000"})  # hatched, with red background
    formats["ignored_not_required"] = workbook.add_format({"pattern": 14, "bg_color": "#EEEEEE", "border": 1, "border_color": "#CCCCCC"})  # hatched, with grey background
    return formats
c1b4915cf5b7e89c0c255f4a167bbd60afa8382e
47,611
def table_to_report(table, measure):
    """Render query results as a report string.

    Args:
        table: list of 2-tuples, typically from c.fetchall().
        measure: unit string appended after the second value (include a
            leading space if one is wanted, e.g. " views").

    Returns:
        One line per tuple, formatted as "<first> - <second><measure>\\n".

    Example:
        John Smith - 1000 views
        Sun will also rise tomorrow - 10 clicks
    """
    lines = ['{} - {}{}\n'.format(first, second, measure)
             for first, second in table]
    return ''.join(lines)
e3417b159cd7b826c856697d48c34aecd1a81719
47,612
def escape(t):
    """HTML-escape '&' and '<' in *t*.

    Only suitable for HTML text nodes, not attribute values ('"' and ">"
    are left alone).
    """
    for raw, entity in (("&", "&amp;"), ("<", "&lt;")):
        t = t.replace(raw, entity)
    return t
e7a81959969829ac2477cdcb6af081e7011685bc
47,613
def has_code(line: str) -> bool:
    """
    Return True if there's code on the line (so it's not a comment or an
    empty line).

    Bug fix: the original expression
    ``not line.strip().startswith("#") or (line.strip() == "")`` returned
    True for blank lines (the first clause is True, and the `or` could only
    widen it), contradicting the docstring.
    """
    stripped = line.strip()
    return bool(stripped) and not stripped.startswith("#")
ef0975ee21deda1206a1bfc728f47d1119132c70
47,614
def PairsFromGroups(groups):
    """Return a dict such that d[(i, j)] exists iff i and j share a group.

    groups must be a sequence of sequences, e.g. a list of strings; every
    value in the result is None (the dict is used as a pair set).
    """
    result = {}
    for group in groups:
        result.update({(i, j): None for i in group for j in group})
    return result
f449612579e54e1365da459d936e633f38d0ceac
47,615
def digestInSilico(proteinSequence, cleavageRule='[KR]', missedCleavage=0,
                   removeNtermM=True, minLength=5, maxLength=55):
    """Returns a list of peptide sequences and cleavage information derived
    from an in silico digestion of a polypeptide.

    :param proteinSequence: amino acid sequence of the poly peptide to be
        digested
    :param cleavageRule: cleavage rule expressed in a regular expression, see
        :attr:`maspy.constants.expasy_rules`
    :param missedCleavage: number of allowed missed cleavage sites
    :param removeNtermM: bool, True to consider also peptides with the
        N-terminal methionine of the protein removed
    :param minLength: int, only yield peptides with length >= minLength
    :param maxLength: int, only yield peptides with length <= maxLength

    :returns: a list of resulting peptide entries. Protein positions start
        with ``1`` and end with ``len(proteinSequence)``. ::

            [(peptide amino acid sequence,
              {'startPos': int, 'endPos': int, 'missedCleavage': int}
              ), ...
             ]

    .. note::
        This is a regex example for specifying N-terminal cleavage at lysine
        sites ``\\w(?=[K])``
    """
    # Length filter applied to every candidate peptide (positions are
    # half-open slice indices, so endPos - startPos is the peptide length).
    passFilter = lambda startPos, endPos: (endPos - startPos >= minLength and
                                           endPos - startPos <= maxLength
                                           )

    _regexCleave = re.finditer(cleavageRule, proteinSequence)

    cleavagePosList = set(itertools.chain(map(lambda x: x.end(), _regexCleave)))
    cleavagePosList.add(len(proteinSequence))
    cleavagePosList = sorted(list(cleavagePosList))
    #Add end of protein as cleavage site if protein doesn't end with specified
    #cleavage positions
    numCleavageSites = len(cleavagePosList)

    if missedCleavage >= numCleavageSites:
        missedCleavage = numCleavageSites -1

    digestionresults = list()
    #Generate protein n-terminal peptides after methionine removal
    if removeNtermM and proteinSequence[0] == 'M':
        for cleavagePos in range(0, missedCleavage+1):
            startPos = 1
            endPos = cleavagePosList[cleavagePos]
            if passFilter(startPos, endPos):
                sequence = proteinSequence[startPos:endPos]
                info = dict()
                # Reported positions are 1-based and inclusive.
                info['startPos'] = startPos+1
                info['endPos'] = endPos
                info['missedCleavage'] = cleavagePos
                digestionresults.append((sequence, info))

    #Generate protein n-terminal peptides
    if cleavagePosList[0] != 0:
        for cleavagePos in range(0, missedCleavage+1):
            startPos = 0
            endPos = cleavagePosList[cleavagePos]
            if passFilter(startPos, endPos):
                sequence = proteinSequence[startPos:endPos]
                info = dict()
                info['startPos'] = startPos+1
                info['endPos'] = endPos
                info['missedCleavage'] = cleavagePos
                digestionresults.append((sequence, info))

    #Generate all remaining peptides, including the c-terminal peptides
    lastCleavagePos = 0
    while lastCleavagePos < numCleavageSites:
        # NOTE(review): the inner loop variable deliberately(?) reuses the
        # name `missedCleavage`, shadowing the parameter; after the loop the
        # name holds its final value (equal to the old bound), so behaviour
        # is stable — but confirm this shadowing is intentional upstream.
        for missedCleavage in range(0, missedCleavage+1):
            nextCleavagePos = lastCleavagePos + missedCleavage + 1
            if nextCleavagePos < numCleavageSites:
                startPos = cleavagePosList[lastCleavagePos]
                endPos = cleavagePosList[nextCleavagePos]
                if passFilter(startPos, endPos):
                    sequence = proteinSequence[startPos:endPos]
                    info = dict()
                    info['startPos'] = startPos+1
                    info['endPos'] = endPos
                    info['missedCleavage'] = missedCleavage
                    digestionresults.append((sequence, info))
        lastCleavagePos += 1

    return digestionresults
2acbab8b585bf31defc77cabb09f8e32ad7a020a
47,616
def set_focus_on_entry(entry):
    """Give keyboard focus to *entry* and return "break" so Tk stops
    propagating the triggering event."""
    entry.focus()
    return "break"
91657ec5613a95f11965f495b38248ca2ef9e23f
47,617
def currentTurn(turn):
    """
    Return the display name for whose turn it is (0 -> "Red Team",
    1 -> "Blue Team", anything else -> None), avoiding if/else at call sites.
    """
    return {0: "Red Team", 1: "Blue Team"}.get(turn)
81f8ec74bae351951d18539b08bb7a454be5359b
47,618
def _version_to_tuple(version): """Converts the version string ``major.minor`` to ``(major, minor)`` int tuple.""" major, minor = version.split('.') return (int(major), int(minor))
921a3855fd23a597f13dab27f660cf4b0113926b
47,620
def parseExtn(extn=None):
    """
    Parse a string representing a qualified fits extension name as in the
    output of `parseFilename` and return a tuple ``(str(extname),
    int(extver))``, which can be passed to `astropy.io.fits` functions using
    the 'ext' kw.  Default return is the first extension in a fits file.

    Examples
    --------
    ::

        >>> parseExtn('sci, 2')
        ('sci', 2)
        >>> parseExtn('2')
        ('', 2)
        >>> parseExtn('sci')
        ('sci', 1)

    """
    if not extn:
        # None or empty string: the primary HDU.
        return ('', 0)

    try:
        lext = extn.split(',')
    except AttributeError:
        # Non-string input cannot be parsed; fall back to the first
        # extension (str.split never raises for real strings, so the
        # original bare except only ever masked this case).
        return ('', 1)

    if len(lext) == 1 and lext[0].isdigit():
        # Bare extension number, e.g. '2'.
        return ("", int(lext[0]))
    elif len(lext) == 2:
        # 'extname, extver' pair; tolerate surrounding whitespace as in
        # the docstring example 'sci, 2'.
        return (lext[0].strip(), int(lext[1]))
    else:
        # Bare extension name, e.g. 'sci'.
        return (lext[0].strip(), 1)
ddbc2c3e16161431ce458eb2ff441ab8a21145ee
47,622
import json


def get_config():
    """Load the configuration, preferring ``default.json`` and falling
    back to ``config.json`` when the former does not exist."""
    try:
        config_file = open('default.json', mode='r', encoding='utf-8')
    except FileNotFoundError:
        # A missing fallback file propagates FileNotFoundError, exactly
        # like the original.
        config_file = open('config.json', mode='r', encoding='utf-8')
    with config_file:
        return json.load(config_file)
e39d96d34df9915dad97bd42b446ea0298a40628
47,623
def list_to_str(seq, sep="-"):
    """Transform the input sequence into a ready-to-print string

    Parameters
    ----------
    seq : list, tuple, dict
        Input sequence that must be transformed
    sep : str
        Separator that must appears between each `seq` items

    Returns
    -------
    str
        Printable version of input list
    """
    return sep.join(map(str, seq))
e666ae9beff7733bf3f050a6ff65b69dddd8edcc
47,624
def makeTokenSet():
    """Build the CPP56X token set of C++ tokens.

    The token set must be identical to the one used for training the DNN.

    Returns a dictionary of tokens:
        - key: string representing the token
        - value: integer id of the token
    """
    # CPP56 operators, grouped by category
    cpp_operators = [
        "=", "+", "-", "*", "/",                    # assignment / arithmetic
        "%", "&", "|", "^", "~", "<<", ">>",        # bitwise
        "+=", "-=", "*=", "/=", "%=", "++", "--",   # compound arithmetic
        "&=", "|=", "^=", "<<=", ">>=",             # compound bitwise
        "==", "!=", "<", "<=", ">", ">=",           # comparison
        "?", "&&", "||", "!",                       # logical
        "(", ")", "{", "}", "[", "]", "->", ";", ","]  # others

    # CPP56 keywords
    cpp_keywords = ["if", "else", "for", "while", "switch", "enum",
                    "int", "char", "short", "long", "float", "double", "bool"]

    # Alternative spellings that share the id of their canonical operator
    cpp_synonyms = {"and": "&&", "or": "||", "not": "!"}

    token_dict = {token: idx
                  for idx, token in enumerate(cpp_operators + cpp_keywords)}
    print(f"Token set of {len(token_dict)} tokens is constructed")

    for alias, canonical in cpp_synonyms.items():
        token_dict[alias] = token_dict[canonical]
    print(f"Additionally it has {len(cpp_synonyms)} synonym tokens")

    return token_dict
c77e4b38cf62a22d1cf36eac1fd10a91222dfa9c
47,630
import requests


def check_http_response(response, expected_status_code):
    """Validate an HTTP response's status code.

    :param response: a ``requests`` response object (anything exposing
        ``status_code``)
    :param expected_status_code: int, the status code that signals success
    :returns: True when the status matches ``expected_status_code``,
        False for any other non-error status
    :raises requests.ConnectionError: on 401 (invalid credentials)
    :raises IOError: on 404 or 500 (operation cannot be performed)
    """
    status = response.status_code
    if status == expected_status_code:
        return True
    if status == 401:
        # Unauthorized - most likely bad credentials.
        msg = "ERROR: status = %s - Invalid credentials?" % status
        raise requests.ConnectionError(msg)
    if status in [404, 500]:
        # Unknown resource or server error; a 404 response body cannot be
        # jsonified, so raise a plain error instead.  (The original held
        # this note in a stray triple-quoted string, which is a no-op
        # expression statement, not a comment.)
        error = "Operation cannot be performed, retry with valid parameters"
        raise IOError(error)
    return False
d8715acba9a8e7dd60bc0c778235ff9989827d93
47,634
def create_pyramid(wall, ip):
    """ Shorten the rows to relevant values - integers in range 800-999
    under the initial position brick."""
    # Row r keeps a window around column ip that widens by one brick on
    # alternating sides: the left bound drops on odd rows, the right bound
    # grows on even rows (after row 0).  Closed form of the original
    # running ni/pi counters:
    #   left  = ip - ceil(r / 2)   = ip - (r + 1) // 2
    #   right = ip + 1 + r // 2
    for row_idx in range(len(wall)):
        left = ip - (row_idx + 1) // 2
        right = ip + 1 + row_idx // 2
        wall[row_idx] = [int(brick) for brick in wall[row_idx][left:right]]
    return wall
26722fae0b02d974545e597203d708c042a5669e
47,636
def _report_body(*, image: str, repo: str, run: str, stacktrace: str) -> str: """Format the error report.""" return ( f"Repo: {repo}\n" f"Run URL: {run}\n" f"Image ID: {image}\n" f"Stacktrace:\n```py\n{stacktrace}\n```\n" )
a81ea924078f0225aba3ab441aef42c3393118cb
47,637
import importlib
import time


def time_algo(call_string, module_name):
    """
    Times the execution of a python call string.

    NOTE(review): ``call_string`` is executed with ``exec`` and must come
    from trusted input only.  The imported module is bound to the local
    name ``module``, so call strings should reference it as ``module``.

    :param call_string: str
        string that calls a python module and executes an algorithm
    :param module_name: str
        name of module from which function is called
    :return run_time: float
        time in seconds required to execute python call string
    """
    # Keep the local name ``module`` - exec'd call strings look it up
    # from this function's locals.
    module = importlib.import_module(module_name)
    start = time.time()
    exec(call_string)
    run_time = time.time() - start
    return run_time
f24e708c04a765487b3c009b7ef5f9929e4c885b
47,638
import torch def normalizePi(pi, logPi, mu): """Apply squashing function. See appendix C from https://arxiv.org/pdf/1812.05905.pdf. """ # action_max = envEval.action_space.high[0] # action_min = envEval.action_space.low[0] # action_scale = torch.tensor((action_max - action_min).item() / 2.) # action_bias = torch.tensor((action_max + action_min) / 2.) action_scale = 1 action_bias = 0 mu = torch.tanh(mu) * action_scale + action_bias pi = torch.tanh(pi) epsilon = 1e-6 # Avoid NaN (prevents division by zero or log of zero) LogPi_jacobian = torch.log(action_scale * (1 - pi.pow(2)) + epsilon).sum( -1, keepdim=True ) logPi -= LogPi_jacobian pi = pi * action_scale + action_bias return pi, logPi, mu, LogPi_jacobian
bdc8db77b9f650bd3cbb5b17b573fd6d7aada3b7
47,639
def compute_columns(n_items, n_rows):
    """Compute the required number of columns given a number of items
    n_items to be displayed in a grid n_rows x n_cols"""
    if n_rows > n_items:
        # Fewer items than requested rows: one item per row, one column.
        return n_items, 1
    # Ceiling division: the smallest n_cols with n_rows * n_cols >= n_items.
    return n_rows, -(-n_items // n_rows)
dbc4a87d0d335055ea8f8b6115289b94cb15a655
47,640