content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def is_not_extreme_outlier(x, _min, _max):
    """Return True when x lies within the closed interval [_min, _max]."""
    return _min <= x <= _max
22514eedba0eeb44a3f25f2ba255ebce075c9ab6
56,412
def pop_and_rotate_text_list(text) -> tuple:
    """Rotate a stringified Python list left by one position.

    Returns the rotated list in string form together with the item that
    was popped from the front (and re-appended at the back).
    """
    items = [part.strip()
             for part in text.strip().strip('[]').strip().strip(',').split(',')]
    head, tail = items[0], items[1:]
    rotated = tail + [head]
    return '[{}]'.format(', '.join(rotated)), head
c737bd9fe4becea7fc946c4db2b04a40e0152256
379,742
import torch


def get_spatial_meshgrid(x: torch.Tensor, scale=False):
    """
    Get grid which contains spatial coordinates at each pixel location

    Args:
        x: image of shape [batch_size, channels, height, width] for which
            we want to generate the spatial grid
        scale: if True, divide x-coordinates by width and y-coordinates by
            height so values fall in [0, 1)

    Returns:
        Tensor of shape [batch_size, 2, height, width]; channel 0 holds the
        x (column) coordinate, channel 1 the y (row) coordinate.
    """
    batch_size, _, height, width = x.size()
    # Generate mesh grid: xx varies along columns, yy along rows
    xx = torch.arange(0, width).view(1, -1).repeat(height, 1)
    yy = torch.arange(0, height).view(-1, 1).repeat(1, width)
    # Broadcast each coordinate plane across the batch dimension
    xx = xx.view(1, 1, height, width).repeat(batch_size, 1, 1, 1)
    yy = yy.view(1, 1, height, width).repeat(batch_size, 1, 1, 1)
    grid = torch.cat((xx, yy), 1).float()
    # Keep the grid on the same device as the input image
    if x.is_cuda:
        grid = grid.cuda()
    if scale:
        grid[:, 0] = grid[:, 0] / width
        grid[:, 1] = grid[:, 1] / height
    return grid
daaf6e0e6c164bab2c9ce3d932ef849e2c4adb07
85,729
import ssl


def build_ssl_context(local_certs_file: str, ca_certs_file: str, key_file: str):
    """Build an SSL context that requires client-certificate authentication.

    :param local_certs_file: The filename of the certificate to present for
        authentication.
    :param ca_certs_file: The filename of the CA certificates as passed to
        ssl.SSLContext.load_verify_locations
    :param key_file: The filename of the private key for the certificate
        identified by local_certs_file.
    """
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain(local_certs_file, key_file)
    # Require peers to present a certificate, verified against the CA bundle.
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(ca_certs_file)
    return context
1643fb82319e421ce87054f00d0a18a288e08dfa
659,280
def calc_offset(offset):
    """Return the next offset: Part 1 offsets advance by one after use."""
    return 1 + offset
8b9b0a4235b8e84849de666304b3d0873522b685
275,666
import json
from shutil import copyfile


def LoadJSONFile(path, appendStr, count=0):
    """
    Loads data from a JSON file and corrects unfinished runs by appending a
    string to the file.

    Args:
        path: path to json file (must end with ".json")
        appendStr: string to append in case of a JSONDecodeError
        count: number of times this function was called recursively

    Returns:
        dict object representing the loaded data

    Raises:
        Exception: Unable to correctly load the file with the appended string.

    Note:
        For portability, this function detects a ValueError instead of a
        JSONDecodeError which inherits from it.

    Fix: files are now opened via context managers; the original leaked
    every file handle it opened.
    """
    filemod = path[:-5] + "-mod" + ".json"
    try:
        if count == 0:
            with open(path) as handle:
                return json.load(handle)
        elif count == 1:
            with open(filemod) as handle:
                return json.load(handle)
        else:
            raise Exception('LoadJSONFile was not able to correct the file "{}" with an appended "{}". '
                            'Check the file manually.'.format(path, appendStr))
    except ValueError:
        # Copy the broken file aside, append the repair string, retry once.
        copyfile(path, filemod)
        with open(filemod, "a") as fh:
            fh.write(appendStr)
        return LoadJSONFile(path, appendStr, count + 1)
e477817e5e1725954302ae42b17254aac52fb55d
281,041
def parse_psipred_file(psipred_output):
    """Parse a PSIPRED output file and return the predicted secondary
    structure as a string (third space-separated field of each line).

    Fix: the file is now closed via a context manager; the original
    leaked the handle returned by open().

    :param psipred_output: path to the PSIPRED output file
    :return: concatenated secondary-structure characters
    """
    seq = ""
    with open(psipred_output) as handle:
        for line in handle:
            # NOTE(review): splitting on a single space, as the original did;
            # runs of spaces would yield empty fields — confirm input format.
            fields = line.strip().split(" ")
            seq += fields[2]
    return seq
ceef189d62c73f82bb8cb1e589cf217234050d53
687,951
def _B(slot): """Convert slot to Byte boundary""" return slot*2
97f13e9fd99989a83e32f635193a0058656df68b
925
import re def _parse_gpus(s: str) -> str: """ Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a string of list of ints. """ # Perhaps unnecessary sanity check? s = s.replace(' ', '') range_re = re.compile(r'^(\d+)-(\d+)$') m = range_re.match(s) if m: range_ints = list(range(int(m.group(1)), int(m.group(2))+1)) return ','.join([str(n) for n in range_ints]) # Otherwise, it's just a comma-separated string already, but we do checks: try: vals = list(int(n) for n in s.split(',')) except ValueError: print('Oops, wrong GPU number!') raise return s
edeae4dfacf5e133f588fd09a61afc61d86746d9
609,625
import re


def get_pattern(delimiter, s):
    """Return all substrings of *s* enclosed between two occurrences of
    *delimiter* (non-greedy, non-overlapping).

    Fix: the delimiter is escaped with re.escape so delimiters containing
    regex metacharacters (e.g. '*', '|', '(') are matched literally instead
    of corrupting or crashing the pattern.

    :param delimiter: literal delimiter text
    :param s: string to search
    :return: list of captured substrings
    """
    escaped = re.escape(delimiter)
    regex = "%s(.*?)%s" % (escaped, escaped)
    return re.findall(regex, s)
9233549de43a9c7d8d19240d0eaf7076fc9ce5a8
75,349
def s3_list_objects_v1(
    s3_obj, bucketname, prefix="", delimiter="", max_keys=1000, marker=""
):
    """
    Boto3 client based list object version1

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        prefix (str): Limits the response to keys that begin with the
            specified prefix.
        delimiter (str): Character used to group keys.
        max_keys (int): Maximum number of keys returned in the response.
            Default 1,000 keys.
        marker (str): key to start with when listing objects in a bucket.

    Returns:
        dict : list object v1 response
    """
    request_kwargs = {
        "Bucket": bucketname,
        "Prefix": prefix,
        "Delimiter": delimiter,
        "MaxKeys": max_keys,
        "Marker": marker,
    }
    return s3_obj.s3_client.list_objects(**request_kwargs)
8aed3dce53c286039de63662ea82437e3f2c828a
198,253
import random


def weighted_choice(choices):
    """Pick a single value from *choices* by weighted random selection.

    choices is a list of (value, weight) tuples.

    Examples:
        weighted_choice([('val1', 5), ('val2', 0.3), ('val3', 1)])
    """
    values, weights = zip(*choices)
    selected = random.choices(population=values, weights=weights, k=1)
    return selected[0]
d65a211bced6d022c6bc01c9430609df152a58ad
501,537
def inputs_recursive_indent(text, depth):
    """Wrap *text* in a <td> cell indented 10px per level of *depth* for use
    inside a <tr>."""
    padding = (depth - 1) * 10
    return f'<td style="padding-left:{padding}px">{text}</td>'
b212d8ff9edd87e91599e57b923ad5b084306bc3
92,290
def _gen_find(subseq, generator): """Returns the first position of `subseq` in the generator or -1 if there is no such position.""" if isinstance(subseq, bytes): subseq = bytearray(subseq) subseq = list(subseq) pos = 0 saved = [] for c in generator: saved.append(c) if len(saved) > len(subseq): saved.pop(0) pos += 1 if saved == subseq: return pos return -1
ec89e787a61d684e2a7d0c8c2d0fb9c89cf73ada
705,996
import torch


def ts_feature_map_loss(output, label):
    """Compute the smooth L1 (Huber) loss between output and label."""
    loss_fn = torch.nn.functional.smooth_l1_loss
    return loss_fn(output, label)
8ca0e195dc177f6a35ac9e48ac17531ab1909009
586,697
def count_frequencies(file) -> dict:
    """Count how often each character occurs across an iterable of lines.

    Fix: the original docstring claimed the result was sorted by ascending
    frequency — no sorting is performed; keys appear in first-seen order.

    :param file: an open text file (or any iterable of strings)
    :return: dict mapping each character to its occurrence count
    """
    frequencies = {}
    for line in file:
        for char in line:
            # dict.get collapses the original add-or-increment branches.
            frequencies[char] = frequencies.get(char, 0) + 1
    return frequencies
09beb23f80576a073ed88306ee5d7c423e2eae6e
225,045
from typing import List


def get_combinations(candidates: List[int], target: int) -> List[List[int]]:
    """Returns a list of lists representing each possible set of drops.

    This function (and its recursive helper function) was adapted from
    https://wlcoding.blogspot.com/2015/03/combination-sum-i-ii.html.

    Args:
        candidates: A list of possible numbers of pieces to drop on a square.
            Effectively, this arg is equivalent to range(1, carry_size + 1).
        target: The number of stones in the carry_size. The number of dropped
            stones must equal the number of stones picked up.

    Returns:
        A list of lists of possible combinations. Note that these lists do not
        contain every permutation of drops, merely every combination of valid
        ints that sums to the target value.
    """
    def get_combinations_rec(candidates, target, index, partial_sum,
                             list_t, combinations) -> None:
        """A recursive helper function for get_combinations."""
        # Found a full combination: record a copy, since list_t keeps mutating.
        if partial_sum == target:
            combinations.append(list(list_t))
        for i in range(index, len(candidates)):
            # Early exit on overshoot; assumes candidates are ascending (as
            # range(1, carry_size + 1) produces) — TODO confirm for other input.
            if partial_sum + candidates[i] > target:
                break
            list_t.append(candidates[i])
            # Recurse with the same index i so a candidate may repeat.
            get_combinations_rec(
                candidates, target, i, partial_sum+candidates[i], list_t,
                combinations
            )
            # Backtrack before trying the next candidate.
            list_t.pop()
    combinations: List = []
    get_combinations_rec(candidates, target, 0, 0, [], combinations)
    return combinations
9d68f3b69c23697d924e40d4471296223871165a
28,260
def crop(image, startX, startY, endX, endY):
    """
    Crop an image

    :param image: Image to be cropped (row-major array: rows are Y)
    :param startX: Starting X coord
    :param startY: Starting Y coord
    :param endX: Ending X coord
    :param endY: Ending Y coord
    :return: Cropped image
    """
    return image[startY:endY, startX:endX]
361d9f799aef03f3ce9d621550801c4d2a293b3b
660,342
def parse_slice_inv(text):
    """Parse a string into a slice notation.

    This function inverts the result from 'parse_slice'.

    :param str text: the input string.
    :return str: the slice notation.
    :raise ValueError

    Examples:
        parse_slice_inv('[None, None]') == ":"
        parse_slice_inv('[1, 2]') == "1:2"
        parse_slice_inv('[0, 10, 2]') == "0:10:2"
    """
    err_msg = f"Failed to convert '{text}' to a slice notation."
    if len(text) > 1:
        try:
            parts = [None if v.strip() == 'None' else int(v)
                     for v in text[1:-1].split(',')]
        except ValueError:
            raise ValueError(err_msg)
        # Only 2- and 3-element forms are valid slices.
        if len(parts) in (2, 3):
            return ':'.join('' if p is None else str(p) for p in parts)
    raise ValueError(err_msg)
4da2080910c15852de676829521e6d99eb990b6a
334,936
def linear(z):
    """Identity (linear) activation: returns its input unchanged."""
    return z
c6123a14e08001e6d506f5d548f2eae0770958e0
171,472
def _is_valid_ticker(ticker: str) -> bool: """Helper to drop tickers/terms with unwanted patterns""" restricted = ["Overnight", "W"] for el in restricted: if el in ticker: return False return True
558199124f59aabcbfb7730a26df3164e32427c3
59,062
def _cli_bytes_from_str(text): """ Python 2/3 compatibility function to ensure that what is sent on the command line is converted into bytes. In Python 2 this is a no-op. """ if isinstance(text, bytes): return text else: return text.encode("utf-8", errors="surrogateescape")
e710819cb60ccfcdcd0a840c20c19882fec709ea
653,198
def build_mssql_trusted_connection_string(server, database):
    """
    Given a server and database name, build a Trusted Connection MSSQL
    connection string
    """
    return (
        f'DRIVER={{SQL Server Native Client 11.0}};'
        f'Server={server};Database={database};Trusted_Connection=yes;'
    )
9375c2b9efb1bf92dd66e01a5fd13de1243e1bb6
525,581
def find_join(df, id, downstream_col="downstream", upstream_col="upstream"):
    """Find the joins for a given segment id in a joins table.

    Parameters
    ----------
    df : DataFrame
        data frame containing the joins
    id : any
        id to lookup in upstream or downstream columns
    downstream_col : str, optional (default "downstream")
        name of downstream column
    upstream_col : str, optional (default "upstream")
        name of upstream column

    Returns
    -------
    Joins that have the id as an upstream or downstream.
    """
    mask = (df[upstream_col] == id) | (df[downstream_col] == id)
    return df.loc[mask]
504da29838f60979c441273a795d2e1c328b7988
92,749
import re


def sanitize_user_identified_syntax(msg):
    """
    Captures variations of the ALTER/CREATE USER...IDENTIFIED... syntax.
    See: https://mariadb.com/kb/en/library/alter-user/
    """
    # Only touch statements that look like user-management DDL.
    if "ALTER USER" in msg.upper() or "CREATE USER" in msg.upper():
        # NOTE(review): ".*" is greedy, so everything up to the LAST quote on
        # the line is masked — over-masking is accepted to avoid leaking the
        # credential after the BY/USING/AS keyword.
        msg = re.sub(r"BY .*('|\")", 'BY ***', msg, flags=re.IGNORECASE)
        msg = re.sub(r"USING .*('|\")", 'USING ***', msg, flags=re.IGNORECASE)
        msg = re.sub(r"AS .*('|\")", 'AS ***', msg, flags=re.IGNORECASE)
    return msg
5173f8ec8f240f53a996d4eaa58f006f9ae2f490
418,750
def unicode_to_int(unicode_str):
    """Return the Unicode code point of the single character *unicode_str*."""
    codepoint = ord(unicode_str)
    return codepoint
27bfece829952ac48282d0cfa0780cb2e71323c8
326,095
def season(x):
    """Map a date-like object's month to a season index.

    1 = Apr-Jun, 2 = Jul-Sep, 3 = Oct-Nov, 4 = Dec-Mar.
    """
    month = x.month
    if 3 < month <= 6:
        return 1
    if 6 < month <= 9:
        return 2
    if 9 < month <= 11:
        return 3
    return 4
f7304e65933c681731050fb67c85724c6ac492e9
44,476
def condor_str(str_in):
    """Return *str_in* wrapped in double quotes (converted to str first)."""
    return f'"{str_in}"'
e2d1680ce5878031e70d84bc68e9ab2a82ddfdc3
347,904
def array_core(ary):
    """
    Extract the repeated core of a broadcast array.

    Broadcast arrays are by definition non-contiguous due to repeated
    dimensions, i.e., dimensions with stride 0. In order to ascertain memory
    contiguity and copy the underlying data from such arrays, we must create
    a view without the repeated dimensions.
    """
    # 0-d arrays have empty strides: nothing to strip.
    if not ary.strides:
        return ary
    # Index repeated (stride-0) dimensions with 0, keep the rest fully.
    selector = tuple(0 if stride == 0 else slice(None)
                     for stride in ary.strides)
    return ary[selector]
0f028f16a3e727de2e5d589aef8266a3139ccf84
375,562
def get_function_responses(function):
    """
    Return a list of responses that are documented using
    api.decorators.response
    """
    documented = getattr(function, 'doc_responses', [])
    return [{'code': code, 'message': description}
            for code, description in documented]
95fd8873d0210e02070164f2fe696fbece909db5
531,427
from typing import Any
from typing import Dict


def get_regions(youtube: Any) -> Dict[str, str]:
    """retrieves content regions that the YouTube website supports

    cost = 1

    Args:
        youtube (Any): a resource object with methods for interacting with
            the service.

    Returns:
        Dict[str, str]: region id mapped with its name
    """
    response = youtube.i18nRegions().list(part="snippet").execute()
    regions = {}
    for region in response["items"]:
        regions[region["id"]] = region["snippet"]["name"]
    return regions
0e74137b8698c13b86897dcd8502c6b85291c375
326,930
def nParams(self):
    """Get the number of parameters of this module.

    Example::

        # returns 9, because 6 (2*3) for weight, and 3 for bias
        nn.Linear(2, 3).nParams
    """
    return sum(p.numel() for p in self.parameters())
c5f5c920f997c73960ec126a258ab869eaeb2071
395,897
import requests
from bs4 import BeautifulSoup


def get_definition(search):
    """
    Get the first definition of the word using merriam-webster dictionary
    online.

    :param search: Word to find the definition of.
    :return: Definition of search.
        NOTE(review): this is a bs4 element, not a plain string.
    :raises requests.HTTPError: if the page request fails (raise_for_status).
    :raises IndexError: if no definition element is found on the page.
    """
    # Read HTML website
    req = requests.get("https://merriam-webster.com/dictionary/" + search)
    req.raise_for_status()
    soup = BeautifulSoup(req.text, "html.parser")
    elems = soup.select('p[class="definition-inner-item with-sense"] > span')
    # Fall back to the plain definition container when the first selector
    # matches nothing.
    if len(elems) == 0:
        elems = soup.select('p[class="definition-inner-item"] > span')
    return elems[0]
7bc375736f9c0cc8c0a64f06926fbade8681518a
643,500
def Take(*args):  # pylint: disable=invalid-name
    """Layer to pick subset of inputs from parallel input stream.

    Args:
        *args: a sequence of ints

    Returns:
        A new layer that selects inputs from an incoming parallel stream.
        In numpy notation: outputs = parallel_inputs[args]
        If the resulting output list has only one member, it is automatically
        unwrapped and the contents are passed on directly.
    """
    def init_fun(_, input_shape):
        # Mirror apply_fun on shapes: pick the shapes of the selected inputs.
        output_shape = []
        for arg in args:
            output_shape.append(input_shape[arg])
        if len(output_shape) == 1:
            # Single selection: unwrap, passing the shape through directly.
            output_shape = output_shape[0]
        return (output_shape, ())  # empty tuple: this layer has no parameters
    def apply_fun(params, inputs, **kwargs):
        del params, kwargs  # unused — the layer is parameter-free
        outputs = []
        for arg in args:
            outputs.append(inputs[arg])
        if len(outputs) == 1:
            # Single selection: unwrap the list (see docstring).
            outputs = outputs[0]
        return outputs
    return init_fun, apply_fun
08cc9643592abce766a2c0dbd45080693eb4b164
288,676
def getIntegerInput(request, fieldname, default=None, minval=None, maxval=None):
    """ Get an integer value from a request parameter. If the value is out of
        bounds, it's made to fit into those bounds.

        Returns `default` in case of errors (not a valid integer, or field is
        missing).
    """
    try:
        value = int(request.values[fieldname])
    except (KeyError, ValueError):
        return default
    # Clamp into the [minval, maxval] bounds when given.
    if minval is not None and value < minval:
        value = minval
    if maxval is not None and value > maxval:
        value = maxval
    return value
5d6dd65ad742b3bd5b037251f2fd0d2e5f927a93
332,402
from datetime import datetime


def parse_datetime(timestamp: str) -> datetime:
    """Parse an AWS-style timestamp string such as
    "2020-09-21 01:53:07.692000+00:00" into a timezone-aware datetime.

    Parameters
    ----------
    timestamp: str
        A timestamp to be parsed
    """
    aws_format = "%Y-%m-%d %H:%M:%S.%f%z"
    return datetime.strptime(timestamp, aws_format)
110ec04a029b206ba34cf2e6829d053fa6d3bd28
578,557
def sqlite_db_tables(c=None):
    """
    List the tables of a sqlite database.

    How do I list all tables/indices contained in an SQLite database :
    https://www.sqlite.org/faq.html#q7

    NOTE: rows are accessed by name, so the cursor's connection is assumed
    to use a row factory such as sqlite3.Row — confirm at the call site.
    """
    tables = []
    if c is None:
        return tables
    c.execute('select name from sqlite_master where type = "table" and name NOT LIKE "%sqlite_%"')
    for row in c.fetchall():
        tables.append(row['name'])
    return tables
e0b98eccbbe0a197d00385d115208ac3bf7cf31c
45,140
def _bonferroni(p_values, alpha): """ Perform the `Bonferroni correction`_ on a set of p-values and test for significance. :param p_values: p-values to adjust for family-wise error rate :type p_values: sequence :param alpha: Confidence level between 0 and 1 :type alpha: float :return: Whether or not each result was significant, and corrected p-values. :rtype: (numpy.array[bool], numpy.array[float]) .. _`Bonferroni correction`: http://en.wikipedia.org/wiki/Bonferroni_correction """ n = len(p_values) corrected_pvals = p_values*n return corrected_pvals < alpha, corrected_pvals
9f361ca388fceb5481b26e6e3f589825e6afe996
175,189
def is_valid(array, index):
    """Verify that the (row, column) index is in range of the array's
    contents, honoring ragged rows."""
    row, column = index
    if not 0 <= row < len(array):
        return False
    return 0 <= column < len(array[row])
3b96a30874d6bfeb3047468ce38c5139f5add968
439,727
import types


def copy_func(f, name=None):
    """
    Returns a function with the same code, globals, defaults, closure, and
    name (unless provided a different name).
    """
    clone = types.FunctionType(
        f.__code__,
        f.__globals__,
        name or f.__name__,
        f.__defaults__,
        f.__closure__,
    )
    # Carry over any attributes stored on the original function object.
    clone.__dict__.update(f.__dict__)
    return clone
638ec10b267a61141d2ef2028c63e8ee3b33b8c4
506,108
def try_int(x):
    """Convert the input to an int, or return None if conversion fails.

    None is passed through unchanged.

    Fix: also catches TypeError so unconvertible types (e.g. lists) return
    None instead of raising, matching the documented contract.
    """
    if x is None:
        return x
    try:
        return int(x)
    except (ValueError, TypeError):
        return None
6985da8007e6e34e67c288c981c94cc57b607c98
474,847
import torch
from typing import Union


def first_element(
    x: torch.Tensor,
    element: Union[int, float],
    dim: int = 1,
) -> torch.Tensor:
    """Find the first occurence of element in x along a given dimension.

    Args:
        x: The input tensor to be searched.
        element: The number to look for.
        dim: The dimension to reduce.

    Returns:
        Indices of the first occurence of the element in x. If not found,
        return the length of x along dim.

    Usage:
        >>> first_element(torch.tensor([[1, 2, 3], [2, 3, 3], [1, 1, 1]]), 3)
        tensor([2, 1, 3])

    Reference:
        https://discuss.pytorch.org/t/first-nonzero-index/24769/9

        I fixed an edge case where the element we are looking for is at index
        0. The original algorithm will return the length of x instead of 0.
    """
    mask = x == element
    # cumsum == 1 holds at and after the first match; AND-ing with the mask
    # isolates the first match itself. max() yields (exists?, its index).
    found, indices = ((mask.cumsum(dim) == 1) & mask).max(dim)
    # When nothing was found max() reports index 0 — overwrite those rows
    # with the sentinel value len(x along dim).
    indices[(~found) & (indices == 0)] = x.shape[dim]
    return indices
da23827fb58b15a9b959dea1c5ebc0916a6a7a09
429,262
def create_time_variant_multiplicative_function(time_variant_function):
    """
    similar to create_multiplicative_function, except that the value to
    multiply by can be a function of time, rather than a single value

    :param time_variant_function: function
        a function with the independent variable of time that returns the
        value that the input should be multiplied by
    :return: function
        function that will multiply the input value by the output value of
        the time_variant_function
    """
    def multiplier(input_value, time):
        # Evaluate the time-varying factor, then scale the input by it.
        return time_variant_function(time) * input_value
    return multiplier
cb17c2fbfeb48f660b3f36fd0093bf52afe07f43
98,276
import turtle


def polygon(sides, length, x_pos, y_pos, color, fill=False):
    """
    (int, int, int, int, str, bool) -> turtle

    Draw a regular polygon with the given number of <sides> of <length>.
    Pen begins at point(<x_pos>, <y_pos>) and is <color>
    <fill>, default False, fills the polygon with the specifed <color>

    NOTE(review): errors are RETURNED rather than raised — callers must
    inspect the return value to detect failure.
    """
    try:
        turtle.bgcolor('black')
        # Move to the start point without drawing.
        turtle.penup()
        turtle.setposition(x_pos, y_pos)
        turtle.pendown()
        turtle.color(color)
        if fill:
            turtle.begin_fill()
        # Turn by the exterior angle (360/sides) after each edge.
        for poly_sides in range(sides):
            turtle.forward(length)
            turtle.left(360 // sides)
        if fill:
            turtle.end_fill()
    except TypeError as err1:
        return err1
    except Exception as err2:
        # Handle TurtleGraphicsError
        return err2
c0003d78f047a82fb9ac582b30779bb34cd9e85c
153,764
def isReplacementNeeded(config):
    """
    Check if we need to clean some junk from the source code files path

    :param config: configuration of the flask app
    :return (should_replace_path, new_src_dir)
    """
    should_replace_path = config.get('REPLACE_KERNEL_SRC', False)
    new_src_dir = config.get('SOURCECODE_DIR')
    return (should_replace_path, new_src_dir)
b728db79715cd9fcb7bed5f20edc1bdba73eda2b
377,009
def bricksnpieces_cookies(country='no'):
    """
    Bricks&Pieces requires a couple of cookies to be set to fetch data.
    State 3 is sale.
    """
    country_code = country.upper()
    cookies = {}
    cookies['csAgeAndCountry'] = '{"age":"18","countrycode":"%s"}' % country_code
    cookies['csRpFlowState'] = '{"state":3}'
    cookies['country'] = country_code
    cookies['AGE_GATE'] = 'grown_up'
    return cookies
0e19357a57c6204a0c1d29d06558853574aa5335
180,980
from datetime import datetime def _n64_to_datetime(n64): """ Converts Numpy 64 bit timestamps to datetime objects. Units in seconds """ return datetime.utcfromtimestamp(n64.tolist() / 1e9)
1d46e67edb29da3c0c340bfcc346e80c0fd36541
19,280
def caption(picture):
    """
    Filter to format the caption for a picture.

    Usage: {{ picture|caption }}

    If an object without a name or desciption is used, an empty string is
    returned.
    """
    if not (hasattr(picture, 'name') and hasattr(picture, 'description')):
        return ''
    return '<a href="%s">%s</a><p>%s</p>' % (
        picture.get_absolute_url(), picture.name, picture.description or '')
d1ff47639708aeb6f1ba764ff298fb5df90c9107
283,000
def get_value(matrix: list, cell: tuple) -> str:
    """Return the matrix entry at the (row, col) coordinates as a string."""
    r, c = cell
    return str(matrix[r][c])
a164c8dcb67566d78b02c85edd633979b61ed93b
226,028
def getTableName(table):
    """Return the table's name, stored in its 'name' attribute."""
    name = table.getAttribute('name')
    return name
5f810150331a7947167ca9f880f1fbe0a34540d8
319,938
def get_sql_type(var) -> str:
    """
    Return the SQL type matching a Python value: 'NUMERIC' if it parses as a
    float, 'TEXT' otherwise.

    Fix: also catches TypeError so non-string, non-numeric values (e.g. None
    or lists) are classified as TEXT instead of raising.
    """
    try:
        float(var)
    except (ValueError, TypeError):
        return 'TEXT'
    return 'NUMERIC'
db48a936305377192d06dbf3c3ef0b0b9e467830
541,792
def is_identity(u):
    """Checks if the unit is equivalent to 1 (SI factor 1, all-zero
    dimension exponents)."""
    dimensionless = u.unitDimension == (0, 0, 0, 0, 0, 0, 0)
    return dimensionless and u.unitSI == 1
afc9029eeb44739a38869f06f84ba36b2a19da6f
9,883
import base64


def getB64Decode(data):
    """Base64-decode *data* if possible; return the input unchanged on
    failure.

    Fix: malformed base64 raises binascii.Error (a ValueError subclass),
    which the original except clause did not catch, so invalid input crashed
    instead of being passed through as documented.
    """
    try:
        return base64.b64decode(data)
    except (TypeError, ValueError):
        return data
07352ad3d7a9c902f19297122358eabeb1b533a0
444,164
def distance(plane):
    """Distance the plane is from the origin along its the normal.

    Negative value indicates the plane is facing the origin.
    """
    normal_offset = plane[3]
    return normal_offset
0c795889608f52faea30932642fbe86ff65fe985
127,860
def sample(rng, N, k, i):
    """
    Sample k distinct indices from the range [0, N) excluding index i.
    """
    chosen = set()
    while len(chosen) < k:
        candidate = rng.randint(0, N - 1)
        # Skip the excluded index; duplicates are absorbed by the set.
        if candidate != i:
            chosen.add(candidate)
    return list(chosen)
95c54430c3d1393614c6bf8cd6ac4b5e5ea6ba3c
608,795
def get_mac_s(output: bytes) -> bytes:
    """Support function to get the 64-bit resynchronisation authentication
    code (MAC-S) from OUT1, the output of 3GPP f1* function.

    :param output: OUT1
    :returns: OUT1[64] .. OUT1[127]
    """
    # MAC-S occupies the second half: bits 64..127, i.e. bytes 8 onward.
    boundary = 8
    return output[boundary:]
c5f1d2d14819e9a9bf660aeb82dd24b8111263b5
45,823
import pathlib


def missing_options_file(tmpdir, request):
    """Return a path to an config file that is valid, but missing expected
    options (commented out below). Remove the file when a test is finished
    with it.

    Args:
        tmpdir: pytest tmpdir fixture supplying a per-test directory.
        request: pytest request fixture, used to register the cleanup
            finalizer.

    Returns:
        pathlib.Path to the generated 'invalid' config file.
    """
    data_dir = pathlib.Path(str(tmpdir))
    missing_options_file = data_dir.joinpath('invalid')
    with missing_options_file.open('w') as f:
        # The commented-out lines are the options intentionally omitted.
        f.write(
            '[gui]\n'
            # + 'message_duration = 5\n'
            + 'gui_welcome_label = Welcome to the STEM Learning Center!\n'
            # + 'full_user_names = True\n'
            + 'large_font_size = 30\n'
            + 'medium_font_size = 18\n'
            + 'small_font_size = 15\n'
            + 'tiny_font_size = 10\n'
        )
    def tearDown():
        # Remove the generated file once the dependent test has finished.
        if missing_options_file.exists():
            missing_options_file.unlink()
    request.addfinalizer(tearDown)
    return missing_options_file
c2896e6e4f4997cb2ee94270acf634ad559550b2
611,892
def get_child_by_tag(elem, tag_name):
    """Return the first child element with the given tag, or None.

    Fix: iterates the element directly instead of calling
    Element.getchildren(), which was deprecated and removed from
    xml.etree.ElementTree in Python 3.9.
    """
    matches = [child for child in elem if child.tag == tag_name]
    return matches[0] if matches else None
6d04bf7f1a6a9e7eba70e44bfc464083e2029c13
436,533
def PrintHtml(log):
    """Prints a log as HTML.

    Args:
        log: a Log namedtuple.
    """
    def tag(name, cls, value):
        return '<%s class="%s">%s</%s>' % (name, cls, value, name)
    # Pair each log field with its CSS class to form one span per field.
    spans = [tag('span', cls, value)
             for cls, value in zip(('filename', 'date', 'log'), log)]
    print(tag('div', 'line', ' '.join(spans)))
df0836e13c090e058c49d73a2b6ee86b720588c9
688,858
def ordered_keys(subscriptable_object, *keys):
    """
    returns a tuple with the values for KEYS in the order KEYS are provided,
    from SUBSCRIPTABLE_OBJECT. Useful for working with dictionaries when
    parameter ordering is important. Used for sql transactions.
    """
    return tuple(subscriptable_object[key] for key in keys)
cccd03d2351b692e30dcba170331e939f51f5c29
337,633
def load_rfam_accessions_from_file(accession_list):
    """
    This function parses a .txt file containing Rfam accessions and returns
    those

    accession_list: This is a .txt file containing a list of Rfam accessions

    return: list of Rfam family accessions
    """
    with open(accession_list, 'r') as handle:
        return [line.strip() for line in handle]
9ada7394a3a6e5c596bf80b0f610affb89838403
377,678
import torch


def convert_to_radar_frame(pixel_coords, config):
    """Converts pixel_coords (B x N x 2) from pixel coordinates to metric
    coordinates in the radar frame.

    Args:
        pixel_coords (torch.tensor): (B,N,2) pixel coordinates
        config (json): parse configuration file; must provide
            'cart_pixel_width', 'cart_resolution' and 'gpuid'

    Returns:
        torch.tensor: (B,N,2) metric coordinates
    """
    cart_pixel_width = config['cart_pixel_width']
    cart_resolution = config['cart_resolution']
    gpuid = config['gpuid']
    # Metric extent from image center to border; the half-pixel shift only
    # applies to even widths (no single center pixel).
    if (cart_pixel_width % 2) == 0:
        cart_min_range = (cart_pixel_width / 2 - 0.5) * cart_resolution
    else:
        cart_min_range = cart_pixel_width // 2 * cart_resolution
    B, N, _ = pixel_coords.size()
    # R rotates/scales pixel axes into metric axes; t shifts the origin from
    # the image corner to the radar center.
    R = torch.tensor([[0, -cart_resolution], [cart_resolution, 0]]).expand(B, 2, 2).to(gpuid)
    t = torch.tensor([[cart_min_range], [-cart_min_range]]).expand(B, 2, N).to(gpuid)
    return (torch.bmm(R, pixel_coords.transpose(2, 1)) + t).transpose(2, 1)
404236c1c886d5425fe270a78fbcf7c25b5a28e6
693,661
import re


def rreplace(pattern, sub, string):
    """
    Replaces 'pattern' in 'string' with 'sub' if 'pattern' ends 'string'.

    Note: 'pattern' is treated as a regular expression anchored at the end.
    """
    anchored = pattern + '$'
    return re.sub(anchored, sub, string)
ad67c0920a313d676e9903c3811bc97f854506c2
489,474
def replica_keyword(replica_count, mirror_count):
    """Replica volume can be created using `replica` or `mirror` keyword.

    `replica` wins when both counts are positive; empty string when neither.
    """
    if replica_count > 0:
        return "replica"
    return "mirror" if mirror_count > 0 else ""
9a9ea8d7985cae2ef9e1592e227bc6054e88b925
515,902
def compute_sparsity(tensor):
    """
    Compute L1 sparsity of the input tensor.

    Parameters
    ----------
    tensor : torch.Tensor
        (N,) or (B, N) tensor array.

    Returns
    -------
    sparsity : torch.Tensor
        Mean L1 sparsity of `tensor`, scalar.
    """
    # Promote a 1-D tensor to a batch of size 1.
    batch = tensor.unsqueeze(dim=0) if tensor.ndim == 1 else tensor
    mean_l1 = batch.norm(p=1, dim=1).mean()
    return (mean_l1 / batch.shape[1]).squeeze()
92d665fa2b1b482ef8da32149763aad63a6b9418
337,669
def collect_digits(text):
    """Return the digits of *text*, joined by single spaces."""
    return " ".join(filter(str.isdigit, text))
eaa5f2f8a25e3abec4406894da755f86e0a59a4b
171,607
import re


def read_phones(path):
    """
    From a buckeye file, reads the phone lines, appends label, begin, and end
    to output

    Parameters
    ----------
    path : str
        path to file

    Returns
    -------
    output : list of tuples
        each tuple is label, begin, end for a phone
    """
    output = []
    with open(path, 'r') as file_handle:
        # Header ends at a line containing only '#' (optional trailing \r).
        header_pattern = re.compile("#\r{0,1}\n")
        # Phone lines carry a 3-digit field surrounded by whitespace.
        line_pattern = re.compile("\s+\d{3}\s+")
        # Strip trailing ';'/'+' annotations from the label field.
        label_pattern = re.compile(" {0,1};| {0,1}\+")
        # Keep only the body after the header marker.
        f = header_pattern.split(file_handle.read())[1]
        flist = f.splitlines()
        begin = 0.0
        for l in flist:
            line = line_pattern.split(l.strip())
            try:
                end = float(line[0])
            except ValueError:
                # Missing phone label
                print('Warning: no label found in line: \'{}\''.format(l))
                continue
            label = label_pattern.split(line[1])[0]
            output.append((label, begin, end))
            # Each phone begins where the previous one ended.
            begin = end
    return output
3a80eb0a5ccb0a66fa3ec8f6259e0573655d2a46
177,200
def build_tr_create_module_page_link(region, module_type_id):
    """
    Build the direct link to the corresponding Threat Response page in the
    given region for creating a module of the given type.
    """
    if module_type_id is None:
        return 'N/A'
    base = f'https://securex.{region}.security.cisco.com/settings/modules/'
    return base + f'available/{module_type_id}/new'
bde4888194beb532b82e6e2e0cb7c428b7005ac5
687,001
def bytes_to_list(input_bytes, block_len):
    """Split a byte string into chunks of the given length.

    Args:
        input_bytes (bytes): the byte string to split
        block_len (int): length of each chunk

    Returns:
        list: the chunks, in order; the last one may be shorter
    """
    size = int(block_len)
    return [input_bytes[start:start + size]
            for start in range(0, len(input_bytes), size)]
e89d3de3bca21571068e3dae767144565c21cb85
476,948
def normalize(features):
    """
    Scale data in provided series into [0,1] range (min-max normalization).

    :param features:
    :return:
    """
    lo = features.min()
    hi = features.max()
    return (features - lo) / (hi - lo)
a85d77e37e71c732471d7dcd42ae1aef2181f6dc
2,915
def coordinateToIndex(coordinate):
    """Return a raw index (e.g [4, 4]) from board coordinate (e.g. e4)"""
    rank = abs(int(coordinate[1]) - 8)
    file_index = ("a", "b", "c", "d", "e", "f", "g", "h").index(coordinate[0])
    return [rank, file_index]
d3dcf6d01c4bec2058cffef88867d45ba51ea560
3,069
import torch


def random_angular_vel(num: int, device: str, magnitude_stdev: float) -> torch.Tensor:
    """Samples a random angular velocity with standard deviation
    `magnitude_stdev` along a uniformly random axis."""
    direction = torch.randn((num, 3,), dtype=torch.float, device=device)
    direction = direction / torch.norm(direction, p=2, dim=-1).view(-1, 1)
    speed = torch.randn((num, 1,), dtype=torch.float, device=device) * magnitude_stdev
    return speed * direction
226c32de9a76473f53971333842be40e9a980bf1
405,397
def _check_ogrn(ogrn): """Validates OGRN code""" if not ogrn: return False if len(ogrn) == 13: delimeter = 11 elif len(ogrn) == 15: delimeter = 13 else: return False main_part = int(ogrn[:-1]) % delimeter % 10 checksum = int(ogrn[-1]) return main_part == checksum
b30eca3c9b3ff49c5413967915fcbb9e05fe77c7
502,175
def build_dir_list(project_dir, year_list, product_list):
    """Create a list of full directory paths for downloaded MODIS files.

    Paths are joined with Windows-style backslashes, one path per
    (product, year) pair, ordered product-major.

    Fix: the format string now uses an escaped backslash ("\\"); the
    original relied on the invalid escape sequence "\{", which raises a
    DeprecationWarning and is slated to become a SyntaxError.
    """
    dir_list = []
    for product in product_list:
        for year in year_list:
            dir_list.append("{}\\{}\\{}".format(project_dir, product, year))
    return dir_list
38683b30fac0b944f17a27351384066670afda11
670,074
def _check_color_image(image): """ verify proper image :param ndarray image: :return bool: >>> _check_color_image(np.zeros((200, 250, 1))) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: image is not RGB with dims (200, 250, 1) """ if image.ndim != 3 or image.shape[2] != 3: raise ValueError('image is not RGB with dims %s' % repr(image.shape)) return True
dba30fa9beca864c7e41848292d924262539d768
512,089
def mult(x, y):
    """Return the product of the two values."""
    return y * x
f4292f0b88be0ee49df3572066a8cef4c3cb11fe
682,631
import torch


def cat(tensors, dim=0, out=None):
    """As torch.cat, but returns the original tensor when len(tensors) == 1,
    so that an unneeded copy is not made.

    NOTE(review): with a single tensor, `out` is ignored — presumably callers
    never combine `out` with a one-element list; confirm.
    """
    if len(tensors) != 1:
        return torch.cat(tensors, dim=dim, out=out)
    return tensors[0]
d9e30839aac685b5e8f20eef8911631addeb5970
311,286
def get_dset_dict (configuration: dict) -> dict:
    """ Returns the dictionary of datasets.

    Keys are ``<group>_<dataset>`` names; values are lists containing the
    datatype, the list of dimensions, and (appended) the group name used
    for postprocessing. Datasets with an empty dimension list are skipped.

    Parameters:
        configuration (dict) : configuration from `trex.json`

    Returns:
        dset_dict (dict) : dictionary of datasets

    Bug fix: the original appended the group name onto the spec lists stored
    *inside* `configuration`, mutating the caller's data — and appending the
    group name again on every repeated call. A fresh list is built instead.
    """
    dset_dict = {}
    for group, datasets in configuration.items():
        for name, spec in datasets.items():
            if len(spec[1]) != 0:
                # Copy the spec and append the group name, leaving the
                # input configuration untouched.
                dset_dict[f'{group}_{name}'] = list(spec) + [group]
    return dset_dict
697b6787c42a6ef97aa1f02d8d8c618ae68762dc
279,134
def evaluate_multilabel_recall(class_true, class_pred):
    """
    Compute multi-label recall.

    Parameters
    ==========
    class_true : pd.DataFrame
        True labels, (# of classes)-hot encoded.
    class_pred : pd.DataFrame
        Predicted labels, (# of classes)-hot encoded.

    Returns
    =======
    recalls : dict
        Keys for classes, values for per-class recall; classes with no
        positive true labels map to the string 'NULL'.
    """
    positives = class_true == 1
    true_positives = (positives & (class_pred == 1)).sum(axis=0)
    support = positives.sum(axis=0)
    # 0/0 yields NaN for classes with no positives; replaced by 'NULL'.
    return (true_positives / support).fillna('NULL').to_dict()
361cabb297c6d5121a6989a8eb17e13fa332ab2f
468,450
def boost_overlap(group_overlap, overlap):
    """
    Return a fraction of boost based on the number of overlap events in the IAR.

    The 1st overlap event contributes 30%, the 2nd 25%, the 3rd 20%, the
    4th 15% and the 5th 10%; five or more events yield the full boost.

    :param int group_overlap: The number of overlap events in the IAR.
    :param overlap: The total boost amount allotted to this criterion
    :returns: The percent of boost provided by this criterion
    :rtype: float

    >>> boost_overlap(0, 1)
    0.0
    >>> boost_overlap(1, 1)
    0.3
    >>> boost_overlap(2, 1)
    0.55
    >>> boost_overlap(3, 1)
    0.75
    >>> boost_overlap(4, 1)
    0.9
    >>> boost_overlap(5, 1)
    1.0
    """
    increments = (0.3, 0.25, 0.20, 0.15, 0.10)
    reached = min(max(group_overlap, 0), len(increments))
    # Start the sum at 0.0 so the result is always a float.
    fraction = sum(increments[:reached], 0.0)
    return round(overlap * fraction, 2)
caefa6bbfeceabd78f2f03457e29da41501e04c0
614,724
import os.path


def get_gzipped_name(name):
    """Return the location of the gzipped version of a specified file.

    The ".gz" marker is inserted before the file extension, e.g.
    "data/archive.tar" -> "data/archive.gz.tar"; a name without an
    extension simply gets ".gz" appended.

    Bug fix: the original used ``name.rfind('.')``, which returns -1 for
    extension-less names (silently corrupting them) and can match a dot
    inside a directory component; ``os.path.splitext`` handles both cases.
    """
    root, ext = os.path.splitext(name)
    return '%s.gz%s' % (root, ext)
90200ec24f33014ebb5d2a427b3411f8ff53b876
239,990
import json


def load_json(fname):
    """Load and return the JSON object stored in the file at `fname`."""
    with open(fname) as handle:
        return json.load(handle)
547380ea3f323567fec2297db921f2851f5cb297
191,275
import random


def get_rand_number(min_value, max_value):
    """
    Draw a random number from a uniform distribution over
    [min_value, max_value] inclusively.

    Args:
    - min_value (float)
    - max_value (float)
    Return:
    - Random number in this range (float)
    """
    # `span` rather than `range`, to avoid shadowing the builtin.
    span = max_value - min_value
    return min_value + span * random.uniform(0, 1)
39937e7bd281a19bd3bb9cbdad9e327f9829acac
556,807
def GetPayloadPropertiesFileName(payload):
    """Returns the payload properties file given the path to the payload."""
    return '{}.json'.format(payload)
f3d7e258e72a913fe7383b53d46d55830cd7d5e3
256,633
try:
    # NOTE(review): pytz is no longer needed by this function (see below);
    # the import is kept best-effort in case other parts of the file use it.
    import pytz
except ImportError:  # pragma: no cover
    pytz = None


def aware_to_utc(date):
    """Accepts an aware Datetime and returns a naive one in UTC.

    Modernization: uses the stdlib ``datetime.timezone.utc`` instead of
    third-party ``pytz`` — ``astimezone`` to the fixed-offset UTC tzinfo
    yields the identical instant, and pytz is deprecated for new code.
    """
    from datetime import timezone

    return date.astimezone(timezone.utc).replace(tzinfo=None)
fee861d34175e3d31bfec531eb066d8d84785a3d
376,484
from typing import Hashable def _remove_special(col_name: Hashable) -> str: """Remove special characters from column name.""" return "".join(item for item in str(col_name) if item.isalnum() or "_" in item)
735069aec5210cc25233fe5331a91d74a550bfe9
547,970
import torch


def hard_example_mining(dist_mat, is_pos, is_neg):
    """For each anchor, find the hardest positive and negative sample.

    Args:
      dist_mat: pair wise distance between samples, shape [N, M]
      is_pos: positive index with shape [N, M]
      is_neg: negative index with shape [N, M]

    Returns:
      dist_ap: pytorch Variable, distance(anchor, hardest positive); shape [N]
      dist_an: pytorch Variable, distance(anchor, hardest negative); shape [N]

    NOTE(review): an earlier docstring also promised `p_inds`/`n_inds`
    return values, but the function does not compute or return them.

    NOTE: Only consider the case in which all labels have same num of samples,
    thus we can cope with all anchors in parallel. Presumably dist_mat is
    non-negative and every row has at least one positive and one negative
    entry — the masked max/min below would otherwise select 0-valued
    entries; confirm against callers.
    """
    assert len(dist_mat.size()) == 2
    # `dist_ap` means distance(anchor, positive): multiplying by the is_pos
    # mask zeroes non-positive pairs, so the row-wise max picks the hardest
    # (farthest) positive.
    dist_ap, _ = torch.max(dist_mat * is_pos, dim=1)
    # `dist_an` means distance(anchor, negative): positive entries are lifted
    # above every real distance (max + 1), so the row-wise min picks the
    # hardest (closest) negative.
    inf = dist_mat.max() + 1
    dist_an, _ = torch.min(dist_mat * is_neg + is_pos * inf, dim=1)
    return dist_ap, dist_an
9fe2f644397720c84b6ae2753bba525c3b973c00
421,018
def boxcontains(box, p):
    """True iff *box* (4-tuple of (s, n, w, e)) contains point *p* ((lat, lon)).

    South and west edges are inclusive; north and east edges are exclusive.
    """
    south, north, west, east = box
    lat, lon = p[0], p[1]
    if not (south <= lat < north):
        return False
    return west <= lon < east
bfe24ceccbf24b50c05741a86256092bcdd2465d
288,332
def tree_unflatten(flat, tree, copy_from_tree=None):
  """Unflatten a list into a tree given the tree shape as second argument.

  Args:
    flat: a flat list of elements to be assembled into a tree.
    tree: a tree with the structure we want to have in the new tree.
    copy_from_tree: optional list of elements that we just copy from tree.
      This argument is used when the flat version does not contain all
      elements of the expected tree but just a subset, while the rest are
      filled from the tree itself. It allows to omit "unnecessary" elements.
      For example, consider trees (A, (B, X), X) and (X, (A, X), B) where X
      is some element we do not care about. Flattening the first tree and
      removing X yields the flat list [A, B], and the second tree can then
      be reconstructed from this list and the tree (X, (E, X), E) with
      copy_from_tree=[X]. One example where this is used is the weights-tree
      of a model, where layers with no weights have () in the tree and we
      use copy_from_tree=[()] to restore a model from a file that only has
      a list of trainable weights.

  Returns:
    A pair (new_tree, rest_of_flat) where new_tree has the structure of
    tree with leaves taken from flat, and rest_of_flat is whatever remains
    of flat after consuming the tree's leaves (useful for recursion).
  """
  # Elements found in copy_from_tree are taken verbatim; flat is untouched.
  if copy_from_tree is not None and tree in copy_from_tree:
    return tree, flat
  if isinstance(tree, (list, tuple)):
    children = []
    remaining = flat
    for subtree in tree:
      child, remaining = tree_unflatten(remaining, subtree,
                                        copy_from_tree=copy_from_tree)
      children.append(child)
    if isinstance(tree, tuple):
      return tuple(children), remaining
    return children, remaining
  if isinstance(tree, dict):
    result = {}
    remaining = flat
    for key in tree:
      result[key], remaining = tree_unflatten(remaining, tree[key],
                                              copy_from_tree=copy_from_tree)
    return result, remaining
  # A leaf: consume exactly one element of flat.
  return flat[0], flat[1:]
711bc67a20835091360d0fbc64e0a8842eec53ba
709,470
def _to_string(s): """Convert a value into a CSV-safe string.""" if isinstance(s, list): return '[' + ' '.join([str(x) for x in s]) + "]" return str(s)
e3743683e4dc63cc610ca77284981459162b238d
86,386
def crop(image):
    """Crop the image: drop the top 60 rows (sky) and the bottom 25 rows
    (car front).

    Credit: https://github.com/naokishibuya/car-behavioral-cloning
    """
    sky_rows, hood_rows = 60, 25
    return image[sky_rows:-hood_rows, :, :]
d2ae9039b8941a7b7a10d3659d6272de3d6f0987
652,470
import re

# Hoisted to module level: the original re-ran re.compile on every call
# (a cached but still per-call lookup); now it is compiled exactly once.
RE_WHITESPACE = re.compile(r"(\s)+", re.UNICODE)


def strip_multiple_whitespaces(s):
    """
    Collapse runs of whitespace characters (spaces, tabs, line breaks) in
    `s` into single spaces using RE_WHITESPACE.
    """
    return RE_WHITESPACE.sub(" ", s)
c27effcb59dd6ebd3c79eeb4d09c41fb111b51c1
614,202
def get_non_classic(templates_non_classic, js):
    """ Getting edges with non classic peptide bonds.

    Parameters
    ----------
    templates_non_classic : list
        List of atoms of non classic peptide bonds.
    js : dict
        Opened rBAN peptideGraph.json.

    Returns
    -------
    non_classic : list
        List of amino acids, bonded with non classic peptide bonds
        (first-seen order, no duplicates).
    """
    atoms = js['atomicGraph']['atomicGraph']['atoms']
    non_classic = []
    for template in templates_non_classic:
        for atom in atoms:
            match_idx = atom['matchIdx']
            if atom['cdk_idx'] in template and match_idx not in non_classic:
                non_classic.append(match_idx)  # adding amino
    return non_classic
134e623636068cd96886cd365c10fd070ce755b4
68,831
def try_int(x):
    """Return int(x) when the conversion succeeds, otherwise x unchanged.

    Only ValueError (e.g. non-numeric strings) is swallowed; other failures
    such as TypeError still propagate.
    """
    converted = x
    try:
        converted = int(x)
    except ValueError:
        pass
    return converted
991e3cebf45c76336b851ab6d59d07efcc1842e7
599,548
def remove_consecutive_dups(lst):
    """ return a copy of lst with consecutive duplicates of elements
    eliminated. For example, for
    lst = [a, a, a, a, b, c, c, a, a, d, e, e, e, e],
    the returned list is [a, b, c, a, d, e].
    """
    result = []
    for item in lst:
        # Keep the item unless it equals the most recently kept one.
        if not result or item != result[-1]:
            result.append(item)
    return result
7d791c87c5c51c37c7ca5ffa01d04e48b3de0286
31,466
import io
import yaml


def dict_to_yaml(data, width=None, sort=False):
    """ Convert dictionary into yaml.

    :param data: mapping to serialize (must be safe_dump-compatible types)
    :param width: best-effort maximum line width handed to the emitter
    :param sort: whether to sort mapping keys alphabetically
    :return: the YAML document as a string

    NOTE(review): safe_dump is given both a text stream and an explicit
    encoding; presumably this combination still emits text with the PyYAML
    version in use — confirm, since an encoding normally makes the emitter
    produce bytes.
    """
    output = io.StringIO()
    # safe_dump (not dump) deliberately refuses arbitrary Python objects.
    yaml.safe_dump(
        data,
        output,
        sort_keys=sort,
        encoding='utf-8',
        allow_unicode=True,
        width=width,
        indent=4,
        default_flow_style=False)
    return output.getvalue()
1e8a2b6eda961ac1a3d72e7ae3b802e6d6004137
511,619
def identityfrom(identitykey):
    """Get the identity for the key.

    :param identitykey: name@identity, insertkey or requestkey:
        USK@...,...,AQ.CAAE/WebOfTrust/N

    >>> identityfrom("USK@pAOgyTDft8bipMTWwoHk1hJ1lhWDvHP3SILOtD1e444,Wpx6ypjoFrsy6sC9k6BVqw-qVu8fgyXmxikGM4Fygzw,AQACAAE/WebOfTrust/0")
    'pAOgyTDft8bipMTWwoHk1hJ1lhWDvHP3SILOtD1e444'
    """
    # Keep what follows the "USK@" style prefix, if any.
    if "@" in identitykey:
        identitykey = identitykey.split("@")[1]
    # Drop everything from the first path separator or comma onwards.
    for separator in ("/", ","):
        identitykey = identitykey.split(separator)[0]
    return identitykey
ec39e0906d8ade9ffeb9a1b8011ef665700e5ad3
321,694
import re


def file_id(s):
    """Return a conventional file name from a pseudo-file name
    (ast/FooBar.hh -> ast/foo-bar.hh)."""
    # Insert a dash at each lower->upper camel-case boundary, then lowercase.
    dashed = re.sub(r"([a-z])([A-Z])", r"\1-\2", s)
    return dashed.lower()
316248c3cb701466ba9189b1a8bf24ee21384042
40,848
import inspect def _iscallable(obj): """Check if an object is a callable object safe for inspect.find_file.""" return inspect.ismodule(obj) or inspect.isclass(obj) or inspect.ismethod( obj) or inspect.isfunction(obj) or inspect.iscode(obj)
6697973dfdc96c4095175c74c7847cc8ec689f2f
112,233
def unicode_encode(encvalue):
    """
    Unicode encode the specified value in the %u00 format.

    Example: %u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064
    """
    encoded = [hex(ord(ch)).replace("0x", "%u00") for ch in encvalue]
    return "".join(encoded)
d32bcf5bca517841d71aaa3085d8ffd4c81fd50e
277,149