content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def read_file(file, encoding="utf8"):
    """Read a text file and return its full contents as one string."""
    with open(file, "r", encoding=encoding) as handle:
        return handle.read()
d84dd9b30fae644046692758acfbc2037676583f
116,191
def drop_below(threshold=5, **kwargs):
    """Trigger that fires when the price crosses below `threshold`.

    Fires only on the day of the crossing:
    price_today < threshold while price_yest >= threshold.

    :param threshold: price level to watch.
    :param kwargs: must contain 'price_today' and 'price_yest'.
    :return: True if the price fell below the threshold today, else False.
    """
    # The original carried dead, commented-out (and syntactically broken)
    # NaN checks inside the condition; NaN compares False everywhere, so
    # NaN inputs never trigger anyway, which matches previous behavior.
    return kwargs['price_today'] < threshold and kwargs['price_yest'] >= threshold
19a0be6ea404c3f0ff0ac336a864a4e4d7f56d4e
116,194
def find_correlation(x, y, cov):
    """Correlation coefficient of a graph, rounded to two decimals.

    Attributes:
    - x(float): std deviation of the x value column
    - y(float): std deviation of the y value column
    - cov(float): covariance of the graph

    Returns:
    - correlation(float): the correlation value of the graph.
    """
    correlation = cov / (x * y)
    return round(correlation, 2)
3694493c916e5c95d771d783e4632d7ae2daf934
116,195
def reverse(node):
    """Reverse a singly linked list in place.

    Returns the input list's tail node, which becomes the new head.
    Time: O(N)  Space: O(1)
    """
    previous = None
    while node:
        # Re-link the current node backwards and advance, in one step;
        # the right-hand side is fully evaluated before assignment.
        node.next, previous, node = previous, node, node.next
    return previous
0d7847edcc060f1e32641f3f22919b93598ab3ad
116,198
def bytes2str(string):
    """Convert a b'...' bytes object into a '...' str (UTF-8 decoded)."""
    return str(string, "utf8")
4d8aa0720ad0ae013ed6078e84c9eb74179daa37
116,199
def get_web_url_from_stage(stage):
    """Return the full URL of given web environment.

    :param stage: environment name. Can be one of 'preview', 'staging',
        'production' or 'dev' (aliases: 'local', 'development').
    """
    local_aliases = ('local', 'dev', 'development')
    if stage in local_aliases:
        return 'http://localhost'
    return {
        'preview': 'https://www.preview.marketplace.team',
        'staging': 'https://www.staging.marketplace.team',
        'production': 'https://www.digitalmarketplace.service.gov.uk',
    }[stage]
71df04227022fbf163069bee84af9dfb6b0f09b8
116,205
def bisection(func, min_guess, max_guess, err_tolerance):
    """Find a root of `func` using the bisection (bracketing) method.

    arguments:
        func: f(x); must change sign between the two guesses.
        min_guess: lower end of the bracketing interval.
        max_guess: upper end of the bracketing interval.
        err_tolerance: stop once abs(f(midpoint)) < err_tolerance.

    Returns the midpoint x where abs(func(x)) < err_tolerance.
    """
    y_min = func(min_guess)
    # Iterative loop instead of recursion: no recursion-depth limit and
    # no debug printing on every step.
    while True:
        x_mid = 0.5 * (min_guess + max_guess)
        y_mid = func(x_mid)
        if abs(y_mid) < err_tolerance:
            return x_mid
        # Keep the half whose endpoints bracket the sign change.  The
        # original kept "x_mid if y_mid <= 0" which silently assumed an
        # increasing function; this also converges for decreasing ones.
        if (y_min <= 0) == (y_mid <= 0):
            min_guess, y_min = x_mid, y_mid
        else:
            max_guess = x_mid
242157856e068bd23b5cba749ebda99a05110d24
116,206
def _gradient_extrapolation(extrapolation): """ Given the extrapolation of a field, returns the extrapolation mode of the corresponding gradient field. :param extrapolation: string or struct of strings :return: same type as extrapolation """ return {'periodic': 'periodic', 'boundary': 'constant', 'constant': 'constant'}[extrapolation]
95d4ea7cf5cd413b5dd69d25335b3270dedfb6ea
116,207
from datetime import datetime


def time_duration(time_start, time_end):
    """Calculate the time difference between two times of day, in seconds.

    :param time_start: The start time, an 'HH:MM:SS' string.
    :param time_end: The end time, an 'HH:MM:SS' string.
    :return: The time difference in seconds, integer representation
        (the original returned a float despite documenting an integer).
        Negative when time_end is earlier than time_start.
    """
    start = datetime.strptime(time_start, '%H:%M:%S')
    end = datetime.strptime(time_end, '%H:%M:%S')
    return int((end - start).total_seconds())
3420c3fd5625057f8406b675248b214e87ee52b7
116,210
def line(x, a, b):
    """Linear model y = a*x + b -- used in fitting."""
    return b + a * x
94e024ba513b1847ec1cab443f8165897c23b3a4
116,211
import json


def load_config(config_path):
    """Read a .json config file and return its contents.

    :return: config (dict)
    """
    with open(config_path) as config_file:
        return json.load(config_file)
effa02beecfe3f213517b8cb6a0a6029ff6adcce
116,213
def format_date(s):
    """Render a UTC datetime string (as from datetime.isoformat()) in a
    friendlier form: 'YYYY-MM-DD HH:MM UTC'.
    """
    return s[:16].replace('T', ' ') + ' UTC'
93b03d89e5fbb95e96e2193f6f61e8e99169bfe1
116,215
from pathlib import Path


def llvm_ir_path(request) -> Path:
    """A test fixture which yields an LLVM-IR path.

    Expects the pytest ``request`` object of a parametrized fixture;
    ``request.param`` is assumed to be a tuple whose second element is
    the LLVM-IR ``Path`` -- TODO confirm against the
    ``pytest.fixture(params=...)`` declaration supplying the params.
    """
    return request.param[1]
fc3c1bce2866364b0f38652691e288c490ce6cd6
116,217
from typing import Union
from typing import List


def max_length_v2(obj: Union[int, List]) -> int:
    """Return the maximum length of any list in nested list <obj>.

    The *maximum length* of a nested list is defined as:
    1. 0, if <obj> is a number.
    2. The maximum of len(obj) and the lengths of the nested lists
       contained in <obj>, if <obj> is a list.

    >>> max_length_v2(17)
    0
    >>> max_length_v2([1, 2, 3, 17])
    4
    >>> max_length_v2([[1, 2, 3, 3], 4, [4, 5]])
    4
    >>> max_length_v2([2, [2, 2, 2], 2, 3, [3, 4], 5, 6])
    7
    >>> max_length_v2([3, 1, [1, [2, 3, 4, 5, 6, 7], 4]])
    6
    """
    if isinstance(obj, int):
        return 0
    elif obj == []:
        # Guard: the max() below would fail on an empty sequence.
        return 0
    else:
        # obj is a non-empty list: compare its own length against the
        # deepest nested list length.  (A commented-out duplicate
        # implementation was removed from the original.)
        return max(len(obj), max(max_length_v2(x) for x in obj))
6a58c6200ece74151ffbae67eb2b453d02f6fe00
116,218
import requests


def get_page(url):
    """Fetch a URL and return the response body as text.

    :param url: str
    :return: the page HTML as a string, or None if the request failed.
    """
    try:
        response = requests.get(url)
        return response.text
    except requests.RequestException:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return None
866e70921531984b934b92ddeda3f07e7d00ee37
116,223
def pull_log_metadata(line_list):
    """Extract metadata to be stored in the SippingMetadata model.

    :param line_list: Raw list of output pulled from logfile; only the
        first 10 lines are inspected.
    :return: (miseq_path, miseq_folder, fastq_destination, samplesheet)
        strings for the SippingMetadata model; fields not found come
        back as empty strings.
    """
    # (prefix to test, label text stripped from a matching line)
    fields = [
        ('MiSeqPath:', 'MiSeqPath: '),
        ('MiSeqFolder', 'MiSeqFolder: '),
        ('Fastq destination', 'Fastq destination: '),
        ('SampleSheet', 'SampleSheet: '),
    ]
    values = ['', '', '', '']
    for item in line_list[:10]:
        for position, (prefix, label) in enumerate(fields):
            if item.startswith(prefix):
                # Drop commas, then strip the label to keep the value.
                values[position] = item.replace(',', '').replace(label, '')
                break
    return tuple(values)
ee1e35b4be1f0382b1d026608105164cccc7d84a
116,224
import struct
import base64


def encode_hashes(version: int, starting_class: int, ascendency: int, fullscreen: int, hashes: list) -> str:
    """Create a valid poe skilltree url payload.

    Layout: [version int32, class byte, ascendency byte, fullscreen
    byte, then one big-endian uint16 per node hash], base64url-encoded.

    :param version: payload format version.
    :param starting_class: character class id.
    :param ascendency: ascendency id.
    :param fullscreen: fullscreen flag.
    :param hashes: allocated node hashes.
    :return: base64url-encoded payload string.
    """
    # `payload` instead of the original's `bytes`, which shadowed the builtin.
    payload = bytearray(struct.pack('>ibbb', version, starting_class, ascendency, fullscreen))
    for node_hash in hashes:
        payload.extend(struct.pack('>H', node_hash))
    return base64.urlsafe_b64encode(payload).decode("utf-8")
84e0c326ac131db4a5e270964c3c7d4fb259aa6c
116,226
def map_ensemble_member(**facets):
    """
    Returns ensemble member facet value.

    :param facets: dictionary of current facets; must contain the key
        'ensemble_member' (a KeyError propagates otherwise).
    :return: ensemble member value.
    """
    return facets['ensemble_member']
17d200efa1f8a784a0f48c58e53c9c5918dcebc7
116,233
def merger_mass_loss(q, a_1, a_2):
    """Compute mass lost during a GW merger between two BHs.

    Expression from Tichy & Marronetti 2008.

    Parameters
    ----------
    q : `float/array`
        Mass ratio of binary
    a_1 : `float/array`
        Spin of primary BH
    a_2 : `float/array`
        Spin of secondary BH

    Returns
    -------
    loss : `float/array`
        Fraction of total binary mass lost during merger
    """
    # Symmetric mass ratio nu = q / (1 + q)^2
    symmetric_ratio = q / (1 + q) ** 2
    spin_sum = a_1 + a_2
    return 0.2 * symmetric_ratio + 0.208 * symmetric_ratio ** 2 * spin_sum
708e8194f34279f53ed883e2765770446670e082
116,239
import inspect


def find_subclasses_in_module(base_classes, module):
    """Finds the subclasses of the given classes in the given module.

    Args:
      base_classes: list of classes, the base classes to look for the
        subclasses of in the module.
      module: module, the module to look for the subclasses in.

    Returns:
      A list of all of the subclasses found in the module.  As before,
      a class appears once per base class it matches (and base classes
      themselves match, since issubclass(C, C) is true).
    """
    return [
        member
        for member in module.__dict__.values()
        if inspect.isclass(member)
        for base in base_classes
        if issubclass(member, base)
    ]
bd2f3829f903b38805894676ce81e87a3faa8069
116,243
def get_free_index(dictionary):
    """Get the first free integer index of dictionary starting at 0."""
    candidate = 0
    # Probe upward until a key that is not taken is found.
    while candidate in dictionary:
        candidate += 1
    return candidate
8751f5e2ff286d61d355fae149e4b7bcbedf7e42
116,245
def query_with_limit_offset(query, limit, offset):
    """Add limit and offset constraints to a SQLAlchemy query.

    :param Query query: A SQLAlchemy query.
    :param int limit: The limit to add. If None or 0, it will be skipped.
    :param int offset: The offset to add. If None or 0, it will be skipped.
    :returns Query: The updated query object.
    """
    query = query.limit(limit) if limit else query
    query = query.offset(offset) if offset else query
    return query
4519d0e6ff5acc865990e052e91e93b1b00e7a80
116,246
def is_escaped_newline(line):
    """
    Checks if the final newline in a string is escaped

    Parameters
    ----------
    line : string
        String that may end with a newline

    Returns
    -------
    bool
        True if the last newline in the string is escaped

    Examples
    --------
    >>> is_escaped_newline("Line\n")
    False
    >>> is_escaped_newline("Line\\\n")
    True
    >>> is_escaped_newline("Line")
    False
    >>> is_escaped_newline("")
    False
    """
    # endswith() also guards the empty string; the original indexed
    # line[-1] directly and raised IndexError on "".
    if not line.endswith('\n'):
        return False
    body = line[:-1]
    # An odd number of trailing backslashes means the newline is escaped.
    trailing_backslashes = len(body) - len(body.rstrip('\\'))
    return trailing_backslashes % 2 == 1
d924e4d22b825859ab29a7311a8aeb217b37f4e6
116,247
def get_children(parent):
    """Function for traversals to get the children of a block.

    Returns an empty list when the block reports no children.
    """
    return parent.get_children() if parent.has_children else []
c08d981698ed9865af3fb3e33a3481d99e22816e
116,249
def parseLinks(links, weighted):
    """Parse node links in Pajek format.

    return [(dest_id, weight), ...]

    :param links: whitespace-separated link tokens for one node.
    :param weighted: flag controlling whether weights are attached.
    """
    links = links.split()
    # NOTE(review): when `weighted` is true every token is paired with a
    # constant weight of 1, and when false raw unpaired tokens are
    # returned.  This looks inverted -- one would expect a default
    # weight only for the UNweighted case, and (dest, weight) token
    # pairs parsed when weighted.  Confirm against the Pajek input
    # actually fed in here.
    if weighted:
        links = [(v, 1) for v in links]
    return links
c3ddf818c548ec1410820f031074d8b593b814be
116,250
def get_caption(prop, dic):
    """
    Get a textual caption of a keras property that may be
    - a literal string label (e.g. 'mse')
    - an instance of a class
    - a reference to a function

    :param prop: the property to describe (str, instance, function, or None).
    :param dic: mapping of normalized lowercase names to display captions.
    :return: the caption from `dic` when the normalized name is known,
        otherwise a best-effort name; None (implicitly) when prop is None.
    """
    if prop is not None:
        if isinstance(prop, str):
            # Normalize: lowercase and drop underscores before lookup.
            prop_extract = prop.lower().replace('_', '')
            if prop_extract in dic:
                return dic[prop_extract]
            else:
                return prop
        else:
            # Non-string: key off the lowercase type name.
            prop_extract = type(prop).__name__.lower()
            if prop_extract == 'function':
                # Plain functions: use the function's own name instead.
                prop_extract = prop.__name__.lower().replace('_', '')
                if prop_extract in dic:
                    return dic[prop_extract]
                else:
                    return prop.__name__
            elif prop_extract in dic:
                return dic[prop_extract]
            else:
                return type(prop).__name__
    # prop is None: fall through and return None.
5e009e9da9481d149beb347fa34b415148fdcc1e
116,253
def edgelist_for_workflow_steps( steps ):
    """
    Create a list of tuples representing edges between ``WorkflowSteps`` based
    on associated ``WorkflowStepConnection``s

    Each step also contributes a self-edge (i, i), so steps with no
    connections still appear in the edge list.

    :param steps: ordered iterable of ``WorkflowStep``s.
    :return: list of (source_index, target_index) tuples.
    """
    edges = []
    # Map each step object to its position in the input ordering.
    steps_to_index = dict( ( step, i ) for i, step in enumerate( steps ) )
    for step in steps:
        # Self-edge keeps isolated steps present in the graph.
        edges.append( ( steps_to_index[step], steps_to_index[step] ) )
        for conn in step.input_connections:
            edges.append( ( steps_to_index[conn.output_step], steps_to_index[conn.input_step] ) )
    return edges
94c09e277c664f835694c5514a78c17518275fdc
116,255
import random
import click


def main(filename, n, max_words, min_word_length, max_word_length):
    """
    Generate an xkcd passphrase randomly selected from a list of words.

    :param filename: open file handle containing one word per line.
    :param n: number of passphrases to print.
    :param max_words: number of words per passphrase.
    :param min_word_length: shortest acceptable word length.
    :param max_word_length: longest acceptable word length.
    """
    # Passphrases are security-sensitive: use the OS CSPRNG instead of
    # the predictable default Mersenne Twister.
    rng = random.SystemRandom()

    def get_words(handle):
        # One word per line; newline characters remain attached (and so
        # count toward the length filter, as in the original).
        return handle.readlines()

    def get_candidates(words, min_length, max_length):
        return [w for w in words if min_length <= len(w) <= max_length]

    def get_phrase(words):
        return ''.join(w.strip().lower() for w in words)

    words = get_words(filename)
    candidates = get_candidates(words, min_word_length, max_word_length)
    for _ in range(n):
        click.echo(get_phrase(rng.sample(candidates, max_words)))
1fde702a6d5197213280aaddc90b10a0e70987e4
116,256
from typing import Iterable


def flatten_until(is_leaf, xs):
    """
    Flatten a nested sequence. A sequence could be a nested list of lists
    or tuples or a combination of both.

    :param is_leaf: Predicate. Determines whether an item in the iterable
        `xs` is a leaf node (and must not be descended into).
    :param xs: Iterable. Nested lists or tuples.
    :return: list of leaves in depth-first order.
    """
    def _flatten_until(items):
        # BUG FIX: the original called isinstance(Iterable, items) --
        # arguments reversed -- which raised TypeError on every call.
        if isinstance(items, Iterable) and not is_leaf(items):
            for item in items:
                for leaf in _flatten_until(item):
                    yield leaf
        else:
            yield items

    return list(_flatten_until(xs))
e73a61fa2c3a86916a90a9d0ced23ab5fc5d39d1
116,266
def linear_constraint(u, Lin_lhs, Lin_rhs, tol = 0.05):
    """Check the additional linear constraint on u

    Parameters
    ----------
    u : np.array of shape(None,)
        Beamlet radiation distribution
    Lin_lhs : np.array of shape(None, u.shape[0])
        Stacked lhs of the constraints
    Lin_rhs : np.array of shape(None,)
        Stacked rhs of the constraints
    tol : float
        Slack allowed on each constraint row.

    Returns
    -------
    bool
        Indicator of constraints satisfaction within tol.  (The original
        returned the raw elementwise comparison array and ignored `tol`,
        despite documenting a bool.)
    """
    return bool((Lin_lhs.dot(u) <= Lin_rhs + tol).all())
cb4732ef62a0209efcce7d9a1828c95097a1b50c
116,271
import torch


def remove_large_pred_bbx(bbx_3d):
    """
    Remove large bounding box.

    Parameters
    ----------
    bbx_3d : torch.Tensor
        Predicted 3d bounding boxes, shape:(N,8,3), corners as (x, y, z)
        in the last dimension.

    Returns
    -------
    index : torch.Tensor
        Boolean keep-mask of shape (N,): True where the box extent is
        <= 6 along every axis.
    """
    def _extent(axis):
        # Side length of each box along one coordinate axis.
        return (torch.max(bbx_3d[:, :, axis], dim=1)[0]
                - torch.min(bbx_3d[:, :, axis], dim=1)[0])

    x_len = _extent(0)
    y_len = _extent(1)
    # BUG FIX: the original read column 1 (y) again when computing the z
    # extent, and then and-ed the raw z_len (treated as nonzero-truth)
    # instead of comparing it against the threshold.
    z_len = _extent(2)

    index = torch.logical_and(x_len <= 6, y_len <= 6)
    index = torch.logical_and(index, z_len <= 6)
    return index
3e38a5258c32080c2380ee2aa07574a3aaa2118a
116,272
def find_scan_node(scan_node):
    """
    utility function to find the parent node of "scan" type, meaning some of
    its children (DAQ_scan case) or co-nodes (daq_logger case) are navigation
    axes

    Parameters
    ----------
    scan_node: (pytables node) data node from where this function look for
        its navigation axes if any

    Returns
    -------
    node: the parent node of 'scan' type
    list: the data nodes of type 'navigation_axis' corresponding to the
        initial data node
        (None, []) is returned when no 'scan' ancestor exists or any
        lookup raises.
    """
    try:
        # Walk up the tree until a node whose 'type' attribute is 'scan'
        # is found; passing the root raises and is handled below.
        while True:
            if scan_node.attrs['type'] == 'scan':
                break
            else:
                scan_node = scan_node.parent_node
        children = list(scan_node.children().values())  # for data saved using daq_scan
        children.extend([scan_node.parent_node.children()[child] for child in scan_node.parent_node.children_name()])  # for data saved using the daq_logger
        nav_children = []
        for child in children:
            # Keep only children tagged as navigation axes.
            if 'type' in child.attrs.attrs_name:
                if child.attrs['type'] == 'navigation_axis':
                    nav_children.append(child)
        return scan_node, nav_children
    except Exception:
        # Any structural mismatch (no 'scan' ancestor, missing attrs)
        # yields the "not found" result instead of propagating.
        return None, []
6c92e698ba0f4aa6e0dddf2a95716791f941ff78
116,277
def _bin_area(count, bin_edges): """ Returns the area of a single histogram bin (width * height) """ return count * abs(bin_edges[1] - bin_edges[0])
5bdeab76e188ed6e6c46ca63e115833c5af43b57
116,280
def findSmallerInRight(string, start, end):
    """
    Counts the number of characters that are smaller than string[start]
    and are at the right of it.

    :param string: Input string.
    :param start: Integer corresponding to the index of the starting character.
    :param end: Integer corresponding to the last index (inclusive) to scan.
    :return: Integer corresponding to the number of chars, on the right,
        that are smaller.
    """
    pivot = string[start]
    return sum(1 for ch in string[start + 1:end + 1] if ch < pivot)
b9cc36373706a305565804831415efe6e0b0d424
116,281
def count_missing_rows(data_frame, col="any") -> int:
    """Count rows containing missing values in a Pandas DataFrame.

    Parameters
    ----------
    data_frame : Pandas DataFrame
        A Pandas DataFrame to count NA/NANs
    col : str
        The column to count NA/NANs from. If "any" is specified,
        considers any NA in any column as a hit.

    Returns
    -------
    int representing the number of missing values counted.
    """
    if col == "any":
        # Rows with at least one NA in any column.
        return int(data_frame.isna().any(axis=1).sum())
    # BUG FIX: the original called .iterrows() on data_frame[col], which
    # is a Series and has no iterrows(), raising AttributeError.  Count
    # NAs in the single column directly instead.
    return int(data_frame[col].isna().sum())
fe06c3ad3f6e9d24d5604b77b58330f7685ceb97
116,283
def get_generation(searcher, tag):
    """
    Return the identifiers of all members of a population associated with a
    given searcher tag

    Only members that have already been evaluated are kept, and the
    result is ordered by the population's own sorting.

    :param searcher: searcher exposing get_all_generations() and a
        population with is_evaluated()/ids_sorted().
    :param tag: generation tag to look up.
    :return: sorted list of evaluated member ids.
    """
    lista = [x['_id'] for x in searcher.get_all_generations(tag) if searcher.population.is_evaluated(x['_id'])]
    return searcher.population.ids_sorted(lista)
301cfaa208c6593be16bddd348eaa03870aaf6e1
116,286
def get_traj_point(box):
    """Get person traj point given person boxes: bottom-center (x, y)."""
    left, _, right, bottom = box
    center_x = (left + right) / 2.0
    return [center_x, bottom]
10e96f75e3b0faeff9f7262836db74081b876365
116,290
def _GetProvides(sources): """Get all namespaces provided by a collection of sources.""" provides = set() for source in sources: provides.update(source.provides) return provides
6e8c1c5859a0ea9f89d20f8f2812ff1a474db106
116,296
def dotprod(u, v):
    """
    Simple dot (scalar) product of two lists of numbers (assumed to be
    the same length).

    :param u: first vector.
    :param v: second vector.
    :return: float sum of elementwise products.
    """
    # Start accumulation from 0.0 so the result is a float even for
    # all-int inputs, matching the original accumulator.
    return sum((a * b for a, b in zip(u, v)), 0.0)
0449219a1ceffa557c6ff8f4ff5ca382b4d12c05
116,302
from typing import Callable
import asyncio
import functools


def make_async(func: Callable):
    """
    Wrap a function with a future to make it async.

    The wrapped function runs immediately (synchronously); its result is
    placed on an already-resolved asyncio.Future the caller may await.

    :param func: the synchronous callable to wrap.
    :return: a wrapper returning a Future resolved with func's result.
    """
    @functools.wraps(func)  # preserve the wrapped function's metadata
    def wrapper(*args, **kwargs):
        future = asyncio.Future()
        future.set_result(func(*args, **kwargs))
        return future

    return wrapper
b78660f7d7e24b3a7e05a21328e4844426c568e4
116,307
def capfirst(text):
    """Uppercase the first character of text (empty input unchanged)."""
    if text:
        return text[0].upper() + text[1:]
    return text
f395487ccb5fd207e5820119f3f5dfba48c39789
116,314
def host_name() -> str:
    """Mock host name (the IPv4 loopback address)."""
    loopback = '127.0.0.1'
    return loopback
583c84e540db195181e961050e55812aad8e170f
116,316
import secrets


def gen_rand_bytes(num_bytes=32):
    """
    Return a random, cryptographically strong hex string of length
    num_bytes characters.

    :param num_bytes: desired string length.  (The original returned
        num_bytes - 1 characters for odd values, since token_hex always
        yields an even number of hex digits.)
    :return: hex string of exactly num_bytes characters.
    """
    # Over-generate by one byte for odd lengths, then trim; a no-op for
    # even lengths, so existing callers see identical behavior.
    return secrets.token_hex((num_bytes + 1) // 2)[:num_bytes]
5c43e6a2975f758f7a2654505eb4b72a5f3b9ef1
116,318
import re


def base_version(version):
    """Extract the final release and if available pre-release (alpha,
    beta, release candidate) segments of a PEP440 version, defined with
    three components (major.minor.micro).

    Useful to avoid nbsite/sphinx to display the documentation HTML
    title with a not so informative and rather ugly long version
    (e.g. ``0.13.0a19.post4+g0695e214``). Use it in ``conf.py``::

        version = release = base_version(package.__version__)

    Return the version passed as input if no match is found with the
    pattern.
    """
    # Matches at the start, e.g. 0.13.0, 0.13.0rc1, 0.13.0a19, 0.13.0b10
    release_re = re.compile(r"([\d]+\.[\d]+\.[\d]+(?:a|rc|b)?[\d]*)")
    found = release_re.match(version)
    return found.group() if found else version
cad7d0f03c51fe723ee1d04f3d4b648c9eb6f96a
116,321
import json


def get_result(start, end, path):
    """Returns json object of shortest path result."""
    payload = {
        "start": start,
        "end": end,
        "path": path if path else "No path! :( ",
    }
    return json.dumps(payload, indent=4)
3cc0eab6feb1df2d089654c6fa95dff6bc2bb869
116,323
from typing import Any
import pickle


def load_pickle(fpath: str) -> Any:
    """Load an arbitrary object from a pickled format.

    Note that no safety checking is performed -- never unpickle
    untrusted data.

    Args:
        fpath (str): Path to the pickled object to load.

    Returns:
        Any: The object as hydrated from disk.
    """
    with open(fpath, 'rb') as handle:
        obj = pickle.load(handle)
    return obj
9e8430d9d37ef62a9e146832a53dedd2c80daed0
116,324
def _find_str(s: str, char: str) -> int: """ Finds a sequence within a string, and returns the position. If not exists, returns ``-1``. :param s: Latex string code :param char: Sequence :return: Position """ index = 0 if char in s: c = char[0] for ch in s: if ch == c: if s[index:index + len(char)] == char: return index index += 1 return -1
a13628f758323862520b5be040c25070b653bd12
116,329
def isOpen(node, closedNode):
    """
    To check if the node is already visited or not.

    :param node: node with a `key` attribute.
    :param closedNode: container of visited node keys.
    :return: truthy if the node is still open (unvisited).  Returns a
        bool instead of the original literal 0/1; bool subclasses int,
        so callers relying on 0/1 are unaffected.
    """
    return node.key not in closedNode
81915963bf975205347fc8e701b34e64a9c196ea
116,331
def Pad(string, length):
    """Pad string to length with @ characters.

    Args:
      string: String to pad out.
      length: Number of characters that the string must be.

    Returns:
      str of the required length that is right padded (returned
      unchanged when already at least `length` characters).
    """
    # str.ljust is the stdlib form of the manual pad computation.
    return string.ljust(length, '@')
3c18c066e5e31d2a8dd40542dd035d422dad1672
116,332
def num_truncate(value: float) -> int:
    """Truncate a floating point value toward zero.

    Parameters
    ----------
    value: float

    Examples
    --------
    >>> num_truncate(6.2)
    6
    >>> num_truncate(-9.1)
    -9
    """
    truncated = int(value)
    return truncated
780f7bcf7af3ceec17e14cf13c052914d927ec50
116,333
import math


def delta(l, m, delta0):
    """Convert a coordinate in l, m into a coordinate in Dec.

    Keyword arguments:
    l, m -- direction cosines, given by (offset in cells) x cell size (radians)
    delta0 -- Dec of the centre of the field, decimal degrees

    Return value:
    delta -- Dec in decimal degrees
    """
    delta0_rad = math.radians(delta0)
    # n = third direction cosine, completing the unit vector.
    n = math.sqrt(1 - l * l - m * m)
    sin_dec = m * math.cos(delta0_rad) + n * math.sin(delta0_rad)
    return math.degrees(math.asin(sin_dec))
63efa0b386304d50e7c14846c8121f33764622ca
116,337
def _list(xs): """Convert the given argument to a list.""" try: return list(xs) except TypeError: return [xs]
d44309d7c6983e694c8928eb2635851bc181d6ec
116,338
def url(quad):
    """ creates the url for a given quadkey.

    :param quad: Bing maps quadkey identifying the tile.
    :return: full tile URL on the virtualearth tile server.
    """
    # NOTE(review): this API key is hard-coded in source; it should be
    # moved to configuration/environment rather than committed.
    key = 'AuZ2rzQL4BFpZbThyMutLjCSwreiFEPg66lx4ZTsbWIP2fvhEupamMwphvkb82sb'
    root = 'http://ecn.t3.tiles.virtualearth.net/tiles'
    return '{}/a{}.jpeg?g=195&mkt=en-US&key={}'.format(root, quad, key)
dff6b4af1b6de11905aa15edd942db407ad10cde
116,342
import torch


def reshape_for_torch(I):
    """Transpose image for PyTorch coordinates (HWC -> CHW float tensor)."""
    chw = I.transpose((2, 0, 1))
    # Multiplying by 1.0 promotes integer arrays to float before the
    # tensor conversion.
    return torch.from_numpy(1.0 * chw)
9277c97eaaa58e3b9c0c93a8ff7ced87ff3770ea
116,346
import random
import string


def random_str(size=10):
    """
    create random string of selected size

    :param size: int, length of the string
    :return: the string
    """
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for _ in range(size))
9e5d4857f0c4ea84dfe6f7f31bf0ed9969ef20a2
116,348
def filter_onlyDirect(itineraries):
    """
    filter the input itineraries and select only direct (stop count = 0)

    :param itineraries: input itineraries
    :return: direct itineraries
    :rtype: list
    """
    direct = []
    for itinerary in itineraries:
        if itinerary.legs[0].stop_count == 0:
            direct.append(itinerary)
    return direct
a44981ccdb9028fe333a463dd973144ccc10ad89
116,349
def _data(response):
    """
    Get the serialized data dictionary from the given REST API test
    response (e.g. a DRF test client response's ``.data`` attribute).
    """
    return response.data
941608ef4b61ced851bbc96ed1aa7c6c4ef5d94b
116,350
def StartsWith(this, that):
    """Checks whether an items of one iterable are a prefix of another.

    Args:
      this: An iterable that needs to be checked.
      that: An iterable of which items must match the prefix of `this`.

    Returns:
      `True` if `that` is a prefix of `this`, `False` otherwise.
    """
    remaining = iter(this)
    for expected in that:
        try:
            actual = next(remaining)
        except StopIteration:
            # `this` ran out before `that` did: not a prefix.
            return False
        if actual != expected:
            return False
    # Every item of `that` matched: it is a prefix.
    return True
6734e5fab8504c0853bf74e5e07649845ae4db21
116,359
def _get_tag_text(tag): """ If a <p> tag contains text, return it, otherwise return an empty string (rather than None). """ if tag.text is not None: return tag.text return ''
0cd799a1a20de608b95a96bb5893c1c78c3d8521
116,363
def _htmlescape(string): """ Convert problematic characters in string to use HTML entities. Handles angle brackets, double quotes and ampersand. """ for char, rep in [('&', 'amp'), ('<', 'lt'), ('>', 'gt'), ('"', 'quot')]: string = string.replace(char, '&%s;' % rep) return string
a96dfc30a3de1d3a408e0ffcdebe2f69a424bd56
116,366
import torch


def loss_fn(outputs, labels):
    """Computes the cross entropy loss given outputs and labels.

    Args:
        outputs: Output of the model (dimension batch_size x 10 (10 classes))
        labels: Correspondent true labels (dimension batch_size, where each
            element is a value between 0-9)

    Returns:
        loss: cross entropy loss for all images in the batch

    Note: The standard loss function from pyTorch nn.CrossEntropyLoss()
    can also be used and should return the same result. This function is
    an example of how to easily define a custom loss function.
    """
    batch_size = outputs.size(0)
    # Pick each sample's logit at its true label, then average-negate.
    selected = outputs[range(batch_size), labels]
    return -selected.sum() / batch_size
06feaecf98f7874994c5138e45f64afb2c8fd1c5
116,367
def _parse_arguments(argstring):
    """
    Parses argstring from nginx -V

    :param argstring: configure string
    :return: {} of parsed string

    Splits on ' --' and handles three shapes: boolean switches
    (--with-x -> True), simple --k=v pairs, and single-quoted compound
    values (--k='a --b c') that themselves contain ' --' and therefore
    get split across several parts and must be stitched back together.
    """
    arguments = {}
    current_key = None
    current_value = None
    for part in argstring.split(' --'):
        if '=' in part:
            # next part of compound
            if current_key and current_value:
                # NOTE(review): the ' --' separator consumed by split()
                # is not re-inserted when stitching, so reassembled
                # compound values lose it -- confirm callers accept this.
                current_value += part
                if part.endswith("'"):
                    arguments[current_key] = current_value
                    current_key = None
                    current_value = None
            else:
                k, v = part.split('=', 1)
                # compound argument
                if v.startswith("'") and v.endswith("'"):
                    arguments[k] = v
                elif v.startswith("'"):
                    # Opening quote without a closing one: the value
                    # continues in later parts; enter compound mode.
                    current_key = k
                    current_value = v
                # simple argument
                else:
                    arguments[k] = v
        else:
            # boolean
            if part:
                arguments[part] = True
    return arguments
924053659c5c57ef49d7e51246825fc1d9824489
116,369
def parse_csv_data(csv_filename: str) -> list[str]:
    """
    Takes in covid data csv file and returns list of each row as a string

    Note: Returns the same data format as the parse_json_data function

    Keyword arguments:
    csv_filename (file.csv) : Title of csv file containing covid data

    Returns:
    content (list) : Content of csv file as a list of strings for the rows
        in the file, with blank lines removed
    """
    with open(csv_filename, 'r', encoding="utf-8") as file:
        content = file.read().splitlines()
    # BUG FIX: the original removed blank lines while iterating the same
    # list, which skips the element after each removal -- consecutive
    # blank lines survived.  Build a filtered copy instead.
    return [row for row in content if row != ""]
d0992f3b1f7d262e5f7f068a3834ca19fddff8c9
116,371
from typing import Tuple


def get_axis_collision_distances(
    p1: float, w1: float, v1: float, p2: float, w2: float
) -> Tuple[float, float]:
    """
    Gets the distance to the entry and exit points of a collision on one axis.

    Parameters:
        p1: float - Position of first object
        w1: float - Width of first object
        v1: float - Velocity of first object
        p2: float - Position of other object
        w2: float - Width of other object

    Returns:
        Tuple of entry and exit distances.
        Tuple[float, float]
    """
    # Gap from the first object's right side to the other's left side,
    # and from the first object's left side to the other's right side.
    right_gap = p2 - (p1 + w1)
    left_gap = (p2 + w2) - p1
    if v1 > 0:
        return (right_gap, left_gap)
    # Moving left (or stationary): the roles swap.
    return (left_gap, right_gap)
8ff6e39e7426099ab29017f4b976867a9b2c375a
116,374
def regenerated_configure(file_paths):
    """Check if configure has been regenerated.

    Returns "yes"/"no" when configure.ac changed (depending on whether
    configure changed too), otherwise "not needed".
    """
    if 'configure.ac' not in file_paths:
        return "not needed"
    return "yes" if 'configure' in file_paths else "no"
9af5cff461fc5908ef9723cff0d46332d236629c
116,376
def lorenz(num_points=15000, start=(0.10, 0.10, 0.10), scaling_factor=20,
           delta=0.008, a=0.1, b=4.0, c=14.0, d=0.08):
    """
    Generates Lorenz strange attractor vertices.

    :param num_points: number of integration steps.
    :param start: initial (x, y, z) point.  Now a tuple: the original
        used a mutable list default (an anti-pattern), though it was
        never mutated; list arguments still work.
    :param scaling_factor: multiplier applied to emitted coordinates.
    :param delta: integration step size.
    :param a: attractor parameter.
    :param b: attractor parameter.
    :param c: attractor parameter.
    :param d: attractor parameter.
    :return: flat list [x0, z0, y0, x1, z1, y1, ...] of scaled
        coordinates (the y/z output swap is preserved from the original).
    """
    verts = []
    x, y, z = start[0], start[1], start[2]
    for _ in range(num_points):
        # Euler step of the attractor's ODEs.
        dx = -a * x + (y * y) - (z * z) + a * c
        dy = x * (y - b * z) + d
        dz = z + x * (b * y + z)
        x = x + delta * dx
        y = y + delta * dy
        z = z + delta * dz
        # scale and append (x, z, y) for this step
        verts.extend([x * scaling_factor, z * scaling_factor, y * scaling_factor])
    return verts
534571418a021f1a2454a5c85c1edcef920936ba
116,379
def _is200(status_code): """Returns whether HTTP GET status-code 200 received Args: status_code (int): Status code as function of GET request Returns: True : status_code == 200 False: status_code != 200 """ return status_code == 200
1dfe93fcb00a179ade10b4ef356299b17d094672
116,382
def benchmark_select_skl_metric(metric: str) -> str:
    """
    Convert `MuyGPyS` metric names to `scikit-learn` equivalents.

    Args:
        metric: The `MuyGPyS` name of the metric.

    Returns:
        The equivalent `scikit-learn` name.

    Raises:
        ValueError: Any value other than `"l2"` or `"F2"` will produce
            an error.
    """
    translations = {"l2": "l2", "F2": "sqeuclidean"}
    if metric not in translations:
        raise ValueError(f"Metric {metric} is not supported!")
    return translations[metric]
6ad1112332c3767c2758f58124de4921cf0a22bb
116,384
def replace_if_match(xs, y):
    """
    Find the first pattern match in list. Return the corresponding value
    of patterns if matches, return the same value of y if there has no
    match.

    Parameters
    ----------
    xs : [(pattern, value)]
    y : object

    Returns
    -------
    result : object
    """
    for pattern, value in xs:
        if pattern == y:
            return value
    return y
080eed2cf0b95ef1d7d42b04c90994e9f2617873
116,388
def name_sorting(nodes, direction, data):
    """Sort nodes by name (their string representation).

    :param nodes: nodes to sort.
    :param direction: 'desc' for descending, anything else ascending.
    :param data: unused; kept for the sorting-callback interface.
    """
    descending = (direction == 'desc')
    return sorted(nodes, key=str, reverse=descending)
abb1d2ff558d826dc54c4e88abfb184bc504ec45
116,389
import re


def natural_sort(string):
    """
    Natural sorting key function: digit runs compare by numerical value
    rather than raw ASCII value.
    """
    parts = re.split(r'(\d+)', string)
    return [int(part) if part.isdigit() else part for part in parts]
ebb771e4ffc877da74b8d4014eff4a7ab8e04fa5
116,390
def note_hash(channel, pitch):
    """Generate a note hash: 128 pitch slots per channel."""
    return (channel << 7) + pitch
87e15d26501dc6780284b284c1cac66e83fee081
116,394
import tempfile
import requests


def file_from_url(url: str):
    """
    file_from_url requests a file from an URL.

    Raises an exception if the request fails.

    Parameters
    ==========
    url : str
        The resource URL.

    Returns
    =======
    _TemporaryFileWrapper
        Requested file as temporary file handler.  The file has been
        written but not rewound: callers must seek(0) before reading.
    """
    # Stream in 1 MiB chunks so large downloads never sit fully in memory.
    CHUNK_SIZE = 1024*1024
    file = tempfile.NamedTemporaryFile()
    res = requests.get(url, stream=True)
    if not res.ok:
        raise Exception(
            'request failed with status code {0}'.format(res.status_code))
    for chunk in res.iter_content(chunk_size=CHUNK_SIZE):
        file.write(chunk)
    return file
f0ae2801e49604129f359bc8d5d6cbb97d58a887
116,395
def square(x):
    """Calculate the square of number x."""
    squared = x * x
    return squared
6fb58aa6a90835e5cfddf002d7c74cbf9a52d0db
116,402
def get_citations_per_doi(dois_by_year, citations_by_year):
    """
    Returns citations per DOI, by year.

    :param dois_by_year: mapping year -> list of DOIs.
    :param citations_by_year: mapping year -> citation count.
    :return: mapping year -> round(citations / doi count, 2).  Years
        with no DOIs are skipped; a year absent from citations_by_year
        counts as 0 citations (the original raised KeyError there).
    """
    citations_per_doi = {}
    for year, dois in dois_by_year.items():
        if len(dois) > 0:
            citations = citations_by_year.get(year, 0)
            citations_per_doi[year] = round(citations / len(dois), 2)
    return citations_per_doi
1ef9f82648b0164070046dde0481c4b7cf5e48e3
116,407
def argmax(module, x, axes=None):
    """
    Get indices of maximum in tensor.

    Arguments:
        module: The backend array module (e.g. numpy or an
            API-compatible substitute) providing an ``argmax`` callable.
        x: The tensor to find the maximum indices of.
        axes: Axis specification, forwarded positionally as the
            backend's second argument (numpy's ``axis``).

    Return:
        Tensor containing indices of the maximum in 'x'.
    """
    return module.argmax(x, axes)
4500b695e79a76d6045b1b570e2ace1e626bb371
116,409
def coalesce_repeated_switches(cmd):
    """Combines known repeated command line switches.

    Repetition of a switch notably happens when both per-test switches and
    the additional driver flags specify different --enable-features. For
    instance:

       --enable-features=X --enable-features=Y

    Chrome's command line parsing only applies the last seen switch, so only
    feature Y would be enabled.  This transforms the pair into:

       --enable-features=X,Y

    The same coalescing is applied to --disable-features.  Coalesced flags
    are appended at the end with their values sorted; all other switches
    keep their original order.
    """
    ENABLE_FEATURES_FLAG = '--enable-features='
    DISABLE_FEATURES_FLAG = '--disable-features='

    enabled = set()
    disabled = set()
    passthrough = []

    for switch in cmd:
        if switch.startswith(ENABLE_FEATURES_FLAG):
            enabled.update(switch[len(ENABLE_FEATURES_FLAG):].split(','))
        elif switch.startswith(DISABLE_FEATURES_FLAG):
            disabled.update(switch[len(DISABLE_FEATURES_FLAG):].split(','))
        else:
            passthrough.append(switch)

    # Append any coalesced (comma separated) flags to the end.
    for prefix, values in ((ENABLE_FEATURES_FLAG, enabled),
                           (DISABLE_FEATURES_FLAG, disabled)):
        if values:
            passthrough.append(prefix + ','.join(sorted(values)))

    return passthrough
6ad19dd99adf2faf4ed6719b316c0d83dcb1e759
116,410
def gen_stdout_test_msg(bibfile_data, verbose=False): """ Generate appropriate message for STDOUT This method creates the string to be printed to STDOUT from the items of the `bibfile_data` list argument. It generates either a terse or verbose message based on the state of the `verbose` argument. :param list bibfile_data: List containing `RefFile`s. :param bool verbose: Directive to construct verbose/terse STDOUT string. :rtype: str """ msg_list = [bibfile.test_msg(verbose) for bibfile in bibfile_data] msg = "\n".join(msg_list) return msg
7d7a60400a03e0ef79f89f94f3d2ec7bc1ab0dbb
116,418
def _add(shape, solution):
    """
    Add every point of *shape* to *solution* unless any point is already
    contained.

    Returns True when all points were added, False otherwise (in which case
    *solution* is left unchanged).
    """
    if any(point in solution for point in shape):
        return False
    solution.extend(shape)
    return True
2795cfb2b8c167294e9dc4c1104fe6887fa472d4
116,419
def input_manager(default, args):
    """Handles the user input for this exercice.

    There are 2 use cases:
        1) no input (only the program name in ``args``) - use default.
        2) phrase provided - join the remaining arguments with spaces.

    ARGS:
        args tuple
    RETURNS: a single string
    """
    if len(args) == 1:
        return default
    return " ".join(args[1:])
5508277def31f81b1e26af88db4c5bc251c20f1d
116,426
import torch


def process_output_logits(output, prediction_length=30):
    """Processes output logits from a model.

    Args:
        output (tensor): output logits of shape (batch_size, num_classes),
            containing predicted target probabilities. Assumes that the last
            dimension is a probability distribution.
        prediction_length (int): the length of the predicted ranking of
            classes. Defaults to 30.

    Returns:
        tensor: the predicted class labels ordered, with shape
            (batch_size, prediction_length).
        tensor: the predicted class labels probas, with same shape.
    """
    # `ndim` is a property, not a method -- calling it raised TypeError.
    assert output.ndim == 2, \
        'Wrong number of dimensions for argument output: {}'.format(output.ndim)
    # torch.sort returns values and indices together; the original
    # `output[:, sorted_indices]` was broken advanced indexing that produced
    # a (batch, batch, num_classes) tensor instead of per-row probabilities.
    sorted_probs, sorted_indices = torch.sort(output, dim=-1, descending=True)
    output_labels = sorted_indices[:, :prediction_length]
    output_probs = sorted_probs[:, :prediction_length]
    return output_labels, output_probs
4b52bc08d4f05d2791fbbce1b7c9fd6bc090e938
116,427
import json


def read_twint_file(file_name):
    """
    Reads a twint export (one JSON object per line) and returns the tweets
    that have at least one reply or a parent tweet.
    """
    tweets = []
    with open(file_name, "r", encoding='utf8') as file:
        for line in file:
            tweet = json.loads(line)
            has_replies = int(tweet['replies_count']) > 0
            has_parent = int(tweet['has_parent_tweet']) > 0
            if has_replies or has_parent:
                tweets.append(tweet)
    return tweets
43404d0ff788925f2254028272190861d52a6b04
116,429
def __url_path_format(version_id: str, resource_name: str, method_name: str) -> str:
    """
    Build a method path of the form ``/<version>/<resource>.<method>``.

    :param version_id: Version of API
    :param resource_name: Name of resource
    :param method_name: Name of method
    :return: str
    """
    return "/{0}/{1}.{2}".format(version_id, resource_name, method_name)
32ed2754b7af1fca1b2c3770ce8673f2ddacdecf
116,430
def clean_post_data(post_data):
    """
    Removes None values from data, so that it can posted to Django's test
    client without TypeError being raised in Django 2.2+
    """
    cleaned = {}
    for key, value in post_data.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
3326bf47381058f83e0ad5540e9fcdbca03e7b69
116,432
def get_user_jwt_identity(user):
    """
    Return the value to use as the JWT identity: the user's email address.

    This function is called whenever create_access_token is called.
    """
    identity = user.email
    return identity
421c6b3383bfbde3afac44be65d0e454e5fb254b
116,433
def conddb_url(api=''):
    """Return the CondDB URL for the given API name."""
    return 'http://cms-conddb.cern.ch/{}'.format(api)
3363ad27ec211fd372298868ffe46c665c7053c9
116,441
def retrieve_captions(data_list):
    """
    Reads the captions from a list and returns them

    Args:
        data_list (list<dict>): List of image metadata

    Returns:
        list<str>: Captions of every article of every entry, in order.
    """
    return [
        article['caption']
        for data in data_list
        for article in data['articles']
    ]
0a80c9e5f2638ec69b389fe6f596e24c5fc67feb
116,449
import json
import logging


def parseJSON(file_path: str) -> dict:
    """Function to parse a JSON file.

    Arguments:
        file_path {str} -- File path of target JSON file.

    Raises:
        FileNotFoundError -- Raised if the target file is not found.
        JSONDecodeError -- Raised if there is an error parsing the JSON file.

    Returns:
        dict -- Dictionary of parsed JSON file contents.
    """
    try:
        # `with` guarantees the handle is closed; the original
        # `open(file_path).read()` leaked the file descriptor.
        with open(file_path) as fp:
            return json.load(fp)
    except FileNotFoundError as e:
        logging.error('File %s not found' % file_path)
        logging.error(e)
        raise e
    except json.decoder.JSONDecodeError as e:
        logging.error('Error parsing JSON file %s' % file_path)
        logging.error(e)
        raise e
2993fcc925f371d8ba2878e07145bdb223565089
116,452
def split_uri(uri):
    """
    Get the slash-delimited pieces of a URI.

    >>> split_uri('/c/en/cat/n/animal')
    ['c', 'en', 'cat', 'n', 'animal']
    >>> split_uri('/')
    []
    """
    # A URI without a leading slash is returned whole as a single piece.
    if not uri.startswith('/'):
        return [uri]
    stripped = uri.lstrip('/')
    return stripped.split('/') if stripped else []
818f23e738a9ea8575d1e139dc4f270661b28b0b
116,458
import torch


def dict2device(data, device):
    """Move torch.Tensor values inside *data* to the designated device.

    Dicts are recursed into (nested dicts handled recursively, other values
    assumed to support ``.to``); a bare tensor is moved directly; a list is
    mapped element-wise.  Other types fall through and return None, matching
    the original contract.
    """
    if isinstance(data, dict):
        for key in data:
            value = data[key]
            if isinstance(value, dict):
                data[key] = dict2device(value, device)
            else:
                data[key] = value.to(device)
        return data
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, list):
        return [item.to(device) for item in data]
d738d4a8e93701edbece2a78775b7a85592f758e
116,465
import requests


def feedstock_name(package):
    """
    Check to see if a package has a conda-forge feedstock.

    Parameters
    ------------
    package : str
      Name of a package to check

    Returns
    -------------
    name : str or None
      The (possibly remapped) feedstock package name, or
      None if no feedstock exists.
    """
    # base url to check for the existence of a feedstock repo
    base = 'https://github.com/conda-forge/{}-feedstock'

    # make sure name is clean
    package = package.lower().strip()

    # some packages publish their feedstock under a different name
    name_map = {'msgpack': 'msgpack-python'}
    package = name_map.get(package, package)

    # check the feedstock on github: a 200 response means it exists
    fetch = requests.get(base.format(package))
    exists = fetch.status_code == 200
    print(f'{package} exists={exists}')

    return package if exists else None
c22ce99e165abb83ce718d55f5351f9983f61720
116,469
def axes_included(img, label):
    """
    True when every axis of ``label`` also appears in ``img``.

    e.g.) img.axes = "tyx", label.axes = "yx" -> True
          img.axes = "tcyx", label.axes = "zyx" -> False
    """
    img_axes = img.axes
    return all(axis in img_axes for axis in label.axes)
735fe82d9ed990df56eb7212c2f4287873a93110
116,473
import sqlite3


def login(username, password):
    """
    Check admin credentials against the student_system database.

    :param username: account name
    :param password: password
    :return: True if a matching admin row exists, otherwise False
    """
    # connect to the database
    connection = sqlite3.connect(r'sqlite/student_system.db')
    try:
        cursor = connection.cursor()
        # parameterized query -- credentials are never interpolated into SQL
        sql = """select * from admin where username = ? and password = ?"""
        cursor.execute(sql, (username, password))
        result = cursor.fetchall()
        cursor.close()
        return len(result) > 0
    finally:
        # close the connection even when the query raises; the original
        # leaked it on any exception
        connection.close()
936cb53a2b456d5b25c68a3b779501c463f96f02
116,475
def findNext(v, pos, pattern, bodyFlag=1):
    """
    findNext: use string.find() to find a pattern in a Leo outline.

    v         the vnode to start the search.
    pos       the position within the body text of v to start the search.
    pattern   the search string.
    bodyFlag  true: search body text. false: search headline text.

    Returns a tuple (v, pos) showing where the match occured, or
    (None, 0) if no further match in the outline was found.

    Note: if (v, pos) is a tuple returned previously from findNext,
    findNext(v, pos + len(pattern), pattern) finds the next match.
    """
    while v is not None:
        text = v.bodyString() if bodyFlag else v.headString()
        found_at = text.find(pattern, pos)
        if found_at != -1:
            return v, found_at
        # advance to the next node; subsequent nodes search from the start
        v = v.threadNext()
        pos = 0
    return None, 0
ad64fbb8133903fa62403b584a48946e11d10b90
116,478
def get_runtime(sec):
    """
    Get the runtime in hour:minute:second.

    Args:
        sec (float): Runtime in seconds.

    Return:
        str for the formatted runtime.
    """
    minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
95f5250f2d2c9a28f8c8924b49b788e175135cbc
116,479
def yoToZA(yo_i):
    """Returns ZA designator, or special code, from yo id.  That is, ::

        yo_i = 0, 1,    2,    3,    4,    5,    6, 7,  8,  9, 10, 11,   12,   13,   14,   15,   16

    is converted to 0, 1, 1001, 1002, 1003, 2003, 2004, 0, -8, -9, -10,
    1, 1001, 1002, 1003, 2003, 2004 with the following meanings ::

        none, n, p, d, t, He3, He, g, b+, b-, EC, n, p, d, t, He3, He.

    yo_i must be a python integer."""
    if not 0 <= yo_i <= 16:
        raise Exception("\nError in yoToZA: unsupported yo value = %s" % repr(yo_i))
    # ids above 10 alias the 1..6 range
    index = yo_i - 10 if yo_i > 10 else yo_i
    za_table = (0, 1, 1001, 1002, 1003, 2003, 2004, 0, -8, -9, -10)
    return za_table[index]
4a877b729aa9a5ab397409a8f1784844f188fa52
116,484
import torch


def torch_cuda_total_memory(device):
    """
    Return CUDA device VRAM available in GB.
    """
    bytes_total = torch.cuda.get_device_properties(device).total_memory
    return bytes_total / 1024.0 ** 3
a399109187fe1b39a7b56722c7cdb132c35b6ce7
116,485
def gen_tuples_for_loops(range_len: int, limit: int) -> list:
    """
    Generate a list of tuples of range borders like
    [(0, 100), (100, 200), ..., (..., limit)].

    It is supposed that the tuples will be used in a loop "for".
    """
    full_chunks = limit // range_len
    ranges = [(i * range_len, (i + 1) * range_len) for i in range(full_chunks)]
    # a partial final chunk covers the remainder up to `limit`
    if limit % range_len > 0:
        ranges.append((full_chunks * range_len, limit))
    return ranges
cfd715c8c49fe60b4ae2be38bd959b290cb40514
116,489
def indent(block_of_text, indentation):
    """
    Helper function to indent a block of text.

    Take a block of text, an indentation string and return the indented
    block (every line prefixed with *indentation*).
    """
    lines = block_of_text.split("\n")
    return "\n".join(indentation + line for line in lines)
55644a9692ad9ecc7b97808b4196820160d125e0
116,490
def get_neighbours(point, image, visited):
    """
    Get the valid unvisited 4-connected neighbours for a given point.

    Arguments:
        point {tuple} -- point representation, elements order: x, y, intensity.
        image {np.array} -- the original image.
        visited {np.array} -- visited matrix (booleans); updated in place
            for each neighbour returned.

    Returns:
        Array -- includes all the valid unvisited neighbours, can be empty.
    """
    base_x, base_y, _ = point
    height = len(image)
    width = len(image[0])

    neighbours = []
    # same offset order as the original: left, up, right, down
    for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
        nx = base_x + dx
        ny = base_y + dy
        if not (0 <= nx < width and 0 <= ny < height):
            continue
        if visited[ny][nx]:
            continue
        neighbours.append((nx, ny, image[ny][nx]))
        visited[ny][nx] = True
    return neighbours
a4d713f49d3fc8edba90fef5fd48b2c1805416de
116,492