content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_cluster_logpdf_score(view, k):
    """Return the marginal likelihood of cluster k in a View.

    Sums the per-dimension cluster scores; returns 0 for a fresh cluster
    (one with no entry in the CRP counts).

    :param view: View object with `dims` (mapping of dimensions) and
        `crp.clusters[0].counts` (occupied-cluster table).
    :param k: cluster index.
    :return: summed logpdf score, or 0 when cluster k is unoccupied.
    """
    # Fresh cluster: nothing assigned yet, marginal likelihood contribution is 0.
    if k not in view.crp.clusters[0].counts:
        return 0
    # Fix: `.itervalues()` is Python 2 only — use `.values()` (the rest of
    # this codebase uses f-strings, i.e. Python 3).
    return sum(d.clusters[k].logpdf_score() for d in view.dims.values())
b7c4faa2140fd130690f94628556634fafe9c342
118,323
import json


def get_counts_from_json(months_start, months_end):
    """Aggregate monthly phrase/document counts over two periods.

    Reads 'phrases_and_docs_monthly.json' (a JSON array of two objects:
    the first maps month keys to phrase counts, the second maps month keys
    to document counts) and sums the counts over the months listed in each
    argument. The month lists contain keys of the two dicts and are used
    directly to access the values.

    :param months_start: list of month keys for the first period
    :param months_end: list of month keys for the second period
    :return: (start phrases sum, start docs sum, end phrases sum, end docs sum)
    """
    # Read the Json file which has the monthly total phrases and documents --
    # 2 Json objects in a json array. Assign each object to a dictionary.
    with open('phrases_and_docs_monthly.json', 'r') as file:
        json_array= json.load(file)
    # json_array is a list of 2 dicts.
    monthly_phrases_total = json_array[0]
    monthly_docs_total = json_array[1]
    # NOTE(review): dict.get() returns None for a month missing from the
    # file, which makes sum() raise TypeError — assumes all listed months
    # exist in the JSON; confirm with the data producer.
    start_months_phrases_sum = sum([monthly_phrases_total.get(month) for month in months_start])
    start_months_docs_sum = sum([monthly_docs_total.get(month) for month in months_start])
    end_months_phrases_sum = sum([monthly_phrases_total.get(month) for month in months_end])
    end_months_docs_sum = sum([monthly_docs_total.get(month) for month in months_end])
    return start_months_phrases_sum, start_months_docs_sum, end_months_phrases_sum, end_months_docs_sum
a6b32e82b51a3998f6aa6e1abf58880bf7648662
118,324
def get_function_return(function):
    """Describe a function's documented return value.

    Reads the `doc_return_type` attribute (and `doc_return_item_type` for
    arrays) and returns a ``(type, item_type)`` tuple; both elements are
    None when no return type is documented, and item_type is None unless
    the type is 'array'.
    """
    declared = getattr(function, 'doc_return_type', None)
    if not declared:
        return None, None
    if declared == 'array':
        # Arrays additionally carry an element type, defaulting to 'string'.
        return 'array', getattr(function, 'doc_return_item_type', 'string')
    return declared, None
8f275e10915c584951869ca4f3b922cfe6d10311
118,326
import torch


def expected_log_dirichlet(concentration: torch.Tensor) -> torch.Tensor:
    """Expected log of a Dirichlet distribution.

    Computes E[log p] = digamma(alpha) - digamma(sum(alpha)) along the
    last dimension of *concentration*.
    """
    total = torch.sum(input=concentration, dim=-1, keepdim=True)
    return torch.digamma(input=concentration) - torch.digamma(input=total)
d6e6efe5089ae9245c948233f58019a8101f933c
118,332
def no_validate(inds, val_force, fname, max_wt, max_stress):
    """No-op FEM validation: pass *inds* through unchanged.

    Used for solutions that rely on more traditional constraints; the
    remaining parameters exist only to match the validator interface.
    """
    passthrough = inds
    return passthrough
0569465c2a0e60a5a5214558bb233ccd74716e46
118,335
def ConvertStringToListFloat(line, space = " ", endline = ""):
    """Parse a delimited string into a list of floats.

    Strips *endline* from the string, splits on *space*, and converts
    every non-empty token to float.
    """
    cleaned = line.replace(endline, "")
    return [float(token) for token in cleaned.split(space) if token != ""]
64778a4225235049db9b4e355c2d9c7915e96166
118,338
def func_star(a_b, func):
    """Apply *func* to the unpacked elements of *a_b*.

    Turns a packed call like ``f([1, 2])`` into ``f(1, 2)`` — handy for
    mapping multi-argument functions over tuples.
    """
    args = a_b
    return func(*args)
836b1fe67d280fa304d34af4f9e83e846f8d9085
118,339
def partition(data):
    """Three-way partition of *data* around its last element (the pivot).

    Returns three lists: elements below the pivot, elements equal to it,
    and elements above it. The input list is not modified.
    """
    if not data:
        return [], [], []
    pivot_value = data[-1]
    lower, equal, upper = [], [], []
    for item in data:
        if item > pivot_value:
            upper.append(item)
        elif item == pivot_value:
            equal.append(item)
        else:
            lower.append(item)
    return lower, equal, upper
d5c724a7801145ed51d1a2f1b13ec33daf6cba8e
118,342
def r_to_rt(r):
    """Expand a flat 3x3 rotation matrix into a flat 3x4 RT matrix.

    A zero translation component is appended after each row of R,
    producing 12 elements (assumes T = 0).
    """
    result = []
    for row in range(3):
        result.extend(r[row * 3:row * 3 + 3])
        result.append(0)  # zero translation entry for this row
    return result
f29154bb3552357a4d13489c64db556acc33520e
118,345
import re


def generate_id(*args):
    """Build an id value from arbitrary arguments.

    Non-empty arguments are stringified, joined with '-', lowercased, and
    every run of non-alphanumeric characters is collapsed to a single '-'.

    :param args: Arbitrary length list of arguments to form the id from
    :return: A str id value
    """
    joined = '-'.join(str(part) for part in args if part != '')
    return re.sub('[^0-9a-zA-Z]+', '-', joined.lower())
93d3f4fce740f333fece7710f332ed10b8037af2
118,346
def findstringpattern(strings):
    """Find a common pattern in a list of strings.

    Returns the string itself when all entries are identical, otherwise
    the shared prefix and suffix joined by '*'; empty input yields "".
    """
    if not strings:
        return ""
    if all(candidate == strings[0] for candidate in strings[1:]):
        return strings[0]
    # Peel matching leading characters off every string.
    prefix = ""
    while strings[0] and all(other[0] == strings[0][0] for other in strings[1:] if other):
        prefix += strings[0][0]
        strings = [s[1:] for s in strings]
    # Peel matching trailing characters off what remains.
    suffix = ""
    while strings[0] and all(other[-1] == strings[0][-1] for other in strings[1:] if other):
        suffix = strings[0][-1] + suffix
        strings = [s[:-1] for s in strings]
    return prefix + "*" + suffix
11c8e17388351def0245f747c935bfd9abcbee1b
118,348
def area(span: float, aspect: float) -> float:
    """Surface area of a wing from its ``span`` and ``aspect`` ratio.

    Uses the standard relation area = span^2 / aspect.
    """
    return (span * span) / aspect
90b7b389ebbf35c33d6e17ee7459ed631fde1e58
118,349
import re


def normalize_chord_markup(line):
    """Put space around chord markup at word boundaries.

    Ensures a space separates a bracketed chord (e.g. ``[C]``) from an
    adjacent word before or after it, but leaves chords inside words alone.
    """
    # Chord glued to the end of a word: "word[C] " -> "word [C] "
    line = re.sub(r"(\w)(\[[^\]]*?\])( |$)", r"\1 \2\3", line)
    # Chord glued to the start of a word: " [C]word" -> " [C] word"
    line = re.sub(r"(^| )(\[[^\]]*?\])(\w)", r"\1\2 \3", line)
    return line
154716302b2251e39e9ffb9a0452b90b820d0fb8
118,350
def getFirstCol(array):
    """Return the first column of a 2-D array.

    Slicing with ``:1`` keeps the column dimension, so the result has
    shape (n, 1) rather than (n,).
    """
    return array[:, :1]
d0d9948315e2ac3dec1f722d8ef14588f362f358
118,354
def generate_bar_chart(win_percentage):
    """Render a 10-character emoji bar of wins vs. losses.

    :param win_percentage: The percentage of wins (0-100)
    :return: a string of 10 emojis, wins first, e.g. 'πŸ†πŸ†πŸ†πŸ†πŸ†πŸ†πŸ”΄πŸ”΄πŸ”΄πŸ”΄'
    """
    wins = round(win_percentage / 10)
    return "πŸ†" * wins + "πŸ”΄" * (10 - wins)
d5b6d636c78fa3872150996ebb735fecd901e64b
118,358
def createStyleState(kmlDoc, scale, idtag):
    """Create a document-wide KML <Style> element with a label scale.

    Builds ``<Style id="idtag"><LabelStyle><scale>scale</scale>
    </LabelStyle></Style>`` using the given DOM document.
    """
    style = kmlDoc.createElement('Style')
    style.setAttribute('id', idtag)
    label_style = kmlDoc.createElement('LabelStyle')
    scale_element = kmlDoc.createElement('scale')
    scale_element.appendChild(kmlDoc.createTextNode(str(scale)))
    label_style.appendChild(scale_element)
    style.appendChild(label_style)
    return style
2ca7d695a50807fa5cab275914e68e7332306d69
118,360
def assert_rows_are_concordant(df, ignore_columns=None):
    """Assert that all rows agree in every non-ignored column.

    :param df: DataFrame to check
    :param ignore_columns: columns to skip. Default: None
    :return: dict mapping each concordant column to its single unique value
    :raises ValueError: if any checked column holds more than one value
    """
    if ignore_columns is None:
        ignore_columns = []
    uniques = {}
    for column in df.columns:
        if column not in ignore_columns:
            uniques[column] = df[column].unique()
    discordant = {col: vals for col, vals in uniques.items() if len(vals) > 1}
    if len(discordant) > 0:
        detail = ', '.join(['{}={}'.format(col, vals) for col, vals in discordant.items()])
        raise ValueError('Expected unique values, but got multiple values for at least one DataFrame column: {}'.format(detail))
    # Every remaining column has exactly one unique value.
    return {col: vals[0] for col, vals in uniques.items()}
799b1197189c54008a504ec3eed8dcae96dfa59f
118,361
def _get_first_last(details):
    """Extract a (first, last) name pair from a details mapping.

    Uses both names when present; splits a lone first_name on its last
    space; falls back to empty strings for anything missing. When the
    split succeeds the result is a two-element list, otherwise a tuple.
    """
    has_first = "first_name" in details
    has_last = "last_name" in details
    if has_first and has_last:
        return details["first_name"], details["last_name"]
    if has_first:
        parts = details["first_name"].rsplit(" ", 1)
        return parts if len(parts) == 2 else (parts[0], "")
    if has_last:
        return "", details["last_name"]
    return "", ""
2a2ef17441c4243c5e58b74567709b9acb52e01f
118,368
def create_module(project, name, sourcefolder=None):
    """Creates a module and returns a `rope.base.resources.File`.

    *name* is a dotted path; intermediate packages are resolved below
    *sourcefolder* (the project root by default) and the final component
    becomes a new ``.py`` file.
    """
    folder = project.root if sourcefolder is None else sourcefolder
    *package_names, module_name = name.split('.')
    for package in package_names:
        folder = folder.get_child(package)
    return folder.create_file(module_name + '.py')
56dd10540e626f9a471d9a271502d60f8731bfde
118,371
from typing import List
import pathlib


def get_merged_file_paths(merge: List[str], outfile: str, outdir: str,
                          create_parents: bool = True) -> List[str]:
    """Get the file paths for results of merging translations.

    Args:
        merge: The files to merge into
        outfile: Filename where to write the result (honored only when a
            single file is merged)
        outdir: Directory where to write the results
        create_parents: If true, create the outdir if it doesn't exist

    Returns:
        The file paths where the results of merging should be saved
    """
    if len(merge) == 1 and outfile:
        return [outfile]
    if len(merge) > 1 and outfile:
        # outfile cannot hold several results; warn and ignore it.
        print(f'Ignoring outfile {outfile} since merging into multiple files')
    target_dir = pathlib.Path('.')
    if outdir:
        target_dir = pathlib.Path(outdir)
        if target_dir.is_file():
            raise IOError(f'Cannot use outdir {outdir} as directory. It is a file!')
    if create_parents:
        target_dir.mkdir(parents=True, exist_ok=True)
    suffix_tag = '-borrow'
    results = []
    for merge_file in merge:
        source = pathlib.Path(merge_file)
        # e.g. "x.po" -> "<outdir>/x-borrow.po"
        results.append(str(target_dir / (source.stem + suffix_tag + source.suffix)))
    return results
aea109797fd1498786d0340de41672208f80894c
118,382
def check_policy_deleted(event):
    """Check an event for S3 bucket policy deletion.

    Returns True (a violation) when the event name contains
    "DeleteBucketPolicy"; malformed events are treated as no violation.
    """
    try:
        deleted = "DeleteBucketPolicy" in event["detail"]["eventName"]
    except KeyError as err:
        # Event lacks the expected structure — log and treat as clean.
        print(err)
        return False
    if deleted:
        print("Policy Deleted! No encryption")
        return True
    return False
2e012a1532fe92b6efbfec28c526b123f38f09ce
118,383
def swap(input_list, switch):
    """Swap two elements of *input_list* in place.

    :param input_list: list to mutate
    :param switch: pair of indices to exchange
    :return: the same (mutated) list, for chaining
    """
    first, second = switch[0], switch[1]
    input_list[second], input_list[first] = input_list[first], input_list[second]
    return input_list
9ce3e45f858220bd1bdf6bece6082855e4cda105
118,384
import base64
import dill


def deserialize_obj(obj: str) -> str:
    """Deserialize given object with the following steps:

    1. Decode it using base64 to obtain the raw pickled bytes.
    2. Load it into an object using dill.

    NOTE(review): the return annotation says ``str`` but ``dill.loads`` can
    yield any pickled object — confirm the intended contract with callers.
    SECURITY: dill.loads executes arbitrary code during unpickling; never
    call this on untrusted input.
    """
    obj_dill = base64.b64decode(obj)
    obj_str = dill.loads(obj_dill)
    return obj_str
6e27eb25e2840a632eb92c32b69c9bb9e9092b14
118,385
def absolute_round(number: float) -> int:
    """Round *number* to the nearest integer, then take the absolute value.

    >>> absolute_round(-2.1)
    2
    >>> absolute_round(3.4)
    3
    >>> absolute_round(3.7)
    4
    >>> absolute_round(-2.9)
    3
    """
    rounded = round(number)
    return -rounded if rounded < 0 else rounded
446786ad83dfb42e8643917d1e656b22e750654b
118,388
import pickle


def load(filename: str = "temp"):
    """Load a previously pickled Python object from ``<filename>.dat``.

    :param filename: Optional file name (without the ``.dat`` extension).
    :return: Original object.
    """
    path = filename + ".dat"
    with open(path, "rb") as handle:
        return pickle.load(handle)
7e2d8d4065e66c58f012d92919a76fda3e4c1771
118,389
import requests


def user(gateway, user_name):
    """Fetch a user object by user_name from the server.

    :param gateway: base URL of the API gateway (no trailing slash expected)
    :param user_name: appended to '/users/' to form the resource URI
    :return: decoded JSON body of the GET response
    """
    uri = gateway + '/users/' + user_name
    # NOTE(review): no timeout and no status-code check — a non-JSON error
    # body would make .json() raise; confirm callers handle failures.
    response = requests.get(uri)
    return response.json()
e7d52301cf64430252947bf785119f2a9acf3fc3
118,390
import yaml


def load_conf(conf_path: str) -> dict:
    """
    Loads the configuration from provided YAML file.

    :param conf_path: path to the configuration file
    :return: configuration loaded as dict
    """
    # FullLoader avoids constructing arbitrary Python objects (unlike the
    # legacy yaml.Loader); for fully untrusted files prefer yaml.safe_load.
    with open(conf_path, 'rt') as in_fd:
        conf = yaml.load(in_fd, Loader=yaml.FullLoader)
    return conf
d9c72069ccd3f40b71f661acdd0db0a6eb9b8675
118,394
def centralmoment(vi, k):
    """Convert raw distribution moments to a central moment.

    Parameters
    ----------
    vi : array
        Raw moments indexed so that vi[1]..vi[4] are the first four raw
        moments (vi[0] is unused).
    k : int
        The central moment (0 to 4) to calculate (e.g. k=2 is the variance).

    Returns
    -------
    cm : scalar or None
        The central moment, or None (with a printed message) for k outside 0-4.
    """
    if k == 0:
        return 1
    if k == 1:
        return 0
    mean = vi[1]
    if k == 2:
        return vi[2] - mean ** 2
    if k == 3:
        return vi[3] - 3 * vi[2] * mean + 2 * mean ** 3
    if k == 4:
        return vi[4] - 4 * vi[3] * mean + 6 * vi[2] * mean ** 2 - 3 * mean ** 4
    print('Can only calculate central moments k = 0 to 4. Sorry.')
    return None
3ff6fa81c6334c1a334345f8293b0eb4c6a40476
118,395
def is_list_of(list_, type_func, min_len=0):
    """Check that *list_* is a list whose elements all satisfy *type_func*.

    :param list_: candidate value; must be exactly a ``list`` (subclasses
        are rejected, matching the original strict ``type(...) is list``).
    :param type_func: predicate applied to every element.
    :param min_len: minimum accepted length (default 0).
    :return: True iff all elements pass and the length is at least min_len.
    """
    if type(list_) is not list:
        return False
    # Fix: the previous reduce(lambda a, b: a and type_func(b), ...) could
    # return a *falsy non-bool* (whatever type_func returned) instead of
    # False; all() short-circuits identically but always yields a bool.
    return all(type_func(item) for item in list_) and min_len <= len(list_)
630dfc8f13918c5e3fab0815aec63212b88e19b9
118,396
def __replace_nbsps(rawtext: str) -> str:
    """Substitute every non-breaking space (U+00A0) with a regular space."""
    # str.translate does the whole substitution in a single C-level pass.
    return rawtext.translate({0xa0: ' '})
4716e6bb7d416feaccfdd519208350a4fa75dbb1
118,400
def Upper(words):
    """Make an upper-case identifier from words.

    Args:
      words: a list of lower-case words.

    Returns:
      the upper-case identifier, words joined by underscores.
    """
    return '_'.join(map(str.upper, words))
0524c468d8031b2c17f255f0db6a80f6b5fb2f9e
118,402
def compute_memory_extents(context, builder, lower, upper, data):
    """
    Given [lower, upper) byte offsets and a base data pointer, compute the
    memory pointer bounds as pointer-sized integers.

    :param context: codegen context (unused here; presumably kept for API
        symmetry with sibling helpers — confirm)
    :param builder: IR builder used to emit the arithmetic
    :param lower: integer IR value, inclusive byte offset from *data*
    :param upper: integer IR value, exclusive byte offset from *data*
    :param data: pointer IR value for the base of the buffer
    :return: (start, end) integer IR values: data+lower and data+upper
    """
    # ptrtoint so the byte offsets can be added as plain integers of the
    # same width as `lower`.
    data_ptr_as_int = builder.ptrtoint(data, lower.type)
    start = builder.add(data_ptr_as_int, lower)
    end = builder.add(data_ptr_as_int, upper)
    return start, end
418640f0588175f3f7f670a4bea8bcba7ba405a0
118,403
import torch
from typing import Tuple


def stats(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute mean and (sample) covariance of x.

    Args:
        x: torch tensor of shape (n_samples, cardinality)

    Returns:
        mean (1-D tensor), covariance ((cardinality, cardinality) tensor)
    """
    n_samples = x.shape[0]
    mean = torch.mean(x, dim=0, keepdim=True)
    centered = x - mean
    # Unbiased estimator: divide by n - 1.
    covariance = 1 / (n_samples - 1) * centered.T @ centered
    return mean.squeeze(), covariance
c5d7596a6731404bbe5d48cf9a001d308eed8cf5
118,405
def pre_replace(internal_values, fixed_values, pre_replacements):
    """Return pre-replaced parameters.

    Args:
        internal_values (numpy.ndarray): 1d array of internal parameters.
        fixed_values (numpy.ndarray): 1d array of length n_external; NaN for
            parameters that are not fixed, otherwise the fixed value.
        pre_replacements (numpy.ndarray): 1d array of length n_external; the
            i_th element is the position of the internal parameter to copy
            into position i, or -1 if nothing is copied there.

    Returns:
        pre_replaced (numpy.ndarray): 1d array with pre-replaced params.

    Examples:
        >>> internal_values = np.array([1., 2.])
        >>> fixed_values = np.array([np.nan, 0, np.nan])
        >>> pre_replacements = np.array([1, -1, 0])
        >>> pre_replace(internal_values, fixed_values, pre_replacements)
        array([2., 0., 1.])
    """
    result = fixed_values.copy()
    # Positions with a non-negative index receive the indicated internal value.
    copy_mask = pre_replacements >= 0
    result[copy_mask] = internal_values[pre_replacements[copy_mask]]
    return result
2ae4a10b2a5779388e4e692529521bed34e3390d
118,408
def make_me_iterable(a):
    """Guarantee an iterable: wrap non-iterable values in a one-element list.

    Values that already expose ``__iter__`` are returned unchanged.
    """
    return a if hasattr(a, '__iter__') else [a]
76f91727476612a13d64968bda3f405788302857
118,410
def uppercase(string):
    """Return string converted to uppercase."""
    converted = string.upper()
    return converted
94003f1a3d8b0dc36e0c58637ad67fa481a1e9b9
118,411
def detokenize(tokens):
    """Rebuild a sentence string from SoMaJo tokens.

    Prefers each token's ``original_spelling`` when present, honors the
    ``space_after`` flag, and strips surrounding whitespace from the result.
    """
    pieces = []
    for token in tokens:
        text = token.text if token.original_spelling is None else token.original_spelling
        pieces.append(text)
        if token.space_after:
            pieces.append(" ")
    return "".join(pieces).strip()
6d52f227a7daf3419a12ce46b08835589fe3748c
118,415
def CountTupleTree(tu):
    """Count the leaf nodes (ints and strings) in a tuple parse tree.

    Raises AssertionError for any node that is not a tuple, int, or str.
    """
    if isinstance(tu, (int, str)):
        return 1
    if isinstance(tu, tuple):
        return sum(CountTupleTree(child) for child in tu)
    raise AssertionError(tu)
eea38c376f2aba4ecf3e832607bc597e66b250e3
118,416
def circle_line_intersection(circle_center, circle_radius, pt1, pt2, full_line=True, tangent_tol=1e-9):
    """Find the points where a circle intersects a line (or segment).

    :param circle_center: The (x, y) location of the circle center
    :param circle_radius: The radius of the circle
    :param pt1: The (x, y) first point of the segment
    :param pt2: The (x, y) second point of the segment
    :param full_line: True to intersect with the infinite line; False limits
        results to the segment between pt1 and pt2
    :param tangent_tol: tolerance below which a grazing line counts as tangent
    :return Sequence[Tuple[float, float]]: 0, 1 or 2 intersection points

    Follows http://mathworld.wolfram.com/Circle-LineIntersection.html
    """
    cx, cy = circle_center
    # Work in circle-centered coordinates.
    x1, y1 = pt1[0] - cx, pt1[1] - cy
    x2, y2 = pt2[0] - cx, pt2[1] - cy
    dx, dy = x2 - x1, y2 - y1
    dr = (dx ** 2 + dy ** 2) ** .5
    big_d = x1 * y2 - x2 * y1
    discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2
    if discriminant < 0:
        # Line misses the circle entirely.
        return []
    sqrt_disc = discriminant ** .5
    # Sign ordering keeps the two points sorted along the segment direction.
    signs = (1, -1) if dy < 0 else (-1, 1)
    intersections = [
        (cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * sqrt_disc) / dr ** 2,
         cy + (-big_d * dx + sign * abs(dy) * sqrt_disc) / dr ** 2)
        for sign in signs]
    if not full_line:
        # Keep only points whose parametric position lies within [0, 1].
        kept = []
        for xi, yi in intersections:
            frac = (xi - pt1[0]) / dx if abs(dx) > abs(dy) else (yi - pt1[1]) / dy
            if 0 <= frac <= 1:
                kept.append((xi, yi))
        intersections = kept
    # Tangent line: both computed points coincide, report just one.
    if len(intersections) == 2 and abs(discriminant) <= tangent_tol:
        return [intersections[0]]
    return intersections
bf25e107fc56b4b7bed277c5100cf09448e4af6d
118,419
async def root():
    """Welcome message for the DNA Analysis service root endpoint."""
    payload = {"message": "Welcome to DNA Analysis"}
    return payload
bfb28030973aeccc4b286b8c80a99ee331ff2eb4
118,423
def is_none(value):
    """Map the sentinel strings 'NA' and 'None' to None; pass others through."""
    return None if value in ('NA', 'None') else value
c5e5854a3d9a0c5205797f76a94ed96019fcfc84
118,426
def getPSA(ChargeSA):
    """Total polar surface area (PSA).

    Sums entry[2] (surface area) over all entries whose partial charge
    entry[1] has absolute value >= 0.2.
    """
    return sum((entry[2] for entry in ChargeSA if abs(float(entry[1])) >= 0.2), 0.0)
73fa9ef923a0b7f63e6faaddeb63bb4cace5f68a
118,428
def is_number(x):
    """Is x a number? We say it is if it has a __int__ method."""
    try:
        x.__int__
    except AttributeError:
        return False
    return True
44a18a72afb4baff5622e740319e4235da2030cd
118,437
def col(ctx):
    """Get this cell's column from the rendering context."""
    cell = ctx["cell"]
    return cell.col
d18ea9cdfbcb9ebdceeeb573c2866234740dd16a
118,446
import calendar


def _datetime_to_epoch_seconds(dt):
    """Convert a datetime.datetime to integer seconds since the Unix epoch.

    :param dt: datetime to convert, or None.
    :return: int epoch seconds, or None when *dt* is None.

    Bug fix: the previous implementation paired ``time.mktime`` (which
    interprets its tuple in *local* time) with ``utctimetuple()``, skewing
    the result by the machine's UTC offset. ``calendar.timegm`` treats the
    tuple as UTC, which matches utctimetuple().
    """
    if dt is None:
        return None
    return calendar.timegm(dt.utctimetuple())
fd0aa87872fb57c6b5e386c338cf8be41e9d61d8
118,448
def day2k_to_date(day2k): """Convert integer day number since 2000-01-01 to date as (year, month, day) tuple. """ # ref: https://en.wikipedia.org/wiki/Julian_day#Julian_or_Gregorian_calendar_from_Julian_day_number # Gregorian date formula applied since 1582-10-15 # Julian date formula applied until 1582-10-04 d__ = int(day2k) + 2451545 f__ = d__ + 1401 # Julian calender if d__ > 2299160: # Gregorian calender f__ += (((4 * d__ + 274277) // 146097) * 3) // 4 - 38 e__ = 4 * f__ + 3 h__ = 5 * ((e__ % 1461) // 4) + 2 day = (h__ % 153) // 5 + 1 month = (h__ // 153 + 2) % 12 + 1 year = e__ // 1461 - 4716 + (14 - month) // 12 return year, month, day
13b6ab6ff7533d6dbf08903895d79e8b92d9c0ab
118,451
import math


def calculate_offset_point(x, y, d, b):
    """Offset a lon/lat point by a distance along a bearing.

    formula source: https://www.movable-type.co.uk/scripts/latlong.html

    :param x: longitude in decimal degrees
    :param y: latitude in decimal degrees
    :param d: distance in km
    :param b: bearing in degrees
    :return: new x, y in decimal degrees
    """
    R = 6378.137  # km, Earth equatorial radius
    lon, lat, bearing = math.radians(x), math.radians(y), math.radians(b)
    angular = d / R  # angular distance travelled
    new_lat = math.asin(
        (math.sin(lat) * math.cos(angular))
        + (math.cos(lat) * math.sin(angular) * math.cos(bearing))
    )
    new_lon = lon + math.atan2(
        math.sin(bearing) * math.sin(angular) * math.cos(lat),
        math.cos(angular) - math.sin(lat) * math.sin(new_lat),
    )
    return math.degrees(new_lon), math.degrees(new_lat)
9d5928effe20abae399d0bdce18428571db96663
118,454
def pref_to_str(pref_value):
    """Normalize a preference value for plist/MunkiReport display.

    None becomes '' (so the value can be written to a plist), booleans
    become the strings 'True'/'False', and every other value is returned
    unchanged.
    """
    if pref_value is None:
        return ''
    if pref_value is True:
        return 'True'
    if pref_value is False:
        return 'False'
    return pref_value
a5511dc37098da5675603a83dd5dd0b8947a757c
118,455
import uuid


def issue(project):
    """Issue fixture for issue API resource tests.

    Creates an issue with a unique title/description pair derived from a
    random hex token.
    """
    token = uuid.uuid4().hex
    payload = {
        "title": f"Issue {token}",
        "description": f"Issue {token} description",
    }
    return project.issues.create(payload)
269863a5fe82601c1c818bdeddf901091fbe84cc
118,459
def utf8(text):
    """Encode *text* to UTF-8 bytes."""
    encoded = text.encode("utf-8")
    return encoded
90924c6cddfdc897f80871128a2e4dd8169720be
118,463
def carbon_emissions_imports_rule(mod, tx, tmp):
    """
    **Constraint Name**: Carbon_Emissions_Imports_Constraint
    **Defined Over**: CRB_TX_OPR_TMPS

    Constrain the *Import_Carbon_Emissions_Tons* variable to be at least as
    large as the calculated imported carbon emissions for each transmission
    line, based on its CO2-intensity.
    """
    # Sign of Transmit_Power_MW that counts as an import depends on the
    # configured direction of the line relative to the carbon-cap zone.
    if mod.carbon_cap_zone_import_direction[tx] == "positive":
        return mod.Import_Carbon_Emissions_Tons[tx, tmp] \
            >= mod.Transmit_Power_MW[tx, tmp] \
            * mod.tx_co2_intensity_tons_per_mwh[tx]
    elif mod.carbon_cap_zone_import_direction[tx] == "negative":
        # Flow is negated so that imports are still counted positively.
        return mod.Import_Carbon_Emissions_Tons[tx, tmp] \
            >= -mod.Transmit_Power_MW[tx, tmp] \
            * mod.tx_co2_intensity_tons_per_mwh[tx]
    else:
        # NOTE(review): message reads "...direction have a value" — likely a
        # missing "must"; left unchanged because it is runtime text.
        raise ValueError("The parameter carbon_cap_zone_import_direction "
                         "have a value of either 'positive' or "
                         "'negative,' not {}.".format(
                             mod.carbon_cap_zone_import_direction[tx]
                         ))
b2d3a335738742b1441d5a6a065a2a9d3d009fe3
118,465
def in_sequence(function_list):
    """
    Create a new function that executes the functions in the list in sequence.

    :param function_list: List of zero-argument functions
    :return: Function that calls each in order, discarding return values
    """
    def seqfun():
        for fn in function_list:
            fn()
    return seqfun
7e2b6c5836f03c5d85752bd793012b8ca5959550
118,468
def not_empty(collection):
    """
    For collection objects (lists and dictionaries), check whether the
    collection holds anything. Non-collections always count as non-empty.

    Returns True if not empty (or not a list/dict); False otherwise.
    """
    if not isinstance(collection, (dict, list)):
        return True
    return bool(collection)
9ac8ef828e5fb381970d971eda58bbc285a83dad
118,469
def get_percentage(a, b) -> str:
    """Format the ratio a/b as a percentage string with the raw counts."""
    pct = round(100 * a / b, 2)
    return f"{pct}% ({a}/{b})"
2ad44df939a1ac3adcc3f06bac43cece5bd4a1d5
118,470
def decode_exit(exit_code):
    """Decode the exit code returned by os.system().

    Returns a (status, signal, core) tuple: the exit status from the high
    byte, the terminating signal from the low 7 bits, and 1 if a core dump
    was produced (high bit of the low byte). See os.wait() for the encoding.
    """
    high_byte = exit_code >> 8
    low_seven = exit_code & 0x7f
    core_flag = (exit_code & 0xff) >> 7
    return (high_byte, low_seven, core_flag)
cdd53624e13048114ec55df44764a044270f1341
118,471
def fwhm(x, y, shift=0.5, return_as_dict=False):  # MR21032017
    """The function searches x-values (roots) where y=0 (after normalization
    to values between 0 and 1 and shifting the values down by 0.5 (default
    value)) based on linear interpolation, and calculates full width at half
    maximum (FWHM).

    :param x: an array of x values.
    :param y: an array of y values. WARNING: normalized IN PLACE (see note).
    :param shift: an optional shift to be used in the process of normalization (between 0 and 1).
    :param return_as_dict: if to return a dict with 'fwhm' and 'x_range'
    :return: a value of the FWHM or dictionary consisting of 'fwhm' and 'x_range'
    :raises Exception: if y is constant or fewer than 2 roots are found.
    """
    def is_positive(num):
        return True if num > 0 else False
    # Normalize values first:
    #y = (y - min(y)) / (max(y) - min(y)) - shift  # roots are at Y=0
    #OC18112017 (making it work with standard Python lists / arrays)
    minY = min(y)
    maxY = max(y)
    if(maxY == minY):
        raise Exception('FWHM can not be calculated')
    mult = 1./(maxY - minY)
    lenY = len(y)
    # NOTE(review): this rescales the caller's y sequence in place — the
    # input is mutated; confirm callers do not reuse the original values.
    for i in range(lenY):
        y[i] = (y[i] - minY)*mult - shift
    positive = is_positive(y[0])
    list_of_roots = []
    #for i in range(len(y)):
    for i in range(lenY):
        current_positive = is_positive(y[i])
        if current_positive != positive:
            # Sign change: locate the zero crossing by linear interpolation
            # between the two bracketing samples.
            list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(y[i]) + abs(y[i - 1])) * abs(y[i - 1]))
            positive = not positive
    if len(list_of_roots) >= 2:
        # FWHM is the spread between the outermost crossings.
        if not return_as_dict:
            return abs(list_of_roots[-1] - list_of_roots[0])
        else:
            return {
                'fwhm': abs(list_of_roots[-1] - list_of_roots[0]),
                'x_range': list_of_roots,
            }
    else:
        raise Exception('Number of roots is less than 2!')
5e334e9e5db06dcdfcee5ae57c865589bd901342
118,472
def pythonize_path(path):
    """Turn a slash path into Python dotted notation.

    ex. foo/bar/baz -> foo.bar.baz
    """
    return '.'.join(path.split('/'))
c5d1a399d8b31223c19775c4c10fdffa06bdbe94
118,477
def calc_expected_value(counts_dict):
    """Expected value of a measurement histogram.

    Keys are interpreted as binary numbers; each contributes its integer
    value weighted by its count.
    """
    return sum(int(bits, 2) * count for bits, count in counts_dict.items())
cbc4bdd7cf711b0b44811d4a11b05d606ee996ce
118,482
import urllib3


def get_html(url):
    """Use urllib3 to retrieve the html source of a given url.

    :param url: URL to GET
    :return: raw response body as bytes (not decoded text, despite the name)
    """
    http = urllib3.PoolManager()
    r = http.request('GET', url)
    data = r.data
    # Return the connection to the pool now that the body is fully read.
    r.release_conn()
    return data
fa81314c8a1eee21ef6e7ee05f3affe43cefc85f
118,485
import pkg_resources


def status(request):
    """This is used to 'ping' the web service to check if its running.

    :param request: framework request object (unused here).
    :returns: a status dict which the configured view will return as JSON.

    The dict has the form::

        dict(
            status="ok",
            name="<project name>",
            version="<egg version of nozama.cloudsearch.service>"
        )
    """
    # Version comes from the installed distribution metadata, not hard-coded.
    pkg = pkg_resources.get_distribution('nozama-cloudsearch')
    return dict(
        status="ok",
        name="nozama-cloudsearch",
        version=pkg.version,
    )
bf42ba156ff670a507d6fb7998d779ae24a8e68d
118,487
def frags_in_cutoff(fragList, cutoff, center_ip_id):
    """Indices of fragments within *cutoff* of the central ion pair.

    The central ion pair itself (index *center_ip_id*) is excluded.
    """
    return [i for i, frag in enumerate(fragList)
            if frag["dist"] < cutoff and i != center_ip_id]
40a20823eb30b3c6a2b91a5802bbe1d94e6a37db
118,488
def make_list_response(reponse_list, cursor=None, more=False, total_count=None):
    """Creates reponse with list of items and also meta data useful for
    pagination.

    Args:
        reponse_list (list): list of items to be in response
        cursor (Cursor, optional): ndb query cursor
        more (bool, optional): whether there's more items in terms of pagination
        total_count (int, optional): Total number of items

    Returns:
        dict: response to be serialized and sent to client
    """
    return {
        'list': reponse_list,
        'meta': {
            # Fix: identity check instead of `!= None`, which invokes __eq__
            # and can misbehave for objects with custom equality.
            'nextCursor': cursor.urlsafe() if cursor is not None else None,
            'more': more,
            'totalCount': total_count,
        },
    }
ce45f4a13a926bb99d620cd13c707abe296f8d8d
118,491
import json


def serialize(c):
    """Serialize *c* to a JSON string.

    Objects that are not natively JSON-serializable fall back to their
    attribute dict via vars().
    """
    return json.dumps(c, default=vars)
c37d1e6c6dd84d3fc330ee3e272ca26309f9f620
118,496
def calc_probs(wordList):
    """Tabulate word frequencies and their empirical probabilities.

    Parameters:
        wordList (list[str]): all words of the chosen text

    Returns:
        list[str]: the distinct words, in first-occurrence order
        list[float]: the corresponding probability of each word
    """
    total = len(wordList)
    counts = {}
    for word in wordList:
        counts[word] = counts.get(word, 0) + 1
    probabilities = [n / total for n in counts.values()]
    return list(counts.keys()), probabilities
93d24d970d9d5c00b322305dac52d42bd1caad6f
118,500
def format_filesize(value):
    """Human-readable file size for a byte count.

    Zero/None input yields ""; values beyond PB are returned as a bare
    number (the unit list is exhausted).
    """
    if not value:
        return ""
    for unit in ('bytes', 'KB', 'MB', 'GB', 'TB', 'PB'):
        if value < 1024.0:
            return "%3.1f %s" % (value, unit)
        value /= 1024.0
    return value
17ed071f0c4cdf12cc66ed2da5396757e0292579
118,507
import math


def equalateral(center, sideLength, angle=0):
    """Vertices of an equilateral triangle.

    Defining relations can be found at
    http://mathworld.wolfram.com/EquilateralTriangle.html

    :param center: (x, y) center point of the triangle
    :param sideLength: side length of the triangle
    :param angle: anticlockwise angle in degrees from horizontal to one
        corner of the triangle
    :return: the three (x, y) vertices, 120 degrees apart
    """
    circumradius = math.sqrt(3) / 3 * sideLength
    cx, cy = center[0], center[1]

    def vertex(theta_deg):
        theta = math.radians(theta_deg)
        return (cx + circumradius * math.cos(theta),
                cy + circumradius * math.sin(theta))

    return vertex(angle), vertex(angle + 120), vertex(angle + 240)
d6ba09febacb02b7dce40e3bb197b95545079b5c
118,522
def front_back(str_: str) -> str:
    """Swap the first and last characters of a string.

    Strings of length 0 or 1 are returned unchanged.
    """
    if len(str_) <= 1:
        return str_
    return str_[-1] + str_[1:-1] + str_[0]
b7d447dbadf2e8fe90c0d7dccf81666be4602e06
118,523
def mask_filter_bounding_boxes(mask, bbList):
    """Keep only bounding boxes that overlap the binary mask.

    Args:
        mask (np.array, 2-dim): binary mask
        bbList (list of (top, bottom, left, right)) bounding boxes

    Returns:
        list of the input boxes whose mask patch contains at least one
        nonzero element
    """
    kept = []
    for bb in bbList:
        top, bottom, left, right = bb[0], bb[1], bb[2], bb[3]
        if mask[top:bottom, left:right].sum() > 0:
            kept.append(bb)
    return kept
5d55cb32730000d6efda240e5076776e343ec7ec
118,524
from typing import Any


def get_test_name(param: Any) -> str:
    """Get the test name from the parameter list of a parametrized test.

    Parameters
    ----------
    param : Any
        A parameter of the test

    Returns
    -------
    str : The name (the text after a leading '#') or an empty string.
    """
    # Fix: `param[0] == "#"` raised IndexError for an empty string;
    # startswith handles "" safely.
    if isinstance(param, str) and param.startswith("#"):
        return param[1:]
    return ""
5856a4ebfc7352a70d1315d91e79ce33611eba3b
118,526
def adj(p):
    """Set of the four orthogonal neighbours of grid coordinate *p*."""
    px, py = p[0], p[1]
    return {(px - 1, py), (px + 1, py), (px, py - 1), (px, py + 1)}
4cb611c842598cdb97ade3d62fef8b3dddeac947
118,531
def get_all_layers(layer):
    """Collect a layer (or layers) plus every layer feeding into it.

    Performs a breadth-first walk over ``input_layer`` /
    ``input_layers`` attributes, starting from the given layer(s).
    The main use is gathering all layers of a network from its output
    layer(s).

    :usage:
        >>> from lasagne.layers import InputLayer, DenseLayer
        >>> l_in = InputLayer((100, 20))
        >>> l1 = DenseLayer(l_in, num_units=50)
        >>> all_layers = get_all_layers(l1)
        >>> all_layers == [l1, l_in]
        True

    :parameters:
        - layer : Layer or list of Layer
            the instance(s) for which to gather all layers feeding into
            it, directly or indirectly.

    :returns:
        - layers : list
            the given instance(s) followed by everything feeding into
            them, in breadth-first discovery order.
    """
    if isinstance(layer, (list, tuple)):
        collected = list(layer)
    else:
        collected = [layer]
    frontier = list(collected)
    while frontier:
        node = frontier.pop(0)
        if hasattr(node, 'input_layers'):
            incoming = node.input_layers
        elif hasattr(node, 'input_layer'):
            incoming = [node.input_layer]
        else:
            incoming = []
        # Drop layers already seen and None placeholders (layers with no
        # incoming layer).
        fresh = [inp for inp in incoming
                 if inp is not None and inp not in collected]
        frontier.extend(fresh)
        collected.extend(fresh)
    return collected
5864720fdd6131e0206d03b0b54a57732dfa175c
118,540
def start(cave):
    """Return True iff `cave` is the literal name 'start'."""
    return cave == 'start'
576e711de7266f3376c03df0ad74d308df9b63ff
118,544
def parse_input_table(input_file, query_col, target_col, table):
    """
    Parses input file and returns query_ids and target_ids or just target_ids.

    For the "swissprot" and "trembl" tables the target column is assumed
    to be pipe-delimited (e.g. ``sp|P12345|NAME``) and the third field is
    used as the hit identifier; otherwise the column is taken verbatim.

    Arguments:
        input_file {string} -- Tab-separated input file with blast results
            or just target ids.
        query_col {int|None} -- Column with query ids, or None to return
            only target ids.
        target_col {int} -- Column with target ids.
        table {string} -- Database table name ("swissprot", "trembl", ...).

    Returns:
        [list] -- List of (query_id, target_id) tuples, or just target_ids
            when query_col is None.
    """
    # Single loop replaces the previous duplicated swissprot/trembl vs
    # "other" branches, which differed only in how db_hit was extracted.
    extract_third_field = table in ("swissprot", "trembl")
    input_list = []
    with open(input_file, 'r') as infile:
        for line in infile:
            fields = line.strip().split("\t")
            db_hit = fields[target_col]
            if extract_third_field:
                db_hit = db_hit.split("|")[2]
            if query_col is not None:
                input_list.append((fields[query_col], db_hit))
            else:
                input_list.append(db_hit)
    return input_list
fc77a583bbf8e63659890bbec32002f86ab2bfdb
118,552
def cleanup(code):
    """Strip every character that is not one of the eight brainfuck commands."""
    commands = {'.', ',', '[', ']', '<', '>', '+', '-'}
    return ''.join(ch for ch in code if ch in commands)
cb0e4733ecdfbc55c47fbe84a8864102eaa27f8b
118,557
def translate_error(error, translation_list, format_str=None):
    """Translate `error` to a new exception type, or return it unchanged.

    An error is translated when it is an instance (including a subclass
    instance) of a source type in `translation_list`; entries earlier in
    the list take priority.

    Args:
        error (Exception): Error to translate.
        translation_list (list): (source Exception type, target Exception
            type) tuples. The target type is constructed as
            ``target(error, format_str)``.
        format_str (str|None): An
            api_lib.util.exceptions.FormattableErrorPayload format string.
            Note that any properties that are accessed here are on the
            FormattableErrorPayload object, not the object returned from
            the server.

    Returns:
        Exception: the translated error on a match, else the original.
    """
    target_cls = next(
        (target for source, target in translation_list
         if isinstance(error, source)),
        None)
    if target_cls is None:
        return error
    return target_cls(error, format_str)
9e717ac8978f11d120fd78aaff86dfc84bb1f56f
118,558
import requests
import json
def get_asset_change_notifications(channel_id, auth):
    """Get asset change notifications for a specific channel.

    Args:
        channel_id: Identifier of the notification channel to poll.
        auth (dict): Must contain "api_key" and "access_token" entries.

    Returns:
        The decoded JSON payload returned by the Getty Images
        asset-changes endpoint.
    """
    url = "https://api.gettyimages.com/v3/asset-changes/change-sets"
    query_params = {"channel_id": channel_id}
    headers = {
        "Api-Key": auth["api_key"],
        "Authorization": f"Bearer {auth['access_token']}"
    }
    # NOTE(review): PUT on change-sets looks intentional here (the
    # endpoint is polled this way in the original code) — confirm against
    # the Getty API reference before changing.
    response = requests.put(url, params=query_params, headers=headers)
    # response.json() replaces json.loads(response.content): same result,
    # but it lets requests handle the response encoding.
    return response.json()
8fc948ef1cedc9216069b0ac4611d04c42c33606
118,562
def join(separator, iterable: list) -> str:
    """Join an iterable of objects into one string.

    Every element (and the separator itself) is converted with ``str``
    before joining.

    :param separator: Separator placed between joined elements
    :param iterable: Iterable of objects to join
    :return: Joined string
    """
    pieces = [str(item) for item in iterable]
    return str(separator).join(pieces)
7503e916ed8f6725d02d4c992749c9a4df13a022
118,574
def _sort_table(table, mdvar): """ Sorts metadata category and aligns with table. Parameters ---------- table : pd.DataFrame Contain sample/feature labels along with table of values. Rows correspond to samples, and columns correspond to features. mdvar : pd.Series Metadata values for samples. The index must correspond to the index of `table`. Returns ------- pd.DataFrame Aligned feature table. pd.Series Aligned metadata. """ mdvar = mdvar.sort_values() table = table.reindex(columns=mdvar.index) return table, mdvar
46a3b05485e6a6553947f1996da489bc505ab61e
118,580
def seq_match(exseq, inseq, allowMismatch):
    """Compare two equal-length sequences, optionally tolerating one mismatch.

    :return: (bool, num_mismatch) — True with the mismatch count (0 or 1)
        when the sequences match under the policy; (False, None) when the
        lengths differ, there are two or more mismatches, or mismatches
        are not allowed.
    """
    if len(exseq) != len(inseq):
        return False, None
    mismatches = sum(a != b for a, b in zip(exseq, inseq))
    if mismatches == 0:
        return True, 0
    if allowMismatch and mismatches == 1:
        return True, 1
    return False, None
9358f4071bbd5d340d2222131b4a79ee33123cfd
118,583
import re
def parse_tagged(message):
    """Parse a ':tag:=value' message.

    Returns a (tag, value) tuple, or None when the message is not in
    tagged form.
    """
    match = re.match(r'^:([^:]+):=(.*)$', message)
    return match.groups() if match else None
cf6e881b29d3c3b96b987afb79fcfc42d439b7c5
118,584
import math
def get_photon_density_gaussian(
    elec_x, elec_y, elec_z, ct,
    photon_n_lab_max, inv_laser_waist2, inv_laser_ctau2,
    laser_initial_z0, gamma_boost, beta_boost ):
    """
    Photon density of the scattering Gaussian laser pulse at an electron's
    position, at the current time.

    Parameters
    ----------
    elec_x, elec_y, elec_z: floats
        The position of the given electron (in the frame of the simulation)
    ct: float
        Current time in the simulation frame (multiplied by c)
    photon_n_lab_max: float
        Peak photon density in the lab frame (at the centre of the pulse)
    inv_laser_waist2, inv_laser_ctau2, laser_initial_z0: floats
        Properties of the Gaussian laser pulse (in the lab frame)
    gamma_boost, beta_boost: floats
        Properties of the Lorentz boost between the lab and simulation frame.

    Returns
    -------
    photon_n_sim: float
        The photon density in the frame of the simulation
    """
    # Lorentz-transform the electron coordinates: simulation frame -> lab frame
    z_lab = gamma_boost*( elec_z + beta_boost*ct )
    ct_lab = gamma_boost*( ct + beta_boost*elec_z )
    # Gaussian profile in the lab frame: transverse (waist) and
    # longitudinal (duration) terms
    exponent = - 2*inv_laser_waist2*( elec_x**2 + elec_y**2 ) \
        - 2*inv_laser_ctau2*(z_lab - laser_initial_z0 + ct_lab)**2
    photon_n_lab = photon_n_lab_max * math.exp( exponent )
    # Transform the density back into the simulation frame
    return gamma_boost*photon_n_lab*( 1 + beta_boost )
8766a342fcfe6b03321de32a50efe1641426d9d9
118,588
def read_num(file):
    """Read one line from `file` and return it converted to an int."""
    return int(file.readline())
44826747cec36bbc7ea29b01d97257dab1e3d18c
118,590
def flatten(l):
    """Recursively flatten nested lists into a single flat list.

    :param list l: a list that potentially contains other lists.
    :rtype: list
    """
    flat = []
    for item in l:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
f8f20d355008032f0fc95bfd6c3f50fc43de7a33
118,595
import warnings def _check_report_dims(report_size): """ Warns user & reverts to default if report dimensions are non-numerical. Parameters ---------- report_size: Tuple[int, int] Report width, height in jupyter notebook. Returns ------- report_size: Tuple[int, int] Valid values for report width, height in jupyter notebook. """ width, height = report_size try: width = int(width) height = int(height) except ValueError: warnings.warn('Report size has invalid values. ' 'Using default 1600x800') width, height = (1600, 800) return width, height
0d2d50378ddd066270b66e8f3b71f78db0f3b829
118,597
def is_unmapped_read(flag):
    """Interpret a SAM bitwise FLAG field.

    Returns True when the 0x4 (read unmapped) bit is set.
    """
    UNMAPPED_BIT = 0x4
    return bool(int(flag) & UNMAPPED_BIT)
0893402cea99a426010ead09d4a005088eb3c8f0
118,601
def compute_specificity(cm):
    """
    Computes specificity (true-negative rate) for binary classification.

    Assumed layout (rows = actual, columns = predicted, class 0 = negative):
    TN = cm[0, 0], FP = cm[0, 1].

    :param cm: A 2x2 Numpy-like matrix that represents a confusion matrix
    :return: TN / (FP + TN), or 0 when there are no actual negatives
    """
    # The previous check `len(cm.shape) and ...` was always truthy for
    # any non-empty shape; require an actual 2x2 matrix. Debug prints
    # and the unused TP/FN extractions were removed.
    assert len(cm.shape) == 2 and cm.shape[0] == cm.shape[1] == 2
    TN = cm[0, 0]
    FP = cm[0, 1]
    if FP + TN == 0:
        return 0
    return float(TN) / (FP + TN)
4815d74db80be0b953dade3692092634ff7c9b7b
118,602
def rename_fields(col_names, datatime_fields):
    """
    Build an anonymising rename map for column names.

    Columns become F1, F2, ... in order; date/time columns additionally
    receive a 'DT' suffix (e.g. 'F3DT').

    :param col_names: column names from the data frame
    :param datatime_fields: fields containing date/time values
    :return: dict mapping each original name to its new name
    """
    mapping = {name: f'F{pos}' for pos, name in enumerate(col_names, start=1)}
    for dt_field in datatime_fields:
        mapping[dt_field] += 'DT'
    return mapping
4496ee3491130a2a09b06a201fa175783fe95797
118,603
def create_bom(merged_manifest, approved_list, denied_list):
    """Creates a BOM

    If a BOM package is approved or denied, the 'copyright_notices',
    'interaction_types', and 'resolution' will be copied over. Otherwise,
    these attributes will be added to the BOM with empty values. Version
    numbers are normalised to strings.

    Args:
        merged_manifest: dictionary representing all the packages used by
            a project
        denied_list: dictionary representing the denied packages
        approved_list: dictionary representing the approved packages

    Returns:
        Dictionary representing the BOM. Unlike the previous shallow
        `.copy()` version, the per-package dictionaries inside
        `merged_manifest` are no longer mutated.
    """
    # Denied entries win on key collision, matching the original
    # dict(list(approved) + list(denied)) merge order.
    resolved_packages = {**approved_list, **denied_list}
    bom = {}
    for key, package in merged_manifest.items():
        entry = dict(package)  # fresh dict so the input manifest stays intact
        # standardize all version numbers to be strings
        entry["version"] = str(entry["version"])
        resolved = resolved_packages.get(key)
        if resolved is not None:
            for field in ("copyright_notices", "interaction_types",
                          "resolution"):
                entry[field] = resolved[field]
        else:
            entry["copyright_notices"] = ""
            entry["interaction_types"] = []
            entry["resolution"] = ""
        bom[key] = entry
    return bom
f65e20cfa0850d34b0e2bfb01ec66f058dad8a10
118,605
def algorithm_2(array: list) -> int:
    """
    Algorithm 2 - Brute Force Optimized

    Maximum-subarray sum: for every starting index, extend the right end
    one element at a time while accumulating the running sum, so only two
    nested loops are needed. The time complexity is O(n^2). The empty
    subarray (sum 0) is always a candidate.
    """
    best = 0
    for left in range(len(array)):
        running = 0
        for right in range(left, len(array)):
            running += array[right]
            if running > best:
                best = running
    return best
811e00bbf5612e099fe5af2f3660924ca6132ce8
118,607
def printPoint3d(p):
    """Format a point for printing: '(x, y, z)' with coordinates rounded to 2 dp."""
    coords = ', '.join(str(round(coord, 2)) for coord in p)
    return '(' + coords + ')'
3578c4f9b97011030437d09dac8caf28c007dbe0
118,622
from typing import Callable
def run_crcmod_func(crc_func: Callable[[bytes], int], *nums: int) -> int:
    """
    Encode the inputs as big-endian bytes, concatenate them, and run the
    CRC function over the result.

    :param crc_func: CRC function created using crcmod.mkCrcFun
    :param nums: CRC inputs
    :return: CRC output
    """
    chunks = []
    for num in nums:
        value = int(num)
        # minimal big-endian encoding; 0 contributes zero bytes
        chunks.append(value.to_bytes((value.bit_length() + 7) // 8,
                                     byteorder='big'))
    return crc_func(b''.join(chunks))
9018909d0e05aa7e4161fedc02fad7d944c8ab6a
118,624
import string
import random
def id_generator(
        size: int = 6,
        chars: str = (
            string.ascii_uppercase
            + string.digits
            + string.ascii_lowercase
        )
) -> str:
    """Generate a random alphanumeric ID.

    By default the IDs are 6 characters long. Uses the `random` module,
    so the output is reproducible under random.seed() and NOT suitable
    for security-sensitive tokens.
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
6f3b8f8575032a533be78fd003a25aa1e634801d
118,626
from typing import List
def parse_header(header_string: str, sep: str = '\t') -> List:
    """Parse a delimited header line.

    Args:
        header_string: A string containing header items.
        sep: A string containing a delimiter.

    Returns:
        A list of header items with any double quotes removed.
    """
    items = []
    for raw in header_string.strip().split(sep):
        items.append(raw.replace('"', ''))
    return items
deca42ef09410cbe58a841724b87b43eb9d2db53
118,646
from pathlib import Path
def read(name, **kwargs):
    """Read and return the contents of file `name`, resolved relative to
    this module's directory.

    Accepts an optional `encoding` keyword (default 'utf8').
    """
    target = Path(__file__).parent / name
    return target.read_text(encoding=kwargs.get('encoding', 'utf8'))
06588330d184a8912386f19cc25d13befa24239a
118,649
def A_real_boiler(Q_boiler, q_boiler):
    """
    Calculates the boiler's real heat-transfer area as total heat load
    divided by unit heat load.

    Parameters
    ----------
    Q_boiler : float
        The heat load of boiler, [W]
    q_boiler : float
        The unit heat load of boiler, [W/m**2]

    Returns
    -------
    A_real_boiler : float
        The boiler's real heat-transfer area, [m**2]
    """
    return Q_boiler / q_boiler
1533a09790158c076e4f194e579690cbcef0617d
118,659
def turn_on_last_zero(S):
    """
    Turn on the rightmost 0-bit in word `S`, producing all 1's if none.
    As a by-product, the position of the toggled 0-bit is returned.

    Examples
    ========

    >>> S, z = turn_on_last_zero(0b10100111)
    >>> bin(S)
    '0b10101111'
    >>> z
    3

    >>> S, z = turn_on_last_zero(0b11111)
    >>> bin(S)
    '0b111111'
    >>> z
    5
    """
    # ~S & (S + 1) isolates the rightmost 0-bit of S (bit identity).
    lowest_zero = ~S & (S + 1)
    return S | lowest_zero, lowest_zero.bit_length() - 1
b42606080303d7f5a88aed530320f099d36050c2
118,661
def table_name(table, column):
    """Compute the aggregate table name for a source table/column pair.

    table (string) : the originating table's name
    column : the column name
    return : string of the form '<table>__<column>_agg'
    """
    return f"{table}__{column}_agg"
3d247d424f558d22559df2dbc9ac9e5e0fa2768f
118,664
def compute_change(df, column_source, column_target_change, column_target_change_pc, time_periods):
    """
    Compute the change and percentage change of the values in the source
    column for each specified period in (trading) days, adding one pair of
    columns per period named '<target>-<period>'.

    :param df: dataframe (sorted in ascending time order)
    :param column_source: name of source column in dataframe with values to
        compute change (e.g. close price)
    :param column_target_change: base name of target column for the change
    :param column_target_change_pc: base name of target column for the
        percentage change
    :param time_periods: list of time periods in (trading) days
    :return: modified dataframe (columns are added in place)
    """
    # Removed a block of commented-out experimental code that resampled to
    # calendar days ("D" asfreq with ffill) before differencing.
    source = df[column_source]
    for time_period in time_periods:
        key_change = "{}-{:d}".format(column_target_change, time_period)
        key_change_pc = "{}-{:d}".format(column_target_change_pc, time_period)
        df[key_change] = source.diff(time_period)
        df[key_change_pc] = source.pct_change(time_period)
    return df
63a754ff10cc25aab12bf076e556bddb4d6d6541
118,673
def param_count(module):
    """
    Count the parameters of a module.

    Args:
        module (torch.nn.Module): module to count parameters in.

    Returns:
        Total number of elements across all parameter tensors.
    """
    return sum(param.numel() for param in module.parameters())
d2178befa25005a8d0a6227e32d7db62f3df30f9
118,675
def object_sizes(sobjs):
    """Return a list of the 'Size' value from each object record."""
    return [record['Size'] for record in sobjs]
34ddc0191cb3c96ffac471ac85061ca10f13b08d
118,676