Dataset schema: content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
def f(p, x):
    """
    Parameters
    ----------
    p : list
        A list that has a length of at least 2.
    x : int or float
        Scaling factor for the first variable in p.

    Returns
    -------
    int or float
        Returns the first value in p scaled by x, added to the second value in p.

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import f
    >>> p = [1, 2]
    >>> x = 10
    >>> f(p, x)
    12
    >>> p = np.array([5.16312215, 8.79307163])
    >>> x = 2.0
    >>> np.around(f(p, x), 8)
    19.11931593
    """
    return (p[0] * x) + p[1]
3a5e464e7599b6233086e3dddb623d88c6e5ccb6
3,648,300
def get_contributors_users(users_info) -> list:
    """
    Get the github users from the inner PRs.

    Args:
        users_info (list): the response of get_inner_pr_request()

    Returns (list): Github users
    """
    users = []
    for item in users_info:
        user = item.get('login')
        github_profile = item.get('html_url')
        pr_body = item.get('body')
        if user != 'xsoar-bot':
            users.append({
                'Contributor': f"<img src='{item.get('avatar_url')}'/><br></br> "
                               f"<a href='{github_profile}' target='_blank'>{user}</a>"
            })
        else:
            if 'Contributor' in pr_body:
                contributor = USER_NAME_REGEX.search(pr_body)[0].replace('\n', '')
                user_info = get_github_user(contributor)
                github_avatar = user_info.get('avatar_url')
                github_profile = user_info.get('html_url')
                if not github_avatar and not github_profile:
                    print(f'The user "{contributor}" was not found.')
                    continue
                users.append({
                    'Contributor': f"<img src='{github_avatar}'/><br></br> "
                                   f"<a href='{github_profile}' target='_blank'>{contributor}</a>"
                })

    for user in users:
        prs = users.count(user)
        user.update({'Number of Contribution(s)': prs})

    list_users = []
    result = {i['Contributor']: i for i in reversed(users)}.values()
    new_res = sorted(result, key=lambda k: k['Number of Contribution(s)'], reverse=True)
    for user in new_res:
        user['Contributor'] += f'<br></br>{user["Number of Contribution(s)"]} Contributions'
        list_users.append(user['Contributor'])

    return list_users
1a7bdb6608600c2959ec3961dfd9567cf674f471
3,648,301
import numpy as np

# _TORAD (the degrees-to-radians factor, i.e. pi / 180) is assumed to be defined at module level.


def euler2mat(roll, pitch, yaw):
    """
    Create a rotation matrix for the orientation expressed by this transform.
    Copied directly from FRotationTranslationMatrix::FRotationTranslationMatrix
    in Engine/Source/Runtime/Core/Public/Math/RotationTranslationMatrix.h ln 32
    :return:
    """
    angles = _TORAD * np.array((roll, pitch, yaw))
    sr, sp, sy = np.sin(angles)
    cr, cp, cy = np.cos(angles)
    return np.array([
        [cp * cy, sr * sp * cy - cr * sy, -(cr * sp * cy + sr * sy)],
        [cp * sy, sr * sp * sy + cr * cy, cy * sr - cr * sp * sy],
        [sp, -sr * cp, cr * cp]
    ])
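A quick sanity check of the rotation convention (a sketch; `_TORAD = np.pi / 180` is an assumption, as noted above):

import numpy as np

_TORAD = np.pi / 180  # assumed definition of the module-level constant

# Zero angles give the identity rotation.
assert np.allclose(euler2mat(0, 0, 0), np.eye(3))
# A 90-degree yaw maps the x-axis onto the y-axis.
assert np.allclose(euler2mat(0, 0, 90) @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])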
2b635f50bb42f7a79938e38498e6e6fefd993d0f
3,648,302
def remove_articles(string: str, p: float = 1.0) -> str:
    """Remove articles from text data.

    Matches and removes the following articles:
        * the
        * a
        * an
        * these
        * those
        * his
        * hers
        * their
    with probability p.

    Args:
        string: text
        p: probability of removing a given article

    Returns:
        enriched text
    """
    mapping = {article: "" for article in ARTICLES}
    return _sub_words(string, probability=p, mapping=mapping)
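A usage sketch (hypothetical: it assumes `ARTICLES` contains the words listed above and `_sub_words` performs whole-word replacement):

text = "the cat chased a mouse into those bushes"
print(remove_articles(text, p=1.0))
# With p=1.0 every listed article is removed, e.g. "cat chased mouse into bushes"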
af2b9f61dc36159cb027eae03dbf1b645e48be62
3,648,303
import os


def _remove_overlaps(in_file, out_dir, data):
    """Remove regions that overlap with the next region; these cause issues with PureCN.
    """
    out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file)))
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    prev_line = None
                    for line in in_handle:
                        if prev_line:
                            pchrom, pstart, pend = prev_line.split("\t", 4)[:3]
                            cchrom, cstart, cend = line.split("\t", 4)[:3]
                            # Skip if chromosomes match and end overlaps start
                            if pchrom == cchrom and int(pend) > int(cstart):
                                pass
                            else:
                                out_handle.write(prev_line)
                        prev_line = line
                    out_handle.write(prev_line)
    return out_file
16a26ca0bcabf142f99b59bfb3e677c4fa81e159
3,648,304
import collections
import re


def _get_definitions(source):
    # type: (str) -> Tuple[Dict[str, str], int]
    """Extract a dictionary of arguments and definitions.

    Args:
        source: The source for a section of a usage string that contains
            definitions.

    Returns:
        A two-tuple containing a dictionary of all arguments and definitions
        as well as the length of the longest argument.
    """
    max_len = 0
    descs = collections.OrderedDict()  # type: Dict[str, str]
    lines = (s.strip() for s in source.splitlines())
    non_empty_lines = (s for s in lines if s)
    for line in non_empty_lines:
        # maxsplit=1 keeps multi-space runs inside the description intact;
        # an unbounded split would raise ValueError on such lines.
        arg, desc = re.split(r"\s\s+", line, maxsplit=1)
        arg_len = len(arg)
        if arg_len > max_len:
            max_len = arg_len
        descs[arg] = desc
    return descs, max_len
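A small worked example of the parse (arguments split from descriptions at runs of two or more spaces):

usage_section = """-h, --help     Show this help message.
-v, --verbose  Print extra output."""
descs, max_len = _get_definitions(usage_section)
print(dict(descs))  # {'-h, --help': 'Show this help message.', '-v, --verbose': 'Print extra output.'}
print(max_len)      # 13, the length of '-v, --verbose'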
a97fe58c3eb115bff041e77c26868bae3bc54c88
3,648,305
import requests


def pairs_of_response(request):
    """pairwise testing for content-type, headers in responses for all urls
    """
    response = requests.get(request.param[0], headers=request.param[1])
    print(request.param[0])
    print(request.param[1])
    return response
f3a67b1cbf41e2c2e2aa5edb441a449fdff0d8ae
3,648,306
def setup():
    """ The setup wizard screen """
    if DRIVER is True:
        flash(Markup('Driver not loaded'), 'danger')
    return render_template("setup.html")
1c13ba635fcdd3dd193e511002d2d289786980a3
3,648,307
def ldns_pkt_set_edns_extended_rcode(*args):
    """Set the EDNS extended RCODE on a packet (SWIG wrapper)."""
    return _ldns.ldns_pkt_set_edns_extended_rcode(*args)
3fed71706554170d07281a59ff524de52487244d
3,648,308
def sim_spiketrain_poisson(rate, n_samples, fs, bias=0):
    """Simulate spike train from a Poisson distribution.

    Parameters
    ----------
    rate : float
        The firing rate of neuron to simulate.
    n_samples : int
        The number of samples to simulate.
    fs : int
        The sampling rate.
    bias : float, optional
        Additive offset applied to the firing rate before thresholding.

    Returns
    -------
    spikes : 1d array
        Simulated spike train.

    Examples
    --------
    Simulate a spike train from a Poisson distribution.

    >>> spikes = sim_spiketrain_poisson(0.4, 10, 1000, bias=0)
    """
    spikes = np.zeros(n_samples)

    # Create uniform sampling distribution
    unif = np.random.uniform(0, 1, size=n_samples)

    # Create spikes
    mask = unif <= ((rate + bias) * 1/fs)
    spikes[mask] = 1

    return spikes
853a16ae50b444fad47dcbea5f7de4edf58f34b5
3,648,309
import numpy as np
import SimpleITK as sitk


def getHausdorff(labels, predictions):
    """Compute the Hausdorff distance."""
    # Hausdorff distance is only defined when something is detected
    resultStatistics = sitk.StatisticsImageFilter()
    resultStatistics.Execute(predictions)
    if resultStatistics.GetSum() == 0:
        return float('nan')

    # Edge detection is done by ORIGINAL - ERODED, keeping the outer boundaries of lesions.
    # Erosion is performed in 2D.
    eTestImage = sitk.BinaryErode(labels, (1, 1, 0))
    eResultImage = sitk.BinaryErode(predictions, (1, 1, 0))

    hTestImage = sitk.Subtract(labels, eTestImage)
    hResultImage = sitk.Subtract(predictions, eResultImage)

    hTestArray = sitk.GetArrayFromImage(hTestImage)
    hResultArray = sitk.GetArrayFromImage(hResultImage)

    # Convert voxel location to world coordinates. Use the coordinate system of the test image
    # np.nonzero   = elements of the boundary in numpy order (zyx)
    # np.flipud    = elements in xyz order
    # np.transpose = create tuples (x,y,z)
    # labels.TransformIndexToPhysicalPoint converts (xyz) to world coordinates (in mm)
    testCoordinates = np.apply_along_axis(labels.TransformIndexToPhysicalPoint, 1,
                                          np.transpose(np.flipud(np.nonzero(hTestArray))).astype(int))
    resultCoordinates = np.apply_along_axis(labels.TransformIndexToPhysicalPoint, 1,
                                            np.transpose(np.flipud(np.nonzero(hResultArray))).astype(int))

    # Compute distances from test to result, and result to test
    dTestToResult = getDistancesFromAtoB(testCoordinates, resultCoordinates)
    dResultToTest = getDistancesFromAtoB(resultCoordinates, testCoordinates)

    return max(np.percentile(dTestToResult, 95), np.percentile(dResultToTest, 95))
933206c551f2abd6608bf4cdbb847328b8fee113
3,648,310
import re


def _filesizeformat(file_str):
    """
    Remove the unicode characters from the output of the filesizeformat() function.

    :param file_str:
    :returns: A string representation of a filesizeformat() string
    """
    cmpts = re.match(r'(\d+\.?\d*)\S(\w+)', filesizeformat(file_str))
    return '{} {}'.format(cmpts.group(1), cmpts.group(2))
c9811120a257fda8d3fe6c3ee1cd143f17fc4f6e
3,648,311
import math


def radec_to_lb(ra, dec, frac=False):
    """
    Convert from ra, dec to galactic coordinates.

    Formulas from 'An Introduction to Modern Astrophysics (2nd Edition)' by
    Bradley W. Carroll, Dale A. Ostlie (Eq. 24.16 onwards).

    NOTE: This function is not as accurate as the astropy conversion, nor as
    the Javascript calculators found online. However, as using astropy was
    prohibitively slow while running over large populations, we use this
    function. While this function is not as accurate, the under/over
    estimations of the coordinates are equally distributed meaning the errors
    cancel each other in the limit of large populations.

    Args:
        ra (string): Right ascension given in the form '19:06:53'
        dec (string): Declination given in the form '-40:37:14'
        frac (bool): Denote whether coordinates are already fractional or not

    Returns:
        gl, gb (float): Galactic longitude and latitude [fractional degrees]
    """
    if not frac:
        ra, dec = frac_deg(ra, dec)

    a = math.radians(ra)
    d = math.radians(dec)

    # Coordinates of the galactic north pole (J2000)
    a_ngp = math.radians(12.9406333 * 15.)
    d_ngp = math.radians(27.1282500)
    l_ngp = math.radians(123.9320000)

    sd_ngp = math.sin(d_ngp)
    cd_ngp = math.cos(d_ngp)
    sd = math.sin(d)
    cd = math.cos(d)

    # Calculate galactic longitude
    y = cd*math.sin(a - a_ngp)
    x = cd_ngp*sd - sd_ngp*cd*math.cos(a - a_ngp)
    gl = - math.atan2(y, x) + l_ngp
    gl = math.degrees(gl) % 360

    # Shift so in range -180 to 180
    if gl > 180:
        gl = -(360 - gl)

    # Calculate galactic latitude
    gb = math.asin(sd_ngp*sd + cd_ngp*cd*math.cos(a - a_ngp))
    gb = math.degrees(gb) % 360.

    if gb > 270:
        gb = -(360 - gb)

    return gl, gb
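A sanity check (a sketch): the celestial north pole (dec = +90) must sit at a galactic latitude equal to the declination of the galactic north pole, about 27.13 degrees; `frac=True` lets us pass fractional degrees directly and skip `frac_deg`.

gl, gb = radec_to_lb(0.0, 90.0, frac=True)
print(round(gb, 3))  # 27.128, i.e. the d_ngp constant used above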
85156dae81a636f34295bcb8aab6f63243d9c2b3
3,648,312
from collections import Counter
from datetime import datetime


def status_codes_by_date_stats():
    """
    Get stats for status codes by date.

    Returns:
        list: status codes + date grouped by type: 2xx, 3xx, 4xx, 5xx, attacks.
    """
    def date_counter(queryset):
        return dict(Counter(map(
            lambda dt: ms_since_epoch(datetime.combine(
                make_naive(dt), datetime.min.time())),
            list(queryset.values_list('datetime', flat=True)))))

    codes = {low: date_counter(
        RequestLog.objects.filter(status_code__gte=low, status_code__lt=high))
        for low, high in ((200, 300), (300, 400), (400, 500))}
    codes[500] = date_counter(RequestLog.objects.filter(status_code__gte=500))
    codes['attacks'] = date_counter(RequestLog.objects.filter(
        status_code__in=(400, 444, 502)))

    stats = {}
    for code in (200, 300, 400, 500, 'attacks'):
        for date, count in codes[code].items():
            if stats.get(date, None) is None:
                stats[date] = {200: 0, 300: 0, 400: 0, 500: 0, 'attacks': 0}
            stats[date][code] += count
    stats = sorted([(k, v) for k, v in stats.items()], key=lambda x: x[0])
    return stats
e56491e32a774f2b3399eb252bc5c7660539d573
3,648,313
from math import exp  # assumed import; the original snippet leaves `exp` undefined


def r8_y1x ( t ):
    """
    #*****************************************************************************80
    #
    #% R8_Y1X evaluates the exact solution of the ODE.
    #
    #  Licensing:
    #
    #    This code is distributed under the GNU LGPL license.
    #
    #  Modified:
    #
    #    30 August 2010
    #
    #  Author:
    #
    #    John Burkardt
    #
    #  Parameters:
    #
    #    Input, real T, the value of the independent variable.
    #
    #    Output, real Y1X, the exact solution.
    #
    """
    y1x = 20.0 / ( 1.0 + 19.0 * exp ( - 0.25 * t ) )

    return(y1x)
b6146173c09aede82fab599a9e445d3d062bf71a
3,648,314
import tensorflow as tf  # assumed: TF1-style graph API (tf.assign, tf.group)


def get_target_model_ops(model, model_tr):
    """Get operations related to the target model.

    Args:
    * model: original model
    * model_tr: target model

    Returns:
    * init_op: initialization operation for the target model
    * updt_op: update operation for the target model
    """
    init_ops, updt_ops = [], []
    for var, var_tr in zip(model.vars, model_tr.vars):
        init_ops.append(tf.assign(var_tr, var))
        if var not in model.trainable_vars:
            updt_ops.append(tf.assign(var_tr, var))  # direct update for non-trainable variables
        else:
            updt_ops.append(tf.assign(var_tr, (1. - FLAGS.ddpg_tau) * var_tr + FLAGS.ddpg_tau * var))
    return tf.group(*init_ops), tf.group(*updt_ops)
0967e754e2731140ca5d0d85c9c1b6cff7b2cbd2
3,648,315
import itertools
import functools


def next_count(start: int = 0, step: int = 1):
    """Return a callable returning ascending ints.

    >>> nxt = next_count(1)
    >>> nxt()
    1
    >>> nxt()
    2
    """
    count = itertools.count(start, step)
    return functools.partial(next, count)
299d457b2b449607ab02877eb108c076cb6c3e16
3,648,316
def show_locale(key_id: int):
    """Get a locale by ID"""
    return locales[key_id]
6bce0cc45e145a6bdfb5e84cdde8c6d386525094
3,648,317
def refresh():
    """Pull fresh data from Open AQ and replace existing data."""
    DB.drop_all()
    DB.create_all()
    update_db()
    DB.session.commit()
    records = Record.query.all()
    return render_template('aq_base.html', title='Refreshed!', records=records)
a1f138f92f8e7744d8fc5b659bb4d99fc32341e9
3,648,318
from itertools import permutations


def get_similar_taxa():
    """
    Get a list of all pairwise permutations of taxa sorted according to similarity.
    Useful for detecting duplicate and near-duplicate taxonomic entries.

    :return: list of 2-tuples ordered most similar to least
    """
    taxa = Taxon.objects.all()
    taxon_name_set = set([t.name for t in taxa])
    plist = [pair for pair in permutations(taxon_name_set, 2)]
    return sorted(plist, key=similar, reverse=True)
cb94efad4103edc8db0fe90f0e2bad7b52bf29f5
3,648,319
import json


def make_img_id(label, name):
    """
    Creates the image ID for an image.

    Args:
        label: The image label.
        name: The name of the image within the label.

    Returns:
        The image ID.
    """
    return json.dumps([label, name])
4ddcbf9f29d8e50b0271c6ee6260036b8654b90f
3,648,320
def col2rgb(color):
    """
    Convert any colour known by matplotlib to RGB [0-255]
    """
    return rgb012rgb(*col2rgb01(color))
075dbf101d032bf1fb64a8a4fd86407ec0b91b2d
3,648,321
from typing import List
import random


def quick_select_median(values: List[tuple], pivot_fn=random.choice, index=0) -> tuple:
    """
    Implementation of quick select median sort

    :param values: List[tuple]
    :param pivot_fn:
    :param index: int
    :return: tuple
    """
    k = len(values) // 2
    return quick_select(values, k, pivot_fn, index=index)
a2977424a9fc776b2448bed4c17eea754003242c
3,648,322
import time
import hashlib


def get_admin_token(key, previous=False):
    """Returns a token with administrative privileges

    Administrative tokens provide a signature that can be used to authorize
    edits and to trigger specific administrative events.

    Args:
        key (str): The key for generating admin tokens
        previous (bool, optional): Retrieve the most recently issued token for this key

    Returns:
        Token
    """
    if key is None:
        raise ValueError('Value for "key" was expected')
    expires = get_admin_lifetime()
    secret = __get_admin_salt()
    argset = [secret, key]
    ts = int(time.time())
    if previous:
        ts = ts - expires
    # append (not extend) the time-bucket string; the original used extend(),
    # which adds the string character by character and corrupts the joined message
    argset.append(str(int(ts / expires)))
    str_argset = [str(a) for a in argset]
    msg = ':'.join(str_argset)
    tok = Token(hashlib.sha256(msg.encode('utf-8')).hexdigest()[
        0:settings.TOKEN_LENGTH])
    return tok
6f92378676905b8d035bd201abe30d1d951a7fc0
3,648,323
def modSymbolsFromLabelInfo(labelDescriptor):
    """Returns a set of all modification symbols which were used in the labelDescriptor

    :param labelDescriptor: :class:`LabelDescriptor` describes the label setup of an experiment

    :returns: set of modification symbols used in the label descriptor
    """
    modSymbols = set()
    for labelStateEntry in viewvalues(labelDescriptor.labels):
        for labelPositionEntry in viewvalues(labelStateEntry['aminoAcidLabels']):
            for modSymbol in aux.toList(labelPositionEntry):
                if modSymbol != '':
                    modSymbols.add(modSymbol)
    return modSymbols
323382cd5eaae963a04ec6a301260e1f2aed9877
3,648,324
import numpy as np
from scipy.stats import weibull_min


def vulnerability_weibull(x, alpha, beta):
    """Return vulnerability in Weibull CDF

    Args:
        x: 3sec gust wind speed at 10m height
        alpha: parameter value used in defining vulnerability curve
        beta: ditto

    Returns:
        weibull_min.cdf(x, shape, loc=0, scale)

    Note:
        weibull_min.pdf = c/s * (x/s)**(c-1) * exp(-(x/s)**c)
            c: shape, s: scale, loc=0

        weibull_min.cdf = 1 - exp(-(x/s)**c)

        while Australian wind vulnerability is defined as

            DI = 1 - exp(-(x/exp(beta))**(1/alpha))

        therefore:

            s = exp(beta)
            c = 1/alpha
    """
    # convert alpha and beta to shape and scale respectively
    shape = 1 / alpha
    scale = np.exp(beta)

    return weibull_min.cdf(x, shape, loc=0, scale=scale)
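A cross-check (a sketch): the scipy call should match the closed form DI = 1 - exp(-(x / exp(beta)) ** (1 / alpha)) quoted in the docstring.

import numpy as np

x, alpha, beta = 45.0, 0.2, 3.5  # illustrative values only
closed_form = 1 - np.exp(-(x / np.exp(beta)) ** (1 / alpha))
assert np.isclose(vulnerability_weibull(x, alpha, beta), closed_form)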
4bb36643b483309e4a4256eb74bc3bbd7b447416
3,648,325
def _find_best_deals(analysis_json) -> tuple:
    """Finds the best deal out of the analysis"""
    best_deals = []
    for deal in analysis_json:
        if _get_deal_value(analysis_json, deal) > MINIMUM_ConC_PERCENT:
            best_deals.append(deal)
    best_deals.sort(key=lambda x: _get_deal_value(analysis_json, x), reverse=True)
    best_deal = best_deals[0]  # note: raises IndexError when no deal clears the threshold
    return best_deal, best_deals
5415f7104ec01a56249df9a142ff3c31b2964c42
3,648,326
def deserialize(s_transform):
    """
    Convert a serialized transform (a dict with 'location' and 'rotation' keys)
    back into an UnrealTransform.

    :param s_transform:
    :return:
    """
    if s_transform is None:
        return UnrealTransform()
    return UnrealTransform(
        location=s_transform['location'] if 'location' in s_transform else (0, 0, 0),
        rotation=s_transform['rotation'] if 'rotation' in s_transform else (0, 0, 0)
    )
13daf861e84545d2f50b10617ece6d23976eacf0
3,648,327
def spectrum(x, times=None, null_hypothesis=None, counts=1, frequencies='auto',
             transform='dct', returnfrequencies=True):
    """
    Generates a power spectrum from the input time-series data. Before converting
    to a power spectrum, x is rescaled as

    x -> (x - counts * null_hypothesis) / sqrt(counts * null_hypothesis * (1 - null_hypothesis)),

    where the arithmetic is element-wise, and `null_hypothesis` is a vector in (0,1).
    If `null_hypothesis` is None it is set to the mean of x. If that mean is 0 or 1
    then the power spectrum returned is (0,1,1,1,...).

    Parameters
    ----------
    x : array
        The time-series data to convert into a power spectrum

    times : array, optional
        The times associated with the data in `x`. This is not optional for the
        `lsp` transform

    null_hypothesis : None or array, optional
        Used to normalize the data, and should be the null hypothesis that is
        being tested for the probability trajectory from which `x` is drawn.
        If `null_hypothesis` is None it is set to the mean of x.

    counts : int, optional
        The number of counts per time-step, whereby all values of `x` are within
        [0, counts]. In the main usages for drift detection, `x` is the
        clickstream for a single measurement outcome -- so `x` contains integers
        between 0 and the number of measurements at a (perhaps coarse-grained)
        time. `counts` is this number of measurements per time.

    frequencies : 'auto' or array, optional
        The frequencies to generate the power spectrum for. Only relevant for
        transform=`lsp`.

    transform : 'dct', 'dft' or 'lsp', optional
        The transform to use to generate the power spectrum. 'dct' is the
        Type-II discrete cosine transform with an orthogonal normalization;
        'dft' is the discrete Fourier transform with a unitary normalization;
        'lsp' is the floating-mean Lomb-Scargle periodogram with an
        orthogonal-like normalization.

    returnfrequencies : bool, optional
        Whether to return the frequencies corresponding to the powers

    Returns
    -------
    if returnfrequencies:
        array or None
            The frequencies corresponding to the power spectrum. None is
            returned if the frequencies cannot be ascertained (when `times`
            is not specified).

    array or None
        The amplitudes, that are squared to obtain the powers. None is returned
        when the transform does not generate amplitudes (this is the case for `lsp`)

    array
        The power spectrum
    """
    if transform == 'dct' or transform == 'dft':
        if transform == 'dct':
            modes = dct(x, null_hypothesis, counts)
            powers = modes**2
        elif transform == 'dft':
            modes = dft(x, null_hypothesis, counts)
            powers = _np.abs(modes)**2

        if returnfrequencies:
            if isinstance(frequencies, str):
                if times is None:
                    freqs = None
                else:
                    freqs = fourier_frequencies_from_times(times)
            else:
                freqs = frequencies
            return freqs, modes, powers
        else:
            return modes, powers

    elif transform == 'lsp':
        freqs, powers = lsp(x, times, frequencies, null_hypothesis, counts)
        modes = None
        if returnfrequencies:
            return freqs, modes, powers
        else:
            return modes, powers

    else:
        raise ValueError("Input `transform` type invalid!")
5a847f75eaa3fda0bc2906e14d56f7870da1edfa
3,648,328
def CalculatePercentIdentity(pair, gap_char="-"):
    """return number of identical and transitions/transversions substitutions
    in the alignment.
    """
    transitions = ("AG", "GA", "CT", "TC")
    transversions = ("AT", "TA", "GT", "TG", "GC", "CG", "AC", "CA")

    nidentical = 0
    naligned = 0
    ndifferent = 0
    ntransitions = 0
    ntransversions = 0
    nunaligned = 0

    for x in range(min(len(pair.mAlignedSequence1), len(pair.mAlignedSequence2))):
        if pair.mAlignedSequence1[x] != gap_char and \
                pair.mAlignedSequence2[x] != gap_char:
            naligned += 1
            if pair.mAlignedSequence1[x] == pair.mAlignedSequence2[x]:
                nidentical += 1
            else:
                ndifferent += 1
                if (pair.mAlignedSequence1[x] + pair.mAlignedSequence2[x]) in transitions:
                    ntransitions += 1
                if (pair.mAlignedSequence1[x] + pair.mAlignedSequence2[x]) in transversions:
                    ntransversions += 1
        else:
            nunaligned += 1

    return nidentical, ntransitions, ntransversions, naligned, nunaligned
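A worked check (a sketch; any object exposing the two aligned-sequence attributes will do):

from types import SimpleNamespace

pair = SimpleNamespace(mAlignedSequence1="ACGT-", mAlignedSequence2="AGGTT")
print(CalculatePercentIdentity(pair))
# (3, 0, 1, 4, 1): 3 identical columns, 0 transitions, 1 transversion (C/G),
# 4 aligned columns, and 1 column unaligned because of the gap.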
84d67754d9f63eaee5a172425ffb8397c3b5a7ff
3,648,329
import numpy as np
from bokeh.models import Ellipse  # assumed import; the line_*/fill_* styling values
                                  # below are module-level settings in the original source


def render_ellipse(center_x, center_y, covariance_matrix, distance_square):
    """
    Renders a Bokeh Ellipse object given the ellipse center point, covariance, and distance square

    :param center_x: x-coordinate of ellipse center
    :param center_y: y-coordinate of ellipse center
    :param covariance_matrix: NumPy array containing the covariance matrix of the ellipse
    :param distance_square: value for distance square of ellipse
    :return: Bokeh Ellipse object
    """
    values, vectors = np.linalg.eigh(covariance_matrix)
    order = values.argsort()[::-1]
    values = values[order]
    vectors = vectors[:, order]
    angle_rads = np.arctan2(*vectors[:, 0][::-1])

    # Width and height are full width (the axes lengths are thus multiplied by 2.0 here)
    width, height = 2.0 * np.sqrt(values * distance_square)

    ellipse = Ellipse(
        x=center_x, y=center_y,
        width=width, height=height,
        angle=angle_rads,
        line_width=line_width, line_color=line_color,
        fill_color=fill_color, fill_alpha=fill_alpha
    )
    return ellipse
8f26a9a41a8f179f87925f0a931fbc81d2d8549b
3,648,330
import numpy as np


def flow_to_image(flow):
    """Transfer flow map to image.
    Part of code forked from flownet.
    """
    out = []
    maxu = -999.
    maxv = -999.
    minu = 999.
    minv = 999.
    maxrad = -1
    for i in range(flow.shape[0]):
        u = flow[i, :, :, 0]
        v = flow[i, :, :, 1]
        idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
        u[idxunknow] = 0
        v[idxunknow] = 0
        maxu = max(maxu, np.max(u))
        minu = min(minu, np.min(u))
        maxv = max(maxv, np.max(v))
        minv = min(minv, np.min(v))
        rad = np.sqrt(u ** 2 + v ** 2)
        maxrad = max(maxrad, np.max(rad))
        u = u / (maxrad + np.finfo(float).eps)
        v = v / (maxrad + np.finfo(float).eps)
        img = compute_color(u, v)
        out.append(img)
    return np.float32(np.uint8(out))
301ef598b2e6aeda2e2f673854850faf0409e0e8
3,648,331
import numpy as np


def joint_dataset(l1, l2):
    """
    Create a joint dataset for two non-negative integer (boolean) arrays.

    Works best for integer arrays with values [0,N) and [0,M) respectively.
    This function will create an array with values [0,N*M), each value
    representing a possible combination of values from l1 and l2.

    Essentially, this is equivalent to zipping l1 and l2, but much faster by
    using the NumPy native implementations of elementwise addition and
    multiplication.

    :param l1: first integer vector (values within 0-n)
    :type l1: numpy.array or similar
    :param l2: second integer vector (values with 0-m)
    :type l2: numpy.array or similar
    :returns: integer vector expressing states of both l1 and l2
    """
    N = np.max(l1) + 1
    return l2 * N + l1
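A small worked example: with N = max(l1) + 1 = 3, the pair (l1[i], l2[i]) is encoded as l2[i] * 3 + l1[i], so each joint state gets a unique code.

import numpy as np

l1 = np.array([0, 1, 2])
l2 = np.array([0, 1, 0])
print(joint_dataset(l1, l2))  # [0 4 2]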
6ba767739793f7c188d56e24e6e07d6e594c775e
3,648,332
import uuid


def parse(asset, image_data, product):
    """Parses the GEE metadata for ODC use.

    Args:
        asset (str): the asset ID of the product in the GEE catalog.
        image_data (dict): the image metadata to parse.
        product (datacube.model.DatasetType): the product information from the ODC index.

    Returns:
        a namedtuple of the data required by ODC for indexing.
    """
    _id = str(uuid.uuid5(uuid.NAMESPACE_URL, f'EEDAI:{product.name}/{image_data["name"]}'))
    creation_dt = image_data['startTime']
    spatial_reference = image_data['bands'][0]['grid']\
        .get('crsCode', image_data['bands'][0]['grid'].get('crsWkt'))
    # Handle special GEE Infinity GeoJSON responses
    image_data['geometry']['coordinates'][0] = [[float(x), float(y)] for (x, y)
                                                in image_data['geometry']['coordinates'][0]]
    geometry = Geometry(image_data['geometry'])
    grids = [band['grid'] for band in image_data['bands']]
    grids_copy = grids.copy()
    grids = list(filter(lambda grid: grids_copy.pop(grids_copy.index(grid))
                        not in grids_copy, grids))
    shapes = [[grid['dimensions']['height'], grid['dimensions']['width']]
              for grid in grids]
    affine_values = [list(grid['affineTransform'].values())
                     for grid in grids]
    transforms = [list(Affine(affine_value[0], 0, affine_value[1],
                              affine_value[2], 0, affine_value[3]))
                  for affine_value in affine_values]
    bands = tuple(zip(product.measurements, image_data['bands']))
    metadata = Metadata(id=_id,
                        product=product.name,
                        creation_dt=creation_dt,
                        format='GeoTIFF',
                        platform=product.metadata_doc['properties'].get('eo:platform'),
                        instrument=product.metadata_doc['properties'].get('eo:instrument'),
                        from_dt=creation_dt,
                        to_dt=creation_dt,
                        center_dt=creation_dt,
                        asset=asset,
                        geometry=geometry,
                        shapes=shapes,
                        transforms=transforms,
                        grids=grids,
                        spatial_reference=spatial_reference,
                        path=f'EEDAI:{image_data["name"]}:',
                        bands=bands)
    return metadata
815da17849b240a291332a695ca38374bb957d8a
3,648,333
def scale(pix, pixMax, floatMin, floatMax):
    """
    scale takes in
        pix, the CURRENT pixel column (or row)
        pixMax, the total # of pixel columns
        floatMin, the min floating-point value
        floatMax, the max floating-point value
    scale returns the floating-point value that corresponds to pix
    """
    return (pix / pixMax) * (floatMax - floatMin) + floatMin
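Example: mapping pixel column 250 of 500 onto the range [-2.0, 2.0] lands on the midpoint, since (250 / 500) * (2.0 - (-2.0)) + (-2.0) = 0.0.

print(scale(250, 500, -2.0, 2.0))  # 0.0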
455d0233cbeeafd53c30baa4584dbdac8502ef94
3,648,334
def most_distinct(df):
    """
    Find the column with the most distinct values.

    :param df: data frame
    :return: index of the most distinct non-numeric, long-token column, or -1 if none qualifies
    """
    headers = df.columns.values
    dist_list = []  # number of distinct values per column
    for idx, col_name in enumerate(headers):
        col = df[col_name]
        col_list = col.tolist()
        # if len(col_list) == 0:
        #     dist_list.append(-1)
        #     continue
        avg_token_size = sum([len(str(a)) for a in col_list]) * 1.0 / len(col_list)
        if avg_token_size < 4:
            dist_list.append(-1)
        else:
            nums = get_numerics_from_list(col_list)
            if nums is None:
                dist_list.append(len(set(col_list)))
            else:
                dist_list.append(-1)
    max_num = max(dist_list)
    if max_num == -1 or max_num == 0:
        return -1
    for i, c in enumerate(dist_list):
        if c == max_num:
            return i
f21ba5ffd2bfcf5262ffbbe30f24a77522a10bb0
3,648,335
def make_set(value):
    """
    Takes a value and turns it into a set

    !!!! This is important because set(string) will parse a string into
    individual characters vs. adding the string as an element of the set

        i.e. x = 'setvalue'
        set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
        make_set(x) = {'setvalue'}

        or use set([x,]) by adding string as first item in list.

    :param value:
    :return:
    """
    if isinstance(value, list):
        value = set(value)
    elif not isinstance(value, set):
        value = set([value])
    return value
c811729ea83dc1fbff7c76c8b596e26153aa68ee
3,648,336
def _get_parent_cache_dir_url():
    """Get parent cache dir url from `petastorm.spark.converter.parentCacheDirUrl`

    We can only set the url config once.
    """
    global _parent_cache_dir_url  # pylint: disable=global-statement
    conf_url = _get_spark_session().conf \
        .get(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, None)
    if conf_url is None:
        raise ValueError(
            "Please set the spark config {}.".format(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF))
    conf_url = normalize_dir_url(conf_url)
    _check_parent_cache_dir_url(conf_url)
    _parent_cache_dir_url = conf_url
    logger.info(
        'Read %s %s', SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, _parent_cache_dir_url)
    return _parent_cache_dir_url
34abb96b64ab5338a6c9a5ef700a6fbb00f3905f
3,648,337
import numpy as np
import scipp as sc  # assumed: `sc` is the scipp library in the original module


def make_variable(data, variances=None, **kwargs):
    """
    Make a Variable with default dimensions from data
    while avoiding copies beyond what sc.Variable does.
    """
    if isinstance(data, (list, tuple)):
        data = np.array(data)
    if variances is not None and isinstance(variances, (list, tuple)):
        variances = np.array(variances)
    if isinstance(data, np.ndarray):
        dims = ['x', 'y'][:np.ndim(data)]
        return sc.array(dims=dims, values=data, variances=variances, **kwargs)
    return sc.scalar(data, **kwargs)
a712800df05c8c8f5f968a0fee6127919ae56d8f
3,648,338
def dt642epoch(dt64):
    """
    Convert numpy.datetime64 array to epoch time
    (seconds since 1/1/1970 00:00:00)

    Parameters
    ----------
    dt64 : numpy.datetime64
        Single or array of datetime64 object(s)

    Returns
    -------
    time : float
        Epoch time (seconds since 1/1/1970 00:00:00)
    """
    return dt64.astype('datetime64[ns]').astype('float') / 1e9
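A quick check: one day after the epoch is 86400 seconds.

import numpy as np

print(dt642epoch(np.datetime64('1970-01-02')))  # 86400.0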
f7cdaf44312cb0564bf57393a5fde727bc24e566
3,648,339
import tensorflow as tf  # assumed: TF1 graph-mode API (tf.count_nonzero, tf.losses)


def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits):
    """
    Calculate the rpn loss for one FPN layer for a single image.

    The ground truth (GT) anchor labels and anchor boxes have been preprocessed
    to fit the dimensions of the FPN feature map. The GT boxes are encoded as in
    the fast-rcnn paper https://arxiv.org/abs/1506.01497 page 5.

    Args:
        anchor_labels: GT anchor labels, H_feature x W_feature x NA
        anchor_boxes: GT boxes for each anchor, H_feature x W_feature x NA x 4, encoded
        label_logits: label logits from the rpn head, H_feature x W_feature x NA
        box_logits: box logits from the rpn head, H_feature x W_feature x NA x 4

    Returns:
        label_loss, box_loss
    """
    with tf.device('/cpu:0'):
        valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1))
        pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1))
        nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor')
        nr_pos = tf.identity(tf.count_nonzero(pos_mask, dtype=tf.int32), name='num_pos_anchor')
        # nr_pos is guaranteed >0 in C4. But in FPN, even nr_valid could be 0.

    valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask)
    valid_label_logits = tf.boolean_mask(label_logits, valid_mask)

    # with tf.name_scope('label_metrics'):
    #     valid_label_prob = tf.nn.sigmoid(valid_label_logits)
    #     summaries = []
    #     with tf.device('/cpu:0'):
    #         for th in [0.5, 0.2, 0.1]:
    #             valid_prediction = tf.cast(valid_label_prob > th, tf.int32)
    #             nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction')
    #             pos_prediction_corr = tf.count_nonzero(
    #                 tf.logical_and(
    #                     valid_label_prob > th,
    #                     tf.equal(valid_prediction, valid_anchor_labels)),
    #                 dtype=tf.int32)
    #             placeholder = 0.5   # A small value will make summaries appear lower.
    #             recall = tf.cast(tf.truediv(pos_prediction_corr, nr_pos), tf.float32)
    #             recall = tf.where(tf.equal(nr_pos, 0), placeholder, recall, name='recall_th{}'.format(th))
    #             precision = tf.cast(tf.truediv(pos_prediction_corr, nr_pos_prediction), tf.float32)
    #             precision = tf.where(tf.equal(nr_pos_prediction, 0),
    #                                  placeholder, precision, name='precision_th{}'.format(th))
    #             summaries.extend([precision, recall])
    #     add_moving_summary(*summaries)

    # Per-level loss summaries in FPN may appear lower due to the use of a small placeholder.
    # But the total RPN loss will be fine.  TODO make the summary op smarter
    placeholder = 0.

    label_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.cast(valid_anchor_labels, tf.float32), logits=valid_label_logits)
    label_loss = tf.reduce_sum(label_loss) * (1. / cfg.RPN.BATCH_PER_IM)
    label_loss = tf.where(tf.equal(nr_valid, 0), placeholder, label_loss, name='label_loss')

    pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask)
    pos_box_logits = tf.boolean_mask(box_logits, pos_mask)
    delta = 1.0 / 9
    box_loss = tf.losses.huber_loss(
        pos_anchor_boxes, pos_box_logits, delta=delta,
        reduction=tf.losses.Reduction.SUM) / delta
    box_loss = box_loss * (1. / cfg.RPN.BATCH_PER_IM)
    box_loss = tf.where(tf.equal(nr_pos, 0), placeholder, box_loss, name='box_loss')

    # add_moving_summary(label_loss, box_loss, nr_valid, nr_pos)
    return [label_loss, box_loss]
00a554a536350c7d053ae3a4f776008a32f2d8a8
3,648,340
import math


def calc_val_resize_value(input_image_size=(224, 224), resize_inv_factor=0.875):
    """
    Calculate image resize value for validation subset.

    Parameters:
    ----------
    input_image_size : tuple of 2 int
        Input image size (a single int is treated as a square image).
    resize_inv_factor : float
        Resize inverted factor.

    Returns:
    -------
    int
        Resize value.
    """
    if isinstance(input_image_size, int):
        input_image_size = (input_image_size, input_image_size)
    resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
    return resize_value
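Example: the common ImageNet validation setting of 224 with factor 0.875 resizes to 256, since 224 / 0.875 = 256 exactly.

print(calc_val_resize_value((224, 224), 0.875))  # 256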
5a8bcb77d849e62ef5ecfad74f5a3470ab4cfe59
3,648,341
import requests


def fetch_http(url, location):
    """
    Return a `Response` object built from fetching the content at a HTTP/HTTPS
    based `url` URL string, saving the content in a file at `location`
    """
    r = requests.get(url)
    with open(location, 'wb') as f:
        f.write(r.content)

    content_type = r.headers.get('content-type')
    size = r.headers.get('content-length')
    size = int(size) if size else None

    resp = Response(location=location, content_type=content_type, size=size, url=url)
    return resp
b1229bec9c09528f5fb9dcdd14ee2cc6678410c4
3,648,342
import numpy as np
import scipy.sparse as sp


def mat_normalize(mx):
    """Symmetrically normalize a sparse matrix: D^-1/2 * M * D^-1/2"""
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -0.5).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx).dot(r_mat_inv)
    return mx
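A usage sketch: row sums of a symmetric adjacency matrix act as degrees, so this computes the D^-1/2 A D^-1/2 normalization common in GCNs. With both degrees equal to 1, the matrix is unchanged:

import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
print(mat_normalize(A).toarray())  # [[0. 1.] [1. 0.]]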
9caeaf660e7a11b7db558248deb3097e9cca2f57
3,648,343
import datetime
import os
import logging
from functools import partial  # `partial` is used below; assumed import

import tqdm


def runjcast(args):
    """
    Main loop for the jcast flow.

    :param args: parsed arguments
    :return:
    """
    # Get timestamp for out files
    now = datetime.datetime.now()
    write_dir = os.path.join(args.out, 'jcast_' + now.strftime('%Y%m%d%H%M%S'))
    os.makedirs(write_dir, exist_ok=True)

    # Main logger setup
    main_log = logging.getLogger('jcast')
    main_log.propagate = False
    main_log.setLevel(logging.INFO)

    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(write_dir, 'jcast_main.log'))
    fh.setLevel(logging.DEBUG)

    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)

    # add the handlers to the logger
    main_log.addHandler(fh)

    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    main_log.addHandler(ch)

    main_log.info(args)
    main_log.info(__version__)

    #
    # Open the rMATS output file (MXE) here, rename the columns
    #
    assert os.path.exists(os.path.join(args.rmats_folder, 'MXE.MATS.JC.txt')), \
        'rMATS files not found, check directory.'

    rmats_results = RmatsResults(rmats_dir=args.rmats_folder)

    #
    # Read the gtf file using the gtfparse package,
    # then write as a pandas data frame.
    #
    gtf = ReadAnnotations(args.gtf_file)
    gtf.read_gtf()

    #
    # Read genome file into memory
    #
    genome = ReadGenome(args.genome)

    #
    # Model read count cutoff.
    # TODO: move this to a separate class
    #
    if args.model:
        main_log.info('The -m flag is set. The modeled read count will override -r --read values.')

        # Make a numpy array of all junction SJC sum counts
        rmats_results.get_junction_count_array()

        pt, gmm, min_count = model.gaussian_mixture(sum_sjc_array=rmats_results.sum_sjc_array)

        # Plot out the model
        model.plot_model(sum_sjc_array=rmats_results.sum_sjc_array,
                         pt=pt,
                         gmm=gmm,
                         min_count=min_count,
                         write_dir=write_dir,
                         filename='model',
                         )

    # If the m flag is not set, use the r argument value as min count
    else:
        min_count = args.read

    #
    # Main loop through every line of each of the five rMATS files to make junction object, then translate them
    #
    for rma in [rmats_results.rmats_mxe,
                rmats_results.rmats_se,
                rmats_results.rmats_ri,
                rmats_results.rmats_a5ss,
                rmats_results.rmats_a3ss,
                ]:

        junctions = [Junction(**rma.iloc[i].to_dict()) for i in range(len(rma))]

        translate_one_partial = partial(_translate_one,
                                        gtf=gtf,
                                        genome=genome,
                                        args=args,
                                        write_dir=write_dir,
                                        pred_bound=min_count,
                                        )

        #
        # Concurrent futures
        #
        # import concurrent.futures
        # with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()-1) as pool:
        #     for i, f in enumerate(tqdm.tqdm(pool.map(
        #             translate_one_partial,
        #             junctions,
        #     ),
        #             total=len(junctions),
        #             desc='Processing {0} Junctions'.format(rma.jxn_type[0]),
        #     )):
        #         main_log.info('>>>>>> Doing {0} junction {1} for gene {2} {3}'.format(junctions[i].junction_type,
        #                                                                               junctions[i].name,
        #                                                                               junctions[i].gene_symbol,
        #                                                                               junctions[i].gene_id,
        #                                                                               ))
        #         main_log.info(f)

        #
        # Single threaded for-loop
        #
        for jx in tqdm.tqdm(junctions,
                            total=len(junctions),
                            desc='Processing {0} Junctions'.format(rma.jxn_type[0]),
                            ):
            main_log.info('>>>>>> Doing {0} junction {1} for gene {2} {3}'.format(jx.junction_type,
                                                                                  jx.name,
                                                                                  jx.gene_symbol,
                                                                                  jx.gene_id,
                                                                                  ))
            main_log.info(translate_one_partial(jx))

    return True
ed6c35818159e297c4adbc75907da10e79219524
3,648,344
def user_syntax_error(e, source_code):
    """Returns a representation of the syntax error for human consumption.

    This is only meant for small user-provided strings. For input files,
    prefer the regular Python format.

    Args:
        e: The SyntaxError object.
        source_code: The source code.

    Returns:
        A multi-line error message, where the first line is the summary,
        and the following lines explain the error in more detail.
    """
    summary = 'Failed to parse Python-like source code ({msg}).'.format(
        msg=e.msg or '<unknown reason>')
    if e.text is None:
        # Only output the source code.
        return '\n'.join([summary, _indent(source_code)])
    # Alternatively, we could use the middle two lines from
    # traceback.format_exception_only(SyntaxError, e), but it isn't clear that
    # this is an improvement in terms of maintainability. (e.g. we do not then
    # control the indent, and if the format changes in the future the output
    # becomes nonsense).
    error_information = '\n'.join([
        e.text.rstrip('\r\n'),  # \n is added by ast.parse but not exec/eval.
        ' ' * (e.offset - 1) + '^',  # note: offset is 1-based.
    ])
    if '\n' in source_code:
        return '\n'.join([
            summary,
            '',
            'Source:',
            _indent(source_code),
            '',
            'Location:',
            _indent(error_information),
        ])
    else:
        return '\n'.join([summary, _indent(error_information)])
79272de37844b043656a98d796913769e89ebb17
3,648,345
import re


def check_comment(comment, changed):
    """Check the commit comment and return True if the comment is acceptable
    and False if it is not."""
    sections = re.match(COMMIT_PATTERN, comment)
    if sections is None:
        print(f"The comment \"{comment}\" is not in the recognised format.")
    else:
        indicator = sections.group(1)
        if indicator == "M":
            # Allow modification comments to have practically any format
            return True
        elif indicator == "A" or indicator == "P":
            if not changed:
                print(
                    "You have indicated that you have added or removed a rule, but no changes were initially noted by "
                    "the repository.")
            else:
                address = sections.group(4)
                if not valid_url(address):
                    print("Unrecognised address \"{address}\".".format(address=address))
                else:
                    # The user has changed the subscription and has written a suitable comment
                    # message with a valid address
                    return True
    print()
    return False
6ad96bb465e2079895ad87d35b4bc7a000312eaf
3,648,346
def GetBankTaskSummary(bank_task):
    """ Summarizes the bank task
        params: bank_task = value of the object of type bank_task_t
        returns: String with summary of the type.
    """
    format_str = "{0: <#020x} {1: <16d} {2: <#020x} {3: <16d} {4: <16d} {5: <16d} {6: <16d} {7: <16d}"
    out_string = format_str.format(bank_task, bank_task.bt_proc_persona.pid, bank_task.bt_ledger,
                                   unsigned(bank_task.bt_elem.be_refs), unsigned(bank_task.bt_elem.be_made),
                                   bank_task.bt_proc_persona.persona_id, bank_task.bt_proc_persona.uid,
                                   bank_task.bt_proc_persona.gid)

    #if DEVELOPMENT
    format_str = "{0: <#020x} {1: <20s}"
    if hasattr(bank_task.bt_elem, 'be_task'):
        out_string += " " + format_str.format(bank_task.bt_elem.be_task, GetProcNameForTask(bank_task.bt_elem.be_task))
    #endif

    return out_string
afbc3b4e8707428dc951d5d199441923e477ac0c
3,648,347
import numpy as np


def angle(o1, o2):
    """ Find the angles between two DICOM orientation vectors """
    o1 = np.array(o1)
    o2 = np.array(o2)

    o1a = o1[0:3]
    o1b = o1[3:6]
    o2a = o2[0:3]
    o2b = o2[3:6]

    norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)
    norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)

    dot_a = np.dot(o1a, o2a) / norm_a
    dot_b = np.dot(o1b, o2b) / norm_b

    # Clamp floating-point overshoot just above 1.0 so arccos stays defined
    if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:
        dot_a = 1.0
    if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:
        dot_b = 1.0

    angle_a = np.arccos(dot_a) * (180.0 / np.pi)
    angle_b = np.arccos(dot_b) * (180.0 / np.pi)

    return (angle_a, angle_b)
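A sanity check: identical row and column direction cosines give zero angles.

axial = [1, 0, 0, 0, 1, 0]  # a typical axial ImageOrientationPatient
print(angle(axial, axial))  # (0.0, 0.0)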
db6211f4067339b7740eb52cc3f101f6ef69f08c
3,648,348
def get_project_id_v3(user_section='user'):
    """Returns a project ID."""
    r = authenticate_v3_config(user_section, scoped=True)
    return r.json()["token"]["project"]["id"]
7cbb004609e3623d6a5d4bbf45766ea753027f5c
3,648,349
def get_platform_arches(pkgs_info, pkg_name):
    """Return the list of platform arches declared for the given package."""
    package_info = get_package_info(pkgs_info, pkg_name)
    platforms_info = package_info.get('platforms', {})
    platform_arches = platforms_info.get('arches', [])
    return platform_arches
d6da2a95592f1ecf1e89935dfecef84fe2ee9313
3,648,350
import glob
import os


def get_preview_images_by_proposal(proposal):
    """Return a list of preview images available in the filesystem for
    the given ``proposal``.

    Parameters
    ----------
    proposal : str
        The five-digit proposal number (e.g. ``88600``).

    Returns
    -------
    preview_images : list
        A list of preview images available in the filesystem for the
        given ``proposal``.
    """
    preview_images = glob.glob(os.path.join(PREVIEW_IMAGE_FILESYSTEM, 'jw{}'.format(proposal), '*'))
    preview_images = [os.path.basename(preview_image) for preview_image in preview_images]

    return preview_images
e0e952d745899272152824b05a594342bc25ea30
3,648,351
from typing import Optional


def unformat_number(new_str: str, old_str: Optional[str], type_: str) -> str:
    """Undoes some of the locale formatting to ensure float(x) works."""
    ret_ = new_str
    if old_str is not None:
        if type_ in ("int", "uint"):
            new_str = new_str.replace(",", "")
            new_str = new_str.replace(".", "")
            ret_ = new_str
        else:
            end_comma = False
            if new_str.endswith(",") or new_str.endswith("."):
                # If the string ends with a separator, remember it
                end_comma = True
            ret_ = new_str.replace(",", "")
            ret_ = ret_.replace(".", "")
            if end_comma:
                ret_ = ret_ + "."

            # else:
            #     comma_pos = old_str.find(".")
            #     if comma_pos > -1:

            print("Unformatting", new_str, ret_)

            # else:
            #     pos_comma = old_str.find(".")
            #     if pos_comma > -1:
            #         if pos_comma > new_str.find("."):
            #             new_str = new_str.replace(".", "")
            #         ret_ = new_str[0:pos_comma] + "." + new_str[pos_comma:]
            #         print("l2", ret_)

    return ret_
419698cf46c1f6d3620dbb8c6178f0ba387ef360
3,648,352
import os


def mkdirs(path, raise_path_exits=False):
    """Create a dir leaf"""
    if not os.path.exists(path):
        os.makedirs(path)
    else:
        if raise_path_exits:
            raise ValueError('Path %s already exists.' % path)
    return path
d2491589f2ee9d9aa9b9ceeb5ae8d0f678fc5473
3,648,353
def is_triplet(tiles):
    """
    Checks if the tiles form a triplet.
    """
    return len(tiles) == 3 and are_all_equal(tiles)
a0223a0fde80c147de7e255db0a5563424d1a427
3,648,354
def cli(ctx, comment, metadata=""):
    """Add a canned comment

    Output:
        A dictionary containing canned comment description
    """
    return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)
bacfab650aac1a1785a61756a7cbf84aab7df77a
3,648,355
from math import ceil  # assumed import; `to_ranges` comes from the same module


def get_latest_slot_for_resources(latest, task, schedule_set):
    """
    Finds the latest opportunity that a task may be executed

    :param latest: type int
        A maximum bound on the latest point where a task may be executed
    :param task: type DAGSubTask
        The task to obtain the latest starting slot for
    :param schedule_set: type list
        List of occupied time slots of the resources used for the task
    :return: type int
        The latest slot where task may begin
    """
    # Obtain set of occupied slots across resources
    occupied_slots = set()
    for rs in schedule_set:
        occupied_slots |= set(rs)

    # Filter ones that are earlier than latest opportunity
    occupied_slots = list(filter(lambda s: s <= latest, list(sorted(occupied_slots))))

    # Settle for latest if nothing else found
    if not occupied_slots or occupied_slots[-1] < latest:
        return latest

    else:
        occupied_ranges = list(reversed(list(to_ranges(occupied_slots))))
        for (s1, e1), (s2, e2) in zip(occupied_ranges, occupied_ranges[1:]):
            if s1 - e2 >= task.c:
                return e2 + 1

        return occupied_ranges[-1][0] - ceil(task.c)
455f852152877c856d49882facabdd6faabad175
3,648,356
import requests


def _get_text(url: dict):
    """
    Get the text from a message url

    Args:
        url: mapping that holds the rest call URL under the 'messageUrl' key

    Returns:
        response: Request response
    """
    response = requests.get(url["messageUrl"].split("?")[0])
    return response
fe711167748ca6b0201da7501f3b38cc8af8651d
3,648,357
def divideFacet(aFacet):
    """Will always return four facets, given one, rectangle or triangle."""
    # Important: For all facets, first vertex built is always the most south-then-west,
    # going counter-clockwise thereafter.
    if len(aFacet) == 5:
        # This is a triangle facet.
        orient = aFacet[4]  # get the string expressing this triangle's orientation
        # Cases, each needing subdivision:
        #   ______     ___      ___
        #  |\    /|    \ /      /\     | /    \ |     ^
        #  | \  / |     \ /    /  \    |/      \|     N
        #  |__\/__|      \/   /____\   |        |
        #
        #   up  up     down     up     down  down   -- orientations, as "u" or "d" in code below.

        # Find the geodetic bisectors of the three sides, store in sequence using edges defined
        # by aFacet vertex indeces: [0]&[1] , [1]&[2] , [2]&[3]
        newVerts = []
        for i in range(3):
            if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
                newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
            else:
                newLat = (aFacet[i][0] + aFacet[i+1][0]) / 2
                newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
                newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
                newVert = (newLat, newLon)
                newVerts.append(newVert)

        if orient == "u":
            # In the case of up facets, there will be one "top" facet and 3 "bottom"
            # facets after subdivision; we build them in the sequence inside the triangles:
            #
            #            2
            #           /\        Outside the triangle, a number is the index of the vertex in aFacet,
            #          / 1\       and a number with an asterisk is the index of the vertex in newVerts.
            #      2* /____\ 1*
            #        /\ 0  /\
            #       /2 \  /3 \
            #      /____\/____\
            #   0or3    0*     1
            newFacet0 = [newVerts[0], newVerts[1], newVerts[2], newVerts[0], "d"]
            newFacet1 = [newVerts[2], newVerts[1], aFacet[2], newVerts[2], "u"]
            newFacet2 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "u"]
            newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "u"]

        if orient == "d":
            # In the case of down facets, there will be three "top" facets and 1 "bottom"
            # facet after subdivision; we build them in the sequence inside the triangles:
            #
            #    2_____1*_____1
            #    \  2  /\  3  /
            #     \   / 0\   /    Outside the triangle, a number is the index of the vertex in aFacet,
            #      \ /____\ /     and a number with an asterisk is the index of the vertex in newVerts.
            #     2*\  1  /0*
            #        \   /
            #         \ /
            #         0or3
            newFacet0 = [newVerts[2], newVerts[0], newVerts[1], newVerts[2], "u"]
            newFacet1 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "d"]
            newFacet2 = [newVerts[2], newVerts[1], aFacet[2], newVerts[2], "d"]
            newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "d"]

    if len(aFacet) == 6:
        # This is a rectangle facet.
        northBoolean = aFacet[5]  # true for north, false for south
        if northBoolean:
            # North pole rectangular facet.
            # Build new facets in the sequence inside the polygons:
            #
            #    3..........2  <-- North Pole
            #    |          |
            #    |    1     |     Outside the polys, a number is the index of the vertex in aFacet,
            #    |          |     and a number with an asterisk is the index of the vertex in newVerts.
            #    |          |
            #  2*|--------|1*                     /\
            #    |\      /|        on globe      /__\
            #    | \  0 / |        -------->    /\  /\
            #    |  \  /  |                    /__\/__\
            #    | 2 \/ 3 |
            #  0or4''''''''''1
            #         0*
            newVerts = []
            for i in range(4):
                if i != 2:  # on iter == 1 we're going across the north pole - don't need this midpoint.
                    if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
                        newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
                    else:
                        newLat = (aFacet[i][0] + aFacet[i+1][0])/2
                        newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
                        newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
                        newVert = (newLat, newLon)
                        newVerts.append(newVert)
            newFacet0 = [newVerts[0], newVerts[1], newVerts[2], newVerts[0], "d"]            # triangle
            newFacet1 = [newVerts[2], newVerts[1], aFacet[2], aFacet[3], newVerts[2], True]  # rectangle
            newFacet2 = [aFacet[0], newVerts[0], newVerts[2], aFacet[0], "u"]                # triangle
            newFacet3 = [newVerts[0], aFacet[1], newVerts[1], newVerts[0], "u"]              # triangle
        else:
            # South pole rectangular facet
            #
            #         1*
            #    3..........2
            #    | 2  /\  3 |     Outside the polys, a number is the index of the vertex in aFacet,
            #    |   /  \   |     and a number with an asterisk is the index of the vertex in newVerts.
            #    |  / 0  \  |
            #    | /      \ |                   ________
            #  2*|--------|0*                   \  /\  /
            #    |          |      on globe      \/__\/
            #    |    1     |      -------->      \  /
            #    |          |                      \/
            #    |          |
            #  0or4''''''''''1  <-- South Pole
            newVerts = []
            for i in range(4):
                if i != 0:  # on iter == 3 we're going across the south pole - don't need this midpoint
                    if aFacet[i][0] == aFacet[i+1][0] or aFacet[i][1] == aFacet[i+1][1]:
                        newVerts.append(GetMidpoint(aFacet[i], aFacet[i+1]))
                    else:
                        newLat = (aFacet[i][0] + aFacet[i+1][0])/2
                        newLon1, newLon2 = findCrossedMeridiansByLatitude(aFacet[i], aFacet[i + 1], newLat)
                        newLon = lonCheck(newLon1, newLon2, aFacet[i][1], aFacet[i+1][1])
                        newVert = newLat, newLon
                        newVerts.append(newVert)
            newFacet0 = [newVerts[2], newVerts[0], newVerts[1], newVerts[2], "u"]             # triangle
            newFacet1 = [aFacet[0], aFacet[1], newVerts[0], newVerts[2], aFacet[0], False]    # rectangle
            newFacet2 = [newVerts[2], newVerts[1], aFacet[3], newVerts[2], "d"]               # triangle
            newFacet3 = [newVerts[1], newVerts[0], aFacet[2], newVerts[1], "d"]               # triangle

    # In all cases, return the four facets made in a list
    return [newFacet0, newFacet1, newFacet2, newFacet3]
2e5891cb0ab7d23746ca18201be0f7360acc76b4
3,648,358
from mpmath import mp  # assumed import; `compute_a` comes from the same module


def compute_g(n):
    """g_k from DLMF 5.11.3/5.11.5"""
    a = compute_a(2*n)
    g = []
    for k in range(n):
        g.append(mp.sqrt(2)*mp.rf(0.5, k)*a[2*k])
    return g
86aeb38e4ecec67f539586b0a96aa95b396d0639
3,648,359
import sys


def initialize_hs(IMAG_counter):
    """Initialize the HiSeq and return the handle."""

    global n_errors

    experiment = config['experiment']
    method = experiment['method']
    method = config[method]

    if n_errors == 0:  # `is 0` compared identity, not value; == is the correct test
        if not userYN('Initialize HiSeq'):
            sys.exit()

        hs.initializeCams(logger)
        x_homed = hs.initializeInstruments()
        if not x_homed:
            error('HiSeq:: X-Stage did not home correctly')

        # HiSeq Settings
        inlet_ports = int(method.get('inlet ports', fallback=2))
        hs.move_inlet(inlet_ports)  # Move to 2 or 8 port inlet

        # Set laser power
        for color in hs.lasers.keys():
            laser_power = int(method.get(color + ' laser power', fallback=10))
            hs.lasers[color].set_power(laser_power)
            if IMAG_counter > 0:
                if not hs.lasers[color].on:
                    error('HiSeq:: Lasers did not turn on.')

        hs.f.LED('A', 'off')
        hs.f.LED('B', 'off')
        LED('all', 'startup')

        hs.move_stage_out()

    return hs
053fca1b93dad6c0d00a8e07a914f0fdde8df134
3,648,360
import argparse


def print_toc() -> int:
    """
    Entry point for `libro print-toc`

    The meat of this function is broken out into the generate_toc.py module
    for readability and maintainability.
    """
    parser = argparse.ArgumentParser(description="Build a table of contents for an SE source directory and print to stdout.")
    parser.add_argument("-i", "--in-place", action="store_true", help="overwrite the existing toc.xhtml file instead of printing to stdout")
    parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="a Standard Ebooks source directory")
    args = parser.parse_args()

    if not args.in_place and len(args.directories) > 1:
        se.print_error("Multiple directories are only allowed with the [bash]--in-place[/] option.")
        return se.InvalidArgumentsException.code

    for directory in args.directories:
        try:
            se_epub = SeEpub(directory)
        except se.SeException as ex:
            se.print_error(ex)
            return ex.code

        try:
            if args.in_place:
                toc_path = se_epub.path / "src/epub/toc.xhtml"
                with open(toc_path, "r+", encoding="utf-8") as file:
                    file.write(se_epub.generate_toc())
                    file.truncate()
            else:
                print(se_epub.generate_toc())
        except se.SeException as ex:
            se.print_error(ex)
            return ex.code
        except FileNotFoundError as ex:
            se.print_error(f"Couldn’t open file: [path][link=file://{toc_path}]{toc_path}[/][/].")
            return se.InvalidSeEbookException.code

    return 0
e4334e24b9cb886a83f43a20ec3ecc561387b5c0
3,648,361
import numpy as np
import matplotlib.pyplot as plt  # assumed import; plt is used when draw=True


def get_colden(theta_xy, theta_xz, theta_yz, n_sample_factor=1.0, directory=None,
               file_name='save.npy', quick=False, gridrate=0.5, shift=[0, 0, 0],
               draw=False, save=False, verbose=False):
    """
    Rotate gas into arbitrary direction
    """
    if gridrate < 2**(-7):
        boxsize = 10**2
    elif gridrate < 2**(-6):
        boxsize = 3*10**2
    else:
        boxsize = 10**4

    # np.random.randint requires an integer size; n_sample_factor may be a float
    n_points = int(boxsize * n_sample_factor)
    x = np.random.randint(1000, size=n_points)
    y = np.random.randint(1000, size=n_points)
    z = np.random.randint(1000, size=n_points)

    gridsize = 1000 * gridrate  # notice that gridsize is a half of box's side length

    x, y, z = x - 500, y - 500, z - 500
    x, y = rotation(x, y, theta_xy)
    x, z = rotation(x, z, theta_xz)
    y, z = rotation(y, z, theta_yz)
    x, y, z = x + shift[0], y + shift[1], z + shift[2]

    dsort = np.where((np.sqrt(np.square(x) + np.square(y)) < gridsize * np.sqrt(2))
                     & (abs(x) <= gridsize) & (abs(y) <= gridsize))

    if draw:
        plt.show()
    else:
        pass

    z_sort = np.where(abs(z) <= gridsize)[0]
    X_zsorted = x[z_sort]
    Y_zsorted = y[z_sort]

    min_xshift = min(X_zsorted)/2/gridsize
    max_xshift = max(X_zsorted)/2/gridsize
    min_yshift = min(Y_zsorted)/2/gridsize
    max_yshift = max(Y_zsorted)/2/gridsize

    min_xshi, min_yshi, min_zshi = (-1000*np.sqrt(3)/gridsize/2/2,
                                    -1000*np.sqrt(3)/gridsize/2/2,
                                    -1000*np.sqrt(3)/gridsize/2/2)
    max_xshi, max_yshi, max_zshi = (1000*np.sqrt(3)/gridsize/2/2,
                                    1000*np.sqrt(3)/gridsize/2/2,
                                    1000*np.sqrt(3)/gridsize/2/2)

    base_grid_ddx = int(max(max_xshi, abs(min_xshi))) + 1
    base_grid_ddy = int(max(max_yshi, abs(min_yshi))) + 1
    base_grid_ddz = int(max(max_zshi, abs(min_zshi))) + 1

    print("\n", "######################", "\n",
          "base_grid_ddx is ", base_grid_ddx, "\n",
          "#####################", "\n")

    base_grid = np.zeros([2*base_grid_ddz+2+1, 2*base_grid_ddy+1, 2*base_grid_ddx+1])

    i = -base_grid_ddx
    while i <= base_grid_ddx:
        j = -base_grid_ddy
        while j <= base_grid_ddy:
            k = -base_grid_ddz
            while k <= base_grid_ddz:
                component_ijk = np.sum((abs(x + 2 * gridsize * i) <= gridsize)
                                       * (abs(y + 2 * gridsize * j) <= gridsize)
                                       * (abs(z + 2 * gridsize * k) <= gridsize)) / boxsize
                base_grid[0][j+base_grid_ddy][i+base_grid_ddx] = i
                base_grid[1][j+base_grid_ddy][i+base_grid_ddx] = j
                base_grid[k+base_grid_ddz+2][j+base_grid_ddy][i+base_grid_ddx] = component_ijk
                # base_grid[i+base_grid_ddx][j+base_grid_ddy][k+base_grid_ddz] = component_ijk
                k = k + 1
            j = j + 1
        if i % 10 == 1:
            print("{:.2f} % \r".format(100*abs(i+base_grid_ddx)/base_grid_ddx/2))
        i = i + 1

    if verbose:
        print(base_grid)

    if save:
        save_route = directory
        route_name = save_route + file_name
        np.save(route_name, base_grid)

    return len(dsort[0]), base_grid
bb19845492adfde70c85f3a8faf64784130ab7b9
3,648,362
def PyException_GetCause(space, w_exc):
    """Return the cause (another exception instance set by raise ... from ...)
    associated with the exception as a new reference, as accessible from Python
    through __cause__. If there is no cause associated, this returns NULL."""
    w_cause = space.getattr(w_exc, space.wrap('__cause__'))
    if space.is_none(w_cause):
        return None
    return w_cause
dce5c1df12af7074ce25387e493ccac1aaac27ec
3,648,363
import os


def _get_style_data(stylesheet_file_path=None):
    """Read the global stylesheet file and provide the style data as a str.

    Args:
        stylesheet_file_path (str) : The path to the global stylesheet.

    Returns:
        str : The style data read from the stylesheet file
    """
    global __style_data

    if not stylesheet_file_path:
        stylesheet_file_path = os.getenv("QSS_STYLESHEET", None)
        if stylesheet_file_path == "":
            stylesheet_file_path = None

    if __style_data:
        return __style_data

    __style_data = None
    load_default = True

    if stylesheet_file_path is not None:
        try:
            with open(stylesheet_file_path, 'r') as stylesheet_file:
                LOG.info(
                    "Opening style file '{0}'...".format(stylesheet_file_path))
                __style_data = stylesheet_file.read()
                load_default = False
        except Exception as ex:
            __style_data = None
            LOG.error(
                "Error reading the stylesheet file '{0}'. Exception: {1}".format(
                    stylesheet_file_path, str(ex)))

    if load_default:
        try:
            with open(GLOBAL_STYLESHEET, 'r') as default_stylesheet:
                LOG.info("Opening the default stylesheet '{0}'...".format(
                    GLOBAL_STYLESHEET))
                __style_data = default_stylesheet.read()
        except Exception as ex:
            __style_data = None
            LOG.exception("Cannot find the default stylesheet file '{0}'.".format(GLOBAL_STYLESHEET))

    return __style_data
cf72309b07124620c1116efea4334aeae2e7b308
3,648,364
def row_interval(rows: int) -> Expression:
    """
    Creates an interval of rows.

    Example:
    ::

        >>> tab.window(Over
        >>>     .partition_by(col('a'))
        >>>     .order_by(col('proctime'))
        >>>     .preceding(row_interval(4))
        >>>     .following(CURRENT_ROW)
        >>>     .alias('w'))

    :param rows: the number of rows
    """
    return _unary_op("rowInterval", rows)
eaa998eb3498eeed8034d43efb89eaa3cbaa5b2b
3,648,365
from typing import Any


def build_post_async_retry_failed_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
    """Long running post request, service returns a 202 to the initial request, with an entity that
    contains ProvisioningState=’Creating’. Poll the endpoint indicated in the Azure-AsyncOperation
    header for operation status.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. Product to put.
    :paramtype json: any
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). Product to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            json = {
                "id": "str",  # Optional. Resource Id.
                "location": "str",  # Optional. Resource Location.
                "name": "str",  # Optional. Resource Name.
                "properties": {
                    "provisioningState": "str",  # Optional.
                    "provisioningStateValues": "str"  # Optional. Possible values include:
                      # "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created",
                      # "Updating", "Updated", "Deleting", "Deleted", "OK".
                },
                "tags": {
                    "str": "str"  # Optional. A set of tags. Dictionary of :code:`<string>`.
                },
                "type": "str"  # Optional. Resource Type.
            }
    """
    content_type = kwargs.pop("content_type", None)  # type: Optional[str]
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", "/lro/postasync/retry/failed")

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, headers=header_parameters, json=json, content=content, **kwargs)
867cf51bec949367ce2722ca16d35462947623f3
3,648,366
def splitclass(classofdevice):
    """
    Splits the given class of device to return a 3-item tuple with the
    major service class, major device class and minor device class values.

    These values indicate the device's major services and the type of the
    device (e.g. mobile phone, laptop, etc.). If you google for "assigned
    numbers bluetooth baseband" you might find some documents that discuss
    how to extract this information from the class of device.

    Example:
        >>> splitclass(1057036)
        (129, 1, 3)
        >>>
    """
    if not isinstance(classofdevice, int):
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" % \
                    str(classofdevice))

    data = classofdevice >> 2   # skip over the 2 "format" bits
    service = data >> 11
    major = (data >> 6) & 0x1F
    minor = data & 0x3F

    return (service, major, minor)
37c19ab17293b4fd0c46cff24c30e349459f7bd0
3,648,367
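A short worked example of the bit layout above, using the value from the docstring. The class-name comments (Computer, Laptop) follow the Bluetooth assigned-numbers tables and are illustrative, not taken from the source.

cod = 1057036
data = cod >> 2               # drop the 2 low "format" bits
service = data >> 11          # top bits: major service class -> 129
major = (data >> 6) & 0x1F    # next 5 bits: major device class -> 1 (Computer)
minor = data & 0x3F           # low 6 bits: minor device class -> 3 (Laptop)
assert (service, major, minor) == splitclass(1057036) == (129, 1, 3)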
def change_to_local_price(us_fee):
    """Get the US dollar exchange rate from Redis and apply it to us_fee."""
    dollar_change = RedisClient.get('dollar_change')
    if not dollar_change:
        raise ValueError(ERRORS['CHANGE_PRICE'])
    rial_fee = float(us_fee) * int(dollar_change)
    return int(rial_fee)
bdd89a461e84a6acb6f49f2fb0159a9fa7404b17
3,648,368
def get_positive(data_frame, column_name):
    """
    Query given data frame for non-negative values (zero included).

    :param data_frame: Pandas data frame to query
    :param column_name: column name to filter values by
    :return: DataFrame view
    """
    return data_frame.query(f'{column_name} >= 0')
2aec7f611a1b181132f55f2f3ca73bf5025f2474
3,648,369
def axes(*args, **kwargs):
    """
    Add an axes to the figure.

    The axes is added at position *rect* specified by:

    - ``axes()`` by itself creates a default full ``subplot(111)`` window axis.

    - ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
      height] in normalized (0, 1) units.  *axisbg* is the background
      color for the axis, default white.

    - ``axes(h)`` where *h* is an axes instance makes *h* the current
      axis.  An :class:`~matplotlib.axes.Axes` instance is returned.

    =======   ==============   ==============================================
    kwarg     Accepts          Description
    =======   ==============   ==============================================
    axisbg    color            the axes background color
    frameon   [True|False]     display the frame?
    sharex    otherax          current axes shares xaxis attribute
                               with otherax
    sharey    otherax          current axes shares yaxis attribute
                               with otherax
    polar     [True|False]     use a polar axes?
    aspect    [str | num]      ['equal', 'auto'] or a number.  If a number
                               the ratio of x-unit/y-unit in screen-space.
                               Also see
                               :meth:`~matplotlib.axes.Axes.set_aspect`.
    =======   ==============   ==============================================

    Examples:

    * :file:`examples/pylab_examples/axes_demo.py` places custom axes.
    * :file:`examples/pylab_examples/shared_axis_demo.py` uses
      *sharex* and *sharey*.
    """
    nargs = len(args)
    if len(args) == 0:
        return subplot(111, **kwargs)
    if nargs > 1:
        raise TypeError('Only one non keyword arg to axes allowed')

    arg = args[0]

    if isinstance(arg, Axes):
        a = gcf().sca(arg)
    else:
        rect = arg
        a = gcf().add_axes(rect, **kwargs)

    return a
1277afb8b3a6513129632216d8c3a6c2b5718449
3,648,370
def scrape_options_into_new_groups(source_groups, assignments):
    """Puts options from the :py:class:`OptionParser` and
    :py:class:`OptionGroup` objects in `source_groups` into the keys of
    `assignments` according to the values of `assignments`. An example:

    :type source_groups: list of :py:class:`OptionParser` and
                         :py:class:`OptionGroup` objects
    :param source_groups: parsers/groups to scrape options from
    :type assignments: dict with keys that are :py:class:`OptionParser` and
                       :py:class:`OptionGroup` objects and values that are
                       lists of strings
    :param assignments: map empty parsers/groups to lists of destination
                        names that they should contain options for
    """
    all_options = scrape_options_and_index_by_dest(*source_groups)
    return populate_option_groups_with_options(assignments, all_options)
4524a975b604a146814c9c913d3727e0bd296368
3,648,371
def resnext56_32x2d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (32x2d) model for CIFAR-10 from 'Aggregated Residual
    Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(classes=classes, blocks=56, cardinality=32,
                             bottleneck_width=2,
                             model_name="resnext56_32x2d_cifar10", **kwargs)
110b9b4443d4761b7f89a3d01b28d2c4ec8eba00
3,648,372
import argparse


def _get_server_argparser():
    """
    Create a :class:`argparse.ArgumentParser` with standard configuration
    options that cli subcommands which communicate with a server require,
    e.g., hostname and credential information.

    :return: the argparser
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("host", metavar="HOST_NAME",
                        help="hostname where the management service resides")
    parser.add_argument("-u", "--user", metavar="USERNAME",
                        default=None, required=False,
                        help="user registered at the management service")
    parser.add_argument("-p", "--password", metavar="PASSWORD",
                        default=None, required=False,
                        help="password for the management service user")
    parser.add_argument("-t", "--port", metavar="PORT",
                        required=False, default=8443,
                        help="port where the management service resides")
    parser.add_argument("-e", "--truststore", metavar="TRUSTSTORE_FILE",
                        default=False, required=False,
                        help="""name of file containing one or more CA pems
                             to use in validating the management server""")
    return parser
0e9300600640e622cd26ba76145f33ca682e6e4c
3,648,373
def is_internet_file(url):
    """Return True if url starts with http://, https://, or ftp://.

    Args:
        url (str): URL of the link
    """
    return url.startswith(("http://", "https://", "ftp://"))
00f9d90d580da3fe8f6cbc3604be61153b17a154
3,648,374
from typing import Any, Tuple

from .observable.zip import zip_


def zip(*args: Observable[Any]) -> Observable[Tuple[Any, ...]]:
    """Merges the specified observable sequences into one observable
    sequence by creating a :class:`tuple` whenever all of the observable
    sequences have produced an element at a corresponding index.

    .. marble::
        :alt: zip

        --1--2---3-----4---|
        -a----b----c-d------|
        [       zip()      ]
        --1,a-2,b--3,c-4,d-|

    Example:
        >>> res = rx.zip(obs1, obs2)

    Args:
        args: Observable sources to zip.

    Returns:
        An observable sequence containing the result of combining elements
        of the sources as a :class:`tuple`.
    """
    return zip_(*args)
c33915df586bb2c337d7fee275d4df30364cd704
3,648,375
def GetFilter(image_ref, holder):
    """Get the filter of occurrences request for container analysis API."""
    filters = [
        # Display only packages
        'kind = "PACKAGE_MANAGER"',
        # Display only compute metadata
        'has_prefix(resource_url,"https://www.googleapis.com/compute/")',
    ]
    client = holder.client
    resource_parser = holder.resources
    if image_ref:
        image_expander = image_utils.ImageExpander(client, resource_parser)
        self_link, image = image_expander.ExpandImageFlag(
            user_project=properties.VALUES.core.project.Get(),
            image=image_ref.image,
            image_project=image_ref.project,
            return_image_resource=True)
        image_url = self_link + '/id/' + str(image.id)
        filters.append('has_prefix(resource_url,"{}")'.format(image_url))
    return ' AND '.join(filters)
276104cab3c9348151437548ecd69801f20e5363
3,648,376
def predict_image_paths(image_paths, model_path, target_size=(128, 128)):
    """Use a trained classifier to predict the class probabilities of a list
    of images. Returns most likely class and its probability.

    :param image_paths: list of path(s) to the image(s)
    :param model_path: path to the pre-trained model
    :param target_size:
    :type image_paths: list
    :return:
    :rtype: list
    """
    desired_size = target_size[0]
    if model_path in LOADED_MODELS:
        loaded_model = LOADED_MODELS[model_path]
    else:
        # json_path (the model-architecture JSON) is assumed to be defined at
        # module level alongside LOADED_MODELS; it is not a parameter here.
        with open(json_path, 'r') as json_file:
            loaded_model = model_from_json(json_file.read())
        loaded_model.load_weights(model_path)
        LOADED_MODELS[model_path] = loaded_model

    img_list = []
    for image_path in image_paths:
        im = Image.open(image_path)
        old_size = im.size
        ratio = float(desired_size) / max(old_size)
        new_size = tuple([int(x * ratio) for x in old_size])
        im = im.resize(new_size, Image.ANTIALIAS)
        # Paste the resized image centered onto a white square canvas.
        new_im = Image.new("RGB", (desired_size, desired_size), color='White')
        new_im.paste(im, ((desired_size - new_size[0]) // 2,
                          (desired_size - new_size[1]) // 2))
        img_array = np.asarray(new_im)
        img_array = img_array.astype('float32')
        img_array = (img_array / 255)
        img_list.append(img_array)

    predictions = loaded_model.predict(np.array(img_list))
    return predictions
8071a178751ce25edbf664f1a69d1dd43b3e6290
3,648,377
def in_bounding_box(point):
    """Determine whether a point is in our downtown bounding box"""
    lng, lat = point
    in_lng_bounds = DOWNTOWN_BOUNDING_BOX[0] <= lng <= DOWNTOWN_BOUNDING_BOX[2]
    in_lat_bounds = DOWNTOWN_BOUNDING_BOX[1] <= lat <= DOWNTOWN_BOUNDING_BOX[3]
    return in_lng_bounds and in_lat_bounds
c4756c10bc45b81850f0e998be7bf420e355aa4d
3,648,378
def __DataContainerERT_addFourPointData(self, *args, **kwargs):
    """Add a new data point to the end of the dataContainer.

    Add a new 4 point measurement to the end of the dataContainer and
    increase the data size by one. The index of the new data point is
    returned.

    Parameters
    ----------
    *args: [int]
        At least four index values for A, B, M and N.
    **kwargs: dict
        Values for the actual data configuration.

    Returns
    -------
    ret: int
        Index of this new data point.

    Examples
    --------
    >>> import pygimli as pg
    >>> d = pg.DataContainerERT()
    >>> d.setSensors(pg.utils.grange(0, 3, n=4))
    >>> d.addFourPointData(0,1,2,3)
    0
    >>> d.addFourPointData([3,2,1,0], rhoa=1.0)
    1
    >>> print(d)
    Data: Sensors: 4 data: 2
    >>> print(d('rhoa'))
    2 [0.0, 1.0]
    """
    try:
        if len(args) == 1:
            idx = self.createFourPointData(self.size(), args[0][0], args[0][1],
                                           args[0][2], args[0][3])
        else:
            idx = self.createFourPointData(self.size(), args[0], args[1],
                                           args[2], args[3])
    except Exception:
        print("args:", args)
        critical("Can't interpret arguments:", *args)

    for k, v in kwargs.items():
        if not self.haveData(k):
            self.add(k)
        self.ref(k)[idx] = v
    return idx
11d42774e3e422aaa9a8fe664e5e4641b51248d4
3,648,379
import traceback

import concurrent.futures as cf


def spin_up(work_func, cfgs, max_workers=8, log=None, single_thread=False, pass_n=True):
    """
    Run a threadable function (typically a subprocess) in parallel.

    Parameters
    ----------
    work_func : callable
        This does the work. It gets called with one or two arguments.
        The first argument is always a config item from the cfgs list;
        the second is the integer enumeration (if `pass_n` is True).
    cfgs : iterable
        An iterator of config items to pass to the workers.
    max_workers : int
        Maximum number of worker threads.
    log : logging.Logger, default None
        If not None, log to this logger.
    single_thread : bool, default False
        If True, the work_func is not multithreaded, just run in sequence.
        Useful for debugging.
    pass_n : bool, default True
        Should the enumerator be passed to the worker function?
    """
    if single_thread:
        return _spin_up_single_thread(work_func, cfgs, log, pass_n)
    if log is not None:
        log('=== pines.multirunner.spin_up begins ===')
    with cf.ThreadPoolExecutor(max_workers=max_workers) as executor:
        exec_futures = {}
        for n, cfg in enumerate(cfgs):
            if log is not None:
                log(f' = ThreadPoolExecutor {n} =')
            try:
                skip = cfg.skip
            except AttributeError:
                skip = False
            if not skip:
                if pass_n:
                    fut = executor.submit(work_func, cfg, n)
                else:
                    fut = executor.submit(work_func, cfg)
                exec_futures[fut] = n
        for future in cf.as_completed(exec_futures):
            n_future = exec_futures[future]
            try:
                data = future.result()
            except Exception as exc:
                if log is not None:
                    log(f'=== Thread {n_future} generated an exception ===')
                    y = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
                    log(y)
    if log is not None:
        log('=== pines.multirunner.spin_up complete ===')
0c88376f0d892c32749a2a2eac3aa2ab4d0e3863
3,648,380
import os
import logging


def list_run_directories(solid_run_dir):
    """Return list of matching run directories

    Given the name of a SOLiD run directory, find all the 'matching' run
    directories based on the instrument name and date stamp.

    For example, 'solid0127_20120123_FRAG_BC' and
    'solid0127_20120123_FRAG_BC_2' would form a matching set, as would
    'solid0127_20120123_PE_BC' etc.

    For "nonstandard" names (e.g. 'solid0127_20120123_PE_BC_COPY'), if no
    matches are found then just the input is returned.

    Returns a list of matching directories which includes the input.
    """
    # Break up the input
    base_dir = os.path.dirname(os.path.abspath(solid_run_dir))
    run_name = os.path.basename(solid_run_dir.rstrip(os.sep))
    # Get the run info from the name
    try:
        base_run_info = SolidRunInfo(run_name)
    except Exception:
        # Wrong format for name
        logging.error("'%s' not a valid SOLiD run directory name" % solid_run_dir)
        return []
    # List all directories in the base dir and look for matches
    dirs = []
    for f in os.listdir(base_dir):
        if os.path.isdir(os.path.join(base_dir, f)):
            try:
                # Check if instrument name and datestamp match
                run_info = SolidRunInfo(f)
                if run_info.instrument != base_run_info.instrument or \
                   run_info.datestamp != base_run_info.datestamp:
                    # Not a match
                    continue
            except Exception:
                # Wrong format for name, not a match
                continue
            # Check for run definition file
            if not os.path.exists(os.path.join(base_dir, f, f + '_run_definition.txt')):
                continue
            # Must be a match, add to the list
            dirs.append(os.path.join(base_dir, f))
    # Check that the original run is also included
    if os.path.abspath(solid_run_dir) not in dirs:
        dirs = [solid_run_dir]
    # Sort and return directories
    dirs.sort()
    return dirs
9306baf2ca913d18a9fba1f90491ead3d4d5cc36
3,648,381
def refresh_remote_vpsa(session, rvpsa_id, return_type=None, **kwargs):
    """
    Refreshes information about a remote VPSA - such as discovering new
    pools and updating how much free space remote pools have.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type rvpsa_id: str
    :param rvpsa_id: The remote VPSA 'name' value as returned by
        get_all_remote_vpsas.  For example: 'rvpsa-00000001'.  Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string.  Otherwise, it will return a Python
        dictionary.  Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    verify_remote_vpsa_id(rvpsa_id)

    path = '/api/remote_vpsas/{0}/refresh.json'.format(rvpsa_id)

    return session.post_api(path=path, return_type=return_type, **kwargs)
6acc4a049397862c72a21deb8e38f65af5c424a7
3,648,382
def zeros(shape, int32=False):
    """Return a blob of all zeros of the given shape with the correct
    float or int data type.
    """
    return np.zeros(shape, dtype=np.int32 if int32 else np.float32)
68bb2960a3a364f01b8fcc39495e44562936c98f
3,648,383
def _connect():
    """Connect to an XMPP server and return the connection.

    Returns
    -------
    xmpp.Client
        An xmpp client authenticated to an XMPP server.
    """
    jid = xmpp.protocol.JID(settings.XMPP_PRIVATE_ADMIN_JID)
    client = xmpp.Client(server=jid.getDomain(), port=settings.XMPP_PRIVATE_SERVER_PORT)
    client.connect()
    client.auth(
        user=jid.getNode(),
        password=settings.XMPP_PRIVATE_SERVER_PASSWORD,
        resource=jid.getResource(),
    )
    return client
1d407d80c22a371205c85e9223164cfa01063781
3,648,384
def index():
    """Return the main page."""
    return send_from_directory("static", "index.html")
2dbbf0d103e78bcd503f8254aac7f8a1a45f9176
3,648,385
from typing import List


def get_groups(records_data: dict, default_group: str) -> List:
    """
    Returns the specified groups in the SQS Message
    """
    groups = records_data["Groups"]
    try:
        if len(groups) > 0:
            return groups
        else:
            return [default_group]
    except IndexError as err:
        raise err
29ffe05da86816750b59bab03041d8bf43ca8961
3,648,386
def build_stats(history, eval_output, time_callback):
    """Normalizes and returns dictionary of stats.

    Args:
        history: Results of the training step. Supports both
            categorical_accuracy and sparse_categorical_accuracy.
        eval_output: Output of the eval step. Assumes first value is
            eval_loss and second value is accuracy_top_1.
        time_callback: Time tracking callback likely used during keras.fit.

    Returns:
        Dictionary of normalized results.
    """
    stats = {}
    if eval_output:
        stats['accuracy_top_1'] = eval_output[1].item()
        stats['eval_loss'] = eval_output[0].item()
    if history and history.history:
        train_hist = history.history
        # Gets final loss from training.
        stats['loss'] = train_hist['loss'][-1].item()
        # Gets top_1 training accuracy.
        if 'categorical_accuracy' in train_hist:
            stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
        elif 'sparse_categorical_accuracy' in train_hist:
            stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()

    if time_callback:
        timestamp_log = time_callback.timestamp_log
        stats['step_timestamp_log'] = timestamp_log
        stats['train_finish_time'] = time_callback.train_finish_time
        if len(timestamp_log) > 1:
            stats['avg_exp_per_second'] = (
                time_callback.batch_size * time_callback.log_steps *
                (len(time_callback.timestamp_log) - 1) /
                (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
    return stats
3419bcc0b2441fd2ea67ddec0b50574017b71a75
3,648,387
def truncate_single_leafs(nd):
    """
    >>> truncate_single_leafs(node(name='a', subs=[node(name='a', subs=None, layer='a')], layer=None))
    node(name='a', subs=None, layer='a')
    """
    if nd.layer:
        return nd
    if nd.subs and len(nd.subs) == 1:
        if nd.subs[0].layer:
            return node(nd.name, None, nd.subs[0].layer)
        nd2 = truncate_single_leafs(nd.subs[0])
        return node(name=(nd.name, nd.subs[0].name),
                    subs=nd2.subs,
                    layer=nd2.layer,
                    )
    return node(nd.name, [truncate_single_leafs(n) for n in nd.subs], None)
46e837dbd84df3c2cad5c5597d56f3ba716146f8
3,648,388
def postprocess_output(output, example, postprocessor):
    """Applies postprocessing function on a translation output."""
    # Send all parts to the postprocessing.
    if postprocessor is None:
        text = output.output[0]
        score = None
        align = None
    else:
        tgt_tokens = output.output
        src_tokens = example.source_tokens
        text = postprocessor.process_input(
            src_tokens,
            tgt_tokens,
            metadata=example.metadata,
            config=example.config,
            options=example.options,
        )
        score = sum(output.score) if all(s is not None for s in output.score) else None
        attention = output.attention
        if attention and len(attention) == 1:
            attention = attention[0]
            align = (
                align_tokens(src_tokens, tgt_tokens, attention)
                if attention
                else None
            )
        else:
            align = None
    result = {"text": text}
    if score is not None:
        result["score"] = score
    if align is not None:
        result["align"] = align
    return result
57c30c1cf9178ae28ef97c8662ed2fe6559f5dd6
3,648,389
def get_aqua_timestamp(iyear, ichunk, branch_flag):
    """
    Outputs a timestamp string for model runs with a predefined
    year-month-day timestamp, split into 5 x 73 day chunks for a given year.
    """
    if branch_flag == 0:
        if ichunk == 0:
            timestamp = format(iyear, "04") + '-01-01-00000'
        elif ichunk == 1:
            timestamp = format(iyear, "04") + '-03-15-00000'
        elif ichunk == 2:
            timestamp = format(iyear, "04") + '-05-27-00000'
        elif ichunk == 3:
            timestamp = format(iyear, "04") + '-08-08-00000'
        elif ichunk == 4:
            timestamp = format(iyear, "04") + '-10-20-00000'
    else:
        # branch run chunk start days shifted by 1 day
        if ichunk == 0:
            timestamp = format(iyear, "04") + '-01-02-00000'
        elif ichunk == 1:
            timestamp = format(iyear, "04") + '-03-16-00000'
        elif ichunk == 2:
            timestamp = format(iyear, "04") + '-05-28-00000'
        elif ichunk == 3:
            timestamp = format(iyear, "04") + '-08-09-00000'
        elif ichunk == 4:
            timestamp = format(iyear, "04") + '-10-21-00000'
    return timestamp
7566da7f22ee31e7e17a86a908bb510c176d32ea
3,648,390
def aggregate_native(gradients, f, m=None, **kwargs):
    """Multi-Krum rule.

    Args:
        gradients Non-empty list of gradients to aggregate
        f         Number of Byzantine gradients to tolerate
        m         Optional number of averaged gradients for Multi-Krum
        ...       Ignored keyword-arguments
    Returns:
        Aggregated gradient
    """
    # Defaults
    if m is None:
        m = len(gradients) - f - 2
    # Computation
    return native.krum.aggregate(gradients, f, m)
6a0b6309c9296587f581d8d941896643e096a3d5
3,648,391
import types


def isNormalTmpVar(vName: types.VarNameT) -> bool:
    """Is it a normal tmp var"""
    if NORMAL_TMPVAR_REGEX.fullmatch(vName):
        return True
    return False
4ca52c849f913d15ede4c3ed4d4888d68ca5cd8b
3,648,392
import os
import stat
import pickle


def dump_obj(obj, path):
    """Dump object to file."""
    file_name = hex(id(obj))
    file_path = path + file_name
    with open(file_path, 'wb') as f:
        os.chmod(file_path, stat.S_IWUSR | stat.S_IRUSR)
        pickle.dump(obj, f)
    return file_name
d392ffa14e5eb8965ba84e353427358219c9eacc
3,648,393
import time


def count_time(start):
    """
    :param start: the start time, as returned by time.time()
    :return: the elapsed time in seconds
    """
    end = time.time()
    return end - start
1945f6e6972b47d7bbdb6941ee7d80b8a6eedd9a
3,648,394
def split_by_state(xs, ys, states):
    """
    Splits the results of get_frame_per_second into a list of continuous
    line segments, divided by state. This is to plot multiple line segments
    with a different color for each segment.
    """
    res = []
    last_state = None
    for x, y, s in zip(xs, ys, states):
        if s != last_state:
            res.append((s, [], []))
            last_state = s
        res[-1][1].append(x)
        res[-1][2].append(y)
    return res
0a872617bd935f7c52ee0d10e759674969a19c4e
3,648,395
def final_spectrum(t, age, LT, B, EMAX, R, V, dens, dist, Tfir, Ufir, Tnir,
                   Unir, binss, tmin, ebreak, alpha1, alpha2):
    """
    GAMERA computation of the particle spectrum (for the extraction of the
    photon SED at the end of the evolution of the PWN).
    http://libgamera.github.io/GAMERA/docs/time_dependent_modeling.html

    Returns
    -------
    sed : array-like
        Array with the evolved particle spectrum (erg/cm**2/s vs TeV) at
        the last step
    tot : array-like
        Array with the total photon spectrum (erg/cm**2/s vs TeV)
    ic : array-like
        Array with the inverse compton contribution to the total photon
        spectrum (erg/cm**2/s vs TeV)
    ic_cmb : array-like
        Array with the cmb inverse compton contribution to the total photon
        spectrum (erg/cm**2/s vs TeV)
    ic_fir : array-like
        Array with the fir inverse compton contribution to the total photon
        spectrum (erg/cm**2/s vs TeV)
    ic_nir : array-like
        Array with the nir inverse compton contribution to the total photon
        spectrum (erg/cm**2/s vs TeV)
    ic_ssc : array-like
        Array with the self-synchrotron compton contribution to the total
        photon spectrum (erg/cm**2/s vs TeV)
    synch : array-like
        Array with the synchrotron contribution to the total photon
        spectrum (erg/cm**2/s vs TeV)
    """
    fp = gp.Particles()
    p_spectrum = broken_powerlaw(ebreak, alpha1, alpha2, EMAX, 500)
    e = np.logspace(np.log10(gp.m_e), np.log10(3 * np.max(EMAX)), 100)  # particle escape
    t_m, e_m = np.meshgrid(t, e)  # particle escape
    fp.SetTimeAndEnergyDependentEscapeTime(t, e, t_esc(e_m, t_m, B, R))  # particle escape
    fp.SetCustomInjectionSpectrum(p_spectrum)
    fp.SetLuminosity(list(zip(t, LT)))
    fp.SetBField(list(zip(t, B)))
    fp.SetEmax(list(zip(t, EMAX)))
    fp.SetRadius(list(zip(t, R)))
    fp.SetExpansionVelocity(list(zip(t, V)))
    fp.SetAmbientDensity(dens)
    fp.AddThermalTargetPhotons(2.7, 0.25 * gp.eV_to_erg)
    fp.AddThermalTargetPhotons(Tfir, Ufir)
    fp.AddThermalTargetPhotons(Tnir, Unir)
    fp.SetTmin(tmin)

    # energies (in ergs) where radiation will be calculated
    erad = np.logspace(-21, 4., binss) * gp.TeV_to_erg

    fr = gp.Radiation()
    fr.SetDistance(dist)
    fr.AddThermalTargetPhotons(2.7, 0.25 * gp.eV_to_erg)
    fr.AddThermalTargetPhotons(Tfir, Ufir)
    fr.AddThermalTargetPhotons(Tnir, Unir)
    fr.SetAmbientDensity(dens)

    fp.SetAge(age)
    fp.CalculateElectronSpectrum(binss)
    sed = np.array(fp.GetParticleSED())
    sp = np.array(fp.GetParticleSpectrum())
    fr.SetElectrons(sp[:])
    fr.SetBField(fp.GetBField())
    fr.AddSSCTargetPhotons(fp.GetRadius())
    fr.CalculateDifferentialPhotonSpectrum(erad)
    tot = np.array(fr.GetTotalSED())
    ic = np.array(fr.GetICSED())
    ic_cmb = np.array(fr.GetICSED(0))
    ic_fir = np.array(fr.GetICSED(1))
    ic_nir = np.array(fr.GetICSED(2))
    ic_ssc = np.array(fr.GetICSED(3))
    synch = np.array(fr.GetSynchrotronSED())

    return sed, tot, ic, ic_cmb, ic_fir, ic_nir, ic_ssc, synch
00ce850d759739ffcb69a5af5c62325b39bd5446
3,648,396
def modal():
    """Contributions input controller for modal view.

    request.vars.book_id: id of book, optional
    request.vars.creator_id: id of creator, optional

    if request.vars.book_id is provided, a contribution to a book is
        presumed.
    if request.vars.creator_id is provided, a contribution to a creator is
        presumed.
    if neither request.vars.book_id nor request.vars.creator_id are
        provided, a contribution to zco.mx is presumed.

    request.vars.book_id takes precedence over request.vars.creator_id.
    """
    book = None
    creator = None
    if request.vars.book_id:
        book = Book.from_id(request.vars.book_id)
        creator = Creator.from_id(book.creator_id)
    elif request.vars.creator_id:
        creator = Creator.from_id(request.vars.creator_id)
    if not creator:
        raise LookupError(
            'Creator not found, id %s' % request.vars.creator_id)
    return dict(
        book=book,
        creator=creator,
    )
13222d8e4d611fe0005f3df3db05f10c6c9fb057
3,648,397
def returns(data):
    """Returns for any number of days"""
    try:
        trading_days = len(data)
        logger.info(
            "Calculating Returns for {} trading days".format(trading_days))
        df = pd.DataFrame()
        df['daily_returns'] = data.pct_change(1)
        mean_daily_returns = df['daily_returns'].mean()
        returns_data = mean_daily_returns * trading_days
        return returns_data * 100
    except Exception as exception:
        logger.error('Oops! An error Occurred ⚠️')
        raise exception
c533433e23cb2f246cdb3f3d8f445afc9d0ea0bc
3,648,398
def do_rot13_on_input(input_string, ordered_radix=ordered_rot13_radix):
    """Perform a rot13 encryption on the provided message."""
    encrypted_message = str()
    for char in input_string:
        # Two possibilities: in radix, or NOT in radix.
        if char in ordered_radix:
            # must find index of the char in the ordered_radix
            char_index = ordered_radix.index(char)
            mod_char_index = (char_index + 13) % len(ordered_radix)
            mod_char = ordered_radix[mod_char_index]
            encrypted_message += mod_char
        else:
            encrypted_message += char
    return encrypted_message
ccf37364860a661498290245a408d7cc4edbf896
3,648,399
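With a plain lowercase alphabet as the radix this reduces to classic rot13, and characters outside the radix pass through unchanged. The default ordered_rot13_radix is a module-level sequence not shown in this record, so the sketch supplies its own:

import string

print(do_rot13_on_input("hello!", ordered_radix=string.ascii_lowercase))   # 'uryyb!'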