content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, range 0 to 3.66M
def get_mgga_data(mol, grid, rdm1):
    """
    Get atomic orbital and density data.
    See eval_ao and eval_rho docs for details.
    Briefly, returns 0-3 derivatives of the atomic orbitals in ao_data;
    and the density, first derivatives of density, Laplacian of density,
    and kinetic energy density in rho_data.
    """
    ao_data = eval_ao(mol, grid.coords, deriv=3)
    if len(rdm1.shape) == 2:
        rho_data = eval_rho(mol, ao_data, rdm1, xctype='mGGA')
    else:
        part0 = eval_rho(mol, ao_data, rdm1[0], xctype='mGGA')
        part1 = eval_rho(mol, ao_data, rdm1[1], xctype='mGGA')
        rho_data = np.array([part0, part1])
    return ao_data, rho_data
11de048bbe320721204171d9786a997411b953d6
3,645,000
def _stringify_lmer_warnings(fg_lmer):
    """create grid w/ _ separated string of lme4::lmer warning list items, else "" """
    warning_grids = fitgrid.utils.lmer.get_lmer_warnings(
        fg_lmer
    )  # dict of indicator dataframes
    warning_string_grid = pd.DataFrame(
        np.full(fg_lmer._grid.shape, ""),
        index=fg_lmer._grid.index.copy(),
        columns=fg_lmer._grid.columns.copy(),
    )
    # collect multiple warnings into single sorted "_" separated strings
    # on a tidy time x channel grid
    for warning, warning_grid in warning_grids.items():
        for idx, row_vals in warning_grid.iterrows():
            for jdx, col_val in row_vals.iteritems():
                if col_val:
                    if len(warning_string_grid.loc[idx, jdx]) == 0:
                        warning_string_grid.loc[idx, jdx] = warning
                    else:
                        # split, sort, reassemble
                        wrns = "_".join(
                            sorted(
                                warning_string_grid.loc[idx, jdx].split("_")
                                + [warning]
                            )
                        )
                        warning_string_grid.loc[idx, jdx] = wrns
    return warning_string_grid
bbd0fedb2480d4d1ef2a98689861c112552c0b59
3,645,001
def index():
    """Loads the index page for the 'Admin' controller

    :returns: a dictionary to pass to the view with the list of ctr_enabled
              and the active module ('admin')
    """
    ctr_data = get_ctr_data()
    users = db().select(db.auth_user.ALL)
    approvals = db(db.auth_user.registration_key == 'pending').select(db.auth_user.ALL)
    return dict(ctr_enabled=ctr_enabled, ctr_data=ctr_data, active_module='jadmin',
                users=users, approvals=approvals, doctypes=DOC_TYPES)
4b5dc978361b970d2dc6b2f5c6df28b06c9f28bf
3,645,002
def get_language_codes():
    """Returns the list of 2 char input codes of the available languages """
    languages = get_languages()
    two_dig_codes = [k for k, v in languages.items()]
    return two_dig_codes
2e368b73783630835ee1ec32875318725f62d72e
3,645,003
def fun_evaluate_ndcg(user_test_recom_zero_one):
    """
    Compute NDCG for a single user's test set; the final metric is the
    sum over all users divided by the number of users (i.e. the average).
    :param test_lst: the user's test list
    :param zero_one: 0/1 hit sequence for the recommendations
    :param test_mask: mask list corresponding to the user's test list
    :return:
    """
    test_lst, zero_one, test_mask, _ = user_test_recom_zero_one
    test_lst = test_lst[:np.sum(test_mask)]
    zero_one = np.array(zero_one)
    if 0 == sum(zero_one):  # no hits
        return 0.0
    s = 0.0
    idxs = list(np.nonzero(zero_one))[0]
    for idx in idxs:
        s += 1.0 / np.log2(idx + 2)
    m = 0.0
    length = min(len(test_lst), len(zero_one))
    # ideal DCG: every position in the shorter sequence is a hit
    for idx in range(length):
        m += 1.0 / np.log2(idx + 2)
    return s / m
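A minimal usage sketch for the NDCG helper above, assuming it is importable as written; the hypothetical input tuple follows the (test list, 0/1 hits, mask, unused) layout it unpacks.

import numpy as np

# Three test items, hits at ranks 0 and 2: DCG = 1 + 0.5 = 1.5, ideal DCG ~= 2.13.
user_tuple = ([10, 20, 30], [1, 0, 1], [1, 1, 1], None)
print(fun_evaluate_ndcg(user_tuple))  # ~= 0.70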
018dcf1095ebdd02e253ae6c1c36e17d1f13431a
3,645,004
def prettyDataSize(size_in_bytes):
    """ Takes a data size in bytes and formats a pretty string. """
    unit = "B"
    size_in_bytes = float(size_in_bytes)
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "kiB"
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "MiB"
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "GiB"
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "TiB"
    print(size_in_bytes, "%.1f " % size_in_bytes + unit)
    return "%.1f " % size_in_bytes + unit
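A quick usage sketch of the formatter above; the values in the comments are what the repeated division by 1024 yields.

print(prettyDataSize(123456789))  # returns "117.7 MiB"
print(prettyDataSize(512))        # returns "512.0 B"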
30eb068bafe2d9457ea43b59f2f62bdd0ce1c927
3,645,005
import os


def get_env(env_name: str) -> str:
    """
    Safely read an environment variable.
    Raises errors if it is not defined or it is empty.

    :param env_name: the name of the environment variable
    :return: the value of the environment variable
    """
    if env_name not in os.environ:
        raise KeyError(f"{env_name} not defined")
    env_value: str = os.environ[env_name]
    if not env_value:
        raise ValueError(f"{env_name} has yet to be configured")
    return env_value
742a251561e02f59da667d8ebc586d5e0b399103
3,645,006
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0):
    """
    Create a thumbnail by resizing while keeping ratio.
    A sharpen filter is applied for a better looking result.

    :param image: PIL.Image.Image()
    :param size: 2-tuple(width, height)
    :param preserve_aspect_ratio: boolean (default: False)
    :param factor: Sharpen factor (default: 2.0)
    """
    origin_mode = image.mode
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    image.thumbnail(size, Image.ANTIALIAS)
    if preserve_aspect_ratio:
        size = image.size
    sharpener = ImageEnhance.Sharpness(image)
    resized_image = sharpener.enhance(factor)
    # create a transparent image for background and paste the image on it
    # (paste offsets must be integers, hence the floor division)
    image = Image.new('RGBA', size, (255, 255, 255, 0))
    image.paste(resized_image, ((size[0] - resized_image.size[0]) // 2,
                                (size[1] - resized_image.size[1]) // 2))
    if image.mode != origin_mode:
        image = image.convert(origin_mode)
    return image
7f581a0a8b1dccf62a3840269e7b2cea1e78a13b
3,645,007
import subprocess
import pipes


def callHgsql(database, command):
    """ Run hgsql command using subprocess, return stdout data if no error."""
    cmd = ["hgsql", database, "-Ne", command]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    cmdout, cmderr = p.communicate()
    if p.returncode != 0:
        # keep command arguments nicely quoted
        cmdstr = " ".join([pipes.quote(arg) for arg in cmd])
        raise Exception("Error from: " + cmdstr + ": " + cmderr)
    return cmdout
d44dce04452323b417830fad1c34fb1ebca300fe
3,645,008
def validate_mash(seq_list, metadata_reports, expected_species):
    """
    Takes a species name as a string (i.e. 'Salmonella enterica') and creates a dictionary
    with keys for each Seq ID and boolean values if the value pulled from
    MASH_ReferenceGenome matches the string or not
    :param seq_list: List of OLC Seq IDs
    :param metadata_reports: Dictionary retrieved from get_combined_metadata()
    :param expected_species: String containing expected species
    :return: Dictionary with Seq IDs as keys and True/False as values
    """
    seq_status = {}
    for seqid in seq_list:
        print('Validating MASH reference genome for {} '.format(seqid))
        df = metadata_reports[seqid]
        observed_species = df.loc[df['SeqID'] == seqid]['MASH_ReferenceGenome'].values[0]
        if observed_species == expected_species:
            seq_status[seqid] = True
        else:
            seq_status[seqid] = False
    return seq_status
9eb4fd6e1f156a4fed3cc0be0c5b7153a05b038b
3,645,009
def style_strokes(svg_path: str, stroke_color: str = '#ff0000',
                  stroke_width: float = 0.07559055) -> etree.ElementTree:
    """Modifies a svg file so that all black paths become laser cutting paths.

    Args:
        svg_path: a file path to the svg file to modify and overwrite.
        stroke_color: the color, as a hex code, to set paths to.
        stroke_width: the stroke width, in pixels (at 96 pixels per inch), to set paths to.

    Returns:
        The modified XML tree.
    """
    xml = etree.parse(svg_path)
    svg = xml.getroot()
    paths = svg.findall('.//{http://www.w3.org/2000/svg}path'
                        '[@style="stroke:#000000;fill:none"]')
    for path in paths:
        path.set('style', (
            'fill:none;stroke:{};stroke-opacity:1;stroke-width:{};'
            'stroke-miterlimit:4;stroke-dasharray:none'
        ).format(stroke_color, stroke_width))
    return xml
6387625cd71143edb632833cd40006858f239089
3,645,010
import json


def preview_pipeline(
    pipeline: Pipeline, domain_retriever: DomainRetriever, limit: int = 50, offset: int = 0
) -> str:
    """
    Execute a pipeline but return only a slice of the results, determined by the
    `limit` and `offset` parameters, as JSON.

    Return format follows the 'table' JSON table schema used by pandas (see
    https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#orient-options),
    with a few additions related to pagination.

    Note: it's required to use the pandas `to_json` method, as it converts NaN and
    dates to an appropriate format.
    """
    df, _ = execute_pipeline(pipeline, domain_retriever)
    return json.dumps(
        {
            'schema': build_table_schema(df, index=False),
            'offset': offset,
            'limit': limit,
            'total': df.shape[0],
            'data': json.loads(df[offset : offset + limit].to_json(orient='records')),
        }
    )
f1e640ec73bbdeb978762774c81d80e00e98adc9
3,645,011
def redirect_to_url(url):
    """ Return a bcm dictionary with a command to redirect to 'url' """
    return {'mode': 'redirect', 'url': url}
01e4deb80bbd8f8e119c99d64001866c6cd644d9
3,645,012
def get_xml_tagged_data(buffer, include_refstr=True):
    """
    figure out what format file it is and call the respective function to
    return data for training

    :param buffer:
    :param include_refstr: during training do not need refstr
    :return:
    """
    if len(buffer) > 1 and 'http://www.elsevier.com/xml/document' in buffer[1]:
        return get_elsevier_tagged_data(REGEX_XML_TAG_FORMAT.sub('', ' '.join(buffer)))
    if len(buffer) > 1 and 'ADSBIBCODE' in buffer[0] and 'citation_list' in buffer[1]:
        buffer = '<?xml version="1.0"?>' + ' '.join(buffer[1:])
        return get_crossref_tagged_data(buffer, include_refstr)
    if len(buffer) > 1 and 'ADSBIBCODE' in buffer[0] and 'Citation ID' in buffer[1]:
        selected_buffer = ['<?xml version="1.0"?>', '<CitationList>']
        for line in buffer:
            line = line.strip()
            if line.startswith('<Citation ID='):
                selected_buffer.append(line)
        selected_buffer.append('</CitationList>')
        return get_springer_tagged_data('\n'.join(selected_buffer), include_refstr)
    return None
e51cdde728d3e4c5c1f4936bfeb874be8aabf292
3,645,013
from typing import Dict
from typing import Any


def de_dup(
    data: pd.DataFrame,
    drop_duplicates_kwargs: Dict[str, Any] = {},
) -> pd.DataFrame:
    """Drop duplicate rows """
    return data.drop_duplicates(**drop_duplicates_kwargs)
2cd78627226170af7e184bc60caa5ee39290ab42
3,645,014
def get_task_defs(workspace: str, num_validators: int, num_fullnodes: int) -> dict:
    """
    Builds a dictionary of: family -> current_task_def
    task_def can be used to get the following when updating a service:
    - containerDefinitions
    - volumes
    - placementConstraints
    NOTE: only possible to get the current running list of tasks, so tf apply needed after
          need a way to restore to steady state if an ecs update makes node boot loop
    """
    ret = {}
    print("Fetching ECS tasks")
    def_fams = []
    def_fams += [f"{workspace}-validator-{i}" for i in range(num_validators)]
    def_fams += [f"{workspace}-fullnode-{i}" for i in range(num_fullnodes)]
    for fam in def_fams:
        print(f"Fetching task definition for {fam}")
        task_def = execute_cmd_with_json_output(
            ["aws", "ecs", "describe-task-definition", "--task-definition", fam],
            err=f"could not get task definition for {fam}",
        )
        key = task_def.get("taskDefinition").get("family")
        ret[key] = task_def.get("taskDefinition")
        # put the tags separately
        tags = task_def.get("tags")
        ret[key]["tags"] = tags if tags else []
    print()
    return ret
99c131ac60999dd1b2b657575e40f2b813633f61
3,645,015
from azure.mgmt.marketplaceordering.models import OfferType


def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """
    Get the details of Azure Marketplace image terms.
    :param cmd: cmd
    :param urn: URN, in the format of 'publisher:offer:sku:version'. If specified,
                other argument values can be omitted
    :param publisher: Image publisher
    :param offer: Image offer
    :param plan: Image billing plan
    :return:
    """
    publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
    op = cf_vm_image_term(cmd.cli_ctx, '')
    terms = op.get(offer_type=OfferType.VIRTUALMACHINE,
                   publisher_id=publisher,
                   offer_id=offer,
                   plan_id=plan)
    return terms
7483e6d150b784535dd47ead0b63d079e66c391d
3,645,016
def encode_payload(result):
    """JSON encodes a dictionary, named tuple, or object for sending to the server
    """
    try:
        return tornado.escape.json_encode(result)
    except TypeError:
        if type(result) is list:
            return [tornado.escape.json_encode(r) for r in result]
        d = {k: getattr(result, k) for k in result.__dict__}
        return tornado.escape.json_encode(d)
551264dcb0b9ad6d380b8ae393b2dfbfafb93dee
3,645,017
def relu(shape) -> np.ndarray:
    """
    Creates a gaussian distribution numpy array with a mean of 0 and a
    standard deviation of sqrt(2/shape[1]).

    Arguments:
        shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.

    Returns:
        output : np.ndarray : A normally distributed numpy array.
    """
    return np.random.normal(0, np.sqrt(2/shape[1]), shape)
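A minimal sketch of using the initializer above for a weight matrix of shape (outputs, inputs); the shapes are illustrative and the sqrt(2/fan) scaling matches the formula in the function.

import numpy as np

W = relu((64, 128))       # 64x128 weights, scaled by the 128 inputs (shape[1])
print(W.shape, W.std())   # std should be close to sqrt(2/128) ~= 0.125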
e41835dc5e6de8f0b6161c8ea73afeffd8f84c31
3,645,018
def reset_all():
    """Batch reset of batch records."""
    _url = request.args.get("url") or request.referrer
    task_id = request.form.get("task_id")
    task = Task.get(task_id)
    try:
        count = utils.reset_all_records(task)
    except Exception as ex:
        flash(f"Failed to reset the selected records: {ex}")
    else:
        flash(f"{count} {task.task_type.name} records were reset for batch processing.", "info")
    return redirect(_url)
a4241ce81531db7d18d226327f6a96b600a50fd0
3,645,019
def update_to_report(db, data, section_name, img_path, id):
    """ Update data of report """
    query = '''UPDATE report SET data = "{}", section_name = "{}", image_path = "{}" WHERE id = "{}" '''.format(
        data, section_name, img_path, id)
    result = get_db_with_actions(db, query)
    return result
995d17b364bcb6a0b338b1b0323b0fd1f7692f25
3,645,020
from datetime import datetime


def keyboard(table, day=None):
    """Handler for showing the keyboard statistics page."""
    cols, group = "realkey AS key, COUNT(*) AS count", "realkey"
    where = (("day", day),) if day else ()
    counts_display = counts = db.fetch(table, cols, where, group, "count DESC")
    if "combos" == table:
        counts_display = db.fetch(table, "key, COUNT(*) AS count", where,
                                  "key", "count DESC")
    events = db.fetch(table, where=where, order="stamp")
    for e in events:
        e["dt"] = datetime.fromtimestamp(e["stamp"])
    stats, collatedevents = stats_keyboard(events, table)
    days, input = db.fetch("counts", order="day", type=table), "keyboard"
    return bottle.template("heatmap.tpl", locals(), conf=conf)
d3053f06b85d6606353b71a76096558d760e5a8e
3,645,021
import base64
import logging


def fetch(uri, username='', password=''):
    """Can fetch with Basic Authentication"""
    headers = {}
    if username and password:
        headers['Authorization'] = 'Basic ' + base64.b64encode('%s:%s' % (username, password))
    headers['User-Agent'] = 'Twimonial'
    f = urlfetch.fetch(uri, headers=headers)
    logging.debug('Fetching %s (%s): %d' % (uri, username, f.status_code))
    return f
e28e96c049d7b4844ca23bd8943b00b29ede3419
3,645,022
def tsCrossValidationScore(params, series, loss_function=mean_squared_error, nsplits=3, slen=1):
    """
    Parameters:
        params : vector of parameters for optimization
                 (three parameters: alpha, beta, gamma for example)
        series : dataset with timeseries
        slen : season length

    Returns:
        error on CrossValidation
    """
    # errors array
    errors = []
    values = series.values
    alpha, beta, gamma = params
    # set the number of folds for cross-validation
    tscv = TimeSeriesSplit(n_splits=nsplits)
    # iterating over folds, train model on each, forecast and calculate error
    for train, test in tscv.split(values):
        model = HoltWinters(series=values[train], slen=slen, alpha=alpha, beta=beta,
                            gamma=gamma, n_preds=len(test))
        model.triple_exponential_smoothing()
        predictions = model.result[-len(test):]
        actual = values[test]
        error = loss_function(predictions, actual)
        errors.append(error)
    return np.mean(np.array(errors))
f3278ad55a423f6df0b108636c88151a72451cfa
3,645,023
from typing import Any


def get_psf_fwhm(psf_template: np.ndarray) -> float:
    """
    Fit a symmetric 2D Gaussian to the given ``psf_template`` to estimate the
    full width half maximum (FWHM) of the central "blob".

    Args:
        psf_template: A 2D numpy array containing the unsaturated PSF template.

    Returns:
        The FWHM of the PSF template (in pixels).
    """
    # Crop PSF template: too large templates (which are mostly zeros) can
    # cause problems when fitting them with a 2D Gauss function
    psf_cropped = np.copy(psf_template)
    if psf_template.shape[0] >= 33 or psf_template.shape[1] >= 33:
        psf_cropped = crop_center(psf_cropped, (33, 33))

    # Define the grid for the fit
    x, y = np.meshgrid(
        np.arange(psf_cropped.shape[0]), np.arange(psf_cropped.shape[1])
    )

    # Create a new Gaussian2D object
    center = get_center(psf_cropped.shape)
    gaussian = models.Gaussian2D(x_mean=center[0], y_mean=center[1])

    # Define auxiliary function for tieing the standard deviations
    def tie_stddev(gaussian: Any) -> Any:
        return gaussian.y_stddev

    # Enforce symmetry: tie standard deviation parameters to same value to
    # ensure that the resulting 2D Gaussian is always circular
    gaussian.x_stddev.tied = tie_stddev

    # Fix the position (= mean) of the 2D Gaussian
    gaussian.x_mean.fixed = True
    gaussian.y_mean.fixed = True

    # Fit the model to the data
    fit_p = fitting.LevMarLSQFitter()
    gaussian_model = fit_p(gaussian, x, y, np.nan_to_num(psf_cropped))

    # Make sure the returned FWHM is positive
    return abs(float(gaussian_model.x_fwhm))
1982d89eb5f952d10336003e4d580fda3ab210b7
3,645,024
def sqrt(number):
    """
    Calculate the floored square root of a number

    Args:
       number(int): Number to find the floored squared root
    Returns:
       (int): Floored Square Root
    """
    assert number >= 0, 'Only square root of positive numbers are valid'
    start = 0
    end = number
    res = None
    while start <= end:
        middle = (start + end) // 2
        square = middle ** 2
        next_square = (middle + 1) ** 2
        if square <= number and next_square > number:
            res = middle
            break
        if square > number:
            end = middle - 1
        else:
            start = middle + 1
    return res
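A quick check of the binary-search helper above, assuming it is defined as shown:

print(sqrt(27))  # -> 5, since 5**2 = 25 <= 27 < 36 = 6**2
print(sqrt(16))  # -> 4
print(sqrt(0))   # -> 0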
7ed4d547e0dbabebff7ffdf1e368817a415cbb9e
3,645,025
def repetition_sigmoid(M):
    """
    Used to model repetition-driven effects of STDP.
    More repetitions results in stronger increase/decrease.
    """
    return 1.0/(1+np.exp(-0.2*M+10))
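A small numerical illustration of the sigmoid above (assuming it is defined as shown): with the -0.2*M + 10 exponent, the curve crosses 0.5 at M = 50 repetitions and saturates near 1 for large M.

import numpy as np

for M in (0, 50, 100):
    print(M, repetition_sigmoid(M))  # ~4.5e-05, 0.5, ~0.99995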
5cfae40b12f2ff871b60f9cad13308b8bb9189b9
3,645,026
def generate_features(ids0, ids1, forcefield, system, param):
    """
    This function performs a minimization of the energy and computes the matrix features.
    :param ids0: ids of the atoms for the 1st protein
    :param ids1: ids of the atoms for the 2nd protein
    :param forcefield: forcefield for OpenMM simulation
    :param system: system for OpenMM simulation
    :param param: OpenMM parameters
    :return: features - to be used by ML models
    """
    # sources
    # https://en.wikipedia.org/wiki/Electrostatics
    # https://en.wikipedia.org/wiki/Lennard-Jones_potential
    # https://en.wikipedia.org/wiki/Combining_rules

    # constants
    eps0 = 8.8541878128e-12 * su.farad * su.meter**-1
    e = 1.60217662e-19 * su.coulomb
    N = 6.02214179e23 * su.mole**-1  # Avogadro

    # scaling factors
    k0 = (N * (e*e) / (4.0 * np.pi * eps0))

    # get nonbonded interactions parameters for all atoms
    # (Lennard-Jones and electrostatics)
    epsilon = np.array([a.epsilon for a in param.atoms])
    sigma = np.array([a.sigma for a in param.atoms])
    charge = np.array([a.charge for a in param.atoms])

    # pairwise epsilon with units
    E = np.sqrt(epsilon[ids0].reshape(-1, 1) * epsilon[ids1].reshape(1, -1)) * param.atoms[0].uepsilon.unit
    # pairwise sigma with units
    S = 0.5 * (sigma[ids0].reshape(-1, 1) + sigma[ids1].reshape(1, -1)) * param.atoms[0].usigma.unit
    # pairwise partial charges
    Q = charge[ids0].reshape(-1, 1) * charge[ids1].reshape(1, -1)

    # setup MD engine
    integrator = so.LangevinIntegrator(300*su.kelvin, 1/su.picosecond, 0.002*su.picoseconds)
    try:
        platform = so.Platform.getPlatformByName('CUDA')
    except Exception:
        platform = so.Platform.getPlatformByName('CPU')
    simulation = so.app.Simulation(param.topology, system, integrator, platform)

    # set atom coordinates
    simulation.context.setPositions(param.get_coordinates()[0] * su.angstrom)

    # minimize energy
    simulation.minimizeEnergy()

    # get atom coordinates and compute distance matrix between subunits
    state = simulation.context.getState(getPositions=True)
    xyz = state.getPositions(asNumpy=True)
    D = np.linalg.norm(np.expand_dims(xyz[ids0], 1) - np.expand_dims(xyz[ids1], 0), axis=2) * su.angstrom

    # To choose the most relevant residues, we will first choose the pair of atoms with
    # the lowest distance, and then extract a submatrix around it. This way we preserve
    # the chain order of the distance matrix.
    min_i = np.argmin(D)
    min_r, min_c = int(min_i/D.shape[1]), min_i % D.shape[1]

    # Number of interacting residues/particles considered relevant to be stored in the features
    n_interactions = 256
    ids0_min, ids0_max = min_r - n_interactions/2, min_r + n_interactions/2
    ids1_min, ids1_max = min_c - n_interactions/2, min_c + n_interactions/2
    if ids0_min < 0:
        ids0_max -= ids0_min
        ids0_min = 0
    elif ids0_max >= D.shape[0]:
        ids0_min -= ids0_max - D.shape[0] + 1
        ids0_max = D.shape[0]-1
    if ids1_min < 0:
        ids1_max -= ids1_min
        ids1_min = 0
    elif ids1_max >= D.shape[1]:
        ids1_min -= ids1_max - D.shape[1] + 1
        ids1_max = D.shape[1]-1
    ids0_interacting = np.arange(ids0_min, ids0_max, dtype=np.int32)
    ids1_interacting = np.arange(ids1_min, ids1_max, dtype=np.int32)
    D = D[np.ix_(ids0_interacting, ids1_interacting)]
    S = S[np.ix_(ids0_interacting, ids1_interacting)]
    Q = Q[np.ix_(ids0_interacting, ids1_interacting)]
    E = E[np.ix_(ids0_interacting, ids1_interacting)]

    # compute nonbonded potential energies
    U_LJ = (4.0 * E * (np.power(S/D, 12) - np.power(S/D, 6))).value_in_unit(su.kilojoule / su.mole)
    U_el = (k0 * Q / D).value_in_unit(su.kilojoule / su.mole)

    features = {'U_LJ': U_LJ, 'U_el': U_el, 'D_mat': D}

    return features
86fbc0fa67ea5a14b38af4c1fc37566b066b50a3
3,645,027
def extract_bow_feature_vectors(reviews, dictionary):
    """
    Inputs a list of string reviews
    Inputs the dictionary of words as given by bag_of_words
    Returns the bag-of-words feature matrix representation of the data.
    The returned matrix is of shape (n, m), where n is the number of reviews
    and m the total number of entries in the dictionary.

    Feel free to change this code as guided by Problem 9
    """
    num_reviews = len(reviews)
    feature_matrix = np.zeros([num_reviews, len(dictionary)])
    stopwords = np.loadtxt('stopwords.txt', dtype=str)

    for i, text in enumerate(reviews):
        word_list = extract_words(text)
        for word in stopwords:
            word_list = list(filter((word).__ne__, word_list))
        word_list_pairs = [(word1, word2) for (word1, word2)
                           in zip(word_list[:-1], word_list[1:])]
        for (word1, word2) in word_list_pairs:
            if (word1, word2) in dictionary:
                # This code gets a non-binary indicator
                feature_matrix[i, dictionary[(word1, word2)]] += 1
    return feature_matrix
3373179024127ab0202ab50f3e20184d02ed016c
3,645,028
from astropy.io.fits import Header
from scipy.optimize import least_squares


def model_wcs_header(datamodel, get_sip=False, order=4, step=32):
    """
    Make a header with approximate WCS for use in DS9.

    Parameters
    ----------
    datamodel : `jwst.datamodels.ImageModel`
        Image model with full `~gwcs` in `with_wcs.meta.wcs`.

    get_sip : bool
        If True, fit a `astropy.modeling.models.SIP` distortion model to the
        image WCS.

    order : int
        Order of the SIP polynomial model.

    step : int
        For fitting the SIP model, generate a grid of detector pixels every
        `step` pixels in both axes for passing through
        `datamodel.meta.wcs.forward_transform`.

    Returns
    -------
    header : '~astropy.io.fits.Header`
        Header with simple WCS definition: CD rotation but no distortion.
    """
    sh = datamodel.data.shape

    try:
        pipe = datamodel.meta.wcs.pipeline[0][1]
        if 'offset_2' in pipe.param_names:
            # NIRISS WCS
            c_x = pipe.offset_2.value
            c_y = pipe.offset_3.value
        else:
            # Simple WCS
            c_x = pipe.offset_0.value
            c_y = pipe.offset_1.value
        crpix = np.array([-c_x+1, -c_y+1])
    except:
        crpix = np.array(sh)/2.+0.5

    crval = datamodel.meta.wcs.forward_transform(crpix[0], crpix[1])
    cdx = datamodel.meta.wcs.forward_transform(crpix[0]+1, crpix[1])
    cdy = datamodel.meta.wcs.forward_transform(crpix[0], crpix[1]+1)

    header = Header()
    header['RADESYS'] = 'ICRS'
    header['CTYPE1'] = 'RA---TAN'
    header['CTYPE2'] = 'DEC--TAN'
    header['CUNIT1'] = header['CUNIT2'] = 'deg'

    header['CRPIX1'] = crpix[0]
    header['CRPIX2'] = crpix[1]
    header['CRVAL1'] = crval[0]
    header['CRVAL2'] = crval[1]

    cosd = np.cos(crval[1]/180*np.pi)
    header['CD1_1'] = (cdx[0]-crval[0])*cosd
    header['CD1_2'] = (cdy[0]-crval[0])*cosd
    header['CD2_1'] = cdx[1]-crval[1]
    header['CD2_2'] = cdy[1]-crval[1]

    cd = np.array([[header['CD1_1'], header['CD1_2']],
                   [header['CD2_1'], header['CD2_2']]])

    if not get_sip:
        return header

    #### Fit a SIP header to the gwcs transformed coordinates
    v, u = np.meshgrid(np.arange(1, sh[0]+1, step), np.arange(1, sh[1]+1, step))
    x, y = datamodel.meta.wcs.forward_transform(u, v)
    y -= crval[1]
    x = (x-crval[0])*np.cos(crval[1]/180*np.pi)

    a_names = []
    b_names = []
    #order = 4
    for i in range(order+1):
        for j in range(order+1):
            ext = '{0}_{1}'.format(i, j)
            if (i+j) > order:
                continue
            if ext in ['0_0', '0_1', '1_0']:
                continue
            a_names.append('A_'+ext)
            b_names.append('B_'+ext)

    p0 = np.zeros(4+len(a_names)+len(b_names))
    p0[:4] += cd.flatten()

    args = (u.flatten(), v.flatten(), x.flatten(), y.flatten(), crpix,
            a_names, b_names, cd, 0)

    # Fit the SIP coeffs
    fit = least_squares(_objective_sip, p0, jac='2-point',
                        bounds=(-np.inf, np.inf), method='lm', ftol=1e-08,
                        xtol=1e-08, gtol=1e-08, x_scale=1.0, loss='linear',
                        f_scale=1.0, diff_step=None, tr_solver=None,
                        tr_options={}, jac_sparsity=None, max_nfev=1000,
                        verbose=0, args=args, kwargs={})

    # Get the results
    args = (u.flatten(), v.flatten(), x.flatten(), y.flatten(), crpix,
            a_names, b_names, cd, 1)
    cd_fit, a_coeff, b_coeff = _objective_sip(fit.x, *args)

    # Put in the header
    for i in range(2):
        for j in range(2):
            header['CD{0}_{1}'.format(i+1, j+1)] = cd_fit[i, j]

    header['CTYPE1'] = 'RA---TAN-SIP'
    header['CTYPE2'] = 'DEC--TAN-SIP'

    header['A_ORDER'] = order
    for k in a_coeff:
        header[k] = a_coeff[k]

    header['B_ORDER'] = order
    for k in b_coeff:
        header[k] = b_coeff[k]

    return header
1b43d5382b92f7da47d72dcf5acaaca65c6329df
3,645,029
def create_session():
    """Return a session to be used for database connections

    Returns:
        Session: SQLAlchemy session object
    """
    # Produces integrity errors!
    # return _Session()

    # db.session is managed by Flask-SQLAlchemy and bound to a request
    return db.session
8c7dbc2ee1db64cfbbb3466704a7e4f70ef073be
3,645,030
def meta_to_indexes(meta, table_name=None, model_name=None):
    """Find all the indexes (primary keys) based on the meta data """
    indexes, pk_field = {}, None
    indexes = []
    for meta_model_name, model_meta in meta.iteritems():
        if (table_name or model_name) and not (
                table_name == model_meta['Meta'].get('db_table', '')
                or model_name == meta_model_name):
            continue
        field_name, field_infodict, score = find_index(model_meta)
        indexes.append(('%s.%s' % (meta_model_name, field_name), field_infodict, score))
    return indexes
12c12055f424680a68d81d5466dc6d3515d797a5
3,645,031
def index():
    """Render and return the index page.

    This is an informational landing page for non-logged-in users, and the
    corp homepage for those who are logged in.
    """
    success, _ = try_func(auth.is_authenticated)
    if success:
        module = config.get("modules.home")
        if module:
            return config.modules[module].home()
        return render_template("default_home.mako")
    return render_template("landing.mako")
9ebc7a98ee60a59a5bed6ec5a726c5c1a5a11ca7
3,645,032
def _(origin, category="", default=None):
    """
    This function returns the localized string.
    """
    return LOCALIZED_STRINGS_HANDLER.translate(origin, category, default)
58f1d7033b1689068bf1bcb532eea78e8bf51250
3,645,033
from datetime import datetime


def next_month(month: datetime) -> datetime:
    """Find the first day of the next month given a datetime.

    :param month: the date
    :type month: datetime
    :return: The first day of the next month.
    :rtype: datetime
    """
    dt = this_month(month)
    return datetime((dt+_A_MONTH).year, (dt+_A_MONTH).month, 1)
1d5cb70fa7b3d98689e3dd967aa95deb29f5de45
3,645,034
from typing import Mapping
from typing import OrderedDict


def walk_json(d, func):
    """ Walk over a parsed JSON nested structure `d`, apply `func` to each leaf
    element and replace it with result
    """
    if isinstance(d, Mapping):
        return OrderedDict((k, walk_json(v, func)) for k, v in d.items())
    elif isinstance(d, list):
        return [walk_json(v, func) for v in d]
    else:
        return func(d)
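A small usage sketch for the JSON walker above, applying a leaf transform to a nested structure:

doc = {"a": [1, 2, {"b": 3}], "c": "4"}
print(walk_json(doc, lambda leaf: int(leaf) * 10))
# OrderedDict([('a', [10, 20, OrderedDict([('b', 30)])]), ('c', 40)])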
cc977f4cf3eaec03bd591fa4cd1e44ab5717caee
3,645,035
def getUser():
    """This method will be called if a GET request is made to the /user/ route
    It will get the details of a specified user

    Parameters
    ----------
    username
        the name of the user to get info about

    Raises
    ------
    DoesNotExist
        Raised if the username provided does not match a user in the database

    Returns
    -------
    String:
        Either the json of the user, or an error message saying the user doesn't exist
    """
    try:
        user = User.objects(username=request.args["username"]).get()
        return str(user.json())
    except DoesNotExist:
        return "That user does not exist, please try again :)"
edc8570b83e4a173ac8028f2d8b51e93a19b27a1
3,645,036
import types


def _create_ppo_agent(
        time_step_spec: types.NestedTensorSpec,
        action_spec: types.NestedTensorSpec,
        preprocessing_layers: types.NestedLayer,
        policy_network: types.Network) -> tfa.agents.TFAgent:
    """Creates a ppo_agent."""
    actor_network = policy_network(
        time_step_spec.observation,
        action_spec,
        preprocessing_layers=preprocessing_layers,
        name='ActorDistributionNetwork')
    critic_network = constant_value_network.ConstantValueNetwork(
        time_step_spec.observation, name='ConstantValueNetwork')
    return ppo_agent.PPOAgent(
        time_step_spec,
        action_spec,
        actor_net=actor_network,
        value_net=critic_network)
931e7078bdf634187ac0a506decb2d651373fbab
3,645,037
def stiffness_matrix_CST(element=tetra_4()):
    """Calculate stiffness matrix for linear elasticity"""
    element.volume()
    B = strain_matrix_CST(element)
    D = material()
    print('B')
    print(B)
    print('V', element.V)
    return element.V * np.dot(np.dot(np.transpose(B), D), B)
5c5c443ab1007997848357d698fcce91f069a13f
3,645,038
async def can_action_member(bot, ctx: SlashContext, member: discord.Member) -> bool:
    """ Stop mods from doing stupid things. """
    # Stop mods from actioning on the bot.
    if member.id == bot.user.id:
        return False

    # Stop mods from actioning one another, people higher ranked than them or themselves.
    if member.top_role >= ctx.author.top_role:
        role_muted = discord.utils.get(member.guild.roles, id=settings.get_value("role_muted"))
        role_restricted = discord.utils.get(member.guild.roles, id=settings.get_value("role_restricted"))
        # Enable mods to use /unmute and /unrestrict on others since the roles "Muted"
        # and "Restricted" are placed higher than "Staff".
        if role_muted in member.roles or role_restricted in member.roles:
            return True
        return False

    # Checking if Bot is able to even perform the action
    if member.top_role >= member.guild.me.top_role:
        return False

    # Allow owner to override all limitations.
    if member.id == ctx.guild.owner_id:
        return True

    # Otherwise, the action is probably valid, return true.
    return True
c3fa4eee66ec80df2c4f91cfee181d900f0b8c45
3,645,039
import types


def trim_waveform_signal(
    tr: obspy.Trace, cfg: types.ModuleType = config
) -> obspy.Trace:
    """Cut the time series to signal window

    Args:
        tr: time series
        cfg: configuration file

    Returns:
        tr: trimmed time series
    """
    starttime, endtime = signal_window(tr, cfg)
    tr.trim(starttime=starttime, endtime=endtime)
    return tr
bce3a0f88903b7c62c287f6e40bb7d377215a45d
3,645,040
def animation_plot(
    x,
    y,
    z_data,
    element_table,
    ani_fname,
    existing_fig,
    ani_funcargs=None,
    ani_saveargs=None,
    kwargs=None,
):
    """ Tricontourf animation plot. Resulting file will be saved to MP4 """
    global tf

    # Subtract 1 from element table to align with Python indexing
    t = tri.Triangulation(x, y, element_table - 1)

    # Preassign fig and ax
    if existing_fig is None:
        fig, ax, tf = filled_mesh_plot(x, y, z_data[:, 0], element_table, **kwargs)
    else:
        fig, ax, tf = existing_fig

    # animation function
    def animate(i):
        global tf
        z = z_data[:, i]
        for c in tf.collections:
            c.remove()  # removes only the contours, leaves the rest intact
        tf = ax.tricontourf(t, z, **kwargs)

    anim = animation.FuncAnimation(fig, animate, frames=z_data.shape[1], repeat=True)
    anim.save(ani_fname, writer=animation.FFMpegWriter(**ani_funcargs), **ani_saveargs)

    return fig, ax, tf
e9ef60c6240900de6fd082ff5933dbbbc471933a
3,645,041
def preprocess_adj(adj):
    """Preprocessing of adjacency matrix for simple GCN model and conversion to
    tuple representation."""
    # adj_appr = np.array(sp.csr_matrix.todense(adj))
    # # adj_appr = dense_lanczos(adj_appr, 100)
    # adj_appr = dense_RandomSVD(adj_appr, 100)
    # if adj_appr.sum(1).min() < 0:
    #     adj_appr = adj_appr - (adj_appr.sum(1).min()-0.5)*sp.eye(adj_appr.shape[0])
    # else:
    #     adj_appr = adj_appr + sp.eye(adj_appr.shape[0])
    # adj_normalized = normalize_adj(adj_appr)

    # adj_normalized = normalize_adj(adj+sp.eye(adj.shape[0]))
    # adj_appr = np.array(sp.coo_matrix.todense(adj_normalized))
    # # adj_normalized = dense_RandomSVD(adj_appr, 100)
    # adj_normalized = dense_lanczos(adj_appr, 100)

    adj_normalized = normalize_adj(sp.eye(adj.shape[0]) + adj)
    # adj_normalized = sp.eye(adj.shape[0]) + normalize_adj(adj)
    return sparse_to_tuple(adj_normalized)
82674be0b9573c24135e56f2a5e3988fbb0966e1
3,645,042
def plot_figure_one_input_resource_2(style_label=""):
    """
    Plot two bar graphs side by side, with letters as x-tick labels.
    latency_dev_num_non_reuse.log
    """
    prng = np.random.RandomState(96917002)
    #plt.set_cmap('Greys')
    #plt.rcParams['image.cmap']='Greys'

    # Tweak the figure size to be better suited for a row of numerous plots:
    # double the width and halve the height. NB: use relative changes because
    # some styles may have a figure size different from the default one.
    (fig_width, fig_height) = plt.rcParams['figure.figsize']
    fig_size = [fig_width * 1.8, fig_height / 2]

    fig, ax = plt.subplots(ncols=1, nrows=1, num=style_label,
                           figsize=fig_size, squeeze=True)
    plt.set_cmap('Greys')
    ax.set_ylabel("Latency (s)", fontsize=larger_size)
    ax.set_xlabel("Number of devices", fontsize=larger_size)

    grid = "3x3"
    config = ["MR-BODPv2", "WST-FGP-R"]
    #np.array(load_data_vector(config[0]+"/" + grid + "/single_resource/commu_size_steal.log"))+
    #np.array(load_data_vector(config[0] + "/" + grid + "/single_resource/commu_size_gateway.log"))
    y1 = load_data_vector(config[0] + "/single_resource/latency.log")
    y2 = load_data_vector(config[1] + "/" + grid + "/single_resource/latency.log")

    x = np.arange(len(y1))
    print(x)
    width = 0.2

    latency1 = ax.bar(x-0.5*width, y1, width, label='MR-BODP',
                      color=[0.4, 0.4, 0.4], edgecolor=[0, 0, 0])
    latency2 = ax.bar(x+0.5*width, y2, width, label='WST-FGP (Shuffle)',
                      color=[0.8, 0.8, 0.8], edgecolor=[0, 0, 0], hatch='//')

    ax.set_xticks(x)
    ax.set_xticklabels(['1', '2', '3', '4', '5', '6'])
    ax.set_xlim([-0.5, len(x)-0.3])
    ax.set_ylim([0, 30])
    plt.tick_params(labelsize=larger_size)

    y1 = np.array(load_data_vector(config[0] + "/single_resource/commu_size.log"))
    y2 = np.array(load_data_vector(config[1] + "/" + grid + "/single_resource/commu_size_steal.log")) + \
         np.array(load_data_vector(config[1] + "/" + grid + "/single_resource/commu_size_gateway.log"))

    ax2 = ax.twinx()
    comm1 = ax2.plot(x-width, y1, label='MR-BODP', linestyle='-.', linewidth=4,
                     color=[0.4, 0.4, 0.4], marker="s", markersize=16)
    comm2 = ax2.plot(x+width, y2, label='WST-FGP (Shuffle)', linestyle='-.', linewidth=4,
                     color=[0.8, 0.8, 0.8], marker="<", markersize=16)
    ax2.set_ylabel("Commu. size (MB)", fontsize=larger_size)
    ax2.set_xticklabels(['1', '2', '3', '4', '5', '6'])
    ax2.set_ylim([-30, 25])
    ax2.set_yticks([0, 10, 20])
    plt.tick_params(labelsize=larger_size)

    #plt.legend(loc=9, ncol=4, bbox_to_anchor=(0.5, 1.16), framealpha=1, prop={'size': larger_size})
    plt.figlegend((latency1[0], comm1[0], latency2[0], comm2[0]),
                  ('MR-BODP', ' ', 'WST-FGP ('+grid+' Shuffle)', ' '),
                  loc=9, ncol=2, bbox_to_anchor=(0.5, 1), framealpha=1,
                  prop={'size': larger_size})
    #fig.tight_layout()

    return fig
69c25829adf9ef3d5dc818621bb724bb92c29f31
3,645,043
def comp_rot_dir(self):
    """Compute the rotation direction of the winding

    Parameters
    ----------
    self : LamSlotWind
        A LamSlotWind object

    Returns
    -------
    rot_dir : int
        -1 or +1
    """
    MMF = self.comp_mmf_unit()
    p = self.get_pole_pair_number()

    # Compute rotation direction from unit mmf
    results = MMF.get_harmonics(1, "freqs", "wavenumber")
    H1 = results[MMF.symbol]

    return sign(H1[0])
524fdfd195a70bcd07b89e594a90e624ec4db4ea
3,645,044
def search_pk(uuid):
    """uuid can be pk."""
    IterHarmonicApprox = WorkflowFactory("phonopy.iter_ha")
    qb = QueryBuilder()
    qb.append(IterHarmonicApprox, tag="iter_ha", filters={"uuid": {"==": uuid}})
    PhonopyWorkChain = WorkflowFactory("phonopy.phonopy")
    qb.append(PhonopyWorkChain, with_incoming="iter_ha")
    qb.order_by({PhonopyWorkChain: {"ctime": "asc"}})
    pks = [n[0].pk for n in qb.all() if n[0].is_finished_ok]
    return pks
d1a8d9d32e6d3272d49163ee8abf74363a520c8d
3,645,045
def _region_bulk(mode='full', scale=.6):
    """
    Estimate of the temperature dependence of bulk viscosity zeta/s.
    """
    plt.figure(figsize=(scale*textwidth, scale*aspect*textwidth))
    ax = plt.axes()

    def zetas(T, zetas_max=0, zetas_width=1):
        return zetas_max / (1 + ((T - Tc)/zetas_width)**2)

    chain = mcmc.Chain()
    keys, ranges = map(list, zip(*(
        i for i in zip(chain.keys, chain.range) if i[0].startswith('zetas')
    )))

    T = Tc*np.linspace(.5, 1.5, 1000)

    maxdict = {k: r[1] for k, r in zip(keys, ranges)}
    ax.fill_between(
        T, zetas(T, **maxdict),
        label='Prior range', **region_style
    )

    ax.set_xlim(T[0], T[-1])
    ax.set_ylim(0, 1.05*maxdict['zetas_max'])
    auto_ticks(ax, minor=2)

    ax.set_xlabel('Temperature [GeV]')
    ax.set_ylabel(r'$\zeta/s$')

    if mode == 'empty':
        return

    if mode == 'examples':
        for args in [
                (.025, .01),
                (.050, .03),
                (.075, .05),
        ]:
            ax.plot(T, zetas(T, *args), color=plt.cm.Blues(.7))
        return

    # use a Gaussian mixture model to classify zeta/s parameters
    samples = chain.load(*keys, thin=10)
    gmm = GaussianMixture(n_components=3, covariance_type='full').fit(samples)
    labels = gmm.predict(samples)

    for n in range(gmm.n_components):
        params = dict(zip(
            keys,
            (mcmc.credible_interval(s)[1] for s in samples[labels == n].T)
        ))

        if params['zetas_max'] > .05:
            cmap = 'Blues'
        elif params['zetas_width'] > .03:
            cmap = 'Greens'
        else:
            cmap = 'Oranges'

        curve = zetas(T, **params)
        color = getattr(plt.cm, cmap)(.65)

        ax.plot(T, curve, color=color, zorder=-10)
        ax.fill_between(T, curve, color=color, alpha=.1, zorder=-20)

    ax.legend(loc='upper left')
9a76f28b47a5e8c1ce45a9ea0dcaef723032bcce
3,645,046
def bias(struct, subover=True, trim=True, subbias=False, bstruct=None, median=False,
         function='polynomial', order=3, rej_lo=3, rej_hi=3, niter=10,
         plotover=False, log=None, verbose=True):
    """Bias subtracts the bias levels from a frame. It will fit and subtract the
    overscan region, trim the images, and subtract a master bias if required.

    struct--image structure
    subover--subtract the overscan region
    trim--trim the image
    subbias--subtract master bias
    bstruct--master bias image structure
    median--use the median instead of mean in image statistics
    function--form to fit to the overscan region
    order--order for the function
    rej_lo--sigma of low points to reject in the fit
    rej_hi--sigma of high points to reject in the fit
    niter--number of iterations
    log--saltio log for recording information
    verbose--whether to print to stdout
    """
    infile=saltkey.getimagename(struct[0])

    # how many extensions?
    nsciext = saltkey.get('NSCIEXT',struct[0])
    nextend = saltkey.get('NEXTEND',struct[0])
    nccd = saltkey.get('NCCDS',struct[0])

    # how many amplifiers?--this is hard wired
    amplifiers = 2 * nccd

    #log the process
    if subover and log:
        message = '%28s %7s %5s %4s %6s' % \
            ('HDU','Overscan','Order','RMS','Niter')
        log.message('\n  --------------------------------------------------',
                    with_header=False, with_stdout=verbose)
        log.message(message, with_header=False, with_stdout=verbose)
        log.message('  --------------------------------------------------',
                    with_header=False, with_stdout=verbose)

    if (plotover):
        plt.figure(1)
        plt.axes([0.1,0.1,0.8,0.8])
        plt.xlabel('CCD Column')
        plt.ylabel('Pixel Counts (e-)')
        plt.ion()

    #loop through the extensions and subtract the bias
    for i in range(1,nsciext+1):
        if struct[i].name=='SCI':

            #get the bias section
            biassec = saltkey.get('BIASSEC',struct[i])
            y1,y2,x1,x2 = saltio.getSection(biassec, iraf_format=True)

            #get the data section
            datasec = saltkey.get('DATASEC',struct[i])
            dy1,dy2, dx1, dx2 = saltio.getSection(datasec, iraf_format=True)

            #setup the overscan region
            if subover:
                yarr=np.arange(y1,y2, dtype=float)
                data=struct[i].data
                odata=struct[i].data[y1:y2,x1:x2]
                if median:
                    odata=np.median((struct[i].data[y1:y2,x1:x2]),axis=1)
                    olevel=np.median((struct[i].data[y1:y2,x1:x2]))
                    saltkey.new('OVERSCAN','%f' % (olevel),'Overscan median value', struct[i])
                else:
                    odata=np.mean((struct[i].data[y1:y2,x1:x2]),axis=1)
                    olevel=np.mean((struct[i].data[y1:y2,x1:x2]))
                    saltkey.new('OVERSCAN','%f' % (olevel),'Overscan mean value', struct[i])

                #fit the overscan region
                ifit=saltfit.interfit(yarr, odata, function=function,
                                      order=order, thresh=rej_hi, niter=niter)
                try:
                    ifit.interfit()
                    coeffs=ifit.coef
                    ofit=ifit(yarr)
                    omean, omed, osigma=saltstat.iterstat((odata-ofit), sig=3, niter=5)
                except ValueError:
                    #catch the error if it is a zero array
                    ofit=np.array(yarr)*0.0
                    osigma=0.0
                except TypeError:
                    #catch the error if it is a zero array
                    ofit=np.array(yarr)*0.0
                    osigma=0.0

                #subtract the overscan region
                for j in range(len(struct[i].data[0])):
                    struct[i].data[y1:y2,j] -= ofit

                #report the information
                if log:
                    message = '%25s[%1d] %8.2f %3d %7.2f %3d' % \
                        (infile, i, olevel, order, osigma, niter)
                    log.message(message, with_stdout=verbose, with_header=False)

                #add the statistics to the image header
                saltkey.new('OVERRMS','%f' % (osigma),'Overscan RMS value', struct[i])

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu=saltkey.get('VAREXT', struct[i])
                    try:
                        vdata=struct[vhdu].data
                        #The bias level should not be included in the noise from the signal
                        for j in range(len(struct[i].data[0])):
                            vdata[y1:y2,j] -= ofit
                        #add a bit to make sure that the minimum error is the rednoise
                        rdnoise= saltkey.get('RDNOISE',struct[i])
                        vdata[vdata<rdnoise**2]=rdnoise**2
                        struct[vhdu].data=vdata+osigma**2
                    except Exception as e:
                        msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
                        raise SaltError(msg)

                #plot the overscan region
                if plotover:
                    plt.plot(yarr, odata)
                    plt.plot(yarr, ofit)

            #trim the data and update the headers
            if trim:
                struct[i].data=struct[i].data[dy1:dy2,dx1:dx2]
                datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
                saltkey.put('DATASEC',datasec,struct[i])

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu=saltkey.get('VAREXT', struct[i])
                    struct[vhdu].data=struct[vhdu].data[dy1:dy2,dx1:dx2]
                    datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
                    saltkey.put('DATASEC',datasec,struct[vhdu])

                #update the BPM frame
                if saltkey.found('BPMEXT', struct[i]):
                    bhdu=saltkey.get('BPMEXT', struct[i])
                    struct[bhdu].data=struct[bhdu].data[dy1:dy2,dx1:dx2]
                    datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
                    saltkey.put('DATASEC',datasec,struct[bhdu])

            #subtract the master bias if necessary
            if subbias and bstruct:
                struct[i].data -= bstruct[i].data

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu=saltkey.get('VAREXT', struct[i])
                    try:
                        vdata=struct[vhdu].data
                        struct[vhdu].data=vdata+bstruct[vhdu].data
                    except Exception as e:
                        msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
                        raise SaltError(msg)

    if plotover:
        plt.ioff()
        plt.show()

    return struct
035da6b5eaa73a30ffec3f148d33199b275529a8
3,645,047
def getItemStatus(selected_item, store_id, num_to_average):
    """
    Method pulls the stock status of the selected item in the given store

    :param selected_item: current item being processed (toilet paper or hand sanitizer)
    :param store_id: id of the current store
    :param num_to_average: number of recent status updates to include in the
        cumulative moving average status update
    :return: returns the status of the item (integer between 1-5)
    """
    db = mysql.connect()
    cursor = db.cursor()
    query = "SELECT rating FROM status_list WHERE id = '" + str(store_id) + "' AND item = " + str(selected_item) + ";"
    cursor.execute(query)
    status_values = []
    status = cursor.fetchall()
    moving_average = 0
    for i in range(len(status)):
        status_values.append(5 - (status[i][0]) + 1)
    if len(status_values) != 0:
        for i in range(min(len(status_values), num_to_average)):
            moving_average += status_values[i]
        moving_average = moving_average / min(num_to_average, len(status_values))
    cursor.close()
    db.close()
    return round(moving_average)
00c0ecdec56ac4446b3db247fd234daeecada589
3,645,048
def filter_and_copy_table(tab, to_remove):
    """ Filter and copy a FITS table.

    Parameters
    ----------
    tab : FITS Table object

    to_remove : [int ...]
        list of indices to remove from the table

    returns FITS Table object
    """
    nsrcs = len(tab)
    mask = np.zeros((nsrcs), '?')
    mask[to_remove] = True
    inv_mask = np.invert(mask)
    out_tab = tab[inv_mask]
    return out_tab
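A minimal usage sketch, assuming an astropy Table stands in for the FITS table object (any table that supports boolean-mask row selection works the same way):

import numpy as np
from astropy.table import Table

tab = Table({'src': [1, 2, 3, 4]})
kept = filter_and_copy_table(tab, [1, 3])   # drop rows at indices 1 and 3
print(kept['src'].tolist())                 # [1, 3]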
cc13a002715c36cc2c07b836a5045cfb62311529
3,645,049
def _archive_logs(conn, node_type, logger, node_ip):
    """Creates an archive of all logs found under /var/log/cloudify plus
    journalctl.
    """
    archive_filename = 'cloudify-{node_type}-logs_{date}_{ip}.tar.gz'.format(
        node_type=node_type, date=get_host_date(conn), ip=node_ip
    )
    archive_path = '/tmp/{}'.format(archive_filename)
    journalctl_destination_path = '/var/log/cloudify/journalctl.log'
    conn.sudo(
        'bash -c "journalctl > /tmp/jctl && mv /tmp/jctl {0}"'
        .format(journalctl_destination_path),
    )
    logger.info('Creating logs archive in {0}: {1}'.format(node_type, archive_path))
    conn.sudo(
        'tar -czf {0} -C /var/log cloudify '
        '--warning=no-file-changed'.format(archive_path),
        warn=True
    )
    conn.run('test -e {0}'.format(archive_path))
    conn.sudo('rm {0}'.format(journalctl_destination_path))
    return archive_path
970683258d664a1170fe9ab5253287313fa9f871
3,645,050
def get_scripts():
    """Returns the list of available scripts

    Returns:
        A dict holding the result message
    """
    return Response.ok("Script files successfully fetched.", {
        "scripts": list_scripts()
    })
5aa2ecd9a19c3e4d577c679e5249b71b01b62f20
3,645,051
from typing import Any
from typing import Optional
import functools
import contextvars
import asyncio
from typing import cast


async def invoke(
        fn: callbacks.BaseFn,
        *args: Any,
        settings: Optional[configuration.OperatorSettings] = None,
        cause: Optional[causation.BaseCause] = None,
        **kwargs: Any,
) -> Any:
    """
    Invoke a single function, but safely for the main asyncio process.

    Used mostly for handler functions, and potentially slow & blocking code.
    Other callbacks are called directly, and are expected to be synchronous
    (such as handler-selecting (lifecycles) and resource-filtering (``when=``)).

    A full set of the arguments is provided, expanding the cause to some easily
    usable aliases. The function is expected to accept ``**kwargs`` for the args
    that it does not use -- for forward compatibility with the new features.

    The synchronous methods are executed in the executor (threads or processes),
    thus making it non-blocking for the main event loop of the operator.
    See: https://pymotw.com/3/asyncio/executors.html
    """
    if is_async_fn(fn):
        kwargs = build_kwargs(cause=cause, _sync=False, **kwargs)
        result = await fn(*args, **kwargs)  # type: ignore
    else:
        kwargs = build_kwargs(cause=cause, _sync=True, **kwargs)

        # Not that we want to use functools, but for executors kwargs, it is officially recommended:
        # https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor
        real_fn = functools.partial(fn, *args, **kwargs)

        # Copy the asyncio context from current thread to the handler's thread.
        # It can be copied 2+ times if there are sub-sub-handlers (rare case).
        context = contextvars.copy_context()
        real_fn = functools.partial(context.run, real_fn)

        # Prevent orphaned threads during daemon/handler cancellation. It is better to be stuck
        # in the task than to have orphan threads which deplete the executor's pool capacity.
        # Cancellation is postponed until the thread exits, but it happens anyway (for consistency).
        # Note: the docs say the result is a future, but typesheds say it is a coroutine => cast()!
        loop = asyncio.get_event_loop()
        executor = settings.execution.executor if settings is not None else None
        future = cast(aiotasks.Future, loop.run_in_executor(executor, real_fn))
        cancellation: Optional[asyncio.CancelledError] = None
        while not future.done():
            try:
                await asyncio.shield(future)  # slightly expensive: creates tasks
            except asyncio.CancelledError as e:
                cancellation = e
        if cancellation is not None:
            raise cancellation
        result = future.result()

    return result
4f132bab3eaae0ebe64f4cfdcfefa89cd2b59f3f
3,645,052
import sys


def getLatestFare(_origin, _destination, _date):
    """
    _origin and _destination take airport codes, e.g. BLR for Bangalore
    _date in format YYYY-MM-DD e.g. 2016-10-30

    Returns either:
        10 latest results from the results page.
        1 latest result from the results page.
    """
    try:
        _url = base_url.format(_origin, _destination, _date)
        soup = getSoup(_url)
        fare_list = soup.find('ul').find_all('li', {"class": ["active", "not-active"]})
        fares = []
        for fare in fare_list:
            fares.append({'price': getPrice(fare.find('a').find('div').findChildren()[2].string),
                          'date': fare.find('a').find('div').findChildren()[0].string})
    except Exception:
        sys.exit("No Route found.")
    return fares
209c9c4ee05dc25b575565b0953097049ba21c28
3,645,053
from datetime import datetime


def home():
    """Renders the home page."""
    return render_template(
        'index.html',
        title='Rococal',
        year=datetime.now().year,
    )
4cee1eed6c45d79aad0acebdd55ed61ef7dff9da
3,645,054
def Dx(x):
    """Discounted number of survivors.

    Args:
        x: the age.

    Returns:
        Discounted number of survivors.
    """
    return lx(x)*v**x
886fdfb8b5337e9520f94fa937cb967328391823
3,645,055
def tracek(k, aee, aii, see, sii, tau=1, alpha=0):
    """
    Trace of recurrently connected network of E,I units, analytically determined

    input:
    k: spatial frequency
    aee: amplitude E to E connectivity
    aii: amplitude I to I connectivity
    see: standard deviation/width of E to E connectivity
    sii: standard deviation/width of I to I connectivity
    tau: ratio of excitatory to inhibitory time constant, default is 1
    alpha: float, 0<=alpha<=1, strength of self-inhibitory connections
    """
    aii_s = aii*(1-alpha)
    aii_a = aii*alpha
    return -1 - (1 + aii_s*H(k,sii) + aii_a)/tau + aee*H(k,see)
005c612f8d54b4ba61b7e701d40b9814136695b4
3,645,056
import os
import logging

import git  # GitPython, needed for git.Repo below

# module-level logger (assumed; the original module defines `logger` elsewhere)
logger = logging.getLogger(__name__)


def set_working_dir_repo_root(func):
    """
    Decorator for checking whether the current working dir is set as root of repo.
    If not, changes the working dir to root of repo

    Returns
    -------
    """
    def inner(*args, **kwargs):
        git_repo = git.Repo(".", search_parent_directories=True)
        git_root = git_repo.working_tree_dir
        if os.getcwd() != git_root:
            logger.info(
                f"current working dir: {os.getcwd()}, "
                f"is not correctly set as repo root, "
                f"so changing to {git_root}"
            )
            os.chdir(git_root)
        else:
            logger.info(
                f"current working dir correctly set as"
                f" repo root {os.getcwd()}"
            )
        result = func(*args, **kwargs)
        return result
    return inner
07a6d667129d94557fdd20956124933920338e30
3,645,057
def get_verbose_name(model_or_queryset, field):
    """
    returns the value of the ``verbose_name`` of a field

    typically used in the templates where you can have a dynamic queryset

    :param model_or_queryset: target object
    :type model_or_queryset: :class:`django.db.models.Model`, :class:`django.db.query.Queryset`

    :param field: field to get the verbose name
    :type field: :class:`django.db.models.Field`, basestring

    :return: translated field verbose name
    :rtype: unicode

    Valid uses:

    >>> from django.contrib.auth.models import User, Permission
    >>> user = User()
    >>> p = Permission()
    >>> print unicode(get_verbose_name(user, 'username'))
    username
    >>> print unicode(get_verbose_name(User, 'username'))
    username
    >>> print unicode(get_verbose_name(User.objects.all(), 'username'))
    username
    >>> print unicode(get_verbose_name(User.objects, 'username'))
    username
    >>> print unicode(get_verbose_name(User.objects, user._meta.get_field_by_name('username')[0]))
    username
    >>> print unicode(get_verbose_name(p, 'content_type.model'))
    python model class name
    >>> get_verbose_name(object, 'aaa')
    Traceback (most recent call last):
    ...
    ValueError: `get_verbose_name` expects Manager, Queryset or Model as first parameter (got <type 'type'>)
    """
    if isinstance(model_or_queryset, models.Manager):
        model = model_or_queryset.model
    elif isinstance(model_or_queryset, QuerySet):
        model = model_or_queryset.model
    elif isinstance(model_or_queryset, models.Model):
        model = model_or_queryset
    elif type(model_or_queryset) is models.base.ModelBase:
        model = model_or_queryset
    else:
        raise ValueError('`get_verbose_name` expects Manager, Queryset or Model as first parameter (got %s)' % type(model_or_queryset))

    if isinstance(field, basestring):
        field = get_field_by_path(model, field)
    elif isinstance(field, models.Field):
        field = field
    else:
        raise ValueError('`get_verbose_name` field_path must be string or Field class')

    return field.verbose_name
1f395d62a20b307dce2f802c498630fd237aec33
3,645,058
def if_then_else(cond, t, f, span=None):
    """Conditional selection expression.

    Parameters
    ----------
    cond : PrimExpr
        The condition

    t : PrimExpr
        The result expression if cond is true.

    f : PrimExpr
        The result expression if cond is false.

    span : Optional[Span]
        The location of this operator in the source.

    Returns
    -------
    result : Node
        The result of conditional expression.

    Note
    ----
    Unlike Select, if_then_else will not execute the branch that does not
    satisfy the condition. You can use it to guard against out of bound access.
    Unlike Select, if_then_else cannot be vectorized if some lanes in the
    vector have different conditions.
    """
    return _ffi_api._OpIfThenElse(convert(cond), convert(t), convert(f), span)
dba758c13d3108244f389f0cbae97c1eeb1f8e04
3,645,059
import argparse


def get_args() -> argparse.Namespace:
    """Get arguments."""
    parser = argparse.ArgumentParser(description="Dump Instance")
    parser.add_argument(
        "network_state_path", type=str, help="File path to network state dump JSON."
    )
    parser.add_argument("--host", type=str, help="Host to bind to", default="127.0.0.1")
    parser.add_argument(
        "--port", type=int, help="Port to run on (defaults to 3000)", default=3000
    )
    parser.add_argument(
        "--log-level",
        type=str.upper,
        help="Log level",
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
    )
    parser.add_argument(
        "--events-to-replay-path",
        type=str,
        help=(
            "File path to events to replay JSON. Events provided by "
            "--combined-replay-dump-path option will be first, followed by events "
            "from this file."
        ),
        default=None,
    )
    parser.add_argument(
        "--command-results-path",
        type=str,
        help=(
            "File path to command result JSON. Command results provided by "
            "--combined-replay-dump-path option will be first, followed by results "
            "from this file."
        ),
        default=None,
    )
    parser.add_argument(
        "--combined-replay-dump-path",
        type=str,
        help=(
            "File path to the combined event and command result dump JSON. Events and "
            "command results will be extracted in the order they were received."
        ),
        default=None,
    )
    return parser.parse_args()
30b1aacc2c13d1ac8fad3411b95d9aeabf625f62
3,645,060
def get_resources(filetype):
    """Find all HTML template or JavaScript files in the package.

    Caches the results for quick access.

    Parameters
    ----------
    filetype : {'templates', 'js'}
        The type of file resource needed.

    Returns
    -------
    :class:`dict`
        A dictionary mapping filename to the contents of the file.

    Raises
    ------
    ValueError
        If `filetype` is unknown.
    """
    global _resource_cache
    if filetype not in _resource_cache:
        raise ValueError("Unknown filetype '{0}' for get_resources()!".format(filetype))
    if _resource_cache[filetype] is None:
        _resource_cache[filetype] = dict()
        for f in resource_listdir('prospect', filetype):
            if not f.startswith("."):
                _resource_cache[filetype][f] = resource_string('prospect', filetype + '/' + f).decode('utf-8')
    return _resource_cache[filetype]
73244ac590db5a310170142b6b43b7840cfc94ca
3,645,061
def sum_fn(xnum, ynum):
    """ A function which performs a sum """
    return xnum + ynum
61a1ae2e4b54348b9e3839f7f2779edd03f181df
3,645,062
def categorical_iou(y_true, y_pred, target_classes=None, strict=True):
    """Metric that computes the IoU per image and per class, then averages them.

    Args:
        target_classes: Array of target class indices. None means all classes.
        strict: True to also penalize predicting classes that are absent from the
                label; False to only score classes that are present in the label.
    """
    axes = list(range(1, K.ndim(y_true)))
    y_classes = K.argmax(y_true, axis=-1)
    p_classes = K.argmax(y_pred, axis=-1)
    active_list = []
    iou_list = []
    for c in target_classes or range(y_true.shape[-1]):
        with tf.name_scope(f"class_{c}"):
            y_c = K.equal(y_classes, c)
            p_c = K.equal(p_classes, c)
            inter = K.sum(K.cast(tf.math.logical_and(y_c, p_c), "float32"), axis=axes)
            union = K.sum(K.cast(tf.math.logical_or(y_c, p_c), "float32"), axis=axes)
            active = union > 0 if strict else K.any(y_c, axis=axes)
            iou = inter / (union + K.epsilon())
            active_list.append(K.cast(active, "float32"))
            iou_list.append(iou)
    return K.sum(iou_list, axis=0) / (K.sum(active_list, axis=0) + K.epsilon())
b79f399479127271c3af3ee9b28203622f8d17fe
3,645,063
def convert_string(string: str, type: str) -> str:
    """Convert the string by [e]ncrypting or [d]ecrypting.

    :param type: String 'e' for encrypt or 'd' for decrypt.
    :return: [en/de]crypted string.
    """
    hash_string = hash_()
    map_ = mapping(hash_string)
    if type.lower() == 'e':
        output = encrypt(string, map_)
    elif type.lower() == 'd':
        map_ = {v: k for k, v in map_.items()}
        output = decrypt(string, map_)
    else:
        output = ''
    return output
824122fa035dcb164f21eadb5c0e840f8acd2914
3,645,064
def create_saml_security_context(token, private_key):
    """
    Create a security context for SAML token based authentication scheme

    :type token: :class:`str`
    :param token: SAML Token
    :type private_key: :class:`str`
    :param private_key: Absolute file path of the private key of the user
    :rtype: :class:`vmware.vapi.core.SecurityContext`
    :return: Newly created security context
    """
    private_key_data = None
    with open(private_key, 'r') as fp:
        private_key_data = fp.read()
    return SecurityContext({SCHEME_ID: SAML_SCHEME_ID,
                            PRIVATE_KEY: private_key_data,
                            SAML_TOKEN: token,
                            SIGNATURE_ALGORITHM: DEFAULT_ALGORITHM_TYPE})
430e71697eb3e5b3df438f400a7f07ab8e936af7
3,645,065
def predictIsDeviceLeftRunning():
    """ Returns whether the device is presumed left running without a real need
    ---
    parameters:
      - name: id
        in: query
        description: the device id for which the prediction is made
        required: false
        style: form
        explode: true
        schema:
          type: integer
          format: int32
    responses:
      "200":
        description: A boolean, True or False, if the device is left running or not
        content:
          application/json:
            schema:
              type: boolean
      "400":
        description: Bad argument
    """
    device_id = int(request.args.get("id", -1))
    if device_id == -1:
        return Response(jsonify("Bad argument"), status=400)
    device = DeviceObject(UserObject.getMockUser(), device_id)
    return jsonify(device.predictDeviceLeftRunning())
8a50962f3c52e100d79a74413df0d4bf8230bafd
3,645,066
from typing import Optional from typing import Callable from pathlib import Path def load( source: AnyPath, wordnet: Wordnet, get_synset_id: Optional[Callable] = None, ) -> Freq: """Load an Information Content mapping from a file. Arguments: source: A path to an information content weights file. wordnet: A :class:`wn.Wordnet` instance with synset identifiers matching the offsets in the weights file. get_synset_id: A callable that takes a synset offset and part of speech and returns a synset ID valid in *wordnet*. Raises: :class:`wn.Error`: If *wordnet* does not have exactly one lexicon. Example: >>> import wn, wn.ic >>> pwn = wn.Wordnet('pwn:3.0') >>> path = '~/nltk_data/corpora/wordnet_ic/ic-brown.dat' >>> freq = wn.ic.load(path, pwn) """ source = Path(source).expanduser().resolve(strict=True) assert len(wordnet.lexicons()) == 1 lexid = wordnet.lexicons()[0].id if get_synset_id is None: get_synset_id = synset_id_formatter(prefix=lexid) freq = _initialize(wordnet, 0.0) with source.open() as icfile: for offset, pos, weight, is_root in _parse_ic_file(icfile): ssid = get_synset_id(offset=offset, pos=pos) # synset = wordnet.synset(ssid) freq[pos][ssid] = weight if is_root: freq[pos][None] += weight return freq
962bdfbe5d101bdeae966566c16d8a7216c36d8b
3,645,067
from datetime import datetime


def iso_time_str() -> str:
    """Return the current time as ISO 8601 format e.g.: 2019-01-19T23:20:25.459Z
    """
    now = datetime.utcnow()
    return now.isoformat()[:-3]+'Z'
203617006175079181d702f7d7ed6d2974714f2e
3,645,068
def mass(snap: Snap) -> Quantity: """Particle mass.""" massoftype = snap._file_pointer['header/massoftype'][()] particle_type = np.array( np.abs(get_dataset('itype', 'particles')(snap)).magnitude, dtype=int ) return massoftype[particle_type - 1] * snap._array_code_units['mass']
cf67d66f1e1a47f162b5e538444d2c406b377238
3,645,069
import os def _build_pytest_test_results_path(cmake_build_path): """ Build the path to the Pytest test results directory. :param cmake_build_path: Path to the CMake build directory. :return: Path to the Pytest test results directory. """ pytest_results_path = os.path.join(cmake_build_path, TEST_RESULTS_DIR, 'Pytest') return pytest_results_path
52198ce508d5d30c322da285c66ff9ba45418ebd
3,645,070
import logging async def create_object_detection_training( train_object_detection_model_request: TrainImageModel, token: str = Depends(oauth2_scheme), ): """[API router to train AutoML object detection model] Args: train_object_detection_model_request (TrainImageModel): [Train AutoML Object detection model request] token (str, optional): [Bearer token for authentication]. Defaults to Depends(oauth2_scheme). Raises: HTTPException: [Unauthorized exception when invalid token is passed] error: [Exception in underlying controller] Returns: [TrainModelResponse]: [AutoML train object detection model response] """ try: logging.info("Calling /gcp/automl/train_object_detection_model endpoint") logging.debug(f"Request: {train_object_detection_model_request}") if decodeJWT(token=token): response = TrainModelController().train_object_detection_model_controller( request=train_object_detection_model_request ) return TrainModelResponse(**response) else: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid access token", headers={"WWW-Authenticate": "Bearer"}, ) except Exception as error: logging.error( f"Error in /gcp/automl/train_object_detection_model endpoint: {error}" ) raise error
293dc0bfd8acb52a3207138e13028b6766c7be20
3,645,071
import torch def batchify_rays(rays_flat, chunk=1024*32, random_directions=None, background_color=None, **kwargs): """Render rays in smaller minibatches to avoid OOM. """ all_ret = {} for i in range(0, rays_flat.shape[0], chunk): ret = render_rays(rays_flat[i:i+chunk], random_directions=random_directions, background_color=background_color, **kwargs) if random_directions is not None: ret, mean_regularization_term = ret for k in ret: if k not in all_ret: all_ret[k] = [] all_ret[k].append(ret[k]) all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret} if random_directions is None: return all_ret else: return all_ret, mean_regularization_term
9897575462e47f98016ebd0a2fcaee186c440f9a
3,645,072
def extract_surfaces(pvol): """ Extracts surfaces from a volume. :param pvol: input volume :type pvol: abstract.Volume :return: extracted surface :rtype: dict """ if not isinstance(pvol, BSpline.abstract.Volume): raise TypeError("The input should be an instance of abstract.Volume") # Get data from the volume object vol_data = pvol.data rational = vol_data['rational'] degree_u = vol_data['degree'][0] degree_v = vol_data['degree'][1] degree_w = vol_data['degree'][2] kv_u = vol_data['knotvector'][0] kv_v = vol_data['knotvector'][1] kv_w = vol_data['knotvector'][2] size_u = vol_data['size'][0] size_v = vol_data['size'][1] size_w = vol_data['size'][2] cpts = vol_data['control_points'] # Determine object type obj = NURBS.Surface if rational else BSpline.Surface # u-v plane surflist_uv = [] for w in range(size_w): surf = obj() surf.degree_u = degree_u surf.degree_v = degree_v surf.ctrlpts_size_u = size_u surf.ctrlpts_size_v = size_v surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for v in range(size_v)] for u in range(size_u)] surf.knotvector_u = kv_u surf.knotvector_v = kv_v surflist_uv.append(surf) # u-w plane surflist_uw = [] for v in range(size_v): surf = obj() surf.degree_u = degree_u surf.degree_v = degree_w surf.ctrlpts_size_u = size_u surf.ctrlpts_size_v = size_w surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for u in range(size_u)] surf.knotvector_u = kv_u surf.knotvector_v = kv_w surflist_uw.append(surf) # v-w plane surflist_vw = [] for u in range(size_u): surf = obj() surf.degree_u = degree_v surf.degree_v = degree_w surf.ctrlpts_size_u = size_v surf.ctrlpts_size_v = size_w surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for v in range(size_v)] surf.knotvector_u = kv_v surf.knotvector_v = kv_w surflist_vw.append(surf) # Return shapes as a dict object return dict(uv=surflist_uv, uw=surflist_uw, vw=surflist_vw)
cd2b24f200adf9f5ff29cc847693d57d450521ad
3,645,073
import os import logging def read_file(filepath: str, config: Config = DEFAULT_CONFIG) -> pd.DataFrame: """ Read .csv, .xlsx, .xls to pandas dataframe. Read only a certain sheet name and skip to header row using sheet_name and header_index. :filepath: path to file (str) :config: dtype.Config Returns pd.DataFrame """ filename = os.path.basename(filepath).lower() excel_header_row = utils._or(config.excel_header_row, config.header_row) csv_header_row = utils._or(config.csv_header_row, config.header_row) if filename == "pdappend.csv": logging.warning("Cannot read reserved result filename (pdappend.csv)") return pd.DataFrame() if not is_filetype(filename): raise ValueError(f"file {filename} is not .csv, .xslx, or .xls") if ".xls" in filename: return pd.read_excel( filepath, sheet_name=config.sheet_name, skiprows=list(range(0, int(excel_header_row))), ) if filename.endswith(".csv"): return pd.read_csv(filepath, skiprows=list(range(0, int(csv_header_row))))
09f9e626be020cf2d3c02a09863489cf84735c05
3,645,074
def euler2quaternion( euler_angs ):
    """
        Description
        -----------
        This code is directly from the following reference
        [REF] https://computergraphics.stackexchange.com/questions/8195/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr

        Converting Euler angles (yaw, pitch, roll) to an R4 quaternion vector (w, x, y, z)

        Arguments
        ---------
            [NAME]              [TYPE]        [DESCRIPTION]
            (1) euler_angs      list/array    The Euler angles (yaw, pitch, roll) to convert.

        [OUTPUTS]
        -----------
            [NAME]              [TYPE]        [DESCRIPTION]
            (1) w, x, y, z      floats        The quaternion components, ordered in w, x, y and z

    """
    yaw, pitch, roll = euler_angs[ : ]

    cy = np.cos( yaw   * 0.5 )
    sy = np.sin( yaw   * 0.5 )
    cp = np.cos( pitch * 0.5 )
    sp = np.sin( pitch * 0.5 )
    cr = np.cos( roll  * 0.5 )
    sr = np.sin( roll  * 0.5 )

    w = cr * cp * cy + sr * sp * sy
    x = sr * cp * cy - cr * sp * sy
    y = cr * sp * cy + sr * cp * sy
    z = cr * cp * sy - sr * sp * cy

    return w, x, y, z
307385911573af8a6e65617a8b438ca680130b79
3,645,075
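A quick sanity check (numpy is assumed to be imported as np, as the function body already requires): a pure 90-degree yaw should give w = cos(pi/4), z = sin(pi/4) and zero x, y.
import numpy as np

w, x, y, z = euler2quaternion([np.pi / 2, 0.0, 0.0])  # (yaw, pitch, roll)
assert np.isclose(w, np.cos(np.pi / 4))
assert np.isclose(z, np.sin(np.pi / 4))
assert np.isclose(x, 0.0) and np.isclose(y, 0.0)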
def run_server(server, thread=False, port=8080):
    """
    Runs the server.

    @param      server      if None, it becomes ``HTTPServer(('localhost', 8080), SimpleHandler)``
    @param      thread      if True, the server is run in a thread
                            and the function returns right away,
                            otherwise, it runs the server.
    @param      port        port to use
    @return                 server if thread is False, the thread otherwise (the thread is started)

    @warning If you kill the Python program while the thread is still running,
             the Python interpreter might be closed completely.
    """
    if server is None:
        server = HTTPServer(('localhost', port), SimpleHandler)
    if thread:
        th = ThreadServer(server)
        th.start()
        return th
    else:
        server.serve_forever()
        return server
524b58f012a1029e52d845f40a20e2ae1f7f9c0a
3,645,076
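Hedged usage: start the bundled SimpleHandler server in a background thread and issue a request against it. The requests dependency, the response SimpleHandler gives for "/", and the exact way to stop a ThreadServer are all assumptions here.
import requests

th = run_server(None, thread=True, port=8081)
resp = requests.get("http://localhost:8081/")   # talk to the threaded server
print(resp.status_code)
# Stopping it cleanly depends on ThreadServer's API, which is not shown here.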
from functools import reduce
from operator import xor


def validate(aLine):
    """
    >>> validate(b"$GPGSA,A,2,29,19,28,,,,,,,,,,23.4,12.1,20.0*0F")
    [b'GPGSA', b'A', b'2', b'29', b'19', b'28', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'23.4', b'12.1', b'20.0']
    >>> validate(b"$GPGSA,A,2,29,19,28,,,,,,,,,,23.4,") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/doctest.py", line 1330, in __run
        compileflags, 1), test.globs)
      File "<doctest __main__.validate[1]>", line 1, in <module>
        validate(b"$GPGSA,A,2,29,19,28,,,,,,,,,,23.4,")
      File "/Users/slott/Documents/Projects/NMEA-Tools/nmea_checksum.py", line 23, in validate
        assert sentence[0] in b'$!', "Unexpected {} not in ({}, {})".format(sentence[0], b'$', b'!')
    IndexError: index out of range
    >>> validate(b"29,19,28,,,,,,,,,,23.4,12.1,20.0*0F") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/doctest.py", line 1330, in __run
        compileflags, 1), test.globs)
      File "<doctest __main__.validate[2]>", line 1, in <module>
        validate(b"29,19,28,,,,,,,,,,23.4,12.1,20.0*0F") # doctest: +IGNORE_EXCEPTION_DETAIL
      File "/Users/slott/Documents/Projects/NMEA-Tools/nmea_checksum.py", line 32, in validate
        assert sentence[0] in b'$!', "Unexpected {} not in ({}, {})".format(sentence[0], b'$', b'!')
    AssertionError: Unexpected 50 not in (b'$', b'!')
    >>> validate(b'$GPGLL,2542.9243,N,08013.6310,W,162823.000,A*29')
    [b'GPGLL', b'2542.9243', b'N', b'08013.6310', b'W', b'162823.000', b'A']
    """
    sentence, star, checksum = aLine.rpartition(b'*')
    assert sentence[0] in b'$!', f"Unexpected {sentence[0]} not in b'$!'"
    if star == b'*':
        cs = reduce(xor, sentence[1:])
        assert int(checksum, 16) == cs
    return sentence[1:].split(b',')
a8f302f0a03f567bc3c61930ecdf147ff9670b04
3,645,077
def matlabize(s): """Make string s suitable for use as a MATLAB function/script name""" s = s.replace(' ', '_') s = s.replace('.', '_') s = s.replace('-', '_') assert len(s) <= 63 # MATLAB function/script name length limitation return s
5dccb9497a3ee28dae5fb7de6e15a1fa02f144cf
3,645,078
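A worked case (not from the original module) showing the substitutions matlabize performs:
assert matlabize("my analysis-v1.2") == "my_analysis_v1_2"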
def getApiResults(case, installer, version, criteria):
    """
    Get Results by calling the API
    criteria is to consider N last results for the case success criteria
    """
    results = json.dumps([])
    # to remove proxy (to be removed at the end for local test only)
    # proxy_handler = urllib2.ProxyHandler({})
    # opener = urllib2.build_opener(proxy_handler)
    # urllib2.install_opener(opener)
    # url = "http://127.0.0.1:8000/results?case=" + case + \
    #       "&period=30&installer=" + installer
    period = get_config('general.period')
    url_base = get_config('testapi.url')
    nb_tests = get_config('general.nb_iteration_tests_success_criteria')

    url = (url_base + "?case=" + case +
           "&period=" + str(period) + "&installer=" + installer +
           "&version=" + version)
    if criteria:
        url += "&last=" + str(nb_tests)
    proxy = get_config('general.proxy')
    response = requests.get(url, proxies=proxy)
    try:
        results = json.loads(response.content)
    except Exception:  # pylint: disable=broad-except
        print("Error when retrieving results from API")

    return results
d54eaf785bc1e80e633cf3f6588f135c54425b79
3,645,079
def generate_noisy_gaussian(center, std_dev, height, x_domain, noise_domain, n_datapoints): """ Generate a gaussian with some aspect of noise. Input: center = central x value std_dev = standard deviation of the function height = height (y-off set) of the function noise_range = uniform random distribution of noise from perfect gauss function x_range = absolute domain of the gaussian function n_datapoints = total number of input datapoints of gaussian function Output: x_values,y_values x_values = the x-axial array of the gaussian function within the domain y_values = the y-axial array of the gaussian function within the domain """ # Type check. center = valid.validate_float_value(center) std_dev = valid.validate_float_value(std_dev, greater_than=0) height = valid.validate_float_value(height) x_domain = valid.validate_float_array(x_domain, shape=(2,), size=2) noise_domain = valid.validate_float_array(noise_domain, shape=(2,), size=2) n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0) # Generate the gaussian function and map to an output with the input # parameters. x_values, y_values = generate_gaussian(center, std_dev, height, x_domain=x_domain, n_datapoints=n_datapoints) # Imbue the gaussian with random noise. y_values = misc.generate_noise(y_values, noise_domain, distribution='uniform') return x_values, y_values
5120bb23be1b98663b61ad67df0aa43c61ed1714
3,645,080
from typing import Tuple def filter_group_delay( sos_or_fir_coef: np.ndarray, N: int = 2048, fs: float = None, sos: bool = True, ) -> Tuple[np.ndarray, np.ndarray]: """ Given filter spec in second order sections or (num, den) form, return group delay. Uses method in [1], which is cited by `scipy.signal.group_delay` but incorrectly implemented. Inputs: - sos_or_fir_coef: np.ndarray, second order section of iir filter or fir_coef of a FIR filter. - N: int, number of samples to calculate for the impulse and frequency response - fs: float, sampling rate in Hz. If not None, will return the frequency in Hz, otherwise normalized frequency will be returned. - sos: bool. If true, assume `sos_or_fir_coef` is sos, otherwise as fir_coef Output: - frequency: np.ndarray, frequency of the frequency response. If fs is None, unit will be in radians/sample (ranging from 0 to np.pi), otherwise will be in Hz (ranging from 0 to fs / 2). - group_delay: np.ndarray, group delay of filter as function of frequency, unit is in samples. [1] Richard G. Lyons, "Understanding Digital Signal Processing, 3rd edition", p. 830. """ impulse_response = filter_impulse_response(sos_or_fir_coef, N, sos=sos) k = np.arange(N) fft_gd = np.real(fft(k * impulse_response) / fft(impulse_response))[0 : N // 2] omega = (fftfreq(N) * 2 * np.pi)[0 : N // 2] if fs is not None: freq = omega_to_f(omega, fs)[0 : N // 2] return freq, fft_gd else: return omega, fft_gd
9cdcf30db5f1308dac27ce057f392fb38d805f1f
3,645,081
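Illustrative usage with a SciPy Butterworth filter in second-order-section form; the sampling rate and cutoff are arbitrary choices, and the helpers filter_impulse_response/omega_to_f are assumed to come from the same module as the function above.
from scipy.signal import butter

sos = butter(4, 100.0, btype="low", fs=1000.0, output="sos")
freq_hz, gd_samples = filter_group_delay(sos, N=2048, fs=1000.0, sos=True)
# gd_samples[i] is the group delay in samples at freq_hz[i]; plot or inspect as needed.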
import re def query(): """Perform a query on the dataset, where the search terms are given by the saleterm parameter""" # If redis hasn't been populated, stick some tweet data into it. if redis_db.get("tweet_db_status") != "loaded": tweet_scraper.add_tweets(default_num_tweets_to_try) sale_term = request.form['saleterm'] subterms = re.split('\W+', sale_term) saleterm_keys = ['saleterm-{}'.format(w) for w in subterms if len(w) > 1] result_dict = {} num_tweets = 0 if saleterm_keys: common_tweet_ids = redis_db.sinter(saleterm_keys) if common_tweet_ids: result_dict['tweets'] = [redis_db.hgetall(tweet_id) for tweet_id in common_tweet_ids] num_tweets = len(common_tweet_ids) result_dict['num_tweets'] = num_tweets result_dict['saleterm'] = sale_term resp = jsonify(result_dict) resp.status_code = 200 return resp
9cdb937d45b1314884afb0d53aee174ef160f8a8
3,645,082
def get_spec_res(z=2.2, spec_res=2.06, pix_size=1.8):
    """ Calculates the pixel size (pix_size) and spectral resolution (spec_res)
        in km/s for the MOCK SPECTRA.
        arguments:
        z, redshift.
        spec_res, spectral resolution in Angstrom.
        pix_size, pixel size in Angstrom.
        returns:
        (pixel_size, spec_res) in km/s
    """
    # conversion factor from Angstrom to km/s at any redshift
    conv_fac = 3e5*0.000823/(1+z)

    return(pix_size*conv_fac, spec_res*conv_fac)
597db8ce00c071624b0877fe211ab9b01ec889de
3,645,083
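Worked example at the defaults (z = 2.2): the Angstrom-to-km/s factor is 3e5 * 0.000823 / 3.2 ≈ 77.16 km/s per Angstrom, so the default pixel size and spectral resolution map to roughly 138.9 and 158.9 km/s.
pix_kms, res_kms = get_spec_res()            # defaults: z=2.2, spec_res=2.06 A, pix_size=1.8 A
print(round(pix_kms, 1), round(res_kms, 1))  # ~138.9 ~158.9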
from datetime import datetime
import os
def _process_general_config(config: ConfigType) -> ConfigType:
    """Process the `general` section of the config

    Args:
        config (ConfigType): Config object

    Returns:
        [ConfigType]: Processed config
    """
    general_config = deepcopy(config.general)
    general_config.id = general_config.id.replace("/", "_")
    if not general_config.commit_id:
        general_config.commit_id = utils.get_current_commit_id()
    if not general_config.date:
        general_config.date = datetime.now().strftime("%Y-%m-%d %H:%M")
    slurm_id = []
    env_var_names = ["SLURM_JOB_ID", "SLURM_STEP_ID"]
    for var_name in env_var_names:
        if var_name in os.environ:
            slurm_id.append(str(os.environ[var_name]))
    if slurm_id:
        general_config.slurm_id = "-".join(slurm_id)
    config.general = general_config
    return config
370367f718c3830964c6f5f276b1d699400fd1ab
3,645,084
def api_response(response): """Response generation for ReST API calls""" # Errors present if response.message: messages = response.message if not isinstance(messages, list): messages = [messages] # Report the errors return Response({'errors': messages}, status=status.HTTP_400_BAD_REQUEST) # All OK return Response(response.data) if not isinstance(response.data, HttpResponseBase) else response.data
f41bca36b1cabc6002f730b3b40170415baffc62
3,645,085
from tensorflow.python.training import moving_averages def batch_norm(name, inpvar, decay=0.9, epsilon=1e-5, use_affine=True, param_dtype=__default_dtype__): """ Batch normalization. :param name: operator name :param inpvar: input tensor, of data type NHWC :param decay: decay for moving average :param epsilon: epsilon :param use_affine: add affine transformation after the normalization (to preserve the bias and scale) :param param_dtype: param dtype :return: output tensor """ assign_moving_average = moving_averages.assign_moving_average inpvar = as_varnode(inpvar) shape = inpvar.static_shape assert len(shape) in [2, 4] nr_channels = shape[-1] if len(shape) == 2: inpvar = inpvar.reshape(-1, 1, 1, nr_channels) if use_affine: beta = O.variable('beta', tf.constant_initializer(), shape=[nr_channels], dtype=param_dtype) gamma = O.variable('gamma', tf.constant_initializer(1.0), shape=[nr_channels], dtype=param_dtype) else: beta = O.zeros([nr_channels], name='beta') gamma = O.ones([nr_channels], name='gamma') moving_mean = O.variable('mean/ema', tf.constant_initializer(), shape=[nr_channels], trainable=False) moving_var = O.variable('variance/ema', tf.constant_initializer(1.0), shape=[nr_channels], trainable=False) env = get_default_env() if env.flags.compute_update_batch_normalization(name): xn, batch_mean, batch_var = tf.nn.fused_batch_norm(inpvar, gamma, beta, epsilon=epsilon, is_training=True, name='bn') else: xn = tf.nn.batch_normalization(inpvar, moving_mean, moving_var, beta, gamma, variance_epsilon=epsilon, name='bn') if len(shape) == 2: xn = O.remove_axis(xn, [1, 2]) if env.flags.compute_update_batch_normalization(name) and \ (not env.has_current_dpc() or env.current_dpc.is_master_device): update_mean_op = assign_moving_average(moving_mean.impl, batch_mean, decay, zero_debias=False, name='mean_ema_op') update_var_op = assign_moving_average(moving_var.impl, batch_var, decay, zero_debias=False, name='var_ema_op') with tf.control_dependencies([update_mean_op, update_var_op]): return tf.identity(xn, name='out') else: return tf.identity(xn, name='out')
74565379d15d4ec7cfa647a4f7833328f2c86ac7
3,645,086
import os def get_engine(onnx_file_path, engine_file_path="", input_shapes=((1, 3, 640, 640)), force_rebuild=False): """Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it.""" assert len(input_shapes) in [1, 3], 'length of input_shapes should be 1 or 3, 3 for dynamic input size, got {}'.format(len(input_shapes)) def build_engine(): """Takes an ONNX file and creates a TensorRT engine to run inference with""" with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER) as parser, builder.create_builder_config() as config: builder.strict_type_constraints = True # builder.max_workspace_size = 1 << 30 # deprecated, use config to set max_workspace_size # builder.fp16_mode = True # deprecated, use config to set FP16 mode # builder.max_batch_size = 1 # deprecated, use EXPLICIT_BATCH config.set_flag(trt.BuilderFlag.FP16) config.max_workspace_size=GiB(1) # Parse model file # Try to load a previously generated graph in ONNX format: if not os.path.exists(onnx_file_path): print('ONNX file {} not found, please generate it first.'.format( onnx_file_path)) exit(0) print('Loading ONNX file from path {}...'.format(onnx_file_path)) with open(onnx_file_path, 'rb') as model: print('Beginning ONNX file parsing') if not parser.parse(model.read()): print('ERROR: Failed to parse the ONNX file.') for error in range(parser.num_errors): print(parser.get_error(error)) exit(0) # Reference: https://blog.csdn.net/weixin_43953045/article/details/103937295 last_layer = network.get_layer(network.num_layers - 1) if not last_layer.get_output(0): network.mark_output(last_layer.get_output(0)) print("input shape {}".format(network.get_input(0).shape)) network.get_input(0).shape = [1, 3, -1, -1] if len(input_shapes) != 1 else input_shapes[0] print('Completed parsing of ONNX file') print('Building an engine from file {}; this may take a while...'.format(onnx_file_path)) # ######################## SET DYNAMIC INPUT SHAPE ################################# if len(input_shapes) == 3: profile = builder.create_optimization_profile() profile.set_shape(network.get_input(0).name, *input_shapes) config.add_optimization_profile(profile) engine = builder.build_engine(network, config) else: engine = builder.build_cuda_engine(network) # ######################################################## print("Completed creating Engine") with open(engine_file_path, "wb") as f: f.write(engine.serialize()) return engine # return build_engine() if os.path.exists(engine_file_path) and not force_rebuild: # If a serialized engine exists, use it instead of building an engine. print("Reading engine from file {}".format(engine_file_path)) with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime: return runtime.deserialize_cuda_engine(f.read()) else: return build_engine()
57489225c3854408f880f024443811d59c88df9f
3,645,087
from typing import Callable def _window_when(closing_mapper: Callable[[], Observable]) -> Callable[[Observable], Observable]: """Projects each element of an observable sequence into zero or more windows. Args: source: Source observable to project into windows. Returns: An observable sequence of windows. """ def window_when(source: Observable) -> Observable: def subscribe(observer, scheduler=None): m = SerialDisposable() d = CompositeDisposable(m) r = RefCountDisposable(d) window = Subject() observer.on_next(add_ref(window, r)) def on_next(value): window.on_next(value) def on_error(error): window.on_error(error) observer.on_error(error) def on_completed(): window.on_completed() observer.on_completed() d.add(source.subscribe_(on_next, on_error, on_completed, scheduler)) def create_window_on_completed(): try: window_close = closing_mapper() except Exception as exception: observer.on_error(exception) return def on_completed(): nonlocal window window.on_completed() window = Subject() observer.on_next(add_ref(window, r)) create_window_on_completed() m1 = SingleAssignmentDisposable() m.disposable = m1 m1.disposable = window_close.pipe(ops.take(1)).subscribe_(noop, on_error, on_completed, scheduler) create_window_on_completed() return r return Observable(subscribe) return window_when
d0f51f8385b2d45f1cbd64649953c312247644eb
3,645,088
def generate_features(df): """Generate features for a stock/index based on historical price and performance Args: df(dataframe with columns "Open", "Close", "High", "Low", "Volume", "Adjusted Close") Returns: dataframe, data set with new features """ df_new = pd.DataFrame() # 6 original features df_new['open'] = df['Open'] df_new['open_1'] = df['Open'].shift(1) # Shift index by 1, in order to take the value of previous day. For example, [1, 3, 4, 2] -> [N/A, 1, 3, 4] df_new['close_1'] = df['Close'].shift(1) df_new['high_1'] = df['High'].shift(1) df_new['low_1'] = df['Low'].shift(1) df_new['volume_1'] = df['Volume'].shift(1) # 31 original features # average price df_new['avg_price_5'] = df['Close'].rolling(5).mean().shift(1) df_new['avg_price_30'] = df['Close'].rolling(21).mean().shift(1) df_new['avg_price_365'] = df['Close'].rolling(252).mean().shift(1) df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30'] df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365'] df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365'] # average volume df_new['avg_volume_5'] = df['Volume'].rolling(5).mean().shift(1) df_new['avg_volume_30'] = df['Volume'].rolling(21).mean().shift(1) df_new['avg_volume_365'] = df['Volume'].rolling(252).mean().shift(1) df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30'] df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365'] df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365'] # standard deviation of prices df_new['std_price_5'] = df['Close'].rolling(5).std().shift(1) # rolling_mean calculates the moving standard deviation given a window df_new['std_price_30'] = df['Close'].rolling(21).std().shift(1) df_new['std_price_365'] = df['Close'].rolling(252).std().shift(1) df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30'] df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365'] df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365'] # standard deviation of volumes df_new['std_volume_5'] = df['Volume'].rolling(5).std().shift(1) df_new['std_volume_30'] = df['Volume'].rolling(21).std().shift(1) df_new['std_volume_365'] = df['Volume'].rolling(252).std().shift(1) df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30'] df_new['ratio_std_volume_5_365'] = df_new['std_volume_5'] / df_new['std_volume_365'] df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365'] # return df_new['return_1'] = ((df['Close'] - df['Close'].shift(1)) / df['Close'].shift(1)).shift(1) df_new['return_5'] = ((df['Close'] - df['Close'].shift(5)) / df['Close'].shift(5)).shift(1) df_new['return_30'] = ((df['Close'] - df['Close'].shift(21)) / df['Close'].shift(21)).shift(1) df_new['return_365'] = ((df['Close'] - df['Close'].shift(252)) / df['Close'].shift(252)).shift(1) df_new['moving_avg_5'] = df_new['return_1'].rolling(5).mean() df_new['moving_avg_30'] = df_new['return_1'].rolling(21).mean() df_new['moving_avg_365'] = df_new['return_1'].rolling(252).mean() # the target df_new['close'] = df['Close'] df_new = df_new.dropna(axis=0) # This will drop rows with any N/A value, which is by-product of moving average/std. return df_new
ec64c9562287e0dd32b7cfd07c477acd8d799dc3
3,645,089
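Minimal sketch of feeding the feature builder; the only real requirement is a daily price DataFrame with the OHLCV column names used above, so the CSV path here is just a placeholder.
import pandas as pd

raw = pd.read_csv("prices.csv", index_col="Date", parse_dates=True)  # placeholder file
features = generate_features(raw)
X = features.drop("close", axis=1)   # predictors built from lagged prices/volumes
y = features["close"]                # target: the un-shifted close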
from typing import Dict from typing import Union from typing import Optional def format_plate(barcode: str) -> Dict[str, Union[str, bool, Optional[int]]]: """Used by flask route /plates to format each plate. Determines whether there is sample data for the barcode and if so, how many samples meet the fit to pick rules. Arguments: barcode (str): barcode of plate to get sample information for. Returns: Dict[str, Union[str, bool, Optional[int]]]: sample information for the plate barcode """ logger.info(f"Getting information for plate with barcode: {barcode}") ( fit_to_pick_samples, count_fit_to_pick_samples, count_must_sequence, count_preferentially_sequence, count_filtered_positive, ) = get_fit_to_pick_samples_and_counts(barcode) return { "plate_barcode": barcode, "has_plate_map": fit_to_pick_samples is not None and len(fit_to_pick_samples) > 0, "count_fit_to_pick_samples": count_fit_to_pick_samples if count_fit_to_pick_samples is not None else 0, "count_must_sequence": count_must_sequence if count_must_sequence is not None else 0, "count_preferentially_sequence": count_preferentially_sequence if count_preferentially_sequence is not None else 0, "count_filtered_positive": count_filtered_positive if count_filtered_positive is not None else 0, }
5508ee508ef6d2a8329a2899bf9e90c9ac399874
3,645,090
def method_only_in(*states): """ Checks if function has a MethodMeta representation, calls wrap_method to create one if it doesn't and then adds only_in to it from *states Args: *args(list): List of state names, like DefaultStateMachine.RESETTING Returns: function: Updated function """ def decorator(func): if not hasattr(func, "MethodMeta"): MethodMeta.wrap_method(func) func.MethodMeta.only_in = states return func return decorator
33fbd619deb4b2a1761b3bf7f860ed2ae728df44
3,645,091
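Illustrative decoration of a hypothetical controller method; the state names mirror the docstring's DefaultStateMachine example and are not taken from the real project.
class Controller:
    @method_only_in("Resetting", "Disabled")
    def reset(self):
        """Only callable while the state machine is Resetting or Disabled."""


# After decoration, Controller.reset.MethodMeta.only_in == ("Resetting", "Disabled")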
def to_igraph(adjacency_matrix:Image, centroids:Image=None):
    """
    Converts a given adjacency matrix to an iGraph [1] graph data structure.

    Note: the given centroids typically have one entry less than the adjacency matrix is wide,
    because those matrices contain a first row and column representing background.
    When exporting the iGraph graph, that first column will be ignored.

    Parameters
    ----------
    adjacency_matrix : Image
        m*m touch-matrix, proximal-neighbor-matrix or n-nearest-neighbor-matrix
    centroids : Image, optional
        d*(m-1) matrix, position list of centroids

    Returns
    -------
    iGraph graph

    See Also
    --------
    ..[1] https://igraph.org/
    """
    try:
        import igraph
    except ImportError:
        raise ImportError("igraph is not installed. Please refer to the documentation https://igraph.org/python/")

    igraph_graph = igraph.Graph(adjacency_matrix.shape[0] - 1)

    edge_list = np.nonzero(np.asarray(adjacency_matrix)[1:,1:])
    igraph_graph.add_edges(np.asarray(edge_list).T)

    if centroids is not None:
        igraph_graph.vs['x'] = centroids[0]
        igraph_graph.vs['y'] = centroids[1]
        if centroids.shape[0] > 2: # 3D data
            igraph_graph.vs['z'] = centroids[2]

    return igraph_graph
e0cac1dd85b79b30e3f7e3139201b97e092603eb
3,645,092
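A small, self-contained sketch using plain NumPy arrays in place of the Image type (np.asarray inside the function makes that work); the 4x4 touch matrix below encodes labels 1-3 with background in row/column 0, and the centroid values are made up.
import numpy as np

touch_matrix = np.array([
    [0, 0, 0, 0],
    [0, 0, 1, 0],   # label 1 touches label 2
    [0, 1, 0, 1],   # label 2 touches labels 1 and 3
    [0, 0, 1, 0],
])
centroids = np.array([
    [10.0, 20.0, 30.0],   # x positions of labels 1..3
    [ 5.0, 15.0, 25.0],   # y positions of labels 1..3
])
graph = to_igraph(touch_matrix, centroids)
print(graph.summary())   # 3 vertices with x/y attributes; symmetric edges appear twice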
def Laplacian(src, ddepth, dst=None, ksize=1, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT): """dst = cv.Laplacian( src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]] ) Executes the Laplacian operator on hardware if input parameters fit to hardware constraints. Otherwise the OpenCV Laplacian function is called.""" if (ksize == 1 or ksize ==3 or ksize == 5) and (scale == 1) and (delta == 0) and (borderType == cv2.BORDER_DEFAULT) : if (src.dtype == np.uint8) and (src.ndim == 2) : if (src.shape[0] <= cv2pynq.MAX_HEIGHT) and (src.shape[0] > 0) and (src.shape[1] <= cv2pynq.MAX_WIDTH) and (src.shape[1] > 0) : if (ddepth == -1) : return c.Laplacian(src, ddepth, dst, ksize) return cv2.Laplacian(src, ddepth, dst, ksize, scale, delta, borderType)
88237f83ed9b2159829f4a9b194c18007699c1a9
3,645,093
def get_docptr(n_dw_matrix): """ Parameters ---------- n_dw_matrix: array-like Returns ------- np.array row indices for the provided matrix """ return _get_docptr(n_dw_matrix.shape[0], n_dw_matrix.indptr)
7a20ca17f16475d6fd836bb5b7b70221f5cf4378
3,645,094
def check_if_shift_v0(data, column_name, start_index, end_index, check_period): """ using median to see if it changes significantly in shift """ period_before = data[column_name][start_index - check_period: start_index] period_in_the_middle = data[column_name][start_index:end_index] period_after = data[column_name][end_index: end_index + check_period] period_before_median = abs(np.nanmedian(period_before)) period_in_the_middle_median = abs(np.nanmedian(period_in_the_middle)) period_after_median = abs(np.nanmedian(period_after)) upper_threshold = period_in_the_middle_median * 2 down_threshold = period_in_the_middle_median / 2 if (upper_threshold < period_before_median and upper_threshold < period_after_median) or\ (down_threshold > period_before_median and down_threshold > period_after_median): return True else: return False
e73629dae7d6cce70b344f24acb98a3ae24c4e64
3,645,095
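A quick synthetic check (numpy assumed as np; any dict- or DataFrame-like object indexable by column name works): a segment whose median drops well below its neighbours is flagged as a shift.
import numpy as np

signal = np.concatenate([np.full(50, 10.0), np.full(20, 4.0), np.full(50, 10.0)])
data = {"value": signal}
print(check_if_shift_v0(data, "value", start_index=50, end_index=70, check_period=30))  # True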
def opening2d(value, kernel, stride=1, padding="SAME"): """ erode and then dilate Parameters ---------- value : Tensor 4-D with shape [batch, in_height, in_width, depth]. kernel : Tensor Must have the same type as 'value'. 3-D with shape '[kernel_height, kernel_width, depth]' stride : int The stride of the sliding window for the spatial dimensions '[1, 2]' of the input tensor. padding : string from '"SAME", "VALID"'. The type of padding algorithm to use. Returns ------- out : tensor opened output """ strides = [1, stride, stride, 1] rates = [1, 1, 1, 1] out = tf.nn.erosion2d(value, kernel, strides, rates, padding) out = tf.nn.dilation2d(out, kernel, strides, rates, padding) return out
b425735dacceac825b4394fdc72a744b168acc91
3,645,096
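This helper targets the TF 1.x morphology API (erosion2d/dilation2d taking a rates argument), so the sketch below assumes that environment plus NumPy; the random binary image is purely illustrative.
import numpy as np
import tensorflow as tf

binary = (np.random.rand(1, 32, 32, 1) > 0.5).astype(np.float32)
value = tf.constant(binary)                      # [batch, H, W, depth]
kernel = tf.zeros([3, 3, 1], dtype=tf.float32)   # flat 3x3 structuring element
opened = opening2d(value, kernel, stride=1, padding="SAME")
# In TF 1.x, evaluate `opened` inside a session to obtain the opened image.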
def convert_npy_mat(user_num, item_num, df): """ method of convert dataframe to numpy matrix Parameters ---------- user_num : int, the number of users item_num : int, the number of items df : pd.DataFrame, rating dataframe Returns ------- mat : np.matrix, rating matrix """ mat = np.zeros((user_num, item_num)) for _, row in df.iterrows(): u, i, r = row['user'], row['item'], row['rating'] mat[int(u), int(i)] = float(r) return mat
627fcc45a490be1554445582dc8a2312e25b1152
3,645,097
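Toy usage; the 'user'/'item'/'rating' column names follow the docstring, and the ids are assumed to already be zero-based integers within the given dimensions.
import pandas as pd

ratings = pd.DataFrame({'user': [0, 0, 1], 'item': [1, 2, 0], 'rating': [4.0, 3.5, 5.0]})
mat = convert_npy_mat(user_num=2, item_num=3, df=ratings)
# mat[0, 1] == 4.0, mat[1, 0] == 5.0, and every unrated cell stays 0.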
def user_enter_state_change_response(): """ Prompts the user to enter a key event response. nothing -> str """ return input('>> ')
22da5cb99fa603c3dff04e8afd03cb9fae8210cd
3,645,098
def call_worker(job_spec): """Calls command `cron_worker run <job_spec>` and parses the output""" output = call_command("cron_worker", "run", job_spec) status = exc_class_name = exc_message = None if output: result_match = RESULT_PATTERN.match(output) if result_match: status = result_match.group("status") else: exc_match = EXCEPTION_PATTERN.match(output) if exc_match: exc_class_name = exc_match.group("exc_class_name") exc_message = exc_match.group("exc_message") ok = status == "OK" return CronWorkerRunResult(output, status, exc_class_name, exc_message, ok)
5a914c742319e2528b1668309ff57e507efd26bb
3,645,099