content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import os


def create_app() -> Flask:
    """Create and configure the Flask application.

    Reads the settings object path from the APP_SETTINGS environment
    variable, wires up the database, migrations, blueprints and error
    handlers, and registers a shell context.

    Returns:
        Flask: the fully configured application instance.
    """
    # Bug fix: the application object was referenced before being
    # created; instantiate it first.
    app = Flask(__name__)

    app_settings = os.getenv('APP_SETTINGS')
    app.config.from_object(app_settings)

    db.init_app(app)
    migrate.init_app(app, db)
    register_blueprints(app)

    # Map HTTP error codes to the project's JSON error handlers.
    app.register_error_handler(400, handle_validation_errors)
    app.register_error_handler(404, handle_not_found_error)
    app.register_error_handler(422, handle_validation_errors)
    app.register_error_handler(500, handle_internal_server_error)

    @app.shell_context_processor
    def ctx():
        # Convenience names available in `flask shell`.
        return {'app': app, 'db': db}

    return app
53924e1fbb9332cfa940771cd57117c569fa9555
25,900
def roll(y, z):
    """Estimate angular roll from gravitational acceleration.

    Args:
        y, z (float, int, array-like): y and z acceleration components.

    Returns:
        (float, int, array-like): roll angle in degrees.
    """
    radians = np.arctan2(y, z)
    return radians * 180 / np.pi
ccb0bf948baf7fee9853f4b842139e8a964c25b6
25,901
def eq_to_az_za(ra, dec, lst, latitude=HERA_LATITUDE):
    """
    Convert equatorial (RA/Dec) coordinates to azimuth and zenith angle.

    Basic expression from
    http://star-www.st-and.ac.uk/~fv/webnotes/chapter7.htm

    Parameters
    ----------
    ra, dec : array_like, float
        RA and Dec in radians.
    lst : array_like, float
        Local sidereal time, in radians.
    latitude : float, optional
        Latitude, in radians. Default: HERA_LATITUDE.

    Returns
    -------
    za : array_like, float
        Zenith angle, in radians.
    az : array_like, float
        Azimuth, in radians.
    """
    lat = latitude
    # Altitude from the standard spherical-triangle relation.
    sin_alt = tf.math.sin(dec) * tf.math.sin(lat) \
        + tf.math.cos(dec) * tf.math.cos(lat) * tf.math.cos(lst - ra)
    alt = tf.math.asin(sin_alt)
    sin_az = tf.math.sin(ra - lst) * tf.math.cos(dec) / tf.math.cos(alt)
    cos_az = (tf.math.sin(dec) - tf.math.sin(lat) * sin_alt) \
        / (tf.math.cos(lat) * tf.math.cos(alt))
    # Bug fix (the old FIXME): sign(asin(sin_az)) * acos(cos_az) returned
    # az = 0 whenever sin_az == 0, even for a source due south where the
    # azimuth should be pi. atan2 resolves the quadrant correctly in all
    # cases, including sin_az == 0.
    za = tf.constant(0.5 * np.pi, dtype=FLOAT_TYPE) - alt
    az = tf.math.atan2(sin_az, cos_az)
    return za, az
e8c847779cfd440c5df37e4d80cf650a87c0c2c3
25,902
def log_cc(image, sigma, threshold):
    """Label connected regions above a fixed threshold on a LoG-filtered image.

    Parameters
    ----------
    image : np.ndarray
        Image with shape (z, y, x) or (y, x).
    sigma : float, int or tuple
        Sigma for the gaussian filter (one per dimension); a scalar is
        applied to every dimension.
    threshold : float or int
        Peak detection threshold; treated as relative when a float.

    Returns
    -------
    cc : np.ndarray, np.int64
        Labelled image with shape (z, y, x) or (y, x).
    """
    # Validate inputs before any processing.
    stack.check_array(image, ndim=[2, 3], dtype=[np.uint8, np.uint16, np.float32, np.float64])
    stack.check_parameter(sigma=(float, int, tuple), threshold=(float, int))

    # Laplacian-of-Gaussian filter, keeping the original dtype.
    filtered = stack.log_filter(image, sigma, keep_dtype=True)

    # Extract connected components above the threshold.
    # TODO return coordinate of the centroid
    return get_cc(filtered, threshold)
6a0347b846e5bc1c8d9557d66142a8c3b834db9b
25,903
def check_valid_move(grid: np.ndarray, current_position: tuple, move: tuple) -> bool:
    """Check whether applying `move` from `current_position` is valid.

    A move is valid when the target cell's coordinates are strictly
    greater than (0, 0) and strictly less than grid.shape (note: row or
    column 0 is treated as out of bounds), and the target cell does not
    hold an obstacle ('x').

    :param grid: validated grid array
    :param current_position: current position
    :param move: move offset as a tuple
    :return: True if the move is valid, False otherwise
    """
    target = tuple(np.add(current_position, move))

    def strictly_less(a: tuple, b: tuple) -> bool:
        """True when every coordinate of a is smaller than in b."""
        return all(np.array(a) < np.array(b))

    # Bounds check first, so the obstacle lookup cannot index outside.
    if not (strictly_less((0, 0), target) and strictly_less(target, grid.shape)):
        return False
    # Obstacle check.
    if grid[target] == 'x':
        return False
    return True
60f58c618a01aad744e1ae7c6d425fc69db20686
25,904
def check_valid_game(season, game):
    """Return whether the given game id appears in the season schedule.

    :param season: int, season
    :param game: int, game
    :return: bool
    """
    # get_game_status raises IndexError for games absent from the schedule.
    try:
        get_game_status(season, game)
    except IndexError:
        return False
    return True
5bc95a2dc397b933c1e5716eb5a7e79641b87968
25,905
def console(session_console):
    """Return a root console.

    Be sure to use this fixture if the GUI needs to be initialized for a
    test.
    """
    console = session_console
    # A successful flush (return code 0) confirms the root console exists.
    assert libtcodpy.console_flush() == 0
    # Reset drawing state to known defaults so each test starts clean.
    libtcodpy.console_set_default_foreground(console, libtcodpy.white)
    libtcodpy.console_set_default_background(console, libtcodpy.black)
    libtcodpy.console_set_background_flag(console, libtcodpy.BKGND_SET)
    libtcodpy.console_set_alignment(console, libtcodpy.LEFT)
    libtcodpy.console_clear(console)
    return console
25c13c549a40c24f7abc90ecb7c303bbc791643b
25,906
def conv(out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
    """Creates a convolutional layer, with optional batch normalization.

    :param out_channels: number of output channels (filters)
    :param kernel_size: size of the convolution kernel
    :param stride: stride of the convolution (default 2)
    :param padding: NOTE(review): currently ignored — the layer
        hard-codes padding='same'; confirm whether it should be wired
        through.
    :param batch_norm: if True, append a BatchNormalization layer
    :return: list of layers ([conv] or [conv, batch_norm])
    """
    layers = []
    conv_layer = Conv2D(out_channels, kernel_size, strides = stride, padding = 'same', use_bias = False, data_format = "channels_first")
    # bias is set to False, so the layers are not offset by any amount
    # append conv layer
    layers.append(conv_layer)
    if batch_norm:
        # append batchnorm layer
        layers.append(BatchNormalization())
    ## rtype: List[conv_layer, batch_norm] or List[conv_layer]
    return layers
fe568a8b3cd5092db6751677f1accd3d73e36e77
25,907
def t90_from_t68(t68):
    """Convert IPTS-68 temperature to the ITS-90 scale.

    This conversion should be applied to all in-situ data collected
    between 1/1/1968 and 31/12/1989.
    """
    # Standard linear scale-conversion factor between the two scales.
    conversion_factor = 1.00024
    return t68 / conversion_factor
a2d8c7ccc0797d47fa8f732bdb61c1ec1e15700e
25,908
def prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    raw_bytes = ElementTree.tostring(elem, "utf-8")
    dom = minidom.parseString(raw_bytes)
    return dom.toprettyxml(indent=" ")
b7686ca0b6d6def2e86f465fa0eb5726fe29d2ab
25,909
import re
import json


def course_search(search_str, include_inactive=False, debug=False):
    """Parse a search string into institution, discipline, and catalog_number,
    then find all matching courses and return a JSON array of catalog entries.

    Search strings by example:
        qns csci *      All CSCI courses at QC
        * csci 101      CSCI-101 at any college
        QNS01 CSCI101   CSCI 101 at QC
        QNS csci-101    CSCI 101 at QC
    """
    if debug:
        # NOTE(review): "couirse_search" is a typo in this debug message.
        print(f'\n*** couirse_search("{search_str}", {include_inactive})')
    parts = search_str.split()
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError('invalid search string')
    institution = parts[0]
    # "CSCI101" / "csci-101" forms: split letters from the number.
    if len(parts) == 2:
        discipline, catalog_number = re.match(r'^\s*([a-z]+)-?(.+)\s*$', parts[1], re.I).groups()
    else:
        discipline, catalog_number = parts[1], parts[2]
    # Keep only "*" or the numeric part of the catalog number.
    catalog_number = re.match(r'^\s*(\*|[\d\.]+)\D*$', catalog_number).group(1)
    # Status filter: active only, or active + inactive.
    if include_inactive:
        status_str = "course_status = 'A' or course_status = 'I'"
    else:
        status_str = "course_status = 'A' and can_schedule = 'Y'"
    # SECURITY NOTE(review): `institution` is interpolated directly into
    # the SQL text; it should be passed as a bound parameter like
    # `discipline` to avoid SQL injection.
    if institution != '*':
        institution_str = f"and institution ~* '{institution}'"
    else:
        institution_str = ''
    # Match catalog numbers within +/- 0.5 of the requested value.
    if catalog_number != '*':
        under = float(catalog_number) - 0.5
        over = under + 1.0
        cat_num_str = """and numeric_part(catalog_number) > {} and numeric_part(catalog_number) < {} """.format(under, over)
    else:
        cat_num_str = ''
    query = f""" select course_id, offer_nbr, course_status, institution, discipline, catalog_number from cuny_courses where {status_str} {institution_str} and discipline ~* %s {cat_num_str} """
    conn = PgConnection()
    cursor = conn.cursor()
    cursor.execute(query, (discipline, ))
    return_list = []
    for row in cursor.fetchall():
        # lookup_course returns a pair; element [1] is the catalog entry.
        return_list.append(lookup_course(row.course_id, offer_nbr=row.offer_nbr)[1])
    conn.close()
    return json.dumps(return_list)
42468dfc70dfda19cf776aab97fc17c1f5978747
25,910
def _pad( s: str, bs: int, ) -> str: """Pads a string so its length is a multiple of a specified block size. :param s: The string that is to be padded :type s: str :param bs: The block size :type bs: int :returns: The initial string, padded to have a length that is a multiple of the specified block size :rtype: str """ number_of_bytes_to_pad = bs - len(s) % bs ascii_string = chr(number_of_bytes_to_pad) padding_str = number_of_bytes_to_pad * ascii_string return s + padding_str
1da441d51c57da688ebcf46b7a30feb36cd007fe
25,911
import json


def get_json(headers) -> str:
    """Serialize a headers mapping into a JSON-formatted string."""
    body: dict = {key: value for key, value in headers.items()}
    return json.dumps(body, indent=2)
8471f044ae986acd2173d5e9be26c110ee1b1976
25,912
from os import path


def get_targets(units, assembly_basename, outdir, extensions = ['.fasta.dammit.gff3', '.fasta.dammit.fasta'], se_ext = ['se'], pe_ext = ['pe']):
    """
    Use the sample info provided in the tsv file to generate required
    targets for dammit.

    Bug fix: the original imported `path` from `sys` — `sys.path` is a
    list and has no `join`; the intended module is `os.path`.

    :param units: unused here (kept for interface compatibility)
    :param assembly_basename: basename of the assembly
    :param outdir: output directory root
    :param extensions: dammit output extensions to expect
    :param se_ext, pe_ext: unused here (kept for interface compatibility)
    :return: list of expected dammit output paths
    """
    dammit_dir = assembly_basename + ".fasta.dammit"
    outdir = path.join(outdir, dammit_dir)
    dammit_targs = [assembly_basename + ext for ext in extensions]
    return [path.join(outdir, targ) for targ in dammit_targs]
8edd84c9012c20d55f0087ae34eb9bf64a91b686
25,913
from typing import Any


def is_jsonable_object(obj: Any) -> bool:
    """
    Return `True` if ``obj`` is a jsonable object.

    Accepts either a class or an instance; instances are checked via
    their type.
    """
    klass = obj if isinstance(obj, type) else type(obj)
    # A class is jsonable when its pheres marker attribute holds ObjectData.
    return isinstance(getattr(klass, PHERES_ATTR, None), ObjectData)
f0492544c88efc46d135bc913bc7f8dd7a7f7462
25,914
def Dir(obj):
    """As the standard dir, but also listup fields of COM object

    Create COM object with [win32com.client.gencache.EnsureDispatch]
    for early-binding to get what methods and params are available.
    """
    attributes = dir(obj)
    try:
        # Early-bound COM objects expose their dispatch interface here.
        if hasattr(obj, '_dispobj_'):
            attributes += dir(obj._dispobj_)
    finally:
        # Return whatever was collected, even if introspection failed.
        return attributes
8abc62fbe09e953fb171626a888838e21346ad9e
25,915
def image_classify(request):
    """
    Image classification upload endpoint.

    :param request: Django request; expects a POST with an 'img' file.
    :return: JsonResponse describing the uploaded image, or an error.
    """
    if request.method == 'POST':
        # Get upload image
        img = request.FILES.get('img', None)
        if img:
            return JsonResponse(dict(name=img.name, size=img.size))
        else:
            return JsonResponse(dict(code=401, msg='Bad request'))
    # Bug fix: non-POST requests previously fell through and returned
    # None, which makes Django raise "view didn't return an HttpResponse".
    return JsonResponse(dict(code=405, msg='Method not allowed'))
4f459f7a7afd90b1c6de7f174b4926d0d90b35cb
25,916
def calculate_performance_indicators_V1(df):
    """Compute indicators of performances from df of predictions and GT:
    - MAE: absolute distance of predicted value to ground truth
    - Accuracy: 1 if predicted value falls within class boundaries

    Note: Predicted and ground truths coverage values are ratios between 0 and 1.
    """
    # round to 3rd to avoid artefacts like 0.8999999 for 0.9 as key of dict
    # Bug fix: `np.float` was removed in NumPy 1.24; use the builtin float.
    df[["vt_veg_b", "vt_veg_moy", "vt_veg_h"]] = (
        df[["vt_veg_b", "vt_veg_moy", "vt_veg_h"]].astype(float).round(3)
    )

    # MAE errors
    df["error_veg_b"] = (df["pred_veg_b"] - df["vt_veg_b"]).abs()
    df["error_veg_moy"] = (df["pred_veg_moy"] - df["vt_veg_moy"]).abs()
    df["error_veg_h"] = (df["pred_veg_h"] - df["vt_veg_h"]).abs()
    df["error_veg_b_and_moy"] = df[["error_veg_b", "error_veg_moy"]].mean(axis=1)
    df["error_all"] = df[["error_veg_b", "error_veg_moy", "error_veg_h"]].mean(axis=1)

    # Accuracy
    try:
        df["acc_veg_b"] = df.apply(
            lambda x: compute_accuracy(x.pred_veg_b, x.vt_veg_b), axis=1
        )
        df["acc_veg_moy"] = df.apply(
            lambda x: compute_accuracy(x.pred_veg_moy, x.vt_veg_moy), axis=1
        )
        df["acc_veg_h"] = df.apply(
            lambda x: compute_accuracy(x.pred_veg_h, x.vt_veg_h), axis=1
        )
        df["acc_veg_b_and_moy"] = df[["acc_veg_b", "acc_veg_moy"]].mean(axis=1)
        # NOTE(review): unlike error_all, acc_all averages only veg_b and
        # veg_moy (veg_h is excluded) — confirm this is intentional.
        df["acc_all"] = df[["acc_veg_b", "acc_veg_moy"]].mean(axis=1)
    except KeyError:
        logger.info(
            "Cannot calculate class-based performance indicators due to continuous ground truths."
        )
    return df
f5c374ffb558eaf65a4c29894fbcb831162a451d
25,917
def PH2_Calc(KH2, tH2, Kr, I, qH2):
    """
    Calculate PH2.

    :param KH2: hydrogen valve constant [kmol.s^(-1).atm^(-1)]
    :type KH2 : float
    :param tH2: hydrogen time constant [s]
    :type tH2 : float
    :param Kr: modeling constant [kmol.s^(-1).A^(-1)]
    :type Kr : float
    :param I: cell load current [A]
    :type I : float
    :param qH2: molar flow of hydrogen [kmol.s^(-1)]
    :type qH2 : float
    :return: PH2 [atm] as float
    """
    try:
        # Valve gain scaled by the time constant, applied to the net
        # hydrogen flow (inflow minus consumption 2*Kr*I).
        gain = (1 / KH2) / (1 + tH2)
        net_flow = qH2 - 2 * Kr * I
        return gain * net_flow
    except (TypeError, ZeroDivisionError):
        print(
            "[Error] PH2 Calculation Failed (KH2:%s, tH2:%s, Kr:%s, I:%s, qH2:%s)"
            % (str(KH2), str(tH2), str(Kr), str(I), str(qH2)))
fe69353bfdde4f301439b89f9946782457d07645
25,918
import scipy


def log_transform(image):
    """Renormalize image intensities to log space

    Returns a tuple of transformed image and a dictionary to be passed into
    inverse_log_transform. The minimum and maximum from the dictionary
    can be applied to an image by the inverse_log_transform to
    convert it back to its former intensity values.
    """
    orig_min, orig_max = scipy.ndimage.extrema(image)[:2]
    #
    # We add 1/2 bit noise to an 8 bit image to give the log a bottom
    #
    limage = image.copy()
    # NOTE(review): np.finfo requires a float dtype; an integer image
    # would raise here — confirm callers always pass float images.
    noise_min = orig_min + (orig_max - orig_min) / 256.0 + np.finfo(image.dtype).eps
    # Clamp everything below the floor so log() never sees <= 0 values.
    limage[limage < noise_min] = noise_min
    d = {"noise_min": noise_min}
    limage = np.log(limage)
    log_min, log_max = scipy.ndimage.extrema(limage)[:2]
    d["log_min"] = log_min
    d["log_max"] = log_max
    # `stretch` (defined elsewhere) presumably rescales the log image —
    # verify its contract before relying on the output range.
    return stretch(limage), d
8e8d6779b313c7ff02e7aafa291e4d2abd687ac1
25,919
from bs4 import BeautifulSoup
from typing import Optional


def parse_cpu(website: BeautifulSoup, product_id: int) -> Optional[CPU]:
    """Parses the given Intel ARK website for a CPU."""
    # The processor number is the only reliable identifier; CPUs too old
    # to have one are skipped.
    node = website.find(attrs={"data-key": "ProcessorNumber"})
    if node is None:
        return None
    model = node.string.strip().casefold()

    # Core count is a plain integer such as "4" or "8".
    corecount = int(website.find(attrs={"data-key": "CoreCount"}).string)

    # Clock speed looks like "4.2 GHz" or " 1337.42 MHz".
    speed_parts = website.find(attrs={"data-key": "ClockSpeed"}).string.strip().split()
    corespeed = human_readable_to_hertz(float(speed_parts[0]), speed_parts[1])

    return CPU(model, product_id, "intel", corecount, corespeed)
1e7d068caba63947c39ce3a2391009986c5d6ad3
25,920
from typing import List


def create_result_dict(
    begin_date: str,
    end_date: str,
    total_downloads: int,
    downloads_per_country: List[dict],
    multi_row_columns: dict,
    single_row_columns: dict,
) -> dict:
    """Create one result dictionary with info on downloads for a specific
    eprint id in a given time period.

    :param begin_date: The begin date of download period
    :param end_date: The end date of download period
    :param total_downloads: Total of downloads in that period
    :param downloads_per_country: List of downloads per country
    :param multi_row_columns: Dict of column names & values for columns that
        have values over multiple rows of an eprintid
    :param single_row_columns: Dict of column names & values for columns that
        have values only in the first row of an eprint id
    :return: Results dictionary, with empty-string values replaced by None
    """
    merged = dict(
        begin_date=begin_date,
        end_date=end_date,
        total_downloads=total_downloads,
        downloads_per_country=downloads_per_country,
        **multi_row_columns,
        **single_row_columns,
    )
    # Empty strings would show up as columns in the BigQuery table; map
    # them to None so they are dropped.
    return {key: (None if value == "" else value) for key, value in merged.items()}
59ed6c40e98a8a68e1914f8f14b992b702851ccd
25,921
def getConcentricCell(cellNum, matNum, density, innerSurface, outerSurface, universe, comment):
    """Create a cell which has multiple components inside a cell.

    :param cellNum: cell number
    :param matNum: material number
    :param density: material density (rounded to 5 decimals)
    :param innerSurface: single surface id or list of surface ids
    :param outerSurface: outer bounding surface id (negated in the card)
    :param universe: universe id; a u= card is emitted only for ints
    :param comment: trailing comment text
    :return: formatted cell card string
    """
    # Universe card only applies for integer universe ids.
    uCard = 'u=' + str(universe) if type(universe) is int else ''

    # A list of inner surfaces is flattened into one string, breaking to
    # a new line after every 5th surface.
    if type(innerSurface) is list:
        pieces = []
        for position, surface in enumerate(innerSurface, start=1):
            if position % 5 == 0:
                pieces.append(' {}\n '.format(surface))
            else:
                pieces.append(' {}'.format(surface))
        innerSurface = ''.join(pieces)

    return "{} {} {} {} -{} {} imp:n=1 {}".format(cellNum, matNum, round(density, 5), innerSurface, outerSurface, uCard, comment)
f0e8af3210774500eac0fde195896f3b85473e3f
25,922
import os
import glob


def is_downloaded(folder):
    """
    Return whether CIFAR has been downloaded.

    The download is considered complete when `folder` exists and holds
    exactly 8 entries.

    :param folder: directory to check
    :return: bool
    """
    # Bug fix: `glob` is the module object; the matching function is
    # `glob.glob` (calling the module raised TypeError).
    return os.path.isdir(folder) and len(glob.glob(os.path.join(folder, '*'))) == 8
ddae821b88eaec059dba66bbfefe4bd2d90707d5
25,923
from pathlib import Path
import subprocess


def main():
    """Validates individual trigger files within the raidboss Cactbot module.

    Current validation only checks that the trigger file successfully compiles.

    Returns:
        An exit status code of 0 or 1 if the tests passed successfully or failed,
        respectively.
    """
    exit_status = 0
    # Every .js trigger file under the raidboss data directory is run
    # through every test script; any non-zero exit code is OR-ed into
    # the final status so a single failure fails the whole run.
    for filepath in Path(CactbotModule.RAIDBOSS.directory(), DATA_DIRECTORY).glob("**/*.js"):
        # Run individual trigger tests
        for test_file in TRIGGER_TEST_DIRECTORY.iterdir():
            exit_status |= subprocess.call(["node", str(test_file), str(filepath)])

    return exit_status
b67137a115fa88334cd5e44b7715f19d64482045
25,924
from tqdm import tqdm


def weight_compression(weights, bits, axis=0, quantizer=None):
    """Creates an in, out table that maps weight values to their codebook values.

    Based on the idea presented by https://arxiv.org/pdf/1911.02079.pdf

    Arguments:
        weights: Numpy array
        bits: Number of bits to compress weights to. This will results in
            2**bits codebook values
        axis: axis to apply quantization by
        quantizer: quantizer function that will be applied to codebook values

    Returns:
        index_table: array of indices that maps to codebook values for all weights
        codebook_table: array of codebook values
    """
    assert bits <= 8
    n = 2**bits

    index_table = []
    codebook_table = np.zeros((weights.shape[axis], n))
    km_models = [None] * weights.shape[axis]

    # Bug fix: the original did `import tqdm` and then called the module
    # directly; the progress-bar callable is the `tqdm` class, imported
    # above via `from tqdm import tqdm`.
    for i, w in tqdm(enumerate(np.split(weights, weights.shape[axis], axis))):
        original_shape = w.shape
        w = w.ravel()
        # One KMeans codebook per slice along `axis`.
        km = KMeans(n)
        km.fit(w.reshape(-1, 1))
        if quantizer:
            km.cluster_centers_ = quantizer(km.cluster_centers_).numpy()
        km.cluster_centers_.sort(axis=0)
        km_models[i] = km
        codebook_table[i, :] = km.cluster_centers_.flatten()
        preds = km.predict(w.reshape(-1, 1))
        index_table.append(preds.reshape(original_shape))
    index_table = np.concatenate(index_table, axis)
    return index_table, codebook_table
f7fd3a1908c51a1781367bfd717d9db6f7740934
25,925
import multiprocessing


def noncoherent_dedispersion(array, dm_grid, nu_max, d_nu, d_t, threads=1):
    """
    Method that de-disperse dynamical spectra with range values of dispersion
    measures and average them in frequency to obtain image in (t, DM)-plane.

    :param array: Numpy 2D array (#freq, #t) with dynamical spectra.
    :param dm_grid: Array-like of values of DM on which to de-disperse [cm^3/pc].
    :param nu_max: Maximum frequency [MHz].
    :param d_nu: Value of frequency step [MHz].
    :param d_t: Value of time step [s].
    :param threads: (optional) Number of threads used for parallelization
        with ``multiprocessing`` module. If ``1`` then it isn't used.
        (default: 1)
    """
    n_nu, n_t = array.shape
    # Channel center frequencies [MHz], ascending after the reversal.
    nu = np.arange(n_nu, dtype=float)
    nu = (nu_max - nu * d_nu)[::-1]

    # Use a process pool only when requested; otherwise fall back to the
    # builtin map.
    pool = None
    if threads > 1:
        pool = multiprocessing.Pool(threads, maxtasksperchild=1000)
    if pool:
        m = pool.map
    else:
        m = map

    # One work item per trial DM value.
    params = [(array, dm, nu, nu_max, d_t) for dm in dm_grid]

    # Accumulator of de-dispersed frequency averaged frames
    result = list(m(_de_disperse_by_value_freq_average, params))
    result = np.array(result)

    if pool:
        # Close pool
        pool.close()
        pool.join()

    return result
866e129e74ae121c093c70a67c811f6a0bf0d3bc
25,926
import struct


def parse_cstring(stream, offset):
    """
    parse_cstring will parse a null-terminated string in a bytestream.

    The string is decoded as UTF-8. Bytes are accumulated until the NUL
    terminator and decoded in one pass, so multi-byte UTF-8 sequences
    now decode correctly (previously each byte was decoded on its own,
    which failed on any non-ASCII character).

    :param stream: seekable binary stream
    :param offset: byte offset where the string starts
    :return: the decoded string (terminator excluded)
    """
    stream.seek(offset)
    buf = bytearray()
    while True:
        char = struct.unpack('c', stream.read(1))[0]
        if char == b'\x00':
            return buf.decode('utf-8')
        buf.extend(char)
986073f9b473ec36c08e5d041e8a4b8c1ea6bbf1
25,927
def ParseLabelTensorOrDict(labels):
    """Return a tensor to use for input labels to tensor_forest.

    The incoming targets can be a dict where keys are the string names of the
    columns, which we turn into a single 1-D tensor for classification or
    2-D tensor for regression.

    Converts sparse tensors to dense ones.

    Args:
        labels: `Tensor` or `dict` of `Tensor` objects.

    Returns:
        A 2-D tensor for labels/outputs.
    """
    if isinstance(labels, dict):
        return math_ops.to_float(
            array_ops.concat(
                [
                    # Bug fix: test each value labels[k] for sparseness —
                    # the original checked the enclosing dict, which is
                    # never a SparseTensor, so sparse values were never
                    # densified.
                    sparse_ops.sparse_tensor_to_dense(
                        labels[k], default_value=-1) if isinstance(
                            labels[k], sparse_tensor.SparseTensor) else labels[k]
                    for k in sorted(labels.keys())
                ],
                1))
    else:
        if isinstance(labels, sparse_tensor.SparseTensor):
            return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
                labels, default_value=-1))
        else:
            return math_ops.to_float(labels)
d0f5dcd32fc04418caa9715be2779897703927cb
25,928
def showCallGraph(pyew, doprint=True, addr=None):
    """ Show the callgraph of the whole program """
    # `addr` is accepted for API symmetry with other show* helpers but
    # is not used by the whole-program call graph.
    generator = CCallGraphGenerator(pyew)
    buf = generator.generateDot()
    if doprint:
        showDotInXDot(buf)
    return buf
936176e312652536dc4cea8eaf3da531ec519615
25,929
def make_template(center, data):
    """Make templated data."""
    # Recurse through containers so every leaf value becomes a template.
    if isinstance(data, dict):
        return {key: make_template(center, val) for key, val in data.items()}
    if isinstance(data, list):
        return [make_template(center, item) for item in data]
    # Leaf value: render through the jinja environment for this center.
    return get_env(center).from_string(str(data))
54763209c2b65604c3c781bdbf7553198048757f
25,930
def get_sequana_adapters(type_, direction):
    """Return path to a list of adapters in FASTA format

    :param type_: PCRFree, Rubicon, Nextera (any registered adapter type)
    :param direction: fwd, rev, revcomp
    :return: path to the adapter filename
    """
    # search possible types
    registered = _get_registered_adapters()
    if type_ not in registered:
        logger.error("This adapter type (%s) is not valid" % type_)
        logger.error("choose one in %s types" % registered)
        # Bug fix: raise with a message instead of the bare exception
        # class so the failure reason survives outside the log.
        raise ValueError("Invalid adapter type %s; choose one of %s" % (type_, registered))

    directions = ["fwd", "rev", "revcomp"]
    if direction not in directions:
        logger.error("This kind of tag (%s) is not valid" % direction)
        logger.error("choose one in %s " % directions)
        raise ValueError("Invalid direction %s; choose one of %s" % (direction, directions))
    return sequana_data("adapters_%s_%s.fa" % (type_, direction))
a331f9f0839d1193b9deefb3dbbdc8e31f882843
25,931
def board_str(board):
    """ String representation of the board. Unicode character for the piece,
    1 for threat zone and 0 for empty zone.
    """
    rendered = ''
    for row in board:
        cells = []
        for square in row:
            # Values above 1 encode a piece as a Unicode code point;
            # 0 and 1 (empty / threatened) are both drawn as dots.
            cells.append('%s ' % chr(square) if square > 1 else '. ')
        rendered += ''.join(cells) + '\n'
    return rendered
769d846c5b03c8b75145e3b81cab17ed7331fbbf
25,932
def build_pubmed_url(pubmed_id) -> str:
    """
    Generates a Pubmed URL from a Pubmed ID

    :param pubmed_id: Pubmed ID to concatenate to Pubmed URL
    :return: Pubmed URL
    """
    base_url = "https://pubmed.ncbi.nlm.nih.gov/"
    return base_url + str(pubmed_id)
5794fbec75de0451547d6f0570bb89964026c394
25,933
def create_build_job_query(user, time_frame, local=False):
    """Create the query to get build jobs from graylog

    Args:
        user(str): Fed ID
        time_frame(int): Graylog search period in hours
        local(bool): If True also search string for local builds

    Returns:
        str: Query string for a graylog request to get build jobs
    """
    # Message clause matches server builds, optionally OR-ed with local ones.
    if local:
        message_clause = FIND_BUILD_STR[BUILD] + ' OR message:' + FIND_BUILD_STR[LOCAL] + ")"
    else:
        message_clause = FIND_BUILD_STR[BUILD] + ")"
    query_str = 'application:dls-release.py AND (message:' + message_clause
    # "all" means no per-user filtering.
    if user != "all":
        query_str += " AND username:" + user
    return create_graylog_query(query_str, time_frame)
ada2dd0c40ef0de8221e03dbdf5c2410705ff2cf
25,934
def convert_to_json(payload_content):
    """Convert the OPC DA array data to JSON (Dict) and return the aggregated JSON data."""
    try:
        aggregated = {}
        for entry in payload_content:  # tuple in payload_content
            # Tag names use "-" and "_" in place of "." and "/".
            tag = entry[0].replace(".", "-").replace("/", "_")
            if len(entry) == 4:
                reading = {"value": entry[1], "quality": entry[2], "timestamp": entry[3]}
            else:
                reading = {"value": "Parameters cannot be read from server"}
            aggregated.setdefault(tag, []).append(reading)
        return aggregated
    except Exception as err:
        logger.error("Failed to convert the data to JSON: %s", str(err))
        return {"error": "Failed to covert the data to JSON: {}".format(err)}
be7aa3a60c9d8ad48e5a48e09bb16e5d456f2cba
25,935
def unites(value=32767):
    """ Restock all resistance messages.

    :param value: amount to restock, clamped to the int16 maximum 32767.
    :return: confirmation message string.
    """
    invoker = spellbook.getInvoker()
    # Clamp to the int16 maximum before passing to the engine.
    value = min(value, 32767)
    invoker.restockAllResistanceMessages(value)
    return 'Restocked %d unites!' % value
6cb8e977216b0559c1de85c14ff95983b92a11a1
25,936
def gif_summary(name, tensor, max_outputs, fps, collections=None, family=None):
    """Outputs a `Summary` protocol buffer with gif animations.

    Args:
        name: Name of the summary.
        tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height,
            width, channels]` where `channels` is 1 or 3.
        max_outputs: Max number of batch elements to generate gifs for.
        fps: frames per second of the animation
        collections: Optional list of tf.GraphKeys. The collections to add the
            summary to. Defaults to [tf.GraphKeys.SUMMARIES]
        family: Optional; if provided, used as the prefix of the summary tag
            name, which controls the tab name used for display on Tensorboard.

    Returns:
        A scalar `Tensor` of type `string`. The serialized `Summary` protocol
        buffer.
    """
    # Saturating cast so py_gif_summary always receives valid uint8 pixels.
    tensor = tf.image.convert_image_dtype(tensor, dtype=tf.uint8, saturate=True)
    # tensor = tf.convert_to_tensor(tensor)
    if skip_summary():
        return tf.constant("")
    with summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):
        # GIF encoding is done in Python via py_func, not inside the graph.
        val = tf.py_func(
            py_gif_summary,
            [tag, tensor, max_outputs, fps],
            tf.string,
            stateful=False,
            name=scope)
        summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES])
    return val
c0c4fda8e988c6f5a3918ae45531692ef36588e4
25,937
def fFargIm(k, phi, x):
    """Imaginary part of the argument for the integral in fF()"""
    theta = phi * x
    # Elliptic-style integrand; only its imaginary component is returned.
    integrand = 1 / np.sqrt(1 - k * k * np.sin(theta) ** 2)
    return integrand.imag
ec858c9b81e881e6d91299546904670723300b82
25,938
import functools
import warnings


def deprecated(func):
    """Decorator that emits a DeprecationWarning each time `func` is called.

    :param func: the function being deprecated
    :return: wrapped function that warns and then delegates to `func`
    """
    # Bug fix: functools.wraps preserves the wrapped function's name,
    # docstring and module, which were previously replaced by new_func's.
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.simplefilter('always', DeprecationWarning)  # turn off filter
        warnings.warn('Call to deprecated function "{}".'.format(func.__name__),
                      category=DeprecationWarning,
                      stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)  # reset filter
        return func(*args, **kwargs)
    return new_func
f92e71f8662d71d3ed5a93f914cf3352282d940c
25,939
def _simulate_dataset(
    latent_states,
    covs,
    log_weights,
    pardict,
    labels,
    dimensions,
    n_obs,
    update_info,
    control_data,
    observed_factor_data,
    policies,
    transition_info,
):
    """Simulate datasets generated by a latent factor model.

    Args:
        See simulate_data

    Returns:
        See simulate_data
    """
    policies = policies if policies is not None else []
    n_states = dimensions["n_latent_factors"]
    n_periods = dimensions["n_periods"]
    # Mixture weights of the initial state distribution (first row only).
    weights = np.exp(log_weights)[0]

    # Parameter DataFrames indexed like the measurement update_info.
    loadings_df = pd.DataFrame(
        data=pardict["loadings"],
        index=update_info.index,
        columns=labels["latent_factors"],
    )
    control_params_df = pd.DataFrame(
        data=pardict["controls"], index=update_info.index, columns=labels["controls"]
    )
    meas_sds = pd.DataFrame(
        data=pardict["meas_sds"].reshape(-1, 1), index=update_info.index
    )
    transition_params = pardict["transition"]
    shock_sds = pardict["shock_sds"]

    # One (mean, cov) pair per mixture component of the start distribution.
    dist_args = []
    for mixture in range(dimensions["n_mixtures"]):
        args = {
            "mean": latent_states[0][mixture],
            # NOTE(review): covs appears to hold factor matrices whose
            # product F.T @ F reconstructs the covariance — confirm.
            "cov": covs[0][mixture].T @ covs[0][mixture],
        }
        dist_args.append(args)

    latent_states = np.zeros((n_periods, n_obs, n_states))
    latent_states[0] = generate_start_states(n_obs, dimensions, dist_args, weights)
    for t in range(n_periods - 1):
        # if there is a shock in period t, add it here
        policies_t = [p for p in policies if p["period"] == t]
        for policy in policies_t:
            position = labels["latent_factors"].index(policy["factor"])
            latent_states[t, :, position] += _get_shock(
                mean=policy["effect_size"], sd=policy["standard_deviation"], size=n_obs
            )

        # get combined states and observed factors as jax array
        to_concat = [latent_states[t], observed_factor_data[t]]
        states = jnp.array(np.concatenate(to_concat, axis=-1))
        # reshaping is just needed for transform sigma points
        states = states.reshape(1, 1, *states.shape)

        # extract trans coeffs for the period
        trans_coeffs = {k: arr[t] for k, arr in transition_params.items()}

        # get anchoring_scaling_factors for the period
        anchoring_scaling_factors = pardict["anchoring_scaling_factors"][
            jnp.array([t, t + 1])
        ]
        # get anchoring constants for the period
        anchoring_constants = pardict["anchoring_constants"][jnp.array([t, t + 1])]

        # call transform_sigma_points and convert result to numpy
        next_states = np.array(
            transform_sigma_points(
                sigma_points=states,
                transition_info=transition_info,
                trans_coeffs=trans_coeffs,
                anchoring_scaling_factors=anchoring_scaling_factors,
                anchoring_constants=anchoring_constants,
            )
        ).reshape(n_obs, -1)

        # Additive transition shocks, independent across factors.
        errors = multivariate_normal(
            mean=np.zeros(n_states), cov=np.diag(shock_sds[t] ** 2), size=n_obs
        )
        next_states = next_states + errors

        latent_states[t + 1] = next_states

    # Observed dataset: one measurement frame per period, stacked long.
    observed_data_by_period = []
    for t in range(n_periods):
        meas = pd.DataFrame(
            data=measurements_from_states(
                latent_states[t],
                control_data[t],
                loadings_df.loc[t].to_numpy(),
                control_params_df.loc[t].to_numpy(),
                meas_sds.loc[t].to_numpy().flatten(),
            ),
            columns=loadings_df.loc[t].index,
        )
        meas["period"] = t
        observed_data_by_period.append(meas)
    observed_data = pd.concat(observed_data_by_period, axis=0, sort=True)
    observed_data["id"] = observed_data.index
    observed_data.sort_values(["id", "period"], inplace=True)

    # Matching latent dataset with the true factor values.
    latent_data_by_period = []
    for t in range(n_periods):
        lat = pd.DataFrame(data=latent_states[t], columns=labels["latent_factors"])
        lat["period"] = t
        latent_data_by_period.append(lat)
    latent_data = pd.concat(latent_data_by_period, axis=0, sort=True)
    latent_data["id"] = latent_data.index
    latent_data.sort_values(["id", "period"], inplace=True)

    return observed_data, latent_data
5f3c046ffea328e01580e607762311a96b9bf66d
25,940
def has_user_data(node: hou.Node, name: str) -> bool:
    """Check if a node has user data under the supplied name.

    :param node: The node to check for user data on.
    :param name: The user data name.
    :return: Whether or not the node has user data of the given name.
    """
    # Delegates directly to the compiled C++ helper.
    return _cpp_methods.hasUserData(node, name)
5953a24fa369f7d8c1aed7635d5fde3f30324c27
25,941
def load_ptsrc_catalog(cat_name, freqs, freq0=1.e8, usecols=(10,12,77,-5), sort=False):
    """
    Load point sources from the GLEAM catalog.

    Parameters
    ----------
    cat_name : str
        Filename of point source catalogue.
    freqs : array_like
        Array of frequencies to evaluate point source SEDs at (in Hz).
    freq0 : float, optional
        Reference frequency for power law spectra, in Hz. Default: 1e8.
    usecols : tuple of int, optional
        Which columns to extract the catalogue data from. Columns required
        (in order) are (RA, Dec, flux, spectral_index). Assumes angles in
        degrees, fluxes in Jy. Default (for GLEAM catalogue): (10,12,77,-5).
    sort : bool, optional
        Whether to sort the sources by flux in the first frequency channel
        (descending order). Default: False.

    Returns
    -------
    ra_dec : array_like
        RA and Dec of sources, in radians.
    flux : array_like
        Fluxes of point sources as a function of frequency, in Jy.
    """
    bb = np.genfromtxt(cat_name, usecols=usecols)

    # Angular positions: degrees -> radians.
    ra_dec = np.deg2rad(bb[:, 0:2])

    # Power-law SEDs evaluated at each requested frequency.
    flux = (freqs[:, np.newaxis] / freq0)**bb[:, 3].T * bb[:, 2].T

    # Bug fix: the sorted arrays were previously computed into separate
    # variables but the unsorted ones were returned.
    if sort:
        idxs = np.argsort(flux[0, :])[::-1]
        flux = flux[:, idxs]
        ra_dec = ra_dec[idxs, :]
    return ra_dec, flux
54f830e9fef746cdabe8b29cc9a7481b67593476
25,942
from datetime import datetime
import uuid


def create():
    """
    Create a new experiment record and the blob container associated with it.

    GET renders the creation form with default values; POST creates the
    container, inserts the experiment document and redirects to its detail
    page.
    """
    # Process form submission
    if flask.request.method == 'POST':
        experiment_id = flask.request.form['experiment_id']
        container = experiment_id

        # TODO validate container name
        # https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata

        # Create container
        # TODO transaction
        service_client = owast.blob.get_service_client()
        container_client = service_client.create_container(
            container)  # type: azure.storage.blob.ContainerClient
        container_client.set_container_metadata(
            dict(experiment_id=experiment_id))

        # Get document collection
        experiments = app.mongo.db.experiments  # type: Collection

        # Create new experiment record
        experiment = dict(
            experiment_id=experiment_id,
            # BUG FIX: ``datetime`` is imported as the *class*
            # (``from datetime import datetime``), so the original
            # ``datetime.datetime.fromisoformat(...)`` raised AttributeError.
            start_time=datetime.fromisoformat(
                flask.request.form['start_time']),
            meta=owast.utils.get_metadata(),
            container=container,
            deleted=False,
        )
        experiments.insert_one(experiment)

        flask.flash(f'Added experiment {experiment_id}')
        return flask.redirect(flask.url_for('experiment.detail',
                                            experiment_id=experiment[
                                                'experiment_id']))

    # Create default values for form fields
    time = owast.utils.html_datetime()

    # Default random experiment identifier
    experiment_id = str(uuid.uuid4())

    return flask.render_template('experiment/create.html', time=time,
                                 experiment_id=experiment_id)
6537372eb7a2345d5cd38954fb69e5fac446d2f1
25,943
def get_model():
    """
    Build the NVIDIA end-to-end self-driving CNN
    (http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf)
    with dropout layers added to reduce overfitting.
    """
    model = Sequential()

    # Normalize pixel values to [-0.5, 0.5]; input is 66x200.
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=IMG_SHAPE))

    # Three blocks of 5x5 conv -> 2x2 max-pool -> dropout.
    for n_filters in (24, 36, 48):
        model.add(Conv2D(n_filters, 5, activation='relu'))
        model.add(MaxPooling2D(strides=(2, 2)))
        model.add(Dropout(0.3))

    # Two 3x3 convolutions, then dropout.
    model.add(Conv2D(64, 3, activation='relu'))
    model.add(Conv2D(64, 3, activation='relu'))
    model.add(Dropout(0.3))

    # Fully connected head ending in a single steering output.
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(1))

    model.summary()
    return model
4591745f43719450f2da579b13ebd45a103bf76e
25,944
def ser_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed):
    """
    Isotropic undecimated wavelet transform (a trous) on a single CPU core.

    INPUTS:
    in1             (no default):   2D array to decompose.
    scale_count     (no default):   Maximum scale to be considered.
    scale_adjust    (no default):   Number of initial scales to skip.
    store_smoothed  (no default):   If True, also return the final smoothed
                                    image.

    OUTPUTS:
    detail_coeffs                   Array of detail coefficients, one slice
                                    per retained scale.
    C0              (optional):     Smoothest version of the input.
    """
    # B3 spline filter bank used by the a trous algorithm.
    kernel = (1. / 16) * np.array([1, 4, 6, 4, 1])

    # One slice of detail coefficients per retained scale.
    detail_coeffs = np.empty([scale_count - scale_adjust, in1.shape[0], in1.shape[1]])

    smoothed = in1

    # The coefficients at every scale depend on the previous smoothed image,
    # so the skipped (insignificant) scales must still be smoothed over.
    for scale in range(scale_adjust):
        smoothed = ser_a_trous(smoothed, kernel, scale)

    # Two consecutive a trous applications per scale; the difference between
    # successive smoothings gives the detail coefficients at that scale.
    for scale in range(scale_adjust, scale_count):
        approx = ser_a_trous(smoothed, kernel, scale)
        approx_next = ser_a_trous(approx, kernel, scale)
        detail_coeffs[scale - scale_adjust, :, :] = smoothed - approx_next
        smoothed = approx

    if store_smoothed:
        return detail_coeffs, smoothed
    return detail_coeffs
3a0e22ef55b14dfce3eb706800439d55f97bcd19
25,945
def delete_ref(profile, ref):
    """Delete a ref.

    Args:

        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.

        ref
            The ref to fetch, e.g., ``heads/my-feature-branch``.

    Returns
        The response of the DELETE request.

    """
    # Delegates to the shared API layer; the profile supplies repo and token.
    resource = "/refs/" + ref
    return api.delete_request(profile, resource)
4009c3bb787914ed4130760cf5d3fcfad4032496
25,946
def mean_ratio_reversion_test(hd1, hd2, n=20, offset=20, hold_time=30, return_index=False):
    """
    Tests over the time period offset:offset-hold_time to see if the price
    ratio of the price pair reverts to its n-period mean.

    Returns 1 (or the index of reversion if return_index) when the ratio
    crosses to the other side of the mean within hold_time steps, else 0
    (or the last index examined).
    """
    # Price ratio at the start of the holding period.
    init_pr = hd1.close[offset] / hd2.close[offset]

    # Mean price ratio for the pair over the lookback window.
    pr_mean = mean_price_ratio(hd1, hd2, n=n, offset=offset)

    # +1 if we start above the mean, -1 if below; used to detect a crossing.
    coeff = 1 if init_pr > pr_mean else -1

    # NOTE: ``i`` is undefined after the loop when offset == 0 (empty range);
    # this matches the original behavior.
    for i in range(offset, max(offset - hold_time, 0), -1):
        # BUG FIX: the original computed ``hd1.close[i] / hd2.close[offset]``,
        # never advancing the denominator, so the "current" ratio used a stale
        # hd2 price. The ratio at step i must use both series at index i.
        # (Also modernized py2 ``xrange`` to ``range``.)
        if coeff * (hd1.close[i] / hd2.close[i] - pr_mean) < 0:
            if return_index:
                return i
            return 1

    # The pair has not reverted to the mean.
    if return_index:
        return i
    return 0
248987fbbb718303b413fe1eb63384fba42edc07
25,947
def list_contents(path, root):
    """
    List the entries under ``root/path`` in the repository.

    @return list of relative paths rooted at "root"
    """
    listing_cmd = "svn ls %s/%s" % (root, path)
    code, out = util.execute(listing_cmd, return_out=True)
    # One entry per line of ``svn ls`` output.
    return out.strip().split('\n')
4fd02b8aeb362bdca1d37f4dc3d481b38f1d9059
25,948
def evaluate_voting():
    """Tally the 'poll' entries in the module-level instance_dict and return
    the winning choice (the one with the most votes)."""
    tallies = {}
    for ip in instance_dict.keys():
        entry = instance_dict[ip]
        if 'poll' in entry:
            choice = entry['poll']
            # First occurrence inserts the key, later ones increment it.
            tallies[choice] = tallies.get(choice, 0) + 1
    # Count votes: pick the choice with the highest tally.
    return max(tallies.keys(), key=lambda k: tallies[k])
4c86e453e24114ad00239a3ad97c23f9b25dd243
25,949
import torch


def mmd(x1, x2, sigmas):
    """Maximum mean discrepancy loss between two batches, using a Gaussian
    kernel with the given bandwidths."""
    # Flatten each sample to a vector: (batch, -1).
    flat1 = torch.reshape(x1, [x1.shape[0], -1])
    flat2 = torch.reshape(x2, [x2.shape[0], -1])
    # MMD^2 = E[k(x1,x1)] - 2 E[k(x1,x2)] + E[k(x2,x2)]
    k_11 = torch.mean(gaussian_kernel(flat1, flat1, sigmas))
    k_12 = torch.mean(gaussian_kernel(flat1, flat2, sigmas))
    k_22 = torch.mean(gaussian_kernel(flat2, flat2, sigmas))
    return k_11 - 2 * k_12 + k_22
1248de57d5aac658c54c6f67b4ba072f3dcd3978
25,950
async def api_audio_summaries() -> str:
    """Turn audio summaries on or off based on the POST body."""
    assert core is not None

    payload = (await request.data).decode().lower()

    if payload in ["false", "off"]:
        # Disable
        core.disable_audio_summaries()
        _LOGGER.debug("Audio summaries disabled.")
        return "off"

    # Enable
    core.enable_audio_summaries()
    _LOGGER.debug("Audio summaries enabled.")
    return "on"
4091678ed60d2569178d45ba30b25c6580cab601
25,951
import sys


def get_image():
    """Gets an image file via POST request, feeds the image to the FaceNet model
    then saves both the original image and its resulting embedding from the FaceNet
    model in their designated folders.

        'uploads' folder: for image files
        'embeddings' folder: for embedding numpy files.

    Renders ``upload_result.html`` on success/failure of the embedding and
    ``warning.html`` for malformed requests.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            return render_template(
                template_name_or_list="warning.html",
                status="No 'file' field in POST request!"
            )

        file = request.files['file']
        filename = file.filename

        if filename == "":
            return render_template(
                template_name_or_list="warning.html",
                status="No selected file!"
            )

        if file and allowed_file(filename=filename, allowed_set=allowed_set):
            # common setting for all models, need not modify.
            model_path = 'models'

            # face detection model setting.
            # NOTE(review): the three model loaders below are near-identical
            # try/except blocks ending in sys.exit(-1); a failed load kills
            # the whole server process rather than returning an error page.
            scene = 'non-mask'
            model_category = 'face_detection'
            model_name = model_conf[scene][model_category]
            logger.info('Start to load the face detection model...')
            try:
                faceDetModelLoader = FaceDetModelLoader(model_path, model_category, model_name)
                model, cfg = faceDetModelLoader.load_model()
                faceDetModelHandler = FaceDetModelHandler(model, 'cuda:0', cfg)
            except Exception as e:
                logger.error('Falied to load face detection Model.')
                logger.error(e)
                sys.exit(-1)
            else:
                logger.info('Success!')

            # face landmark model setting.
            model_category = 'face_alignment'
            model_name = model_conf[scene][model_category]
            logger.info('Start to load the face landmark model...')
            try:
                faceAlignModelLoader = FaceAlignModelLoader(model_path, model_category, model_name)
                model, cfg = faceAlignModelLoader.load_model()
                faceAlignModelHandler = FaceAlignModelHandler(model, 'cuda:0', cfg)
            except Exception as e:
                logger.error('Failed to load face landmark model.')
                logger.error(e)
                sys.exit(-1)
            else:
                logger.info('Success!')

            # face recognition model setting.
            model_category = 'face_recognition'
            model_name = model_conf[scene][model_category]
            logger.info('Start to load the face recognition model...')
            try:
                faceRecModelLoader = FaceRecModelLoader(model_path, model_category, model_name)
                model, cfg = faceRecModelLoader.load_model()
                faceRecModelHandler = FaceRecModelHandler(model, 'cuda:0', cfg)
            except Exception as e:
                logger.error('Failed to load face recognition model.')
                logger.error(e)
                sys.exit(-1)
            else:
                logger.info('Success!')

            face_cropper = FaceRecImageCropper()
            filename = secure_filename(filename=filename)
            # Read image file as numpy array of RGB dimension
            image = imread(name=file, mode='RGB')

            # Detect and crop a 160 x 160 image containing a human face in the image file
            dets = faceDetModelHandler.inference_on_image(image)
            bboxs = dets
            face_nums = dets.shape[0]
            feature_list = []
            # NOTE(review): only the last detected face's cropped image and
            # embedding survive the loop; feature_list is never appended to.
            for i in range(face_nums):
                landmarks = faceAlignModelHandler.inference_on_image(image, dets[i])
                landmarks_list = []
                for (x, y) in landmarks.astype(np.int32):
                    landmarks_list.extend((x, y))
                cropped_image = face_cropper.crop_image_by_mat(image, landmarks_list)

            # If a human face is detected
            if cropped_image is not None:
                embedding = faceRecModelHandler.inference_on_image(cropped_image)
                # Save cropped face image to 'uploads/' folder
                save_image(img=cropped_image, filename=filename, uploads_path=uploads_path)

                # Remove file extension from image filename for numpy file storage being based on image filename
                filename = remove_file_extension(filename=filename)

                # Save embedding to 'embeddings/' folder
                save_embedding(
                    embedding=embedding,
                    filename=filename,
                    embeddings_path=embeddings_path
                )

                return render_template(
                    template_name_or_list="upload_result.html",
                    status="Image uploaded and embedded successfully!"
                )

            else:
                return render_template(
                    template_name_or_list="upload_result.html",
                    status="Image upload was unsuccessful! No human face was detected!"
                )

    else:
        return render_template(
            template_name_or_list="warning.html",
            status="POST HTTP method required!"
        )
ee9b59c695f11b5797356012a9c7dccfa49a5569
25,952
def choose(n, r):
    """
    Number of combinations of n things taken r at a time (order unimportant).

    Returns 0 when the selection is impossible (r < 0 or r > n).
    """
    # BUG FIX: the original returned ``n`` for r == 0 (and for negative r),
    # because the multiplicative loop never ran and ``t`` was initialized to n.
    # C(n, 0) == C(n, n) == 1.
    if r < 0 or n < r:
        return 0
    if r == 0 or n == r:
        return 1
    # Multiply the smaller of the two symmetric factor counts.
    s = min(r, (n - r))
    t = n
    a = n - 1
    b = 2
    # Incremental computation keeps intermediate values exactly divisible.
    while b <= s:
        t = (t * a) // b
        a -= 1
        b += 1
    return t
5852054f1a6381278039b0ec2184d0887e2b1d2b
25,953
def _bisearch(ucs, table): """ Auxiliary function for binary search in interval table. :arg int ucs: Ordinal value of unicode character. :arg list table: List of starting and ending ranges of ordinal values, in form of ``[(start, end), ...]``. :rtype: int :returns: 1 if ordinal value ucs is found within lookup table, else 0. """ lbound = 0 ubound = len(table) - 1 if ucs < table[0][0] or ucs > table[ubound][1]: return 0 while ubound >= lbound: mid = (lbound + ubound) // 2 if ucs > table[mid][1]: lbound = mid + 1 elif ucs < table[mid][0]: ubound = mid - 1 else: return 1 return 0
f9b985771fa94138ae9b0dfbb8fa9ee413c65a48
25,954
def get_xml_serial_number (root):
    """
    Get the serial number from the system global settings XML.

    Parameters:
    root -- An XML element to the root of the system global settings.

    Return:
    The serial number.
    """
    # Delegates to the generic string extractor; "serial number" is the
    # human-readable name used in its error messages.
    return get_xml_string_value (root, "serialNumber", "serial number")
5b1dd2ef70f34980cddce442a1c8707c5d81e478
25,955
def ParseMachineType(resource_parser, machine_type_name, project, location, scope):
  """Returns the location-specific machine type uri."""
  # Pick the collection and location parameter matching the scope.
  # (For any other scope value these names stay unbound and the Parse call
  # below raises NameError, same as the original.)
  if scope == compute_scopes.ScopeEnum.ZONE:
    collection = 'compute.machineTypes'
    params = {'project': project, 'zone': location}
  elif scope == compute_scopes.ScopeEnum.REGION:
    collection = 'compute.regionMachineTypes'
    params = {'project': project, 'region': location}
  machine_type_ref = resource_parser.Parse(
      machine_type_name, collection=collection, params=params)
  return machine_type_ref.SelfLink()
70b5311525569a4981fd0170a62a3f0d53a8a8f1
25,956
def redirect_to_default():
    """
    Redirects users to main page if they make a GET request to /generate

    Generate should only be POSTed to
    """
    # Log the stray GET and bounce the client back to the default route.
    log("Received GET request for /generate, returning to default page")
    return redirect(url_for("default"))
951e610ac3e56ec6e84dbeacb3174fe79a1c6f9c
25,957
def client_credential_grant_session():
    """Create a Session backed by a Client Credential Grant credential.

    Only the access token, expiry, and scopes are populated; client id,
    secret, redirect URL, and refresh token are deliberately None for this
    grant type.
    """
    credential = OAuth2Credential(
        client_id=None,
        redirect_url=None,
        access_token=ACCESS_TOKEN,
        expires_in_seconds=EXPIRES_IN_SECONDS,
        scopes=SCOPES_SET,
        grant_type=auth.CLIENT_CREDENTIAL_GRANT,
        client_secret=None,
        refresh_token=None,
    )
    return Session(oauth2credential=credential)
54b37e3a6ae582982e47e2135058ce6d7bafd6ea
25,958
import requests


def _query(server, method, parameters, timeout=DEFAULT_TIMEOUT, verify_ssl=True, proxies=None):
    """Formats and performs the query against the API.

    :param server: The MyGeotab server.
    :type server: str
    :param method: The method name.
    :type method: str
    :param parameters: The parameters to send with the query.
    :type parameters: dict
    :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
    :type timeout: float
    :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
    :type verify_ssl: bool
    :param proxies: The proxies dictionary to apply to the request.
    :type proxies: dict or None
    :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
    :raise TimeoutException: Raises when the request does not respond after some time.
    :raise urllib2.HTTPError: Raises when there is an HTTP status code that indicates failure.
    :return: The JSON-decoded result from the server.
    """
    api_endpoint = get_api_url(server)
    # JSON-RPC style envelope; id is unused by the server hence the dummy -1.
    params = dict(id=-1, method=method, params=parameters or {})
    headers = get_headers()
    with requests.Session() as session:
        # Custom HTTPS adapter -- presumably pins TLS settings; TODO confirm.
        session.mount("https://", GeotabHTTPAdapter())
        try:
            response = session.post(
                api_endpoint,
                data=json_serialize(params),
                headers=headers,
                allow_redirects=True,
                timeout=timeout,
                verify=verify_ssl,
                proxies=proxies,
            )
        except Timeout:
            # Translate requests' Timeout into this library's own exception.
            raise TimeoutException(server)
    response.raise_for_status()
    content_type = response.headers.get("Content-Type")
    # Non-JSON responses are returned verbatim; JSON goes through _process,
    # which also surfaces server-side errors as MyGeotabException.
    if content_type and "application/json" not in content_type.lower():
        return response.text
    return _process(json_deserialize(response.text))
a7615543dffc7270fddc12bc909488cdd03ad0be
25,959
def volume_update(context, volume_id, values):
    """Set the given properties on an volume and update it.

    Raises NotFound if volume does not exist.
    """
    # Pure delegation to the configured backend implementation (IMPL).
    return IMPL.volume_update(context, volume_id, values)
1f696458654daf25767ec2a888a2bfa7a8c1872d
25,960
def get_E_Elc_microwave_d_t(P_Elc_microwave_cook_rtd, t_microwave_cook_d_t):
    """Calculate the hourly electricity consumption over a full year.

    Parameters
    ----------
    P_Elc_microwave_cook_rtd : float
        Rated standby/cooking power of the microwave, W.

    t_microwave_cook_d_t : ndarray(N-dimensional array)
        Hourly cooking durations for the whole year (8760 consecutive
        values, day d hour t), h.

    Returns
    -------
    ndarray(N-dimensional array)
        Hourly consumption for the whole year (8760 consecutive values);
        power [W] x duration [h] scaled by 10^-3.
    """
    cook_power = get_P_Elc_microwave_cook(P_Elc_microwave_cook_rtd)
    # W * h -> scaled by 1e-3, elementwise over all 8760 hours.
    return cook_power * t_microwave_cook_d_t * 10 ** (-3)
c49666272e86c8e10b8df15e639056ba8701f88b
25,961
from typing import Tuple
from typing import Any


def set_up_text_location(
        image: ImageDraw, img_opened: Image, text: str, font: ImageFont
) -> Tuple[Any, Any]:
    """
    Compute the (x, y) anchor at which the text should be drawn.

    The leftover space on each axis (image size minus rendered text size)
    is scaled by the module-level WIDTH_PROPORTION / HEIGHT_PROPORTION.

    :param image: ImageDraw object
    :param img_opened: opened PIL image
    :param text: text
    :param font: ImageFont
    :return: Tuple[Any, Any]
    """
    text_w, text_h = image.textsize(text, font)
    x = (img_opened.size[0] - text_w) * WIDTH_PROPORTION
    y = (img_opened.size[1] - text_h) * HEIGHT_PROPORTION
    return x, y
26f8a7719e033cd81db66e50ecd638abd67a6846
25,962
def service_detail(request, service_id):
    """This view shows the details of a service.

    Aggregates the service's in-flight harvest jobs, already-imported layers
    and child services into one paginated listing, then renders
    ``services/service_detail.html``. Raises Http404 if the service does
    not exist.
    """
    service = get_object_or_404(Service, pk=service_id)
    # Only jobs that are still pending or have failed are shown.
    job_statuses = (
        enumerations.QUEUED,
        enumerations.IN_PROCESS,
        enumerations.FAILED,
    )
    resources_being_harvested = HarvestJob.objects.filter(
        service=service, status__in=job_statuses)
    already_imported_layers = Layer.objects.filter(remote_service=service)
    service_list = service.service_set.all()
    # Mixed-type list: HarvestJob, Layer and Service instances together.
    all_resources = (list(resources_being_harvested) +
                     list(already_imported_layers) + list(service_list))
    paginator = Paginator(
        all_resources,
        getattr(settings, "CLIENT_RESULTS_LIMIT", 25),
        orphans=3
    )
    page = request.GET.get("page")
    try:
        resources = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page -> first page.
        resources = paginator.page(1)
    except EmptyPage:
        # Out-of-range page -> last page.
        resources = paginator.page(paginator.num_pages)
    # pop the handler out of the session in order to free resources
    # - we had stored the service handler on the session in order to
    # speed up the register/harvest resources flow. However, for services
    # with many resources, keeping the handler in the session leads to degraded
    # performance
    try:
        request.session.pop(service.base_url)
    except KeyError:
        pass

    return render(
        request,
        template_name="services/service_detail.html",
        context={
            "service": service,
            # Generators filtering the current page's resources by type.
            "layers": (r for r in resources if isinstance(r, Layer)),
            "services": (r for r in resources if isinstance(r, Service)),
            "resource_jobs": (
                r for r in resources if isinstance(r, HarvestJob)),
            "permissions_json": _perms_info_json(service),
            "resources": resources,
            "total_resources": len(all_resources),
        }
    )
13abb413973428eda57c0c18e9ee71044f9e32ab
25,963
def convolutional_block(X, f, filters, stage, block, s):
    """
    ResNet convolutional block: three-conv main path plus a 1x1 projection
    shortcut, merged by addition and a final ReLU.

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, kernel size of the middle CONV on the main path
    filters -- python list [F1, F2, F3] of filter counts for the main path
    stage -- integer, naming hint for the layer position (unused here)
    block -- string/character, naming hint for the layer position (unused here)
    s -- Integer, stride used by the first main-path CONV and the shortcut

    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """
    F1, F2, F3 = filters
    shortcut = X

    # Main path, stage 1: 1x1 conv with stride s, BN, ReLU.
    out = Conv2D(F1, (1, 1), strides=(s, s),
                 kernel_initializer=glorot_uniform(seed=0))(X)
    out = BatchNormalization(axis=3)(out)
    out = Activation('relu')(out)

    # Main path, stage 2: fxf 'same' conv, BN, ReLU.
    out = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same',
                 kernel_initializer=glorot_uniform(seed=0))(out)
    out = BatchNormalization(axis=3)(out)
    out = Activation('relu')(out)

    # Main path, stage 3: 1x1 'valid' conv, BN (no activation before merge).
    out = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
                 kernel_initializer=glorot_uniform(seed=0))(out)
    out = BatchNormalization(axis=3)(out)

    # Shortcut path: 1x1 conv with stride s to match the main path's shape.
    shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid',
                      kernel_initializer=glorot_uniform(seed=0))(shortcut)
    shortcut = BatchNormalization(axis=3)(shortcut)

    # Merge and apply the final ReLU.
    out = Add()([out, shortcut])
    return Activation('relu')(out)
ab695bd12b7179c02f2cc8696eeeedf3f153bfce
25,964
def process_input(values, puzzle_input, u_input):
    """Store the user's input at the destination address named by the
    instruction's parameter and return the resulting puzzle input.

    ``values[0]`` is the write address into ``puzzle_input`` (mutated in
    place and also returned).
    """
    destination = values[0]
    puzzle_input[destination] = u_input
    return puzzle_input
300c0850afa977d738f5a5ebbcb1b36ccd557b23
25,965
import json


def init_store():
    """
    Function to initialize the store listener: record a submitted flag for a
    user, rejecting duplicates per (user, challenge).

    Parameters
    ----------

    Returns
    -------
    dict
        request response with ``success``, a message/flag payload, and an
        ``uploaded`` marker
    """
    # Form fields from the submitting agent.
    challenge_id = str(request.form.get('challenge_id'))
    flag = str(request.form.get('flag'))
    user_email = str(request.form.get('user_email'))
    user_id = get_user_id(user_email)
    user_ip = request.remote_addr
    # Audit trail: every received flag is logged regardless of outcome.
    log_received_flag(user_email, user_ip, flag, challenge_id)

    if not user_id:
        return {"success": False,
                "message": "User does not exist.",
                "uploaded": False}

    # Reject a second upload for the same challenge by the same user.
    flags_list = IndividualFlag.query.filter_by(user_id=user_id).all()
    for current_flag in flags_list:
        if current_flag and Flags.query.filter_by(id=current_flag.id).first().challenge_id \
                == int(challenge_id):
            return {"success": False,
                    "message": "Flag was already uploaded",
                    "uploaded": True}

    # Build the flag record payload (json.loads("{}") just yields an empty dict).
    req = json.loads("{}")
    req["challenge_id"] = challenge_id
    req["content"] = flag
    req["data"] = "Case Sensitive"
    req["type"] = "individual"
    req["user_id"] = user_id
    try:
        # fill IndividualFlags and Flags table
        FlagModel = IndividualFlag
        flag_model = FlagModel(**req)
        db.session.add(flag_model)
        db.session.commit()
        db.session.close()
    except Exception as err:
        return {"success": False,
                "message": "Database Error :" + str(err),
                "uploaded": False}
    return {"success": True, "Flag_data": req, "uploaded": True}
c7c3c6d9f3b645edfdcf82a4fd9cdbf1abd62b01
25,966
def PyLong_AsSsize_t(space, w_long):
    """Return a C Py_ssize_t representation of the contents of pylong.  If
    pylong is greater than PY_SSIZE_T_MAX, an OverflowError is raised
    and -1 will be returned.
    """
    # space.int_w unwraps the app-level integer; the documented OverflowError
    # path is handled by the surrounding cpyext error machinery rather than
    # here -- NOTE(review): confirm against the cpyext API decorator.
    return space.int_w(w_long)
c0d64c9be4333a0d40133478bcc5fcad221d0efc
25,967
import os


def validate_prmtop(prmtop, target_dir=None, override=False):
    """
    Check that file exists and create a symlink if it doesn't have a prmtop
    extension (often *.top is used but mdtraj can't detect type with
    ambiguous extensions).

    Parameters
    ----------
    prmtop : str
        Path to supposed prmtop file
    target_dir : str
        Directory in which to create symlink if required. If None the symlink
        will be created in the same directory.
    override: bool
        Override possible already existing file with .prmtop extension.
        Default is false

    Returns
    -------
    str
        Location of verified prmtop (with potentially edited filename)
    """
    if not os.path.isfile(prmtop):
        raise IOError()
    _, ext = os.path.splitext(prmtop)
    # BUG FIX: the original tested ``ext is 'prmtop'`` -- an identity
    # comparison against a literal (never reliably True), and splitext keeps
    # the leading dot anyway -- so files already named *.prmtop always got a
    # redundant symlink. Compare equality against '.prmtop' instead.
    if ext == '.prmtop':
        return prmtop
    target_dir = target_dir or os.path.dirname(os.path.abspath(prmtop))
    new_prmtop = os.path.join(target_dir, os.path.basename(prmtop) + '.prmtop')
    # Replace an existing symlink only when explicitly requested.
    if os.path.islink(new_prmtop) and override:
        os.unlink(new_prmtop)
    os.symlink(os.path.abspath(prmtop), os.path.abspath(new_prmtop))
    return new_prmtop
08763ed68a822069274177bc7ecb27521ec231b0
25,968
from typing import List
from typing import Tuple
import os


def make_dataset(image_fp: List[str],
                 titles: List[str],
                 im_size: Tuple[int, int],
                 batch_size: int) -> tf.data.Dataset:
    """
    Build the training pipeline: decode images, cache, shuffle, batch,
    prefetch.

    Parameters
    ----------
    image_fp: List[str]
        List of image filepaths.
    titles: List[str]
        List of book titles, matching the order of the image_fp.
    im_size: Tuple[int, int]
        Desired image size.
    batch_size: int
        Batch size.

    Returns
    -------
    tf.data.Dataset
        TensorFlow dataset yielding (title, image) batches.
    """
    buffer_size = 1000
    # NOTE: the original computed a ``cache`` file path under ../data but
    # never passed it to ``.cache()``; the dead local has been removed and
    # the (original) in-memory cache behavior kept.
    dataset = tf.data.Dataset.from_tensor_slices((titles, image_fp))
    dataset = dataset.map(lambda title, fp: (title, read_image(fp, im_size)),
                          num_parallel_calls=tf.data.AUTOTUNE)
    # Cache decoded images, reshuffle every epoch, then batch.
    dataset = dataset.cache().shuffle(buffer_size).batch(batch_size)
    dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)

    return dataset
a6805a9730b4698c3df7506172ba668cac9fd29e
25,969
import requests


def getssa_handler() -> Response:
    """Software Statement Assertion retrieval.

    On POST, fetches the SSA from the directory API using cached TPP
    identifiers and the cached access token, storing the result back in the
    cache. Always renders ``getssa.html`` with the current settings context.
    """
    if request.method == 'POST':
        try:
            # Directory endpoint: /tpp/{tpp_id}/ssa/{software_statement_id}
            r = requests.get(
                '{}/tpp/{}/ssa/{}'.format(
                    cache.get('tpp_ssa_url'),
                    cache.get('tpp_id'),
                    cache.get('software_statement_id')
                ),
                headers=dict(
                    Authorization='Bearer {}'.format(
                        cache.get('access_token')
                    )
                )
            )
        except Exception as e:
            # Network-level failure: logged, page still renders below.
            app.logger.error('Could not retrieve the SSA because: {}'.format(e))
        else:
            if r.status_code == 200:
                # The SSA is returned as a raw JWT string, cache it as-is.
                cache.set('software_statement_assertion', r.text, timeout=CACHE_TIMEOUT)
            else:
                app.logger.error('Could not retrieve the SSA, because: {}, {}'.format(r.status_code, r.reason))

    context = dict(settings=get_context())

    try:
        return render_template('getssa.html', context=context)
    except TemplateNotFound:
        abort(404)
ddf9c7538c12073906caf934eba07995dd6ab590
25,970
import copy def dataTeapotShallow(): """ Values set interactively by Dave Hale. Omit deeper samples. """ txf = [ 30, 69,0.50, 99, 72,0.50, 63, 71,0.90, 128, 72,0.90, 29,172,0.35, 97,173,0.35, 63,173,0.75, 127,174,0.75, 33,272,0.20, 103,270,0.20, 70,271,0.60, 134,268,0.60] n = len(txf)/3 t = zerofloat(n) x = zerofloat(n) f = zerofloat(n) copy(n,0,3,txf,0,1,t) copy(n,1,3,txf,0,1,x) copy(n,2,3,txf,0,1,f) #t = add(0.5,mul(0.004,t)) #x = add(0.0,mul(0.025,x)) return t,x,f
ef03e417424d975b24e258741c57db319f918ac1
25,971
from pathlib import Path


def count_labels(l_path, selected_class, num_workers=4):
    """Calculate anchor size.

    Scans every KITTI-format label file under ``l_path`` in a thread pool and
    averages the 3D box dimensions (and z coordinate) of all objects of the
    selected class.

    Args:
        l_path: path to labels.
        selected_class: class to be calculated.
        num_workers: number of threads scanning label files.
    Returns:
        (w, l, h) rounded to 2 decimals.
    """
    def count_single_label(label_file):
        # Per-file sums: (sum_h, sum_w, sum_l, sum_z, count).
        size = []
        z_axis = []
        num = 0
        with open(label_file,"r") as f:
            label_lines = f.readlines()
        for label_line in label_lines:
            label_line = label_line.split(" ")
            if label_line[0] == selected_class:
                num += 1
                # KITTI label columns 8..10 are (h, w, l); 13 is z.
                size.append([float(label_line[8]),
                             float(label_line[9]),
                             float(label_line[10])])
                z_axis.append(float(label_line[13]))
        np_size = np.array(size)
        np_z_axis = np.array(z_axis)
        if np_size.shape[0] == 0:
            return 0,0,0,0,0
        s_h = np_size[:, 0].sum()
        s_w = np_size[:, 1].sum()
        s_l = np_size[:, 2].sum()
        s_z = np_z_axis.sum()
        return s_h, s_w, s_l, s_z, num

    label_list = list(Path(l_path).glob("**/*.txt"))
    sum_h = 0
    sum_w = 0
    sum_l = 0
    sum_z = 0
    total_num = 0
    with futures.ThreadPoolExecutor(num_workers) as executor:
        for result in executor.map(count_single_label, label_list):
            sum_h += result[0]
            sum_w += result[1]
            sum_l += result[2]
            sum_z += result[3]
            total_num += result[4]
    # NOTE(review): raises ZeroDivisionError when no object of
    # ``selected_class`` exists under l_path (total_num == 0).
    avg_h = sum_h / total_num
    avg_w = sum_w / total_num
    avg_l = sum_l / total_num
    avg_z = sum_z / total_num
    print("the mean height of %s" % selected_class, avg_h)
    print("the mean width of %s" % selected_class, avg_w)
    print("the mean length of %s" % selected_class, avg_l)
    print("the mean z coordinate of %s" % selected_class, avg_z)
    return [round(avg_w,2), round(avg_l,2), round(avg_h,2)]
e027ace155e6b7ca83dc98b7b0191109ca12420d
25,972
def sorted_chromosome(all_samples):
    """
    sorted_chromosome(AllSamples) -> list

    :return: sorted list of chromosome names found in all samples
    """
    # Iterating the dict yields its keys; sorted() returns them ordered.
    chromosomes = sorted(all_samples.chr_list)
    print(chromosomes)
    return chromosomes
c1e49ac974e16c7f9b69581442186c3efc23ef70
25,973
def adjust_bb_size(bounding_box, factor, resample=False):
    """Modifies the bounding box dimensions according to a given factor.

    Args:
        bounding_box (list or tuple): Coordinates of bounding box
            (x_min, x_max, y_min, y_max, z_min, z_max).
        factor (list or tuple): Multiplicative factor for each dimension
            (list or tuple of length 3).
        resample (bool): Boolean indicating if this resize is for resampling
            (scales the coordinates themselves instead of growing the box
            around its original position).

    Returns:
        list: New coordinates (x_min, x_max, y_min, y_max, z_min, z_max).
    """
    new_coords = []
    n_dims = len(bounding_box) // 2
    for axis in range(n_dims):
        low = bounding_box[2 * axis]
        high = bounding_box[2 * axis + 1]
        if resample:
            # Scale the coordinates directly; the span scales with them.
            low, high = low * factor[axis], high * factor[axis]
            span = high - low
        else:
            # Grow the span, keeping the box centered on its original extent.
            span = (high - low) * factor[axis]
        # Shift the start back by half the added length, clamped at 0.
        start = low - (span - (high - low)) // 2
        start = int(round(max(start, 0)))
        new_coords.append(start)
        new_coords.append(int(start + span))
    return new_coords
93a3c5947cb7c3335421084092dbae8840f8164b
25,974
def round_dt64(t,dt,t0=np.datetime64("1970-01-01 00:00:00")):
    """ Round the given t to the nearest integer number of dt from
    a reference time (defaults to unix epoch)
    """
    # Delegates to the shared helper with np.round as the rounding operation;
    # sibling floor/ceil variants presumably pass np.floor/np.ceil -- see
    # clamp_dt64_helper.
    return clamp_dt64_helper(np.round,t,dt,t0)
5e3695993110f875cc62402a770c9cb56ff9e544
25,975
def distill_base():
  """Set of hyperparameters for teacher/student distillation."""
  # Base
  hparams = common_hparams.basic_params1()

  # Teacher/student model selection (names and hparams set strings).
  for hp_name in ("teacher_model", "teacher_hparams",
                  "student_model", "student_hparams"):
    hparams.add_hparam(hp_name, "")

  # Distillation parameters
  # WARNING: distill_phase hparam will be overwritten in /bin/t2t_distill.py
  hparams.add_hparam("distill_phase", None)
  hparams.add_hparam("task_balance", 1.0)
  hparams.add_hparam("distill_temperature", 1.0)
  hparams.add_hparam("num_classes", 10)

  # Optional Phase-specific hyperparameters
  hparams.add_hparam("teacher_learning_rate", None)
  hparams.add_hparam("student_learning_rate", None)

  # Training parameters (stolen from ResNet)
  hparams.batch_size = 128
  hparams.optimizer = "Momentum"
  hparams.optimizer_momentum_momentum = 0.9
  hparams.optimizer_momentum_nesterov = True
  hparams.weight_decay = 1e-4
  hparams.clip_grad_norm = 0.0
  # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)
  hparams.learning_rate = 0.4
  hparams.learning_rate_decay_scheme = "cosine"
  # For image_imagenet224, 120k training steps, which effectively makes this a
  # cosine decay (i.e. no cycles).
  hparams.learning_rate_cosine_cycle_steps = 120000
  hparams.initializer = "normal_unit_scaling"
  hparams.initializer_gain = 2.

  return hparams
7bbf85971e0323282a801c7db4e646f7b745d453
25,976
def authorize(*args, **kwargs):
    """Handle for authorization of login information.

    GET renders the consent page with the client and current user attached;
    POST returns True iff the user clicked confirm. Anonymous users are
    redirected to login with this authorize URL as the ``next`` target.
    """
    # Rebuild the authorize URL (with the original OAuth query parameters)
    # so an unauthenticated user can come back here after logging in.
    next_url = url_for('oauth.authorize', **{
        'response_type': request.args.get('response_type'),
        'client_id': request.args.get('client_id'),
        'redirect_uri': request.args.get('redirect_uri'),
        'scope': request.args.get('scope')
    })
    # Anonymous users have no ``id`` attribute.
    if not hasattr(current_user, 'id'):
        return redirect(url_for('security.login', next=next_url))

    """
    Assign the current_user object to a variable so that we don't accidently
    alter the object during this process.
    """
    this_user = current_user

    if request.method == 'GET':
        # Render the consent form for the requesting client.
        client_id = kwargs.get('client_id')
        client = Client.query.filter_by(client_id=client_id).first()
        kwargs['client'] = client
        kwargs['user'] = this_user
        return render_template('oauth/authorize.html', **kwargs)

    # POST: grant access only when the user explicitly confirmed.
    confirm = request.form.get('confirm', 'no')
    return confirm == 'yes'
e38b958567b9fa7427c2b3dd7b04cabadbbf4a8c
25,977
def summary_stats(r, riskfree_rate=0.027):
    """Aggregate summary statistics for the returns in the columns of r.

    Assumes quarterly data (periods_per_year=4); every statistic is rounded
    to two decimals.  Returns a DataFrame with one row per column of r.
    """
    agg = r.aggregate
    return pd.DataFrame({
        "Annualized Return": np.round(
            agg(annualize_rets, periods_per_year=4), 2),
        "Annualized Vol": np.round(
            agg(annualize_vol, periods_per_year=4), 2),
        "Skewness": np.round(agg(skewness), 2),
        "Kurtosis": np.round(agg(kurtosis), 2),
        "Cornish-Fisher VaR (5%)": np.round(
            agg(var_gaussian, modified=True), 2),
        "Historic CVaR (5%)": np.round(agg(cvar_historic), 2),
        "Sharpe Ratio": np.round(
            agg(sharpe_ratio, riskfree_rate=riskfree_rate,
                periods_per_year=4), 2),
        "Max Drawdown": np.round(
            agg(lambda col: drawdown(col).Drawdown.min()), 2),
    })
230534298d907d0bff9dd843d14e3a6c5481ddbf
25,978
def mid_longitude(geom: Geometry) -> float:
    """Return the longitude (EPSG:4326) of the geometry's centroid."""
    xs, _ys = geom.centroid.to_crs("epsg:4326").xy
    # Single-element unpack: the centroid must yield exactly one coordinate.
    (longitude,) = xs
    return longitude
ada60c2b72fc36af7d37cd3e063d0154484727d0
25,979
def unique_count_weight(feature):
    """Normalize the count of unique values by the length of the feature.

    Args:
        feature: feature/column of a pandas dataset (a pd.Series).

    Returns:
        float: number of distinct non-NaN values divided by len(feature).
        (NaNs are excluded from the unique count, matching the default
        ``dropna=True`` of the previous ``value_counts()``-based version.)

    Raises:
        ZeroDivisionError: if *feature* is empty.
    """
    # nunique() counts distinct non-NaN values directly, without building the
    # full value->count table that value_counts() materializes.
    return feature.nunique() / len(feature)
0345208cd7d9d4bc2303db377206e5362db6cdde
25,980
def mode_glass(frame_bg, frame_fg, args_glass):
    """Glass mode: overlay a pair of glasses on the located eyes.

    Params:
        frame_bg: The background layer (passed through wear_glasses).
        frame_fg: The canvas layer (returned unchanged).
        args_glass: The arguments used in glass mode.
    """
    decorated_bg = wear_glasses.wear_glasses(frame_bg, args_glass)
    return decorated_bg, frame_fg
4f2957a1aed66383fe2f8ea394fb4fca0f2ba550
25,981
def line_meshes(verts, edges, colors=None, poses=None):
    """Build a pyrender Mesh that renders a set of line segments.

    Args:
        verts: np.array floats of shape [#v, 3]
        edges: np.array ints of shape [#e, 3]
        colors: np.array floats of shape [#v, 3]
        poses: (x,4,4) array of 4x4 transformation matrices for instancing
            this object.
    """
    line_primitive = pyrender.primitive.Primitive(
        positions=verts,
        indices=edges,
        color_0=colors,
        mode=pyrender.constants.GLTF.LINES,
        poses=poses)
    return pyrender.mesh.Mesh(primitives=[line_primitive], is_visible=True)
22b5fa7aaa475e56bcb120427437881f4fa28209
25,982
import math


def isPrime(n):
    """Check whether the input number n is prime.

    Uses 6k±1 trial division: every prime > 3 has the form 6k-1 or 6k+1, so
    after handling n <= 3 and multiples of 2/3, only candidate divisors
    5, 7, 11, 13, ... up to isqrt(n) need to be tested.

    Args:
        n (int): number to test; values <= 1 (including negatives) are not
            prime.

    Returns:
        bool: True iff n is prime.
    """
    if n <= 3:
        return n > 1  # 2 and 3 are prime; 1, 0 and negatives are not.
    if n % 6 != 1 and n % 6 != 5:
        return False  # Multiples of 2 or 3.
    # math.isqrt is exact for arbitrarily large ints, unlike math.sqrt whose
    # float rounding can mis-set the loop bound for very large n.
    limit = math.isqrt(n)
    for i in range(5, limit + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True
91da5b13840181d039902e2db3efb8cc09609465
25,983
def seqprob_forward(alpha):
    """Total probability of observing the whole sequence (forward algorithm).

    Inputs:
    - alpha: A numpy array alpha[j, t] = P(Z_t = s_j, x_1:x_t)

    Returns:
    - prob: A float number of P(x_1:x_T), i.e. the sum of the last column
      of alpha.
    """
    final_column = alpha[:, -1]
    return final_column.sum()
edc24df276db1e4d0ffcdd51231036f1dc1eadaa
25,984
def center_filter(gt_boxes, rect):
    """Keep only the boxes whose center lies strictly inside the rectangle.

    A center must be at least 1 pixel inside every edge of ``rect``.

    :param gt_boxes: [N,(y1,x1,y2,x2)]
    :param rect: [y1,x1,y2,x2]
    :return keep: boolean mask over the N boxes
    """
    # Box centers.
    center_x = (gt_boxes[:, 1] + gt_boxes[:, 3]) / 2.  # [N]
    center_y = (gt_boxes[:, 0] + gt_boxes[:, 2]) / 2.  # [N]
    y1, x1, y2, x2 = rect
    keep = ((center_x >= x1 + 1) & (center_x <= x2 - 1)
            & (center_y >= y1 + 1) & (center_y <= y2 - 1))
    return keep
3a9d946789b1ee2de732f6cc29970d0fdf051e1f
25,985
def named_entities(s, package="spacy"):
    """
    Return named-entities for each document of the Series `s`.

    Use Spacy named-entity-recognition. Each result item is a list of
    (text, label, start_char, end_char) tuples.

    PERSON: People, including fictional.
    NORP: Nationalities or religious or political groups.
    FAC: Buildings, airports, highways, bridges, etc.
    ORG: Companies, agencies, institutions, etc.
    GPE: Countries, cities, states.
    LOC: Non-GPE locations, mountain ranges, bodies of water.
    PRODUCT: Objects, vehicles, foods, etc. (Not services.)
    EVENT: Named hurricanes, battles, wars, sports events, etc.
    WORK_OF_ART: Titles of books, songs, etc.
    LAW: Named documents made into laws.
    LANGUAGE: Any named language.
    DATE: Absolute or relative dates or periods.
    TIME: Times smaller than a day.
    PERCENT: Percentage, including ”%“.
    MONEY: Monetary values, including unit.
    QUANTITY: Measurements, as of weight or distance.
    ORDINAL: “first”, “second”, etc.
    CARDINAL: Numerals that do not fall under another type.
    """
    # spacy.load is expensive; memoize the pipeline on the function object so
    # repeated calls don't reload the model from disk every time.
    nlp = getattr(named_entities, "_nlp", None)
    if nlp is None:
        nlp = spacy.load('en_core_web_sm', disable=["tagger", "parser"])
        named_entities._nlp = nlp

    entities = []
    # With tagger/parser disabled, nlp.pipe effectively runs only 'ner'.
    for doc in nlp.pipe(s.astype("unicode").values, batch_size=32):
        entities.append([(ent.text, ent.label_, ent.start_char, ent.end_char)
                         for ent in doc.ents])
    return pd.Series(entities, index=s.index)
239f7f4887ee72ac5f698b2b32098019ccde3965
25,986
def get_premium_client():
    """Get a connection to the premium Minio tenant."""
    tenant = "premium"
    return __get_minio_client__(tenant)
f6c28453e7f04835c2dd2c91458712c7c80f3ded
25,987
from typing import List
from typing import Tuple
import os


def get_all_versions(lang: str) -> List[Tuple[int, int, int]]:
    """Return the (major, minor, patch) versions available for a given lang.

    Scans GRAMMAR_PACKAGE for filenames matching VERSION_REGEX and keeps the
    versions whose 'name' group equals *lang*.
    """
    found: List[Tuple[int, int, int]] = []
    for entry in os.listdir(GRAMMAR_PACKAGE):
        match = VERSION_REGEX.match(entry)
        if not match or match['name'] != lang:
            continue
        found.append(
            (int(match['major']), int(match['minor']), int(match['patch'])))
    return found
5ae335241ec8d364f6e7a3898aa08bbf5f0d6ff1
25,988
def chunk_sample_text(path: str) -> list:
    """Chunk a vrt file into pieces separated by <> / </> boundaries.

    Assumes that there is one layer (no nested <> </> statements) of text
    elements to be separated.

    Each chunk is a list of three strings (newlines replaced by spaces):
        chunk[0]: the opening "<>" statement,
        chunk[1]: the text contained in the chunk,
        chunk[2]: the closing "</>" statement (left empty when a new opening
                  tag starts before the previous element was closed).

    Args:
        path: path to the .vrt file.

    Returns:
        list of [open_tag, text, close_tag] chunks; fully-empty chunks are
        dropped.
    """
    # list for data chunks
    data = []
    # index of the current chunk
    i = 0
    # number of xml elements seen so far
    xml_seen = 0
    with open(path, "r") as myfile:
        for line in myfile:
            if line.startswith("<") and xml_seen == 0:
                # First xml element opens the first chunk.
                xml_seen += 1
                data.append(["", "", ""])
                data[i][0] += line.replace("\n", " ")
            elif line.startswith("<") and xml_seen > 0:
                xml_seen += 1
                if line.startswith("</"):
                    # A closing statement ends the current chunk.
                    data[i][2] = line.replace("\n", " ")
                    i += 1
                    data.append(["", "", ""])
                else:
                    # Another opening tag (nested environment): start a new
                    # chunk and leave the previous closing statement empty.
                    i += 1
                    data.append(["", "", ""])
                    data[i][0] = line.replace("\n", " ")
            else:
                # Plain text line: accumulate into the current chunk's text.
                data[i][1] += line.replace("\n", " ")

    # Drop fully-empty chunks.  (The previous version removed items from
    # `data` while iterating over it, which skips the element following each
    # removal and can leave consecutive empty chunks in the result.)
    return [chunk for chunk in data if any(elem != "" for elem in chunk)]
6e6c36db38383283bd6076f0b6b346dcfd608243
25,989
from typing import Dict


def describe_dag_diffs(x, y):
  """Returns a list of strings describing differences between x and y.

  Traverses both DAGs in parallel with `daglish`, comparing sharing
  structure, node types, traversal metadata, and leaf values.  Each
  difference is rendered as one human-readable bullet line; the result is
  sorted for deterministic output.
  """
  diffs = []
  # A pair of dictionaries mapping id(x_val) or id(y_val) to the first path at
  # which that value was reached.  These are used to check that the sharing
  # structure of `x` and `y` is the same.  In particular, if x_val is in
  # x_memo, then x_memo[id(x_val)] should be equal to y_memo[id(y_val)].  If
  # not, then the sharing structure is different.
  x_memo: Dict[int, daglish.Path] = {}
  y_memo: Dict[int, daglish.Path] = {}

  def values_diff_message(x_val, y_val, path):
    """A message indicating that `x_val` != `y_val` at `path`."""
    path_str = daglish.path_str(path)
    x_repr = repr(x_val)
    y_repr = repr(y_val)
    if len(x_repr) + len(y_repr) + len(path_str) < 70:
      return f'* x{path_str}={x_repr} but y{path_str}={y_repr}'
    else:
      # For longer values, it's easier to spot differences if the two
      # values are displayed on separate lines.
      return f'* x{path_str}={x_repr} but\n y{path_str}={y_repr}'

  def find_diffs(x_val, y_val, path):
    """Adds differences between `x_val` and `y_val` to `diffs`."""
    # Compare the sharing structure of x_val and y_val.
    shared_x_path = x_memo.get(id(x_val))
    shared_y_path = y_memo.get(id(y_val))
    if shared_x_path is not None and shared_x_path == shared_y_path:
      return  # We have already compared x_val with y_val.
    if shared_x_path is None:
      x_memo[id(x_val)] = path
    else:
      path_str = daglish.path_str(path)
      x_path = daglish.path_str(shared_x_path)
      diffs.append(f'* Sharing diff: x{path_str} is x{x_path} but '
                   f'y{path_str} is not y{x_path}')
    if shared_y_path is None:
      y_memo[id(y_val)] = path
    else:
      path_str = daglish.path_str(path)
      y_path = daglish.path_str(shared_y_path)
      diffs.append(f'* Sharing diff: y{path_str} is y{y_path} but '
                   f'x{path_str} is not x{y_path}')

    # Compare x_val and y_val by type.  On mismatch, stop here: child
    # comparisons would not be meaningful across different node types.
    if type(x_val) is not type(y_val):
      path_str = daglish.path_str(path)
      diffs.append(f'* type(x{path_str}) != type(y{path_str}): '
                   f'{type(x_val)} vs {type(y_val)}')
      return  # Don't report any further differences between x_val and y_val.

    # Compare x_val and y_val by value.  Leaves (no node traverser) are
    # compared with plain equality; interior nodes are flattened and compared
    # structurally.
    node_traverser = daglish.find_node_traverser(type(x_val))
    if node_traverser is None:
      if x_val != y_val:
        diffs.append(values_diff_message(x_val, y_val, path))
    else:
      x_children, x_metadata = node_traverser.flatten(x_val)
      y_children, y_metadata = node_traverser.flatten(y_val)
      x_path_elements = node_traverser.path_elements(x_val)
      y_path_elements = node_traverser.path_elements(y_val)
      if x_path_elements != y_path_elements:
        # Report keys/fields present on only one side.
        for path_elt in set(x_path_elements) - set(y_path_elements):
          child_path = daglish.path_str(path + (path_elt,))
          diffs.append(
              f'* x{child_path} has a value but y{child_path} does not.')
        for path_elt in set(y_path_elements) - set(x_path_elements):
          child_path = daglish.path_str(path + (path_elt,))
          diffs.append(
              f'* y{child_path} has a value but x{child_path} does not.')
      elif x_metadata != y_metadata:
        diffs.append(values_diff_message(x_val, y_val, path))
      else:
        # Recursively check children. Note: we only recurse if type,
        # path_elements, and metadata are all equal.
        assert len(x_children) == len(y_children) == len(x_path_elements)
        for x_child, y_child, path_elt in zip(x_children, y_children,
                                              x_path_elements):
          find_diffs(x_child, y_child, path + (path_elt,))

  find_diffs(x, y, ())
  return sorted(diffs)
4e3a767af9dd64b119111860a039a99b2b210a0f
25,990
def ec_cert(cert_dir):
    """Return the path to the EC (elliptic-curve) certificate PEM file.

    Args:
        cert_dir: directory containing the certificates (a path-like object
            supporting the ``/`` join operator, e.g. pathlib.Path).

    Returns:
        cert_dir / "eccert.pem"; existence of the file is not checked.
    """
    return cert_dir / "eccert.pem"
71cf6d98b05e4fc80515936d7aedc6f184fbe0a6
25,991
from datetime import datetime


def get_expiries(body):
    """Extract expiry dates from the date <select> elements of a parsed page.

    :type body: BeautifulSoup
    :returns: list of datetime.date parsed from option texts in '%d%b%Y'
        format; options that fail to parse (or lack a .text) are skipped.
    """
    expiries = []
    selects = body.find_all('select', {'id': 'date', 'name': 'date'})
    for select in selects:
        for option in select:
            try:
                parsed = datetime.strptime(option.text, '%d%b%Y')
            except (ValueError, AttributeError):
                # Unparseable text or element without .text: ignore it.
                continue
            expiries.append(parsed.date())
    return expiries
09d7f067aa283ff930151b129378785dcbc17b09
25,992
def _add_experimental_function_notice_to_docstring(doc):
  """Adds an experimental notice to a docstring for experimental functions."""
  notice = ['THIS FUNCTION IS EXPERIMENTAL. It may change or '
            'be removed at any time, and without warning.']
  return decorator_utils.add_notice_to_docstring(
      doc, '', 'EXPERIMENTAL FUNCTION', '(experimental)', notice)
449ab32b4ddae2d383776d0c90dbc56dc6041da6
25,993
def format_args(args):
    """Formats the command line arguments so that they can be logged.

    Args:
        The args returned from the `config` file.

    Returns:
        A formatted human readable string representation of the arguments.
    """
    lines = ["Training Arguments: \n"]
    for name, value in vars(args).items():
        lines.append("\t > {} : {} \n".format(name, value))
    return "".join(lines)
22d4334daba7cdfd77329f5a6de93a2411f0594d
25,994
import uuid


def generate_id() -> str:
    """Generate a random ID string.

    A UUID4 is converted to base36 and truncated to ``LENGTH_OF_ID``
    characters (the module-level constant).
    """
    return int_to_base36(uuid.uuid4().int)[:LENGTH_OF_ID]
8cf317741edf02ca79ef72bf51c7958877a98d98
25,995
def getHG37PositionsInRange(chromosome, startPos, endPos):
    """Return a DataFrame containing hg37 positions for all rsids in a range.

    args:
        chromosome (int or str): the chromosome number
        startPos (int or str): the start position on the chromosome
        endPos (int or str): the end position on the chromosome

    returns:
        df (DataFrame): all the rsids found in the genomic range between
            startPos and endPos, indexed by rsid, with the hg19 start
            position as the single column
        chromosome (int or str): the chromosome number, passed through
    """
    # UCSC-style region query, e.g. "chr1:1000-2000".
    queryString = f'chr{chromosome}:{startPos}-{endPos}'
    mv = myvariant.MyVariantInfo()
    # NOTE(review): assembly='hg37' — the myvariant docs list 'hg19'/'hg38'
    # as assembly values; confirm 'hg37' is accepted and not silently ignored.
    gen = mv.query(queryString, scopes='dbsnp.rsid',
                   fields='dbsnp.rsid, dbsnp.hg19.start',
                   fetch_all=True, assembly='hg37')
    rsids = {}
    for row in gen:
        try:
            rsid = (row['dbsnp']['rsid'])
            start = (row['dbsnp']['hg19']['start'])
            rsids[rsid] = start
        except KeyError:
            # Hits lacking a dbsnp rsid or hg19 start are skipped.
            continue
    df = pd.DataFrame.from_dict(rsids, orient='index')
    return df, chromosome
64107a375588737e6fedcd336fe5d1a648a93efc
25,996
def spherical_from_cart_np(xyz_vector):
    """Convert cartesian vectors to spherical (r, elevation, azimuth).

    cart_vector is [idx][x, y, z]; a single (3,) vector is also accepted and
    returned with the same rank.  Elevation is measured from the Z-axis down.
    """
    was_single = xyz_vector.ndim != 2
    if was_single:
        xyz_vector = xyz_vector[np.newaxis, :]

    x = xyz_vector[:, 0]
    y = xyz_vector[:, 1]
    z = xyz_vector[:, 2]
    xy_sq = x**2 + y**2

    sph_vector = np.zeros(xyz_vector.shape)
    sph_vector[:, 0] = np.sqrt(xy_sq + z**2)
    # Elevation angle defined from the Z-axis down.
    sph_vector[:, 1] = np.arctan2(np.sqrt(xy_sq), z)
    # (Elevation from the XY-plane up would be arctan2(z, sqrt(xy_sq)).)
    sph_vector[:, 2] = np.arctan2(y, x)

    return sph_vector[0] if was_single else sph_vector
86da4d3426b6327c5fd00f431e36655b9213a027
25,997
def _execute_query(connection, query): """Executes the query and returns the result.""" with connection.cursor() as cursor: cursor.execute(query) return cursor.fetchall()
9f71eb650d323f7a5ead3451810a7b9f9d77b4b0
25,998
def mediaValues(x):
    """Return the arithmetic mean ("media") of a non-empty list of numbers."""
    count = len(x)
    total = sum(x)
    return total / count
ab4a436d3383e5df7d8d891c9661eabb0af81ef8
25,999