def bias_scan(
    data: pd.DataFrame,
    observations: pd.Series,
    expectations: Union[pd.Series, pd.DataFrame] = None,
    favorable_value: Union[str, float] = None,
    overpredicted: bool = True,
    scoring: Union[str, ScoringFunction] = "Bernoulli",
    num_iters: int = 10,
    penalty: float = 1e-17,
    mode: str = "binary",
    **kwargs,
):
    """Scan to find the highest-scoring subset of records.

    :param data (dataframe): the dataset (containing the features) the model was trained on
    :param observations (series): ground truth (correct) target values
    :param expectations (series, dataframe, optional): pandas series of estimated targets
        as returned by a model for binary, continuous and ordinal modes.
        If mode is nominal, this is a dataframe with columns containing expectations
        for each nominal class. If None, the model is assumed to be a dumb model that
        predicts the mean of the targets, or 1/(number of categories) for nominal mode.
    :param favorable_value (str, float, optional): Should be 'high', 'low', or a float if
        the mode is binary, ordinal, or continuous. If a float, the value has to be the
        minimum or the maximum in the observations column. Defaults to 'high' if None for
        these modes. Support for float is kept to keep the intuition clear in binary
        classification tasks. If mode is nominal, the favorable value should be one of the
        unique categories in the observations. Defaults to a one-vs-all scan if None for
        nominal mode.
    :param overpredicted (bool, optional): flag for the group to scan for.
        True means we scan for a group whose expectations/predictions are systematically
        higher than observed; in other words, a group whose observed values are
        systematically lower than the expectations.
        False means we scan for a group whose expectations/predictions are systematically
        lower than observed; in other words, a group whose observed values are
        systematically higher than the expectations.
    :param scoring (str or class): One of 'Bernoulli', 'Gaussian', 'Poisson', or 'BerkJones',
        or a subclass of :class:`aif360.metrics.mdss.ScoringFunctions.ScoringFunction`.
    :param num_iters (int, optional): number of iterations (random restarts). Should be positive.
    :param penalty (float, optional): penalty term. Should be positive. As with any
        regularization parameter, the penalty may need to be tuned for one's use case.
        The higher the penalty, the less complex (in number of features and feature values)
        the returned highest-scoring subset is.
    :param mode: one of ['binary', 'continuous', 'nominal', 'ordinal']. Defaults to 'binary'.
        In nominal mode, up to 10 categories are supported by default. To increase this,
        pass the keyword argument max_nominal = <integer value>.

    :returns: the highest-scoring subset and its score, or a dict of the highest-scoring
        subset and score for each category in nominal mode
    """
    # Ensure a correct mode is passed in.
    modes = ["binary", "continuous", "nominal", "ordinal"]
    assert mode in modes, f"Expected one of {modes}, got {mode}."

    # Set the correct favorable value (this tells us whether higher or lower is better).
    min_val, max_val = observations.min(), observations.max()
    uniques = list(observations.unique())

    if favorable_value == 'high':
        favorable_value = max_val
    elif favorable_value == 'low':
        favorable_value = min_val
    elif favorable_value is None:
        if mode in ["binary", "ordinal", "continuous"]:
            favorable_value = max_val  # Default to higher is better
        elif mode == "nominal":
            favorable_value = "flag-all"  # Default to scanning through all categories

    assert favorable_value in [
        "flag-all",
        *uniques,
    ], f"favorable_value should be high, low, or one of the categories {uniques}, got {favorable_value}."

    # Set the appropriate direction for the scanner depending on mode and the overpredicted flag.
    if mode in ["ordinal", "continuous"]:
        if favorable_value == max_val:
            kwargs["direction"] = "negative" if overpredicted else "positive"
        else:
            kwargs["direction"] = "positive" if overpredicted else "negative"
    else:
        kwargs["direction"] = "negative" if overpredicted else "positive"

    # Set expectations to the mean of the targets for non-nominal modes.
    if expectations is None and mode != "nominal":
        expectations = pd.Series(observations.mean(), index=observations.index)

    # Set the appropriate scoring function.
    if scoring == "Bernoulli":
        scoring = Bernoulli(**kwargs)
    elif scoring == "BerkJones":
        scoring = BerkJones(**kwargs)
    elif scoring == "Gaussian":
        scoring = Gaussian(**kwargs)
    elif scoring == "Poisson":
        scoring = Poisson(**kwargs)
    else:
        scoring = scoring(**kwargs)

    if mode == "binary":
        # Flip the observations if favorable_value is 0 in binary mode.
        observations = pd.Series(observations == favorable_value, dtype=int)
    elif mode == "nominal":
        unique_outs = set(sorted(observations.unique()))
        size_unique_outs = len(unique_outs)
        if expectations is not None:
            expectations_cols = set(sorted(expectations.columns))
            assert (
                unique_outs == expectations_cols
            ), f"Expected {unique_outs} in expectation columns, got {expectations_cols}"
        else:
            # Set expectations to 1/(number of categories) for nominal mode.
            expectations = pd.Series(
                1 / observations.nunique(), index=observations.index
            )
        max_nominal = kwargs.get("max_nominal", 10)

        assert (
            size_unique_outs <= max_nominal
        ), f"Nominal mode only supports up to {max_nominal} labels, got {size_unique_outs}. Use the keyword argument max_nominal to increase the limit."

        if favorable_value != "flag-all":
            # If the favorable value is set, use a one-vs-others strategy to scan;
            # otherwise use a one-vs-all strategy.
            observations = observations.map({favorable_value: 1})
            observations = observations.fillna(0)
            if isinstance(expectations, pd.DataFrame):
                expectations = expectations[favorable_value]
        else:
            results = {}
            orig_observations = observations.copy()
            orig_expectations = expectations.copy()
            for unique in uniques:
                observations = orig_observations.map({unique: 1})
                observations = observations.fillna(0)
                if isinstance(expectations, pd.DataFrame):
                    expectations = orig_expectations[unique]
                scanner = MDSS(scoring)
                result = scanner.scan(
                    data, expectations, observations, penalty, num_iters, mode=mode
                )
                results[unique] = result
            return results

    scanner = MDSS(scoring)
    return scanner.scan(data, expectations, observations, penalty, num_iters, mode=mode)
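# Hypothetical usage sketch (X, y, and clf are illustrative, not part of the
# original module): scan for the subgroup whose predicted probabilities most
# systematically exceed the observed binary outcomes.
probs = pd.Series(clf.predict_proba(X)[:, 1], index=X.index)  # assumes a fitted sklearn-style clf
subset, score = bias_scan(data=X, observations=y, expectations=probs,
                          favorable_value='high', overpredicted=True,
                          scoring="Bernoulli", mode="binary")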
def int_from_bin_list(lst):
    """Convert a list of 0s and 1s into an integer

    Args:
        lst (list or numpy.array): list of 0s and 1s

    Returns:
        int: resulting integer
    """
    return int("".join(str(x) for x in lst), 2)
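# A quick sanity check (illustrative, not from the original module):
# the bit list [1, 0, 1] is 0b101.
assert int_from_bin_list([1, 0, 1]) == 5
assert int_from_bin_list([0, 1, 1, 1]) == 7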
def validate_array_input(arr, dtype, arr_name):
    """Check if array has correct type and is numerical.

    This function checks if the input is either a list, numpy.ndarray or
    pandas.Series of numerical values, converts it to a numpy.ndarray and
    throws an error in case of incorrect data.

    Args:
        arr: Array of data
        dtype: One of numpy's dtypes
        arr_name: String specifying the variable name, so that the error
            message can be adapted correctly.

    Returns:
        The input converted to a numpy.ndarray with the datatype specified
        in the input argument.

    Raises:
        ValueError: In case non-numerical data is passed
        TypeError: If the input is neither a list, a numpy.ndarray nor a
            pandas.Series
    """
    # Check for correct data type
    if isinstance(arr, (list, np.ndarray, pd.Series)):
        # Try to convert to a numpy array
        try:
            arr = np.array(arr, dtype=dtype).flatten()
        except (TypeError, ValueError):
            msg = ["The data in the parameter array '{}'".format(arr_name),
                   " must be purely numerical."]
            raise ValueError("".join(msg))
    else:
        msg = ["The array {} must be either a list, ".format(arr_name),
               "numpy.ndarray or pandas.Series"]
        raise TypeError("".join(msg))

    # return converted array
    return arr
def graph_2d_markers(x: np.ndarray, y: np.ndarray, xaxis_title: str,
                     yaxis_title: str, title: str, dirname: str,
                     filename: str) -> None:
    """Creates a simple 2D plot using markers."""
    path = os.path.join(dirname, filename)
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=x,
            y=y,
            mode='markers',
            marker_color=COLORS[0],
            marker_size=10,
        )
    )
    fig.update_layout(
        title_text=title,
        xaxis_title_text=xaxis_title,
        yaxis_title_text=yaxis_title,
        yaxis_scaleanchor='x',
        yaxis_scaleratio=1,
    )
    pio.write_html(fig, path)
def ClassifyBehavior(data, bp_1="snout", bp_2="ear_L", bp_3="ear_R",
                     bp_4="tail", dimensions=2, distance=28, **kwargs):
    """
    Returns an array with the cluster by frame, an array with the embedding
    data in low-dimensional space and the clustering model.

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data.
    bp_1 : str
        Body part representing snout.
    bp_2 : str
        Body part representing left ear.
    bp_3 : str
        Body part representing right ear.
    bp_4 : str
        Body part representing tail.
    dimensions : int
        Dimension of the embedded space.
    distance : int
        The linkage distance threshold above which clusters will not be merged.
    startIndex : int, optional
        Initial index.
    n_jobs : int, optional
        The number of parallel jobs to run for neighbors search.
    verbose : int, optional
        Verbosity level.
    perplexity : float, optional
        The perplexity is related to the number of nearest neighbors that is
        used in other manifold learning algorithms. Larger datasets usually
        require a larger perplexity.

    Returns
    -------
    cluster_labels : array
        Array with the cluster by frame.
    X_transformed : array
        Embedding of the training data in low-dimensional space.
    model : Obj
        AgglomerativeClustering model.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations.
    """
    from sklearn.manifold import TSNE
    from sklearn.cluster import AgglomerativeClustering
    from sklearn.preprocessing import StandardScaler

    startIndex = kwargs.get('startIndex')
    n_jobs = kwargs.get('n_jobs')
    verbose = kwargs.get('verbose')
    perplexity = kwargs.get("perplexity")
    if startIndex is None:
        startIndex = 0
    if n_jobs is None:
        n_jobs = -1
    if verbose is None:
        verbose = 0
    if perplexity is None:
        perplexity = 500

    values = (data.iloc[2:, 1:].values).astype(float)
    lista1 = (data.iloc[0][1:].values + " - " + data.iloc[1][1:].values).tolist()

    nose = np.concatenate((values[:, lista1.index(bp_1 + " - x")].reshape(1, -1).T,
                           values[:, lista1.index(bp_1 + " - y")].reshape(1, -1).T), axis=1)
    earr = np.concatenate((values[:, lista1.index(bp_2 + " - x")].reshape(1, -1).T,
                           values[:, lista1.index(bp_2 + " - y")].reshape(1, -1).T), axis=1)
    earl = np.concatenate((values[:, lista1.index(bp_3 + " - x")].reshape(1, -1).T,
                           values[:, lista1.index(bp_3 + " - y")].reshape(1, -1).T), axis=1)
    tail = np.concatenate((values[:, lista1.index(bp_4 + " - x")].reshape(1, -1).T,
                           values[:, lista1.index(bp_4 + " - y")].reshape(1, -1).T), axis=1)

    bodyparts = [nose, earr, earl, tail]

    # Pairwise distances between the four body parts for every frame.
    distances = []
    for k in range(len(bodyparts[0])):
        frame_distances = []
        for i in range(len(bodyparts)):
            distance_row = []
            for j in range(len(bodyparts)):
                distance_row.append(np.linalg.norm(bodyparts[i][k] - bodyparts[j][k]))
            frame_distances.append(distance_row)
        distances.append(frame_distances)
    distances2 = np.asarray(distances)

    # Normalize each pairwise distance across frames. The original code
    # indexed with a stale loop variable (j) here; the intended index is k.
    # The diagonal (i == k) is all zeros and is discarded below anyway.
    for i in range(4):
        for k in range(4):
            if i != k:
                distances2[:, i, k] = distances2[:, i, k] / np.max(distances2[:, i, k])

    # Keep only the upper-triangular (unique) pairs for each frame.
    d = []
    for i in range(distances2.shape[0]):
        d.append(distances2[i, np.triu_indices(4, k=1)[0], np.triu_indices(4, k=1)[1]])

    d = StandardScaler().fit_transform(d)
    embedding = TSNE(n_components=dimensions, n_jobs=n_jobs, verbose=verbose,
                     perplexity=perplexity)
    X_transformed = embedding.fit_transform(d[startIndex:])

    model = AgglomerativeClustering(n_clusters=None, distance_threshold=distance)
    model = model.fit(d[startIndex:])
    cluster_labels = model.labels_

    return cluster_labels, X_transformed, model
def grab_inputs(board):
    """ Asks for inputs and returns a row, col. Also updates the board state. """
    keepasking = True
    while keepasking:
        try:
            row = int(input("Input row "))
            col = int(input("Input column "))
        except (EOFError, KeyboardInterrupt):
            print('Cya nerd')
            exit()
        except ValueError:
            print("That's not an integer.")
        else:  # If it's an int
            valid_board = board.update_board(row, col)
            if valid_board == False:
                print("Your row or col is out of range. Try ranges 0-2 and make sure there's nothing there already.")
            else:  # If it's a valid board
                keepasking = False
    return row, col
def view(filename, show_attributes=False):
    """
    NCVIEW

    Args:
        filename:
        show_attributes:
    """
    import os
    import netCDF4 as nc

    if not os.path.isfile(filename):
        raise IOError("Unknown file: %s" % filename)

    if 'gz' in filename:
        import gzip
        with gzip.open(filename) as g:
            with nc.Dataset("dummy", 'r', memory=g.read()) as f:
                # HEADER INFORMATION
                print("File: %s" % filename)
                _header(f)
                # other variables?
                if show_attributes:
                    print()
                    _detailed_information(f)
                print()
    else:
        with nc.Dataset(filename, 'r') as f:
            # HEADER INFORMATION
            print("File: %s" % filename)
            _header(f)
            # other variables?
            if show_attributes:
                print()
                _detailed_information(f)
            print()
def start_session():
    """ This function is what initializes the application."""
    welcome_msg = render_template('welcome')
    return question(welcome_msg)
def solve(filename):
    """ Run a sample, do the analysis and store a program to apply to a test case """
    arc = Arc(filename)
    arc.print_training_outputs()
    return arc.solve()
def extract_item(item, prefix=None, entry=None):
    """a helper function to extract sequence, will extract values from
    a dicom sequence depending on the type.

    Parameters
    ==========
    item: an item from a sequence.
    """
    # First call, we define entry to be a lookup dictionary
    if entry is None:
        entry = {}

    # Skip raw data elements
    if not isinstance(item, RawDataElement):
        header = item.keyword

        # If there is no header or field, we can't evaluate
        if header in [None, ""]:
            return entry

        if prefix is not None:
            header = "%s__%s" % (prefix, header)

        value = item.value
        if isinstance(value, bytes):
            value = value.decode("utf-8")
        if isinstance(value, Sequence):
            return extract_sequence(value, prefix=header)

        entry[header] = value
    return entry
def route_counts(session, origin_code, dest_code):
    """ Get count of flight routes between origin and dest. """
    routes = session.tables["Flight Route"]
    # airports = session.tables["Reporting Airport"]
    # origin = airports["Reporting Airport"] == origin_code
    origin = SelectorClause(
        "Reporting Airport",
        REPORTING_AIRPORT_CODE,
        [origin_code],
        session=session,
    )
    dest = routes["Origin Destination"] == dest_code
    audience = routes * origin & dest
    return audience.select().count
def generate_master_bias(
    science_frame: CCDData,
    bias_path: Path,
    use_cache: bool = True,
) -> CCDData:
    """Create the master bias for a science frame, reusing a cached copy if available."""
    cache_path = generate_cache_path(science_frame, bias_path) / 'bias'
    cache_file = cache_path / 'master.fits'

    if use_cache and cache_file.is_file():
        ccd = CCDData.read(cache_file)
        if ccd is not None:
            return ccd

    cache_path.mkdir(parents=True, exist_ok=True)
    ccd = calibrate_bias(science_frame, bias_path)
    if ccd is not None:
        ccd.write(cache_file)
    return ccd
def RetryOnException(retry_checker,
                     max_retries,
                     sleep_multiplier=0,
                     retry_backoff_factor=1):
    """Decorator which retries the function call if |retry_checker| returns true.

    Args:
        retry_checker: A callback function which should take an exception instance
                       and return True if functor(*args, **kwargs) should be retried
                       when such exception is raised, and return False if it should
                       not be retried.
        max_retries: Maximum number of retries allowed.
        sleep_multiplier: Will sleep sleep_multiplier * attempt_count seconds if
                          retry_backoff_factor is 1.  Will sleep
                          sleep_multiplier * (retry_backoff_factor ** (attempt_count - 1))
                          if retry_backoff_factor != 1.
        retry_backoff_factor: See explanation of sleep_multiplier.

    Returns:
        The function wrapper.
    """
    def _Wrapper(func):
        def _FunctionWrapper(*args, **kwargs):
            return Retry(retry_checker, max_retries, func, sleep_multiplier,
                         retry_backoff_factor, *args, **kwargs)
        return _FunctionWrapper
    return _Wrapper
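# Hypothetical usage (flaky_fetch is illustrative; the Retry helper is assumed
# to come from the same module): retry up to 3 times on ValueError, sleeping
# 1s, 2s, 4s between attempts via the backoff factor.
@RetryOnException(lambda e: isinstance(e, ValueError),
                  max_retries=3,
                  sleep_multiplier=1,
                  retry_backoff_factor=2)
def flaky_fetch(url):
    ...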
def ParseCsvFile(fp):
    """Parse dstat results file in csv format.

    Args:
        fp: file object of the dstat csv file.

    Returns:
        A tuple of list of dstat labels and ndarray containing parsed data.
    """
    reader = csv.reader(fp)
    headers = list(itertools.islice(reader, 5))
    if len(headers) != 5:
        raise ValueError(
            'Expected exactly 5 header lines got {}\n{}'.format(
                len(headers), headers))
    if 'Dstat' not in headers[0][0]:
        raise ValueError(
            'Expected first header cell to contain "Dstat"\n{}'.format(
                headers[0]))
    if 'Host:' not in headers[2][0]:
        raise ValueError(('Expected first cell in third line to be '
                          '"Host:"\n{}').format(headers[2]))
    categories = next(reader)

    # Categories are not repeated; copy category name across columns in the
    # same category
    for i, category in enumerate(categories):
        if not categories[i]:
            categories[i] = categories[i - 1]
    labels = next(reader)

    if len(labels) != len(categories):
        raise ValueError((
            'Number of categories ({}) does not match number of '
            'labels ({})\nCategories: {}\nLabels:{}').format(
                len(categories), len(labels), categories, labels))

    # Generate new column names
    labels = ['%s__%s' % x for x in zip(labels, categories)]

    data = []
    for i, row in enumerate(reader):
        # Remove the trailing comma
        if len(row) == len(labels) + 1:
            if row[-1]:
                raise ValueError(('Expected the last element of row {0} to be empty,'
                                  ' found {1}').format(row, row[-1]))
            row = row[:-1]

        if len(labels) != len(row):
            raise ValueError(('Number of labels ({}) does not match number of '
                              'columns ({}) in row {}:\n{}').format(
                                  len(labels), len(row), i, row))
        data.append(row)
    return labels, np.array(data, dtype=float)
def uid_to_device_name(uid):
    """ Turn UID into its corresponding device name. """
    return device_id_to_name(uid_to_device_id(uid))
def zonal_convergence(u, h, dx, dy, dy_u, ocean_u):
    """Compute convergence of zonal flow.

    Returns -(hu)_x taking account of the curvature of the grid.
    """
    res = create_var(u.shape)
    for j in range(u.shape[-2]):
        for i in range(u.shape[-1]):
            res[j, i] = (-1) * (
                h[j, cx(i + 1)] * u[j, cx(i + 1)] * dy_u[j, cx(i + 1)] * ocean_u[j, cx(i + 1)]
                - h[j, i] * u[j, i] * dy_u[j, i] * ocean_u[j, i]
            ) / (dx[j, i] * dy[j, i])
    return res
def filterPoints(solutions, corners):
    """Remove solutions that are not within the perimeter.

    This function uses shapely, as the mathematical computations for
    non-rectangular shapes are quite heavy.

    Args:
        solutions: A list of candidate points.
        corners: The perimeter of the garden (list of LEDs).

    Returns:
        A list of filtered points.
    """
    coords = []
    for i in corners:
        if i.inPerimeter:
            coords.append((i.point.X, i.point.Y))
    polygon = shapely.geometry.polygon.Polygon(coords)

    solutions_2 = [value.toShapely() for value in solutions
                   if polygon.contains(value.toShapely())]
    return [Point(v.x, v.y) for v in solutions_2]
def renamePath(dir_path, path):
    """
    Renames a folder to match a standard Plex format { Title (year) }.
    If the Dry run flag is set then we will just print the text but not make the move.

    Parameters:
    -----------
    dir_path: Full path to the related folder
    path: Folder name
    """
    new_name = guessit(path)['title'] + ' (' + str(guessit(path)['year']) + ')'
    src = dir_path + '/' + path
    dest = dir_path + '/' + new_name
    print(' ' + src + bcolors.OKGREEN + ' ==> ' + bcolors.ENDC + dest)
    if not DRYRUN:
        os.rename(src, dest)
def get_migrations_from_old_config_key_startswith(old_config_key_start: str) -> Set[AbstractPropertyMigration]:
    """
    Get all migrations where old_config_key starts with the given value
    """
    ret = set()
    for migration in get_history():
        if isinstance(migration, AbstractPropertyMigration) and \
                migration.old_config_key and \
                migration.old_config_key.startswith(old_config_key_start):
            ret.add(migration)
    return ret
def bbox_mapping(bboxes,
                 img_shape,
                 scale_factor,
                 flip,
                 flip_direction,  # ='horizontal'
                 tile_offset):
    """Map bboxes from the original image scale to testing scale."""
    new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
    if flip:
        new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)

    # add by hui ############################################
    assert tile_offset is None or (isinstance(tile_offset, (tuple, list)) and len(tile_offset) == 2), \
        "tile_offset must be None or (dx, dy) or [dx, dy]"
    if tile_offset is not None:
        dx, dy = tile_offset
        new_bboxes[:, [0, 2]] -= dx
        new_bboxes[:, [1, 3]] -= dy
        h, w, c = img_shape
        new_bboxes[:, [0, 2]] = new_bboxes[:, [0, 2]].clamp(0, w - 1)
        new_bboxes[:, [1, 3]] = new_bboxes[:, [1, 3]].clamp(0, h - 1)
        W, H = new_bboxes[:, 2] - new_bboxes[:, 0], new_bboxes[:, 3] - new_bboxes[:, 1]
        keep = (W >= 2) & (H >= 2)
        new_bboxes = new_bboxes[keep]
    # #################################################################
    return new_bboxes
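# Minimal sketch of calling the mapper (assumes PyTorch; flip=False so the
# bbox_flip helper is not exercised). A box in a 200x200 original image is
# rescaled by 0.5 into a 100x100 test image, with no tiling offset.
import torch

boxes = torch.tensor([[20., 20., 80., 80.]])
mapped = bbox_mapping(boxes, img_shape=(100, 100, 3), scale_factor=0.5,
                      flip=False, flip_direction='horizontal', tile_offset=None)
# mapped is tensor([[10., 10., 40., 40.]])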
def getRNA_X(sample_list, DATAPATH, ctype, lab_type):
    """
    Get X for RNA. The required columns are retained and all other rows and
    columns dropped. This function also labels the data for building models.

    Parameters
    ----------
    sample_list : list
        List of tumour samples to be retained.
    DATAPATH : str
        Complete path to SNV data for the samples and other data for
        different labelling techniques.
    ctype : str
        Cancer-type.
    lab_type : str
        Labelling strategy to be used.

    Returns
    -------
    data : DataFrame
        DataFrame containing feature matrix to be trained on and labels.
    data_meta : DataFrame
        DataFrame containing meta data for the feature matrix.
    """
    # Load SNV data (for labelling)
    os.chdir(DATAPATH + "/GDC_{}/SNV".format(ctype))
    fname = "{}_snv.tsv".format(ctype)
    snv_lab = pd.read_csv(fname, sep="\t", header=0)
    snv_lab.Tumor_Sample_Barcode = [samp[:16] for samp in snv_lab.Tumor_Sample_Barcode]
    snv_lab = snv_lab[snv_lab.Tumor_Sample_Barcode.isin(sample_list)]
    snv_lab.index = ["{};{}".format(samp[:16], gene) for samp, gene in
                     zip(snv_lab.Tumor_Sample_Barcode, snv_lab.Hugo_Symbol)]

    # Add labels
    if lab_type == "civic":
        snv_lab = snv.getCivicLabels(snv_lab, DATAPATH)
    if lab_type == "martellotto":
        snv_lab = snv.getMartelottoLabels(snv_lab, DATAPATH)
    if lab_type == "cgc":
        snv_lab = snv.getCGCLabels(snv_lab, DATAPATH)
    if lab_type == "bailey":
        snv_lab = snv.getBaileyLabels(snv_lab, DATAPATH, ctype)

    # Remove duplicates and keep labelled SNVs
    snv_lab = snv_lab[snv_lab.Label != "Unlabelled"]
    snv_lab = snv_lab[~snv_lab.index.duplicated()]

    # Load data
    path_network = DATAPATH + "/network"
    data = [None] * len(sample_list)
    datapath = DATAPATH + "/GDC_{}/RNA-seq".format(ctype)
    for idx, file in enumerate(sample_list):
        temp = getRNAFeatures(datapath, file, ctype, path_network, n=1)

        # Assign labels to RNA data
        temp["Label"] = [snv_lab.loc[i, "Label"] if i in snv_lab.index
                         else "Unlabelled" for i in temp.index]
        temp = temp[temp["Label"] != "Unlabelled"]

        # Drop nan rows
        data[idx] = temp.dropna(axis=0)

    # Concat data
    data = pd.concat(data)

    # Define meta-data and drop meta-data columns from RNA data
    data_meta = data[['genes', 'Tumor_Sample_Barcode', 'Label']]
    data_meta.index = data.index
    d_cols = [x for x in data.columns if x in
              ['genes', 'unshrunk.logFC', 'PValue', 'FDR', 'Tumor_Sample_Barcode']]
    data = data.drop(d_cols, axis=1)

    return (data, data_meta)
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """Convert 3x4 rotation matrix to 4d quaternion vector

    This algorithm is based on the algorithm described in
    https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201

    Args:
        rotation_matrix (Tensor): the rotation matrix to convert.

    Return:
        Tensor: the rotation in quaternion

    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 4)`

    Example:
        >>> input = torch.rand(4, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_quaternion(input)  # Nx4
    """
    if not torch.is_tensor(rotation_matrix):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))

    if len(rotation_matrix.shape) > 3:
        raise ValueError(
            "Input size must be a three dimensional tensor. Got {}".format(
                rotation_matrix.shape))
    if not rotation_matrix.shape[-2:] == (3, 4):
        raise ValueError(
            "Input size must be a N x 3 x 4 tensor. Got {}".format(
                rotation_matrix.shape))

    rmat_t = torch.transpose(rotation_matrix, 1, 2)

    mask_d2 = rmat_t[:, 2, 2] < eps
    mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
    mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]

    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
    t0_rep = t0.repeat(4, 1).t()

    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
    t1_rep = t1.repeat(4, 1).t()

    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
                      rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
    t2_rep = t2.repeat(4, 1).t()

    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
    t3_rep = t3.repeat(4, 1).t()

    mask_c0 = mask_d2 * mask_d0_d1
    # mask_c1 = mask_d2 * (1 - mask_d0_d1)
    mask_c1 = mask_d2 * (~mask_d0_d1)
    # mask_c2 = (1 - mask_d2) * mask_d0_nd1
    mask_c2 = (~mask_d2) * mask_d0_nd1
    # mask_c3 = (1 - mask_d2) * (1 - mask_d0_nd1)
    mask_c3 = (~mask_d2) * (~mask_d0_nd1)
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)

    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 +  # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3)  # noqa
    q *= 0.5
    return q
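# Quick sanity check (assumes torch is available): the identity rotation,
# padded with a zero translation column to make it N x 3 x 4, should map to
# the identity quaternion (w, x, y, z) = (1, 0, 0, 0).
import torch

rmat = torch.cat([torch.eye(3), torch.zeros(3, 1)], dim=1).unsqueeze(0)  # 1x3x4
quat = rotation_matrix_to_quaternion(rmat)
# quat is tensor([[1., 0., 0., 0.]])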
def get_first_day_and_last_day_by_month(months=0):
    """Get the dates of the first day and the last day of a month.

    :param months: int, negative values mean past months, positive values future ones
    :return tuple: (date of the first day of the month, date of the last day of the month)
    """
    day = get_today() + relativedelta(months=months)
    year = day.year
    month = day.month
    # Get the weekday of the first day of the month and the number of days in it
    _, month_range = calendar.monthrange(year, month)
    first = datetime.date(year=year, month=month, day=1)
    last = datetime.date(year=year, month=month, day=month_range)
    return first, last
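# Illustrative call (get_today comes from the surrounding module):
# months=0 gives the current month, months=-1 the previous one, e.g.
# (date(2024, 2, 1), date(2024, 2, 29)) when "today" is in March 2024.
first, last = get_first_day_and_last_day_by_month(months=-1)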
def kmeans(X, C):
    """The Lloyd's algorithm for the k-centers problems.

    X : data matrix
    C : initial centers
    """
    C = C.copy()
    V = np.zeros(C.shape[0])
    for x in X:
        idx = np.argmin(((C - x) ** 2).sum(1))
        V[idx] += 1
        eta = 1.0 / V[idx]
        C[idx] = (1.0 - eta) * C[idx] + eta * x
    return C
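# Minimal self-contained sketch: cluster 2-D points around two centers.
# Note this is a single-pass, online update; each center moves toward the
# running mean of the points assigned to it.
import numpy as np

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 0.1, (50, 2)), rng.normal(5, 0.1, (50, 2))])
C0 = np.array([[0.5, 0.5], [4.5, 4.5]])
C = kmeans(X, C0)
# C is close to [[0, 0], [5, 5]]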
def test_make_png():
    """Test to ensure that make_png functions correctly."""
    # Save random RGBA and RGB arrays onto disk as PNGs using make_png.
    # Read them back with an image library and check whether the array
    # saved is equal to the array read.

    # Create random RGBA array as type ubyte
    rgba_save = np.random.randint(256, size=(100, 100, 4)).astype(np.ubyte)
    # Get rid of the alpha for RGB
    rgb_save = rgba_save[:, :, :3]

    # Output file should be in temp
    png_out = op.join(temp_dir, 'random.png')

    # write_png implicitly tests _make_png
    for rgb_a in (rgba_save, rgb_save):
        write_png(png_out, rgb_a)
        rgb_a_read = read_png(png_out)
        assert_array_equal(rgb_a, rgb_a_read)
def test_deploy_script_register_without_limit(
    token_address: HexAddress,
    deployer_0_4_0: ContractDeployer,
    deployed_raiden_info_0_4_0: DeployedContracts,
) -> None:
    """Run the token register function used in the deployment script.

    This checks if register_token_network() works correctly in the happy case
    for the 0.4.0 version, to make sure no code dependencies have been changed,
    affecting the deployment script. This does not check, however, that the
    cli command works correctly.
    """
    token_registry_abi = deployer_0_4_0.contract_manager.get_contract_abi(
        CONTRACT_TOKEN_NETWORK_REGISTRY
    )
    token_registry_address = deployed_raiden_info_0_4_0["contracts"][
        CONTRACT_TOKEN_NETWORK_REGISTRY
    ]["address"]
    token_network_address = deployer_0_4_0.register_token_network(
        token_registry_abi=token_registry_abi,
        token_registry_address=token_registry_address,
        token_address=token_address,
        channel_participant_deposit_limit=None,
        token_network_deposit_limit=None,
    )
    assert token_network_address is not None
    assert isinstance(token_network_address, str)
def gap_init(points, D, d, C, L=None, st=None, K=None, minimize_K=True,
             find_optimal_seeds=True, seed_method="cones",
             seed_edge_weight_type='EUC_2D',
             use_adaptive_L_constraint_weights=True,
             increase_K_on_failure=False):
             #REMOVEME, disable!
             #increase_K_on_failure=True):
    """ An implementation of a three phase cluster-first-route-second CVRP
    construction / route initialization algorithm. The first two phases
    involve the clustering. First, a seed point is generated for each route,
    which is then used in approximating customer node service costs in
    solving the generalized assignment problem (GAP) relaxation of the VRP.
    The resulting assignments are then routed using a TSP solver. The
    algorithm was first proposed in (Fisher and Jaikumar 1981).

    The algorithm assumes that the problem is planar and this implementation
    allows seeding in several ways:

    * seed_method="cones", the initialization method of Fisher and Jaikumar
      (1981) which can be described as Sweep with fractional distribution of
      customer demand and placing the seed points approximately to the center
      of demand mass of the created sectors.
    * seed_method="kmeans", initialize seed points to k-means cluster centers.
    * seed_method="large_demands", according to Fisher and Jaikumar (1981)
      "Customers for which d_i > 1/2 C can also be made seed customers".
      However, applying this rule relies on a human operator who then decides
      the intuitively best seed points. This implementation selects the seed
      points satisfying the criteria d_i > m*C, where m is the fractional
      capacity multiplier, that are farthest from the depot and each other.
      The m is made iteratively smaller if there are not at least K seed
      point candidates.
    * seed_method="ends_of_thoroughfares", this option was described in
      (Fisher and Jaikumar 1981) as "Most distant customers at the end of
      thoroughfares leaving from the depot are natural seed customers". They
      relied on a human operator. To automate this selection we make a DBSCAN
      clustering with eps = median 2. nearest neighbor of all nodes and
      min_samples of 3.

    The other parameters are:

    * points is a list of x,y coordinates of the depot [0] and the customers.
    * D is a numpy ndarray (or equivalent) of the full 2D distance matrix,
      including the service times (st/2.0 for leaving and entering nodes).
    * d is a list of demands. d[0] should be 0.0 as it is the depot.
    * C is the capacity constraint limit for the identical vehicles.
    * L is the optional constraint for the maximum route length/duration/cost.
    * st is the service time. However, also the D should be modified with
      service times to allow straight computation of the TSP solutions (see
      above).
    * K is the optional parameter specifying the required number of vehicles.
      The algorithm is only allowed to find solutions with this many vehicles.
    * minimize_K, if set to True (default), makes the minimum number of routes
      the primary and the solution cost the secondary objective. If set to
      False the algorithm optimizes for minimum solution / route cost by
      increasing K as long as it seems beneficial. WARNING: the algorithm
      suits this use case (cost as the objective) poorly and setting this
      option to False may significantly increase the required CPU time.
    * find_optimal_seeds, if set to True, tries all possible Sweep start
      positions / k-means with N different seeds. If False, only one sweep
      from the node closest to the depot is done / k-means clustering is done
      only once with one random seed value.
    * seed_edge_weight_type specifies how to round off the distances from the
      customer nodes (points) to the seed points. Supports all TSPLIB edge
      weight types.

    Note1: The GAP is optimized using the Gurobi solver. If the L constraint
    is set, the side constraints may make the GAP instance tricky to solve
    and it is advisable to set a sensible timeout with
    config.MAX_MIP_SOLVER_RUNTIME.

    * use_adaptive_L_constraint_weights, if set True and the L constraint is
      set, the algorithm adaptively adjusts the route cost approximation of
      the relevant side constraints so that a solution which is neither L
      infeasible nor GAP infeasible is found. The exact handling of the L
      constraint is vague in (Fisher and Jaikumar 1981) and this was our best
      guess on how the feasible region of the problem can be found. Note that
      if the GAP solver is terminated due to a timeout, the adaptive
      multiplier is increased and the GAP solution is attempted again.
      However, if increase_K_on_failure is set (see below), it takes priority
      over this.
    * increase_K_on_failure (default False) is another countermeasure against
      long running GAP solving attempts for problem instances without the L
      constraint (if there is an L constraint and
      use_adaptive_L_constraint_weights is enabled, this is ignored) or
      instances where the K estimation does not work and it takes excessively
      long to check all initial seed configurations before increasing K. If a
      Gurobi timeout is encountered or the solution is GAP infeasible, and
      this option is enabled, K is temporarily increased, new seed points are
      generated for the current sweep start location and another GAP solution
      attempt is made. K is allowed to be increased temporarily up to 10%
      over the minimum K allowed (or by 1, whichever is larger).

    Note2: logger controls the debug level but running the script with the
    Python -O option disables all debug output.

    Fisher, M. L. and Jaikumar, R. (1981), A generalized assignment heuristic
    for vehicle routing. Networks, 11: 109-124. doi:10.1002/net.3230110205
    """
    #TODO: other alternatives
    # customers with maximum demand or the most distant customer from origin
    if seed_method == "cones":
        seed_f = _sweep_seed_points
    if seed_method == "kmeans":
        seed_f = _kmeans_seed_points
    if seed_method == "large_demands":
        if not C:
            raise ValueError("""The "large_demands" seed initialization method requires demands and C constraint to be known.""")
        seed_f = _large_demand_seed_points
    if seed_method == "ends_of_thoroughfares":
        seed_f = _end_of_thoroughfares_seed_points

    int_dists = issubclass(D.dtype.type, np.integer)
    if seed_edge_weight_type == "EXPLICIT":
        seed_edge_weight_type = "EUC_2D" if int_dists else "EXACT_2D"

    if not points:
        raise ValueError("The algorithm requires 2D coordinates for the points")
    N = len(D)

    if K:
        startK = K
        maxK = K
    else:
        # start from the smallest K possible
        if C:
            startK = int(ceil(sum(d) / C))
        elif L:
            # find a lower bound by checking how many visits from the TSP
            # tour we need to add to have any chance of making this L feasible
            _, tsp_f = solve_tsp(D, list(range(1, N)))
            shortest_depot_edges = list(D[0, 1:])
            shortest_depot_edges.sort()
            startK = int(ceil(tsp_f / L))
            while True:
                if tsp_f + sum(shortest_depot_edges[:startK * 2]) <= startK * L:
                    break
                startK += 1
        else:
            raise ValueError("If C and L have not been set, K is required")
        maxK = N - 1

    # We only need the first row of the distance matrix to calculate insertion
    # costs for the GAP objective function
    D_0 = np.copy(D[0, :])

    best_sol = None
    best_f = None
    best_K = None
    seed_trial = 0
    incK = 0
    maxKinc = max(startK + 1, int(startK * INCREASE_K_ON_FAILURE_UPTO))

    L_ctr_multipiler = L_MPLR_DEFAULT
    if L and use_adaptive_L_constraint_weights:
        # Adaptive L constraint multiplier
        L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT
        L_ctr_multipiler_tries = 0

    try:
        for currentK in range(startK, maxK + 1):
            found_improving_solution_for_this_K = False
            seed_trial = 0
            while True:
                if __debug__:
                    log(DEBUG, "ITERATION:K=%d, trial=%d, L_ctr_mul=%.6f\n" %
                        (currentK + incK, seed_trial, L_ctr_multipiler))
                    log(DEBUG - 1, "Getting %d seed points...\n" % (currentK + incK))

                # Get seed points
                seed_points = seed_f(points, D, d, C, currentK + incK, seed_trial)
                if __debug__:
                    log(DEBUG - 1, "...got seed points %s\n" % str(seed_points))

                # Extend the distance matrix with seed distances
                S = calculate_D(seed_points, points, seed_edge_weight_type)
                if st:
                    # include the "leaving half" of the service_time in the
                    # distances (the other half is already added to the D
                    # prior to gapvrp_init)
                    halftst = int(st / 2) if int_dists else st / 2.0
                    S[:, 1:] += halftst
                D_s = np.vstack((D_0, S))

                GAP_infeasible = False
                L_infeasible = False
                solution = [0]
                sol_f = 0
                solved = False
                sol_K = 0
                take_next_seed = False
                try:
                    # Distribute the nodes to vehicles using the approximate
                    # service costs in D_s and by solving it as GAP
                    #
                    #TODO: the model has the same dimensions for all iterations
                    # with the same K and only the weights differ. Consider
                    # replacing the coefficient matrix e.g. via C interface
                    #https://stackoverflow.com/questions/33461329
                    assignments = _solve_gap(N, D_s, d, C, currentK + incK, L,
                                             L_ctr_multipiler)
                    if not assignments:
                        if __debug__:
                            log(DEBUG, "INFEASIBILITY: GAP infeasible solution")
                            corrective_action = "try with another seed = %d" % seed_trial
                        GAP_infeasible = True
                    else:
                        if __debug__:
                            log(DEBUG - 1, "Assignments = %s" % str(assignments))

                        # Due to floating point inaccuracies in L constrained
                        # cases the feasrelax may be used, which, in turn, can
                        # in some corner cases return solutions that are not
                        # really feasible. Make sure it is not the case
                        if L:
                            served = set([0])

                        for route_nodes in assignments:
                            if not route_nodes:
                                continue
                            route, route_l = solve_tsp(D, [0] + route_nodes)

                            # Check for feasibility violations due to feasrelax
                            if L:
                                served |= set(route_nodes)
                            if C and d and totald(route, d) - C_EPS > C:
                                if __debug__:
                                    log(DEBUG, "INFEASIBILITY: feasRelax " +
                                        "caused GAP infeasible solution " +
                                        " (capacity constraint violation)")
                                GAP_infeasible = True
                                break  # the route loop

                            solution += route[1:]
                            sol_f += route_l
                            sol_K += 1

                            if __debug__:
                                log(DEBUG - 2, "DEBUG: Got TSP solution %s (%.2f)" %
                                    (str(route), route_l))

                            if L and route_l - S_EPS > L:
                                if __debug__:
                                    log(DEBUG, "INFEASIBILITY: L infeasible solution")
                                L_infeasible = True
                                break  # break route for loop

                        # Check for feasibility violations due to feasrelax.
                        # Have all customers been served?
                        if not GAP_infeasible and not L_infeasible and \
                                L and len(served) < len(D):
                            if __debug__:
                                log(DEBUG, "INFEASIBILITY: feasRelax caused GAP " +
                                    "infeasible solution (all customers " +
                                    "are not served)")
                            GAP_infeasible = True

                    if not GAP_infeasible and not L_infeasible:
                        if __debug__:
                            log(DEBUG, "Yielded feasible solution = %s (%.2f)" %
                                (str(solution), sol_f))
                        solved = True

                except GurobiError as grbe:
                    if __debug__:
                        log(WARNING, str(grbe))

                    if L and use_adaptive_L_constraint_weights and \
                            L_ctr_multipiler_tries < L_ADAPTIVE_MPLR_MAX_TRIES:
                        L_ctr_multipiler += L_ADAPTIVE_MPLR_INC
                        L_ctr_multipiler_tries += 1
                        if __debug__:
                            corrective_action = "Gurobi timeout, try with another L_ctr_multipiler = %.2f" % L_ctr_multipiler
                    elif increase_K_on_failure and currentK + incK + 1 <= maxKinc:
                        if L and use_adaptive_L_constraint_weights and \
                                L_ctr_multipiler_tries >= L_ADAPTIVE_MPLR_MAX_TRIES:
                            # try with all multiplier values for larger K
                            L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT
                            L_ctr_multipiler_tries = 0
                        incK += 1
                        if __debug__:
                            corrective_action = "Gurobi timeout, temporarily increase K by %d" % incK
                    elif find_optimal_seeds:
                        take_next_seed = True
                    else:
                        grbe.message += ", consider increasing the MAX_MIP_SOLVER_RUNTIME in config.py"
                        raise grbe
                else:
                    if L and use_adaptive_L_constraint_weights:
                        ## Adaptive GAP/L constraint multiplier reset
                        # reset multiplier in case the L feasibility was not
                        # violated or it has reached the max value.
                        if solved or L_ctr_multipiler_tries >= L_ADAPTIVE_MPLR_MAX_TRIES:
                            L_ctr_multipiler = L_ADAPTIVE_MPLR_INIT
                            L_ctr_multipiler_tries = 0
                            take_next_seed = True
                            if not solved and increase_K_on_failure and currentK + incK + 1 <= maxKinc:
                                incK += 1
                                take_next_seed = False
                                if __debug__:
                                    corrective_action = "temporarily increase K by %d" % incK
                            else:
                                if __debug__:
                                    corrective_action = "try with another seed = %d" % seed_trial
                        ## Adaptive GAP/L constraint multiplier update
                        else:
                            L_ctr_multipiler += L_ADAPTIVE_MPLR_INC
                            L_ctr_multipiler_tries += 1
                            if __debug__:
                                corrective_action = "try with another L_ctr_multipiler = %.2f" % L_ctr_multipiler
                    else:
                        if not solved and increase_K_on_failure and currentK + incK + 1 <= maxKinc:
                            incK += 1
                            if __debug__:
                                corrective_action = "temporarily increase K by %d" % incK
                        else:
                            take_next_seed = True

                # Store the best so far
                if solved:
                    if is_better_sol(best_f, best_K, sol_f, sol_K, minimize_K):
                        best_sol = solution
                        best_f = sol_f
                        best_K = sol_K
                        found_improving_solution_for_this_K = True
                else:
                    # No feasible solution was found for this trial (max route
                    # cost or capacity constraint was violated).
                    if __debug__:
                        if GAP_infeasible or L_infeasible:
                            log(DEBUG, "Constraint is violated, " + corrective_action)
                        else:
                            log(DEBUG, "Continuing search, " + corrective_action)

                if take_next_seed:
                    incK = 0
                    seed_trial += 1
                    if not find_optimal_seeds:
                        break  # seed loop, possibly try next K
                if seed_trial == N:
                    incK = 0
                    break  # seed loop, possibly try next K

            if minimize_K:
                # do not try a different K if we found a solution
                if best_sol:
                    break  # K loop
            else:  # not minimize_K
                # We already have a feasible solution for K < K_current, and
                # could not find a better solution than that on K_current.
                # Therefore, it is improbable we will find one even if we
                # increase K and we should stop here.
                if best_sol and not found_improving_solution_for_this_K:
                    break
    except KeyboardInterrupt:  # or SIGINT
        # pass on the current best_sol
        raise KeyboardInterrupt(best_sol)

    return best_sol
def data_block(block_str):
    """ Parses all of the NASA polynomials in the species block of the
    mechanism file and subsequently pulls all of the species names and
    thermochemical properties.

    :param block_str: string for thermo block
    :type block_str: str
    :return data_block: all the data from the data string for each species
    :rtype: list(list(str/float))
    """
    thm_dstr_lst = data_strings(block_str)
    thm_dat_lst = tuple(zip(
        map(species_name, thm_dstr_lst),
        map(temperatures, thm_dstr_lst),
        map(low_coefficients, thm_dstr_lst),
        map(high_coefficients, thm_dstr_lst)))

    return thm_dat_lst
def risch_norman(f, x, rewrite=False):
    """Computes indefinite integral using extended Risch-Norman algorithm,
    also known as parallel Risch. This is a simplified version of the full
    recursive Risch algorithm. It is designed for integrating various
    classes of functions including transcendental elementary or special
    functions like Airy, Bessel, Whittaker and Lambert.

    The main difference between this algorithm and the recursive one is
    that rather than computing a tower of differential extensions in a
    recursive way, it handles all cases in one shot. That's why it is
    called the parallel Risch algorithm. This makes it much faster than
    the original approach.

    Another benefit is that it doesn't require rewriting expressions in
    terms of complex exponentials. Rather it uses tangents and so
    antiderivatives are being found in a more familiar form.

    The Risch-Norman algorithm can also handle special functions very
    easily without any additional effort. Just the differentiation method
    must be known for a given function.

    Note that this algorithm is not a decision procedure. If it computes
    an antiderivative for a given integral then it's a proof that such a
    function exists. However, when it fails then there still may exist an
    antiderivative and a fallback to the recursive Risch algorithm would
    be necessary. The question if this algorithm can be made a full
    featured decision procedure still remains open.

    For more information on the implemented algorithm refer to:

    [1] K. Geddes, L. Stefanus, On the Risch-Norman Integration Method
        and its Implementation in Maple, Proceedings of ISSAC'89,
        ACM Press, 212-217.

    [2] J. H. Davenport, On the Parallel Risch Algorithm (I),
        Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.

    [3] J. H. Davenport, On the Parallel Risch Algorithm (III): Use of
        Tangents, SIGSAM Bulletin 16 (1982), 3-6.

    [4] J. H. Davenport, B. M. Trager, On the Parallel Risch Algorithm (II),
        ACM Transactions on Mathematical Software 11 (1985), 356-362.
    """
    f = Basic.sympify(f)

    if not f.has(x):
        return f * x

    rewritables = {
        (sin, cos, cot): tan,
        (sinh, cosh, coth): tanh,
    }

    if rewrite:
        for candidates, rule in rewritables.iteritems():
            f = f.rewrite(candidates, rule)
    else:
        for candidates in rewritables.iterkeys():
            if f.has(*candidates):
                break
        else:
            rewrite = True

    terms = components(f)

    for g in set(terms):
        h = g.diff(x)
        if not isinstance(h, Basic.Zero):
            terms |= components(h)

    terms = [ g for g in terms if g.has(x) ]

    V, in_terms, out_terms = [], [], {}

    for i, term in enumerate(terms):
        V += [ Symbol('x%s' % i) ]
        N = term.count_ops(symbolic=False)
        in_terms += [ (N, term, V[-1]) ]
        out_terms[V[-1]] = term

    in_terms.sort(lambda u, v: int(v[0] - u[0]))

    def substitute(expr):
        for _, g, symbol in in_terms:
            expr = expr.subs(g, symbol)
        return expr

    diffs = [ substitute(g.diff(x)) for g in terms ]
    denoms = [ g.as_numer_denom()[1] for g in diffs ]
    denom = reduce(lambda p, q: lcm(p, q, V), denoms)
    numers = [ normal(denom * g, *V) for g in diffs ]

    def derivation(h):
        return Basic.Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])

    def deflation(p):
        for y in p.atoms(Basic.Symbol):
            if not isinstance(derivation(p), Basic.Zero):
                c, q = p.as_polynomial(y).as_primitive()
                return deflation(c) * gcd(q, q.diff(y))
        else:
            return p

    def splitter(p):
        for y in p.atoms(Basic.Symbol):
            if not isinstance(derivation(y), Basic.Zero):
                c, q = p.as_polynomial(y).as_primitive()

                q = q.as_basic()

                h = gcd(q, derivation(q), y)
                s = quo(h, gcd(q, q.diff(y), y), y)

                c_split = splitter(c)

                if s.as_polynomial(y).degree() == 0:
                    return (c_split[0], q * c_split[1])

                q_split = splitter(normal(q / s, *V))

                return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
        else:
            return (S.One, p)

    special = []

    for term in terms:
        if isinstance(term, Basic.Function):
            if isinstance(term, Basic.tan):
                special += [ (1 + substitute(term)**2, False) ]
            elif isinstance(term.func, tanh):
                special += [ (1 + substitute(term), False),
                             (1 - substitute(term), False) ]
            #elif isinstance(term.func, Basic.LambertW):
            #    special += [ (substitute(term), True) ]

    ff = substitute(f)

    P, Q = ff.as_numer_denom()

    u_split = splitter(denom)
    v_split = splitter(Q)

    s = u_split[0] * Basic.Mul(*[ g for g, a in special if a ])
    a, b, c = [ p.as_polynomial(*V).degree() for p in [s, P, Q] ]

    candidate_denom = s * v_split[0] * deflation(v_split[1])
    monoms = monomials(V, 1 + a + max(b, c))

    linear = False

    while True:
        coeffs, candidate, factors = [], S.Zero, set()

        for i, monomial in enumerate(monoms):
            coeffs += [ Symbol('A%s' % i, dummy=True) ]
            candidate += coeffs[-1] * monomial

        candidate /= candidate_denom

        polys = [ v_split[0], v_split[1], u_split[0] ] + [ s[0] for s in special ]

        for irreducibles in [ factorization(p, linear) for p in polys ]:
            factors |= irreducibles

        for i, irreducible in enumerate(factors):
            if not isinstance(irreducible, Basic.Number):
                coeffs += [ Symbol('B%s' % i, dummy=True) ]
                candidate += coeffs[-1] * Basic.log(irreducible)

        h = together(ff - derivation(candidate) / denom)

        numerator = h.as_numer_denom()[0].expand()

        if not isinstance(numerator, Basic.Add):
            numerator = [numerator]

        collected = {}

        for term in numerator:
            coeff, depend = term.as_independent(*V)

            if depend in collected:
                collected[depend] += coeff
            else:
                collected[depend] = coeff

        solutions = solve(collected.values(), coeffs)

        if solutions is None:
            if linear:
                break
            else:
                linear = True
        else:
            break

    if solutions is not None:
        antideriv = candidate.subs_dict(solutions)

        for C in coeffs:
            if C not in solutions:
                antideriv = antideriv.subs(C, S.Zero)

        antideriv = simplify(antideriv.subs_dict(out_terms)).expand()

        if isinstance(antideriv, Basic.Add):
            return Basic.Add(*antideriv.as_coeff_factors()[1])
        else:
            return antideriv
    else:
        if not rewrite:
            return risch_norman(f, x, rewrite=True)
        else:
            return None
def get_vgg(blocks,
            bias=True,
            use_bn=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".torch", "models"),
            **kwargs):
    """
    Create VGG model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if blocks == 11:
        layers = [1, 1, 2, 2, 2]
    elif blocks == 13:
        layers = [2, 2, 2, 2, 2]
    elif blocks == 16:
        layers = [2, 2, 3, 3, 3]
    elif blocks == 19:
        layers = [2, 2, 4, 4, 4]
    else:
        raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))

    channels_per_layers = [64, 128, 256, 512, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = VGG(
        channels=channels,
        bias=bias,
        use_bn=use_bn,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
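# Illustrative call (the VGG class comes from the surrounding package; weights
# stay random unless pretrained=True): blocks=16 expands to channels =
# [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]].
net = get_vgg(blocks=16, use_bn=True)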
def plot_var(ax, var_samples, X, Xnew, y_err):
    """Plots the median and 95% CI from samples of the variance"""
    Xnew_ = Xnew.flatten()
    var_samples = np.exp(var_samples)
    if var_samples.squeeze().ndim == 1:
        ax.plot(Xnew, var_samples, "C0", label="Median")
    else:
        l, m, u = get_quantiles(var_samples)
        ax.plot(Xnew, m, "C0", label="Median")
        ax.fill_between(Xnew.flatten(), l, u, facecolor="C0", alpha=0.5, label="95% CI")
    # ax.plot(Xnew, noise(signal(Xnew_)) ** 2, "--k", label="Noise Function")
    # ax.plot(X, y_err ** 2, "C1.", label="Observed Variance")
    ax.set_title("Variance Behavior")
    ax.legend(loc="upper left")
def main():
    """This function runs the program"""
    gui_install_update = AWCGUI
    awc = AWC
    try:
        if not os.path.isfile("config.cfg"):
            gui_install_update().run()
            awc()
        elif os.path.isfile("config.cfg"):
            awc()
    # Logs all errors
    except Exception as e:
        sg.PopupError("An Error has occurred! Program shutting down!")
        if os.path.isfile("error.log"):
            with open("error.log", "a") as f:
                f.write("AWC.exe - ERROR: " + str(e) + '\n')
        else:
            with open("error.log", "w") as f:
                f.write("AWC.exe - ERROR: " + str(e) + '\n')
def configure_logger():
    """
    Declare and validate existence of log directory; create and configure
    logger object.

    :return: instance of configured logger object
    """
    log_dir = os.path.join(os.getcwd(), 'log')
    create_directory_if_not_exists(None, log_dir)
    configure_logging(log_dir)
    logger = logging.getLogger('importer_logger')
    return logger
def preprocess_spectra(fluxes, interpolated_sn, sn_array, y_offset_array):
    """Preprocesses a batch of spectra, adding noise according to the
    specified S/N profile, and applies continuum error.

    INPUTS
    fluxes: 2D array with flux values for each spectrum (num examples x n pixels)
    interpolated_sn: length n 1D array with relative S/N values for each pixel
    sn_array: 2D array with dims (num examples, 1) with the S/N selected for each example
    y_offset_array: same as sn_array but with the y-offsets

    OUTPUTS
    fluxes: 2D array with the preprocessed fluxes for each spectrum
    """
    n_pixels = np.size(fluxes[0, :])
    n_stars = np.size(fluxes[:, 1])
    base_stddev = 1.0 / sn_array[:, 0]
    for i in range(n_stars):
        noise_array = np.random.normal(0.0, scale=base_stddev[i], size=n_pixels)
        fluxes[i, :] += noise_array * interpolated_sn
    fluxes += y_offset_array
    return fluxes
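# Minimal synthetic example (shapes only; the values are illustrative):
# 3 spectra of 5 pixels each, per-example S/N of 50, a flat S/N profile,
# and a small constant continuum offset per example.
import numpy as np

fluxes = np.ones((3, 5))
interpolated_sn = np.ones(5)
sn_array = np.full((3, 1), 50.0)
y_offset_array = np.full((3, 1), 0.01)
noisy = preprocess_spectra(fluxes, interpolated_sn, sn_array, y_offset_array)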
def copy_file(args, day_directory, text_converters, conv_after, file_info):
    """Copy a single language file to day directory"""
    # 1. Get full path of output file
    raw_file_name, raw_file_text = file_info
    out_file_name = get_file_name(day_directory, raw_file_name, text_converters)

    # 2. Don't write if the file already exists
    if os.path.isfile(out_file_name):
        print("File %s already exists, skipping" % out_file_name)
        return

    # 3. Convert the text for this file
    converted_text = convert_text(text_converters, raw_file_text)

    # 4. Do any final conversion
    final_text = conv_after(args, text_converters, converted_text)

    # 5. Write file
    with open(out_file_name, 'w') as output_file:
        output_file.write(final_text)
def read_config(path=None):
    """ Function for reading in the config.json file """
    # Create the filepath
    if path:
        if "config.json" in path:
            file_path = path
        else:
            file_path = f"{path}/config.json"
    else:
        file_path = "config.json"

    # Load in config
    try:
        with open(file_path, "r") as json_file:
            config = json.load(json_file)
    except Exception:
        raise Exception("Your config file is corrupt (wrong syntax, missing values, ...)")

    return config
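# Illustrative calls (the paths are hypothetical): both resolve to a
# config.json file, either in the working directory or under the given path.
config = read_config()              # ./config.json
config = read_config("/etc/myapp")  # /etc/myapp/config.json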
def ema_incentive(ds):
    """
    Parse stream name 'incentive--org.md2k.ema_scheduler--phone'.
    Convert the json column to multiple columns.

    Args:
        ds: Windowed/grouped DataStream object

    Returns:
        ds: Windowed/grouped DataStream object.
    """
    schema = StructType([
        StructField("timestamp", TimestampType()),
        StructField("localtime", TimestampType()),
        StructField("user", StringType()),
        StructField("version", IntegerType()),
        StructField("incentive", FloatType()),
        StructField("total_incentive", FloatType()),
        StructField("ema_id", StringType()),
        StructField("data_quality", FloatType())
    ])

    @pandas_udf(schema, PandasUDFType.GROUPED_MAP)
    def parse_ema_incentive(user_data):
        all_vals = []
        for index, row in user_data.iterrows():
            ema = row["incentive"]
            if not isinstance(ema, dict):
                ema = json.loads(ema)

            incentive = ema["incentive"]
            total_incentive = ema["totalIncentive"]
            ema_id = ema["emaId"]
            data_quality = ema["dataQuality"]

            all_vals.append([row["timestamp"], row["localtime"], row["user"], 1,
                             incentive, total_incentive, ema_id, data_quality])

        return pd.DataFrame(all_vals, columns=['timestamp', 'localtime', 'user',
                                               'version', 'incentive',
                                               'total_incentive', 'ema_id',
                                               'data_quality'])

    # Check if the datastream object contains a grouped type of DataFrame
    if not isinstance(ds._data, GroupedData):
        raise Exception(
            "DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")

    data = ds._data.apply(parse_ema_incentive)
    return DataStream(data=data, metadata=Metadata())
def read_fingerprint(finger_name: str) -> np.ndarray:
    """
    Given the file "x_y_z" name this function returns a vector with the
    fingerprint data.

    :param finger_name: A string with the format "x_y_z".
    :return: A vector (1x256) containing the fingerprint data.
    """
    base_path = "rawData/QFM16_"
    path = base_path + finger_name + ".txt"
    return read_finger_file(path)
def plot_tree(T, res=None, title=None, cmap_id="Pastel2"):
    """Plots a given tree, containing hierarchical segmentation.

    Parameters
    ----------
    T: mir_eval.segment.tree
        A tree object containing the hierarchical segmentation.
    res: float
        Frame-rate resolution of the tree (None to use seconds).
    title: str
        Title for the plot. `None` for no title.
    cmap_id: str
        Color Map ID
    """
    import matplotlib.pyplot as plt

    def round_time(t, res=0.1):
        v = int(t / float(res)) * res
        return v

    # Get color map
    cmap = plt.get_cmap(cmap_id)

    # Get segments by level
    level_bounds = []
    for level in T.levels:
        if level == "root":
            continue
        segments = T.get_segments_in_level(level)
        level_bounds.append(segments)

    # Plot axvspans for each segment
    B = float(len(level_bounds))
    #plt.figure(figsize=figsize)
    for i, segments in enumerate(level_bounds):
        labels = utils.segment_labels_to_floats(segments)
        for segment, label in zip(segments, labels):
            #print i, label, cmap(label)
            if res is None:
                start = segment.start
                end = segment.end
                xlabel = "Time (seconds)"
            else:
                start = int(round_time(segment.start, res=res) / res)
                end = int(round_time(segment.end, res=res) / res)
                xlabel = "Time (frames)"
            plt.axvspan(start, end,
                        ymax=(len(level_bounds) - i) / B,
                        ymin=(len(level_bounds) - i - 1) / B,
                        facecolor=cmap(label))

    # Plot labels (num must be an int for np.linspace)
    L = float(len(T.levels) - 1)
    plt.yticks(np.linspace(0, (L - 1) / L, num=int(L)) + 1 / L / 2.,
               T.levels[1:][::-1])
    plt.xlabel(xlabel)
    if title is not None:
        plt.title(title)
    plt.gca().set_xlim([0, end])
def read_data(data_path):
    """This function reads in the histogram data from the provided path
    and returns a pandas dataframe
    """
    # Assumes the histogram data is stored as a CSV file; adjust the reader
    # (separator, header) to the actual file format if it differs.
    histogram_df = pd.read_csv(data_path)
    return histogram_df
def test_hash_dict(test_input, expected):
    """Test hash dict function."""
    result = data_obfus.hash_dict(test_input)
    for key in test_input:
        check.equal(test_input[key] != result[key], expected)
def _interpolate_face_to_bar(nodes, eid, eid_new, nid_new, mid, area, J, fbdf,
                             inid1, inid2, inid3,
                             xyz1_local, xyz2_local, xyz3_local,
                             xyz1_global, xyz2_global, xyz3_global,
                             nodal_result,
                             local_points, global_points,
                             geometry, result,
                             rod_elements, rod_nids, rod_xyzs,
                             plane_atol, plane_bdf_offset=0.):
    """
    These edges have crossings.  We rework:

        y = m*x + b

    into the long form:

        y = (y2-y1) / (x2-x1) * (x-x1) + y1

    to get:

        y = y2 * (x-x1)/(x2-x1) + y1 * (1 - (x-x1)/(x2-x1))

    or:

        p = (x-x1)/(x2-x1)  # percent
        y = y2 * p + y1 * (1 - p)

    Then we sub the y for the point (3 floats) and sub out x for the
    y-coordinate:

        percent = (y - y1_local) / (y2_local - y1_local)
        avg_xyz = xyz2 * percent + xyz1 * (1 - percent)

    Then we just crank the formula where we set the value of "y" to 0.0:

        percent = (0. - y1_local) / (y2_local - y1_local)

    That's how you do 1 edge, so we do this 3 times.  One of the edges won't
    be a crossing (the percent is not between 0 and 1.), but 2 edges are.
    Thus, two points create a line.

    We also need to handle the dot case.  We're using a triangle
    (nodes 1, 2, and 3), so we have 3 vectors:

        e0 = e12 = p2 - p1
        e1 = e13 = p3 - p1
        e2 = e23 = p3 - p2

    As mentioned previously, only two vectors are used (e.g., e12 and e13).
    When combined with the percentage, we find that for a dot, using e12 and
    e13, node 1 must be a source (both vectors originate from node 1).  Thus
    the percentages for e12=0. and e13=0.  Similarly, node 3 is a sink (both
    vectors end at node 3) and node 2 is a corner/mixed (one vector ends at
    node 2).  In summary:

        Node  Combination  Percentages for Dot
        ====  ===========  ===================
        1     e12, e13     0., 0.
        2     e12, e23     1., 0.
        3     e13, e23     1., 1.
    """
    #print('edge =', edge)
    #if eid == 11029:
        #print('eid=%s inid1=%s, inid2=%s, inid3=%s' % (eid, inid1, inid2, inid3))
        #print('nid1=%s, nid2=%s, nid3=%s' % (nodes[inid1], nodes[inid2], nodes[inid3]))
    edgesi = (
        # (nid_index, xyz in local frame, xyz in global frame)
        ((inid1, xyz1_local, xyz1_global), (inid2, xyz2_local, xyz2_global)),  # edge 1-2
        ((inid2, xyz2_local, xyz2_global), (inid3, xyz3_local, xyz3_global)),  # edge 2-3
        ((inid1, xyz1_local, xyz1_global), (inid3, xyz3_local, xyz3_global)),  # edge 1-3
    )
    nid_a_prime = nid_new
    nid_b_prime = nid_new + 1

    #projected_points = []
    #lengths = []

    # we need to prevent dots
    msg = ''
    results_temp = []
    geometry_temp = []
    i_values = []
    percent_values = []
    local_points_temp = []
    global_points_temp = []
    is_result = nodal_result is not None
    for i, (edge1, edge2) in enumerate(edgesi):
        (inid_a, p1_local, p1_global) = edge1
        (inid_b, p2_local, p2_global) = edge2
        #print('  inid_a=%s, p1_local=%s, p1_global=%s' % (inid_a, p1_local, p1_global))
        #print('  inid_b=%s, p2_local=%s, p2_global=%s' % (inid_b, p2_local, p2_global))
        py1_local = p1_local[1]
        py2_local = p2_local[1]
        #length = np.linalg.norm(p2_global - p1_global)
        #lengths.append(length)

        dy = py2_local - py1_local
        if np.allclose(dy, 0.0, atol=plane_atol):
            # We choose to ignore the triangle edge on/close to the symmetry
            # plane.  Instead, we use the neighboring projected edges as it's
            # more correct.  Also, that way we do things in a more consistent
            # way.
            continue

        # the second number is on the top
        percent = (0. - py1_local) / dy
        abs_percent_shifted = abs(percent - 0.5)
        #print('  percent = %s' % percent)
        #print('  abs_percent_shifted = %s' % abs_percent_shifted)

        # catching the case where all edges will intersect with the plane
        # if the edges are extended to infinity
        #
        # a "valid" percent is ranged from [0.-tol, 1.+tol], so:
        #   b = [0.-tol, 1.+tol] - 0.5 = [-0.5-tol, 0.5+tol]
        # is the same thing
        #   in_range = abs(b) < 0.5+tol
        in_range = abs_percent_shifted < 0.5 + plane_atol
        if not in_range:
            #print('  **too big...\n')
            continue

        cut_edgei = [inid_a, inid_b]
        cut_edgei.sort()
        avg_local = p2_local * percent + p1_local * (1 - percent)
        avg_global = p2_global * percent + p1_global * (1 - percent)
        #projected_points.append(avg_global)

        xl, yl, zl = avg_local
        xg, yg, zg = avg_global
        local_points_temp.append(avg_local)
        global_points_temp.append(avg_global)

        #print('  inid1=%s inid2=%s edge1=%s' % (inid1, inid2, str(edge1)))
        #print('  xyz1_local=%s xyz2_local=%s' % (xyz1_local, xyz2_local))
        #print('  avg_local=%s' % avg_local)
        #print('  avg_global=%s' % avg_global)

        sid = 1
        out_grid = ['GRID', nid_new, None, ] + list(avg_local)
        #rod_elements, rod_nids, rod_xyzs
        rod_nids.append(nid_new)
        rod_xyzs.append(avg_local)
        out_grid[4] += plane_bdf_offset
        msg += print_card_8(out_grid)
        #print('  ', out_grid)
        #print('  plane_atol=%s dy=%s\n' % (plane_atol, dy))
        if is_result:
            result1 = nodal_result[inid_a]
            result2 = nodal_result[inid_b]
            resulti = result2 * percent + result1 * (1 - percent)
            out_temp = ['TEMP', sid, nid_new, resulti]  #+ resulti.tolist()
            msg += print_card_8(out_temp)
            geometry_temp.append([eid, nid_new] + cut_edgei)
            # TODO: doesn't handle results of length 2+
            results_temp.append([xl, yl, zl, xg, yg, zg, resulti])
        else:
            geometry_temp.append([eid, nid_new] + cut_edgei)
            results_temp.append([xl, yl, zl, xg, yg, zg])
        i_values.append(i)
        percent_values.append(percent)
        nid_new += 1

    #p1 = global_points[-2]
    #p2 = global_points[-1]
    #dxyz = np.linalg.norm(p2 - p1)
    if _is_dot(i_values, percent_values, plane_atol):
        #print('dot!!!')
        mid = 2
        return eid_new, nid_new

    fbdf.write(msg)
    local_points.extend(local_points_temp)
    global_points.extend(global_points_temp)
    geometry.extend(geometry_temp)
    result.extend(results_temp)

    #projected_points = np.array(projected_points)
    #p1 = projected_points[0, :]
    #p2 = projected_points[1, :]
    #min_edge_length = min(lengths)

    # hack to get rid of dot intersections
    #dist = np.linalg.norm(p2 - p1)
    #if dist < min_edge_length / 2.:
        ##print(projected_points)
        #print('removing dot...inid1=%s inid2=%s d=%s mel=%s' % (
            #inid1, inid2, dist, min_edge_length))
        #for unused_i in range(2):
            #global_points.pop()
            #local_points.pop()
            #geometry.pop()
            #result.pop()
        #return eid_new, nid_new

    #print('  cut_edge =', cut_edge)
    # if there are 3 nodes in the cut edge, it's fine; we'll take the first two
    conrod = ['CONROD', eid, nid_a_prime, nid_b_prime, mid, area, J]
    #print('  ', conrod)
    fbdf.write(print_card_8(conrod))
    rod_elements.append([eid, nid_a_prime, nid_b_prime])
    eid_new += 1
    nid_new += 2
    return eid_new, nid_new
28,941
def get_number_of_tickets():
    """Get number of tickets to enter from user"""
    num_tickets = 0
    while num_tickets == 0:
        try:
            num_tickets = int(input('How many tickets do you want to get?\n'))
        except ValueError:
            print("Invalid entry for number of tickets.")
    return num_tickets
28,942
def p_comment_left(p): """ tag : DOXYGEN_BEGIN skip_doxy DOXYGEN_END """
28,943
def scrape(file): """ scrapes rankings, counts from agg.txt file""" D={} G={} with open(file,'r') as f: for line in f: L = line.split(' ') qid = L[1][4:] if qid not in D: D[qid]=[] G[qid]=[] #ground truth G[qid].append(int(L[0])) #extract ranks ranks=[] for i in range(2,27): [l,rank]=L[i].split(':') if rank != 'NULL': ranks.append(int(rank)) else: ranks.append(0) D[qid].append(ranks) C={};N={} for qid in D: C[qid]=[] N[qid] = len(D[qid]) A= np.array(D[qid]) assert A.shape[1] == 25 for i in range(25): l = A[:,i] ranked = np.where(l>0)[0] ranking = ranked[np.argsort(l[ranked])] C[qid].append(ranking) #pickle.dump(C,open('MQ-lists.p','wb')) return C,N,G
28,944
def private_key_to_WIF(private_key): """ Convert the hex private key into Wallet Import Format for easier wallet importing. This function is only called if a wallet with a balance is found. Because that event is rare, this function is not significant to the main pipeline of the program and is not timed. """ digest = hashlib.sha256(binascii.unhexlify('80' + private_key)).hexdigest() var = hashlib.sha256(binascii.unhexlify(digest)).hexdigest() var = binascii.unhexlify('80' + private_key + var[0:8]) alphabet = chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' value = pad = 0 result = '' for i, c in enumerate(var[::-1]): value += 256**i * c while value >= len(alphabet): div, mod = divmod(value, len(alphabet)) result, value = chars[mod] + result, div result = chars[value] + result for c in var: if c == 0: pad += 1 else: break return chars[0] * pad + result
28,945
def woodbury_solve_vec(C, v, p):
    """ Vectorized Woodbury solve --- overkill

    Computes the matrix vector product (Sigma)^{-1} p
    where
        Sigma = CCt + diag(exp(v))
        C = D x r real valued matrix
        v = D dimensional real valued vector

    The point of this function is that you never have to explicitly
    represent the full DxD matrix to do this multiplication --- hopefully
    that will cut down on memory allocations, allow for better scaling.

    in comments below, we write Sigma = CCt + A, where A = diag(exp(v))
    """
    # set up vectorization
    if C.ndim == 2:
        C = np.expand_dims(C, 0)
        assert v.ndim == 1, "v shape mismatched"
        assert p.ndim == 1, "p shape mismatched"
        v = np.expand_dims(v, 0)
        p = np.expand_dims(p, 0)

    bsize, D, r = np.shape(C)

    # compute the inverse of the diagonal component
    inv_v = np.exp(-v)          # A^{-1}
    aC = C*inv_v[:, :, None]    # A^{-1} C

    # low rank, r x r term: (Ir + Ct A^{-1} C)
    r_term = np.einsum('ijk,ijh->ikh', C, aC) + \
        np.eye(r)

    # compute inverse term (broadcasts over first axis)
    #   (Ir + Ct A^{-1} C)^{-1} (Ct A^{-1})
    # in einsum notation:
    #   - i indexes minibatch (vectorization)
    #   - r indexes rank dimension
    #   - d indexes D dimension (obs dimension)
    inv_term = np.linalg.solve(r_term, np.swapaxes(aC, 1, 2))
    back_term = np.einsum('idr,id->ir', aC, p)   # (Ct A^{-1} p)
    Sigvs = inv_v*p - np.einsum('ird,ir->id', inv_term, back_term)
    return Sigvs
28,946
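A quick self-contained check of the Woodbury identity this function implements, comparing the low-rank solve against an explicit dense solve on random test data (the shapes and values are arbitrary):

import numpy as np

D, r = 5, 2
rng = np.random.default_rng(0)
C = rng.normal(size=(D, r))
v = rng.normal(size=D)   # log-diagonal: A = diag(exp(v))
p = rng.normal(size=D)

Sigma = C @ C.T + np.diag(np.exp(v))
direct = np.linalg.solve(Sigma, p)
# the function adds a batch axis for 2D C, so compare against its first (only) row
assert np.allclose(woodbury_solve_vec(C, v, p)[0], direct)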
async def remove_roles(guild): """Remove all roles for this guild.""" Rules = Query() db.remove(Rules.guild == guild.id) del RULES[guild.id]
28,947
def subsample(inputs, factor, scope=None): """Subsamples the input along the spatial dimensions. Args: inputs: A `Tensor` of size [batch, height_in, width_in, channels]. factor: The subsampling factor. scope: Optional variable_scope. Returns: output: A `Tensor` of size [batch, height_out, width_out, channels] with the input, either intact (if factor == 1) or subsampled (if factor > 1). """ if factor == 1: return inputs else: return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
28,948
def pvfactors_engine_run(data, pvarray_parameters, parallel=0, mode='full'):
    """My wrapper function to launch the pvfactors engine in parallel. It is mostly for Windows use.
    In Linux you can directly call run_parallel_engine. It uses MyReportBuilder to generate the output.

    Args:
        data (pandas DataFrame): The data to fit the model.
        pvarray_parameters (dict): The pvfactors dict describing the simulation.
        parallel (int, optional): Number of threads to launch. Defaults to 0 (just calls PVEngine.run_all_timesteps)
        mode (str): full or fast depending on the type of back irradiances. See pvfactors doc.

    Returns:
        pandas DataFrame: The results of the simulation, as desired in MyReportBuilder.
    """
    n, row = _get_cut(pvarray_parameters['cut'])
    rb = Report(n, row)

    if parallel > 1:
        report = run_parallel_engine(rb, pvarray_parameters, data.index,
                                     data.dni, data.dhi, data.zenith, data.azimuth,
                                     data.surface_tilt, data.surface_azimuth,
                                     data.albedo, n_processes=parallel)
    else:
        pvarray = OrderedPVArray.init_from_dict(pvarray_parameters)
        engine = PVEngine(pvarray)
        engine.fit(data.index, data.dni, data.dhi, data.zenith, data.azimuth,
                   data.surface_tilt, data.surface_azimuth, data.albedo, data.ghi)
        if mode == 'full':
            report = engine.run_full_mode(rb.build)
        else:
            report = engine.run_fast_mode(rb.build, pvrow_index=0, segment_index=0)

    df_report = pd.DataFrame(report, index=data.index).fillna(0)
    return df_report
28,949
def generate_suite(pairs, save_path, suite_name, functions): """ Turn pairs produced by the combinator into a syntaxgym suite. Parameters ---------- pairs : [FoilPair] Duh. save_path : str Filepath to save the suite to. suite_name : str Name of suite. functions : (str, str, str) Names of (generator, selector, combinator) functions used to create suite. """ suite = dict() suite["meta"] = dict() suite["meta"]["name"] = suite_name suite["meta"]["metric"] = "mean" suite["meta"]["comments"] = dict() suite["meta"]["comments"]["functions"] = functions suite["predictions"] = [{"type": "formula", "formula": pairs[0].formula}] suite["region_meta"] = pairs[0].region_meta suite["items"] = [] for i, pair in enumerate(pairs): item = dict() item["item_number"] = i item["conditions"] = [pair.correct, pair.foiled] suite["items"].append(item) with open(save_path, "w") as suite_file: json.dump(suite, suite_file)
28,950
def get_service_button(button_text, service, element="#bottom_right_div"):
    """ Generate a button that calls the std_srvs/Empty service when pressed """
    print("Adding a service button!")
    return str(render.service_button(button_text, service, element))
28,951
def create_train_test_set(data, labels, test_size): """ Splits dataframe into train/test set Inputs: data: encoded dataframe containing encoded name chars labels: encoded label dataframe test_size: percentage of input data set to use for test set Returns: data_train: Subset of data set for training data_test : Subset of data set for test label_train: Subset of label set for training label_test: Subset of label set for testing """ data_train, data_test, label_train, label_test = skMS.train_test_split(data, labels, test_size=test_size) return [data_train, data_test, label_train, label_test]
28,952
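Usage sketch, assuming skMS is the usual alias for sklearn.model_selection (the alias is implied by the call above, not stated in the source):

import pandas as pd
import sklearn.model_selection as skMS  # alias assumed by create_train_test_set

data = pd.DataFrame({'a': range(10), 'b': range(10)})
labels = pd.Series([0, 1] * 5)
data_train, data_test, label_train, label_test = create_train_test_set(data, labels, test_size=0.2)
print(len(data_train), len(data_test))  # 8 2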
def connect_to_service(service_name, client=True, env=None, region_name=None, endpoint_url=None): """ Generic method to obtain an AWS service client using boto3, based on environment, region, or custom endpoint_url. """ env = get_environment(env, region_name=region_name) my_session = None if CUSTOM_BOTO3_SESSION: my_session = CUSTOM_BOTO3_SESSION elif CREATE_NEW_SESSION_PER_BOTO3_CONNECTION: my_session = boto3.session.Session() else: my_session = boto3 method = my_session.client if client else my_session.resource if not endpoint_url: if env.region == REGION_LOCAL: endpoint_url = os.environ['TEST_%s_URL' % (service_name.upper())] return method(service_name, region_name=env.region, endpoint_url=endpoint_url)
28,953
def hdf_diff(*args, **kwargs): """:deprecated: use `diff_blocks` (will be removed in 1.1.1)""" return diff_blocks(*args, **kwargs)
28,954
def determine_epsilon():
    """
    We follow Learning Compact Geometric Features to compute this hyperparameter,
    which unfortunately we didn't use later.
    """
    base_dir = '../dataset/3DMatch/test/*/03_Transformed/*.ply'
    files = sorted(glob.glob(base_dir), key=natural_key)
    etas = []
    for eachfile in files:
        pcd = o3d.io.read_point_cloud(eachfile)
        pcd = pcd.voxel_down_sample(0.025)
        pcd_tree = o3d.geometry.KDTreeFlann(pcd)
        distances = []
        for i, point in enumerate(pcd.points):
            [count, vec1, vec2] = pcd_tree.search_knn_vector_3d(point, 2)
            distances.append(np.sqrt(vec2[1]))
        etai = np.median(distances)
        etas.append(etai)
    return np.median(etas)
28,955
def hex2twelve(hex_gen):
    """
    Convert hexadecimal digits to base 12.
    For hex digits of value 10 or more, one of two encodings is picked at random:
        10 followed by hex_num - 10, i.e. a -> 10,0 and f -> 10,5
        11 followed by hex_num - 6,  i.e. a -> 11,4 and f -> 11,9
    :param hex_gen:
    :return: generator()
    """
    for h in hex_gen:
        h = int(h, 16)
        if h < 10:
            yield h
        elif random.randint(0, 1):
            yield 10
            yield h - 10
        else:
            yield 11
            yield h - 6
28,956
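For example, the hex string '1a' yields the digit 1, then one of the two random encodings of 0xa:

import random  # required by hex2twelve

print(list(hex2twelve('1a')))  # [1, 10, 0] or [1, 11, 4], depending on the random branch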
def sort_ipv4_addresses_with_mask(ip_address_iterable): """ Sort IPv4 addresses in CIDR notation | :param iter ip_address_iterable: An iterable container of IPv4 CIDR notated addresses | :return list : A sorted list of IPv4 CIDR notated addresses """ return sorted( ip_address_iterable, key=lambda addr: ( int(addr.split('.')[0]), int(addr.split('.')[1]), int(addr.split('.')[2]), int(addr.split('.')[3].split('/')[0]), int(addr.split('.')[3].split('/')[1]) ) )
28,957
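Example, showing that a shorter mask sorts before a longer one on the same network:

addrs = ['10.0.1.0/24', '10.0.0.0/24', '192.168.1.0/24', '10.0.0.0/8']
print(sort_ipv4_addresses_with_mask(addrs))
# ['10.0.0.0/8', '10.0.0.0/24', '10.0.1.0/24', '192.168.1.0/24']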
def _subattribute_from_json(data: JsonDict) -> SubAttribute: """Make a SubAttribute from JSON data (deserialize) Args: data: JSON data received from Tamr server. """ cp = deepcopy(data) d = {} d["name"] = cp["name"] d["is_nullable"] = cp["isNullable"] d["type"] = from_json(cp["type"]) return SubAttribute(**d)
28,958
def compare_asts(ast1, ast2):
    """Compare two ast trees. Return True if they are equal."""
    # import leo.core.leoGlobals as g
    # Compare the two parse trees.
    try:
        _compare_asts(ast1, ast2)
    except AstNotEqual:
        dump_ast(ast1, tag='AST BEFORE')
        dump_ast(ast2, tag='AST AFTER')
        if g.unitTesting:
            raise
        return False
    except Exception:
        g.warning("Unexpected exception")
        g.es_exception()
        return False
    return True
28,959
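Typical call pattern, as a sketch only: _compare_asts, dump_ast, and g come from the surrounding module and are assumed to be in scope.

import ast

tree1 = ast.parse('x = 1 + 2')
tree2 = ast.parse('x = 1 + 2  # comments do not change the AST')
same = compare_asts(tree1, tree2)  # True when the parse trees match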
def shit():
    """Ready to go deep into the shit?
    Parse --data from -X POST -H 'Content-Type: application/json'
    and send it to the space background
    """
    try:
        body = json.loads(request.data)
    except Exception as e:
        abort(400, e)
    if not body:
        abort(400, "Missing data")
    if "title" not in body:
        abort(400, "Missing `title` param")
    if "artist" not in body:
        abort(400, "Missing `artist` param")
    if "client_id" not in body:
        """client_id is used to send back the lyrics
        through the Notifier aka Flash.
        """
        abort(400, "Missing `client_id` param")

    # send data to our Background Worker aka Iron Rogue
    rogue(body["title"], body["artist"], body["client_id"])

    return make_response(jsonify({
        "code": 202,
        "message": "request accepted and sent into the shit"
    }), 202)
28,960
def process_change(n_of_food, old_val): """ does the nutrient change activate any of our triggers :param n_of_food: :param old_val: :return: """ # Kcal <-> KJoul amino_acids = ['TRP', 'THR', 'ISO', 'LEU', 'LYS', 'MET', 'CYS', 'PHE', 'TYR', 'VAL', 'ARG', 'HIS', 'ALA', 'ASP', 'GLU', 'GLY', 'PRO', 'SER', 'HYP'] fats = ['CHOL', 'TSAT', 'MUFA', 'PUFA', 'TRFA', 'TRMO', 'TRPO', '4:0', '6:0', '8:0', '10:0', '12:0', '14:00', '16:0', '18:0', '15:0', '17:0', '20:0', '22:0', '24:0', '14:1', '15:1', '16:1undiff', '16:1c', '16:1t', '17:1', '18:1undiff', '18:1c', '18:1t', '20:1', '22:1undiff', '22:1c', '22:1t', '24:1c', '18:2undiff', '18:2ccn-6', '18:2tt', '18:2i', '18:2cla', '18:3undiff', '18:3cccn-3', '18:3cccn-6', '18:3i', '18:4', '20:2cc', '20:3undiff', '20:3n-3', '20:3n-6', '20:4undiff', '20:4n-6', '20:5n-3EPA', '21:5', '22:2', '22:3', '22:4n-6', '22:5n-3DPA', '22:6n-3DHA', 'TPST', 'STIG', 'CAMSTR', 'SITSTR'] sugars = ['TSUG', 'TMOS', 'TDIS', 'GLUC', 'FRUC', 'GAL', 'SUCR', 'LACT', 'MALT', 'RAFF', 'STAC', 'MANN', 'SORB', 'TDF', 'NDF', 'PEC', 'STAR'] if n_of_food.NUTR.NUTR_SYMBOL == "KCAL": update_database_nutrient(n_of_food, "KJ", int(round(n_of_food.NUTR_VALUE * 4.182,0))) if n_of_food.NUTR.NUTR_SYMBOL == "KJ": update_database_nutrient(n_of_food, "KCAL", int(round(n_of_food.NUTR_VALUE / 4.182))) if n_of_food.NUTR.NUTR_SYMBOL == "PROT": if n_of_food.NUTR_VALUE == 0: # If we are setting this to zero. f = get_food_from_nutr(n_of_food) set_group(f, amino_acids, 0) if (old_val == 0) & (n_of_food.NUTR_VALUE != 0): # If we are setting this away from zero f = get_food_from_nutr(n_of_food) set_group(f, amino_acids, None) if n_of_food.NUTR.NUTR_SYMBOL == "FAT": if n_of_food.NUTR_VALUE == 0: # If we are setting this to zero. f = get_food_from_nutr(n_of_food) set_group(f, fats, 0) if (old_val == 0) & (n_of_food.NUTR_VALUE != 0): # If we are setting this away from zero f = get_food_from_nutr(n_of_food) set_group(f, fats, None) if n_of_food.NUTR.NUTR_SYMBOL == "CARB": if n_of_food.NUTR_VALUE == 0: # If we are setting this to zero. f = get_food_from_nutr(n_of_food) set_group(f, sugars, 0) if (old_val == 0) & (n_of_food.NUTR_VALUE != 0): # If we are setting this away from zero f = get_food_from_nutr(n_of_food) set_group(f, sugars, None) # Niacin update if n_of_food.NUTR.NUTR_SYMBOL == "N-MG": # get protein factor f = get_food_from_nutr(n_of_food) trp = get_nof(f, "TRP") if trp.NUTR_VALUE is not None: if trp.NUTR_VALUE > 0: protein_factor = trp.NUTR_VALUE else: prot = get_nof(f, "PROT") protein_factor = prot.NUTR_VALUE * 0.011 else: prot = get_nof(f, "PROT") if prot.NUTR_VALUE is not None: protein_factor = prot.NUTR_VALUE * 0.011 else: protein_factor = None nmg_val = n_of_food.NUTR_VALUE # Update N-NE ne = get_nof(f, "N-NE") if protein_factor is not None: ne.NUTR_VALUE = protein_factor * 1000 / 60 + nmg_val ne.save(update_fields=["NUTR_VALUE"])
28,961
def clear_settings(site_name):  # untested - do I need/want this?
    """Update settings to an empty dict (instead of initialized defaults)."""
    return update_settings(site_name, {})
28,962
def reorder_jmultis_det_terms(jmulti_output, constant, seasons): """ In case of seasonal terms and a trend term we have to reorder them to make the outputs from JMulTi and sm2 comparable. JMulTi's ordering is: [constant], [seasonal terms], [trend term] while in sm2 it is: [constant], [trend term], [seasonal terms] Parameters ---------- jmulti_output : ndarray (neqs x number_of_deterministic_terms) constant : bool Indicates whether there is a constant term or not in jmulti_output. seasons : int Number of seasons in the model. That means there are seasons-1 columns for seasonal terms in jmulti_output Returns ------- reordered : ndarray (neqs x number_of_deterministic_terms) jmulti_output reordered such that the order of deterministic terms matches that of sm2. """ if seasons == 0: return jmulti_output constant = int(constant) const_column = jmulti_output[:, :constant] season_columns = jmulti_output[:, constant:constant + seasons - 1].copy() trend_columns = jmulti_output[:, constant + seasons - 1:].copy() return np.hstack((const_column, trend_columns, season_columns))
28,963
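A worked example with seasons=4, where the single trend column moves from the end to just after the constant:

import numpy as np

# columns in JMulTi order: [constant, s1, s2, s3, trend]
jmulti = np.array([[1., 10., 20., 30., 99.],
                   [2., 11., 21., 31., 98.]])
reordered = reorder_jmultis_det_terms(jmulti, constant=True, seasons=4)
print(reordered[0])  # [ 1. 99. 10. 20. 30.] -- sm2 order: [constant, trend, s1, s2, s3]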
def test_require_gdal_version_param_values(): """Parameter values are allowed for all versions >= 1.0""" for values in [('bar',), ['bar'], {'bar'}]: @require_gdal_version('1.0', param='foo', values=values) def a(foo=None): return foo assert a() is None assert a('bar') == 'bar' assert a(foo='bar') == 'bar'
28,964
def nav_get_element(nav_expr, side, dts, xule_context):
    """Get the element or set of elements on the from or to side of a navigation expression.

    This determines the from/to elements of a navigation expression. If the navigation expression includes
    the from/to component, this will be evaluated. The result can be a qname, concept or a set/list of
    qnames or concepts.

    Arguments:
        nav_expr (dictionary): The navigation expression AST node
        side (string): Either 'from' or 'to'.
        xule_context (XuleRuleContext): The processing context

    Returns:
        None - indicates that the side is not in the navigation expression
        set of concepts - the set of concepts if the side evaluates to a concept/qname or a set/list of them
    """
    if side in nav_expr:
        side_value = evaluate(nav_expr[side], xule_context)
        if side_value.type == 'qname':
            concept = XuleProperties.get_concept(dts, side_value.value)
            if concept is None:
                return set()
            else:
                return {concept, }
        elif side_value.type == 'concept':
            return {side_value.value, }
        elif side_value.type in ('set', 'list'):
            concepts = set()
            for item in side_value.value:
                if item.type == 'qname':
                    concept = XuleProperties.get_concept(dts, item.value)
                    if concept is not None:
                        concepts.add(concept)
                elif item.type == 'concept':
                    concepts.add(item.value)
                else:
                    raise XuleProcessingError(_(
                        "In navigation, expecting a collection of qnames or concepts, but found {}.".format(
                            item.type)))
            return concepts
        else:
            raise XuleProcessingError(
                _("In navigation, expecting a concept or qname, but found {}.".format(side_value.type)))
    else:
        return None
28,965
def mp_worker(call): """ Small function that starts a new thread with a system call. Used for thread pooling. :param call: :return: """ call = call.split(' ') verbose = call[-1] == '--verbose' if verbose: call = call[:-1] subprocess.run(call) else: #subprocess.run(call, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # suppress outputs subprocess.run(call, stdout=subprocess.DEVNULL)
28,966
def do_pivot(df: pd.DataFrame, row_name: str, col_name: str, metric_name: str): """ Works with df.pivot, except preserves the ordering of the rows and columns in the pivoted dataframe """ original_row_indices = df[row_name].unique() original_col_indices = df[col_name].unique() pivoted = df.pivot(index=row_name, columns=col_name, values=metric_name) pivoted = pivoted[original_col_indices] pivoted = pivoted.reindex(original_row_indices).reset_index() pivoted.columns.name = None return pivoted
28,967
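Usage sketch showing the ordering that a plain df.pivot would lose:

import pandas as pd

df = pd.DataFrame({'model': ['b', 'b', 'a', 'a'],
                   'metric': ['f1', 'acc', 'f1', 'acc'],
                   'value': [0.7, 0.8, 0.9, 0.95]})
out = do_pivot(df, row_name='model', col_name='metric', metric_name='value')
print(out)  # rows stay in b, a order; columns in f1, acc order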
def log(ctx, **kwargs): """ \b - DESCRIPTION: Download Log Files of A Specified Job. \b - USAGE: flow job log -j JOB_ID --output-path ./examples/ """ config_data, dsl_data = preprocess(**kwargs) job_id = config_data['job_id'] tar_file_name = 'job_{}_log.tar.gz'.format(job_id) extract_dir = os.path.join(config_data['output_path'], 'job_{}_log'.format(job_id)) with closing(access_server('get', ctx, 'job/log', config_data, False, stream=True)) as response: if response.status_code == 200: download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir) res = {'retcode': 0, 'directory': extract_dir, 'retmsg': 'download successfully, please check {} directory'.format(extract_dir)} else: res = response.json() if isinstance(response, requests.models.Response) else response prettify(res)
28,968
def plot_rsp_dists(rsp, rsp_cols, savepath=None): """ Plot distributions of all response variables. Args: rsp : df of response values rsp_cols : list of col names savepath : full path to save the image """ ncols = 4 nrows = int(np.ceil(len(rsp_cols)/ncols)) fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(10, 10)) for i, ax in enumerate(axes.ravel()): if i >= len(rsp_cols): fig.delaxes(ax) # delete un-used ax else: target_name = rsp_cols[i] x = rsp[target_name].copy() x = x[~x.isna()].values sns.distplot(x, bins=100, kde=True, ax=ax, label=target_name, # fit=norm, kde_kws={'color': 'k', 'lw': 0.4, 'alpha': 0.8}, hist_kws={'color': 'b', 'lw': 0.4, 'alpha': 0.5}) ax.tick_params(axis='both', which='major', labelsize=7) txt = ax.yaxis.get_offset_text(); txt.set_size(7) # adjust exponent fontsize in xticks txt = ax.xaxis.get_offset_text(); txt.set_size(7) ax.legend(fontsize=5, loc='best') ax.grid(True) # plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=1.0) if savepath is not None: plt.savefig(savepath, bbox_inches='tight') # dpi=200 else: plt.savefig('rsp_dists.png', bbox_inches='tight')
28,969
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
    """ Batch normalization on FC data.

    Args:
        inputs:      Tensor, 2D BxC input
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)
28,970
def stat(file_name):
    """
    Read information from a FreeSurfer stats file.

    Read information from a FreeSurfer stats file, e.g., `subject/stats/lh.aparc.stats` or `aseg.stats`. A stats file is a text file that contains a data table and various meta data.

    Parameters
    ----------
    file_name: string
        The path to the stats file.

    Returns
    -------
    dictionary of strings (includes nested sub dicts)
        The result dictionary, containing the following 5 keys:
            - 'ignored_lines': list of strings. The list of lines that were not parsed in a special way. This is raw data.
            - 'measures': string list of dimension (n, m) if there are n measures with m properties each stored in the stats file.
            - 'table_data': string list of dimension (i, j) when there are i lines containing j values each in the table stored in the stats file. You may want to convert the columns to the proper data types and put the result into several numpy arrays or a single Pandas data frame.
            - 'table_column_headers': string list. The names for the columns for the table_data. This information is parsed from the table_meta_data and given here for convenience.
            - 'table_meta_data': dictionary. The full table_meta_data. Stores properties in key, value sub dictionaries. For simple table properties, the dictionaries are keys of the returned dictionary. The only exception is the information on the table columns (header data). This information can be found under the key `column_info_`, which contains one dictionary for each column. In these dictionaries, data is stored as explained for simple table properties.

    Examples
    --------
    Read the `aseg.stats` file for a subject:

    >>> import brainload as bl
    >>> stats = bl.stats('/path/to/study/subject1/stats/aseg.stats')

    Collect some data, just to show the data structures.

    >>> print(len(stats['measures'])) # Will print the number of measures.
    >>> print("|".join(stats['measures'][0])) # Print all data on the first measure.

    Now let's print the table_data:

    >>> num_data_rows = len(stats['table_data'])
    >>> num_entries_per_row = len(stats['table_data'][0])

    And get some information on the table columns (the table header):

    >>> print(stats['table_meta_data']['NTableCols']) # will print "10" (from a simple table property stored directly in the dictionary).

    Get the names of all the data columns:

    >>> print(",".join(stats['table_column_headers']))

    Get the name of the first column:

    >>> first_column_name = stats['table_column_headers'][0]

    More detailed information on the individual columns can be found under the special `column_info_` key if needed:

    >>> column2_info_dict = stats['table_meta_data']['column_info_']['2']
    >>> print(column2_info_dict['some_key']) # will print the value

    Note that all data is returned as string type, you will need to convert it to float (or whatever) yourself.
    """
    lines = nit._read_text_file_lines(file_name)
    return _parse_stats_lines(lines)
28,971
def indented(indentation, lines): """Iterator adaptor which stops if there is less indentation. Blank lines are forwarded as empty lines. """ while True: try: line = lines.peek() except StopIteration: break if line.startswith(' ' * indentation): line = line[indentation:] elif not line.strip(): line = '' # Blank line else: break lines.advance() yield line
28,972
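indented expects a line source with peek()/advance() methods; a minimal stand-in for that adaptor (its interface is an assumption inferred from the calls above, not the real class):

class PeekableLines:
    def __init__(self, lines):
        self._lines = list(lines)
        self._pos = 0
    def peek(self):
        if self._pos >= len(self._lines):
            raise StopIteration
        return self._lines[self._pos]
    def advance(self):
        self._pos += 1

lines = PeekableLines(['    a', '', '    b', 'outdented'])
print(list(indented(4, lines)))  # ['a', '', 'b'] -- stops at 'outdented'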
def report_advising_load(filename,database,base_set): """ Generate report of advising/coadvising load. Arguments: filename (str) : filename for output stream database (list of dict) : student database base_set (list) : base set of faculty to include, even if unassigned """ report_stream = open(filename,"w") # header print( "Advising and research committee loads\n" "\n" " {}\n" "\n" " advisor + coadvisor / committee\n" " {}" "".format(DATE_STRING,faculty_legend_tenure), file=report_stream ) (advisor_tally,coadvisor_tally,committee_tally) = tally_advising_assignments(database,base_set) # take all faculty names, excluding "DGS" sorted_names = sorted( (set(advisor_tally.keys()) | set(coadvisor_tally.keys()) | set(committee_tally.keys())), key=key_kicking_dgs_to_end ) for name in sorted_names: if name in coadvisor_tally: coadvisor_tally_string = "+{:1d}".format(coadvisor_tally.get(name,0)) else: coadvisor_tally_string = "" print( "{:34} {:2d} {:2s} / {:<2d} {:1s}" "".format( name,advisor_tally.get(name,0),coadvisor_tally_string,committee_tally.get(name,0), tenure_flag_str(name,faculty_list) ), file=report_stream ) ## print( ## "{:31} {:1d} / {:1d} / {:1d}" ## "".format( ## name,advisor_tally.get(name,0),coadvisor_tally.get(name,0),committee_tally.get(name,0) ## ), ## file=report_stream ## ) report_stream.close()
28,973
def updateSiteInfo(self, site, stype='', cache='', enabled=True, ssl=False, fs='', db=''):
    """updates site record in database"""
    try:
        q = SiteDB.query.filter(SiteDB.sitename == site).first()
    except Exception as e:
        Log.debug(self, "{0}".format(e))
        Log.error(self, "Unable to query database for site info")

    if not q:
        Log.error(self, "{0} does not exist in database".format(site))

    # Only update fields whose new values differ from the old record
    if stype and q.site_type != stype:
        q.site_type = stype

    if cache and q.cache_type != cache:
        q.cache_type = cache

    if q.is_enabled != enabled:
        q.is_enabled = enabled

    if ssl and q.is_ssl != ssl:
        q.is_ssl = ssl

    try:
        q.created_on = func.now()
        db_session.commit()
    except Exception as e:
        Log.debug(self, "{0}".format(e))
        Log.error(self, "Unable to update site info in application database.")
28,974
def _r_long(int_bytes): """Convert 4 bytes in little-endian to an integer. XXX Temporary until marshal's long function are exposed. """ x = int_bytes[0] x |= int_bytes[1] << 8 x |= int_bytes[2] << 16 x |= int_bytes[3] << 24 return x
28,975
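The hand-rolled shifts agree with int.from_bytes for 4 little-endian bytes:

data = b'\x78\x56\x34\x12'
assert _r_long(data) == 0x12345678
assert _r_long(data) == int.from_bytes(data, 'little')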
def parse_model(data: Union[Dict[str, Any], Iterable[Any], Any],
                cls: Union[Type[TModel], Type[Any]],
                rename_keys: Optional[Dict[str, str]] = None) \
        -> Union[TModel, Any]:
    """Instantiates an object of the provided class cls for a provided mapping.

    Instantiates an object of a class specifying a model for the provided mapping.
    An entry in the mapping must be provided for all non-optional attributes of the
    class. Keys are expected to be in CapsWords, matching the snake_case
    corresponding class attributes. Any additional entries found in the mapping
    that do not correspond to class attributes are ignored.

    Args:
        data: Dictionary containing pairs with the names of the attributes and
            their respective values provided to instantiate the model class.
        cls: The model class to instantiate.
        rename_keys: Key names to rename to match model attribute names, used when
            an automated translation of the name from CapsWords to snake_case is
            not sufficient. Renaming must provide the name in CapsWords.

    Returns:
        The instantiated model class object.

    Raises:
        TypeError: Cannot parse the value of a class attribute to the appropriate
            type.
        NotImplementedError: The type of a class attribute is not supported.
    """
    if cls is not NoneType and dataclasses.is_dataclass(cls) \
            and isinstance(data, dict):
        if rename_keys:
            for k, r, in rename_keys.items():
                if k in data:
                    data[r] = data.pop(k)
        field_names = set(f.name for f in dataclasses.fields(cls))
        field_types = {f.name: f.type for f in dataclasses.fields(cls)}
        parsed_data: Dict[str, Any] = {}
        for key, value in data.items():
            key = _to_snake_case(key)
            if key in field_names:
                field_type = field_types[key]
                parsed_data[key] = parse_model(value, field_type)
        args = []
        for f in dataclasses.fields(cls):
            if f.name in parsed_data:
                a = parsed_data[f.name]
            elif f.default is not dataclasses.MISSING:
                a = f.default
            else:
                fc = getattr(f, 'default_factory')
                if fc is not dataclasses.MISSING:
                    a = fc()
                else:
                    raise TypeError(f'Cannot initialize class {cls}. '
                                    f'Missing required parameter {f.name}')
            args.append(a)
        return cls(*args)

    field_type_origin = getattr(cls, '__origin__', None)
    if field_type_origin is Union:
        for candidate_cls in getattr(cls, '__args__', []):
            try:
                return parse_model(data, candidate_cls)
            except (TypeError, ValueError):
                pass
        raise ValueError(f'Cannot parse value {data} as {cls}')
    if field_type_origin is list and isinstance(data, Iterable):
        list_field_type = getattr(cls, '__args__', [])[0]
        if type(list_field_type) is TypeVar:
            return list(data)
        return [parse_model(v, list_field_type) for v in data]
    if field_type_origin is tuple and isinstance(data, Iterable):
        tuple_field_types = getattr(cls, '__args__', [])
        if not tuple_field_types:
            return tuple(data)
        return tuple(parse_model(v, tuple_field_types[0]) for v in data)

    parsable_classes = tuple(getattr(ParsableClass, '__args__', []))
    if cls in parsable_classes:
        return _parse_class(data, cast(Type[ParsableClass], cls))

    raise NotImplementedError(f'Cannot parse data {data} as {cls}.')
28,976
def findTopEyelid(imsz, imageiris, irl, icl, rowp, rp, ret_top=None): """ Description: Mask for the top eyelid region. Input: imsz - Size of the eye image. imageiris - Image of the iris region. irl - icl - rowp - y-coordinate of the inner circle centre. rp - radius of the inner circle centre. ret_top - Just used for returning result when using multiprocess. Output: mask - Map of noise that will be masked with NaN values. """ topeyelid = imageiris[0: rowp - irl - rp, :] lines = findline(topeyelid) mask = np.zeros(imsz, dtype=float) if lines.size > 0: xl, yl = linecoords(lines, topeyelid.shape) yl = np.round(yl + irl - 1).astype(int) xl = np.round(xl + icl - 1).astype(int) yla = np.max(yl) y2 = np.arange(yla) mask[yl, xl] = np.nan grid = np.meshgrid(y2, xl) mask[grid] = np.nan # Return if ret_top is not None: ret_top[0] = mask return mask
28,977
def script_with_queue_path(tmpdir): """ Pytest fixture to return a path to a script with main() which takes a queue and procedure as arguments and adds procedure process ID to queue. """ path = tmpdir.join("script_with_queue.py") path.write( """ def main(queue, procedure): queue.put(procedure.pid) """ ) return f"file://{str(path)}"
28,978
def readini(inifile):
    """ This function will read in data from a configuration file.

    Inputs
        inifile - The name of the configuration file.
    Outputs
        params - A dictionary with keys from INIOPTIONS that holds all of the plotting parameters.
    """
    if inifile is None:
        return

    config = configparser()
    config.read(inifile)
    params = {i: None for i in INIOPTIONS}
    # Read in data from ini file
    for ip in config.options('params'):
        # get the original param name
        rname = config.get('paramsnames', ip)
        # get the parameter and split it up
        params[rname] = config.get('params', ip)
        params[rname] = params[rname].split(" ")
        # If it's a single value, try to convert it to a float
        if len(params[rname]) == 1:
            params[rname] = params[rname][0]
            try:
                params[rname] = float(params[rname])
            except Exception:
                pass
        else:
            for a in range(len(params[rname])):
                try:
                    params[rname][a] = float(params[rname][a])
                except Exception:
                    pass

    # turn the time bounds to time stamps
    if not params['timebounds'] is None:
        timelist = params['timebounds']
        params['timebounds'] = str2posix(timelist)
    # which times will have names
    if params['TextList'] is None:
        params['TextList'] = []
    # change param height to a list of lists
    if not params['paramheight'] is None:
        l1 = params['paramheight'][::2]
        l2 = params['paramheight'][1::2]
        params['paramheight'] = [[i, j] for i, j in zip(l1, l2)]
    if not params['paramlim'] is None:
        l1 = params['paramlim'][::2]
        l2 = params['paramlim'][1::2]
        params['paramlim'] = [[i, j] for i, j in zip(l1, l2)]
    # Default for reinterp is false
    if params['reinterp'] is None:
        params['reinterp'] = False
    else:
        params['reinterp'] = params['reinterp'].lower() == 'yes'
    return params
28,979
def get_param_store(): """ Returns the ParamStore """ return _PYRO_PARAM_STORE
28,980
def secret_add(secret): """ Return a lambda that adds the argument from the lambda to the argument passed into secret_add. :param secret: secret number to add (integer) :return: lambda that takes a number and adds it to the secret """ return lambda addend: secret + addend
28,981
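Usage:

add_five = secret_add(5)
print(add_five(3))   # 8
print(add_five(10))  # 15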
def push_terms(obj: State, overwrite: bool, sync_terms: bool): """ Uploads list of terms in your local project to POEditor. """ if not obj.config_path.exists(): raise click.FileError(obj.config_path.name, 'Config file does not exist') config = obj.config client = obj.client reference_language = obj.language for n, project in enumerate(config['projects']): if not reference_language: project_details = client.view_project_details(project['id']) reference_language = project_details.get('reference_language') if not reference_language: raise click.ClickException(f"project {project_details['name']} does not define reference language. Please pass --reference-language option to select which language to use.") name = client.view_project_details(project_id=project['id']).get('name') click.echo(f"Pushing terms to {name} using '{reference_language}'...", nl=False) try: translation_file = project['terms'][reference_language] except KeyError: translation_file = project['terms_path'].format(language_code=reference_language) if n: sleep(30) client.update_terms( project_id=project['id'], file_path=translation_file, sync_terms=sync_terms, overwrite=overwrite, ) click.echo('done!')
28,982
def morseToBoolArr(code, sps, wpm, fs=None):
    """ morse code to boolean array

    Args:
        code (str): morse code
        sps: Samples per second
        wpm: Words per minute
        fs: Farnsworth speed

    Returns:
        boolean numpy array
    """
    dps = wpmToDps(wpm)  # dots per second
    baseSampleCount = sps / dps
    samplesPerDot = int(round(baseSampleCount))
    samplesPerDash = int(round(baseSampleCount * DASH_WIDTH))
    samplesBetweenElements = int(round(baseSampleCount))
    farnsworthScale = farnsworthScaleFactor(wpm, fs)
    samplesBetweenLetters = int(round(baseSampleCount * CHAR_SPACE * farnsworthScale))
    samplesBetweenWords = int(round(baseSampleCount * WORD_SPACE * farnsworthScale))

    dotArr = np.ones(samplesPerDot, dtype=bool)
    dashArr = np.ones(samplesPerDash, dtype=bool)
    eGapArr = np.zeros(samplesBetweenElements, dtype=bool)
    cGapArr = np.zeros(samplesBetweenLetters, dtype=bool)
    wGapArr = np.zeros(samplesBetweenWords, dtype=bool)

    pieces = []
    prevWasSpace = False
    prevWasElement = False
    for c in code:
        if (c == DOT or c == DASH) and prevWasElement:
            pieces.append(eGapArr)
        if c == DOT:
            pieces.append(dotArr)
            prevWasSpace, prevWasElement = False, True
        elif c == DASH:
            pieces.append(dashArr)
            prevWasSpace, prevWasElement = False, True
        else:  # Assume the char is a space otherwise
            if prevWasSpace:
                pieces[-1] = wGapArr
            else:
                pieces.append(cGapArr)
            prevWasSpace, prevWasElement = True, False

    return np.concatenate(pieces)
28,983
def LaserOptikMirrorTransmission(interpolated_wavelengths, refractive_index="100", shift_spectrum=7, rescale_factor=0.622222):
    """
    Can be used for any wavelengths in the range 400 to 800 (UNITS: nm)
    Uses supplied calculation from LaserOptik
    Interpolates over the selected wavelengths and returns the transmission value at each one
    Shifts transmission spectrum with calibration still to come, likewise for "rescale_factor"
    "refractive_index" argument is only for backwards compatibility
    """
    reflectivity_folder = data_root_folder + folder_separator + "calibration_data" + folder_separator
    #reflectivity_folder = "./"
    reflectivity_filename = "LaserOptik20160129_Theorie_T.DAT"
    fname = reflectivity_folder+reflectivity_filename
    res = csv.reader(open(fname), delimiter='\t')
    refl_text = [x for x in res][1:]  # removes column headings
    original_wavelengths = array([float(l[0]) for l in refl_text])
    original_transmissions = array([float(l[1]) for l in refl_text])
    original_reflectivities = 1-original_transmissions
    #
    wavelength_shift = 0
    if shift_spectrum == "planar":  # shift to be measured
        wavelength_shift = 0
    elif shift_spectrum == "spherical":
        wavelength_shift = 0  # shift to be measured
    elif isinstance(shift_spectrum, Number):
        wavelength_shift = shift_spectrum
    #
    interpolated_transmission_func = interp1d(original_wavelengths, original_transmissions)
    interpolated_transmissions = interpolated_transmission_func(interpolated_wavelengths + wavelength_shift)
    # Transmission to be calibrated at at least one narrow wavelength
    # Assume transmission scales with this factor at all wavelengths [not well justified assumption]
    interpolated_transmissions = interpolated_transmissions / rescale_factor
    return interpolated_transmissions
28,984
def integrate_to_context(initial_tokens, words):
    """
    initial_tokens.keys()
        ['ident', 'tree', 'original', 'words']
        ident: {'wtypes': set(['NN']), 'words': set(['door']), 'idents': set(['door_NN'])}
        original: [('open', 'VB'), ('the', 'DT'), ('door', 'NN')]
        words (list of lists): ('open', 'VB', meta with graph)
            meta: ['wtypes', 'meta_graph', 'words', 'idents']
                wtypes: set('VB')
                meta_graph: [u'HasProperty', u'HasContext', u'Synonym', u'DerivedFrom',
                             u'RelatedTo', u'AtLocation', u'IsA', u'FormOf']
                words: set(['open'])
                idents: set(['open_VB'])
    """
    # Add to initial stage sentence graph - a temporal space of sentences
    # and graph words.
    # Determine best linear graph for sentence
    _id = init_graphing.open_temporal()
    for word in words:
        init_graphing.add_word(_id, word)
28,985
def shake(robot): """ :type robot: rosebot.RoseBot """ robot.drive_system.go_forward_until_distance_is_less_than(10, 50) robot.drive_system.turn_degrees(40, 100) robot.drive_system.turn_degrees(-40, 100) robot.drive_system.turn_degrees(40, 100) robot.drive_system.turn_degrees(-40, 100) robot.drive_system.turn_degrees(40, 100) robot.drive_system.turn_degrees(-40, 100) robot.drive_system.turn_degrees(40, 100) robot.drive_system.turn_degrees(-40, 100) robot.drive_system.stop()
28,986
def menu(prompt, titles, cols=1, col_by_col=True, exc_on_cancel=None, caption=None, default=None): """Show a simple menu. If the input is not allowed the prompt will be shown again. The input can be cancelled with EOF (``^D``). The caller has to take care that the menu will fit in the terminal. :: def update(): ... def sort(desc=True, duration=True): ... items = ( ('Update', update), ('Sort duration desc', sort), ('Sort duration asc', sort, False), ('Sort size desc', sort, True, False), ('Sort size asc', sort, False, False), ) i = menu('> ', tuple(x[0] for x in items)) print() if i is not None: items[i][1](*items[i][2:]) .. raw:: html <pre style="color:#FFFFFF;background-color:#000000">[1] Update [2] Sort duration desc [3] Sort duration asc [4] Sort size desc [5] Sort size asc &gt; </pre> :param str prompt: the prompt :param tuple titles: the titles of the menu options :param int cols: number of columns :param bool col_by_col: if ``True`` the menu will be filled column-by-column, otherwise row-by-row :param bool exc_on_cancel: if ``True`` an EOF will cause an Exception; if ``None`` the value of ``exception_on_cancel`` will be used :param str caption: caption for the menu :param int default: number of the default menu option :return: index of the selected option in ``titles`` or None if cancelled and ``exc_on_cancel=False`` :rtype: int or None :raises EOFError: if input was cancelled and ``exc_on_cancel=True`` :raises TypeError: if ``titles`` is not a tuple or ``default`` is not an integer .. versionadded:: 0.4.0 .. versionchanged:: 0.6.0 Add parameter ``caption`` .. versionchanged:: 0.17.0 Add parameter ``default`` """ if default is not None: check_type(default, int, 'default') if not (0 < default <= (len(titles))): raise ValueError( f'default must be > 0 and <= {len(titles)}, got {default}') check_type(titles, tuple, 'titles') rows = math.ceil(len(titles) / cols) num_width = len(str(len(titles))) title_width = max(map(len, titles)) if col_by_col: indices = (x + rows * y for x in range(rows) for y in range(cols)) else: indices = range(len(titles)) lines = [] row = [] for cnt, idx in enumerate(indices, 1): if idx < len(titles): row.append(f'[{idx + 1:{num_width}}] {titles[idx]:{title_width}}') if cnt % cols == 0: lines.append(' '.join(row)) lines.append('\n') row.clear() if row: lines.append(' '.join(row)) lines.append('\n') if caption: width = max(len(caption), max(map(len, lines))) text = caption.center(width) + '\n' + '-' * width + '\n' else: text = '' text += ''.join(lines) + prompt def f(s): i = int(s) if 0 < i <= len(titles): return i - 1 raise ValueError return read(text, check=f, exc_on_cancel=exc_on_cancel, default=str(default))
28,987
def dict_to_annotation(annotation_dict: Dict[str, Any], ignore_extra_keys=True) -> Annotation:
    """Calls the specific Annotation object constructor based on the structure of the `annotation_dict`.

    Args:
        annotation_dict (Dict[str, Any]): One of COCO Annotation dictionaries.
        ignore_extra_keys (bool, optional): Ignore the fact that the dictionary has more fields
            than specified in the dataset. Defaults to True.

    Raises:
        ValueError: If `annotation_dict` has unspecified structure.

    Returns:
        Annotation: Dataclass annotation generated from the `annotation_dict`.
    """
    if set(DICT_TO_ANNOTATION_MAP['object_detection']).issubset(annotation_dict.keys()):
        return ObjectDetectionAnnotation.from_dict(annotation_dict, ignore_extra_keys)
    elif set(DICT_TO_ANNOTATION_MAP['keypoint_detection']).issubset(annotation_dict.keys()):
        return KeypointDetectionAnnotation.from_dict(annotation_dict, ignore_extra_keys)
    elif set(DICT_TO_ANNOTATION_MAP['panoptic_segmentation']).issubset(annotation_dict.keys()):
        return PanopticSegmentationAnnotation.from_dict(annotation_dict, ignore_extra_keys)
    elif set(DICT_TO_ANNOTATION_MAP['image_captioning']).issubset(annotation_dict.keys()):
        return ImageCaptioningAnnotation.from_dict(annotation_dict, ignore_extra_keys)
    elif set(DICT_TO_ANNOTATION_MAP['dense_pose']).issubset(annotation_dict.keys()):
        return DensePoseAnnotation.from_dict(annotation_dict, ignore_extra_keys)
    raise ValueError(
        "Unexpected annotation structure. Consider manually creating COCO dataset."
        "\nAnd extending one of existing objects or create new following one of the Protocols structure."
    )
28,988
def getAggregation(values): """ Produces a dictionary mapping raw states to aggregated states in the form {raw_state:aggregated_state} """ unique_values = list(set(values)) aggregation = {i:unique_values.index(v) for i, v in enumerate(values)} aggregation['n'] = len(unique_values) return aggregation
28,989
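Example: equal raw states map to the same aggregated state, and 'n' carries the number of distinct states:

states = ['a', 'b', 'a', 'c']
agg = getAggregation(states)
print(agg['n'])          # 3
print(agg[0] == agg[2])  # True -- positions 0 and 2 hold the same raw state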
def create_anonymous_client(): """Creates an anonymous s3 client. This is useful if you need to read an object created by an anonymous user, which the normal client won't have access to. """ return boto3.client('s3', config=Config(signature_version=UNSIGNED))
28,990
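Usage sketch for reading a public object without credentials (the bucket and key below are placeholders, not real resources):

client = create_anonymous_client()
body = client.get_object(Bucket='some-public-bucket', Key='data.csv')['Body'].read()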
def predictionTopK(pdt, k):
    """Top-k entries of each prediction row.

    @param pdt  prediction results, as a numpy matrix
    @param k    keep the top k results
    @return     top-k results, as a numpy matrix
    """
    m, n = np.shape(pdt)
    ret = []
    for i in range(m):
        curNums = pdt[i]
        tmp = topK(curNums.tolist()[0], k)
        ret.append(tmp)
    return np.mat(ret)
28,991
def test_evaluate_with_quantities(): """ Test evaluation of a single model with Quantity parameters that do not explicitly require units. """ # We create two models here - one with quantities, and one without. The one # without is used to create the reference values for comparison. g = Gaussian1D(1, 1, 0.1) gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m) # We first check that calling the Gaussian with quantities returns the # expected result assert_quantity_allclose(gq(1 * u.m), g(1) * u.J) # Units have to be specified for the Gaussian with quantities - if not, an # error is raised with pytest.raises(UnitsError) as exc: gq(1) assert exc.value.args[0] == ("Units of input 'x', (dimensionless), could not be " "converted to required input units of m (length)") # However, zero is a special case assert_quantity_allclose(gq(0), g(0) * u.J) # We can also evaluate models with equivalent units assert_allclose(gq(0.0005 * u.km).value, g(0.5)) # But not with incompatible units with pytest.raises(UnitsError) as exc: gq(3 * u.s) assert exc.value.args[0] == ("Units of input 'x', s (time), could not be " "converted to required input units of m (length)") # We also can't evaluate the model without quantities with a quantity with pytest.raises(UnitsError) as exc: g(3 * u.m) # TODO: determine what error message should be here # assert exc.value.args[0] == ("Units of input 'x', m (length), could not be " # "converted to required dimensionless input")
28,992
def extract_url(args: argparse.Namespace) -> dict: """Extracts data from products.json endpoint from specified args. Args: args (argparse.Namespace): Parsed args. Returns: dict: Data logged from extraction, including if successful or errors present. """ p = format_url(args.url, scheme='https', return_type='parse_result') formatted_url = p.geturl() json_key = 'products' if args.collections: json_key = 'collections' fp = os.path.join( args.dest_path, f'{p.netloc}.{json_key}.json') if args.file_path: fp = os.path.join( args.dest_path, f'{args.file_path}.json') endpoint = f'{formatted_url}/{json_key}.json' ret = { 'url': endpoint, 'collected_at': str(datetime.now()), 'success': False, 'error': '', 'file_path': '', } try: data = extract(endpoint, json_key, args.page_range) except requests.exceptions.HTTPError as err: ret['error'] = str(err) except json.decoder.JSONDecodeError as err: ret['error'] = str(err) except Exception as err: ret['error'] = str(err) else: ret['success'] = True ret[json_key] = data if ret['success']: ret['file_path'] = fp json_to_file(fp, data) return ret
28,993
def _devive_from_rsrc_id(app_unique_name):
    """Format device names.

    :returns:
        ``tuple`` - Pair of device names based on the app_unique_name.
    """
    # FIXME(boysson): This kind of manipulation should live elsewhere.
    _, uniqueid = app_unique_name.rsplit('-', 1)
    veth0 = '{id:>013s}.0'.format(id=uniqueid)
    veth1 = '{id:>013s}.1'.format(id=uniqueid)
    return (veth0, veth1)
28,994
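Example of the resulting device-name pair:

veth0, veth1 = _devive_from_rsrc_id('proid.app-0000000abcdef')
print(veth0, veth1)  # 0000000abcdef.0 0000000abcdef.1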
def fit_noise_1d(npower,lmin=300,lmax=10000,wnoise_annulus=500,bin_annulus=20,lknee_guess=3000,alpha_guess=-4, lknee_min=0,lknee_max=9000,alpha_min=-5,alpha_max=1,allow_low_wnoise=False): """Obtain a white noise + lknee + alpha fit to a 2D noise power spectrum The white noise part is inferred from the mean of lmax-wnoise_annulus < ells < lmax npower is 2d noise power """ fbin_edges = np.arange(lmin,lmax,bin_annulus) modlmap = npower.modlmap() fbinner = stats.bin2D(modlmap,fbin_edges) cents,dn1d = fbinner.bin(npower) w2 = dn1d[np.logical_and(cents>=(lmax-wnoise_annulus),cents<lmax)].mean() try: # print(w2) assert w2>0 # pl = io.Plotter('Dell') # pl.add(cents,dn1d) # pl.add(cents,cents*0+w2) # pl.done(os.environ['WORK']+"/nonpos_white_works.png") except: print("White noise level not positive") print(w2) if not(allow_low_wnoise): pl = io.Plotter('Dell') pl.add(cents,dn1d) pl.done(os.environ['WORK']+"/nonpos_white.png") raise else: w2 = np.abs(w2) print("Setting to ",w2) wnoise = np.sqrt(w2)*180.*60./np.pi ntemplatefunc = lambda x,lknee,alpha: fbinner.bin(rednoise(modlmap,wnoise,lknee=lknee,alpha=alpha))[1] #ntemplatefunc = lambda x,lknee,alpha: rednoise(x,wnoise,lknee=lknee,alpha=alpha) # FIXME: This switch needs testing !!!! res,_ = curve_fit(ntemplatefunc,cents,dn1d,p0=[lknee_guess,alpha_guess],bounds=([lknee_min,alpha_min],[lknee_max,alpha_max])) lknee_fit,alpha_fit = res # print(lknee_fit,alpha_fit,wnoise) # pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='D',scalefn=lambda x: x**2./2./np.pi) # pl.add(cents,dn1d) # pl.add(cents,cents*0+w2) # pl.add(cents,rednoise(cents,wnoise,lknee=lknee_fit,alpha=alpha_fit),ls="--") # pl.add(cents,rednoise(cents,wnoise,lknee=lknee_guess,alpha=alpha_guess),ls="-.") # pl._ax.set_ylim(1e-1,1e4) # pl.done(os.environ['WORK']+"/fitnoise_pre.png") # sys.exit() return wnoise,lknee_fit,alpha_fit
28,995
def parseCsv(file_content):
    """
    parseCsv
    ========
    Parses the CSV content of a Shimadzu analysis output file, returning a
    dictionary with current, livetime and sample ID

    Parameters
    ----------
    file_content : str
        Shimadzu output CSV content

    Returns
    -------
    dict
        dict with irradiation parameters
    """
    irradiation_parameters = {}
    irradiation_parameters['sample'] = file_content.split(',')[0].split(':')[1].replace("\"", "").strip()
    irradiation_parameters['current'] = re.sub(' +', ' ', file_content.split(',')[12]).split(' ')[3]
    irradiation_parameters['current'] = int(re.findall(r'\d+', irradiation_parameters['current'])[0])
    irradiation_parameters['livetime'] = int(re.sub(' +', ' ', file_content.split(',')[12]).split(' ')[13])
    return(irradiation_parameters)
28,996
def unpackFITS(h5IN, h5archive, overwrite=True):
    # ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
    """ Package contents of an h5 block to multi-extension FITS files """
    """
    MAJOR BUG: This does not like the ExtendLinked HDF5 files one bit...
    Only real blocks. No idea why.

    (1) Read h5 baby block of symbolic links.
    (2) Count number of target blocks.
    (3) Begin loop over packaging function.
        -- Write a single row of the SAMI Master as a primary HDU.
        -- Write each dataset as a FITS extension with corresponding header.
    (4) Inform user of FITS files created, exit successfully.
    """

    # Open h5 file.
    hdf = h5.File(h5IN, 'r')

    # Count number of target blocks.
    version = hdf['/SAMI'].keys()[0]  # = getVersion(h5IN, hdf, '')
    # *** Assuming only one version of data available.
    g_version = hdf['/SAMI/'+version]

    nTarget = 0
    nCalibrator = 0
    if 'Target' in g_version.keys():
        nTarget = len(g_version['Target'].keys())
        gTarget = g_version['Target']
        thereAreTargets = True
    if 'Calibrator' in g_version.keys():
        nCalibrator = len(g_version['Calibrator'].keys())
        gCalibrator = g_version['Calibrator']
        thereAreCalibrators = True
    nGroups = nTarget + nCalibrator

    def plural(nGroups):
        plural = ''
        if nGroups > 1:
            plural = 's'
        return(plural)

    print("Identified "+str(nGroups)+" Target Block"+plural(nGroups)+\
          " in '"+h5IN+"'.")

    def stripTable(name, version, h5archive):
        #master = hdf['/SAMI/'+version+'/Table/SAMI_MASTER']
        h5archive = h5.File(h5archive, 'r')
        master = h5archive['/SAMI/'+version+'/Table/SAMI_MASTER']
        tabline = master[master["CATID"] == int(name)][0]
        # For now excluding all strings to make FITS-compatible
        # *** BUT HEADER will not know that.
        hdu = [v for v in tabline if not isinstance(v, str)]
        hdr = makeHead(master)
        h5archive.close()
        return(hdu, hdr)

    # Begin loop over all SAMI targets requested.
    # *** CURRENTLY ONLY Targets, not Calibrators. Combine groups in a list?
    for thisG in range(nTarget):

        # What is the SAMI name of this target?
        name = gTarget.keys()[thisG]

        # Search for 'Cube' and 'RSS' among Dsets to define output filename
        areThereCubes = ['Cube' in s for s in gTarget[name].keys()]
        areThereRSS = ['RSS' in s for s in gTarget[name].keys()]

        sContents = []
        if sum(areThereCubes) > 0:
            sContents.append('cubes')
        if sum(areThereRSS) > 0:
            sContents.append('RSS')
        if len(sContents) > 1:
            sContents = '_'.join(sContents)
        else:
            sContents = sContents[0]

        # Define output filename
        fname = 'SAMI_'+name+'_'+sContents+'.fits'

        # Primary HDU is a single row of the Master table.
        hdu0, hdr0 = stripTable(name, version, h5archive)
        hdulist = pf.HDUList([pf.PrimaryHDU(hdu0, header=hdr0)])

        # Cycle through all dsets, make HDUs and headers with native names.
        # Get number of datasets.
        thisTarget = gTarget[name]
        nDsets = len(thisTarget.keys())

        # Begin loop through all datasets.
        for thisDset in range(nDsets):
        #for thisDset in range(5):

            # Determine dataset.
            dsetName = thisTarget.keys()[thisDset]
            print("Processing dataset '"+dsetName+"'...")

            # Create dataset and populate header.
            data = thisTarget[dsetName]
            hdr = makeHead(data)

            # Add all this to an HDU.
            hdulist.append(
                pf.ImageHDU(np.array(thisTarget[dsetName]),
                            name=dsetName, header=makeHead(data)))

        # Write to a new FITS file.
        hdulist.writeto(fname, clobber=overwrite)

    hdf.close()
28,997
def s_from_v(speed, time=None):
    """ Calculate {distance} from {speed}

    The chosen scheme: speed at [i] represents the distance from [i] to [i+1].
    This means distance.diff() and time.diff() are shifted by one index from speed.
    I have chosen to extrapolate the position at the first index by assuming
    we start at a cumulative distance of 0.

    Args:
        {speed_arg}
        {time_arg} Default None.

    Returns:
        {distance_returns}
    """
    if time is None:
        time = pd.Series([i for i in range(len(speed))])

    # Should this assume the index at position 0 is 0, or should this
    # assume the only NaN is at position 0? Assumptions either way...
    return (speed.shift(1) * time.diff()).cumsum().fillna(0)
28,998
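Worked example with the default one-second sampling:

import pandas as pd

speed = pd.Series([2.0, 2.0, 4.0])  # speed at [i] covers the interval [i] -> [i+1]
dist = s_from_v(speed)
print(dist.tolist())  # [0.0, 2.0, 4.0]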
def get_distinct_elements(items, key=None):
    """
    Remove duplicate elements from a sequence while keeping the remaining elements
    in their original order. For unhashable objects, a key must be given that
    describes how to deduplicate.

    :param:
        * items: (list) the list to deduplicate
        * key: (hook function) a function used to convert the sequence elements into a hashable type

    :return:
        * result: (generator) a generator over the deduplicated results

    Example::

        print('--- get_distinct_elements demo---')
        list_demo = get_distinct_elements([1, 5, 2, 1, 9, 1, 5, 10])
        print(list(list_demo))
        list2 = [{'x': 1, 'y': 2}, {'x': 1, 'y': 3}, {'x': 1, 'y': 2}, {'x': 2, 'y': 4}]
        dict_demo1 = get_distinct_elements(list2, key=lambda d: (d['x'], d['y']))
        print(list(dict_demo1))
        dict_demo2 = get_distinct_elements(list2, key=lambda d: d['x'])
        print(list(dict_demo2))
        dict_demo3 = get_distinct_elements(list2, key=lambda d: d['y'])
        print(list(dict_demo3))
        print('---')

    Output::

        --- get_distinct_elements demo---
        [1, 5, 2, 9, 10]
        [{'x': 1, 'y': 2}, {'x': 1, 'y': 3}, {'x': 2, 'y': 4}]
        [{'x': 1, 'y': 2}, {'x': 2, 'y': 4}]
        [{'x': 1, 'y': 2}, {'x': 1, 'y': 3}, {'x': 2, 'y': 4}]
        ---
    """
    seen = set()
    for item in items:
        val = item if key is None else key(item)
        if val not in seen:
            yield item
            seen.add(val)
28,999