content
stringlengths
22
815k
id
int64
0
4.91M
def simple_split_with_list(x, y, train_fraction=0.8, seed=None):
    """Randomly split list-of-array data into train and test sets.

    ``x`` and ``y`` are lists of arrays shaped [batch, ...].  The list
    elements are partitioned at random into two groups according to
    ``train_fraction``, then each group is concatenated along axis 0.
    """
    n_subjects = len(x)
    n_train = int(n_subjects * train_fraction)
    print('Split: Total %d -- Training %d' % (n_subjects, n_train))

    # Deterministic shuffle when a seed is supplied.
    shuffled = np.random.RandomState(seed=seed).permutation(n_subjects)
    train_part = shuffled[:n_train]
    test_part = shuffled[n_train:]

    def _gather(data, idx):
        # Concatenate the selected list elements along the batch axis.
        return np.concatenate([data[i] for i in idx], axis=0)

    return (_gather(x, train_part), _gather(y, train_part),
            _gather(x, test_part), _gather(y, test_part))
5,330,000
def unix_socket_path(suffix=""):
    """A context manager which returns a non-existent file name and
    tries to delete it on exit.
    """
    # NOTE(review): this is a generator that yields once, so it is
    # presumably wrapped with @contextlib.contextmanager at its
    # definition site (not visible here) -- confirm.
    assert psutil.POSIX
    path = unique_filename(suffix=suffix)
    try:
        yield path
    finally:
        # Best-effort cleanup: the caller may never have created the file.
        try:
            os.unlink(path)
        except OSError:
            pass
5,330,001
def test_list_int_enumeration_3_nistxml_sv_iv_list_int_enumeration_4_3(mode, save_output, output_format):
    """
    Type list/int is restricted by facet enumeration.
    """
    # Schema and instance live in the same NIST data directory.
    base = "nistData/list/int/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-int-enumeration-4.xsd",
        instance=base + "NISTXML-SV-IV-list-int-enumeration-4-3.xml",
        class_name="NistschemaSvIvListIntEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,330,002
def save_correlation_heatmap_results( correlations: pd.DataFrame, intensity_label: str = "Intensity", show_suptitle: bool = True, close_plots: str = "all", exp_has_techrep: bool = False, **kwargs ) -> Tuple[plt.Figure, plt.Axes]: """ Saves the plot with prefix: {{name}} Parameters ---------- correlations DataFrame containing the correlations to be plotted intensity_label label of the dataframe show_suptitle should the figure title be shown close_plots which plots should be closed when creating the plot, if None no plots will be closed exp_has_techrep whether technical replicates were aggregated for the plot kwargs {kwargs} """ # TODO save csv if close_plots is not None: plt.close(close_plots) num_cols, a = correlations.shape assert num_cols == a, "Dataframe needs to be quadratic" mask = np.zeros_like(correlations).astype(bool) mask[np.triu_indices_from(mask)] = True wid_hei = 4 + 0.5 * num_cols fig, ax = plt.subplots(1, 1, figsize=(wid_hei, wid_hei)) if show_suptitle: fig.suptitle(f"Correlation Heatmap {intensity_label}" + (TECHREP_SUFFIX if exp_has_techrep else "")) mesh = ax.pcolormesh(np.ma.masked_where(mask, correlations.values), cmap="coolwarm") ax.figure.colorbar(mesh, ax=ax) ax.invert_yaxis() # set x and y ticks ax.set_xticks(np.linspace(start=0.5, stop=num_cols - 0.5, num=num_cols)) ax.set_xticklabels(correlations.index, rotation=90) ax.set_yticks(np.linspace(start=0.5, stop=num_cols - 0.5, num=num_cols)) ax.set_yticklabels(correlations.index) # annotate values for x, col in enumerate(correlations.columns): for y, idx in enumerate(correlations.index): if not mask[y, x]: ax.text(x + 0.5, y + 0.5, f"{correlations.loc[idx, col]:.4f}", ha="center", va="center") fig.tight_layout(rect=[0, 0.03, 1, 0.95]) return fig, ax
5,330,003
def copy_file_with_stream(input_file_name, output_file_name):
    """Copy ``input_file_name`` to ``output_file_name`` in 1 KiB chunks,
    mirroring every chunk into an in-memory stream.

    The source is read in binary mode, so the destination must also be
    opened in binary mode (the original ``'w'`` text mode raised
    ``TypeError`` when bytes were written to it).  After copying, the
    destination size is asserted equal to the byte count recorded by the
    in-memory stream.
    """
    stream = io.BytesIO()
    # Context managers guarantee both files are closed even on error.
    with open(input_file_name, 'rb') as input_file, \
            open(output_file_name, 'wb') as output_file:
        # iter(callable, sentinel) stops when read() returns b'' at EOF.
        for chunk in iter(lambda: input_file.read(1024), b''):
            output_file.write(chunk)
            stream.write(chunk)
    assert os.path.getsize(output_file_name) == stream.tell()
5,330,004
def get_star(star_path, verbose=False, recreate=False):
    """Return a varconlib.star.Star object based on its name.

    Parameters
    ----------
    star_path : str
        A string representing the name of the directory where the HDF5 file
        containing a `star.Star`'s data can be found.

    Optional
    --------
    verbose : bool, Default: False
        If *True*, write out additional information.
    recreate : bool, Default: False
        If *True*, first recreate the star from observations before returning
        it. This will only work on stars which already have an HDF5 file saved,
        and will not create new ones.

    Returns
    -------
    `star.Star`
        A Star object from the directory. Note that this will only use
        already-existing stars, it will not create ones which do not already
        exist from their observations.

    """
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be safer -- confirm whether -O is ever used.
    assert star_path.exists(), FileNotFoundError('Star directory'
                                                 f' {star_path}'
                                                 ' not found.')

    # Flip boolean value, since to recreate (True) the star requires setting
    # its load_data argument to False.
    recreate = not recreate
    try:
        return Star(star_path.stem, star_path, load_data=recreate)
    except IndexError:
        # Star was deliberately excluded from the sample.
        vprint(f'Excluded {star_path.stem}.')
        pass
    except HDF5FileNotFoundError:
        # No saved data to load; silently skip (function returns None).
        vprint(f'No HDF5 file for {star_path.stem}.')
        pass
    except AttributeError:
        # Unexpected data problem: identify the star, then re-raise.
        vprint(f'Affected star is {star_path.stem}.')
        raise
    except PickleFilesNotFoundError:
        vprint(f'No pickle files found for {star_path.stem}')
        pass
5,330,005
def get_terminal_map():
    """Get a map of device-id -> path as a dict. Used by Process.terminal()"""
    mapping = {}
    candidates = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
    for path in candidates:
        assert path not in mapping, path
        try:
            mapping[os.stat(path).st_rdev] = path
        except FileNotFoundError:
            # Terminal devices may disappear between glob() and stat().
            continue
    return mapping
5,330,006
def targets(inventory="/etc/ansible/hosts", **kwargs):
    """
    Return the targets from the ansible inventory_file
    Default: /etc/salt/roster
    """
    if not os.path.isfile(inventory):
        raise CommandExecutionError("Inventory file not found: {}".format(inventory))

    # Translate recognized kwargs into ansible-inventory CLI flags.
    extra_cmd = []
    for key, flag in (("export", "--export"), ("yaml", "--yaml")):
        if key in kwargs:
            extra_cmd.append(flag)

    inv = salt.modules.cmdmod.run(
        "ansible-inventory -i {} --list {}".format(inventory, " ".join(extra_cmd))
    )
    raw = salt.utils.stringutils.to_str(inv)
    if kwargs.get("yaml", False):
        return raw
    return salt.utils.json.loads(raw)
5,330,007
def get_relative_days(days):
    """Calculates a relative date/time in the past without any time offsets.

    This is useful when a service wants to have a default value of, for
    example 7 days back. If an ISO duration format is used, such as P7D then
    the current time will be factored in which results in the earliest day
    being incomplete when computing an absolute time stamp.

    :param days: The number of days back to calculate from now.
    :type days: int
    :returns: An absolute time stamp that is the complete range of relative
        days back.
    :rtype: datetime.datetime
    """
    # Drop the time-of-day so the whole earliest day is included.
    cutoff = timezone.now() - datetime.timedelta(days=days)
    midnight = datetime.datetime.combine(cutoff.date(), datetime.time.min)
    return midnight.replace(tzinfo=timezone.utc)
5,330,008
def find_pending_trade(df):
    """
    Find the trade value according to its sign like negative number means
    Sell type or positive number means Buy
    """
    # Net quantity: positive -> Buy, negative -> Sell.
    pending = pd.DataFrame()
    pending['Type'] = df['Buy_Qty'] - df['Sell_Qty']
    return pending['Type'].map(lambda net: trade_type_conversion(net))
5,330,009
def validate_root_vertex_directives(root_ast):
    """Validate the directives that appear at the root vertex field.

    Raises GraphQLCompilationError if the root vertex carries a filter
    directive with a disallowed operator, or any directive from
    VERTEX_DIRECTIVES_PROHIBITED_ON_ROOT.
    """
    directives_present_at_root = set()
    for directive_obj in root_ast.directives:
        directive_name = directive_obj.name.value
        if is_filter_with_outer_scope_vertex_field_operator(directive_obj):
            # Fixed missing space at the string join: the message previously
            # rendered as "...that is notallowed...".
            raise GraphQLCompilationError(u'Found a filter directive with an operator that is not '
                                          u'allowed on the root vertex: {}'.format(directive_obj))
        directives_present_at_root.add(directive_name)

    disallowed_directives = directives_present_at_root & VERTEX_DIRECTIVES_PROHIBITED_ON_ROOT
    if disallowed_directives:
        raise GraphQLCompilationError(u'Found prohibited directives on root vertex: '
                                      u'{}'.format(disallowed_directives))
5,330,010
def test_chpi_adjust():
    """Test cHPI logging and adjustment.

    Compares the debug log emitted by the HPI fitting helpers against the
    messages MaxFilter produced for the same data, before and after
    perturbing one digitized HPI point.
    """
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
    with catch_logging() as log:
        _get_hpi_initial_fit(raw.info, adjust=True, verbose='debug')
        _get_hpi_info(raw.info, verbose='debug')

    # Ran MaxFilter (with -list, -v, -movecomp, etc.), and got:
    msg = ['HPIFIT: 5 coils digitized in order 5 1 4 3 2',
           'HPIFIT: 3 coils accepted: 1 2 4',
           'Hpi coil moments (3 5):',
           '2.08542e-15 -1.52486e-15 -1.53484e-15',
           '2.14516e-15 2.09608e-15 7.30303e-16',
           '-3.2318e-16 -4.25666e-16 2.69997e-15',
           '5.21717e-16 1.28406e-15 1.95335e-15',
           '1.21199e-15 -1.25801e-19 1.18321e-15',
           'HPIFIT errors:  0.3, 0.3, 5.3, 0.4, 3.2 mm.',
           'HPI consistency of isotrak and hpifit is OK.',
           'HP fitting limits: err = 5.0 mm, gval = 0.980.',
           'Using 5 HPI coils: 83 143 203 263 323 Hz',  # actually came earlier
           ]
    log = log.getvalue().splitlines()
    # Set comparison: ordering of log lines is not guaranteed.
    assert set(log) == set(msg), '\n' + '\n'.join(set(msg) - set(log))

    # Then took the raw file, did this:
    raw.info['dig'][5]['r'][2] += 1.
    # And checked the result in MaxFilter, which changed the logging as:
    msg = msg[:8] + [
        'HPIFIT errors:  0.3, 0.3, 5.3, 999.7, 3.2 mm.',
        'Note: HPI coil 3 isotrak is adjusted by 5.3 mm!',
        'Note: HPI coil 5 isotrak is adjusted by 3.2 mm!'] + msg[-2:]
    with catch_logging() as log:
        _get_hpi_initial_fit(raw.info, adjust=True, verbose='debug')
        _get_hpi_info(raw.info, verbose='debug')
    log = log.getvalue().splitlines()
    assert set(log) == set(msg), '\n' + '\n'.join(set(msg) - set(log))
5,330,011
def setup_database(conn: sqlite3.Connection) -> None:
    """
    Creates the schema of the Moonstream NFTs dataset in the given SQLite
    database and commits it.
    """
    cursor = conn.cursor()
    statements = (
        CREATE_NFTS_TABLE_QUERY,
        create_events_table_query(EventType.TRANSFER),
        create_events_table_query(EventType.MINT),
        CREATE_CHECKPOINT_TABLE_QUERY,
    )
    for statement in statements:
        cursor.execute(statement)
    conn.commit()
5,330,012
def rtri(x, a, b):
    """Convolution of rect(ax) with tri(bx)."""
    assert a > 0
    assert b > 0
    half_width = 1 / (2 * a)
    inv_b = 1 / b
    # Second-integral steps around the rect's right edge...
    upper = (step2(x + half_width + inv_b)
             - 2 * step2(x + half_width)
             + step2(x + half_width - inv_b))
    # ...and around its left edge.
    lower = (step2(x - half_width + inv_b)
             - 2 * step2(x - half_width)
             + step2(x - half_width - inv_b))
    return b * (upper - lower)
5,330,013
def make_xgboost_predict_extractor(
    eval_shared_model: tfma.EvalSharedModel,
    eval_config: tfma.EvalConfig,
) -> extractor.Extractor:
    """Creates an extractor for performing predictions using a xgboost model.

    The extractor's PTransform loads and runs the serving pickle against
    every extract yielding a copy of the incoming extracts with an additional
    extract added for the predictions keyed by tfma.PREDICTIONS_KEY. The
    model inputs are searched for under tfma.FEATURES_KEY.

    Args:
      eval_shared_model: Shared model (single-model evaluation).
      eval_config: Eval config forwarded to the prediction PTransform.

    Returns:
      Extractor for extracting predictions.
    """
    eval_shared_models = model_util.verify_and_update_eval_shared_models(
        eval_shared_model)

    return extractor.Extractor(
        stage_name=_PREDICT_EXTRACTOR_STAGE_NAME,
        ptransform=_ExtractPredictions(  # pylint: disable=no-value-for-parameter
            eval_shared_models={m.model_name: m for m in eval_shared_models},
            eval_config=eval_config))
5,330,014
def cloth():
    """Runs a simple cloth simulation based on linked springs."""
    # Import lazily so the simulation module is only loaded on demand.
    from .cloth import run as run_simulation
    run_simulation()
5,330,015
def test_coal_heat_content(pudl_out_orig, live_pudl_db):
    """Check that the distribution of coal heat content per unit is valid."""
    if not live_pudl_db:
        raise AssertionError("Data validation only works with a live PUDL DB.")
    for bounds_case in pudl.validate.gf_eia923_coal_heat_content:
        pudl.validate.vs_bounds(pudl_out_orig.gf_eia923(), **bounds_case)
5,330,016
def zip_score_list(exp_list, savedir_base, out_fname, include_list=None):
    """Compress a list of experiments in zip.

    Parameters
    ----------
    exp_list : list
        List of experiments to zip
    savedir_base : str
        Directory where the experiments from the list are saved
    out_fname : str
        File name for the zip file
    include_list : list, optional
        List of files to include. If None, include all files in the folder,
        by default None
    """
    for exp_dict in exp_list:
        # TODO: This will zip only the last experiments, zipdir will
        # overwritwe the previous file
        # NOTE(review): each iteration writes to the same out_fname; unless
        # zipdir appends, only the final experiment survives -- confirm
        # zipdir's semantics before relying on the archive contents.
        # Get the experiment id
        exp_id = hash_dict(exp_dict)
        # Zip folder
        zipdir(os.path.join(savedir_base, exp_id), out_fname, include_list=include_list)
5,330,017
def training_dataset() -> Dataset:
    """Creating the dataframe."""
    # Paired records with a label saying whether they refer to the same person.
    record1 = [
        {"@first_name": "Hans", "@last_name": "Peter"},
        {"@first_name": "Heinrich", "@last_name": "Meier"},
        {"@first_name": "Hans", "@last_name": "Peter"},
    ]
    record2 = [
        {"@first_name": "Hans", "@last_name": "Petre"},
        {"@first_name": "Heinz", "@last_name": "Meier"},
        {"@first_name": "Hansel", "@last_name": "Peter"},
    ]
    labels = ["duplicate", "not_duplicate", "duplicate"]
    return Dataset.from_dict(
        {"record1": record1, "record2": record2, "label": labels}
    )
5,330,018
def getWCSForcamera(cameraname, crpix1, crpix2):
    """ Return SIP non-linear coordinate correction object initialized for a
    camera from a lookup table.

    If the camera is not in the lookup table, an identity transformation
    (all-zero SIP coefficients) is returned.

    TODO: variable order, so far limit ouselves to second order
    TODO: Time-constraint lookup.

    :param cameraname: Name of camera, e.g., ak01
    :param crpix1: CRPIX1 for camera, as that my have changed over time
    :param crpix2: CRPIX2 for camera, as that my have changed over time
    :return:
    """
    order = 2
    sip_a = np.zeros((order + 1, order + 1), np.double)
    sip_b = np.zeros((order + 1, order + 1), np.double)

    if cameraname in akwcslookup:
        entry = akwcslookup[cameraname]
        # Second-order coefficients only: (1,1), (2,0), (0,2).
        for i, j in ((1, 1), (2, 0), (0, 2)):
            sip_a[i][j] = entry['SIPA_%d_%d' % (i, j)]
            sip_b[i][j] = entry['SIPB_%d_%d' % (i, j)]

    return Sip(sip_a, sip_b, None, None, [crpix1, crpix2])
5,330,019
def split_by_table_id_and_write(
    examples,
    output_dir,
    train_suffix=".tfrecord",
    test_suffix=".tfrecord",
    num_splits=100,
    proto_message=interaction_pb2.Interaction,
):
    """Split interactions into train and test and write them to disc.

    The partition is keyed by table id (see _partition_fn) so all examples
    for one table land in the same split.  Each split is written as a single
    unsharded TFRecord file of serialized `proto_message` protos.
    """
    train, test = (
        examples
        | "Partition" >> beam.Partition(_partition_fn, 2, num_splits))

    for name, suffix, data in zip(
        ["train", "test"],
        [train_suffix, test_suffix],
        [train, test],
    ):
        output_file = os.path.join(output_dir, name + suffix)
        # shard_name_template="" forces a single output file per split.
        _ = (
            data
            | "WriteTFRecordsExamples_%s" % name >> beam.io.WriteToTFRecord(
                file_path_prefix=output_file,
                shard_name_template="",
                coder=beam.coders.ProtoCoder(proto_message)))
5,330,020
def get_full_frac_val(r_recalc, fs, diff_frac=0, bypass_correction=0):
    """
    Compute total offset in number of samples, and also fractional sample
    correction.

    Parameters
    ----------
     r_recalc : float
         delay.
     fs : float
         sampling frequency.
     diff_frac : 0
         [unused] 0 by default.
     bypass_correction : int
         | if 0: corrects the fractional sample correction to be between
         |    -0.5 and +0.5.
         | if 1: returns the fractional sample correction between 0 and 1.

    Returns
    -------
     full_fractional_recalc : float
         total offset in number of samples, including fractional part.
     fractional_recalc : float
         fractional sample correction.

    Notes
    -----
    Bypass correction used in get_frac_over for simplicity.
    """
    full_fractional_recalc = r_recalc * fs
    fractional = np.mod(full_fractional_recalc, 1)
    if bypass_correction == 0:
        # Re-center the fractional part from [0, 1) into (-0.5, 0.5].
        corrected = fractional - (fractional > 0.5).astype(np.float64)
    else:
        corrected = fractional
    return [full_fractional_recalc, corrected]
5,330,021
def extract_random_tiles(
    dataset_dir: str,
    processed_path: str,
    tile_size: Tuple[int, int],
    n_tiles: int,
    level: int,
    seed: int,
    check_tissue: bool,
) -> None:
    """Save random tiles extracted from WSIs in `dataset_dir` into
    `processed_path`/tiles

    Parameters
    ----------
    dataset_dir : str
        Path were the WSIs are saved
    processed_path : str
        Path where to store the tiles (will be concatenated with /tiles)
    tile_size : Tuple[int, int]
        width and height of the cropped tiles
    n_tiles : int
        Maximum number of tiles to extract
    level : int
        Magnification level from which extract the tiles
    seed : int
        Seed for RandomState
    check_tissue : bool
        Whether to check if the tile has enough tissue to be saved
    """
    slideset = SlideSet(dataset_dir, processed_path, valid_extensions=[".svs"])
    for slide in tqdm(slideset.slides):
        # Prefix tile filenames with the slide name to keep them unique.
        prefix = f"{slide.name}_"
        random_tiles_extractor = RandomTiler(
            tile_size=tile_size,
            n_tiles=n_tiles,
            level=level,
            seed=seed,
            check_tissue=check_tissue,
            prefix=prefix,
        )
        random_tiles_extractor.extract(slide)
5,330,022
def get_code():
    """ returns the code for the min cost path function """
    source_text = inspect.getsource(calculate_path)
    return source_text
5,330,023
def box_net(images, level, num_anchors, num_filters, is_training, act_type,
            repeats=4, separable_conv=True, survival_prob=None, strategy=None,
            data_format='channels_last'):
    """Box regression network.

    Applies `repeats` conv + batch-norm-activation layers with residual
    connections, then a final conv that predicts 4 box coordinates per anchor.
    """
    if separable_conv:
        conv_op = functools.partial(
            tf.layers.separable_conv2d, depth_multiplier=1,
            data_format=data_format,
            pointwise_initializer=tf.initializers.variance_scaling(),
            depthwise_initializer=tf.initializers.variance_scaling())
    else:
        conv_op = functools.partial(
            tf.layers.conv2d,
            data_format=data_format,
            kernel_initializer=tf.random_normal_initializer(stddev=0.01))

    for i in range(repeats):
        # Keep the block input for the residual connection below.
        orig_images = images
        images = conv_op(
            images,
            num_filters,
            kernel_size=3,
            activation=None,
            bias_initializer=tf.zeros_initializer(),
            padding='same',
            name='box-%d' % i)
        images = utils.batch_norm_act(
            images,
            is_training,
            act_type=act_type,
            init_zero=False,
            strategy=strategy,
            data_format=data_format,
            name='box-%d-bn-%d' % (i, level))

        # Stochastic depth + residual add, skipped for the first layer.
        if i > 0 and survival_prob:
            images = utils.drop_connect(images, is_training, survival_prob)
            images = images + orig_images

    # 4 coordinates per anchor box.
    boxes = conv_op(
        images,
        4 * num_anchors,
        kernel_size=3,
        bias_initializer=tf.zeros_initializer(),
        padding='same',
        name='box-predict')

    return boxes
5,330,024
def main_plot(experiments, legends=None, smoothing_window=10,
              resample_ticks=None, x_key="Episode",
              y_key='Accumulated Reward', **kwargs):
    """ Plot an experiment.

    To plot invidual lines (i.e. no averaging) use
    > units="Unit", estimator=None,
    """
    def as_list(value):
        # Wrap a bare value in a list so zip() below always works.
        return value if isinstance(value, list) else [value]

    experiments = as_list(experiments)
    legends = as_list(experiments if legends is None else legends)
    # Resampling only applies when explicit ticks are requested.
    resample_key = x_key if resample_ticks is not None else None

    data = []
    for logdir, legend_title in zip(experiments, legends):
        data += get_datasets(logdir, x=x_key, condition=legend_title,
                             smoothing_window=smoothing_window,
                             resample_key=resample_key,
                             resample_ticks=resample_ticks)
    plot_data(data, y=y_key, x=x_key, **kwargs)
5,330,025
def user_similarity_pearson(train, iif=False):
    """
    Compute user-user interest similarity with the Pearson correlation
    coefficient.

    (Original docstring, translated from Chinese: "compute the interest
    similarity between users u and v via the Pearson correlation
    coefficient"; `train` is the training set; `iif` toggles penalizing
    popular items.)

    :param train: training set, mapping user -> {item: rating}
    :param iif: whether to penalize popular items (inverse item frequency)
    """
    # NOTE: Python 2 code (iteritems/itervalues). Results are stored in
    # module-level globals _avr and _w rather than returned.
    global _avr
    _avr = {}
    # Invert train into item -> {user: rating} to iterate co-raters per item.
    item_users = {}
    for user, items in train.iteritems():
        _avr[user] = sum(items.itervalues()) / len(items)
        for item, rating in items.iteritems():
            item_users.setdefault(item, {})
            item_users[item][user] = rating

    # First pass: per user pair (u, v), accumulate the sums of u's and v's
    # ratings over co-rated items, and the co-rating count.
    avr_x = {}
    avr_y = {}
    tot = {}
    for users in item_users.itervalues():
        for u, ru in users.iteritems():
            avr_x.setdefault(u, {})
            avr_y.setdefault(u, {})
            tot.setdefault(u, {})
            for v, rv in users.iteritems():
                if u == v:
                    continue
                avr_x[u].setdefault(v, 0)
                avr_x[u][v] += ru
                avr_y[u].setdefault(v, 0)
                avr_y[u][v] += rv
                tot[u].setdefault(v, 0)
                tot[u][v] += 1
    # Turn the sums into per-pair means over the co-rated items.
    for u, related_users in tot.iteritems():
        for v, cnt in related_users.iteritems():
            avr_x[u][v] /= cnt
            avr_y[u][v] /= cnt

    # Second pass: accumulate Pearson numerator (c) and the two variance
    # terms (x, y), optionally damped by log(1 + #raters) when iif is set.
    c = {}
    x = {}
    y = {}
    for users in item_users.itervalues():
        user_len = len(users)
        for u, ru in users.iteritems():
            c.setdefault(u, {})
            x.setdefault(u, {})
            y.setdefault(u, {})
            for v, rv in users.iteritems():
                if u == v:
                    continue
                c[u].setdefault(v, 0)
                c[u][v] += (ru - avr_x[u][v]) * (rv - avr_y[u][v]) if not iif else (ru - avr_x[u][v]) * (
                    rv - avr_y[u][v]) / math.log(1 + user_len)
                x[u].setdefault(v, 0)
                x[u][v] += (ru - avr_x[u][v]) ** 2
                y[u].setdefault(v, 0)
                y[u][v] += (rv - avr_y[u][v]) ** 2

    # Final similarity matrix; zero when either variance term vanishes.
    global _w
    _w = {}
    for u, related_users in c.iteritems():
        _w[u] = {}
        for v, cuv in related_users.iteritems():
            _w[u][v] = cuv / math.sqrt(x[u][v] * y[u][v]) if x[u][v] * y[u][v] else 0
5,330,026
def NewSetup(*setupnames):
    """Load the given setups instead of the current one.

    Example:

    >>> NewSetup('tas', 'psd')

    will clear the current setup and load the "tas" and "psd" setups at the
    same time.

    Without arguments, the current setups are reloaded.  Example:

    >>> NewSetup()

    You can use `ListSetups()` to find out which setups are available.

    see also: `AddSetup`, `RemoveSetup`, `ListSetups`
    """
    current_mode = session.mode
    # reload current setups if none given
    update_aliases = True
    if not setupnames:
        update_aliases = False
        setupnames = session.explicit_setups
    # refresh setup files first
    session.readSetups()
    session.checkSetupCompatibility(setupnames, set())
    session.unloadSetup()
    try:
        session.startMultiCreate()
        try:
            session.loadSetup(setupnames, update_aliases=update_aliases)
        finally:
            session.endMultiCreate()
    except Exception:
        # Fall back to a known-good state rather than leaving the session
        # with no setup loaded at all.
        session.log.warning('could not load new setup, falling back to '
                            'startup setup', exc=1)
        session.unloadSetup()
        session.loadSetup('startup')
    if current_mode == MASTER:
        # need to refresh master status
        session.setMode(MASTER)
5,330,027
def get_distance_to_center(
    element: object, centers: "Set[object]", distance_function: "function"
) -> float:
    """
    Returns the distance from the given point to its center

    :param element: a point to get the distance for
    :param centers: an iteratable of the center points
    :param distance_function: a function that will compare two datapoints
    :return: the distance from the element to its center
    """
    nearest = get_nearest_center(centers, element, distance_function)
    return distance_function(element, nearest)
5,330,028
def auto_delete_file_on_change(sender, instance, **kwargs):
    """
    Deletes old file from filesystem
    when corresponding `Worksheet` object is updated
    with a new file.
    """
    if not instance.pk:
        # Unsaved instance: there is no old file on disk to replace.
        return False

    # NOTE(review): this .get() can raise Worksheet.DoesNotExist if the row
    # was deleted concurrently -- it is outside the try blocks below; confirm
    # that is acceptable.
    db_obj = Worksheet.objects.get(pk=instance.pk)
    exists = True
    try:
        old_file = db_obj.worksheet_file
    except Worksheet.DoesNotExist:
        # NOTE(review): plain FieldFile attribute access does not normally
        # raise DoesNotExist, so this branch looks unreachable -- confirm.
        exists = False
    if exists:
        new_file = instance.worksheet_file
        if old_file != new_file:
            # Delete old worksheet file without re-saving the model.
            db_obj.worksheet_file.delete(save=False)
    exists = True
    try:
        old_file = db_obj.solution_file
    except Worksheet.DoesNotExist:
        exists = False
    if exists:
        new_file = instance.solution_file
        if old_file != new_file:
            db_obj.solution_file.delete(save=False)
5,330,029
def clean_immigration_data(validPorts: dict, immigration_usa_df: psd.DataFrame, spark: pss.DataFrame) -> psd.DataFrame: """[This cleans immigration data in USA. It casts date of immigrant entry, city of destination, and port entry.] Args: validPorts (dict): [dictionery that includes valid entry ports in USA] immigration_usa_df (psd.DataFrame): [Spark dataframe that includes immigration data in USA] spark (pss.DataFrame): [Spark session that is used for executing queries] Returns: psd.DataFrame: [spark dataframe that includes clean data of immigration movement in usa] """ # cast cities that are valid valid_city_code = list(set(city_code.keys())) str_valid_city_code = str(valid_city_code).replace('[', '(').replace(']', ')') # cast ports that are valid valid_port_code = list(set(validPorts.keys())) str_valid_port_code = str(valid_port_code).replace('[', '(').replace(']', ')') # clean immigration data clean_immigration_usa_df = spark.sql(f''' select date(concat(cast(i94yr as int), "-", cast(i94mon as int), "-01")) as dt, cast(i94addr as varchar(2)), cast(i94port as varchar(3)) from immigration_usa_table where i94yr is not null and i94mon is not null and i94addr is not null and i94port is not null and i94addr in {str_valid_city_code} and i94port in {str_valid_port_code} ''') return clean_immigration_usa_df
5,330,030
def copy_dir(dir_from, dir_to):
    """
    Copy a complete directory
    """
    # Python 2 syntax (print statement, `except Exception, err`).
    print "%s/ -> %s/" % (dir_from, dir_to)
    try:
        shutil.copytree(dir_from, dir_to)
    except Exception, err:
        # Report through the module's error() helper instead of raising;
        # copytree fails e.g. when dir_to already exists.
        error("can't copy '%s' to '%s' (%s)" % (dir_from, dir_to, str(err)))
5,330,031
def prepare(args: dict, overwriting: bool) -> Path:
    """Load config and key file,create output directories and setup log files.

    Args:
        args (dict): argparser dictionary

    Returns:
        Path: output directory path
    """
    out_path = make_dir(args, "results_tmp", "aggregation", overwriting)
    create_log_files(out_path)
    return out_path
5,330,032
def dd_wave_function_array(x, u_array, Lx):
    """Returns numpy array of all second derivatives of waves in Fourier sum"""
    coeff = 2 * np.pi / Lx
    # Differentiating twice brings down -(coeff * u)^2 per mode.
    second_derivative_factor = - coeff ** 2 * u_array ** 2
    return second_derivative_factor * wave_function_array(x, u_array, Lx)
5,330,033
def plot_energy_fluxes(solver, fsrs, group_bounds=None, norm=True,
                       loglog=True, get_figure=False):
    """Plot the scalar flux vs. energy for one or more FSRs.

    The Solver must have converged the FSR sources before calling this
    routine.

    The routine will generate a step plot of the flux across each energy
    group.

    An optional parameter for the energy group bounds may be input.
    The group bounds should be input in increasing order of energy. If group
    bounds are not specified, the routine will use equal width steps for each
    energy group.

    Parameters
    ----------
    solver : openmoc.Solver
        An OpenMOC solver used to compute the flux
    fsrs : Iterable of Integral
        The FSRs for which to plot the flux
    group_bounds : Iterable of Real or None, optional
        The bounds of the energy groups
    norm : bool, optional
        Whether to normalize the fluxes to a unity flux sum (True by default)
    loglog : bool
        Whether to use a log scale on the x- and y-axes (True by default)
    get_figure : bool
        Whether to return the Matplotlib figure

    Returns
    -------
    fig : list of matplotlib.Figure or None
        The Matplotlib figures are returned if get_figure is True

    Examples
    --------
    A user may invoke this function from an OpenMOC Python file as follows:

    >>> openmoc.plotter.plot_energy_fluxes(solver, fsrs=[1,5,20], \
                                           group_bounds=[0., 0.625, 2e7])
    """
    global solver_types
    cv.check_type('solver', solver, solver_types)
    cv.check_type('fsrs', fsrs, Iterable, Integral)
    cv.check_type('norm', norm, bool)
    cv.check_type('loglog', loglog, bool)
    cv.check_type('get_figure', get_figure, bool)

    geometry = solver.getGeometry()
    num_groups = geometry.getNumEnergyGroups()

    if group_bounds:
        cv.check_type('group_bounds', group_bounds, Iterable, Real)
        if not all(low < up for low, up in zip(group_bounds, group_bounds[1:])):
            py_printf('ERROR', 'Unable to plot the flux vs. energy since ' +
                      'the group bounds are not monotonically increasing')
    else:
        # Equal-width unit steps when no bounds are given; log scale is
        # meaningless for such synthetic bounds.
        # NOTE(review): np.int / np.float below were removed in NumPy 1.24;
        # this code presumably targets an older NumPy -- confirm.
        group_bounds = np.arange(num_groups+1, dtype=np.int)
        loglog = False

    py_printf('NORMAL', 'Plotting the scalar fluxes vs. energy...')

    global subdirectory, matplotlib_rcparams
    directory = openmoc.get_output_directory() + subdirectory

    # Ensure that normal settings are used even if called from ipython
    curr_rc = matplotlib.rcParams.copy()
    update_rc_param(curr_rc)

    # Make directory if it does not exist
    try:
        os.makedirs(directory)
    except OSError:
        pass

    # Compute difference in energy bounds for each group
    group_deltas = np.ediff1d(group_bounds)
    # Flip to descending energy order for plotting.
    group_bounds = np.flipud(group_bounds)
    group_deltas = np.flipud(group_deltas)

    # Initialize an empty list of Matplotlib figures if requestd by the user
    figures = []

    # Iterate over all source regions
    for fsr in fsrs:

        # Allocate memory for an array of this FSR's fluxes
        fluxes = np.zeros(num_groups, dtype=np.float)

        # Extract the flux in each energy group
        for group in range(num_groups):
            fluxes[group] = solver.getFlux(fsr, group+1)

        # Normalize fluxes to the total integrated flux
        if norm:
            fluxes /= np.sum(group_deltas * fluxes)

        # Initialize a separate plot for this FSR's fluxes
        fig = plt.figure()
        fig.patch.set_facecolor('none')

        # Draw horizontal/vertical lines on the plot for each energy group
        for group in range(num_groups):

            # Horizontal line at the group's flux level
            if loglog:
                plt.loglog(group_bounds[group:group+2], [fluxes[group]]*2,
                           linewidth=3, c='b', label='openmoc', linestyle='-')
            else:
                plt.plot(group_bounds[group:group+2], [fluxes[group]]*2,
                         linewidth=3, c='b', label='openmoc', linestyle='-')

            # Vertical lines joining adjacent group fluxes
            if group < num_groups - 1:
                if loglog:
                    plt.loglog([group_bounds[group+1]]*2, fluxes[group:group+2],
                               c='b', linestyle='--')
                else:
                    plt.plot([group_bounds[group+1]]*2, fluxes[group:group+2],
                             c='b', linestyle='--')

        plt.xlabel('Energy')
        plt.ylabel('Flux')
        plt.xlim((min(group_bounds), max(group_bounds)))
        plt.grid()
        plt.title('FSR {0} Flux ({1} groups)'.format(fsr, num_groups))

        # Save the figure to a file or return to user if requested
        if geometry.isRootDomain():
            if get_figure:
                figures.append(fig)
            else:
                filename = 'flux-fsr-{0}.png'.format(fsr)
                plt.savefig(directory+filename, bbox_inches='tight')
                plt.close(fig)

    # Restore settings if called from ipython
    update_rc_param(curr_rc)

    # Return the figures if requested by user
    if get_figure:
        return figures
5,330,034
def test_atomic_any_uri_min_length_3_nistxml_sv_iv_atomic_any_uri_min_length_4_3(mode, save_output, output_format):
    """
    Type atomic/anyURI is restricted by facet minLength with value 50.
    """
    # Schema and instance live in the same NIST data directory.
    base = "nistData/atomic/anyURI/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-atomic-anyURI-minLength-4.xsd",
        instance=base + "NISTXML-SV-IV-atomic-anyURI-minLength-4-3.xml",
        class_name="NistschemaSvIvAtomicAnyUriMinLength4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,330,035
def surface_distance_ground_truth_plot(ct: np.ndarray, ground_truth: np.ndarray,
                                       sds_full: np.ndarray, subject_id: int,
                                       structure: str, plane: Plane,
                                       output_img_dir: Path, dice: float = None,
                                       save_fig: bool = True,
                                       annotator: str = None) -> None:
    """
    Plot surface distances where prediction > 0, with ground truth contour

    :param ct: CT scan
    :param ground_truth: Ground truth segmentation
    :param sds_full: Surface distances (full= where prediction > 0)
    :param subject_id: ID of subject for annotating plot
    :param structure: Name of structure for annotating plot
    :param plane: The plane to view images in (axial, sagittal or coronal)
    :param output_img_dir: The dir in which to store the plots
    :param dice: Optional dice score for annotating plot
    :param save_fig: If True, saves image. Otherwise displays it.
    :param annotator: Optional annotator name for annotating plot
    :return:
    """
    # get dimension to slice across to get the best 2D view
    view_dim, origin = plotting_util.get_view_dim_and_origin(plane)
    midpoint = ground_truth.shape[view_dim] // 2

    # Take image slices and mask where necessary
    sds_full_slice = np.take(sds_full, midpoint, axis=view_dim)
    total_pixels = sds_full_slice
    # If surface distance array covers everywhere with pred > 0, mask at some
    # threshold else centre of every structure will be red
    masked_sds_full_slice = np.ma.masked_where(sds_full_slice == 0, sds_full_slice)

    gt_contour = extract_border(ground_truth, connectivity=3)
    gt_contour_slice = np.take(gt_contour, midpoint, axis=view_dim)
    total_pixels += gt_contour_slice.astype(float)

    # Crop the view to the region that actually contains signal; fall back to
    # the full slice if nothing is present.
    try:
        bounding_box = plotting_util.get_cropped_axes(total_pixels)
    except IndexError:
        bounding_box = tuple([slice(0, total_pixels.shape[0] + 1),
                              slice(0, total_pixels.shape[1] + 1)])

    fig, ax = plt.subplots()
    black_cmap = colors.ListedColormap('black')
    sds_cmap = plt.get_cmap("RdYlGn_r")
    # Discrete color bins for the surface-distance values (mm).
    bounds = [0.5, 1, 1.5, 2, 2.5, 3, 3.5]
    sds_norm = colors.BoundaryNorm(bounds, sds_cmap.N)

    # plot pixels outside of border in black
    masked_external_pixels = np.ma.masked_where(ground_truth == 1, ground_truth)
    masked_external_slice = np.take(masked_external_pixels, midpoint, axis=view_dim)

    if ct is not None:
        # Overlay the CT slice with the exterior dimmed.
        ct_slice = np.take(ct, midpoint, axis=view_dim)
        ax.imshow(ct_slice[bounding_box], cmap="Greys", origin=origin)
        ax.imshow(masked_external_slice[bounding_box], cmap=black_cmap,
                  origin=origin, alpha=0.7)
    else:
        gt_slice = np.take(ground_truth, midpoint, axis=view_dim)
        ax.imshow(gt_slice[bounding_box], cmap='Greys_r', origin=origin)

    cb = ax.imshow(masked_sds_full_slice[bounding_box], cmap=sds_cmap,
                   norm=sds_norm, origin=origin, alpha=0.7)
    fig.colorbar(cb)

    # Plot title
    dice_str = str(dice) if dice else ""
    annot_str = annotator if annotator else ""
    fig.suptitle(f'{subject_id} {structure} sds - {annot_str}. Dice: {dice_str}')

    # Resize image
    ax.set_aspect('equal')

    if save_fig:
        figpath = Path("outputs") / output_img_dir / f"{int(subject_id):03d}_{structure}_sds2_{annot_str}.png"
        print(f"saving to {str(figpath)}")
        resize_and_save(5, 5, figpath)
    else:
        fig.show()
5,330,036
def handle_health_check():
    """Liveness probe endpoint: reply with an empty HTTP 200 response."""
    ok_status = 200
    return Response(status=ok_status)
5,330,037
def pow(x, n):
    """Return ``x`` raised to the power ``n``.

    Note: this deliberately shadows the built-in :func:`pow`; the name is
    part of the public API and is kept for compatibility.
    """
    result = x ** n
    return result
5,330,038
def create_graph_from_path(path: Path) -> None:
    """Recursively load all Kubernetes manifests with a YAML file extension
    under ``path`` and feed every document into the Caboto graph.

    :param path: directory searched recursively for manifest files
    :raises yaml.YAMLError: when a file cannot be parsed as YAML
    """
    # Kubernetes manifests use either suffix in the wild, so match both.
    for pattern in ("*.yaml", "*.yml"):
        for manifest in path.rglob(pattern):
            with open(manifest.absolute(), "r") as stream:
                # BUGFIX: safe_load_all returns a lazy generator, so parse
                # errors surface during iteration — materialize inside the
                # try block so the except clause can actually catch them.
                try:
                    documents = list(yaml.safe_load_all(stream))
                except yaml.YAMLError as exc:
                    # Log the parse problem before propagating it unchanged.
                    print(exc)
                    raise
                for doc in documents:
                    create_graph_from_dict(doc)
5,330,039
def read_fifo():
    """Continuously read and forward messages from the FIFO.

    Removes any stale FIFO at ``fifo_file``, recreates it world-writable,
    then loops forever: each iteration blocks in ``open()`` until a writer
    connects, forwards the data, and reopens for the next writer.
    Never returns.
    """
    import errno

    # Remove a leftover FIFO from a previous run; absence is fine, any
    # other error is real and must not be swallowed (the original code
    # silently ignored every OSError and used removed Python 2 syntax).
    try:
        os.unlink(fifo_file)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

    # Temporarily clear the umask so the FIFO really ends up mode 0777.
    oldmask = os.umask(0)
    os.mkfifo(fifo_file, 0o777)
    os.umask(oldmask)

    while True:
        # open() blocks until a writer appears; the context manager
        # guarantees the descriptor is closed before the next cycle.
        with open(fifo_file, 'r') as fd:
            read_and_forward(fd)
5,330,040
def download_cad_model():
    """Fetch the CAD demo dataset and return the loaded mesh object."""
    dataset_filename = '42400-IDGH.stl'
    return _download_and_read(dataset_filename)
5,330,041
def dot_product(u, v):
    """Compute the dot product of two equal-length vectors.

    Each vector is a tuple or list of numeric coordinates; the two are
    assumed to have the same length.

    :param u: first vector
    :param v: second vector
    :return: sum of element-wise products (0 for empty vectors)
    """
    # sum/zip replaces the original index-based accumulation loop.
    return sum(a * b for a, b in zip(u, v))
5,330,042
def precise_diff(
    d1, d2
):  # type: (typing.Union[datetime.datetime, datetime.date], typing.Union[datetime.datetime, datetime.date]) -> PreciseDiff
    """
    Calculate a precise difference between two datetimes.

    :param d1: The first datetime
    :type d1: datetime.datetime or datetime.date

    :param d2: The second datetime
    :type d2: datetime.datetime or datetime.date

    :rtype: PreciseDiff
    """
    # The computation always works on the ordered pair (d1 <= d2) and
    # re-applies the sign to every component at the end.
    sign = 1

    if d1 == d2:
        return PreciseDiff(0, 0, 0, 0, 0, 0, 0, 0)

    tzinfo1 = d1.tzinfo if isinstance(d1, datetime.datetime) else None
    tzinfo2 = d2.tzinfo if isinstance(d2, datetime.datetime) else None

    # Mixing naive and aware datetimes is ambiguous, so it is rejected.
    if (
        tzinfo1 is None
        and tzinfo2 is not None
        or tzinfo2 is None
        and tzinfo1 is not None
    ):
        raise ValueError(
            "Comparison between naive and aware datetimes is not supported"
        )

    if d1 > d2:
        d1, d2 = d2, d1
        sign = -1

    d_diff = 0
    hour_diff = 0
    min_diff = 0
    sec_diff = 0
    mic_diff = 0
    # Total elapsed days computed from absolute day numbers (calendar-safe).
    total_days = _day_number(d2.year, d2.month, d2.day) - _day_number(
        d1.year, d1.month, d1.day
    )
    in_same_tz = False
    tz1 = None
    tz2 = None

    # Trying to figure out the timezone names
    # If we can't find them, we assume different timezones
    if tzinfo1 and tzinfo2:
        if hasattr(tzinfo1, "name"):
            # Pendulum timezone
            tz1 = tzinfo1.name
        elif hasattr(tzinfo1, "zone"):
            # pytz timezone
            tz1 = tzinfo1.zone

        if hasattr(tzinfo2, "name"):
            tz2 = tzinfo2.name
        elif hasattr(tzinfo2, "zone"):
            tz2 = tzinfo2.zone

        in_same_tz = tz1 == tz2 and tz1 is not None

    if isinstance(d2, datetime.datetime):
        if isinstance(d1, datetime.datetime):
            # If we are not in the same timezone
            # we need to adjust
            #
            # We also need to adjust if we do not
            # have variable-length units
            if not in_same_tz or total_days == 0:
                offset1 = d1.utcoffset()
                offset2 = d2.utcoffset()

                if offset1:
                    d1 = d1 - offset1

                if offset2:
                    d2 = d2 - offset2

            hour_diff = d2.hour - d1.hour
            min_diff = d2.minute - d1.minute
            sec_diff = d2.second - d1.second
            mic_diff = d2.microsecond - d1.microsecond
        else:
            # d1 is a plain date: treat its time-of-day as midnight.
            hour_diff = d2.hour
            min_diff = d2.minute
            sec_diff = d2.second
            mic_diff = d2.microsecond

        # Normalize negative components by borrowing from the next-larger
        # unit (microseconds -> seconds -> minutes -> hours -> days).
        if mic_diff < 0:
            mic_diff += 1000000
            sec_diff -= 1

        if sec_diff < 0:
            sec_diff += 60
            min_diff -= 1

        if min_diff < 0:
            min_diff += 60
            hour_diff -= 1

        if hour_diff < 0:
            hour_diff += 24
            d_diff -= 1

    y_diff = d2.year - d1.year
    m_diff = d2.month - d1.month
    d_diff += d2.day - d1.day

    if d_diff < 0:
        # Borrow days from the month preceding d2, accounting for its
        # actual (possibly leap-year) length.
        year = d2.year
        month = d2.month

        if month == 1:
            month = 12
            year -= 1
        else:
            month -= 1

        leap = int(is_leap(year))

        days_in_last_month = DAYS_PER_MONTHS[leap][month]
        days_in_month = DAYS_PER_MONTHS[int(is_leap(d2.year))][d2.month]

        if d_diff < days_in_month - days_in_last_month:
            # We don't have a full month, we calculate days
            if days_in_last_month < d1.day:
                d_diff += d1.day
            else:
                d_diff += days_in_last_month
        elif d_diff == days_in_month - days_in_last_month:
            # We have exactly a full month
            # We remove the days difference
            # and add one to the months difference
            d_diff = 0
            m_diff += 1
        else:
            # We have a full month
            d_diff += days_in_last_month
            m_diff -= 1

    if m_diff < 0:
        m_diff += 12
        y_diff -= 1

    return PreciseDiff(
        sign * y_diff,
        sign * m_diff,
        sign * d_diff,
        sign * hour_diff,
        sign * min_diff,
        sign * sec_diff,
        sign * mic_diff,
        sign * total_days,
    )
5,330,043
def get_module_doctest_tup(
    testable_list=None,
    check_flags=True,
    module=None,
    allexamples=None,
    needs_enable=None,
    N=0,
    verbose=True,
    testslow=False,
):
    """
    Parses module for testable doctesttups

    enabled_testtup_list (list): a list of testtup

        testtup (tuple): (name, num, src, want, flag) describes a valid doctest in the module

            name (str): test name
            num (str): test number of the module / function / class / method
            src (str): test source code
            want (str): expected test result
            flag (str): a valid commandline flag to enable this test

    frame_fpath (str): module fpath that will be tested
    all_testflags (list): the command line arguments that will enable different tests
    module (module): the actual module that will be tested
    exclude_inherited (bool): does not included tests defined in other modules

    Args:
        testable_list (list): a list of functions (default = None)
        check_flags (bool): (default = True)
        module (None): (default = None)
        allexamples (None): (default = None)
        needs_enable (None): (default = None)
        N (int): (default = 0)
        verbose (bool): verbosity flag(default = True)
        testslow (bool): (default = False)

    Returns:
        ModuleDoctestTup : (enabled_testtup_list, frame_fpath, all_testflags, module)

    CommandLine:
        python -m utool.util_tests --exec-get_module_doctest_tup

    Example:
        >>> from utool.util_tests import *  # NOQA
        >>> import utool as ut
        >>> #testable_list = [ut.util_import.package_contents]
        >>> testable_list = None
        >>> check_flags = False
        >>> module = ut.util_cplat
        >>> allexamples = False
        >>> needs_enable = None
        >>> N = 0
        >>> verbose = True
        >>> testslow = False
        >>> mod_doctest_tup = get_module_doctest_tup(testable_list, check_flags,
        >>>                                          module, allexamples,
        >>>                                          needs_enable, N, verbose,
        >>>                                          testslow)
        >>> result = ('mod_doctest_tup = %s' % (ut.repr4(mod_doctest_tup, nl=4),))
        >>> print(result)
    """
    # +------------------------
    if VERBOSE_TEST:
        print('[util_test.get_module_doctest tup][DEPTH 2] get_module_doctest tup()')
    import utool as ut  # NOQA

    if needs_enable is None:
        needs_enable = not ut.get_argflag('--enableall')
        # needs_enable = True
    TEST_ALL_EXAMPLES = allexamples or ut.get_argflag(('--allexamples', '--all-examples'))
    parse_testables = True
    if isinstance(testable_list, types.ModuleType):
        # hack: a module passed positionally means "scan this module"
        module = testable_list
        testable_list = []
        testable_name_list = []
    elif testable_list is None:
        testable_list = []
        testable_name_list = []
    else:
        # An explicit list of callables disables module scanning below.
        testable_name_list = [ut.get_funcname(func) for func in testable_list]
        parse_testables = False
    # L________________________
    # +------------------------
    # GET_MODULE_DOCTEST_TUP Step 1:
    # Inspect caller module for testable names
    if module is None:
        frame_fpath = '???'
        try:
            # This is a bit finky. Need to be exactly N frames under the main
            # module
            frame = ut.get_parent_frame(N=N)
            main_modname = '__main__'
            frame_name = frame.f_globals['__name__']
            frame_fpath = frame.f_globals['__file__']
            if frame_name == main_modname:
                module = sys.modules[main_modname]
                entry_modname = ut.get_modname_from_modpath(module.__file__)
                if entry_modname in ['kernprof', 'kernprof-script']:
                    # kernprof clobbers the __main__ variable.
                    # workaround by reimporting the module name
                    import importlib

                    modname = ut.get_modname_from_modpath(frame_fpath)
                    module = importlib.import_module(modname)
        except Exception as ex:
            print(frame.f_globals)
            ut.printex(ex, keys=['frame', 'module'])
            raise
        allexamples = False
    else:
        frame_fpath = module.__file__
        allexamples = True
    # L________________________
    # +------------------------
    # GET_MODULE_DOCTEST_TUP Step 2:
    # --- PARSE TESTABLE FUNCTIONS ---
    # Get testable functions
    if parse_testables:
        try:
            if verbose or VERBOSE_TEST and ut.NOT_QUIET:
                print('[ut.test] Iterating over module funcs')
                print('[ut.test] module =%r' % (module,))
            # A member is "testable" when its docstring mentions an
            # Example or Doctest section.
            _testableiter = ut.iter_module_doctestable(module, include_inherited=False)
            for key, val in _testableiter:
                if isinstance(val, staticmethod):
                    docstr = inspect.getdoc(val.__func__)
                else:
                    docstr = inspect.getdoc(val)
                docstr = ut.ensure_unicode(docstr)
                if docstr is not None and (
                    docstr.find('Example') >= 0 or docstr.find('Doctest') >= 0
                ):
                    testable_name_list.append(key)
                    testable_list.append(val)
                    if VERBOSE_TEST and ut.NOT_QUIET:
                        print('[ut.test] Testable: %s' % (key,))
                else:
                    if VERBOSE_TEST and ut.NOT_QUIET:
                        if docstr.find('Example') >= 0 or docstr.find('Doctest') >= 0:
                            print('[ut.test] Ignoring (disabled) : %s' % key)
                        else:
                            print('[ut.test] Ignoring (no Example) : %s' % key)
        except Exception as ex:
            print('FAILED')
            print(docstr)
            ut.printex(ex, keys=['frame'])
            raise
    # OUTPUTS: testable_list
    # L________________________
    # +------------------------
    # GET_MODULE_DOCTEST_TUP Step 3:
    # --- FILTER TESTABLES_---
    # Get testable function examples
    test_sentinals = [
        'ENABLE_DOCTEST',
        'ENABLE_GRID_DOCTEST',
    ]
    if testslow or ut.get_argflag(('--testall', '--testslow', '--test-slow')):
        test_sentinals.append('SLOW_DOCTEST')
    if testslow or ut.get_argflag(('--testall', '--testunstable')):
        test_sentinals.append('UNSTABLE_DOCTEST')

    # FIND THE TEST NAMES REQUESTED
    # Grab sys.argv enabled tests
    cmdline_varargs = ut.get_cmdline_varargs()
    force_enable_testnames_ = cmdline_varargs[:]
    valid_prefix_list = ['--test-', '--exec-', '--dump-']
    # if False:
    for arg in sys.argv:
        for prefix in valid_prefix_list:
            if arg.startswith(prefix):
                testname = arg[len(prefix) :]
                # testname = testname.split(':')[0].replace('-', '_')
                force_enable_testnames_.append(testname)
                # break

    # PartA: Fixup names
    # TODO: parse out requested test number here
    # instead of later in the code. See PartB
    force_enable_testnames = []
    for testname in force_enable_testnames_:
        testname = testname.split(':')[0].replace('-', '_')
        # NOTE(review): the next line recomputes the same expression and
        # discards the result — it is a no-op kept for byte-compatibility.
        testname.split(':')[0].replace('-', '_')
        force_enable_testnames.append(testname)

    def _get_testable_name(testable):
        # Resolve a printable name: py2 func_name first, then __name__.
        import utool as ut

        if isinstance(testable, staticmethod):
            testable = testable.__func__
        try:
            testable_name = testable.func_name
        except AttributeError as ex1:
            try:
                testable_name = testable.__name__
            except AttributeError as ex2:
                ut.printex(ex1, ut.repr4(dir(testable)))
                ut.printex(ex2, ut.repr4(dir(testable)))
                raise
        return testable_name

    sorted_testable = sorted(list(set(testable_list)), key=_get_testable_name)
    # Append each testable example
    if VERBOSE_TEST:
        print('Vars:')
        print(' * needs_enable = %r' % (needs_enable,))
        print(' * force_enable_testnames = %r' % (force_enable_testnames,))
        print(' * len(sorted_testable) = %r' % (len(sorted_testable),))
        print(' * cmdline_varargs = %r' % (cmdline_varargs,))
        indenter = ut.Indenter('[FIND_AVAIL]')
        indenter.start()
    # PARSE OUT THE AVAILABLE TESTS FOR EACH REQUEST
    local_testtup_list = []
    for testable in sorted_testable:
        short_testname = _get_testable_name(testable)
        full_testname = None
        # Namespaced classname (within module)
        if isinstance(testable, staticmethod):
            testable = testable.__func__
        if hasattr(testable, '__ut_parent_class__'):
            # HACK for getting classname.funcname
            test_namespace = testable.__ut_parent_class__.__name__
            full_testname = test_namespace + '.' + short_testname
        else:
            test_namespace = None
            full_testname = short_testname
        nametup = tuple(ut.unique([full_testname, short_testname]))
        # modpath = ut.get_modpath(module)
        examptup = get_doctest_examples(testable)
        examples, wants, linenums, func_lineno, docstr = examptup
        total_examples = len(examples)
        if total_examples > 0:
            for testno, srcwant_tup in enumerate(zip(examples, wants)):
                src, want = srcwant_tup
                src_ = ut.regex_replace('from __future__ import.*$', '', src)
                # A test is disabled unless its source mentions one of the
                # sentinel strings (ENABLE_DOCTEST etc.).
                test_disabled = not any([src_.find(s) >= 0 for s in test_sentinals])
                skip = (
                    needs_enable
                    and test_disabled
                    and ut.isdisjoint(nametup, force_enable_testnames)
                )
                if not skip:
                    if VERBOSE_TEST:
                        print(
                            ' * HACK adding testname=%r to local_testtup_list'
                            % (full_testname,)
                        )
                    local_testtup = (
                        nametup,
                        testno,
                        src_,
                        want,
                        test_namespace,
                        short_testname,
                        total_examples,
                    )
                    local_testtup_list.append(local_testtup)
                else:
                    if VERBOSE_TEST:
                        # print('force_enable_testnames = %r' % (force_enable_testnames,))
                        # print('nametup = %r' % (nametup,))
                        # print('needs_enable = %r' % (needs_enable,))
                        # print('test_disabled = %r' % (test_disabled,))
                        print(' * skipping: %r / %r' % (short_testname, full_testname))
        else:
            print(
                'WARNING: no examples in %r for testname=%r'
                % (frame_fpath, full_testname)
            )
            if verbose:
                print(testable)
                print(examples)
                print(wants)
                print(docstr)
        if VERBOSE_TEST:
            print(' --')
    if VERBOSE_TEST:
        indenter.stop()
    # L________________________
    # +------------------------
    # Get enabled (requested) examples
    if VERBOSE_TEST:
        print('\n-----\n')
        indenter = ut.Indenter('[IS_ENABLED]')
        indenter.start()
        print('Finished parsing available doctests.')
        print('Now we need to find which examples are enabled')
        print('len(local_testtup_list) = %r' % (len(local_testtup_list),))
        print(
            'local_testtup_list.T[0:2].T = %s'
            % ut.repr4(ut.take_column(local_testtup_list, [0, 1]))
        )
        print('sys.argv = %r' % (sys.argv,))
    all_testflags = []
    enabled_testtup_list = []
    distabled_testflags = []
    subx = ut.get_argval(
        '--subx', type_=int, default=None, help_='Only tests the subxth example'
    )

    def make_valid_testnames(name, num, total):
        # All commandline spellings that select example <num> of <name>.
        return [
            name + ':' + str(num),
            name,
            name + ':' + str(num - total),  # allow negative indices
            # prefix + name.replace('_', '-') + ':' + str(num),
            # prefix + name.replace('_', '-')
        ]

    def make_valid_test_argflags(prefix, name, num, total):
        valid_testnames = make_valid_testnames(name, num, total)
        return [prefix + testname for testname in valid_testnames]

    def check_if_test_requested(nametup, num, total, valid_prefix_list):
        # cmdline_varargs
        if VERBOSE_TEST:
            print('Checking cmdline for %r %r' % (nametup, num))
        valid_argflags = []
        # FIXME: PartB
        # should parse out test number above instead of here
        # See PartA
        mode = None
        veryverb = 0
        # First check positional args
        testflag = None
        for name in nametup:
            valid_testnames = make_valid_test_argflags('', name, num, total)
            if veryverb:
                print('Checking if positional* %r' % (valid_testnames[0:1],))
                print('name = %r' % (name,))
            if any([x in cmdline_varargs for x in valid_testnames]):
                # hack
                mode = 'exec'
                testflag = name
                flag1 = '--exec-' + name + ':' + str(num)
            if testflag is not None:
                if veryverb:
                    print('FOUND POSARG')
                    print(' * testflag = %r' % (testflag,))
                    print(' * num = %r' % (num,))
                break
        # Then check keyword-ish args
        if mode is None:
            for prefix, name in reversed(list(ut.iprod(valid_prefix_list, nametup))):
                valid_argflags = make_valid_test_argflags(prefix, name, num, total)
                if veryverb:
                    print('Checking for flags*: %r' % (valid_argflags[0],))
                flag1 = valid_argflags[0]
                testflag = ut.get_argflag(valid_argflags)
                mode = prefix.replace('-', '')
                if testflag:
                    if veryverb:
                        print('FOUND VARARG')
                    break
            else:
                # print('WARNING NO TEST IS ENABLED %r ' % (nametup,))
                pass
        checktup = flag1, mode, name, testflag
        return checktup

    for local_testtup in local_testtup_list:
        (nametup, num, src, want, shortname, test_namespace, total) = local_testtup
        checktup = check_if_test_requested(nametup, num, total, valid_prefix_list)
        flag1, mode, name, testflag = checktup
        testenabled = TEST_ALL_EXAMPLES or not check_flags or testflag
        if subx is not None and subx != num:
            continue
        all_testflags.append(flag1)
        if testenabled:
            if VERBOSE_TEST:
                print('... enabling test')
            testtup = TestTuple(
                name,
                num,
                src,
                want,
                flag1,
                frame_fpath=frame_fpath,
                mode=mode,
                total=total,
                nametup=nametup,
                shortname=shortname,
                test_namespace=test_namespace,
            )
            if VERBOSE_TEST:
                print('... ' + str(testtup))
            enabled_testtup_list.append(testtup)
        else:
            if VERBOSE_TEST:
                print('... disabling test')
            distabled_testflags.append(flag1)

    # Attempt to run test without any context
    # This will only work if the function exist and is self contained
    if len(force_enable_testnames_) > 0 and len(enabled_testtup_list) == 0:
        if VERBOSE_TEST:
            print('Forced test did not have a doctest example')
            print('Maybe it can be run without any context')
        import utool as ut

        # assert len(force_enable_testnames) == 1
        test_funcname_ = force_enable_testnames[0]
        if test_funcname_.find('.') != -1:
            test_classname, test_funcname = test_funcname_.split('.')
            class_ = getattr(module, test_classname, None)
            assert class_ is not None
            func_ = getattr(class_, test_funcname, None)
        else:
            test_funcname = test_funcname_
            func_ = getattr(module, test_funcname, None)
        if VERBOSE_TEST:
            print('test_funcname = %r' % (test_funcname,))
            print('func_ = %r' % (func_,))
        if func_ is not None:
            testno = 0
            modname = ut.get_modname_from_modpath(module.__file__)
            want = None
            try:
                if VERBOSE_TEST:
                    print('attempting xdoctest hack')
                # hack to get classmethods to read their example using
                # the xdoctest port
                from xdoctest import docscrape_google
                from xdoctest import core as xdoc_core
                from xdoctest import static_analysis as static

                if func_.__doc__ is None:
                    raise TypeError
                blocks = docscrape_google.split_google_docblocks(func_.__doc__)
                example_blocks = []
                for type_, block in blocks:
                    if type_.startswith('Example') or type_.startswith('Doctest'):
                        example_blocks.append((type_, block))
                if len(example_blocks) == 0:
                    if VERBOSE_TEST:
                        print('xdoctest found no blocks')
                    raise KeyError
                callname = test_funcname_
                hack_testtups = []
                for num, (type_, block) in enumerate(example_blocks):
                    # print('modname = %r' % (modname,))
                    # print('callname = %r' % (callname,))
                    # print('num = %r' % (num,))
                    modpath = static.modname_to_modpath(modname)
                    example = xdoc_core.DocTest(
                        modpath=modpath, callname=callname, docsrc=block, num=num
                    )
                    src = example.format_src(colored=False, want=False, linenos=False)
                    want = '\n'.join(list(example.wants()))
                    testtup = TestTuple(
                        test_funcname_,
                        num,
                        src,
                        want=want,
                        flag='--exec-' + test_funcname_,
                        frame_fpath=frame_fpath,
                        mode='exec',
                        total=len(example_blocks),
                        nametup=[test_funcname_],
                    )
                    hack_testtups.append(testtup)
                if VERBOSE_TEST:
                    print('hack_testtups = %r' % (hack_testtups,))
                enabled_testtup_list.extend(hack_testtups)
                # src = '\n'.join([line[4:] for line in src.split('\n')])
            except (ImportError, KeyError, TypeError):
                if VERBOSE_TEST:
                    print('xdoctest hack failed')
                # varargs = ut.get_cmdline_varargs()
                varargs = force_enable_testnames[1:]
                # Create dummy doctest
                src = ut.codeblock(
                    """
                    # DUMMY_DOCTEST
                    from {modname} import *  # NOQA
                    args = {varargs}
                    result = {test_funcname_}(*args)
                    print(result)
                    """
                ).format(
                    modname=modname, test_funcname_=test_funcname_, varargs=repr(varargs)
                )
                testtup = TestTuple(
                    test_funcname_,
                    testno,
                    src,
                    want=want,
                    flag='--exec-' + test_funcname_,
                    frame_fpath=frame_fpath,
                    mode='exec',
                    total=1,
                    nametup=[test_funcname_],
                )
                enabled_testtup_list.append(testtup)
        else:
            print('function %r was not found in %r' % (test_funcname_, module))

    if VERBOSE_TEST:
        indenter.stop()
    if ut.get_argflag('--list'):
        # HACK: Should probably just return a richer structure
        print('testable_name_list = %s' % (ut.repr4(testable_name_list),))
    mod_doctest_tup = ModuleDoctestTup(
        enabled_testtup_list, frame_fpath, all_testflags, module
    )
    # L________________________
    return mod_doctest_tup
5,330,044
def subset_lists(L, min_size=0, max_size=None):
    """Strategy to generate a subset of a `list`.

    This should be built in to hypothesis (see hypothesis issue #1115), but
    was rejected.

    Parameters
    ----------
    L : list
        List of elements we want to get a subset of.
    min_size : int
        Minimum size of the resulting subset list.
    max_size : int or None
        Maximum size of the resulting subset list. ``None`` means up to the
        number of unique elements in `L`.

    Returns
    -------
    S : SearchStrategy
        Hypothesis search strategy that generates lists of unique elements
        drawn from `L`, with sizes in ``[min_size, max_size]``.
    """
    _check_valid_size_interval(min_size, max_size, "subset list size")
    uniq_len = len(set(L))
    order_check("input list size", 0, min_size, uniq_len)
    # Cap the requested maximum at the number of distinct elements, since
    # the generated lists are unique-element lists.
    max_size = uniq_len if max_size is None else min(uniq_len, max_size)

    # Avoid deprecation warning HypothesisDeprecationWarning: sampled_from()
    # cannot take an empty collection, so fall back to `nothing()`.
    elements_st = nothing() if uniq_len == 0 else sampled_from(L)

    S = lists(elements=elements_st, min_size=min_size, max_size=max_size, unique=True)
    return S
5,330,045
async def async_setup_entry(
    hass: HomeAssistant,
    entry: config_entries.ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Flux lights."""
    coordinator: FluxLedUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    device = coordinator.device
    name = entry.data[CONF_NAME]
    unique_id = entry.unique_id

    entities: list[
        FluxSpeedNumber
        | FluxPixelsPerSegmentNumber
        | FluxSegmentsNumber
        | FluxMusicPixelsPerSegmentNumber
        | FluxMusicSegmentsNumber
    ] = []

    # (device attribute value, entity class, display suffix, device key) —
    # one row per optional segment-related number entity, in creation order.
    segment_specs = (
        (device.pixels_per_segment, FluxPixelsPerSegmentNumber,
         "Pixels Per Segment", "pixels_per_segment"),
        (device.segments, FluxSegmentsNumber, "Segments", "segments"),
        (device.music_pixels_per_segment, FluxMusicPixelsPerSegmentNumber,
         "Music Pixels Per Segment", "music_pixels_per_segment"),
        (device.music_segments, FluxMusicSegmentsNumber,
         "Music Segments", "music_segments"),
    )
    for value, entity_cls, suffix, key in segment_specs:
        if value is not None:
            entities.append(entity_cls(coordinator, unique_id, f"{name} {suffix}", key))

    # Effect speed only makes sense when the device exposes real effects.
    if device.effect_list and device.effect_list != [EFFECT_RANDOM]:
        entities.append(
            FluxSpeedNumber(coordinator, unique_id, f"{name} Effect Speed", None)
        )

    if entities:
        async_add_entities(entities)
5,330,046
def DCNPack(x, extra_feat, out_channels, kernel_size=(3, 3), strides=(1, 1), padding='same',
            dilations=(1, 1), use_bias=True, num_groups=1, num_deform_groups=1, trainable=True,
            dcn_version='v2', name='DCN'):
    """Deformable convolution encapsulation that acts as normal convolution layers.

    Offsets (and, for v2, a modulation mask) are predicted from ``extra_feat``
    by a plain convolution and then applied to ``x`` by ``DeformableConvLayer``.

    Args:
        x: input feature map tensor (channels-last — presumably NHWC; confirm
            against DeformableConvLayer's expectations).
        extra_feat: feature map from which offsets/masks are predicted.
        out_channels: number of output channels of the deformable conv.
        kernel_size, strides, padding, dilations, use_bias: standard conv
            hyper-parameters, shared by the offset conv and the deformable conv.
        num_groups: channel groups of the deformable convolution.
        num_deform_groups: number of deformable offset groups.
        trainable: whether the created variables are trainable.
        dcn_version: 'v1' (offsets only) or 'v2' (offsets + sigmoid mask).
        name: variable scope name.

    Returns:
        The output tensor of the deformable convolution.

    Raises:
        NotImplementedError: for an unknown ``dcn_version``.
    """
    with tf.variable_scope(name):
        x = tf.cast(x, tf.float32)
        if dcn_version == 'v1':
            # v1 predicts 2 values (dy, dx) per kernel position per deform group.
            offset = Conv2D(extra_feat, num_deform_groups * 2 * kernel_size[0] * kernel_size[1],
                            kernel_size=kernel_size, strides=strides, padding=padding,
                            dilations=dilations, use_bias=use_bias, trainable=trainable,
                            name='conv_offset')
            offset = tf.cast(offset, tf.float32)
            mask = None
        elif dcn_version == 'v2':
            # v2 predicts 3 values per kernel position: 2 offsets + 1 mask logit.
            conv_offset = Conv2D(extra_feat, num_deform_groups * 3 * kernel_size[0] * kernel_size[1],
                                 kernel_size=kernel_size, strides=strides, padding=padding,
                                 dilations=dilations, use_bias=use_bias, trainable=trainable,
                                 name='conv_offset')
            conv_offset = tf.cast(conv_offset, tf.float32)
            # First 2*Kh*Kw*groups channels are offsets; the remainder are
            # mask logits squashed to (0, 1) with a sigmoid.
            offset = conv_offset[:, :, :, :num_deform_groups * 2 * kernel_size[0] * kernel_size[1]]
            mask = conv_offset[:, :, :, num_deform_groups * 2 * kernel_size[0] * kernel_size[1]:]
            mask = tf.nn.sigmoid(mask)
        else:
            raise NotImplementedError
        out = DeformableConvLayer(
            in_channels=int(x.shape[-1]),
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilations=dilations,
            use_bias=use_bias,
            num_groups=num_groups,
            num_deform_groups=num_deform_groups,
            trainable=trainable)(x, offset, mask)
    return out
5,330,047
def endtiming(fn):
    """
    Decorator used to end timing.

    Tracks the stop timestamps of the first and second calls on the wrapped
    object, and after NITER further calls writes per-request statistics to
    an output file, dumps profiler stats, and terminates the process.
    """
    from functools import wraps

    NITER = 10000

    @wraps(fn)
    def new(*args, **kw):
        ret = fn(*args, **kw)
        obj = args[0]
        if obj.firststoptime == 0:
            obj.firststoptime = time.time()
        elif obj.secondstoptime == 0:
            obj.secondstoptime = time.time()
        elif obj.count == NITER:
            now = time.time()
            total = now - obj.secondstarttime
            perrequest = total / NITER
            filename = "output/%s-%s" % (str(len(obj.groups[NODE_REPLICA]) + 1),
                                         str(len(obj.groups[NODE_ACCEPTOR])))
            # Record: numreplicas, numacceptors, perrequest, total
            # (context manager replaces the unclosed open/close pair).
            with open("./" + filename, "a") as outputfile:
                outputfile.write("%s\t%s\t%s\t%s\n"
                                 % (str(len(obj.groups[NODE_REPLICA]) + 1),
                                    str(len(obj.groups[NODE_ACCEPTOR])),
                                    str(perrequest), str(total)))
            obj.count += 1
            sys.stdout.flush()
            profile_off()
            profilerdict = get_profile_stats()
            # Python 3 port: items() + index-based sort key replace the old
            # iteritems()/tuple-parameter lambda (Python 2 only syntax).
            for key, value in sorted(profilerdict.items(),
                                     key=lambda item: (item[1][2], item[0])):
                print("%s: %s" % (key, value))
            time.sleep(10)
            sys.stdout.flush()
            os._exit(0)
        else:
            obj.count += 1
        return ret
    return new
5,330,048
def web_test_named_executable(testonly = True, **kwargs):
    """Wrapper around web_test_named_executable to correctly set defaults.

    Args:
      testonly: whether the target is testonly; defaulted to True here so
        callers get a test-only target unless they opt out explicitly.
      **kwargs: forwarded unchanged to _web_test_named_executable.
    """
    _web_test_named_executable(testonly = testonly, **kwargs)
5,330,049
def CreateRailFrames(thisNurbsCurve, parameters, multiple=False):
    """
    Computes relatively parallel rail sweep frames at specified parameters.

    Args:
        parameters (IEnumerable<double>): A collection of curve parameters.

    Returns:
        Plane[]: An array of planes if successful, or an empty array on failure.
    """
    endpoint = "rhino/geometry/nurbscurve/createrailframes-nurbscurve_doublearray"
    if multiple:
        # Batch mode: pair each curve with its parameter collection.
        endpoint += "?multiple=true"
        payload = list(zip(thisNurbsCurve, parameters))
    else:
        payload = [thisNurbsCurve, parameters]
    return Util.ComputeFetch(endpoint, payload)
5,330,050
def news(stock):
    """Analyze analyst recommendations using keywords and assign values to them.

    :param stock: ticker symbol of the stock that will be analyzed
    :return: recommendation score; values above 1 suggest net-positive
        analyst sentiment, below 1 net-negative (heuristic weighting)
    """
    stock = yf.Ticker(str(stock))
    # NOTE(review): scrapes the string repr of the recommendations
    # DataFrame — fragile; confirm it still matches yfinance output.
    reco = str(stock.recommendations) # Stands for recomend
    reco = reco.split()
    reco.reverse()
    # Keeps the first 15 tokens of the reversed dump plus the last one —
    # presumably "the most recent recommendations"; confirm intent.
    del reco[15 :-1]

    #### KEY WORDS ###
    buy = reco.count("Buy") #Means price is going up = Good
    sell = reco.count("Sell") #Means price is going down = Bad
    hold = reco.count("Hold") #Means price is going to increase = Good
    neutral = reco.count("Neutral") #Means price is not going to drastically change = Neutral
    overweight = reco.count("Overweight") #Means stock is better value for money than others = Good
    equalweight = reco.count("Equal-Weight") #Means stock is about the same value compared to others = Neutral
    underweight = reco.count("Underweight") #Means stock is worse value than what it is assessed to be = Bad
    perform = reco.count("Perform") #Means stock performance is on par with the industry average = Neutral
    outperform = reco.count("Outperform") #Means stock performance will be slightly better than industry = Good
    underperform = reco.count("Underperform") #Means stock performance will be slightly worse than industry = Bad

    # Weighted positive/negative keyword ratio; the zero-denominator and
    # zero-numerator branches substitute damped/boosted scores instead of
    # dividing by zero.
    if (buy + hold + neutral + equalweight + overweight + outperform) == 0:
        news = .95 / (sell + underweight + perform + underperform)
    elif (sell + underweight + perform + underperform) == 0:
        news = 1.05 * (buy + .5 * hold + .1 * neutral + .1 * equalweight + overweight + outperform)
    else:
        news = (buy + .5 * hold + .1 * neutral + .1 * equalweight + overweight + outperform)/(sell + underweight + perform + underperform)

    # NOTE(review): mirrors scores below 0.5 to above 0.5 — presumably to
    # bound the penalty for weak sentiment; confirm intended behavior.
    if news < 1:
        if news < .5:
            news = 1 - news
    return news
5,330,051
async def test_tilt_via_topic_altered_range(opp, mqtt_mock):
    """Test tilt status via MQTT with altered tilt range."""
    assert await async_setup_component(
        opp,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command-topic",
                "tilt_status_topic": "tilt-status-topic",
                "tilt_min": 0,
                "tilt_max": 50,
            }
        },
    )
    await opp.async_block_till_done()

    def reported_tilt():
        """Return the tilt position the cover entity currently reports."""
        return opp.states.get("cover.test").attributes[ATTR_CURRENT_TILT_POSITION]

    # Device range [0, 50] should be rescaled onto the standard [0, 100].
    for raw_payload, expected_position in (("0", 0), ("50", 100), ("25", 50)):
        async_fire_mqtt_message(opp, "tilt-status-topic", raw_payload)
        assert reported_tilt() == expected_position
5,330,052
def get_pulse_coefficient(pulse_profile_dictionary, tt):
    """
    Evaluate a trapezoidal pulse envelope at time ``tt``.

    The envelope rises smoothly from 0 to 1 over ``rise_time`` starting at
    ``start_time``, stays flat at 1 for ``flat_time``, and falls back to 0
    over ``fall_time``; the result is scaled by the amplitude ``a0``.
    Outside the open interval (start, end) the envelope is zero.

    :param pulse_profile_dictionary: dict with keys ``start_time``,
        ``rise_time``, ``flat_time``, ``fall_time`` and ``a0``
    :param tt: the time at which to evaluate the envelope
    :return: the scaled envelope value at ``tt``
    """
    t_begin = pulse_profile_dictionary["start_time"]
    rise = pulse_profile_dictionary["rise_time"]
    flat = pulse_profile_dictionary["flat_time"]
    fall = pulse_profile_dictionary["fall_time"]
    t_finish = t_begin + rise + flat + fall

    coefficient = 0
    if t_begin < tt < t_finish:
        rise_done = t_begin + rise
        flat_done = rise_done + flat
        if tt <= rise_done:
            # Ramp up: 0 -> 1 over the rise interval.
            coefficient = _get_rise_fall_coeff_((tt - t_begin) / rise)
        elif tt < flat_done:
            # Flat top of the pulse.
            coefficient = 1.0
        else:
            # Ramp down: 1 -> 0 over the fall interval.
            coefficient = 1.0 - _get_rise_fall_coeff_((tt - flat_done) / fall)

    # Scale the unit-height envelope by the configured amplitude.
    coefficient *= pulse_profile_dictionary["a0"]
    return coefficient
5,330,053
def parse_variable_char(packed):
    """Map a 6-bit packed character code to its ASCII representation.

    Code 0 is the empty string, 1-10 are digits '0'-'9', 11-36 are 'A'-'Z',
    37-62 are 'a'-'z', and anything else maps to '_'.
    """
    if packed == 0:
        return ""
    # Each (low, high, base_char, base_code) row covers one contiguous range.
    for low, high, base in ((1, 10, ord('0') - 1),
                            (11, 36, ord('A') - 11),
                            (37, 62, ord('a') - 37)):
        if low <= packed <= high:
            return chr(base + packed)
    return "_"
5,330,054
def do_emulator_error_plots(
    data: PowerSpecs,
    means_mf: List[np.ndarray],
    means_sf: List[np.ndarray],
    pred_exacts_mf: List[np.ndarray],
    pred_exacts_sf: List[np.ndarray],
    label_mf: str = "NARGP",
    label_sf: str = "HF only",
    figure_name: str = "",
):
    """
    1. predicted / exact power spectrum
    2. absolute error plot

    Draws, for each emulator, the mean absolute relative error with a
    shaded min/max band, then saves the figure and closes it.
    """
    k_values = 10 ** data.kf

    # Same drawing order as before: multi-fidelity first (C0), then
    # single-fidelity (C1), so legend order and colors are preserved.
    for predictions, curve_label, color in (
        (pred_exacts_mf, label_mf, "C0"),
        (pred_exacts_sf, label_sf, "C1"),
    ):
        # |pred/exact - 1| per sample; mean curve plus min/max envelope.
        errors = np.abs(np.array(predictions) - 1)
        plt.loglog(k_values, np.mean(errors, axis=0), label=curve_label, color=color)
        plt.fill_between(
            k_values,
            y1=np.min(errors, axis=0),
            y2=np.max(errors, axis=0),
            color=color,
            alpha=0.3,
        )

    plt.legend()
    plt.ylabel(r"$| P_\mathrm{predicted}(k) / P_\mathrm{true}(k) - 1|$")
    plt.xlabel(r"$k (h/\mathrm{Mpc})$")

    save_figure("absolute_errors_" + figure_name)
    plt.close()
    plt.clf()
5,330,055
def get_feature_vector(feature_id, cohort_id_array):
    """
    Fetches the data from BigQuery tables for a given feature identifier
    and one or more stored cohorts. Returns the intersection of the samples
    defined by the feature identifier and the stored cohort.

    Each returned data point is represented as a dict containing patient,
    sample and aliquot barcodes, and the value as defined by the feature
    identifier.

    Args:
        feature_id: Feature identifier
        cohort_id_array: Array of cohort identifiers (integers)

    Returns:
        (value_type, items): the provider's value type and the data as an
        array of dicts.
    """
    provider = FeatureProviderFactory.from_feature_id(feature_id)
    cohort_settings = settings.GET_BQ_COHORT_SETTINGS()
    result = provider.get_data(
        cohort_id_array, cohort_settings.dataset_id, cohort_settings.table_id
    )

    items = []
    for row in result:
        record = {field: row[field] for field in ('case_id', 'sample_id', 'aliquot_id')}
        value = provider.process_data_point(row)
        # TODO refactor missing value logic
        record['value'] = 'NA' if value is None else value
        items.append(record)

    return provider.get_value_type(), items
5,330,056
def batch_get_logs( jobs: List[AWSBatchJob], jobs_db: AioAWSBatchDB = None, aio_batch_config: AWSBatchConfig = None, ): """ Get job logs. :param jobs: any AWSBatchJob :param jobs_db: an optional jobs-db to persist job data; this is only applied if an aio_batch_config is not provided :param aio_batch_config: a custom AWSBatchConfig; if provided, it is responsible for providing any optional jobs-db :return: each job maintains state, so this function returns nothing """ # The polling is kept to a minimum to avoid interference with the batch API; # max_pool_connections = 1 is used because of details in aio-aws where it # creates a new client for each monitoring task (that could change in new # releases of aio-aws). if aio_batch_config is None: # AWS Batch logs has limited bandwidth, so default # settings try to avoid rate throttling aio_batch_config = AWSBatchConfig( aio_batch_db=jobs_db, min_jitter=3, max_jitter=8, min_pause=2, max_pause=10, start_pause=2, max_pool_connections=1, sem=100, ) asyncio.run(aio_batch_get_logs(jobs=jobs, config=aio_batch_config))
5,330,057
def transcript_iterator(gff_iterator, strict=True):
    """Iterate over the contents of a gtf file, yielding lists of
    consecutive entries that share the same transcript id.

    Features without a transcript or gene id are ignored.  The entries
    for one transcript must be consecutive in the file; when *strict* is
    set, an AssertionError is raised if a transcript/gene id pair
    reappears after a different one was seen.
    """
    current_key = None
    group = []
    seen_keys = set()

    for feature in gff_iterator:
        # skip entries that lack a transcript or gene id
        try:
            key = feature.transcript_id + feature.gene_id
        except (KeyError, AttributeError):
            continue

        if key != current_key:
            if current_key:
                yield group
            group = []
            assert not strict or key not in seen_keys, \
                "duplicate entry: %s" % key
            seen_keys.add(key)
            current_key = key

        group.append(feature)

    if current_key:
        yield group
5,330,058
def analyse(tx):
    """
    Analyses a given set of features, in place.

    Marks the features with zero variance as the features to be deleted from the
    data set. Replaces each instance of a null (-999) valued feature point with
    the MEDIAN of the non-null valued feature points (the original docstring
    said "mean", but the code has always used the median). Also handles the
    outliers by clipping values to within two standard deviations of the
    column mean.

    Note: ``tx`` is modified in place.

    Args:
        tx: the numpy array representing the given set of features,
            shape (n_samples, n_features)
    Returns:
        columns_to_remove: indices of the features with zero variance, which
            the caller should remove from the array
    """
    num_cols = tx.shape[1]
    print('\nNumber of columns in the data matrix: ', num_cols)
    columns_to_remove = []
    print('Analysis for data:\n')
    for col in range(num_cols):
        current_col = tx[:, col]
        if len(np.unique(current_col)) == 1:
            # constant column: zero variance, carries no information
            print('The column with index ', col, ' is all the same, it will be deleted.')
            columns_to_remove.append(col)
        else:
            # replace the -999 sentinel with the median of the valid entries
            current_col[current_col == -999] = np.median(current_col[current_col != -999])

            # clip outliers to mean +/- 2 standard deviations
            std_current_col = np.std(current_col)
            mean_current_col = np.mean(current_col)

            lower_bound = mean_current_col - 2 * std_current_col
            upper_bound = mean_current_col + 2 * std_current_col

            current_col[current_col < lower_bound] = lower_bound
            current_col[current_col > upper_bound] = upper_bound
            print('null values in the ', col, ' indexed column are replaced with the median and outliers are handled.')
    return columns_to_remove
5,330,059
def validate_word_syntax(word):
    """
    Validate that a string looks like an English word: only alphabetic
    characters, hyphens and internal whitespace are allowed.

    Fixes the original behavior where a whitespace-only string (e.g. "   ")
    validated as True because the pattern accepted an empty match.

    :param word: string to validate
    :return: True if the stripped string is non-empty and matches the
        allowed character set, False otherwise
    """
    import re  # stdlib re suffices for this simple character-class pattern

    stripped = word.strip()
    # empty or whitespace-only input is never a valid word
    if not stripped:
        return False
    return bool(re.match(r'^[a-zA-Z\s-]+$', stripped))
5,330,060
def select_hierarchy(root=None, add=False):
    """
    Select the hierarchy of the given node.

    If no object is given, the current selection is used.

    :param root: str, node whose hierarchy should be selected
    :param add: bool, whether newly selected objects should be added to the
        current selection instead of replacing it
    :raises NotImplementedError: always -- this is an abstract placeholder
        that a DCC-specific backend must override
    """
    raise NotImplementedError()
5,330,061
def next_page(context):
    """
    Resolve the next page for signup or login.

    The GET query string takes priority, then POST data, then the template
    context variable; the default is an empty string.
    """
    request = context.request
    for candidate in (request.GET, request.POST, context):
        if "next" in candidate:
            return candidate["next"]
    return ""
5,330,062
def main(eml: str):
    """
    Encodes entity names in the EML file to the PASTA equivalent.

    \b
    EML: EML file
    """
    # NOTE(review): the "\b" marker above is Click help-text formatting, so
    # this is presumably a Click command whose decorator sits above this
    # view -- confirm before changing the docstring (it is the CLI help).
    p = Path(eml)
    if not p.exists():
        msg = f"File '{eml}' not found."
        raise FileNotFoundError(msg)
    eml_file = p.read_text()

    names = list()
    xml = etree.fromstring(eml_file.encode("utf"))
    # Collect entity names from every EML entity type in the dataset.
    datatables = xml.findall("./dataset/dataTable")
    names += get_entity_names(datatables)
    spatialrasters = xml.findall("./dataset/spatialRaster")
    names += get_entity_names(spatialrasters)
    spatialvectors = xml.findall("./dataset/spatialVector")
    names += get_entity_names(spatialvectors)
    otherentities = xml.findall("./dataset/otherEntity")
    names += get_entity_names(otherentities)

    # Print each entity name together with its MD5 hash (the encoded form).
    for name in names:
        md5 = get_md5(name)
        print(f"{md5} - {name}")
5,330,063
def process_references(ctx: Context):
    """
    Processes references in the Markdown text.

    Every configured reference abbreviation that occurs in
    ``ctx.markdown_text`` is replaced with its Markdown form, and the
    matching LaTeX entry is collected (sorted) into ``ctx.latex_refs``.

    Args:
        ctx: Context
    """
    ctx.latex_refs = list()
    for abbrev, raw_ref in ctx.settings.references.items():
        # only emit references that are actually used in the document
        if abbrev in ctx.markdown_text:
            ref = Reference(raw_ref, abbrev)
            ctx.latex_refs.append(ref.to_latex())
            # NOTE(review): plain substring replacement -- an abbreviation
            # that is a substring of another token would also be replaced;
            # confirm abbreviations are chosen to avoid collisions.
            ctx.markdown_text = ctx.markdown_text.replace(abbrev, ref.to_markdown())
            ctx.debug(f'used reference: {abbrev}')
    ctx.latex_refs = sorted(ctx.latex_refs)
5,330,064
def make_train_test_sets(input_matrix, label_matrix, train_per_class):
    """Return ((training_inputs, training_labels), (testing_inputs, testing_labels)).

    The first *train_per_class* samples seen for each distinct label row go
    into the training set; every later sample with that label goes into the
    testing set.

    Args:
        input_matrix: attributes matrix. Each row is sample, each column is attribute.
        label_matrix: labels matrix. Each row is sample, each column is label.
        train_per_class: Number of samples for each class in training set.

    Raises:
        ValueError: if every sample ended up in the training set.
    """
    train_inputs, train_labels = [], []
    test_inputs, test_labels = [], []
    seen_counts = {}

    for sample, label in zip(input_matrix, label_matrix):
        key = tuple(label)
        count = seen_counts.get(key, 0)
        if count < train_per_class:
            # still need more training samples for this label
            train_inputs.append(sample)
            train_labels.append(label)
        else:
            # quota reached: everything else becomes test data
            test_inputs.append(sample)
            test_labels.append(label)
        seen_counts[key] = count + 1

    if test_inputs == []:
        raise ValueError('train_per_class too high, no testing set')

    return ((numpy.array(train_inputs), numpy.array(train_labels)),
            (numpy.array(test_inputs), numpy.array(test_labels)))
5,330,065
def add_coords_table(document: Document, cif: CifContainer, table_num: int):
    """
    Adds the table with the atom coordinates.
    :param document: The current word document.
    :param cif: the cif object from CifContainer.
    :param table_num: number of the previous table; incremented here.
    :return: the incremented table number
    """
    atoms = list(cif.atoms())
    table_num += 1
    # Heading reads: "Table N. Atomic coordinates and Ueq [Å2] for <name>"
    headline = "Table {}. Atomic coordinates and ".format(table_num)
    h = document.add_heading(headline, 2)
    h.add_run('U').font.italic = True
    h.add_run('eq').font.subscript = True
    h.add_run('{}[{}'.format(protected_space, angstrom))
    h.add_run('2').font.superscript = True
    h.add_run('] for {}'.format(cif.block.name))
    coords_table = document.add_table(rows=len(atoms) + 1, cols=5, style='Table Grid')
    # Atom x y z U(eq)
    head_row = coords_table.rows[0]
    head_row.cells[0].paragraphs[0].add_run('Atom').bold = True
    px = head_row.cells[1].paragraphs[0]
    ar = px.add_run('x')
    ar.bold = True
    ar.italic = True
    py = head_row.cells[2].paragraphs[0]
    ar = py.add_run('y')
    ar.bold = True
    ar.italic = True
    pz = head_row.cells[3].paragraphs[0]
    ar = pz.add_run('z')
    ar.bold = True
    ar.italic = True
    pu = head_row.cells[4].paragraphs[0]
    ar = pu.add_run('U')
    ar.bold = True
    ar.italic = True
    ar2 = pu.add_run('eq')
    ar2.bold = True
    ar2.font.subscript = True
    # having a list of column cells before is *much* faster!
    col0_cells = coords_table.columns[0].cells
    col1_cells = coords_table.columns[1].cells
    col2_cells = coords_table.columns[2].cells
    col3_cells = coords_table.columns[3].cells
    col4_cells = coords_table.columns[4].cells
    rowidx = 1
    # Fill one body row per atom; at[0] is the label, at[2..4] the fractional
    # coordinates and at[7] the Ueq value (per cif.atoms() tuple layout --
    # TODO confirm against CifContainer.atoms()).
    for at in atoms:
        c0, c1, c2, c3, c4 = col0_cells[rowidx], col1_cells[rowidx], col2_cells[rowidx], \
                             col3_cells[rowidx], col4_cells[rowidx]
        rowidx += 1
        c0.text = at[0]  # label
        c1.text = (str(at[2]))  # x
        c2.text = (str(at[3]))  # y
        c3.text = (str(at[4]))  # z
        c4.text = (str(at[7]))  # ueq
    # Footnote below the table explaining the Ueq definition.
    p = document.add_paragraph()
    p.style = document.styles['tabunterschr']
    p.add_run('U').font.italic = True
    p.add_run('eq').font.subscript = True
    p.add_run(' is defined as 1/3 of the trace of the orthogonalized ')
    p.add_run('U').font.italic = True
    ij = p.add_run('ij')
    ij.font.subscript = True
    ij.font.italic = True
    p.add_run(' tensor.')
    set_column_width(coords_table.columns[0], Cm(2.3))
    set_column_width(coords_table.columns[1], Cm(2.8))
    set_column_width(coords_table.columns[2], Cm(2.8))
    set_column_width(coords_table.columns[3], Cm(2.8))
    set_column_width(coords_table.columns[4], Cm(2.8))
    document.add_paragraph()
    return table_num
5,330,066
def bz(xp, yp, zp, spheres):
    """
    Calculates the z component of the magnetic induction produced by spheres.

    .. note:: Input units are SI. Output is in nT

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the physical property
        ``'magnetization'``. Spheres without ``'magnetization'`` will be
        ignored. The ``'magnetization'`` must be a vector.

    Returns:

    * bz: array
        The z component of the magnetic induction

    Example:

        >>> from fatiando import mesher, gridder, utils
        >>> # Create a model formed by two spheres
        >>> # The magnetization of each sphere is a vector
        >>> model = [
        ...     mesher.Sphere(1000, 1000, 600, 500,
        ...         {'magnetization':utils.ang2vec(13, -10, 28)}),
        ...     mesher.Sphere(-1000, -1000, 600, 500,
        ...         {'magnetization':utils.ang2vec(10, 70, -5)})]
        >>> # Create a regular grid at 100m height
        >>> shape = (4, 4)
        >>> area = (-3000, 3000, -3000, 3000)
        >>> xp, yp, zp = gridder.regular(area, shape, z=-100)
        >>> # Calculate the bz component
        >>> for b in bz(xp, yp, zp, model):
        ...     print('%15.8e' % b)
        -1.13152279e+01
        -3.24362266e+01
        -1.63235805e+01
        -4.48136597e+00
        -1.27492012e+01
         2.89101261e+03
        -1.30263918e+01
        -9.64182996e+00
        -6.45566985e+00
         3.32987598e+01
        -7.08905624e+02
        -5.55139945e+01
        -1.35745203e+00
         2.91949888e+00
        -2.78345635e+01
        -1.69425703e+01

    """
    # Compare all three shapes: the original chained form
    # ``xp.shape != yp.shape != zp.shape`` only raised when BOTH adjacent
    # pairs differed, silently accepting e.g. xp.shape == yp.shape != zp.shape.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    size = len(xp)
    # numpy.float was removed in NumPy 1.20+; float64 is the equivalent dtype.
    res = numpy.zeros(size, dtype=numpy.float64)
    for sphere in spheres:
        if sphere is None or ('magnetization' not in sphere.props):
            continue
        # Get the magnetization vector components
        mx, my, mz = sphere.props['magnetization']
        # C extension accumulates each sphere's contribution into res.
        _sphere.bz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
                   mx, my, mz, res)
    # Convert from SI to nT.
    res *= CM * T2NT
    return res
5,330,067
def update_stocks():
    """
    method to update the data (used by the spark service)
    :return: an empty JSON object and HTTP status 200
    """
    global stocks
    body = request.get_json(silent=True)
    '''
    {
        "data" : [
            {
                "symbol" : "string",
                "ask_price" : "string",
                "last_sale_time" : "string",
                "timestamp" : 0
            }
        ],
        "companies" : {
            "symbol_x" : "string"
        }
    }
    '''
    data = body['data']
    companies = body['companies']
    # NOTE(review): get_json(silent=True) returns None on malformed JSON, so
    # the subscripts above would raise TypeError -- confirm the spark service
    # always sends valid JSON, or validate `body` first.
    for stock_data in data:
        symbol = stock_data['symbol']

        # if stock does not exist
        if symbol not in stocks:
            # first sighting: create the entry with a one-element history
            obj = {
                'companyName': companies[symbol],
                'stockPrices': [
                    {
                        'value': stock_data['ask_price'],
                        'timestamp': stock_data['timestamp']
                    }
                ]
            }
            stocks[symbol] = obj
        else:
            # add to existing stock price history
            stocks[symbol]['stockPrices'].append({'value': stock_data['ask_price'],
                                                  'timestamp': stock_data['timestamp']})

    return "{}", 200
5,330,068
def chunks_from_iterable(iterable: Iterable[T], size: int) -> Iterable[Sequence[T]]:
    """Generate adjacent chunks of data as tuples of at most *size* items."""
    source = iter(iterable)

    def next_chunk():
        return tuple(itertools.islice(source, size))

    # iter(callable, sentinel): keep calling next_chunk until it returns ().
    return iter(next_chunk, ())
5,330,069
def p_transitions(p):
    """ transition_list : transition """
    # NOTE: the docstring above is the PLY/yacc grammar production for this
    # rule -- it is parsed by the library and must not be reworded.
    # Base case: a single transition starts a new one-element list.
    p[0] = [p[1]]
5,330,070
def load_ct_phantom(phantom_dir): """ load the CT data from a directory Parameters ---------- phantom_dir : str The directory contianing the CT data to load Returns ------- ndarray the CT data array list the spacing property for this CT """ # dicom parameters dcm = dicom.read_file(phantom_dir + "image_0") row_pixel = dcm.Rows col_pixel = dcm.Columns row_pixel_spacing = np.round(dcm.PixelSpacing[0], 2) col_pixel_spacing = np.round(dcm.PixelSpacing[1], 2) slice_thickness = np.round(dcm.SliceThickness, 2) num_slices = len(os.listdir(phantom_dir)) phantom = np.zeros((num_slices, row_pixel, col_pixel)) for i in range(num_slices): dcm = dicom.read_file(phantom_dir + "image_" + str(i)) dcm.image = dcm.pixel_array * dcm.RescaleSlope + dcm.RescaleIntercept phantom[i, :, :] = dcm.image.copy() # Volume Parameters: volume_size = [slice_thickness, row_pixel_spacing, col_pixel_spacing] return phantom, volume_size
5,330,071
def summary_overall(queryset: QuerySet) -> Dict[str, Decimal]:
    """Summarize how much money was spent over the whole queryset."""
    total = sum(amount for (amount,) in queryset.values_list('amount'))
    return {'overall': total}
5,330,072
def test_min_middleware_with_exclude(monkeypatch, test_case: dict[str, str], settings) -> None:
    """Very simple basic minify test, with HMIN_EXCLUDE paths configured."""
    settings.HMIN_EXCLUDE = ["hello/", "unrelated/", "strange/trash/happens/"]
    # The middleware presumably captures HMIN_EXCLUDE at import time, hence
    # the reload after changing the setting -- confirm in middleware module.
    importlib.reload(middleware)
    _run_inner_middleware_test(test_case)
5,330,073
def test_calculate_smoothness_cost():
    """
    The Laplacian of a constant field is zero
    """
    # Uniform wind field: smoothness penalty and its gradient must vanish.
    u = 10*np.ones((10, 10, 10))
    v = 10*np.ones((10, 10, 10))
    w = 0*np.ones((10, 10, 10))
    dx = 100.0
    dy = 100.0
    dz = 100.0
    z = np.arange(0, 1000.0, 100)
    # NOTE(review): dx/dy/dz/z are unused -- presumably left over from an
    # older calculate_smoothness_cost signature; confirm and remove.
    cost = pydda.cost_functions.calculate_smoothness_cost(
        u, v, w)
    cost_grad = pydda.cost_functions.calculate_smoothness_gradient(
        u, v, w)
    assert cost == 0
    assert np.all(cost_grad == 0)

    """ Now, put in a discontinuity """
    u[:, :, 5] = -10
    # A jump in u makes the Laplacian non-zero: the cost must be positive.
    cost = pydda.cost_functions.calculate_smoothness_cost(
        u, v, w)
    assert cost > 0
5,330,074
def _get_trained_ann(train_exo: "ndarray", train_meth: "ndarray") -> "QdaisANN2010":
    """Return trained ANN, loading cached weights when available.

    Weights are read from ./assets/ann.pt; when that file is missing, the
    network is trained from scratch (Adam + MSE, 1024 epochs, batch 64,
    LR reduced on plateau) and the weights are cached back to the same path.
    """
    train_data = BiogasData(train_exo, train_meth)
    ann = QdaisANN2010(train_data.train_exo.shape[1])
    try:
        # fast path: reuse previously trained weights
        ann.load_state_dict(torch.load("./assets/ann.pt"))
    except IOError:
        optimizer = optim.Adam(ann.parameters())
        scheduler = ReduceLROnPlateau(optimizer)  # lowers LR when loss plateaus
        criterion = nn.MSELoss()
        train_loader = DataLoader(train_data, batch_size=64, shuffle=True)
        for _ in range(1024):  # number of epochs
            running_loss = 0
            for _, (inputs, labels) in enumerate(train_loader):
                optimizer.zero_grad()
                loss = criterion(ann(inputs), labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
            # scheduler keyed on the summed epoch loss
            scheduler.step(running_loss)
            print(f"Running loss: {running_loss / len(train_data)}", end="\r", flush=True)
        torch.save(ann.state_dict(), "./assets/ann.pt")
    return ann
5,330,075
def pi_float():
    """Compute pi with native floats, iterating a rational series until
    the running sum stops changing at machine precision."""
    total = 3.0
    previous = 0.0
    term = 3.0
    num, num_step = 1, 0
    den, den_step = 0, 24
    while total != previous:
        previous = total
        num, num_step = num + num_step, num_step + 8
        den, den_step = den + den_step, den_step + 32
        term = (term * num) / den
        total += term
    return total
5,330,076
def test_atoms_material_cell(uo2, water):
    """
    Test if correct number of atoms is returned.
    Also check if Cell.atoms still works after volume/material was changed
    """
    c = openmc.Cell(fill=uo2)
    c.volume = 2.0
    expected_nucs = ['U235', 'O16']

    # Precalculate the expected number of atoms: UO2 has one U per two O,
    # so M is the 1:2-weighted average nuclide mass.
    M = (atomic_mass('U235') + 2 * atomic_mass('O16')) / 3
    expected_atoms = [
        1/3 * uo2.density/M * AVOGADRO * 2.0,  # U235
        2/3 * uo2.density/M * AVOGADRO * 2.0   # O16
    ]

    tuples = c.atoms.items()
    for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
        assert nuc == t[0]
        assert atom_num == pytest.approx(t[1])

    # Change volume and check if OK
    c.volume = 3.0
    expected_atoms = [
        1/3 * uo2.density/M * AVOGADRO * 3.0,  # U235
        2/3 * uo2.density/M * AVOGADRO * 3.0   # O16
    ]

    tuples = c.atoms.items()
    for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
        assert nuc == t[0]
        assert atom_num == pytest.approx(t[1])

    # Change material and check if OK (H2O: two H per one O)
    c.fill = water
    expected_nucs = ['H1', 'O16']
    M = (2 * atomic_mass('H1') + atomic_mass('O16')) / 3
    expected_atoms = [
        2/3 * water.density/M * AVOGADRO * 3.0,  # H1
        1/3 * water.density/M * AVOGADRO * 3.0   # O16
    ]

    tuples = c.atoms.items()
    for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
        assert nuc == t[0]
        assert atom_num == pytest.approx(t[1])
5,330,077
def work_permit_notify_grd_supervisor(reminder_indicator):  # at 9am checks grd supervisor approval on online application
    """
    Notify GRD Supervisor to remind accepting WP on ASHAL application.

    Only acts when reminder_indicator == 'yellow'; any other value is a
    no-op.  (NOTE(review): presumably other indicator levels are handled by
    sibling functions -- confirm.)
    """
    # Get work permit list: submitted permits the operator applied for on
    # ASHAL that the supervisor has not yet approved online.
    if reminder_indicator == 'yellow':
        filters = {'docstatus': 1, 'grd_operator_apply_work_permit_on_ashal': ['=', 'Yes'],
                   'grd_supervisor_check_and_approval_wp_online': ['=', 'No']}
        work_permit_list = frappe.db.get_list('Work Permit', filters, ['name', 'grd_supervisor'])
        email_notification_to_grd_user('grd_supervisor', work_permit_list, reminder_indicator, 'Check and Accept')
5,330,078
def iso3_to_country(iso3):
    """
    Convert an ISO3 country code to the short version of the country name.

    The special value 'Global' is passed through unchanged.
    """
    return 'Global' if iso3 == 'Global' else coco.convert(names=iso3, to='name_short')
5,330,079
def test_bivariate(N, n_neighbors, rng, noise):
    """Test with bivariate normal variables"""
    mu = np.zeros(2)
    cov = np.array([[1., 0.8], [0.8, 1.0]])
    xy_gauss = rng.multivariate_normal(mu, cov, size=N)
    x, y = xy_gauss[:, 0], xy_gauss[:, 1]
    # z is independent of (x, y), so conditioning on it should leave the
    # mutual information unchanged: cmi ~ mi ~ cmi_analytic.
    z = rng.normal(size=N)

    # Analytic MI of a bivariate Gaussian with unit variances:
    # I(x; y) = -0.5 * log(det(cov))
    cmi_analytic = -0.5 * np.log(det(cov))

    cmi = compute_cmi(x, y, z, n_neighbors, noise)
    mi = compute_mi(x, y, n_neighbors, noise)

    return [cmi, mi, cmi_analytic]
5,330,080
def osm_tile_number_to_latlon(xtile, ytile, zoom):
    """Return (latitude, longitude) in degrees of the north-west corner of
    an OSM slippy-map tile, given its x/y tile numbers and the zoom level."""
    tiles_per_axis = 2.0 ** zoom
    lon_deg = xtile / tiles_per_axis * 360.0 - 180.0
    # inverse Web-Mercator for the latitude
    lat_deg = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * ytile / tiles_per_axis))))
    return (lat_deg, lon_deg)
5,330,081
def to_timedelta(arg: numpy.ndarray, unit: Literal["h"]):
    """
    usage.xarray: 3
    """
    # API stub: the "..." body is intentional.  The docstring appears to be
    # an auto-generated usage record (used 3 times by xarray) -- confirm
    # with the stub generator before editing it.
    ...
5,330,082
def prodtab_111():
    """Return the angular distances between the (1, 1, 1) plane and each of
    the reference neighbour planes (up to (321) type) as a numpy array."""
    reference_plane = [1, 1, 1]
    return np.array([anglebetween(reference_plane, neighbour)
                     for neighbour in LISTNEIGHBORS_111])
5,330,083
def cuda_cos(a):
    """ Trigonometric cosine of GPUArray elements.

    Parameters:
        a (gpu): GPUArray with elements to be operated on.

    Returns:
        gpu: cos(GPUArray)

    Examples:
        >>> a = cuda_cos(cuda_give([0, pi / 4]))
        >>> a
        array([ 1., 0.70710678])
        >>> type(a)
        <class 'pycuda.gpuarray.GPUArray'>
    """
    # element-wise cosine, executed on the GPU via pycuda's cumath
    return pycuda.cumath.cos(a)
5,330,084
def top_diffs(spect: list, num_acids: int) -> list:
    """Find at least *num_acids* most frequent mass differences in [57, 200].

    Ties with the least frequent kept difference are also included, so the
    result may contain more than *num_acids* masses.  The input spectrum is
    NOT modified (the original version sorted and mutated it in place), and
    an empty spectrum yields an empty result instead of raising.

    :param spect: a cyclic spectrum to find differences in
    :type spect: list (of ints)
    :param num_acids: minimum number of candidate masses to keep
    :type num_acids: int
    :returns: the candidate amino-acid masses, most frequent first
    :rtype: list (of ints)
    """
    # work on a sorted copy; the spectrum must start with mass 0
    masses = sorted(spect)
    if not masses or masses[0] != 0:
        masses.insert(0, 0)

    # all positive pairwise differences
    diffs = [masses[i] - masses[j]
             for i in range(1, len(masses))
             for j in range(i - 1, -1, -1)]

    acids = []
    last_count = 0
    for mass, count in Counter(diffs).most_common():
        # leave once we have enough acids AND this mass does not tie the
        # least frequent one already kept
        if len(acids) >= num_acids and count < last_count:
            break
        # amino-acid masses are restricted to the [57, 200] window
        if 57 <= mass <= 200:
            acids.append(mass)
            last_count = count
    return acids
5,330,085
def form_sized_range(range_: Range, substs) -> typing.Tuple[
    SizedRange, typing.Optional[Symbol]
]:
    """Form a sized range from the original raw range.

    The bounds are rewritten with *substs* and the size ``upper - lower``
    is normalized by ``form_size``.  When a symbol exists in the ranges,
    it will be returned as the second result, or the second result will
    be None.

    :raises ValueError: when the range has no explicit bounds.
    """
    if not range_.bounded:
        raise ValueError(
            'Invalid range for optimization', range_,
            'expecting explicit bound'
        )

    # apply the substitutions to both bounds
    lower, upper = [
        i.xreplace(substs) for i in [range_.lower, range_.upper]
    ]
    size_expr = upper - lower
    size, symb = form_size(size_expr)
    return SizedRange(range_.label, size), symb
5,330,086
def create_html_app():  # pragma: no cover
    """Returns WSGI app that serves HTML pages."""
    # Debug mode is enabled only on the local dev server; ts-mon metrics
    # are flushed by a cron job living in the 'backend' module.
    app = webapp2.WSGIApplication(
        handlers.get_frontend_routes(), debug=utils.is_local_dev_server())
    gae_ts_mon.initialize(app, cron_module='backend')
    return app
5,330,087
def line_integrals(vs, uloc, vloc, kind="same"):
    """
    calculate line integrals along all islands

    Arguments:
        vs: model state object providing the grid spacings (dxu, dyu), the
            metric factor (cost) and the island boundary/direction masks
            (boundary_mask, line_dir_*_mask).
        uloc, vloc: velocity-like fields defined on the grid (last axis
            indexes islands).
        kind: "same" calculates only line integral contributions
            of an island with itself, while "full" calculates all possible
            pairings between all islands.
    """
    if kind == "same":
        s1 = s2 = (slice(None), slice(None), slice(None))
    elif kind == "full":
        # broadcast to an (x, y, island_i, island_j) cross product of islands
        s1 = (slice(None), slice(None), np.newaxis, slice(None))
        s2 = (slice(None), slice(None), slice(None), np.newaxis)
    else:
        raise ValueError("kind must be 'same' or 'full'")

    # per-edge contributions for each of the four boundary directions
    east = vloc[1:-2, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
        + uloc[1:-2, 2:-1, :] \
        * vs.dxu[1:-2, np.newaxis, np.newaxis] \
        * vs.cost[np.newaxis, 2:-1, np.newaxis]
    west = -vloc[2:-1, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
        - uloc[1:-2, 1:-2, :] \
        * vs.dxu[1:-2, np.newaxis, np.newaxis] \
        * vs.cost[np.newaxis, 1:-2, np.newaxis]
    north = vloc[1:-2, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
        - uloc[1:-2, 1:-2, :] \
        * vs.dxu[1:-2, np.newaxis, np.newaxis] \
        * vs.cost[np.newaxis, 1:-2, np.newaxis]
    south = -vloc[2:-1, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
        + uloc[1:-2, 2:-1, :] \
        * vs.dxu[1:-2, np.newaxis, np.newaxis] \
        * vs.cost[np.newaxis, 2:-1, np.newaxis]

    # sum over the spatial axes, keeping only boundary cells whose line
    # direction matches each contribution
    east = np.sum(east[s1] * (vs.line_dir_east_mask[1:-2, 1:-2]
                              & vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
    west = np.sum(west[s1] * (vs.line_dir_west_mask[1:-2, 1:-2]
                              & vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
    north = np.sum(north[s1] * (vs.line_dir_north_mask[1:-2, 1:-2]
                                & vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
    south = np.sum(south[s1] * (vs.line_dir_south_mask[1:-2, 1:-2]
                                & vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
    return east + west + north + south
5,330,088
def crop_black_borders(image, threshold=0):
    """Crop away any border rows/columns whose values are all <= threshold.

    A completely blank image is cropped down to 1x1.  Returns the cropped
    image (for color images, channels are preserved).
    """
    # reduce color images to a single channel by taking the per-pixel max
    flat = np.max(image, 2) if len(image.shape) == 3 else image
    assert len(flat.shape) == 2

    content_cols = np.where(np.max(flat, 0) > threshold)[0]
    if content_cols.size:
        content_rows = np.where(np.max(flat, 1) > threshold)[0]
        return image[content_rows[0]: content_rows[-1] + 1,
                     content_cols[0]: content_cols[-1] + 1]
    return image[:1, :1]
5,330,089
def update(): """ After start() is run, this function is run every frame until the back button is pressed """ # Follow the wall to the right of the car without hitting anything. scan = rc.lidar.get_samples() left_angle, left_dist = rc_utils.get_lidar_closest_point(scan, LEFT_WINDOW) right_angle, right_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_WINDOW) rc.display.show_lidar(scan, 128, 1000, [(left_angle, left_dist), (right_angle, right_dist)]) error = right_dist - left_dist maxError = 12 kP = 0.5 angle = rc_utils.clamp(kP * error / maxError, -1, 1) speed = DRIVE_SPEED # speed = rc_utils.clamp(math.cos(0.5 * math.pi * angle) * DRIVE_SPEED + MIN_SPEED, -1, 1) # smoothened version of -abs(angle) + 1 # https://www.desmos.com/calculator/24qctllaj1 print("Error: " + str(error)) rc.drive.set_speed_angle(speed, angle)
5,330,090
def change_password(): """Allows user to change password""" # Forget any user_id session.clear() # User reached route via POST (as by submitting a form via POST) if request.method == "POST": # If password and confirmation don't match, accuse error if request.form.get("new_password") != request.form.get("new_confirmation"): flash("The New Password and the Confirmation don't match. Try again.") return render_template("change.html") else: # Query database for username rows = db.execute( "SELECT * FROM users WHERE username = ?", request.form.get("username") ) # Ensure username exists and password is correct if len(rows) != 1 or not check_password_hash( rows[0]["hash"], request.form.get("old_password") ): flash("Invalid username and/or password.") return render_template("change.html") else: # Hashes new password before storying it into the database pass_hash = generate_password_hash( request.form.get("new_password"), method="pbkdf2:sha256", salt_length=8, ) # Store usersname and password into database db.execute( "UPDATE users SET hash = ? WHERE username = ?", pass_hash, request.form.get("username"), ) # Display a flash message that the password was changed flash("Password changed!") return render_template("change.html") # Request method = GET else: return render_template("change.html")
5,330,091
def add_placeholders(components):
    """Add placeholders for missing DATA ("CD") / INSTANCE ("CR") components.

    Any missing component is appended as its two-letter prefix followed by
    eleven 'C' characters.  The list is modified in place and returned.
    """
    prefixes_present = {component[:2] for component in components}
    for required in ("CD", "CR"):
        if required not in prefixes_present:
            components.append(required + ("C" * 11))
    return components
5,330,092
def clipsToHieroClipItems(clips):
    """
    Wrap each hiero clip in a HieroClipItem.

    @itemUsage hiero.items.HieroClipItem
    """
    from ..items import HieroClipItem

    if not clips:
        return []
    return [HieroClipItem(clip) for clip in clips]
5,330,093
def md5(s, raw_output=False):
    """Calculates the md5 hash of a given string.

    Returns the raw 16-byte digest when *raw_output* is true, otherwise
    the 32-character hexadecimal digest.
    """
    digest = hashlib.md5(s.encode())
    return digest.digest() if raw_output else digest.hexdigest()
5,330,094
def _write_file(path, contents, mode='w'): """Write the string to the specified path. Returns nothing if the write fails, instead of raising an IOError. """ try: with open(path, mode) as f: f.write(contents) except IOError: pass
5,330,095
def new_Q(T, ms, Ps, G):
    """Estimate a new (diagonal, real-valued) process-noise matrix Q.

    Accumulates time-averaged second moments of the smoothed gains:
        SIGMA = <P_k + m_k m_k^H>
        PHI   = <P_{k-1} + m_{k-1} m_{k-1}^H>
        C     = <P_k G_{k-1}^H + m_k m_{k-1}^H>
    and returns diag(real(SIGMA - C - C^H + PHI)).  This appears to follow
    the standard EM update for Q in a Kalman smoother -- confirm against
    the smoother implementation providing Ps and G.

    :param T: number of time steps (indices 0..T of ms/Ps are used)
    :param ms: per-step gain solutions, flattened by gains_vector
    :param Ps: per-step smoothed covariance matrices
    :param G: per-step smoother gain matrices
    :return: new Q as a real diagonal matrix
    """
    print("==> Tuning Q")
    SIGMA = np.zeros_like(Ps[0], dtype=np.complex128)
    PHI = np.zeros_like(Ps[0], dtype=np.complex128)
    C = np.zeros_like(Ps[0], dtype=np.complex128)
    shape = (Ps[0].shape[0], 1)
    for k in range(1, T + 1):
        m1 = gains_vector(ms[k]).reshape(shape)      # m_k as a column vector
        m2 = gains_vector(ms[k - 1]).reshape(shape)  # m_{k-1}
        SIGMA += Ps[k] + m1 @ m1.conj().T
        PHI += Ps[k - 1] + m2 @ m2.conj().T
        C += Ps[k] @ G[k - 1].conj().T + m1 @ m2.conj().T
    # average over the T time steps
    SIGMA *= 1/T
    PHI *= 1/T
    C *= 1/T

    # Diagonal real-values (off-diagonal structure is discarded)
    Q_diag = np.diag(SIGMA - C - C.conj().T + PHI).real

    # Return new Q (Q_diag is already real, so the trailing .real is a no-op)
    return np.diag(Q_diag).real
5,330,096
def _get_pk_message_increase(cache_dict: dict, project_list: list) -> str:
    """Build the PK progress message in incremental mode.

    Args:
        cache_dict: baseline amounts the incremental computation starts from.
        project_list: the projects taking part in the PK.

    Returns:
        The formatted PK progress message.
    """
    totals = _get_pk_amount(project_list)
    increases = {}
    for idol, total in totals.items():
        # delta against the cached baseline, rounded to cents
        increases[idol] = round(total - cache_dict[idol], 2)
    return _build_increase_pk_message(totals, increases)
5,330,097
def test_update_all_fields(mock_get_company_updates):
    """
    Test update_companies_from_dnb_service command with no options calls
    through to get_company_updates celery task successfully.
    """
    # NOTE: local name 'datetime' shadows the stdlib module within this test.
    datetime = '2019-01-01T00:00:00'
    call_command(
        'update_companies_from_dnb_service',
        datetime,
    )
    # With no field options given, the task must be invoked for all fields
    # (fields_to_update=None) since the provided timestamp.
    mock_get_company_updates.apply.assert_called_with(kwargs={
        'last_updated_after': datetime,
        'fields_to_update': None,
    })
5,330,098
def flatten(dic, keep_iter=False, position=None):
    """
    Returns a flattened dictionary from a dictionary of nested dictionaries
    and lists.  Keys are joined with '.'; any '.' inside a string key is
    rewritten to '_'.  With `keep_iter`, iterables are kept as values while
    also being flattened.
    """
    if not dic:
        return {}

    flat = {}
    for key, value in get_iter(dic):
        if isstr(key):
            # dots inside keys would collide with the position separator
            key = key.replace('.', '_')

        item_position = '%s.%s' % (position, key) if position else '%s' % key

        if is_iter(value):
            flat.update(flatten(dic[key], keep_iter, item_position))
            if keep_iter:
                flat[item_position] = value
        else:
            flat[item_position] = value
    return flat
5,330,099