content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import os


def eval_postprocess(positive_path, negative_type):
    """Compute link-prediction ranking metrics from dumped score files.

    For every file in ``positive_path``, pairs it with the model's ranking
    dump ``<name>_0.bin`` under ``config.result_path`` and computes the
    1-based rank of the true entity.

    :param positive_path: directory of int32 binary triples (head, rel, tail)
    :param negative_type: 'head' to score head replacement, anything else tail
    :returns: list of dicts with MRR / MR / HITS@{1,3,10} per example
    """
    # NOTE(review): relies on module-level `config` and `np` (numpy) defined
    # elsewhere in this file -- confirm they are in scope.
    files = os.listdir(positive_path)
    log = []
    for f in files:
        # The score file holds an argsort of entity scores for this example.
        score_file = os.path.join(config.result_path, f.split('.')[0] + '_0.bin')
        positive_file = os.path.join(positive_path, f)
        argsort = np.fromfile(score_file, np.int32)
        if negative_type == 'head':
            positive_arg = np.fromfile(positive_file, np.int32)[0]  # 0 is head
        else:
            positive_arg = np.fromfile(positive_file, np.int32)[2]  # 2 is tail
        # Position of the true entity in the sorted predictions, 1-based.
        ranking = np.where(argsort == positive_arg)[0][0]
        ranking = 1 + ranking
        log.append({
            'MRR': 1.0 / ranking,
            'MR': ranking,
            'HITS@1': 1.0 if ranking <= 1 else 0.0,
            'HITS@3': 1.0 if ranking <= 3 else 0.0,
            'HITS@10': 1.0 if ranking <= 10 else 0.0,
        })
    return log
a5bc76ff06ffd791f39e687c4eaaf055889ea75f
3,627,000
def compute_q10_correction(q10, T1, T2):
    r"""Compute the Q10 temperature coefficient.

    As explained in [1]_, the time course of voltage clamp recordings are
    strongly affected by temperature: the rates of activation and inactivation
    increase with increasing temperature. The :math:`Q_{10}` temperature
    coefficient, a measure of the increase in rate for a 10 :math:`^{\circ}C`
    temperature change, is a correction factor used in HH-style models to
    quantify this temperature dependence.

    In HH-style models, the adjustment due to temperature can be achieved by
    decreasing the time constants by a factor :math:`Q_{10}^{(T_2 - T_1)/10}`,
    where the temperatures :math:`T_1 < T_2`. The temperature unit must be
    either the Celsius or the Kelvin. Note that :math:`T_1` and :math:`T_2`
    must have the same unit, and do not need to be exactly 10 degrees apart.

    Parameters
    ----------
    q10 : :obj:`float`
        The :math:`Q_{10}` temperature coefficient.
    T1 : :obj:`float`
        Temperature at which the first rate is recorded.
    T2 : :obj:`float`
        Temperature at which the second rate is recorded.

    Returns
    -------
    correction : :obj:`float`
        Correction factor due to temperature.

    Raises
    ------
    ValueError
        If ``T2 < T1``.

    References
    ----------
    .. [1] D. Sterratt, B. Graham, A. Gillies, D. Willshaw,
       "Principles of Computational Modelling in Neuroscience",
       Cambridge University Press, 2011.
    """
    # Fix: the docstring is now a raw string -- the original contained the
    # invalid escape sequence `\c` (from \circ), which is a DeprecationWarning
    # today and a SyntaxError in future Python versions.
    # That the test below allows T1 = T2 is intentional; the function should
    # accommodate no correction, i.e. a correction factor equal to 1.
    if T1 > T2:
        msg = ("T2 must be greater than or equal to T1")
        raise ValueError(msg)
    return q10**((T2 - T1) / 10)
eed7d7f38c1f9d98b1a6a89a28eb4f1a6656b6c7
3,627,001
import collections


def get_frameworks_table():
    """
    Generates a dataframe containing the supported auto classes for each model type, using the content of
    the auto modules.
    """
    # NOTE(review): relies on module-level names defined elsewhere in this
    # file: `transformers_module`, `_re_pt_models`, `_re_tf_models`,
    # `_re_flax_models`, `camel_case_split` and `pd` (pandas).
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            # Strip trailing CamelCase words until the prefix matches a known
            # model type (e.g. "BertForMaskedLM" -> ... -> "Bert").
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
189b521e81a2e3622534f34e258f4d3fbe665084
3,627,002
import torch


def extract_video_features(model, get_frame_fn, video_names, video_frame_counts, update_progress_cb=None):
    """Extract one pooled feature vector per video with the given model.

    :param model: network exposing ``extract_features`` and resident on GPU
        (input is moved with ``.cuda()``)
    :param get_frame_fn: callable ``(video_name, frame_index) -> frame``
    :param video_names: iterable of video identifiers
    :param video_frame_counts: frame count per video, aligned with video_names
    :param update_progress_cb: optional zero-arg callable, invoked once per video
    :return: VxF np.float32 array
    """
    # NOTE(review): `_convert_frame_vfid` and `np` (numpy) come from elsewhere
    # in this file. Frames are stacked on dim=2 -- presumably the temporal
    # axis of a (C, T, H, W) clip tensor; confirm against _convert_frame_vfid.
    clip_features = []
    for video_name, num_frames in zip(video_names, video_frame_counts):
        cur_video_frames = []
        for t in range(num_frames):
            cur_video_frame = get_frame_fn(video_name, t)
            cur_video_frames.append(_convert_frame_vfid(cur_video_frame))
        # Pass current window through model
        model_input = torch.stack(cur_video_frames, dim=2).cuda()
        pred = model.extract_features(model_input, target_endpoints='Logits')
        # Obtain global spatially-pooled features as single vector
        features = pred[0].squeeze()  # 1024
        clip_features.append(features.cpu().numpy())
        # Update work count
        if update_progress_cb is not None:
            update_progress_cb()
    clip_features = np.stack(clip_features)
    return clip_features
48fe01307ef56f48dcd3acbfc350717d75bdb21a
3,627,003
def osarch_is_amd64():
    """Return True when the detected OS architecture maps to amd64."""
    return osarch_match("amd64")
ec34b586247c4206093f0a89c3fe875f3a6774c5
3,627,004
from skimage import exposure


def increase_contrast(im, method='adaptive'):
    """Return a contrast-enhanced copy of ``im``.

    The adaptive method seems to deal with background gradients the best, but
    two other options are possible using the scikit-image package.

    SEE: https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_equalize.html

    :param im: input image array
    :param method: one of 'adaptive', 'contrast_stretching',
        'histogram_equalization'
    :returns: contrast-enhanced image
    :raises ValueError: for an unrecognised ``method`` (the original fell
        through to an UnboundLocalError instead)
    """
    if method == 'adaptive':
        return exposure.equalize_adapthist(im, clip_limit=0.03)
    if method == 'contrast_stretching':
        # np comes from the file-level numpy import.
        p2, p98 = np.percentile(im, (2, 98))
        return exposure.rescale_intensity(im, in_range=(p2, p98))
    if method == 'histogram_equalization':
        # Bug fix: the original called skimage.exposure.equalize_hist, but
        # only `exposure` is imported here, so `skimage` was an undefined name.
        return exposure.equalize_hist(im)
    raise ValueError("unknown method: %r" % (method,))
966bfeb6487d6a7add8c4e1bfbfd0c31d8683fb1
3,627,005
def without(array, *values):
    """Creates an array with all occurrences of the passed values removed.

    Args:
        array (list): List to filter.
        values (mixed): Values to remove.

    Returns:
        list: Filtered list.

    Example:

        >>> without([1, 2, 3, 2, 4, 4], 2, 4)
        [1, 3]

    .. versionadded:: 1.0.0
    """
    kept = []
    for element in array:
        if element in values:
            continue
        kept.append(element)
    return kept
21bddf5244a591a261f704557fb8017a2401ef77
3,627,006
def get_total_mnsp_ramp_rate_violation(model):
    """Get total MNSP ramp rate violation (up plus down components)."""
    total = 0
    for violation_vars in (model.V_CV_MNSP_RAMP_UP, model.V_CV_MNSP_RAMP_DOWN):
        total += sum(v.value for v in violation_vars.values())
    return total
9e326a70966edce51f82036977fcec1b26991c21
3,627,007
def get_row_line(p1, p2):
    """Compute integer x,y samples and the angle alpha of the line from the
    start point (p1) to the ending point (p2)."""
    length, alpha = line_polar(p1, p2)
    xs, ys = line_polar_to_cart(length, alpha, p1)
    xs_int = np.round(xs).astype("int")
    ys_int = np.round(ys).astype("int")
    return (xs_int, ys_int, alpha)
3c253c8e768204af53bb26a50afbce952229e57a
3,627,008
import re


def _coerce_groupdict(groupdict):
    """Map a regex groupdict to the result convention used by
    parse_traceroute: None -> 'No match found', digit-only strings -> int,
    everything else kept as-is."""
    coerced = {}
    for key, value in groupdict.items():
        if value is None:
            coerced[key] = 'No match found'
        elif value.isdigit():
            coerced[key] = int(value)
        else:
            coerced[key] = value
    return coerced


def parse_traceroute(raw_result):
    """
    Parse the 'traceroute' command raw output.

    :param str raw_result: traceroute raw result string.
    :rtype: dict
    :return: The parsed result of the traceroute command in a \
        dictionary of the form:

     ::

        {1: {'time_stamp2': '0.189',
             'time_stamp3': '0.141',
             'time_stamp1': '0.217',
             'hop_num': 1,
             'int_hop': '50.1.1.4'
             },
         2: {'time_stamp2': '0.144',
             'time_stamp3': '0.222',
             'time_stamp1': '0.216',
             'hop_num': 2,
             'int_hop': '40.1.1.3'
             },
         'probe': 3,
         'min_ttl': 1,
         'dest_addr': '10.1.1.10',
         'max_ttl': 30,
         'time_out': 3
         }
    """
    # Destination replied "!H" (host unreachable).
    traceroute_re1 = (
        r'(.*\s+(?P<dst_unreachable>!H)\s*?.*)'
    )
    # A hop line consisting only of timeouts ("* * *").
    traceroute_re2 = (
        r'(\s*(?P<hop_number>\d+)\s+(?P<hop_timeout>(\*\s+)+))'
    )
    traceroute_re3 = (
        r'.*\s*(?P<network_unreachable>(Network is unreachable))\s*'
    )
    # Header line: destination address and run parameters.
    traceroute_re4 = (
        r'\s*traceroute to\s+(?P<dest_addr>(\d+.\d+.\d+.\d+))\s+'
    )
    traceroute_re5 = (
        r'.*\s+(?P<min_ttl>\d+)\s+hops min,'
        r'.*\s+(?P<max_ttl>\d+)\s+hops max,'
        r'.*\s+(?P<time_out>\d+)\s+sec. timeout,'
        r'.*\s+(?P<probe>\d+)\s+probes'
    )
    # A normal hop line: hop number, router address, up to five RTT samples.
    traceroute_re6 = (
        r'(\s*(?P<hop_num>\d+)\s+(?P<int_hop>(\d+.\d+.\d+.\d+))\s+'
        r'(?P<time_stamp1>(\d+.\d+))ms\s+'
        r'((?P<time_stamp2>(\d+.\d+))ms\s+)?'
        r'((?P<time_stamp3>(\d+.\d+))ms\s+)?'
        r'((?P<time_stamp4>(\d+.\d+))ms\s+)?'
        r'((?P<time_stamp5>(\d+.\d+))ms\s*)?.*)'
    )
    result = {}
    # Early-exit diagnostics: unreachable destination, all-timeout hop, or
    # unreachable network.  Each returns only the matched fields.
    # (The original repeated the same groupdict-coercion loop six times;
    # it is factored into _coerce_groupdict above.)
    for pattern in (traceroute_re1, traceroute_re2, traceroute_re3):
        match = re.search(pattern, raw_result)
        if match:
            result.update(_coerce_groupdict(match.groupdict()))
            return result
    raw_result_lines = raw_result.splitlines()
    length = len(raw_result_lines)
    re_result4 = re.search(traceroute_re4, raw_result)
    if re_result4:
        result.update(_coerce_groupdict(re_result4.groupdict()))
    re_result5 = re.search(traceroute_re5, raw_result)
    if re_result5:
        result.update(_coerce_groupdict(re_result5.groupdict()))
    # One entry per line after the header; lines that do not look like a hop
    # keep an empty dict (matches the original behavior).
    for hop_num in range(1, length):
        result[hop_num] = {}
        re_result6 = re.search(traceroute_re6, raw_result_lines[hop_num])
        if re_result6:
            result[hop_num].update(_coerce_groupdict(re_result6.groupdict()))
    return result
2a12d72a4e2e9a64287c65525b7eca6997849f97
3,627,009
def get_scheds(aircraft_list, flights):
    """
    Generates Schedule objects with the appropriate attributes, one per
    aircraft.

    Parameters
    ----------
    aircraft_list : list, aircrafts in plan
    flights : list, list of classes.Flight objects

    Returns
    -------
    schedules : dict, mapping schedule label tuples to classes.Schedule
        objects (note: despite the original docstring, this is a dict keyed
        by ``schedule.label``, not a list)
    """
    schedules = {}
    for k in aircraft_list:
        schedule = Schedule()
        # Label encodes (iteration 0, tail number, readable id).
        schedule.label = (0, k, 's_0_%s' % k)
        schedule.flights = [f for f in flights if f.aircraft.tail == k]
        schedule.cost = 10000  # large placeholder cost for the initial schedule
        schedule.aircraft = k
        schedules[schedule.label] = schedule
    return schedules
13d0bc2326190261fd4b4a93603f2c533d326174
3,627,010
def _transform_get_item_to_module(module: Module, debug: bool) -> GraphModule:
    """Transforms the built-in getitem function to ReduceTuple module.

    This function is usually used to reduce the tuple output of RNNs.

    Args:
        module: container module to transform
        debug: whether to print debug messages

    Returns:
        equivalent transformed module
    """
    # torch.fx represents the builtin as this string in the traced graph.
    target = "<built-in function getitem>"
    if debug:
        print(f"\tBegin transformation: {target} -> ReduceTuple")
    graph: Graph = BackpackTracer().trace(module)
    nodes = [
        n for n in graph.nodes if n.op == "call_function" and target == str(n.target)
    ]
    for node in nodes:
        # node.args == (tuple_producing_node, index); keep the index in the
        # replacement module and re-wire only the tuple input.
        _change_node_to_module(
            node,
            "reduce_tuple",
            module,
            ReduceTuple(index=node.args[1]),
            (node.args[0],),
        )
    graph.lint()
    if debug:
        print(f"\tReduceTuple transformed: {len(nodes)}")
    return GraphModule(module, graph)
c3b1d95db04c891a7c83d1ddb71940a0b2158b75
3,627,011
from typing import Optional
from typing import List
import argparse
from pathlib import Path


def build_parser(argv: Optional[List] = None):
    """Return ArgumentParser parser for script.

    When ``argv`` is None the parser object itself is returned; otherwise the
    given argument list is parsed and the resulting Namespace is returned.
    """
    # Create parser object
    parser = argparse.ArgumentParser(
        prog="cw_query_database",
        description="Interrogate a local CAZyme database",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "database",
        type=Path,
        help="Path to local CAZyme database"
    )
    parser.add_argument(
        "file_types",
        action="store",
        nargs="+",
        choices=["csv", "json"],
        help="File types to write the query output in [csv,json]"
    )
    parser.add_argument(
        "--cache_dir",
        type=Path,
        default=None,
        help="Target path for cache dir to be used instead of default path",
    )
    # Add option to use own CAZy class synonym dict
    parser.add_argument(
        "--cazy_synonyms",
        type=Path,
        default=None,
        help="Path to JSON file containing CAZy class synoymn names",
    )
    # Add option to define complete classes to scrape
    parser.add_argument(
        "--classes",
        type=str,
        default=None,
        help="CAZy classes to retrieve UniProt data for. Separate classes by ','"
    )
    # Add option to specify path to configuration file
    parser.add_argument(
        "-c",
        "--config",
        type=Path,
        metavar="config file",
        default=None,
        help="Path to configuration file. Default: None, scrapes entire database",
    )
    parser.add_argument(
        "--ec_filter",
        type=str,
        default=None,
        help="Limit retrieval to proteins annotated with the provided EC numbers. Separate EC numbers with single commas"
    )
    # Add option to specify families to scrape
    parser.add_argument(
        "--families",
        type=str,
        default=None,
        help="CAZy families to UniProt data for. Separate families by commas 'GH1,GH2' (case sensitive)"
    )
    parser.add_argument(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        default=False,
        help="Force writing to existing output dir",
    )
    # Add option to restrict scrape to specific genera
    parser.add_argument(
        "--genera",
        type=str,
        default=None,
        help="Genera to UniProt data for"
    )
    parser.add_argument(
        "--include",
        action="store",
        nargs="+",
        choices=["class", "family", "subfamily", "organism", "uniprot_acc", "uniprot_name", "ec", "pdb", "genbank_seq", "uniprot_seq"],
        help="Additional data to include in the output file. Separate with a single space (' ')"
    )
    parser.add_argument(
        "--kingdoms",
        type=str,
        default=None,
        help="Tax Kingdoms to UniProt data for"
    )
    # Add log file name option
    # If not given, no log file will be written out
    parser.add_argument(
        "-l",
        "--log",
        type=Path,
        metavar="log file name",
        default=None,
        help="Defines log file name and/or path",
    )
    parser.add_argument(
        "-n",
        "--nodelete",
        dest="nodelete",
        action="store_true",
        default=False,
        help="When called, content in the existing output dir is NOT deleted",
    )
    # Add option to not delete content in the existing cache dir
    parser.add_argument(
        "--nodelete_cache",
        dest="nodelete_cache",
        action="store_true",
        default=False,
        help="When called, content in the existing cache dir is NOT deleted",
    )
    parser.add_argument(
        "-o",
        "--output_dir",
        type=Path,
        default=None,
        help="Path to output dir, default: None (writes to cwd)",
    )
    parser.add_argument(
        "--overwrite",
        dest="overwrite",
        action="store_true",
        default=False,
        help="When called, overwrites existing output files",
    )
    parser.add_argument(
        "-p",
        "--prefix",
        type=str,
        default=None,
        help="Str to prefix all output files with, default: None",
    )
    # Add option to set SQLite engine echo (verbose SQL logging)
    parser.add_argument(
        "--sql_echo",
        dest="sql_echo",
        action="store_true",
        default=False,
        help="Set SQLite engine echo to True (SQLite will print its log messages)",
    )
    # Add option to UniProt data for specific species.  This will scrape
    # CAZymes from all strains belonging to each listed species
    parser.add_argument(
        "--species",
        type=str,
        default=None,
        help="Species (written as Genus Species) to UniProt data for"
    )
    # Add option to restrict scraping to specific strains of organisms
    parser.add_argument(
        "--strains",
        type=str,
        default=None,
        help=(
            "Specific strains of organisms to UniProt data for "
            "(written as Genus Species Strain)"
        ),
    )
    # Add option for more detail (verbose) logging
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        default=False,
        help="Set logger level to 'INFO'",
    )
    if argv is None:
        # parse command-line
        return parser
    else:
        # return namespace
        return parser.parse_args(argv)
c426c5a58b1d9794c6f819c39d21aca2a73c77ba
3,627,012
def symptom_LDAP_user_enabled_emulation_use_group_config_ignored():
    """`[ldap] user_enabled_emulation_use_group_config` is being ignored.

    There is no reason to set this value unless
    `keystone.conf [ldap] user_enabled_emulation` is also enabled.
    """
    emulation_enabled = CONF.ldap.user_enabled_emulation
    group_config_set = CONF.ldap.user_enabled_emulation_use_group_config
    return not emulation_enabled and group_config_set
f16071879f5b43a7a076207e7ccedef5c281709a
3,627,013
def add_user():
    """Add a new user, you must have permissions.

    Validates the submitted form, rejects duplicate emails, and on success
    persists the new user and returns a 201 JSON response.
    """
    form = UserForm()
    resp_message = 'Invalid payload.'
    if not form.validate():
        # NOTE(review): if BadRequest is werkzeug's exception class it should
        # be raised rather than returned -- confirm BadRequest here is a
        # response helper.
        return BadRequest(resp_message)
    username = form.username.data
    email = form.email.data
    password = form.password.data
    user = User.query.filter_by(email=email).first()
    if not user:
        # auto_commit presumably commits on success / rolls back on error --
        # defined on the project's db wrapper; confirm.
        with db.auto_commit(resp_message):
            user = User(username, email, password)
            db.session.add(user)
        response_object = {
            'status': 'success',
            'message': f'{email} was added'
        }
        return jsonify(response_object), 201
    else:
        return BadRequest('Sorry, the email is already existing.')
1b9a3c154c05e4aca9eb9a7f7f120174c35b99a9
3,627,014
import _sha256  # kept from the original; no longer used (see note below)
import hashlib


def _sha256_of_file(path):
    """Return the hex SHA256 digest of the file at ``path`` (chunked read)."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()


def _fetch_remote(remote, dirname=None):
    """Helper function to download a remote dataset into path

    Fetch a dataset pointed by remote's url, save into path using remote's
    filename and ensure its integrity based on the SHA256 Checksum of the
    downloaded file.

    Parameters
    ----------
    remote : RemoteFileMetadata
        Named tuple containing remote dataset meta information: url, filename
        and checksum
    dirname : string
        Directory to save the file to.

    Returns
    -------
    file_path: string
        Full path of the created file.
    """
    file_path = (remote.filename if dirname is None
                 else join(dirname, remote.filename))
    urlretrieve(remote.url, file_path)
    # Bug fix: the original called `_sha256(file_path)`, but `import _sha256`
    # binds the CPython accelerator *module*, which is not callable.  Compute
    # the digest explicitly with hashlib instead.
    checksum = _sha256_of_file(file_path)
    if remote.checksum != checksum:
        raise IOError("{} has an SHA256 checksum ({}) "
                      "differing from expected ({}), "
                      "file may be corrupted.".format(file_path, checksum,
                                                      remote.checksum))
    return file_path
ffa19832ac69a20891d084d0610c2c264d696a27
3,627,015
import torch


def evaluate(config: ConfigLoader, model, validationloader):
    """ evaluate model using the data given by validationloader

    Args:
        config (ConfigLoader): config of the experiment
        model (nn.Module): model to be evaluated
        validationloader (torch.utils.data.DataLoader): dataloader containing
            the data to be used for evaluation

    Returns:
        (dict, float): dict with actual and predicted labels, actual labels
            are accessible with the key 'actual' and the predicted labels
            with 'predicted'; float containing the loss of last evaluated
            minibatch
    """
    model = model.eval()
    predicted_labels = np.empty(0, dtype='int')
    actual_labels = np.empty(0, dtype='int')
    loss = torch.zeros(1)  # fallback if the loader yields no batches
    with torch.no_grad():
        for data in validationloader:
            features, labels = data
            features = features.to(config.DEVICE)
            labels = labels.long().to(config.DEVICE)
            outputs = model(features)
            # NOTE(review): `loss` is overwritten each iteration, so only the
            # LAST minibatch's loss is returned -- this matches the docstring,
            # but an average may have been intended.
            criterion = nn.NLLLoss()
            loss = criterion(outputs, labels)
            _, predicted_labels_i = torch.max(outputs, dim=1)
            predicted_labels = np.r_[predicted_labels, predicted_labels_i.tolist()]
            actual_labels = np.r_[actual_labels, labels.tolist()]
    return {'actual': actual_labels, 'predicted': predicted_labels}, loss.item()
7e977a140174db610249689b2257255cd9fc651c
3,627,016
from typing import Union
from typing import Literal


def curve(
    data: dict,
    *,
    ax: Union[plt.Axes, None] = None,
    # Fix: the Literal previously read "shifetd"; the implementation compares
    # against "shifted", so the annotation advertised an unusable value.
    curve_layout: Literal["overlay", "stacked", "shifted"] = "overlay",
    shade: bool = True,
    kde_norm: bool = True,
    order: Union[list, None] = None,
    kernel_kws: Union[dict, None] = None,
    style: Union[Literal["default"], None] = "default",
    style_kws: Union[dict, None] = None,
    fig_kws: Union[dict, None] = None,
    **kwargs,
) -> plt.Axes:
    """Basic plotting function primarily for the spectratype to show length
    distributions

    Parameters
    ----------
    data
        Weighted counts for KDE.
    ax
        Custom axis if needed.
    curve_layout
        if the KDE-based curves should be stacked or shifted vertically.
    kde_norm
        KDE curves are by default normalized to a sum of 1. Set to False in
        order to keep normalized cell weights.
    kernel_kws
        Parameters that should be passed to `KernelDensity` function of
        sklearn.
    order
        Specifies the order of groups.
    shade
        If True, draw a shade between curves
    {common_doc}

    Returns
    -------
    List of axes.
    """
    if ax is None:
        ax = _init_ax(fig_kws)
    # x-grid spans [0, max value over all series) in 0.1 steps.
    xmax = 0
    for k, v in data.items():
        mx = np.amax(v)
        if mx > xmax:
            xmax = mx
    x = np.arange(0, xmax, 0.1)
    fy, _i = 0, 0
    yticks = []
    outline = curve_layout != "stacked"
    if order is None:
        order = list(data.keys())
    if kernel_kws is None:
        kernel_kws = dict()
    if "kernel" not in kernel_kws:
        kernel_kws["kernel"] = "gaussian"
    if "bandwidth" not in kernel_kws:
        kernel_kws["bandwidth"] = 0.6
    # Draw a curve for every series
    for i in range(len(order)):
        label = order[i]
        col = data[label]
        sx = col.sum()
        X = col.reshape(-1, 1)
        kde = KernelDensity(**kernel_kws).fit(X)
        y = np.exp(kde.score_samples(x.reshape(-1, 1)))
        if not kde_norm:
            # Undo the KDE's normalization by scaling back to total weight.
            y *= sx
        if curve_layout == "shifted":
            # Each curve starts at the previous curve's maximum (_i).
            y = y + _i
            fy = _i + 0
            _i = y.max()
            yticks.append(fy)
        else:
            if curve_layout == "stacked":
                if i < 1:
                    _y = np.zeros(len(y))
                fy = _y[:]
                _y = _y + y
                y = fy + y
        if shade:
            if outline:
                ax.plot(x, y, label=label)
                ax.fill_between(x, y, fy, alpha=0.6)
            else:
                ax.fill_between(x, y, fy, alpha=0.6, label=label)
        else:
            ax.plot(x, y, label=label)
    if style_kws is None:
        style_kws = dict()
    style_kws["change_xticks"] = False
    if kde_norm:
        style_kws["ylab"] = "Probability"
    if curve_layout == "shifted":
        # Shifted layout labels each baseline with its group name instead of
        # a numeric axis and hides the y spines.
        style_kws["add_legend"] = False
        style_kws["ylab"] = ""
        ax.set_yticklabels(order)
        ax.set_yticks(yticks)
        ax.spines["left"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.get_yaxis().set_tick_params(length=0)
    style_axes(ax, style, style_kws)
    return ax
fa42743b997064882bacf9c90ed6734b211e8f0f
3,627,017
def load_kaldi_hmms(fctx):
    """Load HMMs from text output of context to pdf binary.

    Each input line is ``<left> <center> <right> <state-index> <pdf-id>``.
    A state-index of 0 starts a new HMM; consecutive states accumulate into a
    tuple of pdf-ids, which is mapped to the list of phone contexts sharing it.
    Disambiguation symbols (``#``) and ``<eps>`` contexts are skipped.

    :param fctx: path of the text file to parse
    :returns: dict mapping tuple(pdf-ids) -> list of (l, c, r) context tuples
    """
    hmms = {}
    hmm = []
    ctx_last = None
    # Fix: use a context manager so the file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(fctx) as fh:
        for line in fh:
            lx = line.strip().split()
            ctx = (lx[0], lx[1], lx[2])
            n = int(lx[3])
            pdf = int(lx[4])
            # we do not need disambig phones
            if "#" in ctx[0] or "#" in ctx[1] or "#" in ctx[2]:
                continue
            # we do not need <eps>
            if "<eps>" in ctx[0] or "<eps>" in ctx[1] or "<eps>" in ctx[2]:
                continue
            if n == 0:
                # first state: flush the previous HMM (if any), start a new one
                if len(hmm) > 0:
                    hmms.setdefault(tuple(hmm), []).append(ctx_last)
                hmm = [pdf]
            else:
                hmm.append(pdf)
            ctx_last = ctx
    # Flush the last HMM.  Fix: guard against an empty or fully-filtered file,
    # where the original raised NameError on the never-assigned ctx_last.
    if hmm:
        hmms.setdefault(tuple(hmm), []).append(ctx_last)
    return hmms
7703ac442377e541b6d33aed4a33ca5a858f960e
3,627,018
def mask (raster, threshold, symName):
    """Highlight those raster pixels that reach/exceed the given threshold."""
    # NOTE(review): `layer`, `Local` and `Resample` come from elsewhere in
    # this file.  The raster is resampled by a factor of 5, compared
    # ">= threshold" ("Greater Than Equal", U8 output), and wrapped as a
    # layer under the given symbology name.
    return layer(Local("U8", "Greater Than Equal", Resample(raster,5), threshold), symName)
d10b29e4da8caf1189276dc90e7d6d765b40f5cb
3,627,019
def get_vector_w2v(sent: Series, model: gensim.models.keyedvectors.Word2VecKeyedVectors):
    """
    Create a word vector for a given sentence using a Word2Vec model.
    This function is called in a lambda expression in `core.get_vectors`.
    Returns list
    """
    # Keep only tokens the model knows; assumes `sent.phrase` is an iterable
    # of tokens -- TODO confirm the Series row layout against the caller.
    tokens = [token for token in sent.phrase if token in model.wv.vocab]
    # NOTE(review): `model[tokens]` / `model.wv.vocab` are the gensim<4 API.
    # Summing token vectors yields one sentence vector (a numpy array,
    # despite the "Returns list" note above).
    return model[tokens].sum(axis=0)
3e50be76496f350109274ed205f27e1450dd4f9e
3,627,020
def inc(initial_value):
    """Return arithmetic increase of 1 to initial_value.

    :param initial_value: number to increment
    :returns: initial_value + 1
    """
    # Fix: the Python 2 `print` statement is a SyntaxError under Python 3;
    # the print() function prints the same text on both versions.
    print("In inc() subroutine now")
    return initial_value + 1
d0ffcd97f4a2fa6c62526c8ee89c73f42a50aa46
3,627,021
def search():
    """Search page ui.

    Renders the template configured under ``SEARCH_UI_SEARCH_TEMPLATE`` on
    the current Flask app (``render_template`` / ``current_app`` are imported
    elsewhere in this file).
    """
    return render_template(current_app.config["SEARCH_UI_SEARCH_TEMPLATE"])
c5bfda79f0695beb7b5299a5d7d61eb20945a095
3,627,022
def vendor_id(request):
    """ Return the paddle.com vendor ID as a context variable

    Django context processor: ``request`` is required by the context-processor
    signature but unused.  Reads DJPADDLE_VENDOR_ID from project settings.
    """
    return {"DJPADDLE_VENDOR_ID": settings.DJPADDLE_VENDOR_ID}
5420c2c049e32f16e8dccd85ea9fdb46090c1704
3,627,023
import importlib
import inspect
import sys


def factory(cls, modules=None, **kwargs):
    """
    Factory for creating objects. Arguments are passed directly to the
    constructor of the chosen class.

    :param cls: class name to instantiate (matched case-insensitively)
    :param modules: module name or list of module names to search; defaults
        to this module
    :param kwargs: forwarded to the selected class constructor
    :returns: new instance of the matched class
    :raises KeyError: if no class with the given name is found
    """
    # Format modules into a list
    if modules is None:
        modules = [__name__]
    elif isinstance(modules, basestring):
        # NOTE(review): `basestring` exists only on Python 2, and `odict`
        # below is an external alias (presumably OrderedDict) -- this is
        # Python-2-era code; confirm before porting.
        modules = [modules]
    # Import the requested modules
    for module in modules:
        importlib.import_module(module)

    # Define a predicate for selecting class members
    def fn(member):
        return inspect.isclass(member) and member.__module__ in modules

    # Fill a dictionary of classes
    classes = odict()
    for module in modules:
        classes.update(inspect.getmembers(sys.modules[module], fn))
    # Lowercase class names
    members = odict([(k.lower(), v) for k, v in classes.items()])
    # Select class (case-insensitive)
    lower = cls.lower()
    if lower not in members.keys():
        msg = "Unrecognized class: %s" % (cls)
        raise KeyError(msg)
    # Access the requested class and build the object
    return members[lower](**kwargs)
71261043c10057d8e79cb052c9cd783ca76a6dfc
3,627,024
def filter_samplesheet_by_project(file_path, proj_id,
                                  project_column_label='SampleProject',
                                  output_ini_headers=False):
    """Return the SampleSheet.csv lines belonging to one project.

    Output lines use Windows ``\\r\\n`` line endings.

    :param file_path: path to the SampleSheet.csv file
    :type file_path: str
    :param proj_id: project ID to filter sample lines by
    :type proj_id: str
    :param project_column_label: label of the project column; underscores are
        ignored so both 'SampleProject' and 'Sample_Project' (IEMv4) match
    :type project_column_label: str
    :param output_ini_headers: when True, also emit any [Header]..[Data]
        INI-style preamble
    :return: filtered samplesheet lines (header + matching sample rows)
    :rtype: list
    """
    # FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,
    # Operator,SampleProject
    #
    # last line: #_IEMVERSION_3_TruSeq LT,,,,,,,,,
    outlines = []
    # Fix: mode "rU" was removed in Python 3.11; text mode "r" already uses
    # universal-newline handling on Python 3.
    with open(file_path, "r") as f:
        header_content = []
        header = f.readline().strip()
        header_content.append(header + '\r\n')
        # skip any INI-style headers down to the CSV sample list in the [Data]
        if '[Header]' in header:
            while '[Data]' not in header:
                header = f.readline().strip()
                header_content.append(header + '\r\n')
            header = f.readline().strip()
            header_content.append(header + '\r\n')
            if output_ini_headers:
                outlines.extend(header_content[:-1])
        s = header.split(',')
        # old samplesheet formats have no underscores in column labels,
        # newer (IEMv4) ones do. by removing underscores here, we can find
        # 'SampleProject' and 'Sample_Project', whichever exists
        s_no_underscores = [c.lower().replace('_', '') for c in s]
        try:
            project_column_index = s_no_underscores.index(
                project_column_label.lower())
        except ValueError:
            project_column_index = None
        outlines.append(header + '\r\n')
        # Bug fix: the original iterated the file object a second time after
        # the project-scan loop had already exhausted it, so no sample lines
        # were ever emitted when a project column existed.  Read the body once
        # so both passes see the same lines.
        body_lines = f.readlines()
    one_project_only = True
    if project_column_index is not None:
        # find the set of unique project ids in the sample sheet
        proj_ids_in_sheet = set()
        for l in body_lines:
            if l.strip() == '':
                continue
            s = l.strip().split(',')
            proj_ids_in_sheet.add(s[project_column_index].strip())
        # We want to output all lines if there is only a single project.
        # The on-instrument FASTQ generation for MiSeq seems to not produce
        # Project directories, even if there is a Sample_Project specified in
        # the samplesheet. So the 'proj_id' here might be empty, but we still
        # want all the SampleSheet.csv lines of the one and only project in
        # the SampleSheet in this case.
        one_project_only = (len(proj_ids_in_sheet) == 1)
    for l in body_lines:
        if l.strip() == '':
            # Robustness: skip blank lines so the column index below cannot
            # raise IndexError on an empty row.
            continue
        s = l.strip().split(',')
        if (one_project_only or
                s[project_column_index].strip() == proj_id.strip() or
                l[0] == '#'):
            outlines.append(l.strip() + '\r\n')
    return outlines
d2e1fff7c9514654643b9c3949ef1850faf5ba94
3,627,025
import math


def ecliptic_obliquity_radians(time):
    """Returns ecliptic obliquity radians at time."""
    obliquity_degrees = 23.439 - 0.0000004 * time
    return math.radians(obliquity_degrees)
384199a506d29cb14b2a42facf2d6c46bf44f111
3,627,026
def hessian_Y(D, Gamma, eigQ, W, sigma_t):
    """Linear operator for the CG method; the argument is D.

    Gamma and W are constructed beforehand so the operator can be evaluated
    more efficiently.
    """
    jac_phiplus = eval_jacobian_phiplus(D, Gamma, eigQ)
    jac_prox = eval_jacobian_prox_p(D, W)
    return -sigma_t * (jac_phiplus + jac_prox)
27f1e2a0ef69a9cf4b38dc8396921247c09e93fb
3,627,027
def spec_pix_to_world(pixel, wcs, axisnumber, unit=None):
    """
    Given a WCS, an axis ID, and a pixel ID, return the WCS spectral value
    at a pixel location

    .. TODO:: refactor to use wcs.sub
    """
    coords = list(wcs.wcs.crpix)
    coords[axisnumber] = pixel + 1
    coords = list(np.broadcast(*coords))
    world = wcs.wcs_pix2world(coords, 1)[:, axisnumber]
    if unit is None:
        return world
    return world * unit
1da3cc761f16ead462b114253af484c8af0b1279
3,627,028
def share_replica_get(context, replica_id, with_share_data=False,
                      with_share_server=False, session=None):
    """Returns summary of requested replica if available.

    :param context: request context
    :param replica_id: ID of the share replica to look up
    :param with_share_data: also attach the parent share's data to the result
    :param with_share_server: also load the share-server relation
    :param session: optional DB session; a new one is created when omitted
    :raises exception.ShareReplicaNotFound: when no replica matches
    :returns: the replica model, possibly augmented with share data
    """
    session = session or get_session()
    result = _share_replica_get_with_filters(
        context, with_share_server=with_share_server,
        replica_id=replica_id, session=session).first()
    if result is None:
        raise exception.ShareReplicaNotFound(replica_id=replica_id)
    if with_share_data:
        # The helper operates on lists; this lookup is for a single replica.
        result = _set_replica_share_data(context, result, session)[0]
    return result
319285b3c38274ff928dd37e13286890408fdb06
3,627,029
async def playing_song(this, ctx: Context):
    """Show information about the currently playing content.

    (Docstring translated from Korean.)

    :param this: self
    :param ctx: discord.ext.commands.Context
    """
    vc = ctx.voice_client
    if not vc or not vc.is_connected():
        # Not connected to a voice channel: show the error embed briefly.
        return await ctx.send(embed=embed_ERROR, delete_after=20)
    player = this.get_player(ctx)
    if not player.current:
        return await ctx.send(embed=embed_ERROR)
    try:
        # Remove the previous now-playing message; it may already be gone,
        # so Discord API failures are ignored.
        await player.np.delete()
    except discord.HTTPException:
        pass
    ex = await this.now_playing_embed(vc=vc)
    player.np = await ctx.send(embed=ex)
543b9f39c112099fdc5f6e11d01e952648924471
3,627,030
import time


def check_and_record_restart_request(service, changed_files):
    """Check if restarts are permitted, if they are not log the request.

    :param service: Service to be restarted
    :type service: str
    :param changed_files: Files that have changed to trigger restarts.
    :type changed_files: List[str]
    :returns: Whether restarts are permitted
    :rtype: bool
    """
    unique_files = sorted(set(changed_files))
    permitted = is_restart_permitted()
    if not permitted:
        reason = 'File(s) changed: {}'.format(', '.join(unique_files))
        save_event(ServiceEvent(
            timestamp=round(time.time()),
            service=service,
            reason=reason,
            action='restart'))
    return permitted
08b779823732d9798f5875fdc5615eed4e0d2ed6
3,627,031
from typing import Union
import requests


def get_user_wantlist(user: Union[UserWithoutAuthentication, UserWithUserTokenBasedAuthentication],
                      username: str,
                      page: Union[int, None] = None,
                      per_page: Union[int, None] = None,
                      ) -> requests.models.Response:
    """
    Get a list of releases from the wantlist for the given user.

    Note: If the wantlist has been made private by its owner, you must be
    authenticated as the owner to view it.

    No user Authentication needed.

    Parameters:
        user: user object (required)
        username: string (required)
            -> The username of the wantlist you are trying to fetch.
        page: number (optional)
            -> The page you want to request.
        per_page: number (optional)
            -> The number of items per page.
    """
    url = f"{USERS_URL}/{username}/wants"
    headers = user.headers
    # Bug fix: the original assigned `params = user.params` and then mutated
    # it, leaking "page"/"per_page" into the shared user object and into every
    # later request.  Work on a copy instead.
    params = dict(user.params)
    if page:
        params["page"] = max(1, page)
    if per_page:
        params["per_page"] = max(1, per_page)
    return requests.get(url, headers=headers, params=params)
060338f7a6cf88799c4913130be5bcce6961acb1
3,627,032
def gaussian_additive_noise(x, sigma):
    """Gaussian additive noise.

    Parameters
    ----------
    x : array
        input data matrix.
    sigma : float
        noise standard deviation. Noise values are sampled from N(0, sigma)
        for each input feature.

    Returns
    -------
    x_noise : array
        output data matrix.
    """
    perturbation = np.random.normal(0.0, sigma, size=x.shape)
    return x + perturbation
50f67c6b5695dbf3f6a997bbf9cfc6f9b9464e4d
3,627,033
import global_vars as GV


def LCOH_fn(well_cost=None, Ed=None, mdot=None, dP_inj=None):
    """
    Calculates the LCOH based on the well cost as capital cost and the
    pumping cost for operating cost, and the thermal energy recovered.

    Results match simplified_LCOH_fn, but simplified_LCOH_fn is preferred
    because it matches the notation in Birdsell et al. paper.

    well_cost is the cost of one well [$]
    Ed is the annual heat recovered [kWh]
    mdot is flow rate [kg/s]
    dP_inj is the injection pressure

    returns:
    LCOH [$/kWh]
    cap_cost of project [$]
    pump_cost is total project pumping cost, to present day $
    benefit_USD - is the total project amount of revenue, adjusted to
    present-day dollars, if the heat has a value of dollars_per_kWhth
    (usually $0.10/kWhth)
    """
    # Four wells make up the project's capital cost.
    capital_cost = 4.0 * well_cost

    ### The work into pumping is 2*mdot*dP_inj/rho_w.
    ### The 2 is because of loading and unloading.
    # (Removed a dead `dP_total = 2*mdot` assignment that was never used
    # and dimensionally meaningless.)
    joules_per_second_pumping = (2.0 * mdot * dP_inj / GV.rho_w)  # J/s during pumping
    total_pumping_time = 2.0 * GV.t_inj  # seconds of pumping
    joules = joules_per_second_pumping * total_pumping_time
    annual_pumping_cost = joules * GV.joule_to_kWh * GV.dollars_per_kWhth

    cap_cost = 0      # discounted capital cost accumulator
    pump_cost = 0     # discounted total pumping cost accumulator
    benefit = 0       # discounted recovered heat (kWhth)
    benefit_USD = 0   # discounted revenue (USD)

    ### Loop through the lifetime, discounting each year at rate GV.r.
    for t in range(0, GV.lifetime + 1):
        if t == 0:
            # Capital is spent up front (year 0, undiscounted).
            cap_cost = capital_cost / (1. + GV.r) ** t + cap_cost
        else:
            pump_cost = annual_pumping_cost / (1. + GV.r) ** t + pump_cost
            benefit = Ed / (1. + GV.r) ** t + benefit
            benefit_USD = (Ed / (1. + GV.r) ** t) * GV.dollars_per_kWhth + benefit_USD

    LCOH = (cap_cost + pump_cost) / benefit  # $/kWhth
    return [LCOH, cap_cost, pump_cost, benefit_USD]
f44d8b1d2c6100db3baa4d4e0b9885052c27a87f
3,627,034
def confirm_uid(request):
    """Confirm ownership of a UID with the code that was sent to it.

    Validates the request against ``AddUIDCodeSchema``, then forwards the
    secret/code/attempt to the wallet service's ``add-uid-confirm``
    endpoint on behalf of the device's customer.

    Returns an empty dict on success, or a recovery-error payload when the
    wallet service rejects the attempt for a recoverable reason.
    Raises ``Invalid`` when the wallet service reports a validation error.
    """
    params = request.get_params(schemas.AddUIDCodeSchema())
    device = get_device(request)
    customer = device.customer
    # Parameters forwarded verbatim to the wallet ("wc") service.
    wc_params = {
        'secret': params['secret'],
        'code': params['code'],
        'attempt_id': params['attempt_id'],
        'g-recaptcha-response': params['recaptcha_response']
    }
    # Optional: replace an existing UID rather than adding a new one.
    if params.get('replace_uid'):
        wc_params['replace_uid'] = params['replace_uid']
    access_token = get_wc_token(request, customer)
    response = wc_contact(
        request, 'POST', 'wallet/add-uid-confirm', params=wc_params,
        access_token=access_token, return_errors=True)
    if response.status_code != 200:
        if 'invalid' in response.json():
            # Field-level validation failure reported by the wallet service.
            raise Invalid(None, msg=response.json()['invalid'])
        else:
            # Recaptcha required, or attempt expired
            error = response.json().get('error')
            if error == 'captcha_required':
                return recovery_error(request, 'recaptcha_required')
            elif response.status_code == 410:
                # 410 Gone: the confirmation code is no longer valid.
                return recovery_error(request, 'code_expired')
            else:
                return recovery_error(request, 'unexpected_wc_response')
    return {}
cd076ff03aa2bcbaecb15f5bf96e5a0c7bf374a7
3,627,035
def get_coverage(file, label, regions=None, nth=1, readcount=-1):
    """Get coverage for every `nth` position from alignment file.

    Parameters: ``file`` is a BAM/SAM path readable by pysam, ``label``
    keys the per-contig dict, ``regions`` is a region string or list of
    region strings (``chrom`` or ``chrom:start-stop``), ``nth`` samples
    every nth position, and ``readcount`` normalises depth to reads per
    million.  NOTE(review): ``regions=None`` would make the ``for`` loop
    fail — callers appear to always pass regions; confirm.
    """
    readcount = float(readcount)
    # Nested mapping: contig -> label -> position -> normalised depth.
    contigs_coverage = defaultdict(dd)
    with pysam.AlignmentFile(file) as f:
        if isinstance(regions, str):
            regions = [regions]
        for region in regions:
            chrom = region
            if ':' in region and '-' in region:
                # Region of the form "chrom:start-stop".
                chrom = region.rsplit(':', 1)[0]
                start_stop = region.rsplit(':', 1)[1]
                start, stop = start_stop.split('-')
                start = int(start.strip())
                stop = int(stop.strip())
            else:
                # Bare contig name: cover the whole reference sequence.
                start = 0
                stop = f.header.get_reference_length(chrom)
            # Anchor the coverage track at the region boundaries.
            contigs_coverage[chrom][label][start] = 0
            contigs_coverage[chrom][label][stop] = 0
            for pileup_pos in f.pileup(contig=chrom, start=start, stop=stop, max_depth=20000):
                pos = pileup_pos.pos
                if pileup_pos.pos % nth == 0:
                    before = pos - nth
                    after = pos + nth
                    # Zero-pad the neighbouring sampled positions so
                    # plots drop back to zero between covered stretches.
                    if before not in contigs_coverage[chrom][label]:
                        contigs_coverage[chrom][label][before] = 0
                        contigs_coverage[chrom][label][after] = 0
                    # Depth in reads-per-million; 0 when readcount is falsy.
                    contigs_coverage[chrom][label][pos] = pileup_pos.nsegments / (readcount / 10**6) if readcount else 0
    return contigs_coverage
b972133f98208f0c916f6221b75b7755d3b48ab8
3,627,036
def equations_to_matrix() -> list:
    """
    Read an augmented matrix from user input (user inputs = linear equations).

    Prompts for the dimensions, then reads each row as space-separated
    integers.  A row of the wrong length is re-prompted until valid —
    previously an invalid row restarted the whole function recursively,
    discarded that result, and left the matrix short one row.

    :return: augmented matrix formed from user input
    :rtype: list
    """
    n = int(input("input number of rows "))
    m = int(input("input number of columns "))
    A = []
    for row_space in range(n):
        print("input row ", row_space + 1)
        row = input().split()
        # Re-prompt for *this* row instead of recursively restarting
        # the whole matrix (the old recursion's result was discarded).
        while len(row) != m:
            print("length must be the column size of A")
            row = input().split()
        A.append(list(map(int, row)))  # map converts strings to integers
    print(A)
    return A
702c252fed2d7127e4e5f9e5433ca4c29867138c
3,627,037
from datetime import datetime


async def base_control(timestamp: datetime, base_id: int,
                       new_faction_id: int, old_faction_id: int,
                       server_id: int, continent_id: int,
                       conn: Connection[Row]) -> bool:
    """Dispatch a ``BaseControl`` Blip to the database.

    The annotation was ``datetime.datetime``, which is invalid under the
    module's ``from datetime import datetime`` import; fixed to ``datetime``.

    Returns True when the row was inserted, False when the insert hit a
    foreign-key violation (e.g. an unknown base/faction id), which is
    logged at debug level and swallowed.
    """
    try:
        await conn.execute(
            SQL_BASE_CONTROL,
            (timestamp, server_id, continent_id, base_id,
             old_faction_id, new_faction_id))
    except ForeignKeyViolation as err:
        # Expected for blips referencing rows we do not track; skip them.
        log.debug('Ignored FK violation: %s', err)
        return False
    return True
a9737f23f64baca2a8e1fa608815da54292b2321
3,627,038
def get_ports():
    """List all serial ports with connected devices.

    Returns
    -------
    port_list : list(str)
        Names of all the serial ports with connected devices.
    """
    # ``device`` is the OS-level port name (e.g. 'COM3' or '/dev/ttyUSB0');
    # a comprehension replaces the old index-based in-place rewrite loop.
    return [port.device for port in list_ports.comports()]
61ebb00920f466ecc6e0fd48114a3c31b431d8db
3,627,039
def same_origin(origin1, origin2):
    """
    Return True if these two origins have at least one common ASN.

    Each origin may be a single ASN (int) or a collection of ASNs.
    """
    first_is_int = isinstance(origin1, int)
    second_is_int = isinstance(origin2, int)
    if first_is_int and second_is_int:
        return origin1 == origin2
    if first_is_int:
        # Single ASN against a collection.
        return origin1 in origin2
    if second_is_int:
        return origin2 in origin1
    # Two collections: shared ASN iff they are not disjoint.
    return not origin1.isdisjoint(origin2)
1fbc55d9dcfb928c173128a5b386cc6375ec0cde
3,627,040
import random


def generate_output(model, sequences, idx_word, seed_length=50, new_words=50,
                    diversity=1, return_output=False, n_gen=1):
    """Generate `new_words` words of output from a trained model and format
    into HTML.

    A random seed window is cut from a random training sequence; the model
    then predicts one token at a time, sampling from a temperature-scaled
    softmax (``diversity``).  When ``return_output`` is True the raw token
    lists are returned instead of HTML.
    """
    # Choose a random sequence
    seq = random.choice(sequences)

    # Choose a random starting point (the -10 leaves room for the target).
    seed_idx = random.randint(0, len(seq) - seed_length - 10)
    # Ending index for seed
    end_idx = seed_idx + seed_length

    gen_list = []

    for n in range(n_gen):
        # Extract the seed sequence
        seed = seq[seed_idx:end_idx]
        original_sequence = [idx_word[i] for i in seed]
        # '#' visually marks where generation takes over from the seed.
        generated = seed[:] + ['#']

        # Find the actual entire sequence
        actual = generated[:] + seq[end_idx:end_idx + new_words]

        # Keep adding new words
        for i in range(new_words):
            # Make a prediction from the seed
            preds = model.predict(np.array(seed).reshape(1, -1))[0].astype(
                np.float64)

            # Diversify: temperature-scale the logits, then re-normalise.
            preds = np.log(preds) / diversity
            exp_preds = np.exp(preds)

            # Softmax
            preds = exp_preds / sum(exp_preds)

            # Choose the next word by sampling from the distribution.
            probas = np.random.multinomial(1, preds, 1)[0]
            next_idx = np.argmax(probas)

            # New seed adds on old word (window grows rather than slides).
            # seed = seed[1:] + [next_idx]
            seed += [next_idx]
            generated.append(next_idx)

        # Showing generated and actual abstract
        n = []
        for i in generated:
            n.append(idx_word.get(i, '< --- >'))
        gen_list.append(n)

    a = []
    for i in actual:
        a.append(idx_word.get(i, '< --- >'))

    # Strip the seed prefix so only the newly generated part remains.
    a = a[seed_length:]
    gen_list = [gen[seed_length:seed_length + len(a)] for gen in gen_list]

    if return_output:
        return original_sequence, gen_list, a

    # HTML formatting
    seed_html = ''
    seed_html = addContent(seed_html, header(
        'Seed Sequence', color='darkblue'))
    seed_html = addContent(seed_html,
                           box(remove_spaces(' '.join(original_sequence))))

    gen_html = ''
    gen_html = addContent(gen_html, header('RNN Generated', color='darkred'))
    gen_html = addContent(gen_html, box(remove_spaces(' '.join(gen_list[0]))))

    a_html = ''
    a_html = addContent(a_html, header('Actual', color='darkgreen'))
    a_html = addContent(a_html, box(remove_spaces(' '.join(a))))

    return seed_html, gen_html, a_html
8072723e9744e8322de87ebb3437d6e96517e6db
3,627,041
def stagewise_grad(w, X, Y, alpha, valpha):
    """
    the gradient loss used in the stage-wise gradient descent (where the
    value-at-risk is fixed)

    Parameters
    ----------
    w : d-array, candidate
    X : d-n array, sample
    Y : n array, sample
    alpha : float, quantile level
    valpha : float, value-at-risk

    Returns
    -------
    d-array, gradient loss at w
    """
    d = len(X)
    # Per-coordinate gradient of the (1/alpha)-scaled squared residual:
    # one row per feature of X.
    g = [(1 / alpha) * 2 * X[i] * (np.dot(w, X) - Y) for i in range(d)]
    # Zero the gradient wherever the loss is below the value-at-risk.
    # NOTE(review): relies on broadcasting between loss_function's output
    # and g — presumably loss_function returns a per-sample array; confirm.
    return np.where(loss_function(w, X, Y) >= valpha, g, 0)
af2b734c150d65f3ee8d7e6815a4065e31ad754c
3,627,042
def parse_location(url):
    """
    Parse latitude and longitude from a Google Maps URL.

    URL is in the form:
    https://maps.google.com/maps/ms?...&ll=9.029795,-83.299043&...

    Sometimes there is a weird ll query param like this:
    https://maps.google.com/maps/ms?...&ll=9.029795, -83.299043,255&...

    Returns (0.0, 0.0) when no ``ll`` parameter is present.
    """
    query = parse_qs(urlparse(url).query, keep_blank_values=True)
    if 'll' not in query:
        return (0.0, 0.0)
    # Convert every comma-separated component, then keep only lat/lon;
    # float() tolerates the stray whitespace in the second example above.
    parts = query['ll'][0].split(',')
    return tuple(float(component) for component in parts)[0:2]
6964f031d58d4bcedbced4d6bb90db8e75e1b006
3,627,043
def conv2d_zeros(name, x, width, filter_size=[3, 3], stride=[1, 1],
                 pad="SAME", logscale_factor=3, skip=1, edge_bias=True):
    """Conv2dZeros is just a normal Conv2d layer with zero initialization
    described in paper 3.3 section: We initialize the last convolution of
    each NN() with zeros, such that each affine coupling layer initially
    performs an identity function; we found that this helps training very
    deep networks.

    ``skip`` > 1 selects a dilated (atrous) convolution; ``edge_bias``
    replaces SAME padding with an explicit edge padding helper.
    NOTE: the mutable list defaults are shared across calls — harmless
    here because they are never mutated, but do not mutate them.
    """
    with tf.variable_scope(name):
        # Replace implicit SAME padding with explicit edge padding so the
        # border pixels get a learned bias treatment.
        if edge_bias and pad == "SAME":
            x = add_edge_padding(x, filter_size)
            pad = 'VALID'

        n_in = int(x.get_shape()[3])
        stride_shape = [1] + stride + [1]
        filter_shape = filter_size + [n_in, width]
        # Zero-initialised weights: the layer starts as an identity map.
        w = tf.get_variable("W", filter_shape, tf.float32,
                            initializer=tf.zeros_initializer())
        if skip == 1:
            x = tf.nn.conv2d(x, w, stride_shape, pad, data_format='NHWC')
        else:
            # Dilated convolution path; striding is unsupported here.
            assert stride[0] == 1 and stride[1] == 1
            x = tf.nn.atrous_conv2d(x, w, skip, pad)
        x += tf.get_variable("b", [1, 1, 1, width],
                             initializer=tf.zeros_initializer())
        # Learned per-channel log-scale (also zero-initialised), amplified
        # by logscale_factor as in the Glow reference implementation.
        x *= tf.exp(
            tf.get_variable("logs", [1, width],
                            initializer=tf.zeros_initializer()) * logscale_factor)
    return x
77804460ca179eb45bd89b7876f4ac2842f93c87
3,627,044
def deepcopy(value):
    """
    The default copy.deepcopy seems to copy all objects and some are not
    `copy-able`.
    We only need to make sure the provided data is a copy per key, object
    does not need to be copied.

    Recursively copies dict/list/tuple containers; leaf objects are
    returned by reference.  NOTE: this shadows ``copy.deepcopy`` by name.
    """
    # Leaf values (non-containers) are shared, not copied.
    if not isinstance(value, (dict, list, tuple)):
        return value
    if isinstance(value, dict):
        # ``copy`` is a local accumulator (shadows the stdlib module name).
        copy = {}
        for k, v in value.items():
            copy[k] = deepcopy(v)
    if isinstance(value, tuple):
        # Pre-size with index placeholders, fill via get_keys, then freeze.
        # NOTE(review): presumably get_keys yields the valid indices; any
        # index it skips keeps its integer placeholder — confirm intended.
        copy = list(range(len(value)))
        for k in get_keys(list(value)):
            copy[k] = deepcopy(value[k])
        copy = tuple(copy)
    if isinstance(value, list):
        copy = list(range(len(value)))
        for k in get_keys(value):
            copy[k] = deepcopy(value[k])
    return copy
e74c22cb8980ce70085f3d58873698c5a03c4681
3,627,045
import re


def split_delimited_symbol(symbol):
    """
    Takes in a symbol that may be delimited and splits it in to a company
    symbol and share class symbol. Also returns the fuzzy symbol, which is
    the symbol without any fuzzy characters at all.

    Parameters
    ----------
    symbol : str
        The possibly-delimited symbol to be split

    Returns
    -------
    company_symbol : str
        The company part of the symbol.
    share_class_symbol : str
        The share class part of a symbol.
    """
    # return blank strings for any bad fuzzy symbols, like NaN or None
    if symbol in _delimited_symbol_default_triggers:
        return '', ''

    symbol = symbol.upper()

    # Split at most once on the first recognised delimiter, so symbols
    # like "BRK.A" become ("BRK", "A").
    split_list = re.split(
        pattern=_delimited_symbol_delimiters_regex,
        string=symbol,
        maxsplit=1,
    )

    # Break the list up in to its two components, the company symbol and the
    # share class symbol
    company_symbol = split_list[0]
    if len(split_list) > 1:
        share_class_symbol = split_list[1]
    else:
        # No delimiter found: the whole symbol is the company symbol.
        share_class_symbol = ''

    return company_symbol, share_class_symbol
770247e4b6a61794aedb73deab3fd85329b5a7c1
3,627,046
import urllib
import hashlib


def gravatar(email, size=48):
    """hacked from djangosnippets.org, but basically given an email address
    render an img tag with the hashed up bits needed for leetness
    omgwtfstillreading

    Fixed for Python 3: ``urllib.urlencode`` no longer exists (it moved to
    ``urllib.parse``) and ``hashlib.md5`` requires bytes, not str.
    """
    from urllib.parse import urlencode  # py3 home of the old urllib.urlencode

    # md5 needs bytes; accept str callers transparently.
    if isinstance(email, str):
        email = email.encode('utf-8')
    url = "http://www.gravatar.com/avatar.php?%s" % urlencode({
        'gravatar_id': hashlib.md5(email).hexdigest(),
        'size': str(size)
    })
    return ('<img src="%s" width="%s" height="%s" alt="gravatar" '
            'class="gravatar" border="0" />' % (url, size, size))
8133e9857311e2163c7fde400db7d47aedc4da86
3,627,047
def carli(
        p0: np.ndarray,
        p1: np.ndarray,
) -> float:
    """
    Carli bilateral index, using price information.

    The Carli index is the arithmetic mean of the price relatives
    (the previous docstring showed the Dutot ratio-of-sums formula,
    which is a different index):

    .. math::
        \\text{Carli} = \\frac{1}{n}\\sum_{i=1}^{n} \\frac{p_{1,i}}{p_{0,i}}

    :param p0: Base price vector.
    :param p1: Current price vector.
    """
    return np.mean(p1 / p0)
6361747561919d0c0451c87e387994f82708cd70
3,627,048
def format_outcome_results(outcome_results):
    """
    Cleans up formatting of outcome_results DataFrame

    :param outcome_results: outcome_results DataFrame
    :return: Reformatted outcomes DataFrame
    """
    # Give the nested API column a friendlier name and an integer dtype.
    cleaned = outcome_results.rename(
        columns={"links.learning_outcome": "outcome_id"})
    cleaned["outcome_id"] = cleaned["outcome_id"].astype("int")
    # Order rows by student, outcome, then submission/assessment time.
    sort_keys = ["links.user", "outcome_id", "submitted_or_assessed_at"]
    return cleaned.sort_values(sort_keys)
9c22481725f2782d614b48582edfcd60db284c13
3,627,049
from torch import from_numpy, autograd


def infer(batch, model, lite, framework):
    """
    Perform inference on supplied image batch.

    Args:
        batch: ndarray
            Stack of preprocessed images
        model: deep learning model
            Initialized EfficientPose model to utilize (RT, I, II, III, IV,
            RT_Lite, I_Lite or II_Lite)
        lite: boolean
            Defines if EfficientPose Lite model is used
        framework: string
            Deep learning framework to use (Keras, TensorFlow, TensorFlow
            Lite or PyTorch)

    Returns:
        EfficientPose model outputs for the supplied batch.
    """

    # Keras
    if framework in ['keras', 'k']:
        if lite:
            batch_outputs = model.predict(batch)
        else:
            # Full models emit multiple heads; the last one is the final
            # confidence-map output.
            batch_outputs = model.predict(batch)[-1]

    # TensorFlow
    elif framework in ['tensorflow', 'tf']:
        # tf.profiler.experimental.start('logdir')
        output_tensor = model.graph.get_tensor_by_name('upscaled_confs/BiasAdd:0')
        # Lite and full graphs were exported with different input names.
        if lite:
            batch_outputs = model.run(output_tensor, {'input_1_0:0': batch})
        else:
            batch_outputs = model.run(output_tensor, {'input_res1:0': batch})
        # tf.profiler.experimental.stop()

    # TensorFlow Lite
    elif framework in ['tensorflowlite', 'tflite']:
        input_details = model.get_input_details()
        output_details = model.get_output_details()
        model.set_tensor(input_details[0]['index'], batch)
        model.invoke()
        batch_outputs = model.get_tensor(output_details[-1]['index'])

    # PyTorch
    elif framework in ['pytorch', 'torch']:
        # NHWC -> NCHW, as PyTorch convolutions expect channels-first.
        batch = np.rollaxis(batch, 3, 1)
        batch = from_numpy(batch)
        batch = autograd.Variable(batch, requires_grad=False).float()
        batch_outputs = model(batch)
        # Back to NHWC so all frameworks return the same layout.
        batch_outputs = batch_outputs.detach().numpy()
        batch_outputs = np.rollaxis(batch_outputs, 1, 4)

    return batch_outputs
671e33563a2b890b089c475600c3f74c8245fb18
3,627,050
import os


def get_package_data(package):
    """
    Return all files under the root package, that are not in a package
    themselves.
    """
    collected = []
    prefix = package + os.sep
    for dirpath, dirnames, filenames in os.walk(package):
        # Directories containing __init__.py are Python packages;
        # setuptools discovers those itself, so skip them here.
        if os.path.exists(os.path.join(dirpath, "__init__.py")):
            continue
        relative_base = dirpath.replace(prefix, "", 1)
        collected.extend(
            os.path.join(relative_base, name)
            for name in filenames
            if not name.endswith('pyc')
        )
    return {package: collected}
a3e52a03681fdd65c46798b79a7f1427a16221d3
3,627,051
import sys
import requests


def delete_saved_search(search_name, owner):
    """Deletes an existing saved search. This is used when overwriting a
    saved search.

    Looks up the saved search by title via the Voyager REST API and, if
    found, issues a DELETE for it.  Returns ``(True, '')`` on success or
    when no matching search exists, and ``(False, reason)`` on failure.
    """
    try:
        # The Voyager base URL is derived from the solr connection string
        # passed as the second CLI argument (key=value form).
        voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]
        get_url = "{0}/api/rest/display/ssearch/export".format(voyager_server)
        get_response = requests.get(get_url, verify=verify_ssl,
                                    headers={'Content-type': 'application/json',
                                             'x-access-token': task_utils.get_security_token(owner)})
        if get_response.status_code == 200:
            delete_url = ''
            saved_searches = get_response.json()['searches']
            # Find the search whose title matches; titles act as the key.
            for ss in saved_searches:
                if ss['title'] == search_name:
                    search_id = ss['id']
                    delete_url = "{0}/api/rest/display/ssearch/{1}".format(voyager_server, search_id)
                    break
            if delete_url:
                res = requests.delete(delete_url, verify=verify_ssl,
                                      headers={'Content-type': 'application/json',
                                               'x-access-token': task_utils.get_security_token(owner)})
                if not res.status_code == 200:
                    if hasattr(res, 'content'):
                        # SECURITY NOTE(review): eval() on a server response
                        # executes arbitrary code if the server is
                        # compromised — should be json.loads; flagged only.
                        return False, eval(res.content)['error']
                    else:
                        return False, 'Error creating saved search: {0}: {1}'.format(search_name, res.reason)
                else:
                    return True, ''
            else:
                # Nothing to delete — treated as success.
                return True, ''
        else:
            # Same eval() caveat as above.
            return False, eval(get_response.content)['message']
    except requests.HTTPError as http_error:
        return False, http_error
    except requests.exceptions.InvalidURL as url_error:
        return False, url_error
    except requests.RequestException as re:
        return False, re
f923b2acd95f07ae4d96b46119cf8ead283be1f6
3,627,052
def get_edges(t, p):
    """
    Gets the edges (segments) that contain point p as their right endpoint
    or in the interior

    Returns the pair ``(lr, lc)``: segments with ``p`` as their right
    endpoint, and segments containing ``p`` strictly inside.
    """
    lr = []  # segments whose right endpoint is p
    lc = []  # segments that contain p in their interior
    for s in AVLTree(t):
        if s.rp == p:
            lr.append(s)
        elif s.lp == p and s.status == INTERIOR:
            # Left endpoint match only counts while the segment is active.
            lc.append(s)
        elif sideplr(p, s.lp, s.rp) == 0:
            # p is collinear with the segment, i.e. lies on its interior.
            lc.append(s)
    return lr, lc
6252f4ca41836f6f7203d115d37cc4da6d220b0e
3,627,053
def register_derived_unit(symbol, singular_name, base_unit, multiple=1,
                          plural_name=None):
    """Registers a unit based on another unit. i.e. it should be a measure
    of the same quantity.

    The derived unit inherits the base unit's quantities and quantity
    vector; its multiple is the base unit's multiple scaled by ``multiple``.
    """
    combined_multiple = base_unit.multiple * multiple
    return register_unit(
        symbol,
        singular_name,
        base_unit.quantities,
        base_unit.quantity_vector,
        multiple=combined_multiple,
        plural_name=plural_name,
    )
d069ab157c5c85729a1a9a2ea8b77419d483b601
3,627,054
from typing import Optional

import requests


def soup(
    url: str, *args, session: Optional[requests.Session] = None, **kwargs
) -> bs4.BeautifulSoup:
    """Get a url as a BeautifulSoup.

    Args:
        url: The url to get a soup from.
        *args: Passed to session.get().
        session: The session to use to download the soup. Defaults to None.
        **kwargs: Passed to session.get().
    """
    # Fall back to a throwaway session when the caller did not supply one.
    if session is None:
        session = requests.Session()
    page = session.get(url, *args, **kwargs)
    return bs4.BeautifulSoup(page.text, BS4_PARSER)
25e37b9ecf59a5aa9ef4ef5d6ee50c3b33f123ec
3,627,055
from win32api import GetSystemMetrics


def _get_max_width():
    """Return the total width and height of the virtual screen.

    Queries Windows for the bounding box spanning all connected monitors.
    """
    # SM_CXVIRTUALSCREEN (78): combined width across all monitors.
    virtual_width = GetSystemMetrics(78)
    # SM_CYVIRTUALSCREEN (79): combined height across all monitors.
    virtual_height = GetSystemMetrics(79)
    return virtual_width, virtual_height
e2382eab98faecd7d8cf9ba2689897d2512c39db
3,627,056
def preprocess_stack_parallel(hier_graph_dict: dict, circuit_name, G):
    """
    Preprocess the input graph by reducing parallel caps, series resistance,
    identify stacking, adding parallel transistors.

    Parameters
    ----------
    hier_graph_dict : dict
        dictionary of all circuit in spice file
    circuit_name : str
        name of circuit to be processed.
    G : networkx graph
        graph of circuit.

    Returns
    -------
    None.

    NOTE(review): despite the docstring, the function returns
    ``circuit_name`` when the subcircuit collapses to a single device, and
    ``None`` otherwise — confirm which contract callers rely on.
    """
    logger.debug(f"no of nodes: {len(G)}")
    # One initial pass of every reduction.
    add_parallel_caps(G)
    add_series_res(G)
    add_stacked_transistor(G)
    add_parallel_transistor(G)
    initial_size = len(G)
    delta = 1
    # Keep reducing until the graph stops shrinking (fixed point).
    while delta > 0:
        logger.debug(f"CHECKING stacked transistors {circuit_name} {G}")
        add_stacked_transistor(G)
        add_parallel_transistor(G)
        delta = initial_size - len(G)
        initial_size = len(G)
    #remove single instance subcircuits
    # Non-net nodes are device/subcircuit instances.
    attributes = [attr for node, attr in G.nodes(data=True)
                  if 'net' not in attr["inst_type"]]
    if len(attributes) == 1:
        #Check any existing hier
        if 'sub_graph' in attributes[0].keys() and attributes[0]['sub_graph'] is not None:
            logger.debug(f"sub_graph nodes {attributes[0]['sub_graph'].nodes()}")
            # Recurse into the instance's own subcircuit first.
            stacked_ckt = preprocess_stack_parallel(
                hier_graph_dict, attributes[0]["real_inst_type"],
                attributes[0]["sub_graph"])
            if stacked_ckt == None:
                return None
        # Flatten: replace every instance of this circuit elsewhere in the
        # hierarchy with the single remaining device.
        for ckt in hier_graph_dict.values():
            for node, attr in ckt["graph"].nodes(data=True):
                if 'net' not in attr["inst_type"] and attr["inst_type"] == circuit_name:
                    logger.debug(f"updating instance {node} {attr} with stacked device {attributes}")
                    attr["inst_type"] = attributes[0]["inst_type"]
                    attr["real_inst_type"] = attributes[0]["real_inst_type"]
                    # Instance values override the template's defaults.
                    attr["values"] = {**attributes[0]["values"], **attr["values"]}
                    attr["sub_graph"] = None
                    # Remap the device's ports through the instance's nets.
                    attr["ports"] = [attr["connection"][port]
                                     for port in attributes[0]["ports"]
                                     if port in attr["connection"]]
                    attr["edge_weight"] = attributes[0]["edge_weight"]
                    attr["connection"] = None
        return circuit_name
    else:
        return None
dd16d10fd321c01f27a83963165041519edbe235
3,627,057
def list_table_names():
    """List known table names from configuration, without namespace."""
    schemas = get_config().yaml['schemas']
    return schemas.keys()
27dcf56818a120ebfffc633c36ade84b4fc905cf
3,627,058
def execute_quantum_request(backend, quantum_request):
    """
    Takes a quantum request and execute the content, returns the
    measurement results for Alice and Bob as ``(measA, measB)``.

    Raises ValueError for an unsupported method — previously this hit a
    bare ``raise`` with no active exception, which surfaced as an
    unhelpful RuntimeError.
    """
    method = quantum_request["method"]
    A_basis = quantum_request["A_basis"]
    B_basis = quantum_request["B_basis"]
    if method == "BB84":
        A_bitstr = quantum_request["A_bitstr"]
        measB = BB84(backend, A_bitstr, A_basis, B_basis)
        # BB84 only produces Bob's measurements; Alice's list is empty.
        measA = []
        return measA, measB
    if method == "E91":
        measA, measB = E91(backend, A_basis, B_basis)
        return measA, measB
    raise ValueError(
        "Unsupported quantum key distribution method: {!r}".format(method))
1e80a8dcfcd2908fe1916c76a8340c2127056e57
3,627,059
def get_ResidualDemand(demand_sector):
    """loader function for parameter ResidualDemand"""
    parameter_name = "ResidualDemand"
    return get_demand_sector_parameter(parameter_name, demand_sector)
d467be1ed8be1ff5a048558cfad53422aaa03399
3,627,060
import six


def parse_bar_separated_phrase(bar_separated_phrase):
    """
    Parses a vertical bar separated phrase into a Phrase object. The expect
    format is that used by e.g. the LCC:

    Aber|KON es|PPER gibt|VVFIN keine|PIAT Garantie|NN

    :param bar_separated_phrase: space-separated "word|POS" tokens
    :return: the Phrase built by ``word_pos_tuples_to_phrase``
    """
    assert isinstance(bar_separated_phrase, six.string_types)
    space_tokenized = bar_separated_phrase.split(" ")
    # rsplit with maxsplit=1 so words containing '|' keep their prefix and
    # only the final bar separates the POS tag.
    bar_tokenized = list(map(lambda x: x.rsplit("|", 1), space_tokenized))
    return word_pos_tuples_to_phrase(bar_tokenized)
2773e51f1667318a8a7b86e7771cc81803bbeaaa
3,627,061
def preprocess(filename):
    """
    Preprocesses the file at the specified name and returns only the
    branching (conditional) lines in the dump, defined to have (in the
    format specified for the offline dump versions):
        1) Reads the flags register (that is, conditionRegister == 'R'), and
        2) Is either taken or not taken (that is, TNnotBranch != '-').
    """
    cleaned = []
    with open(filename, "r") as f:
        for line in f:
            # Whitespace-split columns; s.FLAGS / s.BRANCH index into them.
            instruction_dump = line.split()
            if instruction_dump[s.FLAGS] == 'R' and \
                    instruction_dump[s.BRANCH] != '-':
                cleaned.append(instruction_dump)
    # 2-D array: one row per conditional-branch instruction.
    return np.array(cleaned)
a3ccab311151ed45d8c6a4ff776fc410381098a8
3,627,062
import os


def load_paste_app(filename, appname):
    """Builds a wsgi app from a paste config, None if app not configured."""
    config_path = os.path.abspath(filename)
    try:
        return deploy.loadapp("config:%s" % config_path, name=appname)
    except LookupError:
        # The named app is not defined in this paste config; report absence.
        return None
374b933923367756e091a723789cc666f251fec3
3,627,063
def Hunter_Lab_to_XYZ(
    Lab: ArrayLike,
    XYZ_n: ArrayLike = TVS_ILLUMINANTS_HUNTERLAB[
        "CIE 1931 2 Degree Standard Observer"
    ]["D65"].XYZ_n,
    K_ab: ArrayLike = TVS_ILLUMINANTS_HUNTERLAB[
        "CIE 1931 2 Degree Standard Observer"
    ]["D65"].K_ab,
) -> NDArray:
    """
    Converts from *Hunter L,a,b* colour scale to *CIE XYZ* tristimulus
    values.

    Parameters
    ----------
    Lab
        *Hunter L,a,b* colour scale array.
    XYZ_n
        Reference *illuminant* tristimulus values.
    K_ab
        Reference *illuminant* chromaticity coefficients, if ``K_ab`` is set
        to *None* it will be computed using
        :func:`colour.XYZ_to_K_ab_HunterLab1966`.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Notes
    -----
    +------------+-----------------------+-----------------+
    | **Domain** | **Scale - Reference** | **Scale - 1**   |
    +============+=======================+=================+
    | ``Lab``    | ``L`` : [0, 100]      | ``L`` : [0, 1]  |
    |            |                       |                 |
    |            | ``a`` : [-100, 100]   | ``a`` : [-1, 1] |
    |            |                       |                 |
    |            | ``b`` : [-100, 100]   | ``b`` : [-1, 1] |
    +------------+-----------------------+-----------------+
    | ``XYZ_n``  | [0, 100]              | [0, 1]          |
    +------------+-----------------------+-----------------+

    +------------+-----------------------+-----------------+
    | **Range**  | **Scale - Reference** | **Scale - 1**   |
    +============+=======================+=================+
    | ``XYZ``    | [0, 100]              | [0, 1]          |
    +------------+-----------------------+-----------------+

    References
    ----------
    :cite:`HunterLab2008b`

    Examples
    --------
    >>> Lab = np.array([34.92452577, 47.06189858, 14.38615107])
    >>> D65 = TVS_ILLUMINANTS_HUNTERLAB[
    ...     'CIE 1931 2 Degree Standard Observer']['D65']
    >>> Hunter_Lab_to_XYZ(Lab, D65.XYZ_n, D65.K_ab)
    array([ 20.654008,  12.197225,   5.136952])
    """

    L, a, b = tsplit(to_domain_100(Lab))
    X_n, Y_n, Z_n = tsplit(to_domain_100(XYZ_n))
    # Derive the chromaticity coefficients from the illuminant when the
    # caller did not supply them explicitly.
    K_a, K_b = (
        tsplit(XYZ_to_K_ab_HunterLab1966(XYZ_n))
        if K_ab is None
        else tsplit(K_ab)
    )

    L_100 = L / 100
    L_100_2 = L_100 ** 2

    # Inverse of the Hunter Lab forward equations: Y from L, then X and Z
    # from the chromatic axes a and b.
    Y = L_100_2 * Y_n
    X = ((a / K_a) * L_100 + L_100_2) * X_n
    Z = -((b / K_b) * L_100 - L_100_2) * Z_n

    XYZ = tstack([X, Y, Z])

    return from_range_100(XYZ)
e9270f674a63b728c15b3865134c01f0d6197dfd
3,627,064
def post_multipart(host, selector, fields, files):
    """
    Post fields and files to an http host as multipart/form-data.
    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be
    uploaded as files
    Return the server's response page.

    NOTE(review): Python 2 only — uses the removed ``httplib.HTTP`` API;
    would need ``http.client`` on Python 3.  Returns the literal string
    "#fail" on any error (the bare except below swallows everything).
    """
    (content_type, body) = encode_multipart_formdata(fields, files)
    #print "<<<%s>>>\n\n" % body
    #sys.exit()
    errcode = 0
    try:
        h = httplib.HTTP(host)
        h.putrequest('POST', selector)
        h.putheader('Host', host)
        h.putheader('Content-Type', content_type)
        h.putheader('Content-Length', str(len(body)))
        h.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
        h.putheader('Origin', 'https://translate.google.com')
        h.putheader('User-Agent', 'Mozilla/5.0 (X11; Linux i686 (x86_64)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36')
        h.endheaders()
        h.send(body)
        errcode, errmsg, headers = h.getreply()
        return h.file.read()
    except:
        # Bare except: deliberately best-effort, any failure yields "#fail".
        log( "Level upload failed" )
        errcode = 0
        return "#fail"
d8a50bc086f673798c33b191248f059f44f07aea
3,627,065
def client():
    """Static link to MongoDB connection."""
    mongo_client = AsyncIOMotorClient(URI)
    return mongo_client
3a48977481d3cba66f98b459d559bd78974748e5
3,627,066
def mask_ring_median(values_array, positions_array, alpha):  # pragma: no cover
    """Find outlier pixels in a single ring via a single pass with the median.

    Parameters
    ----------
    values_array : ndarray
        The ring values
    positions_array : ndarray
        The positions of the values
    alpha: float
        The threshold

    Returns
    -------
    removals: np.ndarray
        The positions of pixels to be removed from the data
    """
    # Robust z-score: deviation from the median, scaled by the std dev.
    deviation = np.abs(values_array - np.median(values_array))
    z_scores = deviation / np.std(values_array)
    # Positions whose z-score exceeds the threshold are flagged.
    outlier_idx = np.where(z_scores > alpha)
    return positions_array[outlier_idx]
7223e985ccf9e6f395f1ae92326d3743190296b2
3,627,067
from core.models import Snapshot
from pathlib import Path
from typing import List


def load_main_index(out_dir: Path=OUTPUT_DIR, warn: bool=True) -> List[Link]:
    """parse and load existing index with any new links from import_path
    merged in"""
    try:
        all_snapshots = Snapshot.objects.all()
    except (KeyboardInterrupt, SystemExit):
        # Convert an interrupt during the query into a clean exit.
        raise SystemExit(0)
    return all_snapshots
b02ab8349d44553c7780255867edceaa89a68c5d
3,627,068
import subprocess


def upstream(env=env):
    """Get 'upstream' URL for the git repository.

    Resolution order: the current branch's remote-tracking remote, the
    sole configured remote, then 'origin', then 'upstream', else None.
    """
    fetch_remotes = remotes('fetch')
    # Try the remote tracking value for this branch
    try:
        tracking = subprocess.check_output(
            ['git', 'rev-parse', '--symbolic-full-name', '@{u}'],
            env=env,
        ).decode('utf-8').strip()
        # --symbolic-full-name yields e.g. 'refs/remotes/origin/main';
        # strip the prefix before isolating the remote name.
        if tracking.startswith('refs/remotes/'):
            tracking = tracking[len('refs/remotes/'):]
        if tracking and '/' in tracking:
            # Split only on the first '/' — branch names may themselves
            # contain slashes (e.g. 'origin/feature/foo'); the old
            # unbounded split raised ValueError for those.
            remote, remote_branch = tracking.split('/', 1)
            assert remote in fetch_remotes, (remote, fetch_remotes)
            return fetch_remotes[remote]
    except subprocess.CalledProcessError:
        pass
    # If there is only one remote, use that
    if len(fetch_remotes) == 1:
        return fetch_remotes.popitem()[-1]
    # Otherwise try using 'origin'
    if 'origin' in fetch_remotes:
        return fetch_remotes['origin']
    # Or 'upstream'
    if 'upstream' in fetch_remotes:
        return fetch_remotes['upstream']
    return None
9c680f2a06b7d38be7b9ee800f537e74bd0bc8e8
3,627,069
def ref_model_and_multivariate_training_data(training_data_covar_complex):
    """
    defines a multivariate GP model and the data it is defined by

    :return: covars
    :return: train_X, train_Y (training data, from
        custom_models_simple_training_data_4elements above)
    :return: model_obj (model object, SingleTaskGP)
    :return: lh, ll (model likelihood and marginal log-likelihood)
    """
    # Unpack the fixture tuple: covariates plus training inputs/targets.
    covars = training_data_covar_complex[0]
    train_X = training_data_covar_complex[1]
    train_Y = training_data_covar_complex[2]

    # set up the model
    model_obj = SingleTaskGP(train_X, train_Y)

    # the likelihood
    lh = model_obj.likelihood

    # define the "loss" function
    ll = ExactMarginalLogLikelihood(lh, model_obj)

    return covars, train_X, train_Y, model_obj, lh, ll
b56e9f211bcc1d526f4526172f81ee321518880f
3,627,070
def get_all_movies(request): """ List of all movies names """ try: name = request.GET.get('name', None) offset = request.GET.get('offset', 0) limit = request.GET.get('limit', 10) movies_list = [] count = 0 if not name: movies_list = Movie.objects.filter().order_by('-date_created')[offset:limit] count = Movie.objects.count() if name: movies_list = Movie.objects.filter(Q(name__icontains = name)).order_by('-date_created')[offset:limit] count = Movie.objects.filter(Q(name__icontains = name)).count() movies_serializer = MovieSerializer(movies_list, many=True) HttpResponse.status_code = status.HTTP_200_OK return JsonResponse({'count': count,'movies': movies_serializer.data}) except CustomApiException as err: HttpResponse.status_code = err.status_code return JsonResponse({'status_code': err.status_code, 'message': err.detail}) except Exception, e: HttpResponse.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR return JsonResponse({'status_code': status.HTTP_500_INTERNAL_SERVER_ERROR, 'message': str(e)})
df426e8a7331cc6a8458d647ebef8aca350145f1
3,627,071
def is_email_or_url(string: str) -> bool:
    """Checks if string is either an email or url.

    Bug fix: both branches previously ran URL_VALIDATOR, so an email
    address was never recognised; the second branch now validates emails.
    """
    # Local import keeps the module's import surface unchanged.
    from django.core.validators import validate_email

    try:
        URL_VALIDATOR(string)
        return True
    except ValidationError:
        pass
    try:
        validate_email(string)
        return True
    except ValidationError:
        pass
    return False
2e8e0540ee1a81b898287b7b54509ce46f9bcacf
3,627,072
def get_synthetic_preds(synthetic_data_func, n=1000, estimators=None):
    """Generate predictions for synthetic data using specified function
    (single simulation)

    Args:
        synthetic_data_func (function): synthetic data generation function
        n (int, optional): number of samples
        estimators (dict of object): dict of names and objects of treatment
            effect estimators; defaults to the built-in S/T/X/R learner grid.
            (Default changed from a shared mutable ``{}`` to ``None``;
            behaviour is identical.)

    Returns:
        (dict): dict of the actual and estimates of treatment effects
    """
    y, X, w, tau, b, e = synthetic_data_func(n=n)

    preds_dict = {}
    preds_dict[KEY_ACTUAL] = tau
    preds_dict[KEY_GENERATED_DATA] = {'y': y, 'X': X, 'w': w, 'tau': tau,
                                      'b': b, 'e': e}

    # Predict p_hat because e would not be directly observed in real-life
    p_model = ElasticNetPropensityModel()
    p_hat = p_model.fit_predict(X, w)

    if estimators:
        for name, learner in estimators.items():
            # Some learners take a propensity argument, others do not.
            try:
                preds_dict[name] = learner.fit_predict(
                    X=X, p=p_hat, treatment=w, y=y).flatten()
            except TypeError:
                preds_dict[name] = learner.fit_predict(
                    X=X, treatment=w, y=y).flatten()
    else:
        # Default grid: every meta-learner paired with each base model.
        for base_learner, label_l in zip(
                [BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor],
                ['S', 'T', 'X', 'R']):
            for model, label_m in zip([LinearRegression, XGBRegressor],
                                      ['LR', 'XGB']):
                learner = base_learner(model())
                model_name = '{} Learner ({})'.format(label_l, label_m)
                try:
                    preds_dict[model_name] = learner.fit_predict(
                        X=X, p=p_hat, treatment=w, y=y).flatten()
                except TypeError:
                    preds_dict[model_name] = learner.fit_predict(
                        X=X, treatment=w, y=y).flatten()

    learner = CausalTreeRegressor(random_state=RANDOM_SEED)
    preds_dict['Causal Tree'] = learner.fit_predict(
        X=X, treatment=w, y=y).flatten()

    return preds_dict
c2e775c894fdb6b66d099a8d7130d3458e6cb381
3,627,073
def arrays_from_dataset(dataset):
    """Converts a tf.data.Dataset to nested np.ndarrays."""
    # np.asarray is passed directly; the previous wrapping lambda was
    # redundant (its own pylint pragma said as much).
    return tf.nest.map_structure(np.asarray, tensors_from_dataset(dataset))
35db90ed14481fd635710b220c9cb7270cf0fa58
3,627,074
def ldns_buffer_status_ok(*args):
    """LDNS buffer.

    Thin SWIG wrapper: forwards all positional arguments to the native
    ``_ldns.ldns_buffer_status_ok`` and returns its result unchanged.
    Presumably truthy when the buffer's status is OK — verify against the
    libldns documentation.
    """
    return _ldns.ldns_buffer_status_ok(*args)
ad2299bedb24fae1098fd8a48f25c5d42e9a3f26
3,627,075
import argparse
import logging
import time


def parse_args() -> argparse.Namespace:
    """Build the CLI parser, parse the arguments and configure logging.

    :return: argparse.Namespace holding the parsed options
    """
    level_names = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']

    ap = argparse.ArgumentParser()
    ap.add_argument('--log', dest='log', choices=level_names,
                    default='ERROR', help='Set the logging level')
    ap.add_argument('--lat', required=True, type=float,
                    help='Latitude in format d.ddd')
    ap.add_argument('--lon', required=True, type=float,
                    help='Longitude in format d.ddd')
    ap.add_argument('--slots', required=False, type=int, default=24,
                    help='Number of slots in rain array')
    ap.add_argument('--interval', required=True, type=int,
                    help='Slot length in minutes')
    ap.add_argument('--targetdir', required=True,
                    help='Directory to save new images')
    ap.add_argument('--yrdirs', required=True, nargs='+',
                    help='Directories containing JSON files from YR API')
    ap.add_argument('--tbdirs', required=True, nargs='+',
                    help='Directories containing PNG files from testbed.fmi.fi')
    args = ap.parse_args()

    # Configure root logging once, emitting UTC timestamps.
    logging.basicConfig(level=getattr(logging, args.log),
                        datefmt='%Y-%m-%dT%H:%M:%S',
                        format="%(asctime)s.%(msecs)03dZ %(levelname)s %(message)s")
    logging.Formatter.converter = time.gmtime  # Timestamps in UTC time
    return args
c51e56210bb3a191ad50b0e4535fe6ffe6c8b57f
3,627,076
import torch


def upsample(img, scale, border='reflect'):
    """Bicubical upsample via **CONV2D**. Using PIL's kernel.

    Args:
        img: a torch tensor of 2/3/4-D (NOTE(review): original docstring said
            "tf tensor", but the code clearly operates on torch tensors).
        scale: must be integer >= 2.
        border: padding mode. Recommend to 'REFLECT'.
    """
    device = img.device
    # _weights_upsample presumably returns the per-phase 5x5 bicubic kernels
    # and the effective integer scale s — confirm against its definition.
    kernels, s = _weights_upsample(scale)
    kernels = [k.astype('float32') for k in kernels]
    kernels = [torch.from_numpy(k) for k in kernels]
    # Asymmetric padding so the 5x5 convolution sees enough border context.
    p1 = 1 + s // 2
    p2 = 3
    # Normalize input to NCHW; original shape is remembered for restoring.
    img, shape = _push_shape_4d(img)
    img_ex = F.pad(img, [p1, p2, p1, p2], mode=border)
    c = img_ex.shape[1]
    assert c is not None, "img must define channel number"
    c = int(c)
    # Identity-per-channel filters: each kernel is applied to every channel
    # independently (no cross-channel mixing).
    filters = [torch.reshape(torch.eye(c, c), [c, c, 1, 1]) * k for k in kernels]
    weights = torch.stack(filters, dim=0).transpose(0, 1).reshape([-1, c, 5, 5])
    img_s = F.conv2d(img_ex, weights.to(device))
    # Interleave the s*s phase outputs back into a single upscaled image.
    img_s = F.pixel_shuffle(img_s, s)
    # Crop the excess introduced by the padding above.
    more = s // 2 * s
    crop = slice(more - s // 2, - (s // 2))
    img_s = _pop_shape(img_s[..., crop, crop], shape)
    return img_s
68f30b9f94139a9c1f6222d9d9082e02251e0b42
3,627,077
import copy


def first_round_phase(tableau, phase_stabilizer, phase_destabilizer):
    """This phase round adds a diagonal matrix D to the Z stabilizer matrix
    such that Z + D = M*M' for some invertible M.

    Args:
        tableau: 2n x 2n binary array; rows 0..n-1 are destabilizers,
            rows n..2n-1 stabilizers, columns split as [X | Z].
        phase_stabilizer: length-n phase bits of the stabilizer rows
            (updated in place and also returned).
        phase_destabilizer: length-n phase bits of the destabilizer rows.

    Returns:
        (tableau, phase_stabilizer, phase_destabilizer, circ) where circ is
        the list of S gates applied (presumably tq = tequila — confirm).
    """
    num_qubits = int(len(tableau[0, :]) / 2)
    # Split the tableau into its four n x n quadrants.
    x_destab = tableau[0:num_qubits, 0:num_qubits]
    z_destab = tableau[0:num_qubits, num_qubits:2 * num_qubits]
    z_stab = tableau[num_qubits:2 * num_qubits, num_qubits:2 * num_qubits]
    x_stab = tableau[num_qubits:2 * num_qubits, 0:num_qubits]
    matrix = copy.deepcopy(z_stab)
    # Build a lower-triangular M (unit diagonal from np.identity) so that
    # M*M' matches Z off the diagonal, mod 2.
    M = np.identity(num_qubits)
    circ = []
    for j in range(0, num_qubits):
        if j == 0:
            # First column: copy Z's column directly below the diagonal.
            for i in range(1, num_qubits):
                M[i, j] = matrix[i, j]
            continue
        for i in range(j + 1, num_qubits):
            # Subtract (mod 2) the already-determined partial inner product
            # of rows i and j over columns 0..j-1.
            result = []
            for k in range(0, j):
                Sum = (M[i, k] * M[j, k]) % 2
                result.append(Sum)
            final_sum = sum(result) % 2
            M[i, j] = (matrix[i, j] + final_sum) % 2
    # Diagonal of M*M' mod 2 tells us which qubits need a phase (S) gate.
    matrix = np.matmul(M, np.transpose(M)) % 2
    bits_to_flip = []
    for i in range(int(len(matrix[0, :]))):
        if matrix[i, i] != z_stab[i, i]:
            bits_to_flip.append(i)
        elif matrix[i, i] == z_stab[i, i]:
            # No-op branch kept from the original; equivalent to just
            # continuing the loop.
            continue
    for target_qubit in bits_to_flip:
        circ.append(tq.gates.S(target=target_qubit))
        # Apply the S-gate tableau update: phase ^= x*z, z ^= x, for both
        # stabilizer and destabilizer rows (in place).
        for i in range(0, num_qubits):
            phase_destabilizer[i] = int(phase_destabilizer[i]) ^ (
                int(x_destab[i, target_qubit]) * int(z_destab[i, target_qubit]))
            phase_stabilizer[i] = int(phase_stabilizer[i]) ^ (
                int(x_stab[i, target_qubit]) * int(z_stab[i, target_qubit]))
            z_stab[i, target_qubit] = int(z_stab[i, target_qubit]) ^ int(x_stab[i, target_qubit])
            z_destab[i, target_qubit] = int(z_destab[i, target_qubit]) ^ int(x_destab[i, target_qubit])
    # Reassemble the full tableau from the (possibly updated) quadrants.
    destabilizer = np.concatenate((x_destab, z_destab), axis=1)
    stabilizer = np.concatenate((x_stab, z_stab), axis=1)
    tableau = np.concatenate((destabilizer, stabilizer), axis=0)
    return (tableau, phase_stabilizer, phase_destabilizer, circ)
e798da4e1700592b4089358f45fb883b3b81a629
3,627,078
async def read_address2account(address: PyAddress, cur: Cursor):
    """Look up the account that owns *address*; raise if no user is mapped."""
    userid = await read_address2userid(address, cur)
    if userid is not None:
        return await read_account_info(userid, cur)
    raise BlockChainError('Not found account {}'.format(address))
366b1d28af966bd2deebbdb63d2f203f1169ae50
3,627,079
def _get_ntddi(osvi): """ Determines the current operating system. This function allows you to quickly tell apart major OS differences. For more detailed information call L{kernel32.GetVersionEx} instead. @note: Wine reports itself as Windows XP 32 bits (even if the Linux host is 64 bits). ReactOS may report itself as Windows 2000 or Windows XP, depending on the version of ReactOS. @type osvi: L{OSVersionInfoExA} @param osvi: Optional. The return value from L{kernel32.GetVersionEx}. @rtype: int @return: NTDDI version number. """ if not osvi: osvi = GET_VERISON_EX() ntddi = 0 ntddi += (osvi.dw_major_version & 0xFF) << 24 ntddi += (osvi.dw_minor_version & 0xFF) << 16 ntddi += (osvi.w_service_pack_major & 0xFF) << 8 ntddi += (osvi.w_service_pack_minor & 0xFF) return ntddi
54ca095610a0eb92da244c55d9f7313985699c9e
3,627,080
def mel_dropout(mel: tf.Tensor, drop_prob: float = 0.05) -> tf.Tensor:
    """Apply dropout to a mel spectrogram.

    NOTE(review): despite its name, ``drop_prob`` acts as the KEEP
    probability: ``tf.nn.dropout`` zeroes elements with probability ``rate``,
    and here ``rate = 1 - drop_prob`` — so the default 0.05 drops 95% of
    elements. Confirm this is intended before renaming anything.

    Args:
        mel (tf.Tensor): [freq, time] float32
        drop_prob (float, optional): keep probability. Defaults to 0.05.

    Returns:
        tf.Tensor: [freq, time] float32
    """
    return tf.nn.dropout(mel, rate=1 - drop_prob)
ea3d42dcba9599197deb5399cfed1c9b66f00c20
3,627,081
def getPortableIpRangeServices(config):
    """Reads config values related to portable ip and fills up services
    accordingly.

    Required keys on config.portableIpRange: startip, endip, netmask,
    gateway — if any is missing (falsy), the FAILED sentinel is returned
    instead of the dict. vlan is optional and copied only when set.

    Returns:
        dict of the collected values, or FAILED when a required value
        is absent.
    """
    services = {}
    ip_range = config.portableIpRange
    missing_required = False
    # The original repeated the same if/else four times; loop over the
    # required attributes instead (behavior unchanged: truthiness check,
    # AttributeError still propagates if the attribute does not exist).
    for key in ('startip', 'endip', 'netmask', 'gateway'):
        value = getattr(ip_range, key)
        if value:
            services[key] = value
        else:
            missing_required = True
    if ip_range.vlan:
        services["vlan"] = ip_range.vlan
    if missing_required:
        services = FAILED
    return services
0b2a9c863b4b1ccf0084ff4f8bbc4d4f8af7e2a1
3,627,082
from datetime import datetime
import functools
import time


def function_timer(func):
    """Timer decorator.

    Add ``@function_timer`` above a ``def`` statement and every call of that
    function prints its elapsed wall-clock time — in seconds, minutes, or
    hours — together with the completion timestamp.

    Arguments:
        func {function} -- the function to wrap

    Returns:
        the wrapped function; it forwards all arguments and the return value
        unchanged, printing timing information as a side effect
    """
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        start_time = time.time()
        value = func(*args, **kwargs)
        elapsed_time = time.time() - start_time
        stop_date = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
        # BUG FIX: the original condition `elapsed_time > 60 <= 3600` chains
        # as `elapsed_time > 60 and 60 <= 3600` (second half always true),
        # so the minutes branch also swallowed hour-long runs and the hours
        # branch was unreachable. The intended bound is one hour.
        if 60 < elapsed_time <= 3600:
            print(
                f"The function {func.__name__} took: {round(elapsed_time/60, 3)} minutes at {stop_date}"
            )
        elif elapsed_time > 3600:
            print(
                f"The function {func.__name__} took: {round((elapsed_time/60)/60, 3)} hours at {stop_date}"
            )
        else:
            print(f"The function {func.__name__} took: {round(elapsed_time, 3)} seconds")
        return value

    return wrapper_timer
6ddcca82ae60aafb2c072e62497f8b27d557ccdc
3,627,083
def breadcrumbs(category):
    """
    Render the ancestor path of *category* for the breadcrumbs template.

    Usage::

        {% breadcrumbs <category> %}

    Example::

        {% breadcrumbs category %}
    """
    ancestors = category.get_ancestors()
    return {'ancestors': ancestors}
3c83a7ad7e8ae30ad297fd9d3d7aa5ffa5631449
3,627,084
def resnext34(**kwargs):
    """Constructs a ResNeXt-34 model.

    Returns a (model, name) tuple; the model is moved onto the default CUDA
    device before being returned.
    """
    name = "resnext34"
    model = ResNeXt(BasicBlockX, [3, 4, 6, 3], **kwargs).cuda()
    return model, name
542cb4810fb56209ed8c504a5dd985e7d323725a
3,627,085
def _draw_latex_header(table, drop_columns):
    r"""Draw the Latex header.

    - Applies header border (``\hline``) before the row if appropriate,
      and a header rule after it when the table has a header.
    - ``drop_columns`` is forwarded to ``_drop_columns`` — presumably the
      names of header columns to omit; confirm against its definition.

    Example Output:

        \hline
        Name & Age & Nickname \\
        \hline
    """
    out = ""
    if table._has_border():
        out += _indent_text("\\hline\n", 3)

    # Drop header columns if required
    header = _drop_columns(table._header.copy(), table._header, drop_columns)
    # Join the cells with LaTeX column separators and end the row with \\.
    out += _indent_text(" & ".join(header) + " \\\\\n", 3)

    if table._has_header():
        out += _indent_text("\\hline\n", 3)
    return out
804361428780ee7782dd53cbc48bdef7435fd342
3,627,086
def register():
    """Register a new user, and send them a confirmation email.

    NOTE(review): the account is created with ``confirmed=True`` even though
    a confirmation token/link is generated below, and the email dispatch is
    commented out — so the token is currently unused. Confirm this is
    intentional before re-enabling the queue call.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(
            first_name=form.first_name.data,
            last_name=form.last_name.data,
            email=form.email.data,
            confirmed=True,
            password=form.password.data)
        db.session.add(user)
        db.session.commit()
        token = user.generate_confirmation_token()
        confirm_link = url_for('account.confirm', token=token, _external=True)
        # get_queue().enqueue(
        #     send_email,
        #     recipient=user.email,
        #     subject='Confirm Your Account',
        #     template='account/email/confirm',
        #     user=user,
        #     confirm_link=confirm_link)
        flash('Account successfully created', 'success')
        return redirect(url_for('main.index'))
    # GET request or failed validation: re-render the registration form.
    return render_template('account/register.html', form=form)
75cee330a981a36553c5680e93d0d7ef0f100aa1
3,627,087
from typing import List


def combine_results_dicts(results_summaries: List[dict]) -> dict:
    """For a list of dictionaries, each with keys 0..n-1, combine into a
    single dictionary with keys 0..ntot-1.

    Args:
        results_summaries: dictionaries whose keys are exactly the integers
            0..len(d)-1; values are kept in input order.

    Returns:
        A new dict with ntot = sum(len(d)) entries keyed 0..ntot-1.

    Raises:
        KeyError: if one of the dicts is missing an in-range integer key.
    """
    combined_summary = {}
    n_overall = 0
    for d in results_summaries:
        # BUG FIX: the original used d.pop(i), destructively emptying the
        # caller's dictionaries as a side effect; plain indexing leaves the
        # inputs untouched while producing the same combined result.
        for i in range(len(d)):
            combined_summary[i + n_overall] = d[i]
        n_overall += len(d)
    return combined_summary
67e5654b3f4b045526bc181ddb9b05eb9f7ce018
3,627,088
import json


def svc_get_objects_in_collection(database, collection):
    """
    Get objects from a collection. These forms of query strings are supported

    * No query string implies all objects in a collection are returned
    * A query string with a single "query_string" parameter results in the
      value of this parameter being used as the query in the find call
    * All other query strings are converted into a dict which is then used
      as the query in the find call.
    """
    # Guard: never expose internal/system databases through this endpoint.
    if database in system_databases:
        return logAndAbort("Cannot get data for system databases")
    expdb = mongoclient[database]
    if not len(request.args):
        # No filters at all — return the whole collection.
        logger.debug("Returning all objects in the collection %s in the database %s", collection, database)
        return JSONEncoder().encode([x for x in expdb[collection].find()])
    elif 'query_string' in request.args:
        # Explicit query string — parsed by parse_query_string before use.
        query_string = request.args['query_string']
        logger.debug("Returning all objects in the collection %s in the database %s matching query %s", collection, database, query_string)
        return JSONEncoder().encode([x for x in expdb[collection].find(parse_query_string(query_string))])
    else:
        # Arbitrary key/value args used directly as the Mongo query dict.
        logger.debug("Returning all objects in the collection %s in the database %s matching query %s", collection, database, json.dumps(request.args))
        return JSONEncoder().encode([x for x in expdb[collection].find(request.args)])
4212e350c88e29d914ecc830508c88e849e23d20
3,627,089
def conv_model(features, labels, mode):
    """2-layer convolution model.

    TF1 Estimator model_fn: builds conv->pool x2, a dense layer, and a
    logits layer over N_DIGITS classes, then returns the EstimatorSpec
    appropriate for PREDICT / TRAIN / EVAL mode.
    """
    # Reshape feature to 4d tensor with 2nd and 3rd dimensions being
    # image width and height final dimension being the number of color channels.
    feature = tf.reshape(features[X_FEATURE], [-1, 28, 28, 1])

    # First conv layer will compute 32 features for each 5x5 patch
    with tf.variable_scope('conv_layer1'):
        h_conv1 = tf.layers.conv2d(
            feature,
            filters=32,
            kernel_size=[5, 5],
            padding='same',
            activation=tf.nn.relu)
        h_pool1 = tf.layers.max_pooling2d(
            h_conv1, pool_size=2, strides=2, padding='same')

    # Second conv layer will compute 64 features for each 5x5 patch.
    with tf.variable_scope('conv_layer2'):
        h_conv2 = tf.layers.conv2d(
            h_pool1,
            filters=64,
            kernel_size=[5, 5],
            padding='same',
            activation=tf.nn.relu)
        h_pool2 = tf.layers.max_pooling2d(
            h_conv2, pool_size=2, strides=2, padding='same')
        # reshape tensor into a batch of vectors
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

    # Densely connected layer with 1024 neurons.
    h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Dropout only during training.
        h_fc1 = tf.layers.dropout(h_fc1, rate=0.5)

    # Compute logits (1 per class) and compute loss.
    logits = tf.layers.dense(h_fc1, N_DIGITS, activation=None)

    # Compute predictions.
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Compute loss.
    onehot_labels = tf.one_hot(tf.cast(labels, tf.int32), N_DIGITS, 1, 0)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=onehot_labels, logits=logits)

    # Create training op.
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    # Compute evaluation metrics.
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predicted_classes)
    }
    return tf.estimator.EstimatorSpec(
        mode, loss=loss, eval_metric_ops=eval_metric_ops)
d371901c36c513cb2db629414320719759dda291
3,627,090
def MapColoringCSP(colors, neighbors):
    """Make a CSP for the problem of coloring a map with different colors
    for any two adjacent regions. Arguments are a list of colors, and a
    dict of {region: [neighbor,...]} entries. This dict may also be
    specified as a string of the form defined by parse_neighbors.

    Returns:
        CSP: variables are the regions, every region shares the same color
        domain (via UniversalDict), and the binary constraint requires
        neighboring regions to take different values.
    """
    # Accept the compact string form; see parse_neighbors for its grammar.
    if isinstance(neighbors, str):
        neighbors = parse_neighbors(neighbors)
    return CSP(list(neighbors.keys()), UniversalDict(colors), neighbors,
               different_values_constraint)
8c79a9f5f0237d0e6891d2ebe29b93e5538e44ef
3,627,091
def direction_resend_n3(limit):
    """
    Return up to ``limit`` direction ids flagged for re-sending to N3
    (``need_resend_n3 = True``), newest first, as a list of row tuples.

    (Translated from the original Russian docstring: "Return directions in
    which all studies are confirmed, and confirmed after a certain date" —
    NOTE(review): the SQL only filters on need_resend_n3; confirm which
    description is accurate.)
    """
    with connection.cursor() as cursor:
        cursor.execute(
            """
            SELECT id FROM public.directions_napravleniya
            WHERE need_resend_n3 = True
            ORDER BY id DESC LIMIT %(limit)s
            """,
            params={'limit': limit},
        )
        row = cursor.fetchall()
    return row
1fabb5260d130b0953091b2205d48ae10f68a3cd
3,627,092
def StringToId(peg_positions):
    """
    Convert a list of peg-position strings (e.g. "A1", "C4") into the game
    bitfield as an integer number.

    Cells are ordered row-major: "A1" is bit 0, "A2" bit 1, ..., "F6" is
    bit 35. Raises ValueError for a string that is not a valid cell.
    """
    # Row-major grid of all 36 cell names, in bit order.
    cells = [row + col for row in 'ABCDEF' for col in '123456']
    bitfield = 0
    for peg in peg_positions:
        bitfield |= 1 << cells.index(peg)
    return bitfield
71845dd2a9166bf1e43fc68040de81f93806b322
3,627,093
def hide(obj):
    """Convert object to hidden task.

    Delegates to ``convert`` with ``Hidden=True`` and returns the result
    directly.
    """
    return convert(obj, Hidden=True)
d106fe028ed2be620261d77a53c3f3b299295c10
3,627,094
from datetime import datetime


def trim_prediction(
        data,
        prediction_days,
        history_days=CREST_RANGE
):
    """trim predicted dataframe into shape for results

    Args:
        data (:obj:`pandas.DataFrame`): data reported; must carry a ``date``
            column comparable to 'YYYY-MM-DD' strings
        prediction_days (int): number of days FORWARD to report
        history_days (int): number of days BACK to report

    Returns:
        pandas.DataFrame: same shape as original dataframe, but with days removed

    NOTE(review): ``timedelta`` is used below but only ``datetime`` is
    imported in this snippet — confirm the enclosing module imports it.
    """
    back_date = datetime.utcnow() - timedelta(days=history_days)
    forward_date = datetime.utcnow() + timedelta(days=prediction_days)

    back_date_str = back_date.strftime('%Y-%m-%d')
    forward_date_str = forward_date.strftime('%Y-%m-%d')

    # Keep only rows whose date lies within [back_date, forward_date].
    trim_data = data.loc[data.date >= back_date_str]
    trim_data = trim_data.loc[trim_data.date <= forward_date_str]

    return trim_data
eeb2120568392e732775c8f0d24a3a572dff948e
3,627,095
def series_simple_math(
    ser: pd.Series, function: str, number: int
) -> pd.core.series.Series:
    """Apply a basic arithmetic operation element-wise to a series.

    ``function`` selects the operation: ``'add'``, ``'sub'`` or ``'mul'``;
    any other value falls through to division (matching the historical
    behaviour of the final ``else`` branch).

    For example, given the series::

        0    0
        1    1
        2    2
        dtype: int64

    function ``'add'`` and ``number`` 2 returns::

        0    2
        1    3
        2    4
        dtype: int64

    :param ser: Series to perform operation on
    :param function: The operation to perform
    :param number: The number to apply the operation to
    """
    # Lambdas keep evaluation lazy: only the selected operation runs.
    operations = {
        'add': lambda: ser + number,
        'sub': lambda: ser - number,
        'mul': lambda: ser * number,
    }
    chosen = operations.get(function)
    if chosen is not None:
        return chosen()
    return ser / number
4ade703df1de1f16315f5c0be1a2019f4015b89c
3,627,096
def _gl_matrix(array):
    """
    Flatten a sane row-major (4, 4) numpy transformation matrix into the
    column-major, 16-element GLfloat ctypes array that OpenGL expects.
    """
    flat = np.array(array).T.reshape(-1)
    gl_array_type = gl.GLfloat * len(flat)
    return gl_array_type(*flat)
5a1678a505d813e04e512841140aec8fdd01a888
3,627,097
def create_vshieldr_controller(vmm_domp, provider, vcenter_domain, controller, host_or_ip, **args):
    """Create vShield Controller.

    Args:
        vmm_domp: parent VMM domain managed object
        provider: VMM provider name (used in the vCenter controller DN)
        vcenter_domain: name of the associated vCenter domain
        controller: name for the new controller
        host_or_ip: hostname or IP address of the vShield controller
        **args: optional keys (possibly nested under 'optional_args'):
            management_epg, associated_vcenter_controller,
            associated_vxlan_pool, associated_multicast_address_pool

    Returns:
        The created CtrlrP managed object.
    """
    # Callers may nest options under 'optional_args'; flatten them first.
    args = args['optional_args'] if 'optional_args' in args.keys() else args
    vmm_ctrlrp = CtrlrP(vmm_domp, controller, hostOrIp=host_or_ip, scope='iaas')
    if is_valid_key(args, 'management_epg'):
        # Relation to the management EPG under the mgmt tenant.
        vmm_rsmgmtepg = RsMgmtEPg(vmm_ctrlrp, tDn='uni/tn-mgmt/out-vmm/instP-' + args['management_epg'])
    if is_valid_key(args, 'associated_vcenter_controller'):
        # Relation to the paired vCenter controller in the given domain.
        vmm_rsvmmctrlrp = RsVmmCtrlrP(vmm_ctrlrp, tDn='uni/vmmp-'+provider+'/dom-'+vcenter_domain+'/ctrlr-' + args['associated_vcenter_controller'])
    if is_valid_key(args, 'associated_vxlan_pool'):
        vmm_rsvxlanns = RsVxlanNs(vmm_ctrlrp, tDn='uni/infra/vxlanns-' + args['associated_vxlan_pool'])
    if is_valid_key(args, 'associated_multicast_address_pool'):
        vmm_rsmcastaddrns = RsMcastAddrNs(vmm_ctrlrp, tDn='uni/infra/maddrns-' + args['associated_multicast_address_pool'])
    return vmm_ctrlrp
89c0f98f759ca3b5f6a41ffafd3c56041790049b
3,627,098
from typing import List
from typing import Optional


def get_all_by_type_and_status(
    *, db_session, service_type: str, is_active: bool
) -> List[Optional[Service]]:
    """Return every Service row matching the given type and active flag."""
    query = db_session.query(Service)
    query = query.filter(Service.type == service_type)
    query = query.filter(Service.is_active.is_(is_active))
    return query.all()
3c52ce959a8d065c1f7a908f4925e9517ebdb39f
3,627,099