Columns:
  max_stars_repo_path: string, length 3-269
  max_stars_repo_name: string, length 4-119
  max_stars_count: int64, range 0-191k
  id: string, length 1-7
  content: string, length 6-1.05M
  score: float64, range 0.23-5.13
  int_score: int64, range 0-5
tf_idf.py
ricosr/retrieval_chatbot
16
12300
<gh_stars>10-100
# -*- coding: utf-8 -*-

import pickle
import os

import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.externals import joblib
from sklearn.metrics.pairwise import cosine_similarity


class TfIdf:
    def __init__(self, config):
        self.config = config
        self.model_dict = {}
        self.vector_context_ls = []
        self.vector_utterrance_ls = []
        self.load_models(self.config)

    def load_models(self, config):
        try:
            for file_name, file_path in self.config.model_dict.items():
                self.model_dict[file_name] = joblib.load(file_path)
        except Exception as e:
            pass

    def select_model(self, file_name):
        try:
            self.current_model = self.model_dict[file_name]
        except Exception as e:
            pass

    def predict_tfidf(self, utterances, context_ls):
        for each_context in context_ls:
            if each_context == (0, 0):
                continue
            self.vector_context_ls.append(self.current_model.transform(
                [self.word_segment(each_context[0]) + self.word_segment(each_context[1])]))
            self.vector_utterrance_ls.append(self.current_model.transform(
                [self.word_segment(utterances) + self.word_segment(each_context[1])]))

    def calculate_distances(self):
        result_ls = []
        for tfidf_c, tfidf_u in zip(self.vector_context_ls, self.vector_utterrance_ls):
            result_ls.append(self.calculate_cos_similarity(tfidf_c, tfidf_u))
        result_ls = self.normalization(result_ls)
        self.vector_utterrance_ls.clear()
        self.vector_context_ls.clear()
        return result_ls

    def calculate_cos_similarity(self, x, y):
        x = x.reshape(1, -1)
        y = y.reshape(1, -1)
        return cosine_similarity(x, y)

    def word_segment(self, chinese_characters):
        seg_list = [each_word for each_word in jieba.cut(chinese_characters, cut_all=False)]
        return " ".join(seg_list)

    def normalization(self, ratio_ls):
        max_ratio = max(ratio_ls)
        min_ratio = min(ratio_ls)
        if max_ratio == min_ratio:
            return [1] * len(ratio_ls)
        return [(each_ratio - min_ratio) / (max_ratio - min_ratio) for each_ratio in ratio_ls]


class TrainTfIdf:
    def __init__(self, config):
        self.config = config
        self.files_dict = {}
        self.load_stop_words(self.config)

    def load_pickle(self, file=None):
        if file:
            with open(self.config.file_dict[file], 'rb') as fp:
                self.files_dict[file] = pickle.load(fp)
        else:
            for file_name, path in self.config.file_dict.items():
                with open(path, 'rb') as fp:
                    self.files_dict[file_name] = pickle.load(fp)

    def word_segment(self, chinese_characters):
        seg_list = [each_word for each_word in jieba.cut(chinese_characters, cut_all=False)]
        return " ".join(seg_list)

    def load_stop_words(self, config):
        with open(config.stop_words, 'rb') as fpr:
            self.stop_words = pickle.load(fpr)

    # def remove_stop_words(self, cut_words):
    #     cut_words_ls = cut_words.split(' ')
    #     for i in range(len(cut_words_ls)):
    #         if cut_words_ls[i] in self.stop_words:
    #             cut_words_ls[i] = 0
    #     while True:
    #         if 0 in cut_words_ls:
    #             cut_words_ls.remove(0)
    #         else:
    #             break
    #     return ' '.join(cut_words_ls)

    def train(self):
        if not os.path.exists("model"):
            os.mkdir("model")
        for file_name, content in self.files_dict.items():  # content:[[question, answer]]
            tmp_content = map(lambda each_chat: map(self.word_segment, each_chat), content)
            content_str_ls = [' '.join(list(each_chat)) for each_chat in tmp_content]
            # no_stop_content_ls = list(map(self.remove_stop_words, content_str_ls))
            vectorizer = TfidfVectorizer(stop_words=self.stop_words)
            vectorizer.fit_transform(content_str_ls)
            joblib.dump(vectorizer, 'model/{}.pkl'.format(file_name))
2.375
2
d373c7/pytorch/models/classifiers.py
t0kk35/d373c7
1
12301
<gh_stars>1-10 """ Module for classifier Models (c) 2020 d373c7 """ import logging import torch import torch.nn as nn from .common import PyTorchModelException, ModelDefaults, _History, _ModelGenerated, _ModelStream from .encoders import GeneratedAutoEncoder from ..layers import LSTMBody, ConvolutionalBody1d, AttentionLastEntry, LinearEncoder, TensorDefinitionHead from ..layers import TransformerBody, TailBinary from ..loss import SingleLabelBCELoss from ...features import TensorDefinition, TensorDefinitionMulti from typing import List, Dict, Union logger = logging.getLogger(__name__) class BinaryClassifierHistory(_History): loss_key = 'loss' acc_key = 'acc' def __init__(self, *args): dl = self._val_argument(args) h = {m: [] for m in [BinaryClassifierHistory.loss_key, BinaryClassifierHistory.acc_key]} _History.__init__(self, dl, h) self._running_loss = 0 self._running_correct_cnt = 0 self._running_count = 0 @staticmethod def _reshape_label(pr: torch.Tensor, lb: torch.Tensor) -> torch.Tensor: if pr.shape == lb.shape: return lb elif len(pr.shape)-1 == len(lb.shape) and pr.shape[-1] == 1: return torch.unsqueeze(lb, dim=len(pr.shape)-1) else: raise PyTorchModelException( f'Incompatible shapes for prediction and label. Got {pr.shape} and {lb.shape}. Can not safely compare' ) def end_step(self, *args): BinaryClassifierHistory._val_is_tensor(args[0]) BinaryClassifierHistory._val_is_tensor_list(args[1]) BinaryClassifierHistory._val_is_tensor(args[2]) pr, lb, loss = args[0], args[1][0], args[2] lb = BinaryClassifierHistory._reshape_label(pr, lb) self._running_loss += loss.item() self._running_correct_cnt += torch.sum(torch.eq(torch.ge(pr, 0.5), lb)).item() self._running_count += pr.shape[0] super(BinaryClassifierHistory, self).end_step(pr, lb, loss) def end_epoch(self): self._history[BinaryClassifierHistory.loss_key].append(round(self._running_loss/self.steps, 4)) self._history[BinaryClassifierHistory.acc_key].append(round(self._running_correct_cnt/self.samples, 4)) self._running_correct_cnt = 0 self._running_count = 0 self._running_loss = 0 super(BinaryClassifierHistory, self).end_epoch() def step_stats(self) -> Dict: r = { BinaryClassifierHistory.loss_key: round(self._running_loss/self.step, 4), BinaryClassifierHistory.acc_key: round(self._running_correct_cnt/self._running_count, 4) } return r def early_break(self) -> bool: return False class ClassifierDefaults(ModelDefaults): def __init__(self): super(ClassifierDefaults, self).__init__() self.emb_dim(4, 100, 0.2) self.linear_batch_norm = True self.inter_layer_drop_out = 0.1 self.default_series_body = 'recurrent' self.attention_drop_out = 0.0 self.convolutional_dense = True self.convolutional_drop_out = 0.1 self.transformer_positional_logic = 'encoding' self.transformer_positional_size = 16 self.transformer_drop_out = 0.2 def emb_dim(self, minimum: int, maximum: int, dropout: float): self.set('emb_min_dim', minimum) self.set('emb_max_dim', maximum) self.set('emb_dropout', dropout) @property def linear_batch_norm(self) -> bool: """Define if a batch norm layer will be added before the final hidden layer. :return: bool """ return self.get_bool('lin_batch_norm') @linear_batch_norm.setter def linear_batch_norm(self, flag: bool): """Set if a batch norm layer will be added before the final hidden layer. :return: bool """ self.set('lin_batch_norm', flag) @property def inter_layer_drop_out(self) -> float: """Defines a value for the inter layer dropout between linear layers. If set, then dropout will be applied between linear layers. 
:return: A float value, the dropout aka p value to apply in the nn.Dropout layers. """ return self.get_float('lin_interlayer_drop_out') @inter_layer_drop_out.setter def inter_layer_drop_out(self, dropout: float): """Define a value for the inter layer dropout between linear layers. If set, then dropout will be applied between linear layers. :param dropout: The dropout aka p value to apply in the nn.Dropout layers. """ self.set('lin_interlayer_drop_out', dropout) @property def default_series_body(self) -> str: """Defines the default body type for series, which is a tensor of rank 3 (including batch). This could be for instance 'recurrent'. :return: A string value, the default body type to apply to a rank 3 tensor stream. """ return self.get_str('def_series_body') @default_series_body.setter def default_series_body(self, def_series_body: str): """Defines the default body type for series, which is a tensor of rank 3 (including batch). This could be for instance 'recurrent'. :param def_series_body: A string value, the default body type to apply to a rank 3 tensor stream. """ self.set('def_series_body', def_series_body) @property def attention_drop_out(self) -> float: """Define a value for the attention dropout. If set, then dropout will be applied after the attention layer. :return: The dropout aka p value to apply in the nn.Dropout layers. """ return self.get_float('attn_drop_out') @attention_drop_out.setter def attention_drop_out(self, dropout: float): """Define a value for the attention dropout. If set, then dropout will be applied after the attention layer. :param dropout: The dropout aka p value to apply in the nn.Dropout layers. """ self.set('attn_drop_out', dropout) @property def convolutional_drop_out(self) -> float: """Define a value for the attention dropout. If set, then dropout will be applied after the attention layer. :return: The dropout aka p value to apply in the nn.Dropout layers. """ return self.get_float('conv_body_dropout') @convolutional_drop_out.setter def convolutional_drop_out(self, dropout: float): """Define a value for the attention dropout. If set, then dropout will be applied after the attention layer. :param dropout: The dropout aka p value to apply in the nn.Dropout layers. """ self.set('conv_body_dropout', dropout) @property def convolutional_dense(self) -> bool: """Defines if convolutional bodies are dense. Dense bodies mean that the input to the layer is added to the output. It forms a sort of residual connection. The input is concatenated along the features axis. This allows the model to work with the input if that turns out to be useful. :return: A boolean value, indicating if the input will be added to the output or not. """ return self.get_bool('conv_body_dense') @convolutional_dense.setter def convolutional_dense(self, dense: bool): """Defines if convolutional bodies are dense. Dense bodies mean that the input to the layer is added to the output. It forms a sort of residual connection. The input is concatenated along the features axis. This allows the model to work with the input if that turns out to be useful. :param dense: A boolean value, indicating if the input will be added to the output or not. """ self.set('conv_body_dense', dense) @property def transformer_positional_logic(self) -> str: """Sets which positional logic is used in transformer blocks. 'encoding' : The system will use the encoding, 'embedding' : The system will use an embedding layer. :return: A string value defining which positional logic to use. 
""" return self.get_str('trans_pos_logic') @transformer_positional_logic.setter def transformer_positional_logic(self, positional_logic: str): """Sets which positional logic is used in transformer blocks. 'encoding' : The system will use the encoding, 'embedding' : The system will use an embedding layer. :param positional_logic: A string value defining which positional logic to use. """ self.set('trans_pos_logic', positional_logic) @property def transformer_positional_size(self) -> int: """Sets the positional size of transformer blocks. The size is the number of elements added to each transaction in the series to help the model determine the position of transactions in the series. :return: An integer value. The number of elements output by the positional logic """ return self.get_int('trans_pos_size') @transformer_positional_size.setter def transformer_positional_size(self, positional_size: int): """Sets the positional size of transformer blocks. The size is the number of elements added to each transaction in the series to help the model determine the position of transactions in the series. :param positional_size: An integer value. The number of elements output by the positional logic """ self.set('trans_pos_size', positional_size) @property def transformer_drop_out(self) -> float: """Defines the drop out to apply in the transformer layer :return: An float value. The drop out value to apply in transformer layers """ return self.get_float('trans_dropout') @transformer_drop_out.setter def transformer_drop_out(self, dropout: float): """Defines the drop out to apply in the transformer layer :param dropout: The drop out value to apply in transformer layers """ self.set('trans_dropout', dropout) class GeneratedClassifier(_ModelGenerated): """Generate a Pytorch classifier model. This class will create a model that fits the input and label definition of the TensorDefinition. Args: tensor_def: A TensorDefinition or TensorDefinitionMulti object describing the various input and output features c_defaults: (Optional) ClassifierDefaults object defining the defaults which need to be used. kwargs: Various named parameters which can be use to drive the type of classifier and the capacity of the model. """ def __init__(self, tensor_def: Union[TensorDefinition, TensorDefinitionMulti], c_defaults=ClassifierDefaults(), **kwargs): tensor_def_m = self.val_is_td_multi(tensor_def) super(GeneratedClassifier, self).__init__(tensor_def_m, c_defaults) # Set-up stream per tensor_definition label_td = self.label_tensor_def(tensor_def_m) feature_td = [td for td in self._tensor_def.tensor_definitions if td not in label_td] streams = [_ModelStream(td.name) for td in feature_td] if self.is_param_defined('transfer_from', kwargs): # We're being asked to do transfer learning. # TODO we'll need a bunch of validation here. om = self.get_gen_model_parameter('transfer_from', kwargs) logger.info(f'Transferring from model {om.__class__}') # The Source model is an auto-encoder if isinstance(om, GeneratedAutoEncoder): self.set_up_heads(c_defaults, feature_td, streams) # Copy and freeze the TensorDefinitionHead, this should normally be the first item. 
for s, oms in zip(streams, om.streams): for sly in oms: if isinstance(sly, TensorDefinitionHead): src = self.is_tensor_definition_head(sly) trg = self.is_tensor_definition_head(s.layers[0]) trg.copy_state_dict(src) trg.freeze() logger.info(f'Transferred and froze TensorDefinitionHead {trg.tensor_definition.name}') elif isinstance(sly, LinearEncoder): # If no linear layers defined then try and copy the encoder linear_layers if not self.is_param_defined('linear_layers', kwargs): linear_layers = sly.layer_definition # Add last layer. Because this is binary, it has to have size of 1. linear_layers.append((1, 0.0)) tail = TailBinary( sum(s.out_size for s in streams), linear_layers, c_defaults.linear_batch_norm ) tail_state = tail.state_dict() # Get state of the target layer, remove last item. (popitem) source_state = list(sly.state_dict().values()) for i, sk in enumerate(tail_state.keys()): if i < 2: tail_state[sk].copy_(source_state[i]) # Load target Dict in the target layer. tail.load_state_dict(tail_state) for i, p in enumerate(tail.parameters()): if i < 2: p.requires_grad = False logger.info(f'Transferred and froze Linear Encoder layers {sly.layer_definition}') else: # Set-up a head layer to each stream. This is done in the parent class. self.set_up_heads(c_defaults, feature_td, streams) # Add Body to each stream. for td, s in zip(feature_td, streams): self._add_body(s, td, kwargs, c_defaults) # Create tail. linear_layers = self.get_list_parameter('linear_layers', int, kwargs) # Add dropout parameter this will make a list of tuples of (layer_size, dropout) linear_layers = [(i, c_defaults.inter_layer_drop_out) for i in linear_layers] # Add last layer. Because this is binary, it has to have size of 1. linear_layers.append((1, 0.0)) tail = TailBinary(sum(s.out_size for s in streams), linear_layers, c_defaults.linear_batch_norm) # Assume the last entry is the label self._y_index = self._x_indexes[-1] + 1 self.streams = nn.ModuleList( [s.create() for s in streams] ) self.tail = tail # Last but not least, set-up the loss function self.set_loss_fn(SingleLabelBCELoss()) def _add_body(self, stream: _ModelStream, tensor_def: TensorDefinition, kwargs: dict, defaults: ClassifierDefaults): if tensor_def.rank == 2: # No need to add anything to the body, rank goes directly to the tail. return elif tensor_def.rank == 3: # Figure out to which body to use. if self.is_param_defined('recurrent_layers', kwargs): body_type = 'recurrent' elif self.is_param_defined('convolutional_layers', kwargs): body_type = 'convolutional' elif self.is_param_defined('attention_heads', kwargs): body_type = 'transformer' else: body_type = defaults.default_series_body # Set-up the body. 
if body_type.lower() == 'recurrent': self._add_recurrent_body(stream, kwargs, defaults) elif body_type.lower() == 'convolutional': self._add_convolutional_body(stream, tensor_def, kwargs, defaults) elif body_type.lower() == 'transformer': self._add_transformer_body(stream, tensor_def, kwargs, defaults) else: raise PyTorchModelException( f'Do not know how to build body of type {body_type}' ) def _add_recurrent_body(self, stream: _ModelStream, kwargs: dict, defaults: ClassifierDefaults): attn_heads = self.get_int_parameter('attention_heads', kwargs, 0) # attn_do = defaults.attention_drop_out rnn_features = self.get_int_parameter( 'recurrent_features', kwargs, self.closest_power_of_2(int(stream.out_size / 3)) ) rnn_layers = self.get_int_parameter('recurrent_layers', kwargs, 1) # Add attention if requested if attn_heads > 0: attn = AttentionLastEntry(stream.out_size, attn_heads, rnn_features) stream.add('Attention', attn, attn.output_size) # Add main rnn layer rnn = LSTMBody(stream.out_size, rnn_features, rnn_layers, True, False) stream.add('Recurrent', rnn, rnn.output_size) def _add_convolutional_body(self, stream: _ModelStream, tensor_def: TensorDefinition, kwargs: dict, defaults: ClassifierDefaults): s_length = [s[1] for s in tensor_def.shapes if len(s) == 3][0] convolutional_layers = self.get_list_of_tuples_parameter('convolutional_layers', int, kwargs, None) dropout = defaults.convolutional_drop_out dense = defaults.convolutional_dense cnn = ConvolutionalBody1d(stream.out_size, s_length, convolutional_layers, dropout, dense) stream.add('Convolutional', cnn, cnn.output_size) def _add_transformer_body(self, stream: _ModelStream, tensor_def: TensorDefinition, kwargs: dict, defaults: ClassifierDefaults): s_length = [s[1] for s in tensor_def.shapes if len(s) == 3][0] attention_head = self.get_int_parameter('attention_heads', kwargs, 1) feedforward_size = self.get_int_parameter( 'feedforward_size', kwargs, self.closest_power_of_2(int(stream.out_size / 3)) ) drop_out = defaults.transformer_drop_out positional_size = defaults.transformer_positional_size positional_logic = defaults.transformer_positional_logic trans = TransformerBody( stream.out_size, s_length, positional_size, positional_logic, attention_head, feedforward_size, drop_out ) stream.add('Transformer', trans, trans.output_size) def get_y(self, ds: List[torch.Tensor]) -> List[torch.Tensor]: return ds[self._y_index: self._y_index+1] def history(self, *args) -> _History: return BinaryClassifierHistory(*args) def forward(self, x: List[torch.Tensor]): y = [s([x[i] for i in hi]) for hi, s in zip(self.head_indexes, self.streams)] y = self.tail(y) return y
2.15625
2
qklnn/plots/hyperpar_scan.py
cambouvy/BSc-Thesis-Project
1
12302
<filename>qklnn/plots/hyperpar_scan.py import re import numpy as np import pandas as pd import matplotlib as mpl mpl.use("pdf") import matplotlib.pyplot as plt from matplotlib import gridspec from peewee import AsIs, JOIN, prefetch, SQL from IPython import embed from bokeh.layouts import row, column from bokeh.plotting import figure, show, output_file from bokeh.transform import linear_cmap from bokeh.models import ( ColumnDataSource, Range1d, LabelSet, Label, Rect, HoverTool, Div, ) from qlknn.NNDB.model import ( Network, PureNetworkParams, PostprocessSlice, NetworkMetadata, TrainMetadata, Postprocess, db, Hyperparameters, ) from qlknn.plots.statistical_spread import get_base_stats from qlknn.misc.to_precision import to_precision # First, get some statistics target_names = ["efeTEM_GB"] hyperpars = ["cost_stable_positive_scale", "cost_l2_scale"] # hyperpars = ['cost_stable_positive_scale', 'cost_stable_positive_offset'] goodness_pars = [ "rms", "no_pop_frac", "no_thresh_frac", "pop_abs_mis_median", "thresh_rel_mis_median", "wobble_qlkunstab", ] try: report = get_base_stats(target_names, hyperpars, goodness_pars) except Network.DoesNotExist: report = pd.DataFrame(columns=goodness_pars, index=["mean", "stddev", "stderr"]) query = ( Network.select( Network.id.alias("network_id"), PostprocessSlice, Postprocess.rms, Hyperparameters, ) .join(PostprocessSlice, JOIN.LEFT_OUTER) .switch(Network) .join(Postprocess, JOIN.LEFT_OUTER) .switch(Network) .where(Network.target_names == target_names) .switch(Network) .join(PureNetworkParams) .join(Hyperparameters) .where(Hyperparameters.cost_stable_positive_offset.cast("numeric") == -5) .where(Hyperparameters.cost_stable_positive_function == "block") ) if query.count() > 0: results = list(query.dicts()) df = pd.DataFrame(results) # df['network'] = df['network'].apply(lambda el: 'pure_' + str(el)) # df['l2_norm'] = df['l2_norm'].apply(np.nanmean) df.drop(["id", "network"], inplace=True, axis="columns") df.set_index("network_id", inplace=True) stats = df stats = stats.applymap(np.array) stats = stats.applymap(lambda x: x[0] if isinstance(x, np.ndarray) and len(x) == 1 else x) stats.dropna(axis="columns", how="all", inplace=True) stats.dropna(axis="rows", how="all", inplace=True) stats = stats.loc[:, hyperpars + goodness_pars] stats.reset_index(inplace=True) # stats.set_index(hyperpars, inplace=True) # stats.sort_index(ascending=False, inplace=True) # stats = stats.groupby(level=list(range(len(stats.index.levels)))).mean() #Average equal hyperpars # stats.reset_index(inplace=True) aggdict = {"network_id": lambda x: tuple(x)} aggdict.update({name: "mean" for name in goodness_pars}) stats_mean = stats.groupby(hyperpars).agg(aggdict) aggdict.update({name: "std" for name in goodness_pars}) stats_std = stats.groupby(hyperpars).agg(aggdict) stats = stats_mean.merge(stats_std, left_index=True, right_index=True, suffixes=("", "_std")) stats.reset_index(inplace=True) for name in hyperpars: stats[name] = stats[name].apply(str) for name in goodness_pars: fmt = lambda x: "" if np.isnan(x) else to_precision(x, 4) fmt_mean = stats[name].apply(fmt) stats[name + "_formatted"] = fmt_mean fmt = lambda x: "" if np.isnan(x) else to_precision(x, 2) fmt_std = stats[name + "_std"].apply(fmt) prepend = lambda x: "+- " + x if x != "" else x stats[name + "_std_formatted"] = fmt_std.apply(prepend) x = np.unique(stats[hyperpars[1]].values) x = sorted(x, key=lambda x: float(x)) y = np.unique(stats[hyperpars[0]].values) y = sorted(y, key=lambda x: float(x)) source = 
ColumnDataSource(stats) plotmode = "bokehz" hover = HoverTool( tooltips=[ ("network_id", "@network_id"), (hyperpars[0], "@" + hyperpars[0]), (hyperpars[1], "@" + hyperpars[1]), ] ) plots = [] for statname in goodness_pars: fmt = lambda x: "" if np.isnan(x) else to_precision(x, 2) title = "{:s} (ref={:s}±{:s})".format( statname, fmt(report[statname]["mean"]), fmt(report[statname]["stddev"] + report[statname]["stderr"]), ) p = figure(title=title, tools="tap", toolbar_location=None, x_range=x, y_range=y) p.add_tools(hover) color = linear_cmap(statname, "Viridis256", min(stats[statname]), max(stats[statname])) p.rect( x=hyperpars[1], y=hyperpars[0], width=1, height=1, source=source, fill_color=color, line_color=None, nonselection_fill_alpha=0.4, nonselection_fill_color=color, ) non_selected = Rect(fill_alpha=0.8) label_kwargs = dict( x=hyperpars[1], y=hyperpars[0], level="glyph", source=source, text_align="center", text_color="red", ) labels = LabelSet(text=statname + "_formatted", text_baseline="bottom", **label_kwargs) labels_std = LabelSet(text=statname + "_std_formatted", text_baseline="top", **label_kwargs) p.add_layout(labels) p.add_layout(labels_std) p.xaxis.axis_label = hyperpars[1] p.yaxis.axis_label = hyperpars[0] plots.append(p) from bokeh.layouts import layout, widgetbox title = Div(text=",".join(target_names)) l = layout([[title], [plots]]) show(l)
2.40625
2
py3canvas/tests/shared_brand_configs.py
tylerclair/py3canvas
0
12303
<filename>py3canvas/tests/shared_brand_configs.py
"""SharedBrandConfigs API Tests for Version 1.0.

This is a testing template for the generated SharedBrandConfigsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.shared_brand_configs import SharedBrandConfigsAPI
from py3canvas.apis.shared_brand_configs import Sharedbrandconfig


class TestSharedBrandConfigsAPI(unittest.TestCase):
    """Tests for the SharedBrandConfigsAPI."""

    def setUp(self):
        self.client = SharedBrandConfigsAPI(
            secrets.instance_address, secrets.access_token
        )

    def test_share_brandconfig_theme(self):
        """Integration test for the SharedBrandConfigsAPI.share_brandconfig_theme method."""
        # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
        pass

    def test_update_shared_theme(self):
        """Integration test for the SharedBrandConfigsAPI.update_shared_theme method."""
        # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
        pass

    def test_un_share_brandconfig_theme(self):
        """Integration test for the SharedBrandConfigsAPI.un_share_brandconfig_theme method."""
        id = None  # Change me!!
        r = self.client.un_share_brandconfig_theme(id)
2.375
2
apps/cars/tests/api/abstract/abstract_base_api_test.py
agorsk1/car-rating-app
1
12304
from abc import ABC, abstractmethod

from django.test import TestCase
from rest_framework.generics import GenericAPIView
from rest_framework.test import APIRequestFactory

from apps.cars.factory import UserFactory


class AbstractBaseTest(object):
    class AbstractBaseApiTestCase(TestCase, ABC):
        """ Abstract Base TestCase class. """

        def setUp(self) -> None:
            """Base setup"""
            self.user = UserFactory.create()
            self.request_factory = APIRequestFactory()
            self.view = self._view()
            self.endpoint = self._endpoint()

        @abstractmethod
        def _view(self) -> GenericAPIView.as_view():
            """Abstract method that returns YourApiToTest.as_view()"""
            pass

        @abstractmethod
        def _endpoint(self) -> str:
            """Abstract method that return endpoint string E.g /cars/"""
            pass

        @abstractmethod
        def test_anonymous_request(self, *args, **kwargs) -> None:
            """test if anonymous user cannot access endpoint"""
            pass
2.53125
3
templating-tool.py
salayatana66/vw-serving-flask
4
12305
""" A simple templating tool for Dockerfiles """ import sys import os import click import jinja2 import yaml @click.group() def cli(): """ @Unimplemented """ pass @cli.command() @click.argument("template", required=True, type=str) @click.option("-y", "--yaml_file", required=True, help="Yaml file with keys for template", type=str) def from_yaml(template, yaml_file): """ Fills in template file fields using the yaml_file """ temp_path = os.path.expanduser( os.path.expandvars(template)) yml_path = os.path.expanduser( os.path.expandvars(yaml_file)) with open(temp_path, 'r') as tfile: temp_jin = jinja2.Template(tfile.read()) with open(yml_path, 'r') as yfile: yml_loaded = yaml.load(yfile, Loader=yaml.BaseLoader) temp_rend = temp_jin.render(**yml_loaded) sys.stdout.write(temp_rend) sys.stdout.flush() cli.add_command(from_yaml) if __name__ == '__main__': cli()
2.875
3
verbforms.py
wmcooper2/Clean-Code-English-Tests
0
12306
"""File for holding the different verb forms for all of the verbs in the Total English book series.""" verb_forms = { 'become' : { 'normal' : 'become', 'present' : ['become','becomes'], 'past' : 'became', 'past participle' : 'become', 'gerund' : 'becoming', }, 'be': { 'normal' : 'be', 'present' : ['am','is','are'], 'past' : ['was', 'were'], 'past participle' : 'been', 'gerund' : 'being', }, 'begin': { 'normal' : 'begin', 'present' : ['begin','begins'], 'past' : 'began', 'past participle' : 'begun', 'gerund' : 'beginning', }, 'blow': { 'normal' : 'blow', 'present' : ['blow', 'blows'], 'past' : 'blew', 'past participle' : 'blown', 'gerund' : 'blowing', }, 'bring': { 'normal' : 'bring', 'present' : ['bring','brings'], 'past' : 'brought', 'past participle' : 'brought', 'gerund' : 'bringing', }, 'build': { 'normal' : 'build', 'present' : ['build','builds'], 'past' : 'built', 'past participle' : 'built', 'gerund' : 'building', }, 'burn': { 'normal' : 'burn', 'present' : ['burn','burns'], 'past' : ['burned','burnt'], 'past participle' : ['burned','burnt'], 'gerund' : 'burning', }, 'buy': { 'normal' : 'buy', 'present' : ['buy','buys'], 'past' : 'bought', 'past participle' : 'bought', 'gerund' : 'buying', }, 'catch': { 'normal' : 'catch', 'present' : ['catch','catches'], 'past' : 'caught', 'past participle' : 'caught', 'gerund' : 'catching', }, 'choose': { 'normal' : 'choose', 'present' : ['choose','chooses'], 'past' : 'chose', 'past participle' : 'chosen', 'gerund' : 'choosing', }, 'come': { 'normal' : 'come', 'present' : ['come','comes'], 'past' : 'came', 'past participle' : 'come', 'gerund' : 'coming', }, 'cut': { 'normal' : 'cut', 'present' : ['cut','cuts'], 'past' : 'cut', 'past participle' : 'cut', 'gerund' : 'cutting', }, 'do': { 'normal' : 'do', 'present' : ['do','does'], 'past' : 'did', 'past participle' : 'done', 'gerund' : 'doing', }, 'drink': { 'normal' : 'drink', 'present' : ['drink','drinks'], 'past' : 'drank', 'past participle' : 'drunk', 'gerund' : 'drinking', }, 'eat': { 'normal' : 'eat', 'present' : ['eat','eats'], 'past' : 'ate', 'past participle' : 'eaten', 'gerund' : 'eating', }, 'feel': { 'normal' : 'feel', 'present' : ['feel','feels'], 'past' : 'felt', 'past participle' : 'felt', 'gerund' : 'feeling', }, 'fight': { 'normal' : 'fight', 'present' : ['fight','fights'], 'past' : 'fought', 'past participle' : 'fought', 'gerund' : 'fighting', }, 'find': { 'normal' : 'find', 'present' : ['find','finds'], 'past' : 'found', 'past participle' : 'found', 'gerund' : 'finding', }, 'fly': { 'normal' : 'fly', 'present' : ['fly','flies'], 'past' : 'flew', 'past participle' : 'flown', 'gerund' : 'flying', }, 'forget': { 'normal' : 'forget', 'present' : ['forget','forgets'], 'past' : 'forgot', 'past participle' : ['forgotten','forgot'], 'gerund' : 'forgetting', }, 'get': { 'normal' : 'get', 'present' : ['get','gets'], 'past' : 'got', 'past participle' : ['gotten','got'], 'gerund' : 'getting', }, 'give': { 'normal' : 'give', 'present' : ['give','gives'], 'past' : 'gave', 'past participle' : 'given', 'gerund' : 'giving', }, 'go': { 'normal' : 'go', 'present' : ['go','goes'], 'past' : 'went', 'past participle' : 'gone', 'gerund' : 'going', }, 'grow': { 'normal' : 'grow', 'present' : ['grow','grows'], 'past' : 'grew', 'past participle' : 'grown', 'gerund' : 'growing', }, 'have': { 'normal' : 'have', 'present' : ['have','has'], 'past' : 'had', 'past participle' : 'had', 'gerund' : 'having', }, 'hear': { 'normal' : 'hear', 'present' : ['hear','hears'], 'past' : 'heard', 'past participle' : 'heard', 
'gerund' : 'hearing', }, 'hit': { 'normal' : 'hit', 'present' : ['hit','hits'], 'past' : 'hit', 'past participle' : 'hit', 'gerund' : 'hitting', }, 'hold': { 'normal' : 'hold', 'present' : ['hold','holds'], 'past' : 'held', 'past participle' : 'held', 'gerund' : 'holding', }, 'hurt': { 'normal' : 'hurt', 'present' : ['hurt','hurts'], 'past' : 'hurt', 'past participle' : 'hurt', 'gerund' : 'hurting', }, 'keep': { 'normal' : 'keep', 'present' : ['keep','keeps'], 'past' : 'kept', 'past participle' : 'kept', 'gerund' : 'keeping', }, 'know': { 'normal' : 'know', 'present' : ['know','knows'], 'past' : 'knew', 'past participle' : 'known', 'gerund' : 'knowing', }, 'lead': { 'normal' : 'lead', 'present' : ['lead','leads'], 'past' : 'led', 'past participle' : 'led', 'gerund' : 'leading', }, 'leave': { 'normal' : 'leave', 'present' : ['leave','leaves'], 'past' : 'left', 'past participle' : 'left', 'gerund' : 'leaving', }, 'lend': { 'normal' : 'lend', 'present' : ['lend','lends'], 'past' : 'lent', 'past participle' : 'lent', 'gerund' : 'lending', }, 'lie': { 'normal' : 'lie', 'present' : ['lie','lies'], 'past' : 'lay', 'past participle' : 'lain', 'gerund' : 'lying', }, 'lose': { 'normal' : 'lose', 'present' : ['lose','loses'], 'past' : 'lost', 'past participle' : 'lost', 'gerund' : 'losing', }, 'make': { 'normal' : 'make', 'present' : ['make','makes'], 'past' : 'made', 'past participle' : 'made', 'gerund' : 'making', }, 'mean': { 'normal' : 'mean', 'present' : ['mean','means'], 'past' : 'meant', 'past participle' : 'meant', 'gerund' : 'meaning', }, 'meet': { 'normal' : 'meet', 'present' : ['meet','meets'], 'past' : 'met', 'past participle' : 'met', 'gerund' : 'meeting', }, 'put': { 'normal' : 'put', 'present' : ['put','puts'], 'past' : 'put', 'past participle' : 'put', 'gerund' : 'putting', }, 'read': { 'normal' : 'read', 'present' : ['read','reads'], 'past' : 'read', 'past participle' : 'read', 'gerund' : 'reading', }, 'ride': { 'normal' : 'ride', 'present' : ['ride','rides'], 'past' : 'rode', 'past participle' : 'ridden', 'gerund' : 'riding', }, 'ring': { 'normal' : 'ring', 'present' : ['ring','rings'], 'past' : 'rang', 'past participle' : 'rung', 'gerund' : 'ringing', }, 'run': { 'normal' : 'run', 'present' : ['run','runs'], 'past' : 'ran', 'past participle' : 'run', 'gerund' : 'running', }, 'say': { 'normal' : 'say', 'present' : ['say','says'], 'past' : 'said', 'past participle' : 'said', 'gerund' : 'saying', }, 'see': { 'normal' : 'see', 'present' : ['see','sees'], 'past' : 'saw', 'past participle' : 'seen', 'gerund' : 'seeing', }, 'sell': { 'normal' : 'sell', 'present' : ['sell','sells'], 'past' : 'sold', 'past participle' : 'sold', 'gerund' : 'selling', }, 'send': { 'normal' : 'send', 'present' : ['send','sends'], 'past' : 'sent', 'past participle' : 'sent', 'gerund' : 'sending', }, 'shake': { 'normal' : 'shake', 'present' : ['shake','shakes'], 'past' : 'shook', 'past participle' : 'shaken', 'gerund' : 'shaking', }, 'show': { 'normal' : 'show', 'present' : ['show','shows'], 'past' : 'showed', 'past participle' : 'shown', 'gerund' : 'showing', }, 'shut': { 'normal' : 'shut', 'present' : ['shut','shuts'], 'past' : 'shut', 'past participle' : 'shut', 'gerund' : 'shutting', }, 'sing': { 'normal' : 'sing', 'present' : ['sing','sings'], 'past' : 'sang', 'past participle' : 'sung', 'gerund' : 'singing', }, 'sit': { 'normal' : 'sit', 'present' : ['sit','sits'], 'past' : 'sat', 'past participle' : 'sat', 'gerund' : 'sitting', }, 'sleep': { 'normal' : 'sleep', 'present' : ['sleep','sleeps'], 'past' : 
'slept', 'past participle' : 'slept', 'gerund' : 'sleeping', }, 'smell': { 'normal' : 'smell', 'present' : ['smell','smells'], 'past' : 'smelled,smelt', 'past participle' : 'smelled,smelt', 'gerund' : 'smelling', }, 'speak': { 'normal' : 'speak', 'present' : ['speak','speaks'], 'past' : 'spoke', 'past participle' : 'spoken', 'gerund' : 'speaking', }, 'spend': { 'normal' : 'spend', 'present' : ['spend','spends'], 'past' : 'spent', 'past participle' : 'spent', 'gerund' : 'spending', }, 'stand': { 'normal' : 'stand', 'present' : ['stand','stands'], 'past' : 'stood', 'past participle' : 'stood', 'gerund' : 'standing', }, 'swim': { 'normal' : 'swim', 'present' : ['swim','swims'], 'past' : 'swam', 'past participle' : 'swum', 'gerund' : 'swimming', }, 'take': { 'normal' : 'take', 'present' : ['take','takes'], 'past' : 'took', 'past participle' : 'taken', 'gerund' : 'taking', }, 'teach': { 'normal' : 'teach', 'present' : ['teach','teaches'], 'past' : 'taught', 'past participle' : 'taught', 'gerund' : 'teaching', }, 'tell': { 'normal' : 'tell', 'present' : ['tell','tells'], 'past' : 'told', 'past participle' : 'told', 'gerund' : 'telling', }, 'think': { 'normal' : 'think', 'present' : ['think','thinks'], 'past' : 'thought', 'past participle' : 'thought', 'gerund' : 'thinking', }, 'throw': { 'normal' : 'throw', 'present' : ['throw','throws'], 'past' : 'threw', 'past participle' : 'thrown', 'gerund' : 'throwing', }, 'understand': { 'normal' : 'understand', 'present' : ['understand','understands'], 'past' : 'understood', 'past participle' : 'understood', 'gerund' : 'unerstanding', }, 'wear': { 'normal' : 'wear', 'present' : ['wear','wears'], 'past' : 'wore', 'past participle' : 'worn', 'gerund' : 'wearing', }, 'win': { 'normal' : 'win', 'present' : ['win','wins'], 'past' : 'won', 'past participle' : 'won', 'gerund' : 'winning', }, 'write': { 'normal' : 'write', 'present' : ['write','writes'], 'past' : 'wrote', 'past participle' : 'written', 'gerund' : 'writing',},}
2.578125
3
bokeh/themes/__init__.py
quasiben/bokeh
0
12307
<reponame>quasiben/bokeh
''' Provides API for loading themes

'''
from __future__ import absolute_import

from os.path import join

from .theme import Theme

default = Theme(json={})

del join
1.179688
1
files/spam-filter/tracspamfilter/captcha/keycaptcha.py
Puppet-Finland/puppet-trac
0
12308
<gh_stars>0 # -*- coding: utf-8 -*- # # Copyright (C) 2015 <NAME> <<EMAIL>> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.com/license.html. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://projects.edgewall.com/trac/. import hashlib import random import urllib2 from trac.config import Option from trac.core import Component, implements from trac.util.html import tag from tracspamfilter.api import user_agent from tracspamfilter.captcha import ICaptchaMethod class KeycaptchaCaptcha(Component): """KeyCaptcha implementation""" implements(ICaptchaMethod) private_key = Option('spam-filter', 'captcha_keycaptcha_private_key', '', """Private key for KeyCaptcha usage.""", doc_domain="tracspamfilter") user_id = Option('spam-filter', 'captcha_keycaptcha_user_id', '', """User id for KeyCaptcha usage.""", doc_domain="tracspamfilter") def generate_captcha(self, req): session_id = "%d-3.4.0.001" % random.randint(1, 10000000) sign1 = hashlib.md5(session_id + req.remote_addr + self.private_key).hexdigest() sign2 = hashlib.md5(session_id + self.private_key).hexdigest() varblock = "var s_s_c_user_id = '%s';\n" % self.user_id varblock += "var s_s_c_session_id = '%s';\n" % session_id varblock += "var s_s_c_captcha_field_id = 'keycaptcha_response_field';\n" varblock += "var s_s_c_submit_button_id = 'keycaptcha_response_button';\n" varblock += "var s_s_c_web_server_sign = '%s';\n" % sign1 varblock += "var s_s_c_web_server_sign2 = '%s';\n" % sign2 varblock += "document.s_s_c_debugmode=1;\n" fragment = tag(tag.script(varblock, type='text/javascript')) fragment.append( tag.script(type='text/javascript', src='http://backs.keycaptcha.com/swfs/cap.js') ) fragment.append( tag.input(type='hidden', id='keycaptcha_response_field', name='keycaptcha_response_field') ) fragment.append( tag.input(type='submit', id='keycaptcha_response_button', name='keycaptcha_response_button') ) req.session['captcha_key_session'] = session_id return None, fragment def verify_key(self, private_key, user_id): if private_key is None or user_id is None: return False # FIXME - Not yet implemented return True def verify_captcha(self, req): session = None if 'captcha_key_session' in req.session: session = req.session['captcha_key_session'] del req.session['captcha_key_session'] response_field = req.args.get('keycaptcha_response_field') val = response_field.split('|') s = hashlib.md5('accept' + val[1] + self.private_key + val[2]).hexdigest() self.log.debug("KeyCaptcha response: %s .. %s .. %s", response_field, s, session) if s == val[0] and session == val[3]: try: request = urllib2.Request( url=val[2], headers={"User-agent": user_agent} ) response = urllib2.urlopen(request) return_values = response.read() response.close() except Exception, e: self.log.warning("Exception in KeyCaptcha handling (%s)", e) else: self.log.debug("KeyCaptcha check result: %s", return_values) if return_values == '1': return True self.log.warning("KeyCaptcha returned invalid check result: " "%s (%s)", return_values, response_field) else: self.log.warning("KeyCaptcha returned invalid data: " "%s (%s,%s)", response_field, s, session) return False def is_usable(self, req): return self.private_key and self.user_id
2.1875
2
app/model/causalnex.py
splunk/splunk-mltk-container-docker
20
12309
#!/usr/bin/env python
# coding: utf-8

# In[18]:

# this definition exposes all python module imports that should be available in all subsequent commands
import json
import numpy as np
import pandas as pd
from causalnex.structure import DAGRegressor
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
# ...
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"


# In[22]:

# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
def stage(name):
    with open("data/" + name + ".csv", 'r') as f:
        df = pd.read_csv(f)
    with open("data/" + name + ".json", 'r') as f:
        param = json.load(f)
    return df, param


# In[24]:

# initialize your model
# available inputs: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
def init(df, param):
    model = DAGRegressor(
        alpha=0.1,
        beta=0.9,
        fit_intercept=True,
        hidden_layer_units=None,
        dependent_target=True,
        enforce_dag=True,
    )
    return model


# In[26]:

# train your model
# returns a fit info json object and may modify the model object
def fit(model, df, param):
    target = param['target_variables'][0]
    # Data prep for processing
    y_p = df[target]
    y = y_p.values
    X_p = df[param['feature_variables']]
    X = X_p.to_numpy()
    X_col = list(X_p.columns)
    # Scale the data
    ss = StandardScaler()
    X_ss = ss.fit_transform(X)
    y_ss = (y - y.mean()) / y.std()
    scores = cross_val_score(model, X_ss, y_ss, cv=KFold(shuffle=True, random_state=42))
    print(f'MEAN R2: {np.mean(scores).mean():.3f}')
    X_pd = pd.DataFrame(X_ss, columns=X_col)
    y_pd = pd.Series(y_ss, name=target)
    model.fit(X_pd, y_pd)
    info = pd.Series(model.coef_, index=X_col)
    # info = pd.Series(model.coef_, index=list(df.drop(['_time'],axis=1).columns))
    return info


# In[28]:

# apply your model
# returns the calculated results
def apply(model, df, param):
    data = []
    for col in list(df.columns):
        s = model.get_edges_to_node(col)
        for i in s.index:
            data.append([i, col, s[i]])
    graph = pd.DataFrame(data, columns=['src', 'dest', 'weight'])
    # results to send back to Splunk
    graph_output = graph[graph['weight'] > 0]
    return graph_output


# In[ ]:

# save model to name in expected convention "<algo_name>_<model_name>"
def save(model, name):
    # with open(MODEL_DIRECTORY + name + ".json", 'w') as file:
    #     json.dump(model, file)
    return model


# In[ ]:

# load model from name in expected convention "<algo_name>_<model_name>"
def load(name):
    model = {}
    # with open(MODEL_DIRECTORY + name + ".json", 'r') as file:
    #     model = json.load(file)
    return model


# In[ ]:

# return a model summary
def summary(model=None):
    returns = {"version": {"numpy": np.__version__, "pandas": pd.__version__}}
    return returns
2.765625
3
Assignment1/Q4/q4.py
NavneelSinghal/COL774
0
12310
import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as animation matplotlib.use('Agg') import math import numpy as np import sys from os.path import join, isfile import warnings warnings.filterwarnings("ignore") def gda(x, y): x = x.T y = y.T # phi = P(y = 1) # mu[i] = mean of the feature vectors of the ith class # sigma = common co-variance matrix # M[i] = number of data points of class i phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0]) m = y.shape[0] M[1] = np.sum(y) M[0] = m - M[1] phi = M[1] / m mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)]) sigma = np.sum(np.array([np.outer(x[i] - mu[y[i]], x[i] - mu[y[i]]) for i in range(m)]), axis=0).astype(float) / m return phi, mu, sigma def gda_general(x, y): x = x.T y = y.T # phi = P(y = 1) # mu[i] = mean of the feature vectors of the ith class # sigma[i] = co-variance matrix for the ith class # M[i] = number of data points of class i phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0]) m = y.shape[0] M[1] = np.sum(y) M[0] = m - M[1] phi = M[1] / m mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)]) sigma = np.array([np.sum(np.array([np.outer(x[i] - mu[k], x[i] - mu[k]) for i in range(m) if y[i] == k]), axis=0) / M[k] for k in range(2)]).astype(float) return phi, mu, sigma def main(): # read command-line arguments data_dir = sys.argv[1] out_dir = sys.argv[2] part = sys.argv[3] # check for existence of input files for c in ['x', 'y']: if not isfile(join(data_dir, 'q4' + c + '.dat')): raise Exception('q4' + c + '.dat not found') # read from csv file x = np.array(np.genfromtxt(join(data_dir, 'q4x.dat'))).T y = np.array([0 if yi == 'Alaska' else 1 for yi in np.loadtxt(join(data_dir, 'q4y.dat'), dtype=str)]) # normalisation x_mean = np.array([0.0] * 2) x_stddev = np.array([0.0] * 2) for i in range(2): x_mean[i] = np.mean(x[i]) x[i] -= np.full_like(x[i], np.mean(x[i])) x_stddev[i] = np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0]) x[i] /= np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0]) # part A # running GDA with common co-variance matrix phi, mu, sigma = gda(x, y) if part == 'a': output_file = open(join(out_dir, '4aoutput.txt'), mode='w') output_file.write('phi = ' + str(phi) + '\n') output_file.write('mu[0] = ' + str(mu[0]) + '\n') output_file.write('mu[1] = ' + str(mu[1]) + '\n') output_file.write('sigma = \n' + str(sigma) + '\n') output_file.close() print('phi = ' + str(phi)) print('mu[0] = ' + str(mu[0])) print('mu[1] = ' + str(mu[1])) print('sigma = \n' + str(sigma)) return 0 # part B, C fig4b, ax4b = plt.subplots() # filter by y-values x0, x1 = [], [] for i in range(y.shape[0]): if y[i] == 0: x0.append([x[0][i], x[1][i]]) else: x1.append([x[0][i], x[1][i]]) x0 = np.array(x0).T x1 = np.array(x1).T # plot classes alaska = ax4b.scatter(x0[0] * x_stddev[0] + x_mean[0], x0[1] * x_stddev[1] + x_mean[1], c='red', s=6) canada = ax4b.scatter(x1[0] * x_stddev[0] + x_mean[0], x1[1] * x_stddev[1] + x_mean[1], c='blue', s=6) ax4b.set_xlabel('Fresh water ring dia.') ax4b.set_ylabel('Marine water ring dia.') fig4b.legend((alaska, canada), ('Alaska', 'Canada')) if part == 'b': fig4b.savefig(join(out_dir, '1b_plot.png')) plt.show() return 0 # linear boundary computation - equation in report sigma_inverse = np.linalg.inv(sigma) theta = np.array([0., 0., 0.]) theta[0] = np.log(phi / (1 - phi)) for i in range(2): mui = np.array([mu[i]]) theta[0] += ((-1) ** i) * np.matmul(np.matmul(mui, sigma_inverse), mui.T) 
theta[1:] = np.matmul(np.array([mu[1] - mu[0]]), sigma_inverse) # plotting the boundary rx = np.arange(-3, 4) ry = (-theta[0] - theta[1] * rx) / theta[2] ax4b.plot(rx * x_stddev[0] + x_mean[0], ry * x_stddev[1] + x_mean[1]) #plt.show() if part == 'c': fig4b.savefig(join(out_dir, '1c_plot.png')) plt.show() return 0 # part D # running generalised GDA phi, mu, sigma = gda_general(x, y) if part == 'd': output_file = open(join(out_dir, '4doutput.txt'), mode='w') output_file.write('phi = ' + str(phi) + '\n') output_file.write('mu[0] = ' + str(mu[0]) + '\n') output_file.write('mu[1] = ' + str(mu[1]) + '\n') output_file.write('sigma[0] = \n' + str(sigma[0]) + '\n') output_file.write('sigma[1] = \n' + str(sigma[1]) + '\n') output_file.close() print('phi = ' + str(phi)) print('mu[0] = ' + str(mu[0])) print('mu[1] = ' + str(mu[1])) print('sigma[0] = \n' + str(sigma[0])) print('sigma[1] = \n' + str(sigma[1])) return 0 # part E # quadratic boundary computation - equation in report constant = np.log(phi / (1 - phi)) + np.log(np.linalg.det(sigma[0]) / np.linalg.det(sigma[1])) / 2 linear = 0 quadratic = 0 for i in range(2): sigma_inverse = np.linalg.inv(sigma[i]) mui = np.array([mu[i]]) prod = np.matmul(mui, sigma_inverse) constant += ((-1) ** i) * np.matmul(prod, mui.T) / 2 linear += ((-1) ** (i + 1)) * prod quadratic += ((-1) ** i) * sigma_inverse / 2 constant = constant[0][0] linear = linear[0] # note that here x transposed is the feature vector (as x is a row vector) # and similarly mu[i] is also a row vector, which explains the equations above # equation is x * quadratic * x.T + linear * x.T + constant = 0 # plotting the quadratic boundary Z = 0 X, Y = np.meshgrid(np.linspace(-4, 4, 100), np.linspace(-4, 4, 100)) Z += quadratic[0, 0] * (X ** 2) + (quadratic[0, 1] + quadratic[1, 0]) * X * Y + (quadratic[1, 1]) * (Y ** 2) Z += linear[0] * X + linear[1] * Y Z += constant ax4b.contour(X * x_stddev[0] + x_mean[0], Y * x_stddev[1] + x_mean[1], Z, 0) if part == 'e': fig4b.savefig(join(out_dir, '1e_plot.png')) plt.show() # part F - in the report return 0 if __name__ == '__main__': main()
3.0625
3
var/spack/repos/builtin/packages/bcache/package.py
milljm/spack
0
12311
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Bcache(MakefilePackage):
    """Bcache is a patch for the Linux kernel to use SSDs to cache other
    block devices."""

    homepage = "http://bcache.evilpiepirate.org"
    url = "https://github.com/g2p/bcache-tools/archive/v1.0.8.tar.gz"

    version('1.0.8', sha256='d56923936f37287efc57a46315679102ef2c86cd0be5874590320acd48c1201c')
    version('1.0.7', sha256='64d76d1085afba8c3d5037beb67bf9d69ee163f357016e267bf328c0b1807abd')
    version('1.0.6', sha256='9677c6da3ceac4e1799d560617c4d00ea7e9d26031928f8f94b8ab327496d4e0')
    version('1.0.5', sha256='1449294ef545b3dc6f715f7b063bc2c8656984ad73bcd81a0dc048cbba416ea9')
    version('1.0.4', sha256='102ffc3a8389180f4b491188c3520f8a4b1a84e5a7ca26d2bd6de1821f4d913d')

    depends_on('libuuid')
    depends_on('util-linux')
    depends_on('gettext')
    depends_on('pkgconfig', type='build')

    def setup_build_environment(self, env):
        env.append_flags('LDFLAGS', '-lintl')

    patch('func_crc64.patch', sha256='558b35cadab4f410ce8f87f0766424a429ca0611aa2fd247326ad10da115737d')

    def install(self, spec, prefix):
        mkdirp(prefix.bin)
        install('bcache-register', prefix.bin)
        install('bcache-super-show', prefix.bin)
        install('make-bcache', prefix.bin)
        install('probe-bcache', prefix.bin)
1.703125
2
core/migrations/0010_wagtailsitepage_screenshot.py
admariner/madewithwagtail
0
12312
<filename>core/migrations/0010_wagtailsitepage_screenshot.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 23:50
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0009_wagtail112upgrade'),
    ]

    operations = [
        migrations.RenameField(
            model_name='wagtailsitepage',
            old_name='image_desktop',
            new_name='site_screenshot',
        ),
        migrations.RemoveField(
            model_name='wagtailsitepage',
            name='image_phone',
        ),
        migrations.RemoveField(
            model_name='wagtailsitepage',
            name='image_tablet',
        ),
    ]
1.632813
2
circuitpython/schedule.py
Flameeyes/birch-books-smarthome
0
12313
# SPDX-FileCopyrightText: © 2020 The birch-books-smarthome Authors
# SPDX-License-Identifier: MIT

BOOKSTORE_GROUND_FLOOR = 0x0007
BOOKSTORE_FIRST_FLOOR = 0x0008
BOOKSTORE_TERRARIUM = 0x0010
BOOKSTORE_BEDROOM = 0x0020

HOUSE_BASEMENT = 0x0040
HOUSE_GROUND_FLOOR = 0x0380
HOUSE_BEDROOM_LIGHT = 0x0400
HOUSE_BEDROOM_LAMP = 0x0800
HOUSE_FIREPLACE_1 = 0x1000
HOUSE_FIREPLACE_2 = 0x2000

SCHEDULE = [
    BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LIGHT,
    BOOKSTORE_TERRARIUM | BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LIGHT,
    BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_BEDROOM_LIGHT,
    BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_GROUND_FLOOR,
    BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_BASEMENT | HOUSE_BEDROOM_LIGHT,
    BOOKSTORE_TERRARIUM | BOOKSTORE_BEDROOM | HOUSE_BASEMENT | HOUSE_BEDROOM_LAMP,
    BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LAMP,
    0,
    0,
]

TEST_SCHEDULE = [
    BOOKSTORE_GROUND_FLOOR,
    BOOKSTORE_FIRST_FLOOR,
    BOOKSTORE_TERRARIUM,
    BOOKSTORE_BEDROOM,
    HOUSE_BASEMENT,
    HOUSE_GROUND_FLOOR,
    HOUSE_BEDROOM_LIGHT,
    HOUSE_BEDROOM_LAMP,
    HOUSE_FIREPLACE_1,
    HOUSE_FIREPLACE_2,
]
1.304688
1
imdb/utils.py
rinkurajole/imdb_sanic_app
0
12314
import bcrypt

salt = bcrypt.gensalt()


def generate_hash(passwd, salt=salt):
    # bcrypt.hashpw expects bytes; encode str passwords before hashing.
    if isinstance(passwd, str):
        passwd = passwd.encode('utf-8')
    return str(bcrypt.hashpw(passwd, salt))


def match_password(req_pwd, db_pwd):
    if isinstance(req_pwd, str):
        req_pwd = req_pwd.encode('utf-8')
    # Stored hashes look like "b'...'" (str() of bytes); strip that wrapper before comparing.
    db_pwd = db_pwd.replace('b\'', '').replace('\'', '').encode('utf-8')
    return db_pwd == bcrypt.hashpw(req_pwd, db_pwd)
2.921875
3
test/test_modules/test_math.py
dragonteros/unsuspected-hangeul
62
12315
from test.test_base import TestBase class TestMath(TestBase): def test_isclose(self): _test = self._assert_execute _test('ㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True') _test('ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True') _test('ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True') _test('ㅈㅈㅈ ㄴㄱ ㅅㅎㄷ ㅅㅈㅈ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False') _test('ㅈㅈㅈㅈㅈㅈㅈ ㄴㄱ ㅅㅎㄷ ㅅㅈㅈㅈㅈㅈㅈ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False') _test('ㅅㄷㄱ ㅈ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False') _test('ㄴㅈㄱㄹㄴㄹㄱ ㅅㄴㅂㄱㄱㄴㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('(ㅂ ㅅ ㅈ ㅂㅎㄹ) (ㄱ ㅂ ㅅ ㅂ ㅂㅎㄹ ㅄㅎㄷ) ㅅㅎㄷ, ㄴㄱ, (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('ㄱㄴ ㄷ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True') _test('ㄱㄴ ㄹ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True') _test('ㄱㄴ ㅁ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True') _test('ㄴㄱ ㄷ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True') _test('ㄴㄱ ㄹ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True') _test('ㄴㄱ ㅁ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True') def test_isnan(self): _test = self._assert_execute _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄱ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄴ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('ㄱ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('ㄴ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄴ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False') def test_isinf(self): _test = self._assert_execute _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄱ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄴ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('ㄴ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True') _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄱ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('ㄴ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False') _test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False') def test_abs(self): _test = self._assert_execute _test('ㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0') _test('ㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.0') _test('ㄱ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.0') _test('ㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1') _test('ㄴㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1') _test('ㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '2') _test('ㄷㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '2') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5') _test('ㅁ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.25') _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.25') _test('ㄴ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0') _test('ㄴㄱ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5') _test('ㄱ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0') _test('ㄱ ㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0') _test('ㄱ 
ㄷ ㄴㄱ ㅅㅎㄷ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5') _test('ㄱ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5') _test('ㄹ ㅁ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄹㄱ ㅁ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄹ ㅁㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄹㄱ ㅁㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㅂ ㅁㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㅂㄱ ㅁㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㅂ ㅁㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㅂㄱ ㅁㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄴㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄴㄱ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄴㄱ ㅁ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㅁㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄱㄴ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') _test('ㄱㄷ ㅁ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True') def test_log(self): _test = self._assert_execute _test('ㄴ [((ㅂ ㅅ ㅈ ㅂㅎㄹ) (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄷㄱ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄷ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') def test_trig(self): _test = self._assert_execute _test('ㄱ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄷ ㄴㄱ ㅅㅎㄷ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True') _test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True') _test('ㄱ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True') _test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄴ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True') _test('ㄱ (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅁ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅁㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㄹ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) (ㄹ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅅ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) (ㄹ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True') _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄷㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㅂ ㅅ ㅂ ㅂㅎㄹ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㄷㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') _test('ㅂ ㅅ ㅂ ㅂㅎㄹ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True') def test_asin(self): _test = 
self._assert_execute _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄱ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') def test_acos(self): _test = self._assert_execute _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄱ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') def test_atan(self): _test = self._assert_execute _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') _test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True') def test_atan2(self): _test = self._assert_execute _test('ㄱ ㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ', '0.0') _test('ㄱ ㄴㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True') _test('(ㄴ ㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ) ㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True') _test('(ㄴㄱ ㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ) ㄷㄱ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True') _test('[ㄴ ㄴㄱ ㄷ ㄷㄱ ㄹ ㄺ ㅁㅀㅅ] [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ, ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ, ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄷ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True') def test_trunc(self): _test = self._assert_execute _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-2') def test_floor(self): _test = self._assert_execute _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-3') 
def test_round(self): _test = self._assert_execute _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2') def test_ceil(self): _test = self._assert_execute _test('ㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㄴ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '3') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-2') def test_round_to_inf(self): _test = self._assert_execute _test('ㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '0') _test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄴ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1') _test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '2') _test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-2') _test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '3') _test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-3')
2.578125
3
services/smtp.py
sourceperl/docker.mqttwarn
0
12316
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright 2014 <NAME>'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""

import smtplib
from email.mime.text import MIMEText


def plugin(srv, item):
    srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)

    smtp_addresses = item.addrs
    server = item.config['server']
    sender = item.config['sender']
    starttls = item.config['starttls']
    username = item.config['username']
    password = item.config['password']

    msg = MIMEText(item.message)
    msg['Subject'] = item.get('title', "%s notification" % (srv.SCRIPTNAME))
    msg['To'] = ", ".join(smtp_addresses)
    msg['From'] = sender
    msg['X-Mailer'] = srv.SCRIPTNAME

    try:
        srv.logging.debug("Sending SMTP notification to %s [%s]..." % (item.target, smtp_addresses))
        # Note: `server` is rebound here from the host name string to the SMTP connection object.
        server = smtplib.SMTP(server)
        server.set_debuglevel(0)
        server.ehlo()
        if starttls:
            server.starttls()
        if username:
            server.login(username, password)
        server.sendmail(sender, smtp_addresses, msg.as_string())
        server.quit()
        srv.logging.debug("Successfully sent SMTP notification")
    except Exception as e:  # original used the Python 2-only form "except Exception, e:"
        srv.logging.warn("Error sending notification to SMTP recipient %s [%s]: %s" % (item.target, smtp_addresses, str(e)))
        return False

    return True
2.375
2
xknx/knxip/disconnect_request.py
Trance-Paradox/xknx
0
12317
""" Module for Serialization and Deserialization of a KNX Disconnect Request information. Disconnect requests are used to disconnect a tunnel from a KNX/IP device. """ from __future__ import annotations from typing import TYPE_CHECKING from xknx.exceptions import CouldNotParseKNXIP from .body import KNXIPBody from .hpai import HPAI from .knxip_enum import KNXIPServiceType if TYPE_CHECKING: from xknx.xknx import XKNX class DisconnectRequest(KNXIPBody): """Representation of a KNX Disconnect Request.""" SERVICE_TYPE = KNXIPServiceType.DISCONNECT_REQUEST def __init__( self, xknx: XKNX, communication_channel_id: int = 1, control_endpoint: HPAI = HPAI(), ): """Initialize DisconnectRequest object.""" super().__init__(xknx) self.communication_channel_id = communication_channel_id self.control_endpoint = control_endpoint def calculated_length(self) -> int: """Get length of KNX/IP body.""" return 2 + HPAI.LENGTH def from_knx(self, raw: bytes) -> int: """Parse/deserialize from KNX/IP raw data.""" if len(raw) < 2: raise CouldNotParseKNXIP("Disconnect info has wrong length") self.communication_channel_id = raw[0] # raw[1] is reserved return self.control_endpoint.from_knx(raw[2:]) + 2 def to_knx(self) -> bytes: """Serialize to KNX/IP raw data.""" return ( bytes((self.communication_channel_id, 0x00)) # 2nd byte is reserved + self.control_endpoint.to_knx() ) def __str__(self) -> str: """Return object as readable string.""" return ( "<DisconnectRequest " f'CommunicationChannelID="{self.communication_channel_id}" ' f'control_endpoint="{self.control_endpoint}" />' )
2.625
3
src/database_setup.py
liuchanglilian/crowdsourcing-text-msg
0
12318
from src.sqlite_helper import create_message_table, drop_message_table

"""
This script will create a SQLite table for you, and should be a one-time setup.
The table name is message, which will store all the Post messages.
"""
create_message_table()

"""
If you need to drop the message table, un-comment the following code
by removing the # sign in the beginning
"""
#
# drop_message_table()
#
3
3
home/pedrosenarego/zorba/zorba1.0.py
rv8flyboy/pyrobotlab
63
12319
<gh_stars>10-100 from java.lang import String import threading import random import codecs import io import itertools import time import os import urllib2 import textwrap import socket import shutil ############################################################# # This is the ZOrba # ############################################################# # All bot specific configuration goes here. leftPort = "/dev/ttyACM1" rightPort = "/dev/ttyACM0" headPort = leftPort gesturesPath = "/home/pedro/Dropbox/pastaPessoal/3Dprinter/inmoov/scripts/zorba/gestures" botVoice = "WillBadGuy" #starting the INMOOV i01 = Runtime.createAndStart("i01", "InMoov") i01.setMute(True) ##############STARTING THE RIGHT HAND######### i01.rightHand = Runtime.create("i01.rightHand", "InMoovHand") #tweaking defaults settings of right hand i01.rightHand.thumb.setMinMax(20,155) i01.rightHand.index.setMinMax(30,130) i01.rightHand.majeure.setMinMax(38,150) i01.rightHand.ringFinger.setMinMax(30,170) i01.rightHand.pinky.setMinMax(30,150) i01.rightHand.thumb.map(0,180,20,155) i01.rightHand.index.map(0,180,30,130) i01.rightHand.majeure.map(0,180,38,150) i01.rightHand.ringFinger.map(0,180,30,175) i01.rightHand.pinky.map(0,180,30,150) ################# #################STARTING RIGHT ARM############### i01.startRightArm(rightPort) #i01.rightArm = Runtime.create("i01.rightArm", "InMoovArm") ## tweak default RightArm i01.detach() i01.rightArm.bicep.setMinMax(0,60) i01.rightArm.bicep.map(0,180,0,60) i01.rightArm.rotate.setMinMax(46,130) i01.rightArm.rotate.map(0,180,46,130) i01.rightArm.shoulder.setMinMax(0,155) i01.rightArm.shoulder.map(0,180,0,155) i01.rightArm.omoplate.setMinMax(8,85) i01.rightArm.omoplate.map(0,180,8,85) ########STARTING SIDE NECK CONTROL######## def neckMoveTo(restPos,delta): leftneckServo.moveTo(restPos + delta) rightneckServo.moveTo(restPos - delta) leftneckServo = Runtime.start("leftNeck","Servo") rightneckServo = Runtime.start("rightNeck","Servo") right = Runtime.start("i01.right", "Arduino") #right.connect(rightPort) leftneckServo.attach(right, 13) rightneckServo.attach(right, 12) restPos = 90 delta = 20 neckMoveTo(restPos,delta) #############STARTING THE HEAD############## i01.head = Runtime.create("i01.head", "InMoovHead") #weaking defaults settings of head i01.head.jaw.setMinMax(35,75) i01.head.jaw.map(0,180,35,75) i01.head.jaw.setRest(35) #tweaking default settings of eyes i01.head.eyeY.setMinMax(0,180) i01.head.eyeY.map(0,180,70,110) i01.head.eyeY.setRest(90) i01.head.eyeX.setMinMax(0,180) i01.head.eyeX.map(0,180,70,110) i01.head.eyeX.setRest(90) i01.head.neck.setMinMax(40,142) i01.head.neck.map(0,180,40,142) i01.head.neck.setRest(70) i01.head.rothead.setMinMax(21,151) i01.head.rothead.map(0,180,21,151) i01.head.rothead.setRest(88) #########STARTING MOUTH CONTROL############### i01.startMouthControl(leftPort) i01.mouthControl.setmouth(0,180) ###################################################################### # mouth service, speech synthesis mouth = Runtime.createAndStart("i01.mouth", "AcapelaSpeech") mouth.setVoice(botVoice) ###################################################################### # helper function help debug the recognized text from webkit/sphinx ###################################################################### def heard(data): print "Speech Recognition Data:"+str(data) ###################################################################### # Create ProgramAB chat bot ( This is the inmoov "brain" ) ###################################################################### zorba2 = 
Runtime.createAndStart("zorba", "ProgramAB") zorba2.startSession("Pedro", "zorba") ###################################################################### # Html filter to clean the output from programab. (just in case) htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter") ###################################################################### # the "ear" of the inmoov TODO: replace this with just base inmoov ear? ear = Runtime.createAndStart("i01.ear", "WebkitSpeechRecognition") ear.addListener("publishText", python.name, "heard"); ear.addMouth(mouth) ###################################################################### # MRL Routing webkitspeechrecognition/ear -> program ab -> htmlfilter -> mouth ###################################################################### ear.addTextListener(zorba) zorba2.addTextListener(htmlfilter) htmlfilter.addTextListener(mouth) #starting the INMOOV i01 = Runtime.createAndStart("i01", "InMoov") i01.setMute(True) i01.mouth = mouth ###################################################################### # Launch the web gui and create the webkit speech recognition gui # This service works in Google Chrome only with the WebGui ################################################################# webgui = Runtime.createAndStart("webgui","WebGui") ###################################################################### # Helper functions and various gesture definitions ###################################################################### i01.loadGestures(gesturesPath) ear.startListening() ###################################################################### # starting services ###################################################################### i01.startRightHand(rightPort) i01.detach() leftneckServo.detach() rightneckServo.detach() i01.startHead(leftPort) i01.detach()
2.453125
2
pocs/tests/test_state_machine.py
zacharyt20/POCS
1
12320
<reponame>zacharyt20/POCS
import os

import pytest
import yaml

from pocs.core import POCS
from pocs.observatory import Observatory
from pocs.utils import error


@pytest.fixture
def observatory():
    observatory = Observatory(simulator=['all'])

    yield observatory


def test_bad_state_machine_file():
    with pytest.raises(error.InvalidConfig):
        POCS.load_state_table(state_table_name='foo')


def test_load_bad_state(observatory):
    pocs = POCS(observatory)

    with pytest.raises(error.InvalidConfig):
        pocs._load_state('foo')


def test_state_machine_absolute(temp_file):
    state_table = POCS.load_state_table()
    assert isinstance(state_table, dict)

    with open(temp_file, 'w') as f:
        f.write(yaml.dump(state_table))

    file_path = os.path.abspath(temp_file)
    assert POCS.load_state_table(state_table_name=file_path)
2.125
2
code/BacDup/scripts/gff_parser.py
JFsanchezherrero/TFM_UOC_AMoya
2
12321
<reponame>JFsanchezherrero/TFM_UOC_AMoya #!/usr/bin/env python3 ############################################################## ## <NAME> & <NAME> ## ## Copyright (C) 2020-2021 ## ############################################################## ''' Created on 28 oct. 2020 @author: alba Modified in March 2021 @author: <NAME> ''' ## useful imports import sys import os import pandas as pd import numpy as np import HCGB from Bio import SeqIO, Seq from Bio.SeqRecord import SeqRecord from BCBio import GFF from BacDup.scripts.functions import columns_annot_table ################################################## def gff_parser_caller(gff_file, ref_file, output_path, debug): '''This function calls the actual gff parser It serves as the entry point either from a module or system call ''' ## set output paths prot_file = os.path.abspath( os.path.join(output_path, 'proteins.fa')) csv_file = os.path.abspath( os.path.join(output_path, 'annot_df.csv')) csv_length = os.path.abspath( os.path.join(output_path, 'length_df.csv')) list_out_files = [prot_file, csv_file, csv_length] try: with open (ref_file) as in_handle: ref_recs = SeqIO.to_dict(SeqIO.parse(in_handle, "fasta")) ## debug messages if (debug): debug_message('GenBank record', 'yellow') print (ref_recs) ## parse with open(prot_file, "w") as out_handle: SeqIO.write(protein_recs(gff_file, ref_recs, list_out_files, debug=debug), out_handle, "fasta") ## return information return (list_out_files) except: return (False) ############################################################ def protein_recs(gff_file, ref_recs, list_out_files, debug=False): '''GFF parser to retrieve proteins and annotation ''' #create an empty dataframe. columns = columns_annot_table() ## get common column names annot_df = pd.DataFrame(data=None, columns=columns) genome_length = pd.DataFrame(data=None, columns=["length"]) with open(gff_file) as in_handle: ##parse the output. Generate SeqRecord and SeqFeatures for predictions ##sort by CDS type. Duplicate genes analysis just needs coding regions to proteins. 
limit_info = dict(gff_type=["CDS"]) for rec in GFF.parse(in_handle, limit_info = limit_info, base_dict=ref_recs): #get genome length for BioCircos plotting ID = rec.id genome_length.loc[ID,["length"]]=[len(rec.seq)] ## debug messages if (debug): debug_message('GFF record', 'yellow') print(rec) for feature in rec.features: ## Debug messages if (debug): debug_message('feature: ', 'yellow') print(feature) ## strand if feature.strand == -1: strand = "neg" else: strand = "pos" #we create an ID for each entry protID = feature.type + "_" + rec.id + "_" + str(feature.location.nofuzzy_start) + "_" + str(feature.location.nofuzzy_end) + "_" + strand annot_df.loc[protID, ["rec_id", "type", "start", "end", "strand"]] = [ID, feature.type, feature.location.nofuzzy_start, feature.location.nofuzzy_end, strand] qualif = feature.qualifiers ## Debug messages if (debug): debug_message('protID: ' + protID, 'yellow') debug_message('qualif: ', 'yellow') print (qualif) ## loop for keys, values in qualif.items(): #fill the dataframe info if keys == "Note": continue annot_df.loc[protID,[keys]] = [values[0]] ## get gene sequence gene_seq = Seq.Seq(str(rec.seq[feature.location.nofuzzy_start:feature.location.nofuzzy_end])) ## Debug messages if (debug): debug_message('gene_seq: ' + protID, 'yellow') print (gene_seq) if feature.type == "CDS": if feature.strand == -1: gene_seq = gene_seq.reverse_complement() # translate genome sequence table_code = feature.qualifiers["transl_table"][0] protein_seq = gene_seq.translate(table=table_code, to_stop=False) # delete STOP symbols # we set gene_seq.translate to include all stop codons to include # stop codons in pseudogenes. then, we removed last symbol * for # each sequence if protein_seq.endswith("*"): protein_seq = protein_seq[:-1] yield(SeqRecord(protein_seq, protID, "", "")) ## print additional information annot_df.to_csv(list_out_files[1], header=True) genome_length.to_csv(list_out_files[2], header=True) #get genome length for BioCircos plotting #genome_length = pd.DataFrame(data=None, columns=["length"]) #ID = rec.id #length = len(rec.seq) #genome_length.loc[ID,["length"]]=[length] #csv_length = "%s/%s_length.csv" % (output_path, rec.id) #genome_length.to_csv(csv_length, header=True) ## debug messages if (debug): debug_message('annot_df: ', 'yellow') print(annot_df) ## empty return return() ################################################################# def main (gff_file, ref_file, output_folder, debug=False): #get name base, ext = os.path.splitext(gff_file) gff_file = os.path.abspath(gff_file) #create folder output_path = HCGB.functions.file_functions.create_folder(output_path) if (debug): print ("## DEBUG:") print ("base:" , base) print ("ext:" , ext) print () gff_parser_caller(gff_file, ref_file, output_path, debug) ################################################################################ if __name__ == "__main__": if len(sys.argv) != 4: print (__doc__) print ("## Usage gff_parser") print ("python %s gff_file ref_fasta_file output_folder\n" %sys.argv[0]) sys.exit() main(*sys.argv[1:], debug=True) #main(*sys.argv[1:]) # la variable debug no es obligatoria. tiene un "por defecto definido" # Se utiliza el "=" para indicar el default.
2.296875
2
PAL/Cross/client/sources-linux/build_library_zip.py
infosecsecurity/OSPTF
2
12322
<filename>PAL/Cross/client/sources-linux/build_library_zip.py<gh_stars>1-10
import sys
from distutils.core import setup
import os
from glob import glob
import zipfile
import shutil

sys.path.insert(0, os.path.join('resources', 'library_patches'))
sys.path.insert(0, os.path.join('..', '..', 'pupy'))

import pp
import additional_imports
import Crypto

all_dependencies = set([x.split('.')[0] for x, m in sys.modules.iteritems() if not '(built-in)' in str(m) and x != '__main__'])

print "ALLDEPS: ", all_dependencies

zf = zipfile.ZipFile(os.path.join('resources', 'library.zip'), mode='w', compression=zipfile.ZIP_DEFLATED)

try:
    for dep in all_dependencies:
        mdep = __import__(dep)
        print "DEPENDENCY: ", dep, mdep
        if hasattr(mdep, '__path__'):
            print('adding package %s' % dep)
            path, root = os.path.split(mdep.__path__[0])
            for root, dirs, files in os.walk(mdep.__path__[0]):
                for f in list(set([x.rsplit('.', 1)[0] for x in files])):
                    found = False
                    for ext in ('.pyc', '.so', '.pyo', '.py'):
                        if ext == '.py' and found:
                            continue
                        if os.path.exists(os.path.join(root, f + ext)):
                            zipname = os.path.join(root[len(path) + 1:], f.split('.', 1)[0] + ext)
                            print('adding file : {}'.format(zipname))
                            zf.write(os.path.join(root, f + ext), zipname)
                            found = True
        else:
            if '<memimport>' in mdep.__file__:
                continue
            _, ext = os.path.splitext(mdep.__file__)
            print('adding %s -> %s' % (mdep.__file__, dep + ext))
            zf.write(mdep.__file__, dep + ext)
finally:
    zf.close()
2.0625
2
conans/test/functional/old/short_paths_test.py
Manu343726/conan
0
12323
<gh_stars>0 import os import platform import unittest from conans.model.ref import ConanFileReference from conans.test.utils.tools import NO_SETTINGS_PACKAGE_ID, TestClient class ShortPathsTest(unittest.TestCase): @unittest.skipUnless(platform.system() == "Windows", "Requires Windows") def inconsistent_cache_test(self): conanfile = """ import os from conans import ConanFile, tools class TestConan(ConanFile): name = "test" version = "1.0" short_paths = {0} exports_sources = "source_file.cpp" def source(self): for item in os.listdir(self.source_folder): self.output.info("SOURCE: " + str(item)) def build(self): tools.save(os.path.join(self.build_folder, "artifact"), "") for item in os.listdir(self.build_folder): self.output.info("BUILD: " + str(item)) def package(self): self.copy("source_file.cpp") self.copy("artifact") for item in os.listdir(self.build_folder): self.output.info("PACKAGE: " + str(item)) """ client = TestClient() client.save({"conanfile.py": conanfile.format("False"), "source_file.cpp": ""}) client.run("create . danimtb/testing") conan_ref = ConanFileReference("test", "1.0", "danimtb", "testing") source_folder = os.path.join(client.client_cache.conan(conan_ref), "source") build_folder = os.path.join(client.client_cache.conan(conan_ref), "build", NO_SETTINGS_PACKAGE_ID) package_folder = os.path.join(client.client_cache.conan(conan_ref), "package", NO_SETTINGS_PACKAGE_ID) self.assertIn("SOURCE: source_file.cpp", client.out) self.assertEqual(["source_file.cpp"], os.listdir(source_folder)) self.assertIn("BUILD: source_file.cpp", client.out) self.assertIn("BUILD: artifact", client.out) self.assertEqual( sorted(["artifact", "conanbuildinfo.txt", "conaninfo.txt", "source_file.cpp"]), sorted(os.listdir(build_folder))) self.assertIn("PACKAGE: source_file.cpp", client.out) self.assertIn("PACKAGE: artifact", client.out) self.assertEqual( sorted(["artifact", "conaninfo.txt", "conanmanifest.txt", "source_file.cpp"]), sorted(os.listdir(package_folder))) client.save({"conanfile.py": conanfile.format("True")}) client.run("create . danimtb/testing") self.assertIn("SOURCE: source_file.cpp", client.out) self.assertEqual([".conan_link"], os.listdir(source_folder)) self.assertIn("BUILD: source_file.cpp", client.out) self.assertIn("BUILD: artifact", client.out) self.assertEqual([".conan_link"], os.listdir(build_folder)) self.assertIn("PACKAGE: source_file.cpp", client.out) self.assertIn("PACKAGE: artifact", client.out) self.assertEqual([".conan_link"], os.listdir(package_folder)) @unittest.skipUnless(platform.system() == "Windows", "Requires Windows") def package_output_test(self): conanfile = """ import os from conans import ConanFile, tools class TestConan(ConanFile): name = "test" version = "1.0" short_paths = True """ client = TestClient() client.save({"conanfile.py": conanfile, "source_file.cpp": ""}) client.run("create . danimtb/testing") self.assertNotIn("test/1.0@danimtb/testing: Package '1' created", client.out) self.assertIn( "test/1.0@danimtb/testing: Package '%s' created" % NO_SETTINGS_PACKAGE_ID, client.out) # try local flow still works, but no pkg id available client.run("install .") client.run("package .") self.assertIn("conanfile.py (test/1.0@None/None): Package 'package' created", client.out) # try export-pkg with package folder client.run("remove test/1.0@danimtb/testing --force") client.run("export-pkg . 
test/1.0@danimtb/testing --package-folder package") self.assertIn( "test/1.0@danimtb/testing: Package '%s' created" % NO_SETTINGS_PACKAGE_ID, client.out) # try export-pkg without package folder client.run("remove test/1.0@danimtb/testing --force") client.run("export-pkg . test/1.0@danimtb/testing --install-folder .") self.assertIn( "test/1.0@danimtb/testing: Package '%s' created" % NO_SETTINGS_PACKAGE_ID, client.out) # try conan get client.run("get test/1.0@danimtb/testing . -p %s" % NO_SETTINGS_PACKAGE_ID) self.assertIn("conaninfo.txt", client.out) self.assertIn("conanmanifest.txt", client.out)
2.34375
2
data_collection/json2mongodb.py
kwond2/hedgehogs
9
12324
<reponame>kwond2/hedgehogs #-*- coding: utf-8 -*- # import os # from optparse import OptionParser # from pymongo import MongoClient, bulk # import json # import collections # import sys from import_hedgehogs import * HOST = '172.16.17.32' PORT = 27017 DB = 'SEC_EDGAR' class OrderedDictWithKeyEscaping(collections.OrderedDict): def __setitem__(self, key, value, dict_setitem=dict.__setitem__): # MongoDB complains when keys contain dots, so we call json.load with # a modified OrderedDict class which escapes dots in keys on the fly key = key.replace('.', '<DOT>') super(OrderedDictWithKeyEscaping, self).__setitem__(key, value)#, dict_setitem=dict.__setitem__) #super(OrderedDictWithKeyEscaping, self).__setitem__ #super() def save_to_mongodb(input_file_name, collectionID, usernameID, passwordID): with open(input_file_name) as fp: data = fp.read() json_ = json.loads(data, encoding='utf-8', object_pairs_hook=OrderedDictWithKeyEscaping) client = MongoClient(HOST, PORT, username=usernameID, password=<PASSWORD>, authMechanism ='SCRAM-SHA-1') # client.admin.authenticate('jgeorge','123',source= 'SEC_EDGAR') # print("arguments to function:", input_file_name, usernameID, collectionID) db = client[DB] collection = db[collectionID] # print(type(input_file_name)) # file = open(input_file_name, "r") # data = json.load(file) # print(type(data)) # print(type(file)) # data = json_util.loads(file.read()) # print(json_) for item in json_: collection.insert_one(item) # file.close() def get_collection_name(input_file_name): data_list = json.load(open(input_file_name)) data = dict(data_list[0]) ticker = "TICKER" quarter = "QUARTER" try: # year = data.get("Document And Entity Information [Abstract]") # print(year) year = data.get("Document And Entity Information [Abstract]").get("Document Fiscal Year Focus").get("value") quarter = data.get("Document And Entity Information [Abstract]").get("Document Fiscal Period Focus").get("value") ticker = data.get("Document And Entity Information [Abstract]").get("Entity Trading Symbol").get("value") except AttributeError: print("[EXCEPT] Issues with ", input_file_namex) # except AttributeError: # year = data.get("Document And Entity Information").get("Document Fiscal Year Focus").get("value") # quarter = data.get("Document And Entity Information").get("Document Fiscal Period Focus").get("value") # try: # ticker = data.get("Document And Entity Information [Abstract]").get("Entity Trading Symbol").get("value") # except: # ticker = data.get("Document And Entity Information [Abstract]").get("Trading Symbol").get("value") # try: # ticker = data.get("Document And Entity Information [Abstract]").get("Entity Trading Symbol").get("value") # except: # ticker = data.get("Document And Entity Information [Abstract]").get("Trading Symbol").get("value") # quarter = data.get("Document And Entity Information [Abstract]").get("Document Fiscal Period Focus").get("value") return str(ticker) + "_" + str(year) + "_" + str(quarter) def main(): cli_parser = OptionParser( usage='usage: %prog <input.json> <username> <password>' ) (options, args) = cli_parser.parse_args() # Input file checks if len(args) < 2: cli_parser.error("You have to supply 2 arguments, USAGE: .json username") input_file_name = args[0] if not os.path.exists(input_file_name): cli_parser.error("The input file %s you supplied does not exist" % input_file_name) # JAROD's FUNCTION collection = get_collection_name(input_file_name) #collection = (sys.argv[1]).strip('.') username = sys.argv[2] password = sys.argv[3] print("Adding to 
MongoDB...") #save_to_mongodb(input_file_name, collection, username) if __name__ == "__main__": print("[WARNING] STILL UNDER DEVELOPMENT") main()
2.34375
2
scripts/convert_to_singlehead.py
Lollipop321/mini-decoder-network
1
12325
<filename>scripts/convert_to_singlehead.py import torch import math import time import struct import argparse import numpy as np from collections import OrderedDict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-model', required=True, help="trained model prefix, also include dir, e.g. ../data/model-100") args = parser.parse_args() model_path = args.model checkpoint = torch.load(model_path, map_location='cpu') assert 'args' in checkpoint assert 'model' in checkpoint args = checkpoint['args'] model = checkpoint['model'] checkpoint_new = {} model_new = {} e = [0, 0, 0, 0, 0, 0] d = [0, 0, 0, 0, 0, 0] for name, w in model.items(): if "decoder" in name: if "self_attn.in_proj" in name: layer = eval(name.split(".")[2]) wq, wk, wv = w.chunk(3, dim=0) assert args.encoder_embed_dim == args.decoder_embed_dim model_new[name] = torch.cat([wq[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))], wk[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))], wv[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))]], dim=0) elif "encoder_attn.in_proj" in name: layer = eval(name.split(".")[2]) wq, wk, wv = w.chunk(3, dim=0) assert args.encoder_embed_dim == args.decoder_embed_dim model_new[name] = torch.cat([wq[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))], wk[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))], wv[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))]], dim=0) elif "self_attn.out_proj.weight" in name: layer = eval(name.split(".")[2]) assert args.encoder_embed_dim == args.decoder_embed_dim model_new[name] = w[:, (args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))] elif "encoder_attn.out_proj.weight" in name: layer = eval(name.split(".")[2]) assert args.encoder_embed_dim == args.decoder_embed_dim model_new[name] = w[:, (args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))] else: model_new[name] = w else: model_new[name] = w checkpoint_new['args'] = args checkpoint_new['args'].arch = "transformer_singlehead_t2t_wmt_en_de" checkpoint_new['model'] = model_new # print(checkpoint_new['args'].arch) torch.save(checkpoint_new, 'checkpoint_singlehead.pt') print("finished!")
2.375
2
tests/integration/frameworks/test_detectron2_impl.py
francoisserra/BentoML
1
12326
import typing as t from typing import TYPE_CHECKING import numpy as np import torch import pytest import imageio from detectron2 import model_zoo from detectron2.data import transforms as T from detectron2.config import get_cfg from detectron2.modeling import build_model import bentoml if TYPE_CHECKING: from detectron2.config import CfgNode from bentoml._internal.types import Tag from bentoml._internal.models import ModelStore IMAGE_URL: str = "./tests/utils/_static/detectron2_sample.jpg" def extract_result(raw_result: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: pred_instances = raw_result["instances"] boxes = pred_instances.pred_boxes.to("cpu").tensor.detach().numpy() scores = pred_instances.scores.to("cpu").detach().numpy() pred_classes = pred_instances.pred_classes.to("cpu").detach().numpy() result = { "boxes": boxes, "scores": scores, "classes": pred_classes, } return result def prepare_image( original_image: "np.ndarray[t.Any, np.dtype[t.Any]]", ) -> "np.ndarray[t.Any, np.dtype[t.Any]]": """Mainly to test on COCO dataset""" _aug = T.ResizeShortestEdge([800, 800], 1333) image = _aug.get_transform(original_image).apply_image(original_image) return image.transpose(2, 0, 1) def detectron_model_and_config() -> t.Tuple[torch.nn.Module, "CfgNode"]: model_url: str = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml" cfg: "CfgNode" = get_cfg() cfg.merge_from_file(model_zoo.get_config_file(model_url)) # set threshold for this model cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_url) cloned = cfg.clone() cloned.MODEL.DEVICE = "cpu" # running on CI model: torch.nn.Module = build_model(cloned) model.eval() return model, cfg @pytest.fixture(scope="module", name="image_array") def fixture_image_array() -> "np.ndarray[t.Any, np.dtype[t.Any]]": return np.asarray(imageio.imread(IMAGE_URL)) def save_procedure(metadata: t.Dict[str, t.Any], _modelstore: "ModelStore") -> "Tag": model, config = detectron_model_and_config() tag_info = bentoml.detectron.save( "test_detectron2_model", model, model_config=config, metadata=metadata, model_store=_modelstore, ) return tag_info @pytest.mark.parametrize("metadata", [{"acc": 0.876}]) def test_detectron2_save_load( metadata: t.Dict[str, t.Any], image_array: "np.ndarray[t.Any, np.dtype[t.Any]]", modelstore: "ModelStore", ) -> None: tag = save_procedure(metadata, _modelstore=modelstore) _model = bentoml.models.get(tag, _model_store=modelstore) assert _model.info.metadata is not None detectron_loaded = bentoml.detectron.load( _model.tag, device="cpu", model_store=modelstore, ) assert next(detectron_loaded.parameters()).device.type == "cpu" image = prepare_image(image_array) image = torch.as_tensor(image) input_data = [{"image": image}] raw_result = detectron_loaded(input_data) result = extract_result(raw_result[0]) assert result["scores"][0] > 0.9 def test_detectron2_setup_run_batch( image_array: "np.ndarray[t.Any, np.dtype[t.Any]]", modelstore: "ModelStore" ) -> None: tag = save_procedure({}, _modelstore=modelstore) runner = bentoml.detectron.load_runner(tag, model_store=modelstore) assert tag in runner.required_models assert runner.num_concurrency_per_replica == 1 assert runner.num_replica == 1 image = torch.as_tensor(prepare_image(image_array)) res = runner.run_batch(image) result = extract_result(res[0]) assert result["boxes"] is not None
2.125
2
molecule/default/tests/test_default.py
escalate/ansible-influxdb-docker
0
12327
"""Role testing files using testinfra""" def test_config_directory(host): """Check config directory""" f = host.file("/etc/influxdb") assert f.is_directory assert f.user == "influxdb" assert f.group == "root" assert f.mode == 0o775 def test_data_directory(host): """Check data directory""" d = host.file("/var/lib/influxdb") assert d.is_directory assert d.user == "influxdb" assert d.group == "root" assert d.mode == 0o700 def test_backup_directory(host): """Check backup directory""" b = host.file("/var/backups/influxdb") assert b.is_directory assert b.user == "influxdb" assert b.group == "root" assert b.mode == 0o775 def test_influxdb_service(host): """Check InfluxDB service""" s = host.service("influxdb") assert s.is_running assert s.is_enabled def test_influxdb_docker_container(host): """Check InfluxDB docker container""" d = host.docker("influxdb.service").inspect() assert d["HostConfig"]["Memory"] == 1073741824 assert d["Config"]["Image"] == "influxdb:latest" assert d["Config"]["Labels"]["maintainer"] == "<EMAIL>" assert "INFLUXD_REPORTING_DISABLED=true" in d["Config"]["Env"] assert "internal" in d["NetworkSettings"]["Networks"] assert \ "influxdb" in d["NetworkSettings"]["Networks"]["internal"]["Aliases"] def test_backup(host): """Check if the backup runs successfully""" cmd = host.run("/usr/local/bin/backup-influxdb.sh") assert cmd.succeeded def test_backup_cron_job(host): """Check backup cron job""" f = host.file("/var/spool/cron/crontabs/root") assert "/usr/local/bin/backup-influxdb.sh" in f.content_string def test_restore(host): """Check if the restore runs successfully""" cmd = host.run("/usr/local/bin/restore-influxdb.sh") assert cmd.succeeded
2.140625
2
ZIP-v0.01/Serial_to_MQTT.py
JittoThomas/IOT
0
12328
<gh_stars>0 #!/usr/bin/env python import cayenne.client, datetime, time, serial # import random #Delay Start #print "Time now = ", datetime.datetime.now().strftime("%H-%M-%S") #time.sleep(60) #print "Starting now = ", datetime.datetime.now().strftime("%H-%M-%S") # Cayenne authentication info. This should be obtained from the Cayenne Dashboard. MQTT_USERNAME = "6<PASSWORD>" MQTT_PASSWORD = "<PASSWORD>" MQTT_CLIENT_ID = "157d1d10-69dd-11e8-84d1-4d9372e87a68" # Other settings that seem to be embedded in Cayenne's libraries # MQTT_URL = "mqtt.mydevices.com" # MQTT_PORT = "1883" # Default location of serial port on Pi models 1 and 2 #SERIAL_PORT = "/dev/ttyAMA0" # Default location of serial port on Pi models 3 and Zero SERIAL_PORT = "/dev/ttyS0" # How often shall we write values to Cayenne? (Seconds + 1) interval = 5 #This sets up the serial port specified above. baud rate is the bits per second timeout seconds #port = serial.Serial(SERIAL_PORT, baudrate=2400, timeout=5) #This sets up the serial port specified above. baud rate. This WAITS for any cr/lf (new blob of data from picaxe) port = serial.Serial(SERIAL_PORT, baudrate=2400) # The callback for when a message is received from Cayenne. def on_message(message): print("def on_message reply back, message received: " + str(message)) # If there is an error processing the message return an error string, otherwise returns nothing. client = cayenne.client.CayenneMQTTClient() client.on_message = on_message client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID) #Predefine Data Packet objects for python prior to trying to look for them :) node = ":01" channel = "A" data = 123 cs = 0 while True: try: rcv = port.readline() #read buffer until cr/lf #print("Serial Readline Data = " + rcv) rcv = rcv.rstrip("\r\n") node,channel,data,cs = rcv.split(",") #Test Point print("rcv.split Data = : " + node + channel + data + cs) if cs == '0': #if cs = Check Sum is good = 0 then do the following if channel == 'A': data = float(data)/1 client.virtualWrite(1, data, "analog_sensor", "null") client.loop() if channel == 'B': data = float(data)/1 client.virtualWrite(2, data, "analog_sensor", "null") client.loop() if channel == 'C': data = float(data)/1 client.virtualWrite(3, data, "analog_sensor", "null") client.loop() if channel == 'D': data = float(data)/1 client.virtualWrite(4, data, "analog_sensor", "null") client.loop() if channel == 'E': data = float(data)/1 client.virtualWrite(5, data, "analog_sensor", "null") client.loop() if channel == 'F': data = float(data)/1 client.virtualWrite(6, data, "analog_sensor", "null") client.loop() if channel == 'G': data = float(data)/1 client.virtualWrite(7, data, "analog_sensor", "null") client.loop() if channel == 'H': data = float(data)/1 client.virtualWrite(8, data, "analog_sensor", "null") client.loop() if channel == 'I': data = float(data)/1 client.virtualWrite(9, data, "analog_sensor", "null") client.loop() if channel == 'J': data = float(data)/1 client.virtualWrite(10, data, "analog_sensor", "null") client.loop() if channel == 'K': data = float(data)/1 client.virtualWrite(11, data, "analog_sensor", "null") client.loop() if channel == 'L': data = float(data)/1 client.virtualWrite(12, data, "analog_sensor", "null") client.loop() except ValueError: #if Data Packet corrupt or malformed then... print("Data Packet corrupt or malformed")
2.828125
3
test/test_slimta_queue_proxy.py
nanojob/python-slimta
141
12329
<reponame>nanojob/python-slimta<filename>test/test_slimta_queue_proxy.py<gh_stars>100-1000
import unittest

from mox3.mox import MoxTestBase, IsA

from slimta.queue.proxy import ProxyQueue
from slimta.smtp.reply import Reply
from slimta.relay import Relay, TransientRelayError, PermanentRelayError
from slimta.envelope import Envelope


class TestProxyQueue(MoxTestBase, unittest.TestCase):

    def setUp(self):
        super(TestProxyQueue, self).setUp()
        self.relay = self.mox.CreateMock(Relay)
        self.env = Envelope('<EMAIL>', ['<EMAIL>'])

    def test_enqueue(self):
        self.relay._attempt(self.env, 0)
        self.mox.ReplayAll()
        q = ProxyQueue(self.relay)
        ret = q.enqueue(self.env)
        self.assertEqual(1, len(ret))
        self.assertEqual(2, len(ret[0]))
        self.assertEqual(self.env, ret[0][0])
        self.assertRegexpMatches(ret[0][1], r'[0-9a-fA-F]{32}')

    def test_enqueue_relayerror(self):
        err = PermanentRelayError('msg failure', Reply('550', 'Not Ok'))
        self.relay._attempt(self.env, 0).AndRaise(err)
        self.mox.ReplayAll()
        q = ProxyQueue(self.relay)
        ret = q.enqueue(self.env)
        self.assertEqual(1, len(ret))
        self.assertEqual(2, len(ret[0]))
        self.assertEqual(self.env, ret[0][0])
        self.assertEqual(err, ret[0][1])

    def test_start_noop(self):
        self.mox.ReplayAll()
        q = ProxyQueue(self.relay)
        q.start()

    def test_kill_noop(self):
        self.mox.ReplayAll()
        q = ProxyQueue(self.relay)
        q.kill()

    def test_flush_noop(self):
        self.mox.ReplayAll()
        q = ProxyQueue(self.relay)
        q.flush()

    def test_add_policy_error(self):
        self.mox.ReplayAll()
        q = ProxyQueue(self.relay)
        with self.assertRaises(NotImplementedError):
            q.add_policy('test')


# vim:et:fdm=marker:sts=4:sw=4:ts=4
2.171875
2
tests/potential/EamPotential/Al__born_exp_fs/test____init__.py
eragasa/pypospack
4
12330
import pytest
from pypospack.potential import EamPotential

symbols = ['Al']
func_pair_name = "bornmayer"
func_density_name = "eam_dens_exp"
func_embedding_name = "fs"

expected_parameter_names_pair_potential = []
expected_parameter_names_density_function = []
expected_parameter_names_embedding_function = []
expected_parameter_names = [
    'p_AlAl_phi0', 'p_AlAl_gamma', 'p_AlAl_r0',
    'd_Al_rho0', 'd_Al_beta', 'd_Al_r0',
    'e_Al_F0', 'e_Al_p', 'e_Al_q', 'e_Al_F1', 'e_Al_rho0']

print(80*'-')
print("func_pair_name={}".format(func_pair_name))
print("func_density_name={}".format(func_density_name))
# original printed func_density_name here; corrected to the embedding name
print("func_embedding_name={}".format(func_embedding_name))
print(80*'-')


def test____init__():
    obj_pot = EamPotential(
        symbols=symbols,
        func_pair=func_pair_name,
        func_density=func_density_name,
        func_embedding=func_embedding_name)

    assert type(obj_pot) is EamPotential
    assert obj_pot.potential_type == 'eam'
    assert type(obj_pot.symbols) is list
    assert len(obj_pot.symbols) == len(symbols)
    for i, v in enumerate(symbols):
        obj_pot.symbols[i] = v
    assert obj_pot.is_charge is False
    assert type(obj_pot.parameter_names) is list
    assert len(obj_pot.parameter_names) == len(expected_parameter_names)
    for i, v in enumerate(expected_parameter_names):
        obj_pot.parameter_names = v


if __name__ == "__main__":
    # CONSTRUCTOR TEST
    pot = EamPotential(symbols=symbols,
                       func_pair=func_pair_name,
                       func_density=func_density_name,
                       func_embedding=func_embedding_name)

    print('pot.potential_type == {}'.format(pot.potential_type))
    print('pot.symbols == {}'.format(pot.symbols))
    print('pot.parameter_names == {}'.format(pot.parameter_names))
    print('pot.is_charge == {}'.format(pot.is_charge))
2.375
2
backend/bin/main/enrichers/enricher.py
anjo-ba/PCAP-Analyzer
4
12331
from typing import Dict

from main.helpers.print_helper import PrintHelper


class Enricher(object):
    def __init__(self, enricher_type: str, header: str) -> None:
        self.enricher_type = enricher_type
        self.header = header

    def get_information(self, packet: Dict[str, str], information_dict) -> None:
        pass

    def print(self) -> None:
        PrintHelper.print_nothing(self.enricher_type)
2.671875
3
Modulo_5/proyecto/presentacion/form_ubicacion/formAUbicacion_designer.py
AutodidactaMx/cocid_python
0
12332
<reponame>AutodidactaMx/cocid_python
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from pandastable import Table
import util.generic as utl


class FormUbicacionDesigner(tk.Toplevel):
    def __init__(self):
        super().__init__()

    def initialize_component(self):
        self.config_window()
        self.framePrincipal()
        self.framePrincipalPanel1()
        self.framePrincipalPanel2()
        self.tablaEstadisticosUbicacion()
        self.graficaUbicacion()

    def config_window(self):
        self.title('Analisis de variable de ubicación')
        w, h = 1400, 500
        self.geometry("%dx%d+0+0" % (w, h))
        self.config(bg='black')
        utl.centrar_ventana(self, w, h)

    def framePrincipal(self):
        self.frame_zona_principal = tk.Frame(self, bd=0, relief=tk.SOLID, bg='white', width=100, height=100)
        self.frame_zona_principal.pack(side="top", fill=tk.BOTH)

    def framePrincipalPanel1(self):
        self.frame_zona_principal_panel1 = tk.Frame(self.frame_zona_principal, bd=1, relief=tk.SOLID, bg='white', width=100, height=100)
        self.frame_zona_principal_panel1.pack(side="left", fill=tk.BOTH, expand="yes")

    def framePrincipalPanel2(self):
        self.frame_zona_principal_panel2 = tk.Frame(self.frame_zona_principal, bd=1, relief=tk.SOLID, bg='white', width=100, height=100)
        self.frame_zona_principal_panel2.pack(side="left", fill=tk.BOTH, expand="yes")

    def tablaEstadisticosUbicacion(self):
        self.tablaDatosUbicacion = Table(self.frame_zona_principal_panel1, showtoolbar=False, showstatusbar=False, rows=8, width=500)
        self.tablaDatosUbicacion.show()

    def graficaUbicacion(self):
        self.figure_ubicacion = plt.Figure(figsize=(50, 10))
        self.canvas_figure_ubicacion = FigureCanvasTkAgg(self.figure_ubicacion, self.frame_zona_principal_panel2)
        self.canvas_figure_ubicacion.get_tk_widget().pack(side=tk.LEFT, fill=tk.X, pady=20)
2.625
3
injector/__init__.py
vnepomuceno/kafka-events-injector
0
12333
import logging  # missing in the original; required for logging.getLogger

import coloredlogs

coloredlogs.install()
custom_logger = logging.getLogger(__name__)  # original used the undefined bare name `name`
coloredlogs.install(level="INFO", logger=custom_logger)
1.710938
2
setup.py
Lcvette/qtpyvcp
71
12334
<reponame>Lcvette/qtpyvcp import os import versioneer from setuptools import setup, find_packages with open("README.md", "r") as fh: long_description = fh.read() if os.getenv('DEB_BUILD') == 'true' or os.getenv('USER') == 'root': "/usr/share/doc/linuxcnc/examples/sample-configs/sim" # list of (destination, source_file) tuples DATA_FILES = [ ('/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/', [ 'pyqt5designer/Qt5.7.1-64bit/libpyqt5_py2.so', 'pyqt5designer/Qt5.7.1-64bit/libpyqt5_py3.so']), ] # list of (destination, source_dir) tuples DATA_DIRS = [ ('/usr/share/doc/linuxcnc/examples/sample-configs/sim', 'linuxcnc/configs'), ] if os.getenv('USER') == 'root': try: os.rename('/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/libpyqt5.so', '/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/libpyqt5.so.old') except: pass else: # list of (destination, source_file) tuples DATA_FILES = [ ('~/', ['scripts/.xsessionrc',]), ] # list of (destination, source_dir) tuples DATA_DIRS = [ ('~/linuxcnc/configs/sim.qtpyvcp', 'linuxcnc/configs/sim.qtpyvcp'), ('~/linuxcnc/nc_files/qtpyvcp', 'linuxcnc/nc_files/qtpyvcp'), # ('~/linuxcnc/vcps', 'examples'), ] def data_files_from_dirs(data_dirs): data_files = [] for dest_dir, source_dir in data_dirs: dest_dir = os.path.expanduser(dest_dir) for root, dirs, files in os.walk(source_dir): root_files = [os.path.join(root, i) for i in files] dest = os.path.join(dest_dir, os.path.relpath(root, source_dir)) data_files.append((dest, root_files)) return data_files data_files = [(os.path.expanduser(dest), src_list) for dest, src_list in DATA_FILES] data_files.extend(data_files_from_dirs(DATA_DIRS)) setup( name="qtpyvcp", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), author="<NAME>", author_email="<EMAIL>", description="Qt and Python based Virtual Control Panel framework for LinuxCNC.", long_description=long_description, long_description_content_type="text/markdown", license="GNU General Public License v2 (GPLv2)", url="https://github.com/kcjengr/qtpyvcp", download_url="https://github.com/kcjengr/qtpyvcp/archive/master.zip", classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Intended Audience :: Manufacturing', 'Intended Audience :: End Users/Desktop', 'Topic :: Software Development :: Widget Sets', 'Topic :: Software Development :: User Interfaces', 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 'Programming Language :: Python :: 2.7', ], packages=find_packages(), data_files=data_files, include_package_data=True, install_requires=[ 'docopt', 'qtpy', 'pyudev', 'psutil', 'HiYaPyCo', 'pyopengl', 'vtk', 'pyqtgraph', 'oyaml', 'simpleeval', ], entry_points={ 'console_scripts': [ 'qtpyvcp=qtpyvcp.app:main', 'qcompile=qtpyvcp.tools.qcompile:main', 'editvcp=qtpyvcp.tools.editvcp:main', # example VCPs 'mini=examples.mini:main', 'brender=examples.brender:main', # test VCPs 'vtk_test=video_tests.vtk_test:main', 'opengl_test=video_tests.opengl_test:main', 'qtpyvcp_test=video_tests.qtpyvcp_test:main', ], 'qtpyvcp.example_vcp': [ 'mini=examples.mini', 'brender=examples.brender', 'actions=examples.actions', ], 'qtpyvcp.test_vcp': [ 'vtk_test=video_tests.vtk_test', 'opengl_test=video_tests.opengl_test', 'qtpyvcp_test=video_tests.qtpyvcp_test', ], }, )
1.539063
2
hidparser/UsagePage.py
NZSmartie/PyHIDParser
22
12335
from enum import Enum as _Enum class UsageType(_Enum): CONTROL_LINEAR = () CONTROL_ON_OFF = () CONTROL_MOMENTARY = () CONTROL_ONE_SHOT = () CONTROL_RE_TRIGGER = () DATA_SELECTOR = () DATA_STATIC_VALUE = () DATA_STATIC_FLAG = () DATA_DYNAMIC_VALUE = () DATA_DYNAMIC_FLAG = () COLLECTION_NAMED_ARRAY = () COLLECTION_APPLICATION = () COLLECTION_LOGICAL = () COLLECTION_PHYSICAL = () COLLECTION_USAGE_SWITCH = () COLLECTION_USAGE_MODIFIER = () def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj @classmethod def control_usage_types(cls): return ( UsageType.CONTROL_LINEAR, UsageType.CONTROL_ON_OFF, UsageType.CONTROL_MOMENTARY, UsageType.CONTROL_ONE_SHOT, UsageType.CONTROL_RE_TRIGGER, ) @classmethod def data_usage_types(cls): return ( UsageType.DATA_SELECTOR, UsageType.DATA_STATIC_VALUE, UsageType.DATA_STATIC_FLAG, UsageType.DATA_DYNAMIC_VALUE, UsageType.DATA_DYNAMIC_FLAG, ) @classmethod def collection_usage_types(cls): return ( UsageType.COLLECTION_NAMED_ARRAY, # UsageType.collection_application, # Commented out as it is used for top level collections only UsageType.COLLECTION_LOGICAL, UsageType.COLLECTION_PHYSICAL, UsageType.COLLECTION_USAGE_SWITCH, UsageType.COLLECTION_USAGE_MODIFIER ) class Usage: def __init__(self, value, usage_types): if not isinstance(usage_types, list): usage_types = [usage_types,] for usage_type in usage_types: if not isinstance(usage_type, UsageType): raise ValueError("usage_type {} is not instance of {}".format( usage_type.__class__.__name__, UsageType.__name__) ) self.value = value self.usage_types = usage_types class UsagePage(_Enum): def __init__(self, item): if not isinstance(item, Usage): raise ValueError("{} is not a valid {}".format(item.__name__, self.__class__.__name__)) self.index = item.value & 0xFFFF self.usage = item self.usage_types = item.usage_types @classmethod def get_usage(cls, value): for key, member in cls.__members__.items(): if not isinstance(member.value, Usage): continue if member.index == value: return member raise ValueError("{} is not a valid {}".format(value, cls.__name__)) @classmethod def _get_usage_page_index(cls): raise NotImplementedError() @classmethod def find_usage_page(cls, value): if not hasattr(cls, "usage_page_map"): cls.usage_page_map = {usage_page._get_usage_page_index(): usage_page for usage_page in cls.__subclasses__()} if value in cls.usage_page_map.keys(): return cls.usage_page_map[value] if value not in range(0xFF00,0xFFFF): raise ValueError("Reserved or missing usage page 0x{:04X}".format(value)) raise NotImplementedError("Yet to support Vendor defined usage pages") class UsageRange: def __init__(self, usage_page: UsagePage.__class__ = None, minimum = None, maximum = None): self.usage_page = usage_page self.minimum = minimum self.maximum = maximum def get_range(self): if self.minimum is None or self.maximum is None: raise ValueError("Usage Minimum and Usage Maximum must be set") if isinstance(self.minimum, UsagePage): if not isinstance(self.maximum, UsagePage): raise ValueError("UsageRange type mismatch in minimum and maximum usages") self.usage_page = self.minimum.__class__ return [self.usage_page.get_usage(value) for value in range(self.minimum.index & 0xFFFF, (self.maximum.index & 0xFFFF) + 1)] if self.minimum & ~0xFFFF: self.usage_page = UsagePage.find_usage_page((self.minimum & ~0xFFFF) >> 16) return [self.usage_page.get_usage(value) for value in range(self.minimum & 0xFFFF, (self.maximum & 0xFFFF) + 1)]
2.640625
3
sandbox/lib/jumpscale/JumpscaleLibs/tools/legal_contracts/LegalDoc.py
threefoldtech/threebot_prebuilt
2
12336
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph


class LegalDoc:
    def __init__(self, path):
        self.path = path
        styles = getSampleStyleSheet()
        self._styleN = styles["Normal"]
        self._styleH1 = styles["Heading1"]
        self._styleH2 = styles["Heading2"]
        self.page = 0

        doc = BaseDocTemplate(self.path, pagesize=A4)
        frame = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 2 * cm, id="normal")
        template = PageTemplate(id="legal_doc", frames=frame, onPage=self.header_footer)
        doc.addPageTemplates([template])

        text = []
        for i in range(111):
            text.append(Paragraph("This is line %d." % i, self._styleN))
        doc.build(text)

    def header_footer(self, canvas, doc):
        self.page += 1
        canvas.saveState()
        P = Paragraph("This is a multi-line header. It goes on every page. " * 2, self._styleN)
        w, h = P.wrap(doc.width, doc.topMargin)
        P.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - h)
        canvas.restoreState()

        canvas.saveState()
        P = Paragraph("This is a multi-line footer:%s. It goes on every page. " % self.page, self._styleN)
        w, h = P.wrap(doc.width, doc.bottomMargin)
        P.drawOn(canvas, doc.leftMargin, h)
        canvas.restoreState()
2.578125
3
tests/outcome/test_outcome_models.py
ConnorBarnhill/kf-api-dataservice
6
12337
from datetime import datetime
import uuid

from sqlalchemy.exc import IntegrityError

from dataservice.api.study.models import Study
from dataservice.api.participant.models import Participant
from dataservice.api.outcome.models import Outcome
from dataservice.extensions import db
from tests.utils import FlaskTestCase


class ModelTest(FlaskTestCase):
    """
    Test Outcome database model
    """

    def test_create(self):
        """
        Test create outcome
        """
        # Create study
        study = Study(external_id='phs001')

        # Create and save participant
        participant_id = 'Test subject 0'
        p = Participant(external_id=participant_id, is_proband=True,
                        study=study)
        db.session.add(p)
        db.session.commit()

        # Create outcomes
        data = {
            'external_id': 'test_0',
            'vital_status': 'Alive',
            'disease_related': False,
            'age_at_event_days': 120,
            'participant_id': p.kf_id
        }
        dt = datetime.now()
        o1 = Outcome(**data)
        db.session.add(o1)
        data['vital_status'] = 'Deceased'
        data['disease_related'] = 'True'
        o2 = Outcome(**data)
        db.session.add(o2)
        db.session.commit()

        self.assertEqual(Outcome.query.count(), 2)
        new_outcome = Outcome.query.all()[1]
        self.assertGreater(new_outcome.created_at, dt)
        self.assertGreater(new_outcome.modified_at, dt)
        self.assertIs(type(uuid.UUID(new_outcome.uuid)), uuid.UUID)
        self.assertEqual(new_outcome.vital_status, data['vital_status'])
        self.assertEqual(new_outcome.disease_related, data['disease_related'])

    def test_create_via_participant(self):
        """
        Test create outcomes via creation of participant
        """
        outcomes, p = self._create_outcomes()
        oc = ['Deceased', 'Alive']

        # Check outcomes were created
        self.assertEqual(Outcome.query.count(), 2)

        # Check Participant has the outcomes
        for o in Participant.query.first().outcomes:
            self.assertIn(o.vital_status, oc)

        # Outcomes have the participant
        p = Participant.query.first()
        for o in Outcome.query.all():
            self.assertEqual(o.participant_id, p.kf_id)

    def test_find_outcome(self):
        """
        Test find one outcome
        """
        outcomes, p = self._create_outcomes()

        # Find outcome
        oc = ['Deceased', 'Alive']
        o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
        self.assertEqual(o.vital_status, oc[0])

    def test_update_outcome(self):
        """
        Test update outcome
        """
        outcomes, p = self._create_outcomes()

        # Update and save
        oc = ['Deceased', 'Alive']
        o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
        o.outcome = oc[1]
        db.session.commit()

        # Check updated values
        o = Outcome.query.filter_by(vital_status=oc[1]).one_or_none()
        self.assertIsNot(o, None)

    def test_delete_outcome(self):
        """
        Test delete outcome
        """
        outcomes, p = self._create_outcomes()

        # Choose one and delete it
        oc = ['Deceased', 'Alive']
        o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
        db.session.delete(o)
        db.session.commit()

        o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
        self.assertIs(o, None)
        outcomes = [_o for _o in p.outcomes]
        self.assertNotIn(o, outcomes)

    def test_delete_outcome_via_participant(self):
        """
        Test delete related outcomes via deletion of participant
        """
        outcomes, p = self._create_outcomes()

        # Delete participant
        db.session.delete(p)
        db.session.commit()

        # Check that outcomes have been deleted
        oc = ['Deceased', 'Alive']
        o1 = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
        o2 = Outcome.query.filter_by(vital_status=oc[1]).one_or_none()
        self.assertIs(o1, None)
        self.assertIs(o2, None)

    def test_not_null_constraint(self):
        """
        Test that an outcome cannot be created without required
        parameters such as participant_id
        """
        # Create outcome
        data = {
            'vital_status': 'Alive',
            # non-existent required param: participant_id
        }
        o = Outcome(**data)

        # Add to db and check that committing raises an integrity error
        db.session.add(o)
        self.assertRaises(IntegrityError, db.session.commit)

    def test_foreign_key_constraint(self):
        """
        Test that an outcome cannot be created without an existing
        reference Participant. This checks foreign key constraint
        """
        # Create outcome
        data = {
            'vital_status': 'Alive',
            'participant_id': ''  # empty blank foreign key
        }
        o = Outcome(**data)

        # Add to db and check that committing raises an integrity error
        db.session.add(o)
        self.assertRaises(IntegrityError, db.session.commit)

    def _create_outcomes(self):
        """
        Create outcome and required entities
        """
        # Create study
        study = Study(external_id='phs001')

        # Create two outcomes
        oc = ['Deceased', 'Alive']
        o1 = Outcome(vital_status=oc[0])
        o2 = Outcome(vital_status=oc[1])
        p = Participant(external_id='p1', is_proband=True, study=study)

        # Add to participant and save
        p.outcomes.extend([o1, o2])
        db.session.add(p)
        db.session.commit()

        return [o1, o2], p
2.640625
3
sapextractor/utils/fields_corresp/extract_dd03t.py
aarkue/sap-meta-explorer
2
12338
def apply(con, target_language="E"):
    dict_field_desc = {}
    try:
        df = con.prepare_and_execute_query("DD03T", ["DDLANGUAGE", "FIELDNAME", "DDTEXT"],
                                           " WHERE DDLANGUAGE = '"+target_language+"'")
        stream = df.to_dict("records")
        for el in stream:
            dict_field_desc[el["FIELDNAME"]] = el["DDTEXT"]
    except:
        pass
    return dict_field_desc
2.453125
2
app/forms/fields/month_year_date_field.py
ons-eq-team/eq-questionnaire-runner
0
12339
import logging

from werkzeug.utils import cached_property
from wtforms import FormField, Form, StringField

logger = logging.getLogger(__name__)


def get_form_class(validators):
    class YearMonthDateForm(Form):
        year = StringField(validators=validators)
        month = StringField()

        @cached_property
        def data(self):
            data = super().data

            try:
                return "{year:04d}-{month:02d}".format(
                    year=int(data["year"]), month=int(data["month"])
                )
            except (TypeError, ValueError):
                return None

    return YearMonthDateForm


class MonthYearDateField(FormField):
    def __init__(self, validators, **kwargs):
        form_class = get_form_class(validators)
        super().__init__(form_class, **kwargs)

    def process(self, formdata, data=None):
        if data is not None:
            substrings = data.split("-")
            data = {"year": substrings[0], "month": substrings[1]}

        super().process(formdata, data)
2.4375
2
inf_classif_analysis/descriptive_analysis.py
Marco-Ametrano/myocardal_infarction_class
0
12340
#AFTER PREPROCESSING AND TARGETS DEFINITION
newdataset.describe()

LET_IS.value_counts()
LET_IS.value_counts().plot(kind='bar', color='c')

Y_unica.value_counts()
Y_unica.value_counts().plot(kind='bar', color='c')

ZSN.value_counts().plot(kind='bar', color='c')
Survive.value_counts().plot(kind='bar', color='c')
1.65625
2
src/service/eda_service.py
LiuYuWei/service-data-eda-analysis
0
12341
<reponame>LiuYuWei/service-data-eda-analysis
"""Exploratory data analysis (EDA) service."""
# coding=utf-8
# import relation package.
from pandas_profiling import ProfileReport
import pandas as pd
import datetime
import json

# import project package.
from config.config_setting import ConfigSetting


class EdaService:
    """Exploratory data analysis (EDA) service."""

    def __init__(self):
        """Initial variable and module"""
        config_setting = ConfigSetting()
        self.log = config_setting.set_logger("[eda_service]")
        self.config = config_setting.yaml_parser()
        self.eda_html = None

    def transform_json_to_pandas(self, data, column_name):
        df = pd.DataFrame(data, columns=column_name)
        payload = {}
        payload["length_df"] = len(df)
        payload["length_column_df"] = len(df.columns)
        df.to_csv("data/csv/dataframe.csv")
        return payload

    def pandas_profiling_eda_transfer(self, title_name):
        df = pd.read_csv("data/csv/dataframe.csv")
        profile = df.profile_report(title='Pandas Profiling Report')
        payload = {}
        now_time = datetime.datetime.now()
        payload["timestamp"] = now_time.isoformat()
        payload["eda_report"] = "eda_{}.html".format(now_time.strftime("%Y%m%d_%H%M%S"))
        self.eda_html = payload["eda_report"]
        profile.to_file("src/templates/{}".format(payload["eda_report"]))
        return payload

    def show_eda_result_in_html(self):
        result = None
        if self.eda_html is not None:
            result = self.eda_html
        return result
2.578125
3
google/cloud/bigquery_v2/types/__init__.py
KoffieLabs/python-bigquery
0
12342
<filename>google/cloud/bigquery_v2/types/__init__.py
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .encryption_config import (
    EncryptionConfiguration,
)
from .model import (
    DeleteModelRequest,
    GetModelRequest,
    ListModelsRequest,
    ListModelsResponse,
    Model,
    PatchModelRequest,
)
from .model_reference import (
    ModelReference,
)
from .standard_sql import (
    StandardSqlDataType,
    StandardSqlField,
    StandardSqlStructType,
    StandardSqlTableType,
)
from .table_reference import (
    TableReference,
)

__all__ = (
    "EncryptionConfiguration",
    "DeleteModelRequest",
    "GetModelRequest",
    "ListModelsRequest",
    "ListModelsResponse",
    "Model",
    "PatchModelRequest",
    "ModelReference",
    "StandardSqlDataType",
    "StandardSqlField",
    "StandardSqlStructType",
    "StandardSqlTableType",
    "TableReference",
)
1.585938
2
blurr/core/store.py
ddrightnow/blurr
0
12343
from abc import abstractmethod, ABC from datetime import datetime, timezone from typing import Any, List, Tuple, Dict from blurr.core.base import BaseSchema from blurr.core.store_key import Key, KeyType class StoreSchema(BaseSchema): pass class Store(ABC): """ Base Store that allows for data to be persisted during / after transformation """ @abstractmethod def get_all(self, identity: str) -> Dict[Key, Any]: """ Gets all the items for an identity """ raise NotImplementedError() @abstractmethod def get(self, key: Key) -> Any: """ Gets an item by key """ raise NotImplementedError() def get_range(self, base_key: Key, start_time: datetime, end_time: datetime = None, count: int = 0) -> List[Tuple[Key, Any]]: """ Returns the list of items from the store based on the given time range or count. :param base_key: Items which don't start with the base_key are filtered out. :param start_time: Start time to for the range query :param end_time: End time of the range query. If None count is used. :param count: The number of items to be returned. Used if end_time is not specified. """ if end_time and count: raise ValueError('Only one of `end` or `count` can be set') if count: end_time = datetime.min.replace( tzinfo=timezone.utc) if count < 0 else datetime.max.replace(tzinfo=timezone.utc) if end_time < start_time: start_time, end_time = end_time, start_time if base_key.key_type == KeyType.TIMESTAMP: start_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], start_time) end_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], end_time) return self._get_range_timestamp_key(start_key, end_key, count) else: return self._get_range_dimension_key(base_key, start_time, end_time, count) @abstractmethod def _get_range_timestamp_key(self, start: Key, end: Key, count: int = 0) -> List[Tuple[Key, Any]]: """ Returns the list of items from the store based on the given time range or count. This is used when the key being used is a TIMESTAMP key. """ raise NotImplementedError() def get_time_range(self, identity, group, start_time, end_time) -> List[Tuple[Key, Any]]: raise NotImplementedError() def get_count_range(self, identity, group, time, count): raise NotImplementedError() @abstractmethod def _get_range_dimension_key(self, base_key: Key, start_time: datetime, end_time: datetime, count: int = 0) -> List[Tuple[Key, Any]]: """ Returns the list of items from the store based on the given time range or count. This is used when the key being used is a DIMENSION key. """ raise NotImplementedError() @staticmethod def _restrict_items_to_count(items: List[Tuple[Key, Any]], count: int) -> List[Tuple[Key, Any]]: """ Restricts items to count number if len(items) is larger than abs(count). This function assumes that items is sorted by time. :param items: The items to restrict. :param count: The number of items returned. """ if abs(count) > len(items): count = Store._sign(count) * len(items) if count < 0: return items[count:] else: return items[:count] @abstractmethod def save(self, key: Key, item: Any) -> None: """ Saves an item to store """ raise NotImplementedError() @abstractmethod def delete(self, key: Key) -> None: """ Deletes an item from the store by key """ raise NotImplementedError() @abstractmethod def finalize(self) -> None: """ Finalizes the store by flushing all remaining data to persistence """ raise NotImplementedError() @staticmethod def _sign(x: int) -> int: return (1, -1)[x < 0]
2.890625
3
seamless/core/cache/tempref.py
sjdv1982/seamless
15
12344
import time, copy
import asyncio

class TempRefManager:

    def __init__(self):
        self.refs = []
        self.running = False

    def add_ref(self, ref, lifetime, on_shutdown):
        expiry_time = time.time() + lifetime
        self.refs.append((ref, expiry_time, on_shutdown))

    def purge_all(self):
        """Purges all refs, regardless of expiry time
        Only call this when Seamless is shutting down"""
        while len(self.refs):
            ref, _, on_shutdown = self.refs.pop(0)
            if not on_shutdown:
                continue
            try:
                ref()
            except:
                pass

    def purge(self):
        """Purges expired refs"""
        t = time.time()
        for item in copy.copy(self.refs):
            ref, expiry_time, _ = item
            if expiry_time < t:
                self.refs.remove(item)
                ref()

    async def loop(self):
        if self.running:
            return
        self.running = True
        while 1:
            try:
                self.purge()
            except Exception:
                import traceback
                traceback.print_exc()
            await asyncio.sleep(0.05)
        self.running = False

temprefmanager = TempRefManager()

coro = temprefmanager.loop()

import asyncio
task = asyncio.ensure_future(coro)

import atexit
atexit.register(lambda *args, **kwargs: task.cancel())
2.640625
3
lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/eclib/choicedlg.py
bo3b/iZ3D
27
12345
<gh_stars>10-100 ############################################################################### # Name: choicedlg.py # # Purpose: Generic Choice Dialog # # Author: <NAME> <<EMAIL>> # # Copyright: (c) 2008 <NAME> <<EMAIL>> # # License: wxWindows License # ############################################################################### """ Editra Control Library: Choice Dialog A generic choice dialog that uses a wx.Choice control to display its choices. @summary: Generic Choice Dialog """ __author__ = "<NAME> <<EMAIL>>" __svnid__ = "$Id: choicedlg.py 63820 2010-04-01 21:46:22Z CJP $" __revision__ = "$Revision: 63820 $" __all__ = ['ChoiceDialog',] #--------------------------------------------------------------------------# # Imports import wx #--------------------------------------------------------------------------# # Globals ChoiceDialogNameStr = u"ChoiceDialog" #--------------------------------------------------------------------------# class ChoiceDialog(wx.Dialog): """Dialog with a wx.Choice control for showing a list of choices""" def __init__(self, parent, id=wx.ID_ANY, msg=u'', title=u'', choices=None, default=u'', pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, name=ChoiceDialogNameStr): """Create the choice dialog @keyword msg: Dialog Message @keyword title: Dialog Title @keyword choices: list of strings @keyword default: Default selection """ wx.Dialog.__init__(self, parent, id, title, style=wx.CAPTION, pos=pos, size=size, name=name) # Attributes self._panel = ChoicePanel(self, msg=msg, choices=choices, default=default, style=style) # Layout self.__DoLayout() def __DoLayout(self): """Layout the dialogs controls""" sizer = wx.BoxSizer(wx.HORIZONTAL) sizer.Add(self._panel, 1, wx.EXPAND) self.SetSizer(sizer) self.SetAutoLayout(True) self.SetInitialSize() def SetChoices(self, choices): """Set the dialogs choices @param choices: list of strings """ self._panel.SetChoices(choices) def GetSelection(self): """Get the selected choice @return: string """ return self._panel.GetSelection() def GetStringSelection(self): """Get the chosen string @return: string """ return self._panel.GetStringSelection() def SetBitmap(self, bmp): """Set the bitmap used in the dialog @param bmp: wx.Bitmap """ self._panel.SetBitmap(bmp) def SetStringSelection(self, sel): """Set the selected choice @param sel: string """ self._panel.SetStringSelection(sel) def SetSelection(self, sel): """Set the selected choice @param sel: string """ self._panel.SetSelection(sel) #--------------------------------------------------------------------------# class ChoicePanel(wx.Panel): """Generic Choice dialog panel""" def __init__(self, parent, msg=u'', choices=list(), default=u'', style=wx.OK|wx.CANCEL): """Create the panel @keyword msg: Display message @keyword choices: list of strings @keyword default: default selection @keyword style: dialog style """ wx.Panel.__init__(self, parent) # Attributes self._msg = msg self._choices = wx.Choice(self, wx.ID_ANY) self._selection = default self._selidx = 0 self._bmp = None self._buttons = list() # Setup self._choices.SetItems(choices) if default in choices: self._choices.SetStringSelection(default) self._selidx = self._choices.GetSelection() else: self._choices.SetSelection(0) self._selidx = 0 self._selection = self._choices.GetStringSelection() # Setup Buttons for btn, id_ in ((wx.OK, wx.ID_OK), (wx.CANCEL, wx.ID_CANCEL), (wx.YES, wx.ID_YES), (wx.NO, wx.ID_NO)): if btn & style: button = wx.Button(self, id_) self._buttons.append(button) if not len(self._buttons): 
self._buttons.append(wx.Button(self, wx.ID_OK)) self._buttons.append(wx.Button(self, wx.ID_CANCEL)) # Layout self.__DoLayout(style) # Event Handlers self.Bind(wx.EVT_CHOICE, self.OnChoice, self._choices) self.Bind(wx.EVT_BUTTON, self.OnButton) def __DoLayout(self, style): """Layout the panel""" hsizer = wx.BoxSizer(wx.HORIZONTAL) vsizer = wx.BoxSizer(wx.VERTICAL) caption = wx.StaticText(self, label=self._msg) # Layout the buttons bsizer = wx.StdDialogButtonSizer() for button in self._buttons: bsizer.AddButton(button) bid = button.GetId() if bid in (wx.ID_NO, wx.ID_YES): if wx.NO_DEFAULT & style: if bid == wx.ID_NO: button.SetDefault() else: if bid == wx.ID_YES: button.SetDefault() elif bid == wx.ID_OK: button.SetDefault() bsizer.Realize() vsizer.AddMany([((10, 10), 0), (caption, 0), ((20, 20), 0), (self._choices, 1, wx.EXPAND), ((10, 10), 0), (bsizer, 1, wx.EXPAND), ((10, 10), 0)]) icon_id = wx.ART_INFORMATION for i_id, a_id in ((wx.ICON_ERROR, wx.ART_ERROR), (wx.ICON_WARNING, wx.ART_WARNING)): if i_id & style: icon_id = a_id break icon = wx.ArtProvider.GetBitmap(icon_id, wx.ART_MESSAGE_BOX, (64, 64)) self._bmp = wx.StaticBitmap(self, bitmap=icon) bmpsz = wx.BoxSizer(wx.VERTICAL) bmpsz.AddMany([((10, 10), 0), (self._bmp, 0, wx.ALIGN_CENTER_VERTICAL), ((10, 30), 0, wx.EXPAND)]) hsizer.AddMany([((10, 10), 0), (bmpsz, 0, wx.ALIGN_TOP), ((10, 10), 0), (vsizer, 1), ((10, 10), 0)]) self.SetSizer(hsizer) self.SetInitialSize() self.SetAutoLayout(True) def GetChoiceControl(self): """Get the dialogs choice control @return: wx.Choice """ return self._choices def GetSelection(self): """Get the chosen index @return: int """ return self._selidx def GetStringSelection(self): """Get the chosen string @return: string """ return self._selection def OnButton(self, evt): """Handle button events @param evt: wx.EVT_BUTTON @type evt: wx.CommandEvent """ self.GetParent().EndModal(evt.GetId()) def OnChoice(self, evt): """Update the selection @param evt: wx.EVT_CHOICE @type evt: wx.CommandEvent """ if evt.GetEventObject() == self._choices: self._selection = self._choices.GetStringSelection() self._selidx = self._choices.GetSelection() else: evt.Skip() def SetBitmap(self, bmp): """Set the dialogs bitmap @param bmp: wx.Bitmap """ self._bmp.SetBitmap(bmp) self.Layout() def SetChoices(self, choices): """Set the dialogs choices @param choices: list of strings """ self._choices.SetItems(choices) self._choices.SetSelection(0) self._selection = self._choices.GetStringSelection() def SetSelection(self, sel): """Set the selected choice @param sel: int """ self._choices.SetSelection(sel) self._selection = self._choices.GetStringSelection() self._selidx = self._choices.GetSelection() def SetStringSelection(self, sel): """Set the selected choice @param sel: string """ self._choices.SetStringSelection(sel) self._selection = self._choices.GetStringSelection() self._selidx = self._choices.GetSelection() #--------------------------------------------------------------------------#
2.09375
2
shopyo/__init__.py
rehmanis/shopyo2
2
12346
<reponame>rehmanis/shopyo2
version_info = (4, 0, 1)
__version__ = ".".join([str(v) for v in version_info])
1.585938
2
check_digit_calc.py
zhoffm/Check-Digit-Calculator
1
12347
<reponame>zhoffm/Check-Digit-Calculator from random import randint import pandas as pd def random_11_digit_upc(): upc_string = ''.join(["%s" % randint(0, 9) for num in range(0, 11)]) print(upc_string) return upc_string # Class to calculate the check digit for 11 digit UPC's class CheckDigitCalculations: def __init__(self): self.input_string = None self.input_integer = None self.odd_sum = None self.odd_sum_times_3 = None self.even_sum = None self.new_sum = None self.m = None self.check_digit = None def len_check(self): if len(self.input_string) == 11: return True else: return False def check_integer(self): try: self.input_integer = int(self.input_string) except ValueError: print('The entered string is not exclusively numeric.') # 1. Sum the digits at odd-numbered positions (first, third, fifth,..., eleventh). def step_1(self): self.odd_sum = sum(int(self.input_string[i]) for i, j in enumerate(self.input_string) if i % 2 == 0) # 2. Multiply the result by 3. def step_2(self): self.odd_sum_times_3 = 3 * self.odd_sum # 3. Add the digit sum at even-numbered positions (second, fourth, sixth,..., tenth) to the result. def step_3(self): self.even_sum = sum(int(self.input_string[i]) for i, j in enumerate(self.input_string) if i % 2 != 0) self.new_sum = self.even_sum + self.odd_sum_times_3 # 4. Find the result modulo 10 (i.e. the remainder, when divided by 10) and call it M. def step_4(self): self.m = self.new_sum % 10 # 5. If M is zero, then the check digit is 0; otherwise the check digit is 10 − M. def step_5(self): if self.m == 0: self.check_digit = 0 else: self.check_digit = 10 - self.m # Do all the steps! This runs all the previous steps. def compute_check_digit(self, input_upc): self.input_string = input_upc if self.len_check(): self.step_1() self.step_2() self.step_2() self.step_3() self.step_4() self.step_5() return self.check_digit else: return '' def get_full_upc(self, input_upc): self.input_string = input_upc return self.input_string + str(self.compute_check_digit(input_upc)) class RawCSVProcessing(CheckDigitCalculations): def __init__(self): super().__init__() self.input_file_path = None self.input_file = None self.output_file_path = None self.output_file = None self.upc_col = 'REFCODE' self.upc_df = pd.DataFrame() self.upc_list = None self.updated_upcs = None def read_file_into_df(self, input_file_path, input_file): self.input_file_path = input_file_path self.input_file = input_file self.upc_df = pd.read_csv( self.input_file_path + self.input_file, dtype={self.upc_col: str}, na_filter=False, usecols=['DESCRIPT', 'REFCODE'] ) def add_updated_upc_to_df(self): self.upc_list = list(self.upc_df[self.upc_col]) self.updated_upcs = [(x + str(self.compute_check_digit(x))) for x in self.upc_list] self.upc_df[self.upc_col] = self.updated_upcs def write_upcs_to_csv(self, output_file_path, output_file): self.output_file_path = output_file_path self.output_file = output_file self.upc_df.to_csv(self.output_file_path + self.output_file, index=False) if __name__ == '__main__': test_upc = random_11_digit_upc() obj = CheckDigitCalculations() print(obj.get_full_upc(test_upc))
3.921875
4
test/test_load.py
ramsdalesteve/forest
0
12348
import yaml import forest from forest import main def test_earth_networks_loader_given_pattern(): loader = forest.Loader.from_pattern("Label", "EarthNetworks*.txt", "earth_networks") assert isinstance(loader, forest.earth_networks.Loader) def test_build_loader_given_files(): """replicate main.py as close as possible""" files = ["file_20190101T0000Z.nc"] args = main.parse_args.parse_args(files) config = forest.config.from_files(args.files, args.file_type) group = config.file_groups[0] loader = forest.Loader.group_args(group, args) assert isinstance(loader, forest.data.DBLoader) assert loader.locator.paths == files def test_build_loader_given_database(tmpdir): """replicate main.py as close as possible""" database_file = str(tmpdir / "database.db") config_file = str(tmpdir / "config.yml") settings = { "files": [ { "label": "UM", "pattern": "*.nc", "locator": "database" } ] } with open(config_file, "w") as stream: yaml.dump(settings, stream) args = main.parse_args.parse_args([ "--database", database_file, "--config-file", config_file]) config = forest.config.load_config(args.config_file) group = config.file_groups[0] database = forest.db.Database.connect(database_file) loader = forest.Loader.group_args(group, args, database=database) database.close() assert hasattr(loader.locator, "connection") assert loader.locator.directory is None def test_build_loader_given_database_and_directory(tmpdir): database_file = str(tmpdir / "database.db") config_file = str(tmpdir / "config.yml") args = main.parse_args.parse_args([ "--database", database_file, "--config-file", config_file]) label = "UM" pattern = "*.nc" directory = "/some/dir" group = forest.config.FileGroup( label, pattern, directory=directory, locator="database") database = forest.db.Database.connect(database_file) loader = forest.Loader.group_args(group, args, database=database) database.close() assert hasattr(loader.locator, "connection") assert loader.locator.directory == directory def test_build_loader_given_config_file_pattern(tmpdir): config_file = str(tmpdir / "config.yml") path = str(tmpdir / "file_20190101T0000Z.nc") with open(path, "w"): pass args = main.parse_args.parse_args([ "--config-file", config_file]) label = "UM" pattern = "*.nc" directory = str(tmpdir) group = forest.config.FileGroup( label, pattern, directory=directory, locator="file_system") loader = forest.Loader.group_args(group, args) assert loader.locator.paths == [path] def test_build_loader_given_eida50_file_type(): label = "EIDA50" pattern = "eida50*.nc" file_type = "eida50" loader = forest.Loader.from_pattern(label, pattern, file_type) assert isinstance(loader, forest.satellite.EIDA50) assert isinstance(loader.locator, forest.satellite.Locator) def test_build_loader_given_rdt_file_type(): loader = forest.Loader.from_pattern( "Label", "*.json", "rdt") assert isinstance(loader, forest.rdt.Loader) assert isinstance(loader.locator, forest.rdt.Locator) def test_replace_dir_given_args_dir_only(): check_replace_dir("args/dir", None, "args/dir") def test_replace_dir_given_group_dir_only(): check_replace_dir(None, "group/dir", "group/dir") def test_replace_dir_given_relative_group_dir_appends_to_args_dir(): check_replace_dir("args/dir", "leaf", "args/dir/leaf") def test_replace_dir_given_absolute_group_dir_overrides_rel_args_dir(): check_replace_dir("args/relative", "/group/absolute", "/group/absolute") def test_replace_dir_given_absolute_group_dir_overrides_abs_args_dir(): check_replace_dir("/args/absolute", "/group/absolute", "/group/absolute") def 
check_replace_dir(args_dir, group_dir, expected): actual = forest.Loader.replace_dir(args_dir, group_dir) assert actual == expected def test_full_pattern_given_name_only(): check_full_pattern("file.nc", None, None, "file.nc") def test_full_pattern_given_relative_prefix_dir(): check_full_pattern("file.nc", None, "prefix", "prefix/file.nc") def test_full_pattern_given_relative_leaf_and_prefix_dir(): check_full_pattern("file.nc", "leaf", "prefix", "prefix/leaf/file.nc") def test_full_pattern_given_absolute_leaf_ignores_relative_prefix(): check_full_pattern("file.nc", "/leaf", "prefix", "/leaf/file.nc") def test_full_pattern_given_absolute_leaf_ignores_absolute_prefix(): check_full_pattern("file.nc", "/leaf", "/prefix", "/leaf/file.nc") def check_full_pattern(name, leaf, prefix, expected): actual = forest.Loader.full_pattern(name, leaf, prefix) assert actual == expected
2.53125
3
tournaments/binarySearch/binarySearch.py
gurfinkel/codeSignal
5
12349
def binarySearch(inputArray, searchElement):
    minIndex = -1
    maxIndex = len(inputArray)
    while minIndex < maxIndex - 1:
        currentIndex = (minIndex + maxIndex) // 2
        currentElement = inputArray[currentIndex]
        if currentElement < searchElement:
            minIndex = currentIndex
        else:
            maxIndex = currentIndex
    if maxIndex == len(inputArray) or inputArray[maxIndex] != searchElement:
        return -1
    return maxIndex
3.8125
4
swagger_server/models/rule.py
Capping-WAR/API
0
12350
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from swagger_server.models.base_model_ import Model from swagger_server import util class Rule(Model): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, rule_id: int=None, rule_name: str=None, description: str=None, priority: int=None): # noqa: E501 """Rule - a model defined in Swagger :param rule_id: The rule_id of this Rule. # noqa: E501 :type rule_id: int :param rule_name: The rule_name of this Rule. # noqa: E501 :type rule_name: str :param description: The description of this Rule. # noqa: E501 :type description: str :param priority: The priority of this Rule. # noqa: E501 :type priority: int """ self.swagger_types = { 'rule_id': int, 'rule_name': str, 'description': str, 'priority': int } self.attribute_map = { 'rule_id': 'ruleID', 'rule_name': 'ruleName', 'description': 'description', 'priority': 'priority' } self._rule_id = rule_id self._rule_name = rule_name self._description = description self._priority = priority @classmethod def from_dict(cls, dikt) -> 'Rule': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The rule of this Rule. # noqa: E501 :rtype: Rule """ return util.deserialize_model(dikt, cls) @property def rule_id(self) -> int: """Gets the rule_id of this Rule. Unique ID of the rule # noqa: E501 :return: The rule_id of this Rule. :rtype: int """ return self._rule_id @rule_id.setter def rule_id(self, rule_id: int): """Sets the rule_id of this Rule. Unique ID of the rule # noqa: E501 :param rule_id: The rule_id of this Rule. :type rule_id: int """ self._rule_id = rule_id @property def rule_name(self) -> str: """Gets the rule_name of this Rule. name of rule # noqa: E501 :return: The rule_name of this Rule. :rtype: str """ return self._rule_name @rule_name.setter def rule_name(self, rule_name: str): """Sets the rule_name of this Rule. name of rule # noqa: E501 :param rule_name: The rule_name of this Rule. :type rule_name: str """ if rule_name is None: raise ValueError("Invalid value for `rule_name`, must not be `None`") # noqa: E501 self._rule_name = rule_name @property def description(self) -> str: """Gets the description of this Rule. description of rule # noqa: E501 :return: The description of this Rule. :rtype: str """ return self._description @description.setter def description(self, description: str): """Sets the description of this Rule. description of rule # noqa: E501 :param description: The description of this Rule. :type description: str """ if description is None: raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501 self._description = description @property def priority(self) -> int: """Gets the priority of this Rule. the current value it has towards the dataset, used to get more of a ceartin rule # noqa: E501 :return: The priority of this Rule. :rtype: int """ return self._priority @priority.setter def priority(self, priority: int): """Sets the priority of this Rule. the current value it has towards the dataset, used to get more of a ceartin rule # noqa: E501 :param priority: The priority of this Rule. :type priority: int """ if priority is None: raise ValueError("Invalid value for `priority`, must not be `None`") # noqa: E501 self._priority = priority
2.296875
2
Project1/mazes/gen_sparses.py
VFerrari/MC906
0
12351
<filename>Project1/mazes/gen_sparses.py
import os
import re
import numpy as np

# WARNING: this function overrides the mazes in sparse directory; don't run it
# as the idea is that everyone test the same mazes
def gen_sparses(dir_path):
    ''' Randomly remove points from dense instances '''
    pattern = re.compile('^([0-9]+[a-zA-Z]+)')
    denses_fn = [x for x in os.listdir(dir_path + '/dense') if pattern.match(x)]
    print(denses_fn)

    for dense_fn in denses_fn:
        sparse = np.genfromtxt(dir_path + '/dense/' + dense_fn, dtype='str', delimiter=1)
        for r in range(0, len(sparse)):
            for c in range(0, len(sparse[0])):
                if sparse[r][c] == '.':
                    sparse[r][c] = ' ' if bool(np.random.choice(np.arange(0,2), p=[0.25,0.75])) else '.'
        np.savetxt(dir_path + '/sparse/' + dense_fn, sparse, fmt='%s', delimiter='')

gen_sparses('.')
3.09375
3
cryomem/cmtools/lib/jjivarray2.py
bebaek/cryomem
1
12352
<reponame>bebaek/cryomem<filename>cryomem/cmtools/lib/jjivarray2.py
"""
Analyze JJ IV curve array (core) v.2

BB, 2016
"""

import numpy as np
from . import jjiv2 as jjiv
import sys

def fit2rsj_arr(iarr, varr, **kwargs):
    """Fit IV array to 2 Ic RSJ model and return arrays of fit params, error.

    Keyword arguments:
    guess: array of (Ic+, Ic-, Rn, Vo)
    io: fixed Io.
    updateguess: guess update ratio 0 to 1
    """
    if 'guess' in kwargs:
        kwargs['guess'] = np.array(kwargs['guess'])  # array type
    update = kwargs.get('updateguess', 0.95)

    n = len(iarr)
    npopt = 4
    popt_arr, pcov_arr = np.zeros((n, npopt)), np.zeros((n, npopt, npopt))
    for k in range(n):
        try:
            done = False; l = 0
            while not done:
                # fit
                popt, pcov = jjiv.fit2rsj(iarr[k], varr[k], **kwargs)

                # update guess
                if k == 0:
                    kwargs['guess'] = popt
                else:
                    kwargs['guess'] = (1-update)*kwargs['guess'] + update*popt

                # check if fit is good
                l += 1
                if np.shape(pcov)==(4,4):
                    perr = np.sqrt(np.diag(pcov))
                else:
                    perr = (np.inf, np.inf, np.inf, np.inf)
                if (np.amax(perr) < .05) or (l > 5):
                    done = True
                    popt_arr[k], pcov_arr[k] = popt, pcov
                else:
                    print('Fit not good. Index: {}, Trial: {}'.format(k,l))
        except RuntimeError:
            print('Can\'t fit. Index: {}!'.format(k))

    return popt_arr, pcov_arr
2.078125
2
python/0122.py
garywei944/LeetCode
0
12353
from leetcode_tester import Tester

from typing import Optional, List


class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        r = 0
        for i in range(1, len(prices)):
            if prices[i] > prices[i - 1]:
                r += prices[i] - prices[i - 1]
        return r


if __name__ == '__main__':
    solution = Solution()
    test = Tester(solution.maxProfit)

    test.addTest(
        [7, 1, 5, 3, 6, 4], 7
    )
    test.addTest(
        [1, 2, 3, 4, 5], 4
    )
    test.addTest(
        [7, 6, 4, 3, 1], 0
    )

    test.doTest()
3.359375
3
dataset.py
songrotek/wechat_jump_end_to_end_train
26
12354
import torch
import json
import os
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as transforms
from PIL import Image
import numpy as np

data_folder = "./dataset/images"
press_times = json.load(open("./dataset/dataset.json"))
image_roots = [os.path.join(data_folder,image_file) \
               for image_file in os.listdir(data_folder)]

class JumpDataset(Dataset):

    def __init__(self,transform = None):
        self.image_roots = image_roots
        self.press_times = press_times
        self.transform = transform

    def __len__(self):
        return len(self.image_roots)

    def __getitem__(self,idx):
        image_root = self.image_roots[idx]
        image_name = image_root.split("/")[-1]
        image = Image.open(image_root)
        image = image.convert('RGB')
        image = image.resize((224,224), resample=Image.LANCZOS)
        #image = np.array(image, dtype=np.float32)
        if self.transform is not None:
            image = self.transform(image)
        press_time = self.press_times[image_name]
        return image,press_time

def jump_data_loader():
    normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
    transform = transforms.Compose([transforms.ToTensor(),normalize])
    dataset = JumpDataset(transform=transform)
    return DataLoader(dataset,batch_size = 32,shuffle = True)
2.609375
3
grafana_backup/create_snapshot.py
Keimille/grafana-backup-tool
515
12355
import json
from grafana_backup.dashboardApi import create_snapshot


def main(args, settings, file_path):
    grafana_url = settings.get('GRAFANA_URL')
    http_post_headers = settings.get('HTTP_POST_HEADERS')
    verify_ssl = settings.get('VERIFY_SSL')
    client_cert = settings.get('CLIENT_CERT')
    debug = settings.get('DEBUG')

    with open(file_path, 'r') as f:
        data = f.read()

    snapshot = json.loads(data)
    try:
        snapshot['name'] = snapshot['dashboard']['title']
    except KeyError:
        snapshot['name'] = "Untitled Snapshot"

    (status, content) = create_snapshot(json.dumps(snapshot), grafana_url, http_post_headers, verify_ssl, client_cert, debug)
    if status == 200:
        print("create snapshot: {0}, status: {1}, msg: {2}".format(snapshot['name'], status, content))
    else:
        print("creating snapshot {0} failed with status {1}".format(snapshot['name'], status))
2.5
2
examples/keras_ssd_example.py
jiayunhan/perceptron-benchmark
38
12356
""" Test case for Keras """ from perceptron.zoo.ssd_300.keras_ssd300 import SSD300 from perceptron.models.detection.keras_ssd300 import KerasSSD300Model from perceptron.utils.image import load_image from perceptron.benchmarks.brightness import BrightnessMetric from perceptron.utils.criteria.detection import TargetClassMiss from perceptron.utils.tools import bcolors from perceptron.utils.tools import plot_image_objectdetection # instantiate the model from keras applications ssd300 = SSD300() # initialize the KerasResNet50RetinaNetModel kmodel = KerasSSD300Model(ssd300, bounds=(0, 255)) # get source image and label # the model expects values in [0, 1], and channles_last image = load_image(shape=(300, 300), bounds=(0, 255), fname='car.png') metric = BrightnessMetric(kmodel, criterion=TargetClassMiss(7)) print(bcolors.BOLD + 'Process start' + bcolors.ENDC) adversary = metric(image, unpack=False) print(bcolors.BOLD + 'Process finished' + bcolors.ENDC) if adversary.image is None: print(bcolors.WARNING + 'Warning: Cannot find an adversary!' + bcolors.ENDC) exit(-1) ################### print summary info ##################################### keywords = ['Keras', 'SSD300', 'TargetClassMiss', 'BrightnessMetric'] print(bcolors.HEADER + bcolors.UNDERLINE + 'Summary:' + bcolors.ENDC) print('Configuration:' + bcolors.CYAN + ' --framework %s ' '--model %s --criterion %s ' '--metric %s' % tuple(keywords) + bcolors.ENDC) print('Minimum perturbation required: %s' % bcolors.BLUE + str(adversary.distance) + bcolors.ENDC) print('\n') # print the original image and the adversary plot_image_objectdetection(adversary, kmodel, bounds=(0, 255), title=", ".join(keywords), figname='examples/images/%s.png' % '_'.join(keywords))
2.609375
3
math/470.ImplementRand10UsingRand7.py
bzd111/leetcode
0
12357
<reponame>bzd111/leetcode
import sys


def rand7() -> int:
    ...


class Solution:
    def rand10(self) -> int:
        index = sys.maxsize
        # Rejection sampling: 7*(rand7()-1) + rand7() - 1 is uniform on 0..48.
        # Only the 40 outcomes 0..39 are accepted, so the result is uniform;
        # accepting 40 as well would bias the mapping toward 1.
        while index >= 40:
            index = 7 * (rand7() - 1) + rand7() - 1
        return index % 10 + 1
2.953125
3
segmentation/data/transforms/__init__.py
RajasekharChowdary9/panoptic-deeplab
506
12358
<gh_stars>100-1000
from .build import build_transforms
from .pre_augmentation_transforms import Resize
from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator
1.109375
1
plasmapy/diagnostics/proton_radiography.py
MarikinPaulina/PlasmaPy
0
12359
""" Routines for the analysis of proton radiographs. These routines can be broadly classified as either creating synthetic radiographs from prescribed fields or methods of 'inverting' experimentally created radiographs to reconstruct the original fields (under some set of assumptions). """ __all__ = [ "SyntheticProtonRadiograph", ] import astropy.constants as const import astropy.units as u import numpy as np import sys import warnings from tqdm import tqdm from plasmapy import particles from plasmapy.formulary.mathematics import rot_a_to_b from plasmapy.particles import Particle from plasmapy.plasma.grids import AbstractGrid from plasmapy.simulation.particle_integrators import boris_push def _coerce_to_cartesian_si(pos): """ Takes a tuple of `astropy.unit.Quantity` values representing a position in space in either Cartesian, cylindrical, or spherical coordinates, and returns a numpy array representing the same point in Cartesian coordinates and units of meters. """ # Auto-detect geometry based on units geo_units = [x.unit for x in pos] if geo_units[2].is_equivalent(u.rad): geometry = "spherical" elif geo_units[1].is_equivalent(u.rad): geometry = "cylindrical" else: geometry = "cartesian" # Convert geometrical inputs between coordinates systems pos_out = np.zeros(3) if geometry == "cartesian": x, y, z = pos pos_out[0] = x.to(u.m).value pos_out[1] = y.to(u.m).value pos_out[2] = z.to(u.m).value elif geometry == "cylindrical": r, t, z = pos r = r.to(u.m) t = t.to(u.rad).value z = z.to(u.m) pos_out[0] = (r * np.cos(t)).to(u.m).value pos_out[1] = (r * np.sin(t)).to(u.m).value pos_out[2] = z.to(u.m).value elif geometry == "spherical": r, t, p = pos r = r.to(u.m) t = t.to(u.rad).value p = p.to(u.rad).value pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value pos_out[2] = (r * np.cos(t)).to(u.m).value return pos_out class SyntheticProtonRadiograph: r""" Represents a charged particle radiography experiment with simulated or calculated E and B fields given at positions defined by a grid of spatial coordinates. The particle source and detector plane are defined by vectors from the origin of the grid. Parameters ---------- grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z]. If any of these quantities are missing, a warning will be given and that quantity will be assumed to be zero everywhere. source : `~astropy.units.Quantity`, shape (3) A vector pointing from the origin of the grid to the location of the particle source. This vector will be interpreted as being in either cartesian, cylindrical, or spherical coordinates based on its units. Valid geometries are: * Cartesian (x,y,z) : (meters, meters, meters) * cylindrical (r, theta, z) : (meters, radians, meters) * spherical (r, theta, phi) : (meters, radians, radians) In spherical coordinates theta is the polar angle. detector : `~astropy.units.Quantity`, shape (3) A vector pointing from the origin of the grid to the center of the detector plane. The vector from the source point to this point defines the normal vector of the detector plane. This vector can also be specified in cartesian, cylindrical, or spherical coordinates (see the `source` keyword). detector_hdir : `numpy.ndarray`, shape (3), optional A unit vector (in Cartesian coordinates) defining the horizontal direction on the detector plane. 
By default, the horizontal axis in the detector plane is defined to be perpendicular to both the source-to-detector vector and the z-axis (unless the source-to-detector axis is parallel to the z axis, in which case the horizontal axis is the x-axis). The detector vertical axis is then defined to be orthogonal to both the source-to-detector vector and the detector horizontal axis. verbose : bool, optional If true, updates on the status of the program will be printed into the standard output while running. """ def __init__( self, grid: AbstractGrid, source: u.m, detector: u.m, detector_hdir=None, verbose=True, ): # self.grid is the grid object self.grid = grid # self.grid_arr is the grid positions in si units. This is created here # so that it isn't continously called later self.grid_arr = grid.grid.to(u.m).value self.verbose = verbose # A list of wire meshes added to the grid with add_wire_mesh # Particles that would hit these meshes will be removed at runtime # by _apply_wire_mesh self.mesh_list = [] # ************************************************************************ # Setup the source and detector geometries # ************************************************************************ self.source = _coerce_to_cartesian_si(source) self.detector = _coerce_to_cartesian_si(detector) self._log(f"Source: {self.source} m") self._log(f"Detector: {self.detector} m") # Calculate normal vectors (facing towards the grid origin) for both # the source and detector planes self.src_n = -self.source / np.linalg.norm(self.source) self.det_n = -self.detector / np.linalg.norm(self.detector) # Vector directly from source to detector self.src_det = self.detector - self.source # Magnification self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source) self._log(f"Magnification: {self.mag}") # Check that source-detector vector actually passes through the grid if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m): raise ValueError( "The vector between the source and the detector " "does not intersect the grid provided!" ) # Determine the angle above which particles will not hit the grid # these particles can be ignored until the end of the simulation, # then immediately advanced to the detector grid with their original # velocities self.max_theta_hit_grid = self._max_theta_hit_grid() # ************************************************************************ # Define the detector plane # ************************************************************************ # Load or calculate the detector hdir if detector_hdir is not None: self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir) else: self.det_hdir = self._default_detector_hdir() # Calculate the detector vdir ny = np.cross(self.det_hdir, self.det_n) self.det_vdir = -ny / np.linalg.norm(ny) # ************************************************************************ # Validate the E and B fields # ************************************************************************ req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"] self.grid.require_quantities(req_quantities, replace_with_zeros=True) for rq in req_quantities: # Check that there are no infinite values if not np.isfinite(self.grid[rq].value).all(): raise ValueError( f"Input arrays must be finite: {rq} contains " "either NaN or infinite values." 
) # Check that the max values on the edges of the arrays are # small relative to the maximum values on that grid # # Array must be dimensionless to re-assemble it into an array # of max values like this arr = np.abs(self.grid[rq]).value edge_max = np.max( np.array( [ np.max(arr[0, :, :]), np.max(arr[-1, :, :]), np.max(arr[:, 0, :]), np.max(arr[:, -1, :]), np.max(arr[:, :, 0]), np.max(arr[:, :, -1]), ] ) ) if edge_max > 1e-3 * np.max(arr): unit = grid.recognized_quantities[rq].unit warnings.warn( "Fields should go to zero at edges of grid to avoid " f"non-physical effects, but a value of {edge_max:.2E} {unit} was " f"found on the edge of the {rq} array. Consider applying a " "envelope function to force the fields at the edge to go to " "zero.", RuntimeWarning, ) def _default_detector_hdir(self): """ Calculates the default horizontal unit vector for the detector plane (see __init__ description for details) """ # Create unit vectors that define the detector plane # Define plane horizontal axis if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])): nx = np.array([1, 0, 0]) else: nx = np.cross(np.array([0, 0, 1]), self.det_n) nx = nx / np.linalg.norm(nx) return nx def _max_theta_hit_grid(self): r""" Using the grid and the source position, compute the maximum particle theta that will impact the grid. This value can be used to determine which particles are worth tracking. """ ind = 0 theta = np.zeros([8]) for x in [0, -1]: for y in [0, -1]: for z in [0, -1]: # Source to grid corner vector vec = self.grid_arr[x, y, z, :] - self.source # Calculate angle between vec and the source-to-detector # axis, which is the central axis of the particle beam theta[ind] = np.arccos( np.dot(vec, self.src_det) / np.linalg.norm(vec) / np.linalg.norm(self.src_det) ) ind += 1 return np.max(theta) def _log(self, msg): if self.verbose: print(msg) # Define some constants so they don't get constantly re-evaluated _c = const.c.si.value # ************************************************************************* # Create mesh # ************************************************************************* def add_wire_mesh( self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None ): """ Add a wire mesh grid between the particle source and the object grid that blocks particles whose paths intersect the wires. Parameters ---------- location : `~astropy.units.Quantity`, shape (3) A vector pointing from the origin of the grid to the center of the mesh grid. This location must be between the source and the object grid. This vector will be interpreted as being in either cartesian, cylindrical, or spherical coordinates based on its units. Valid geometries are: * Cartesian (x,y,z) : (meters, meters, meters) * cylindrical (r, theta, z) : (meters, radians, meters) * spherical (r, theta, phi) : (meters, radians, radians) In spherical coordinates theta is the polar angle. extent : Tuple of 1 or 2 `~astropy.units.Quantity` The size of the mesh grid (in the mesh plane). If one value is provided, the mesh is circular and the value provided is interpreted as the diameter. If two values are provided, the mesh is rectangular and they the values are interpreted as the width and height respectively. nwires : Tuple of 1 or 2 ints, or a single int The number of wires in the horizontal and vertical directions. If only one value is provided, the number in the two directions is assumed to be equal. Note that a wire will cross the center of the mesh only when nwires is odd. 
wire_diameter : `~astropy.units.Quantity` The diameter of the wires. mesh_hdir : `numpy.ndarray`, shape (3), optional A unit vector (in Cartesian coordinates) defining the horizontal direction on the mesh plane. Modifying this vector can rotate the mesh in the plane or tilt the mesh plane relative to the source-detector axis. By default, `mesh_hdir` is set equal to `detector_hdir` (see `detector_hdir` keyword in `__init__`). mesh_vdir : `numpy.ndarray`, shape (3), optional A unit vector (in Cartesian coordinates) defining the vertical direction on the mesh plane. Modifying this vector can tilt the mesh relative to the source-detector axis. By default, `mesh_vdir` is defined to be perpendicular to `mesh_hdir` and the detector plane normal (such that the mesh is parallel to the detector plane). Raises ------ ValueError Raises a ValueError if the provided mesh location is not between the source and the object grid. """ location = _coerce_to_cartesian_si(location) wire_radius = wire_diameter.si.value / 2 if not isinstance(extent, tuple): extent = (extent,) if len(extent) == 1: radius = 0.5 * extent[0].si.value width = extent[0].si.value height = extent[0].si.value elif len(extent) == 2: radius = None width = extent[0].si.value height = extent[1].si.value else: raise ValueError( "extent must be a tuple of 1 or 2 elements, but " f"{len(extent)} elements were provided." ) if not isinstance(nwires, tuple): nwires = (nwires,) if len(nwires) != 2: nwires = (nwires[0], nwires[0]) # If no hdir/vdir is specified, calculate a default value # If one is specified, make sure it is normalized if mesh_hdir is None: # Re-calculate the default here, in case the user # specified a different det_hdir mesh_hdir = self._default_detector_hdir() else: mesh_hdir = mesh_hdir / np.linalg.norm(mesh_hdir) if mesh_vdir is None: mesh_vdir = np.cross(mesh_hdir, self.det_n) mesh_vdir = -mesh_vdir / np.linalg.norm(mesh_vdir) else: mesh_vdir = mesh_vdir / np.linalg.norm(mesh_vdir) # Raise exception if mesh is AFTER the field grid if np.linalg.norm(location - self.source) > np.linalg.norm(self.source): raise ValueError( f"The specified mesh location, {location}," "is not between the source and the origin." 
) mesh_entry = { "location": location, "wire_radius": wire_radius, "radius": radius, "width": width, "height": height, "nwires": nwires, "mesh_hdir": mesh_hdir, "mesh_vdir": mesh_vdir, } self.mesh_list.append(mesh_entry) def _apply_wire_mesh( self, location=None, wire_radius=None, radius=None, width=None, height=None, nwires=None, mesh_hdir=None, mesh_vdir=None, ): """ Apply wire meshes that were added to self.mesh_list """ x = self._coast_to_plane(location, mesh_hdir, mesh_vdir) # Particle positions in 2D on the mesh plane xloc = np.dot(x - location, mesh_hdir) yloc = np.dot(x - location, mesh_vdir) # Create an array in which True indicates that a particle has hit a wire # and False indicates that it has not hit = np.zeros(self.nparticles, dtype=bool) # Mark particles that overlap vertical or horizontal position with a wire h_centers = np.linspace(-width / 2, width / 2, num=nwires[0]) for c in h_centers: hit |= np.isclose(xloc, c, atol=wire_radius) v_centers = np.linspace(-height / 2, height / 2, num=nwires[1]) for c in v_centers: hit |= np.isclose(yloc, c, atol=wire_radius) # Put back any particles that are outside the mesh boundaries # First handle the case where the mesh is rectangular if radius is None: # Replace particles outside the x-boundary hit[ np.logical_or( xloc > np.max(h_centers) + wire_radius, xloc < np.min(h_centers) - wire_radius, ) ] = False # Replace particles outside the y-boundary hit[ np.logical_or( yloc > np.max(v_centers) + wire_radius, yloc < np.min(v_centers) - wire_radius, ) ] = False # Handle the case where the mesh is circular else: loc_rad = np.sqrt(xloc ** 2 + yloc ** 2) hit[loc_rad > radius] = False # In the case of a circular mesh, also create a round wire along the # outside edge hit[np.isclose(loc_rad, radius, atol=wire_radius)] = True # Identify the particles that have hit something, then remove them from # all of the arrays keep_these_particles = ~hit number_kept_particles = keep_these_particles.sum() nremoved = self.nparticles - number_kept_particles if self.nparticles - nremoved <= 0: raise ValueError( "The specified mesh is blocking all of the particles. " f"The wire diameter ({2*wire_radius}) may be too large." ) self.x = self.x[keep_these_particles, :] self.v = self.v[keep_these_particles, :] self.theta = self.theta[ keep_these_particles ] # Important to apply here to get correct grid_ind self.nparticles = number_kept_particles # ************************************************************************* # Particle creation methods # ************************************************************************* def _angles_monte_carlo(self): """ Generates angles for each particle randomly such that the flux per solid angle is uniform. """ # Create a probability vector for the theta distribution # Theta must follow a sine distribution in order for the particle # flux per solid angle to be uniform. arg = np.linspace(0, self.max_theta, num=int(1e5)) prob = np.sin(arg) prob *= 1 / np.sum(prob) # Randomly choose theta's weighted with the sine probabilities theta = np.random.choice(arg, size=self.nparticles, replace=True, p=prob) # Also generate a uniform phi distribution phi = np.random.uniform(high=2 * np.pi, size=self.nparticles) return theta, phi def _angles_uniform(self): """ Generates angles for each particle such that their velocities are uniformly distributed on a grid in theta and phi. This method requires that `nparticles` be a perfect square. If it is not, `nparticles` will be set as the largest perfect square smaller than the provided `nparticles`. 
""" # Calculate the approximate square root n_per = np.floor(np.sqrt(self.nparticles)).astype(np.int32) # Set new nparticles to be a perfect square self.nparticles = n_per ** 2 # Create an imaginary grid positioned 1 unit from the source # and spanning max_theta at the corners extent = np.sin(self.max_theta) / np.sqrt(2) arr = np.linspace(-extent, extent, num=n_per) harr, varr = np.meshgrid(arr, arr, indexing="ij") # calculate the angles from the source for each point in # the grid. theta = np.arctan(np.sqrt(harr ** 2 + varr ** 2)) phi = np.arctan2(varr, harr) return theta.flatten(), phi.flatten() @particles.particle_input def create_particles( self, nparticles, particle_energy, max_theta=None, particle: Particle = Particle("p+"), distribution="monte-carlo", ): r""" Generates the angular distributions about the Z-axis, then rotates those distributions to align with the source-to-detector axis. By default, particles are generated over almost the entire pi/2. However, if the detector is far from the source, many of these particles will never be observed. The max_theta keyword allows these extraneous particles to be neglected to focus computational resources on the particles who will actually hit the detector. nparticles : integer The number of particles to include in the simulation. The default is 1e5. particle_energy : `~astropy.units.Quantity` The energy of the particle, in units convertible to eV. All particles are given the same energy. max_theta : `~astropy.units.Quantity`, optional The largest velocity vector angle (measured from the source-to-detector axis) for which particles should be generated. Decreasing this angle can eliminate particles that would never reach the detector region of interest. If no value is given, a guess will be made based on the size of the grid. Units must be convertible to radians. particle : ~plasmapy.particles.Particle or string representation of same, optional Representation of the particle species as either a `Particle` object or a string representation. The default particle is protons. distribution: str A keyword which determines how particles will be distributed in velocity space. Options are: - 'monte-carlo': velocities will be chosen randomly, such that the flux per solid angle is uniform. - 'uniform': velocities will be distrbuted such that, left unperturbed,they will form a uniform pattern on the detection plane. This method requires that `nparticles` be a perfect square. If it is not, `nparticles` will be set as the largest perfect square smaller than the provided `nparticles`. Simulations run in the `uniform` mode will imprint a grid pattern on the image, but will well-sample the field grid with a smaller number of particles. 
The default is `monte-carlo` """ self._log("Creating Particles") # Load inputs self.nparticles = int(nparticles) self.particle_energy = particle_energy.to(u.eV).value self.q = particle.charge.to(u.C).value self.m = particle.mass.to(u.kg).value # If max_theta is not specified, make a guess based on the grid size if max_theta is None: self.max_theta = np.clip( 1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2 ) else: self.max_theta = max_theta.to(u.rad).value # Calculate the velocity corresponding to the particle energy ER = self.particle_energy * 1.6e-19 / (self.m * self._c ** 2) v0 = self._c * np.sqrt(1 - 1 / (ER + 1) ** 2) if distribution == "monte-carlo": theta, phi = self._angles_monte_carlo() elif distribution == "uniform": theta, phi = self._angles_uniform() # Temporarily save theta to later determine which particles # should be tracked self.theta = theta # Construct the velocity distribution around the z-axis self.v = np.zeros([self.nparticles, 3]) self.v[:, 0] = v0 * np.sin(theta) * np.cos(phi) self.v[:, 1] = v0 * np.sin(theta) * np.sin(phi) self.v[:, 2] = v0 * np.cos(theta) # Calculate the rotation matrix that rotates the z-axis # onto the source-detector axis a = np.array([0, 0, 1]) b = self.detector - self.source rot = rot_a_to_b(a, b) # Apply rotation matrix to calculated velocity distribution self.v = np.matmul(self.v, rot) # Place particles at the source self.x = np.tile(self.source, (self.nparticles, 1)) @particles.particle_input def load_particles( self, x, v, particle: Particle = Particle("p+"), ): r""" Load arrays of particle positions and velocities x : `~astropy.units.Quantity`, shape (N,3) Positions for N particles v: `~astropy.units.Quantity`, shape (N,3) Velocities for N particles particle : ~plasmapy.particles.Particle or string representation of same, optional Representation of the particle species as either a `Particle` object or a string representation. The default particle is protons. distribution: str A keyword which determines how particles will be distributed in velocity space. Options are: - 'monte-carlo': velocities will be chosen randomly, such that the flux per solid angle is uniform. - 'uniform': velocities will be distrbuted such that, left unpreturbed,they will form a uniform pattern on the detection plane. Simulations run in the `uniform` mode will imprint a grid pattern on the image, but will well-sample the field grid with a smaller number of particles. The default is `monte-carlo` """ self.q = particle.charge.to(u.C).value self.m = particle.mass.to(u.kg).value if x.shape[0] != v.shape[0]: raise ValueError( "Provided x and v arrays have inconsistent numbers " " of particles " f"({x.shape[0]} and {v.shape[0]} respectively)." ) else: self.nparticles = x.shape[0] self.x = x.to(u.m).value self.v = v.to(u.m / u.s).value self.theta = np.arccos( np.inner(self.v, self.src_n) / np.linalg.norm(self.v, axis=-1) ) n_wrong_way = np.sum(np.where(self.theta > np.pi / 2, 1, 0)) if n_wrong_way > 1: warnings.warn( f"{100*n_wrong_way/self.nparticles:.2f}% of particles " "initialized are heading away from the grid. 
Check the orientation " " of the provided velocity vectors.", RuntimeWarning, ) # ************************************************************************* # Run/push loop methods # ************************************************************************* def _adaptive_dt(self, Ex, Ey, Ez, Bx, By, Bz): r""" Calculate the appropriate dt based on a number of considerations including the local grid resolution (ds) and the gyroperiod of the particles in the current fields. """ # If dt was explicitly set, skip the rest of this function if self.dt.size == 1: return self.dt # Compute the timestep indicated by the grid resolution ds = self.grid.grid_resolution.to(u.m).value gridstep = 0.5 * (np.min(ds) / self.vmax) # If not, compute a number of possible timesteps # Compute the cyclotron gyroperiod Bmag = np.max(np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)).to(u.T).value # Compute the gyroperiod if Bmag == 0: gyroperiod = np.inf else: gyroperiod = 2 * np.pi * self.m / (self.q * np.max(Bmag)) # TODO: introduce a minimum timestep based on electric fields too! # Create an array of all the possible time steps we computed candidates = np.array([gyroperiod / 12, gridstep]) # Enforce limits on dt candidates = np.clip(candidates, self.dt[0], self.dt[1]) # dt is the min of the remaining candidates return np.min(candidates) def _coast_to_grid(self): r""" Coasts all particles to the timestep when the first particle should be entering the grid. Doing in this in one step (rather than pushing the particles through zero fields) saves computation time. """ # Distance from the source to the nearest gridpoint dist = np.min(np.linalg.norm(self.grid_arr - self.source, axis=3)) # Find the particle with the highest speed towards the grid vmax = np.max(np.dot(self.v, self.src_n)) # Time for fastest possible particle to reach the grid. t = dist / vmax # Coast the particles to the advanced position self.x = self.x + self.v * t def _coast_to_plane(self, center, hdir, vdir, x=None): """ Calculates the positions where the current trajectories of each particle impact a plane, described by the plane's center and horizontal and vertical unit vectors. Returns an [nparticles, 3] array of the particle positions in the plane By default this function does not alter self.x. The optional keyword x can be used to pass in an output array that will used to hold the positions in the plane. This can be used to directly update self.x as follows: self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x = self.x) """ normal = np.cross(hdir, vdir) # Calculate the time required to evolve each particle into the # plane t = np.inner(center[np.newaxis, :] - self.x, normal) / np.inner(self.v, normal) # Calculate particle positions in the plane if x is None: # If no output array is provided, preallocate x = np.empty_like(self.x) x[...] = self.x + self.v * t[:, np.newaxis] # Check that all points are now in the plane # (Eq. of a plane is nhat*x + d = 0) plane_eq = np.dot(x - center, normal) assert np.allclose(plane_eq, 0, atol=1e-6) return x def _remove_deflected_particles(self): r""" Removes any particles that have been deflected away from the detector plane (eg. those that will never hit the grid) """ dist_remaining = np.dot(self.x, self.det_n) + np.linalg.norm(self.detector) v_towards_det = np.dot(self.v, -self.det_n) # If particles have not yet reached the detector plane and are moving # away from it, they will never reach the detector. # So, we can remove them from the arrays # Find the indices of all particles that we should keep: # i.e. 
those still moving towards the detector. ind = np.logical_not((v_towards_det < 0) & (dist_remaining > 0)).nonzero()[0] # Drop the other particles self.x = self.x[ind, :] self.v = self.v[ind, :] self.v_init = self.v_init[ind, :] self.nparticles_grid = self.x.shape[0] # Store the number of particles deflected self.fract_deflected = (self.nparticles - ind.size) / self.nparticles # Warn the user if a large number of particles are being deflected if self.fract_deflected > 0.05: warnings.warn( f"{100*self.fract_deflected:.1f}% particles have been " "deflected away from the detector plane. The fields " "provided may be too high to successfully radiograph " "with this particle energy.", RuntimeWarning, ) def _push(self): r""" Advance particles using an implementation of the time-centered Boris algorithm """ # Get a list of positions (input for interpolator) pos = self.x[self.grid_ind, :] * u.m # Update the list of particles on and off the grid self.on_grid = self.grid.on_grid(pos) # entered_grid is zero at the end if a particle has never # entered the grid self.entered_grid += self.on_grid # Estimate the E and B fields for each particle # Note that this interpolation step is BY FAR the slowest part of the push # loop. Any speed improvements will have to come from here. if self.field_weighting == "volume averaged": Ex, Ey, Ez, Bx, By, Bz = self.grid.volume_averaged_interpolator( pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True, ) elif self.field_weighting == "nearest neighbor": Ex, Ey, Ez, Bx, By, Bz = self.grid.nearest_neighbor_interpolator( pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True, ) # Create arrays of E and B as required by push algorithm E = np.array( [Ex.to(u.V / u.m).value, Ey.to(u.V / u.m).value, Ez.to(u.V / u.m).value] ) E = np.moveaxis(E, 0, -1) B = np.array([Bx.to(u.T).value, By.to(u.T).value, Bz.to(u.T).value]) B = np.moveaxis(B, 0, -1) # Calculate the adaptive timestep from the fields currently experienced # by the particles # If user sets dt explicitly, that's handled in _adpative_dt dt = self._adaptive_dt(Ex, Ey, Ez, Bx, By, Bz) # TODO: Test v/c and implement relativistic Boris push when required # vc = np.max(v)/_c x = self.x[self.grid_ind, :] v = self.v[self.grid_ind, :] boris_push(x, v, B, E, self.q, self.m, dt) self.x[self.grid_ind, :] = x self.v[self.grid_ind, :] = v def _stop_condition(self): r""" The stop condition is that most of the particles have entered the grid and almost all have now left it. """ # Count the number of particles who have entered, which is the # number of non-zero entries in entered_grid self.num_entered = np.nonzero(self.entered_grid)[0].size # How many of the particles have entered the grid self.fract_entered = np.sum(self.num_entered) / self.nparticles_grid # Of the particles that have entered the grid, how many are currently # on the grid? # if/else avoids dividing by zero if np.sum(self.num_entered) > 0: still_on = np.sum(self.on_grid) / np.sum(self.num_entered) else: still_on = 0.0 if self.fract_entered > 0.1 and still_on < 0.001: # Warn user if < 10% of the particles ended up on the grid if self.num_entered < 0.1 * self.nparticles: warnings.warn( f"Only {100*self.num_entered/self.nparticles:.2f}% of " "particles entered the field grid: consider " "decreasing the max_theta to increase this " "number.", RuntimeWarning, ) return True else: return False def run( self, dt=None, field_weighting="volume averaged", ): r""" Runs a particle-tracing simulation. 
Timesteps are adaptively calculated based on the local grid resolution and the electric and magnetic fields the particles are experiencing. After all particles have left the grid, they are advanced to the detector plane where they can be used to construct a synthetic diagnostic image. Parameters ---------- dt : `~astropy.units.Quantity`, optional An explicitly set timestep in units convertible to seconds. Setting this optional keyword overrules the adaptive time step capability and forces the use of this timestep throughout. If a tuple of timesteps is provided, the adaptive timestep will be clamped between the first and second values. field_weighting : str String that selects the field weighting algorithm used to determine what fields are felt by the particles. Options are: * 'nearest neighbor': Particles are assigned the fields on the grid vertex closest to them. * 'volume averaged' : The fields experienced by a particle are a volume-average of the eight grid points surrounding them. The default is 'volume averaged'. Returns ------- None. """ # Load and validate inputs field_weightings = ["volume averaged", "nearest neighbor"] if field_weighting in field_weightings: self.field_weighting = field_weighting else: raise ValueError( f"{field_weighting} is not a valid option for " "field_weighting. Valid choices are " f"{field_weightings}" ) if dt is None: # Set dt as an infinite range by default (auto dt with no restrictions) self.dt = np.array([0.0, np.inf]) * u.s else: self.dt = dt self.dt = (self.dt).to(u.s).value # Check to make sure particles have already been generated if not hasattr(self, "x"): raise ValueError( "Either the create_particles or load_particles method must be " "called before running the particle tracing algorithm." ) # If meshes have been added, apply them now for mesh in self.mesh_list: self._apply_wire_mesh(**mesh) # Store a copy of the initial velocity distribution in memory # This will be used later to calculate the maximum deflection self.v_init = np.copy(self.v) # Calculate the maximum velocity # Used for determining the grid crossing maximum timestep self.vmax = np.max(np.linalg.norm(self.v, axis=-1)) # Determine which particles should be tracked # This array holds the indices of all particles that WILL hit the grid # Only these particles will actually be pushed through the fields self.grid_ind = np.where(self.theta < self.max_theta_hit_grid)[0] self.nparticles_grid = len(self.grid_ind) self.fract_tracked = self.nparticles_grid / self.nparticles # Create flags for tracking particles during the simulation # on_grid -> 1 if the particle is currently on the grid, 0 otherwise self.on_grid = np.zeros([self.nparticles_grid]) # entered_grid -> non-zero if the particle EVER entered the grid self.entered_grid = np.zeros([self.nparticles_grid]) # Generate a null distribution of points (the result in the absence of # any fields) for statistical comparison self.x0 = self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir) # Advance the particles to near the start of the grid self._coast_to_grid() # Initialize a "progress bar" (really more of a meter) # Setting sys.stdout lets this play nicely with regular print() pbar = tqdm( initial=0, total=self.nparticles_grid + 1, disable=not self.verbose, desc="Particles on grid", unit="particles", bar_format="{l_bar}{bar}{n:.1e}/{total:.1e} {unit}", file=sys.stdout, ) # Push the particles until the stop condition is satisfied # (no more particles on the simulation grid) while not self._stop_condition(): n_on_grid = 
np.sum(self.on_grid) pbar.n = n_on_grid pbar.last_print_n = n_on_grid pbar.update() self._push() pbar.close() # Remove particles that will never reach the detector self._remove_deflected_particles() # Advance the particles to the image plane self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x=self.x) # Log a summary of the run self._log("Run completed") self._log("Fraction of particles tracked: " f"{self.fract_tracked*100:.1f}%") self._log( "Fraction of tracked particles that entered the grid: " f"{self.fract_entered*100:.1f}%" ) self._log( "Fraction of tracked particles deflected away from the " "detector plane: " f"{self.fract_deflected*100}%" ) @property def max_deflection(self): """ The maximum deflection experienced by one of the particles, determined by comparing their initial and final velocitiy vectors. This value can be used to determine the charged particle radiography regime using the dimensionless number defined by Kugland et al. 2012 Returns ------- max_deflection : float The maximum deflection in radians """ # Normalize the initial and final velocities v_norm = self.v / np.linalg.norm(self.v, axis=1, keepdims=True) v_init_norm = self.v_init / np.linalg.norm(self.v_init, axis=1, keepdims=True) # Compute the dot product proj = np.sum(v_norm * v_init_norm, axis=1) # In case of numerical errors, make sure the output is within the domain of # arccos proj = np.where(proj > 1, 1, proj) max_deflection = np.max(np.arccos(proj)) return max_deflection * u.rad # ************************************************************************* # Synthetic diagnostic methods (creating output) # ************************************************************************* def synthetic_radiograph( self, size=None, bins=[200, 200], ignore_grid=False, optical_density=False ): r""" Calculate a "synthetic radiograph" (particle count histogram in the image plane). Parameters ---------- size : `~astropy.units.Quantity`, shape (2,2) The size of the detector array, specified as the minimum and maximum values included in both the horizontal and vertical directions in the detector plane coordinates. Shape is [[hmin,hmax], [vmin, vmax]]. Units must be convertable to meters. bins : array of integers, shape (2) The number of bins in each direction in the format [hbins, vbins]. The default is [200,200]. ignore_grid: bool If True, returns the intensity in the image plane in the absence of simulated fields. optical_density: bool If True, return the optical density rather than the intensity .. math:: OD = -log_{10}(Intensity/I_0) where I_O is the intensity on the detector plane in the absence of simulated fields. Default is False. Returns ------- hax : `~astropy.units.Quantity` array shape (hbins,) The horizontal axis of the synthetic radiograph in meters. vax : `~astropy.units.Quantity` array shape (vbins, ) The vertical axis of the synthetic radiograph in meters. intensity : ndarray, shape (hbins, vbins) The number of particles counted in each bin of the histogram. """ # Note that, at the end of the simulation, all particles were moved # into the image plane. 
# If ignore_grid is True, use the predicted positions in the absence of # simulated fields if ignore_grid: x = self.x0 else: x = self.x # Determine locations of points in the detector plane using unit # vectors xloc = np.dot(x - self.detector, self.det_hdir) yloc = np.dot(x - self.detector, self.det_vdir) if size is None: # If a detector size is not given, choose lengths based on the # dimensions of the grid w = self.mag * np.max( [ np.max(np.abs(self.grid.pts0.to(u.m).value)), np.max(np.abs(self.grid.pts1.to(u.m).value)), np.max(np.abs(self.grid.pts2.to(u.m).value)), ] ) # The factor of 5 here is somewhat arbitrary: we just want a # region a few times bigger than the image of the grid on the # detector, since particles could be deflected out size = 5 * np.array([[-w, w], [-w, w]]) * u.m # Generate the histogram intensity, h, v = np.histogram2d( xloc, yloc, range=size.to(u.m).value, bins=bins ) # h, v are the bin edges: compute the centers to produce arrays # of the right length (then trim off the extra point) h = ((h + np.roll(h, -1)) / 2)[0:-1] v = ((v + np.roll(v, -1)) / 2)[0:-1] # Throw a warning if < 50% of the particles are included on the # histogram percentage = np.sum(intensity) / self.nparticles if percentage < 0.5: warnings.warn( f"Only {percentage:.2%} of the particles are shown " "on this synthetic radiograph. Consider increasing " "the size to include more.", RuntimeWarning, ) if optical_density: # Generate the null radiograph x, y, I0 = self.synthetic_radiograph(size=size, bins=bins, ignore_grid=True) # Calculate I0 as the mean of the non-zero values in the null # histogram. Zeros are just outside of the illuminate area. I0 = np.mean(I0[I0 != 0]) # Overwrite any zeros in intensity to avoid log10(0) intensity[intensity == 0] = 1 # Calculate the optical_density intensity = -np.log10(intensity / I0) return h * u.m, v * u.m, intensity
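A minimal end-to-end sketch of how the methods above fit together. Here `sim` stands for an instance of the enclosing tracker class, whose construction (from a field grid, source, and detector location) happens earlier in this file and is not shown here, so the variable name and setup values are assumptions:

import numpy as np
import astropy.units as u

# sim = <tracker instance built from a grid, source, and detector>  (assumed to exist)
sim.create_particles(
    nparticles=1e5,
    particle_energy=3 * u.MeV,        # converted internally to eV
    max_theta=np.pi / 6 * u.rad,      # drop particles that could never reach the detector
    distribution="monte-carlo",
)
sim.run(field_weighting="volume averaged")   # adaptive timestep unless dt= is given
hax, vax, intensity = sim.synthetic_radiograph(bins=[200, 200], optical_density=False)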
2.625
3
polyengine/switch_start.py
AkanshDivker/polyengine
5
12360
# switch_start.py # Inserts a junk switch statement into a source-code string for obfuscation # Authors : <NAME> import string import random class Switch_Start: def __init__(self, source): self.string = source def insert_switch(self, source): # generate a random identifier: one letter followed by 11 alphanumeric characters _LENGTH = 11 string_pool = string.ascii_letters + string.digits num_pool = string.digits var1 = random.choice(string.ascii_letters) for i in range(_LENGTH): var1 += random.choice(string_pool) # pieces of a C-style junk block: { int <var>=<digit>; switch (0) { case 0: ... case 3: ...; } } first = "{int " case0 = "switch (0) { case 0:" case1 = "; case 1:" case2 = "; case 2:" case3 = "; case 3:" last = "; }}" # append the junk block to the given source string and return the result result = source + first + var1 + "="+random.choice(num_pool)+";" + case0 + var1 + "++" + case1 + var1 + "--" + case2 + var1 + "++" + case3 + var1 + "--" + last return result
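A small usage sketch for the class above; the input fragment is a placeholder, and the output is the same text with a junk C-style switch block appended:

sw = Switch_Start("")                          # the stored string is not used by insert_switch
obfuscated = sw.insert_switch("void foo() ")   # placeholder C/C++ fragment
print(obfuscated)                              # e.g. "void foo() {int aB3...=7;switch (0) { case 0:... }}"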
3.75
4
focal_mech/demo/test6.py
blasscoc/FocalMechClassifier
12
12361
from numpy import array, rad2deg, pi, mgrid, argmin from matplotlib.pylab import contour import matplotlib.pyplot as plt import mplstereonet from obspy.imaging.beachball import aux_plane from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm from focal_mech.io.read_hash import read_demo, read_hash_solutions from focal_mech.util.hash_routines import hash_to_classifier from focal_mech.lib.sph_harm import get_sph_harm from focal_mech.lib.correlate import corr_shear hash_solns = read_hash_solutions("example1.out") # we want solutions that are symetric polarity_data = read_demo("north1.phase", "scsn.reverse", reverse=True) inputs = hash_to_classifier(polarity_data, parity=1) event = 3146815 result = classify(*inputs[event], kernel_degree=2) Alm = translate_to_sphharm(*result, kernel_degree=2) coeffs = array([Alm[0,0], Alm[1,-1], Alm[1,0], Alm[1,1], Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]]) svm_soln, f = corr_shear(Alm) resolution = (200,400) longi, lati, Z = get_sph_harm(resolution=resolution) mech = coeffs.dot(Z).real longi.shape = resolution lati.shape = resolution mech.shape = resolution c = contour(longi, lati, mech, [0]) pth1 = c.collections[0].get_paths()[0].vertices pth1 = rad2deg(pth1) pth2 = c.collections[0].get_paths()[1].vertices pth2 = rad2deg(pth2) hash_focal = rad2deg(hash_solns[event]) event2 = 3158361 result = classify(*inputs[event2], kernel_degree=2) Alm = translate_to_sphharm(*result, kernel_degree=2) coeffs = array([Alm[0,0], Alm[1,-1], Alm[1,0], Alm[1,1], Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]]) svm_soln2, f = corr_shear(Alm) resolution = (200,400) longi, lati, Z = get_sph_harm(resolution=resolution) mech = coeffs.dot(Z).real longi.shape = resolution lati.shape = resolution mech.shape = resolution c = contour(longi, lati, mech, [0]) pth3 = c.collections[0].get_paths()[0].vertices pth3 = rad2deg(pth3) pth4 = c.collections[0].get_paths()[1].vertices pth4 = rad2deg(pth4) hash_focal2 = rad2deg(hash_solns[event2]) event3 = 3153955 result = classify(*inputs[event3], kernel_degree=2) Alm = translate_to_sphharm(*result, kernel_degree=2) coeffs = array([Alm[0,0], Alm[1,-1], Alm[1,0], Alm[1,1], Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]]) svm_soln3, f = corr_shear(Alm) resolution = (200,400) longi, lati, Z = get_sph_harm(resolution=resolution) mech = coeffs.dot(Z).real longi.shape = resolution lati.shape = resolution mech.shape = resolution c = contour(longi, lati, mech, [0]) pth5 = c.collections[0].get_paths()[0].vertices pth5 = rad2deg(pth5) pth6 = c.collections[0].get_paths()[1].vertices pth6 = rad2deg(pth6) hash_focal3 = rad2deg(hash_solns[event3]) fig = plt.figure(facecolor="white", figsize=(10,20)) ax = fig.add_subplot(221, projection='stereonet') ax.rake(pth1[:,0], pth1[:,1] +90.0, 90.0, ':', color='red', linewidth=3) ax.rake(pth2[:,0], pth2[:,1] +90.0, 90.0, ':', color='red', linewidth=3) strike, dip, rake = svm_soln ax.plane(strike, dip, '-r', linewidth=2) strike, dip, rake = aux_plane(*svm_soln) ax.plane(strike, dip, '-r', linewidth=2) strike, dip, rake = hash_focal ax.plane(strike-90, dip, 'g-', linewidth=2) strike, dip, rake = aux_plane(*hash_focal) ax.plane(strike-90, dip,'g-', linewidth=2) azi = rad2deg(polarity_data[event][:,0]) toa = rad2deg(polarity_data[event][:,1]) polarity = polarity_data[event][:,2] for a, t, p in zip(azi, toa, polarity): if p > 0: ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red') else: ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white') ax.grid() ax = 
fig.add_subplot(222, projection='stereonet') ax.rake(pth3[:,0], pth3[:,1] +90.0, 90.0, ':', color='red', linewidth=3) ax.rake(pth4[:,0], pth4[:,1] +90.0, 90.0, ':', color='red', linewidth=3) strike, dip, rake = svm_soln2 ax.plane(strike, dip, '-r', linewidth=2) strike, dip, rake = aux_plane(*svm_soln2) ax.plane(strike, dip, '-r', linewidth=2) strike, dip, rake = hash_focal2 ax.plane(strike-90, dip, 'g-', linewidth=2) strike, dip, rake = aux_plane(*hash_focal2) ax.plane(strike-90, dip,'g-', linewidth=2) azi = rad2deg(polarity_data[event2][:,0]) toa = rad2deg(polarity_data[event2][:,1]) polarity = polarity_data[event2][:,2] for a, t, p in zip(azi, toa, polarity): if p > 0: ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red') else: ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white') ax.grid() ax = fig.add_subplot(224, projection='stereonet') ax.rake(pth5[:,0], pth5[:,1] +90.0, 90.0, ':', color='red', linewidth=3) ax.rake(pth6[:,0], pth6[:,1] +90.0, 90.0, ':', color='red', linewidth=3) strike, dip, rake = svm_soln3 ax.plane(strike, dip, '-r', linewidth=2) strike, dip, rake = aux_plane(*svm_soln3) ax.plane(strike, dip, '-r', linewidth=2) strike, dip, rake = hash_focal3 ax.plane(strike-90, dip, 'g-', linewidth=2) strike, dip, rake = aux_plane(*hash_focal3) ax.plane(strike-90, dip,'g-', linewidth=2) azi = rad2deg(polarity_data[event3][:,0]) toa = rad2deg(polarity_data[event3][:,1]) polarity = polarity_data[event3][:,2] for a, t, p in zip(azi, toa, polarity): if p > 0: ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red') else: ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white') ax.grid() plt.tight_layout(pad=4.0, h_pad=20.0) plt.show()
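The classify, spherical-harmonic, and contour steps above are repeated verbatim for each of the three events; a sketch of how that block could be collected into one helper (the function name is hypothetical, the calls and signatures are the ones already used in this script):

def nodal_curves_for_event(event_id, inputs, resolution=(200, 400)):
    # classify polarities and translate to spherical-harmonic coefficients
    result = classify(*inputs[event_id], kernel_degree=2)
    Alm = translate_to_sphharm(*result, kernel_degree=2)
    coeffs = array([Alm[0, 0], Alm[1, -1], Alm[1, 0], Alm[1, 1],
                    Alm[2, -2], Alm[2, -1], Alm[2, 0], Alm[2, 1], Alm[2, 2]])
    svm_soln, _ = corr_shear(Alm)
    # evaluate the mechanism on the sphere and extract the zero-level contours
    longi, lati, Z = get_sph_harm(resolution=resolution)
    mech = coeffs.dot(Z).real
    longi.shape = resolution
    lati.shape = resolution
    mech.shape = resolution
    c = contour(longi, lati, mech, [0])
    paths = [rad2deg(p.vertices) for p in c.collections[0].get_paths()]
    return svm_soln, paths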
1.992188
2
gigantumcli/changelog.py
fossabot/gigantum-cli
0
12362
<filename>gigantumcli/changelog.py # Copyright (c) 2017 FlashX, LLC # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import requests import json class ChangeLog(object): """Class to provide an interface to the posted ChangeLog information""" def __init__(self): """Constructor""" # Load data self._change_log_url = "https://s3.amazonaws.com/io.gigantum.changelog/changelog.json" self.data = self._load_data() def _load_data(self): """Load the changelog data file from remote source Returns: dict """ data = None try: response = requests.get(self._change_log_url) data = response.json() finally: return data def is_update_available(self, tag): """Method to check if an update is available using the changelog as a history Args: tag(str): The 8-char short hash tag for the CURRENT image in used Returns: bool """ latest_hash = self.data['latest']['id'] return latest_hash != tag def get_changelog(self, tag="latest"): """Method to print the changelog data Args: tag(str): Version of the changelog to grab Returns: str """ if not self.data: # No changelog data was available...probably no internet connection return None if tag not in self.data: raise ValueError("Tag {} not available".format(tag)) data = self.data[tag] msg = "Version: {}\n".format(data['id']) msg = "{}Release Date: {}\n".format(msg, data['date']) msg = "{}Note: \n".format(msg) # Show notices if 'messages' in data: for note in data['messages']: msg = "{} - {}\n".format(msg, note) # Show changes for change_key in data['changes']: msg = "{}\n{}: \n".format(msg, change_key) for change_str in data['changes'][change_key]: msg = "{} - {}\n".format(msg, change_str) return msg
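A short usage sketch for the class above; the tag value is a placeholder for the 8-character short hash of the image currently in use, and the None check covers the case where the remote changelog could not be fetched:

cl = ChangeLog()
if cl.data is not None:                      # None when the changelog was unreachable
    if cl.is_update_available("abcd1234"):   # placeholder image tag
        print(cl.get_changelog("latest"))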
2.328125
2
main_old/understanding_smoothing_microsoft.py
benjaminleroy/smooth_rf
3
12363
import numpy as np import pandas as pd import matplotlib.pyplot as plt import sklearn.ensemble import sklearn.metrics import sklearn import progressbar import sklearn.model_selection from plotnine import * import pdb import sys sys.path.append("smooth_rf/") import smooth_base import smooth_level # function def average_depth(random_forest, data): """ calculate the average depth of each point (average across trees) Arguments: ---------- random_forest : sklearn random forest model (fit) data : array (n, p) data frame that can be predicted from random_forest Returns: -------- average_depth : array (n,) vector of average depth in forest of each data point """ # test: #rf_fit #smooth_rf_opt #d1 = average_depth(rf_fit, data) #d2 = average_depth(smooth_rf_opt, data) #np.all(d1 == d2) n_trees = len(random_forest.estimators_) n_obs = data.shape[0] depth = np.zeros(n_obs) for t in random_forest.estimators_: d_path = t.decision_path(data) depth = depth + np.array(d_path.sum(axis = 1)).ravel() return depth / n_trees # start of analysis data, y = smooth_base.generate_data(large_n = 650) data_vis = pd.DataFrame(data = {"x1":data[:,0], "x2":data[:,1], "y":y}, columns = ["x1","x2","y"]) ggout = ggplot(data_vis) +\ geom_point(aes(x = "x1",y ="x2", color = "factor(y)")) +\ theme_minimal() +\ labs(x= "X1", y = "X2", color = "value (minus 100)") rf = sklearn.ensemble.RandomForestRegressor(n_estimators = 300) rf_fit = rf.fit(data,y) smooth_rf_opt, smooth_rf_last ,_, _ = smooth_base.smooth( rf_fit, X_trained = data, y_trained = y.ravel(), X_tune = None, y_tune = None, resample_tune= False, # oob no_constraint = False, subgrad_max_num = 10000, subgrad_t_fix = 1, parents_all=True, verbose = True, all_trees = False, initial_lamb_seed = None) # test data data_test, y_test = smooth_base.generate_data(large_n = 10000) reorder = np.random.choice(data_test.shape[0], size = data_test.shape[0], replace= False) data_test = data_test[reorder,:] y_test = y_test[reorder] yhat_base = rf_fit.predict(data_test) yhat_smooth = smooth_rf_opt.predict(data_test) base_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_base) smooth_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_smooth) error_base = np.abs(yhat_base - y_test) error_smooth = np.abs(yhat_smooth - y_test) extreme_binary = np.max([np.max(np.abs(error_base)), np.max(np.abs(error_smooth))]) col_vis = error_base - error_smooth extreme = np.max(np.abs(col_vis)) mean_depth_test = average_depth(rf_fit,data_test) data_vis = pd.DataFrame(data = {"X1":data_test[:,0], "X2":data_test[:,1], "y": y_test.ravel(), "error_base":error_base.copy(), "error_smooth":error_smooth.copy(), "error":col_vis.copy(), "mean_depth":mean_depth_test.copy()}, columns = ["X1","X2","y","error", "error_base","error_smooth", "mean_depth"]) a = ggplot(data_vis) +\ geom_point(aes(x = "X1", y="X2", color = "error"), size = .5) +\ scale_color_continuous(name = "bwr", limits= [-extreme, extreme]) +\ theme_bw() +\ labs(color = "Difference in Error", title = r'Difference in Error ($Error_{base} - Error_{smooth}$)') b = ggplot(data_vis) +\ geom_point(aes(x = "X1", y="X2", color = "error_base"), size = .5) +\ scale_color_continuous(name = "binary", limits= [0, extreme_binary]) +\ theme_bw() +\ labs(color = "Error", title = "Error from Base Random Forest") c = ggplot(data_vis) +\ geom_point(aes(x = "X1", y="X2", color = "error_smooth"), size = .5) +\ scale_color_continuous(name = "binary", limits= [0, extreme_binary]) +\ theme_bw() +\ labs(color = "Error", title = "Error 
from Smoothed Random Forest") d = ggplot(data_vis) +\ geom_point(aes(x = "X1", y="X2", color = "factor(y)"), size = .5) +\ theme_bw() +\ labs(color = "True Value (discrete)", title = "Test Set True Values") e = ggplot(data_vis,aes(x = "mean_depth", y = "error")) +\ geom_point(alpha = .1) +\ theme_bw() +\ labs(x = "Mean depth in Forest", y = "Difference in Error", title = "Lack of relationship between diff in errors and depth") f = ggplot(data_vis, aes(x = "X1", y = "X2", color = "mean_depth")) +\ geom_point() +\ scale_color_continuous(name = "Blues") +\ theme_bw() +\ labs(color = "Mean depth in Forest", title = "Mean depth in Forest (Depth averaged across trees)") g = ggplot(data_vis) +\ geom_point(aes(x = "error_base", y = "error_smooth"), alpha = .05) +\ geom_abline(intercept = 0, slope = 1) +\ theme_bw() +\ labs(x = "Error from Random Forest", y = "Error from Smooth Random Forest", title = "Comparing Errors Between Models", subtitle = r"(total error: rf: %f vs srf: %f)" %\ (base_mse, smooth_mse)) save_as_pdf_pages([a + theme(figure_size = (8,6))], filename = "images/diff_error"+"_understanding_smoothing.pdf") save_as_pdf_pages([b + theme(figure_size = (8,6))], filename = "images/error_base"+"_understanding_smoothing.pdf") save_as_pdf_pages([c + theme(figure_size = (8,6))], filename = "images/error_smooth"+"_understanding_smoothing.pdf") save_as_pdf_pages([d + theme(figure_size = (8,6))], filename = "images/truth"+"_understanding_smoothing.pdf") save_as_pdf_pages([e + theme(figure_size = (8,6))], filename = "images/mean_depth_diff_error"+"_understanding_smoothing.pdf") save_as_pdf_pages([f + theme(figure_size = (8,6))], filename = "images/mean_depth"+"_understanding_smoothing.pdf") save_as_pdf_pages([g + theme(figure_size = (8,6))], filename = "images/error_vs_error"+"_understanding_smoothing.pdf") save_as_pdf_pages([a + theme(figure_size = (8,6)), b + theme(figure_size = (8,6)), c + theme(figure_size = (8,6)), d + theme(figure_size = (8,6)), e + theme(figure_size = (8,6)), f + theme(figure_size = (8,6)), g + theme(figure_size = (8,6))], filename = "images/understanding_smoothing.pdf") # some of these observations might be due to the decision on the values of the classes # we'll see
2.703125
3
AER/Experiments/Metrics.py
LeBenchmark/Interspeech2021
48
12364
<reponame>LeBenchmark/Interspeech2021 import numpy as np def CCC(y_true, y_pred): """ Calculate the Concordance Correlation Coefficient (CCC) for two numpy arrays. """ x = y_true y = y_pred xMean = x.mean() yMean = y.mean() xyCov = (x * y).mean() - (xMean * yMean) # xyCov = ((x-xMean) * (y-yMean)).mean() xVar = x.var() yVar = y.var() return 2 * xyCov / (xVar + yVar + (xMean - yMean) ** 2) def MSE(y_true, y_pred): """ Calculate the Mean Squared Error for two numpy arrays. """ mse = (np.square(y_true - y_pred)).mean(axis=0) return mse def RMSE(y_true, y_pred): """ Calculate the Root Mean Squared Error for two numpy arrays. """ return np.sqrt(MSE(y_true, y_pred)) def perfMeasure(y_actual, y_pred): """ Calculate the confusion-matrix counts (TP, FP, TN, FN) for two numpy arrays of binary labels coded as +1/-1. """ TP = 0 FP = 0 TN = 0 FN = 0 for i in range(len(y_pred)): if y_actual[i]==y_pred[i]==1: TP += 1 if y_pred[i]==1 and y_actual[i]!=y_pred[i]: FP += 1 if y_actual[i]==y_pred[i]==-1: TN += 1 if y_pred[i]==-1 and y_actual[i]!=y_pred[i]: FN += 1 return (TP, FP, TN, FN)
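A quick sketch of the metrics above on made-up arrays; note that perfMeasure expects binary labels coded as +1/-1:

import numpy as np

y_true = np.array([0.10, 0.40, 0.35, 0.80])
y_pred = np.array([0.12, 0.38, 0.30, 0.72])
print(CCC(y_true, y_pred), MSE(y_true, y_pred), RMSE(y_true, y_pred))
print(perfMeasure(np.array([1, -1, 1, -1]), np.array([1, 1, -1, -1])))   # (TP, FP, TN, FN)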
2.96875
3
myo/device_listener.py
ehliang/myo-unlock
1
12365
<filename>myo/device_listener.py # Copyright (c) 2015 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import abc import six import time import threading import warnings from .lowlevel.enums import EventType, Pose, Arm, XDirection from .utils.threading import TimeoutClock from .vector import Vector from .quaternion import Quaternion class DeviceListener(six.with_metaclass(abc.ABCMeta)): """ Interface for listening to data sent from a Myo device. Return False from one of its callback methods to instruct the Hub to stop processing. The *DeviceListener* operates between the high and low level of the myo Python bindings. The ``myo`` object that is passed to callback methods is a :class:`myo.lowlevel.ctyping.Myo` object. """ def on_event(self, kind, event): """ Called before any of the event callbacks. """ def on_event_finished(self, kind, event): """ Called after the respective event callbacks have been invoked. This method is *always* triggered, even if one of the callbacks requested the stop of the Hub. """ def on_pair(self, myo, timestamp): pass def on_unpair(self, myo, timestamp): pass def on_connect(self, myo, timestamp): pass def on_disconnect(self, myo, timestamp): pass def on_pose(self, myo, timestamp, pose): pass def on_orientation_data(self, myo, timestamp, orientation): pass def on_accelerometor_data(self, myo, timestamp, acceleration): pass def on_gyroscope_data(self, myo, timestamp, gyroscope): pass def on_rssi(self, myo, timestamp, rssi): pass def on_emg(self, myo, timestamp, emg): pass def on_unsync(self, myo, timestamp): pass def on_sync(self, myo, timestamp, arm, x_direction): pass def on_unlock(self, myo, timestamp): pass def on_lock(self, myo, timestamp): pass class Feed(DeviceListener): """ This class implements the :class:`DeviceListener` interface to collect all data and make it available to another thread on-demand. .. 
code-block:: python import myo as libmyo feed = libmyo.device_listener.Feed() hub = libmyo.Hub() hub.run(1000, feed) try: while True: myos = feed.get_connected_devices() if myos: print myos[0], myos[0].orientation time.sleep(0.5) finally: hub.stop(True) hub.shutdown() """ class MyoProxy(object): __slots__ = ('synchronized,_pair_time,_unpair_time,_connect_time,' '_disconnect_time,_myo,_emg,_orientation,_acceleration,' '_gyroscope,_pose,_arm,_xdir,_rssi,_firmware_version').split(',') def __init__(self, low_myo, timestamp, firmware_version): super(Feed.MyoProxy, self).__init__() self.synchronized = threading.Condition() self._pair_time = timestamp self._unpair_time = None self._connect_time = None self._disconnect_time = None self._myo = low_myo self._emg = None self._orientation = Quaternion.identity() self._acceleration = Vector(0, 0, 0) self._gyroscope = Vector(0, 0, 0) self._pose = Pose.rest self._arm = None self._xdir = None self._rssi = None self._firmware_version = firmware_version def __repr__(self): result = '<MyoProxy (' with self.synchronized: if self.connected: result += 'connected) at 0x{0:x}>'.format(self._myo.value) else: result += 'disconnected)>' return result def __assert_connected(self): if not self.connected: raise RuntimeError('Myo was disconnected') @property def connected(self): with self.synchronized: return (self._connect_time is not None and self._disconnect_time is None) @property def paired(self): with self.synchronized: return (self.myo_ is None or self._unpair_time is not None) @property def pair_time(self): return self._pair_time @property def unpair_time(self): with self.synchronized: return self._unpair_time @property def connect_time(self): return self._connect_time @property def disconnect_time(self): with self.synchronized: return self._disconnect_time @property def firmware_version(self): return self._firmware_version @property def orientation(self): with self.synchronized: return self._orientation.copy() @property def acceleration(self): with self.synchronized: return self._acceleration.copy() @property def gyroscope(self): with self.synchronized: return self._gyroscope.copy() @property def pose(self): with self.synchronized: return self._pose @property def arm(self): with self.synchronized: return self._arm @property def x_direction(self): with self.synchronized: return self._xdir @property def rssi(self): with self.synchronized: return self._rssi def set_locking_policy(self, locking_policy): with self.synchronized: self.__assert_connected() self._myo.set_locking_policy(locking_policy) def set_stream_emg(self, emg): with self.synchronized: self.__assert_connected() self._myo.set_stream_emg(emg) def vibrate(self, vibration_type): with self.synchronized: self.__assert_connected() self._myo.vibrate(vibration_type) def request_rssi(self): """ Requests the RSSI of the Myo armband. Until the RSSI is retrieved, :attr:`rssi` returns None. """ with self.synchronized: self.__assert_connected() self._rssi = None self._myo.request_rssi() def __init__(self): super(Feed, self).__init__() self.synchronized = threading.Condition() self._myos = {} def get_devices(self): """ get_devices() -> list of Feed.MyoProxy Returns a list of paired and connected Myo's. """ with self.synchronized: return list(self._myos.values()) def get_connected_devices(self): """ get_connected_devices(self) -> list of Feed.MyoProxy Returns a list of connected Myo's. 
""" with self.synchronized: return [myo for myo in self._myos.values() if myo.connected] def wait_for_single_device(self, timeout=None, interval=0.5): """ wait_for_single_device(timeout) -> Feed.MyoProxy or None Waits until a Myo is was paired **and** connected with the Hub and returns it. If the *timeout* is exceeded, returns None. This function will not return a Myo that is only paired but not connected. :param timeout: The maximum time to wait for a device. :param interval: The interval at which the function should exit sleeping. We can not sleep endlessly, otherwise the main thread can not be exit, eg. through a KeyboardInterrupt. """ timer = TimeoutClock(timeout) start = time.time() with self.synchronized: # As long as there are no Myo's connected, wait until we # get notified about a change. while not timer.exceeded: # Check if we found a Myo that is connected. for myo in six.itervalues(self._myos): if myo.connected: return myo remaining = timer.remaining if interval is not None and remaining > interval: remaining = interval self.synchronized.wait(remaining) return None # DeviceListener def on_event(self, kind, event): myo = event.myo timestamp = event.timestamp with self.synchronized: if kind == EventType.paired: fmw_version = event.firmware_version self._myos[myo.value] = self.MyoProxy(myo, timestamp, fmw_version) self.synchronized.notify_all() return True elif kind == EventType.unpaired: try: proxy = self._myos.pop(myo.value) except KeyError: message = "Myo 0x{0:x} was not in the known Myo's list" warnings.warn(message.format(myo.value), RuntimeWarning) else: # Remove the reference handle from the Myo proxy. with proxy.synchronized: proxy._unpair_time = timestamp proxy._myo = None finally: self.synchronized.notify_all() return True else: try: proxy = self._myos[myo.value] except KeyError: message = "Myo 0x{0:x} was not in the known Myo's list" warnings.warn(message.format(myo.value), RuntimeWarning) return True with proxy.synchronized: if kind == EventType.connected: proxy._connect_time = timestamp elif kind == EventType.disconnected: proxy._disconnect_time = timestamp elif kind == EventType.emg: proxy._emg = event.emg elif kind == EventType.arm_synced: proxy._arm = event.arm proxy._xdir = event.x_direction elif kind == EventType.rssi: proxy._rssi = event.rssi elif kind == EventType.pose: proxy._pose = event.pose elif kind == EventType.orientation: proxy._orientation = event.orientation proxy._gyroscope = event.gyroscope proxy._acceleration = event.acceleration
2.203125
2
utilities/poisson.py
lukepinkel/pylmm
0
12366
<reponame>lukepinkel/pylmm #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Aug 12 13:34:49 2020 @author: lukepinkel """ import numpy as np import scipy as sp import scipy.special def poisson_logp(x, mu, logp=True): """ Poisson log-probability mass function, x*log(mu) - log(x!) - mu. Returns the probability itself when logp is False. """ p = sp.special.xlogy(x, mu) - sp.special.gammaln(x + 1) - mu if not logp: p = np.exp(p) return p
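A quick consistency check of the helper above against SciPy's Poisson log-PMF, on made-up inputs:

import numpy as np
from scipy import stats

x = np.array([0, 1, 2, 5])
mu = 2.0
print(np.allclose(poisson_logp(x, mu), stats.poisson.logpmf(x, mu)))   # expected: True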
2.109375
2
iotest.py
AaltoRSE/ImageNetTools
1
12367
''' Created on Sep 29, 2021 @author: thomas ''' import ImageNetTools import sys import getopt def main(argv): try: opts, args = getopt.getopt(argv, "hd:", ["dataset="]) except getopt.GetoptError: printHelp() sys.exit(2) for opt, arg in opts: if opt == "-h": printHelp() sys.exit() elif opt in ("-d", "--dataset"): ImageNetTools.benchmarkIOSpeeds(arg) sys.exit() def printHelp(): print('Run IO speed testing with a given dataset') print('python iotest.py -d /path/to/dataset') if __name__ == '__main__': main(sys.argv[1:])
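The benchmark can be run from the command line as shown in printHelp, or called directly from Python; the path below is a placeholder:

import ImageNetTools
ImageNetTools.benchmarkIOSpeeds("/path/to/dataset")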
2.4375
2
sagas/tests/sinkers/test_results_render.py
samlet/stack
3
12368
<filename>sagas/tests/sinkers/test_results_render.py<gh_stars>1-10 """ $ pytest -s -v test_results_render.py """ import logging import pytest from sagas.nlu.results_render import ResultsRender def test_descriptor(): import sagas.nlu.results_render sagas.nlu.results_render.logger.setLevel(logging.DEBUG) # $ str 'Rezervasyonumu onaylamak istiyorum.' results = [{'delivery': 'sentence', 'inspector': 'specs_of', 'part': '_', 'pattern': 'behave_reservation', 'provider': 'default', 'value': {'category': 'request', 'pos': 'v', 'subs': [{'candidates': 'request', 'substitute': 'request', 'word': 'iste'}], 'words': ['istiyorum/iste']}}, {'delivery': 'slot', 'inspector': 'pipes', 'part': 'verb:obj/obj', 'pattern': 'behave_reservation', 'provider': 'cat/cat_proc', 'value': [{'cat': 'reservation', 'path': '/obj/obj', 'pos': 'noun', 'trans': 'reservation', 'value': 'reservation', 'word': 'rezervasyon'}]}, {'delivery': 'sentence', 'inspector': 'kind_of', 'part': 'obj', 'pattern': 'behave_reservation', 'provider': 'default', 'value': {'category': 'approve', 'pos': '*', 'word': 'onaylamak/onayla'}}] dsp=ResultsRender() patt = 'behave {obj:_} for {obj:/obj}, modal {_:_}' assert dsp.render(patt, results)=='behave approve for reservation, modal request'
2.203125
2
equatation.py
asteinig4018/mhacks19
1
12369
import json import math from HistoricalTweetDataFetcher import getHistoricalData joelsarray = getHistoricalData(0) arrs = [] arrm = [] arrp = [] arrsTotal = 0 arrmTotal = 0 ncount = 0 ccount = 0 lcount = 0 time = joelsarray[0]["h"] for dictionary in joelsarray: arrs.append(dictionary["s"]) arrm.append(dictionary["m"]) arrp.append(dictionary["p"]) for x in range(len(arrs)): arrsTotal += arrs[x] arrmTotal += arrm[x] if arrp[x]=='l': lcount += 1 elif arrp[x]=='c': ccount += 1 elif arrp[x]=='n': ncount += 1 arrsAvg = arrsTotal/len(arrs)#sentiment value arrmAvg = arrmTotal/len(arrm)#magnitude value #print(arrsTotal) #print(len(arrs)) #rint(arrsAvg) #print(arrmAvg) #print(lcount) #print(ccount) ################################################################### filename2 = "weather_us.json" if filename2: with open(filename2, 'r') as f: weatherstore = json.load(f) for x in range(50): statearray = list(weatherstore.keys()) statesAverage = 0 for state in statearray: for x in range(50): temptemp = float(weatherstore[state]["temperature"]) temphigh = float(weatherstore[state]["average_monthly_high"]) templow = float(weatherstore[state]["average_monthly_low"]) statesAverage+=((temptemp-temphigh)*(templow-temptemp))/(math.pow(((temphigh+templow)/2),2)) statesAverage = statesAverage/50 #this is the average tempeature multiplyer print(statesAverage) ##################################################################################### filename3 = "sp500_price.json" if filename3: with open(filename3, 'r') as f: stockdata = json.load(f) stockpricecurrent = stockdata["current_price"] stockpricechange = stockdata["percent_change"]#percent change of S&P500 if stockpricechange <= 0.73 and stockpricechange >=-0.73: stockmultiply = 0; else: stockmultiply = stockpricechange*0.5*0.73 print(stockpricechange) ######################################################################################### filename4 = "trump_approval_rating.json" if filename4: with open(filename4, 'r') as f: approvalratingdata = json.load(f) approveAvg = approvalratingdata["approve_avg"]#approval average data currentApproval = approvalratingdata["approve"]#current approval percentage ######################################################################################## def equation(sentiment, stockmultiply, pollcurrent, pollaverage, avgtemp, lvalue, cvalue, ltweets, ctweet, time, const1 = 70, const2 = 60, const3 = 50, const4 = 45, const5 = 25, slideInput = True): point = const1*(sentiment) + const2*(stockmultiply)+const3*((pollcurrent-pollaverage)/(pollaverage))+const4*avgtemp + const5/2*lvalue*ltweets+ const5/2*cvalue+ctweet+const5 filename5 = "data.json" if(slideInput==True): if filename5: with open(filename5, 'r') as f: outputdata = json.load(f) print(outputdata) outputdata["chartData"]["labels"][0]=outputdata["chartData"]["labels"][1] outputdata["chartData"]["labels"][1]=outputdata["chartData"]["labels"][2] outputdata["chartData"]["labels"][2]=outputdata["chartData"]["labels"][3] outputdata["chartData"]["labels"][3]=outputdata["chartData"]["labels"][4] outputdata["chartData"]["labels"][4]=outputdata["chartData"]["labels"][5] outputdata["chartData"]["labels"][5]=outputdata["chartData"]["labels"][6] outputdata["chartData"]["labels"][6] = str(time)+":00" outputdata["chartData"]["thisWeek"][0]=outputdata["chartData"]["thisWeek"][1] outputdata["chartData"]["thisWeek"][1]=outputdata["chartData"]["thisWeek"][2] outputdata["chartData"]["thisWeek"][2]=outputdata["chartData"]["thisWeek"][3] 
outputdata["chartData"]["thisWeek"][3]=outputdata["chartData"]["thisWeek"][4] outputdata["chartData"]["thisWeek"][4]=outputdata["chartData"]["thisWeek"][5] outputdata["chartData"]["thisWeek"][5]=outputdata["chartData"]["thisWeek"][6] outputdata["chartData"]["thisWeek"][6] = point with open(filename5, 'w') as f: json.dump(outputdata, f) else: if filename5: with open(filename5, 'r') as f: outputdata = json.load(f) print(outputdata) outputdata["chartData"]["labels"][0]=outputdata["chartData"]["labels"][1] outputdata["chartData"]["labels"][1]=outputdata["chartData"]["labels"][2] outputdata["chartData"]["labels"][2]=outputdata["chartData"]["labels"][3] outputdata["chartData"]["labels"][3]=outputdata["chartData"]["labels"][4] outputdata["chartData"]["labels"][4]=outputdata["chartData"]["labels"][5] outputdata["chartData"]["labels"][5]=outputdata["chartData"]["labels"][6] outputdata["chartData"]["labels"][6] = str(time) + ":00" outputdata["chartData"]["thisWeek"][0]=outputdata["chartData"]["thisWeek"][1] outputdata["chartData"]["thisWeek"][1]=outputdata["chartData"]["thisWeek"][2] outputdata["chartData"]["thisWeek"][2]=outputdata["chartData"]["thisWeek"][3] outputdata["chartData"]["thisWeek"][3]=outputdata["chartData"]["thisWeek"][4] outputdata["chartData"]["thisWeek"][4]=outputdata["chartData"]["thisWeek"][5] outputdata["chartData"]["thisWeek"][5]=outputdata["chartData"]["thisWeek"][6] outputdata["chartData"]["thisWeek"][6] = point with open(filename5, 'w') as f: json.dump(outputdata, f) return point my_list = equation(arrsAvg, stockmultiply, currentApproval, approveAvg, statesAverage, 0, 0, lcount, ccount, 17, 70, 60, 50, 45, 25)
2.671875
3
oscar/apps/offer/models.py
endgame/django-oscar
0
12370
from decimal import Decimal as D, ROUND_DOWN, ROUND_UP import math import datetime from django.core import exceptions from django.template.defaultfilters import slugify from django.db import models from django.utils.translation import ungettext, ugettext as _ from django.utils.importlib import import_module from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.conf import settings from oscar.apps.offer.managers import ActiveOfferManager from oscar.templatetags.currency_filters import currency from oscar.models.fields import PositiveDecimalField, ExtendedURLField def load_proxy(proxy_class): module, classname = proxy_class.rsplit('.', 1) try: mod = import_module(module) except ImportError, e: raise exceptions.ImproperlyConfigured( "Error importing module %s: %s" % (module, e)) try: return getattr(mod, classname) except AttributeError: raise exceptions.ImproperlyConfigured( "Module %s does not define a %s" % (module, classname)) class ConditionalOffer(models.Model): """ A conditional offer (eg buy 1, get 10% off) """ name = models.CharField( _("Name"), max_length=128, unique=True, help_text=_("This is displayed within the customer's basket")) slug = models.SlugField(_("Slug"), max_length=128, unique=True, null=True) description = models.TextField(_("Description"), blank=True, null=True) # Offers come in a few different types: # (a) Offers that are available to all customers on the site. Eg a # 3-for-2 offer. # (b) Offers that are linked to a voucher, and only become available once # that voucher has been applied to the basket # (c) Offers that are linked to a user. Eg, all students get 10% off. The # code to apply this offer needs to be coded # (d) Session offers - these are temporarily available to a user after some # trigger event. Eg, users coming from some affiliate site get 10% off. SITE, VOUCHER, USER, SESSION = ("Site", "Voucher", "User", "Session") TYPE_CHOICES = ( (SITE, _("Site offer - available to all users")), (VOUCHER, _("Voucher offer - only available after entering the appropriate voucher code")), (USER, _("User offer - available to certain types of user")), (SESSION, _("Session offer - temporary offer, available for a user for the duration of their session")), ) offer_type = models.CharField(_("Type"), choices=TYPE_CHOICES, default=SITE, max_length=128) condition = models.ForeignKey('offer.Condition', verbose_name=_("Condition")) benefit = models.ForeignKey('offer.Benefit', verbose_name=_("Benefit")) # Some complicated situations require offers to be applied in a set order. priority = models.IntegerField(_("Priority"), default=0, help_text=_("The highest priority offers are applied first")) # AVAILABILITY # Range of availability. Note that if this is a voucher offer, then these # dates are ignored and only the dates from the voucher are used to # determine availability. start_date = models.DateField(_("Start Date"), blank=True, null=True) end_date = models.DateField( _("End Date"), blank=True, null=True, help_text=_("Offers are not active on their end date, only " "the days preceding")) # Use this field to limit the number of times this offer can be applied in # total. Note that a single order can apply an offer multiple times so # this is not the same as the number of orders that can use it. 
max_global_applications = models.PositiveIntegerField( _("Max global applications"), help_text=_("The number of times this offer can be used before it " "is unavailable"), blank=True, null=True) # Use this field to limit the number of times this offer can be used by a # single user. This only works for signed-in users - it doesn't really # make sense for sites that allow anonymous checkout. max_user_applications = models.PositiveIntegerField( _("Max user applications"), help_text=_("The number of times a single user can use this offer"), blank=True, null=True) # Use this field to limit the number of times this offer can be applied to # a basket (and hence a single order). max_basket_applications = models.PositiveIntegerField( blank=True, null=True, help_text=_("The number of times this offer can be applied to a " "basket (and order)")) # Use this field to limit the amount of discount an offer can lead to. # This can be helpful with budgeting. max_discount = models.DecimalField( _("Max discount"), decimal_places=2, max_digits=12, null=True, blank=True, help_text=_("When an offer has given more discount to orders " "than this threshold, then the offer becomes " "unavailable")) # TRACKING total_discount = models.DecimalField( _("Total Discount"), decimal_places=2, max_digits=12, default=D('0.00')) num_applications = models.PositiveIntegerField( _("Number of applications"), default=0) num_orders = models.PositiveIntegerField( _("Number of Orders"), default=0) redirect_url = ExtendedURLField(_("URL redirect (optional)"), blank=True) date_created = models.DateTimeField(_("Date Created"), auto_now_add=True) objects = models.Manager() active = ActiveOfferManager() # We need to track the voucher that this offer came from (if it is a # voucher offer) _voucher = None class Meta: ordering = ['-priority'] verbose_name = _("Conditional Offer") verbose_name_plural = _("Conditional Offers") def save(self, *args, **kwargs): if not self.slug: self.slug = slugify(self.name) return super(ConditionalOffer, self).save(*args, **kwargs) def get_absolute_url(self): return reverse('offer:detail', kwargs={'slug': self.slug}) def __unicode__(self): return self.name def clean(self): if self.start_date and self.end_date and self.start_date > self.end_date: raise exceptions.ValidationError(_('End date should be later than start date')) def is_active(self, test_date=None): """ Test whether this offer is active and can be used by customers """ if test_date is None: test_date = datetime.date.today() predicates = [self.get_max_applications() > 0] if self.start_date: predicates.append(self.start_date <= test_date) if self.end_date: predicates.append(test_date < self.end_date) if self.max_discount: predicates.append(self.total_discount < self.max_discount) return all(predicates) def is_condition_satisfied(self, basket): return self._proxy_condition().is_satisfied(basket) def is_condition_partially_satisfied(self, basket): return self._proxy_condition().is_partially_satisfied(basket) def get_upsell_message(self, basket): return self._proxy_condition().get_upsell_message(basket) def apply_benefit(self, basket): """ Applies the benefit to the given basket and returns the discount. 
""" if not self.is_condition_satisfied(basket): return D('0.00') return self._proxy_benefit().apply(basket, self._proxy_condition(), self) def set_voucher(self, voucher): self._voucher = voucher def get_voucher(self): return self._voucher def get_max_applications(self, user=None): """ Return the number of times this offer can be applied to a basket """ limits = [10000] if self.max_user_applications and user: limits.append(max(0, self.max_user_applications - self.get_num_user_applications(user))) if self.max_basket_applications: limits.append(self.max_basket_applications) if self.max_global_applications: limits.append( max(0, self.max_global_applications - self.num_applications)) return min(limits) def get_num_user_applications(self, user): OrderDiscount = models.get_model('order', 'OrderDiscount') aggregates = OrderDiscount.objects.filter( offer_id=self.id, order__user=user).aggregate( total=models.Sum('frequency')) return aggregates['total'] if aggregates['total'] is not None else 0 def shipping_discount(self, charge): return self._proxy_benefit().shipping_discount(charge) def _proxy_condition(self): """ Returns the appropriate proxy model for the condition """ field_dict = dict(self.condition.__dict__) for field in field_dict.keys(): if field.startswith('_'): del field_dict[field] if self.condition.proxy_class: klass = load_proxy(self.condition.proxy_class) return klass(**field_dict) klassmap = { self.condition.COUNT: CountCondition, self.condition.VALUE: ValueCondition, self.condition.COVERAGE: CoverageCondition} if self.condition.type in klassmap: return klassmap[self.condition.type](**field_dict) return self.condition def _proxy_benefit(self): """ Returns the appropriate proxy model for the benefit """ field_dict = dict(self.benefit.__dict__) for field in field_dict.keys(): if field.startswith('_'): del field_dict[field] klassmap = { self.benefit.PERCENTAGE: PercentageDiscountBenefit, self.benefit.FIXED: AbsoluteDiscountBenefit, self.benefit.MULTIBUY: MultibuyDiscountBenefit, self.benefit.FIXED_PRICE: FixedPriceBenefit, self.benefit.SHIPPING_ABSOLUTE: ShippingAbsoluteDiscountBenefit, self.benefit.SHIPPING_FIXED_PRICE: ShippingFixedPriceBenefit, self.benefit.SHIPPING_PERCENTAGE: ShippingPercentageDiscountBenefit} if self.benefit.type in klassmap: return klassmap[self.benefit.type](**field_dict) return self.benefit def record_usage(self, discount): self.num_applications += discount['freq'] self.total_discount += discount['discount'] self.num_orders += 1 self.save() record_usage.alters_data = True def availability_description(self): """ Return a description of when this offer is available """ sentences = [] if self.max_global_applications: desc = _( "Can be used %(total)d times " "(%(remainder)d remaining)") % { 'total': self.max_global_applications, 'remainder': self.max_global_applications - self.num_applications} sentences.append(desc) if self.max_user_applications: if self.max_user_applications == 1: desc = _("Can be used once per user") else: desc = _( "Can be used %(total)d times per user") % { 'total': self.max_user_applications} sentences.append(desc) if self.max_basket_applications: if self.max_user_applications == 1: desc = _("Can be used once per basket") else: desc = _( "Can be used %(total)d times per basket") % { 'total': self.max_basket_applications} sentences.append(desc) if self.start_date and self.end_date: desc = _("Available between %(start)s and %(end)s") % { 'start': self.start_date, 'end': self.end_date} sentences.append(desc) elif self.start_date: 
sentences.append(_("Available until %(start)s") % { 'start': self.start_date}) elif self.end_date: sentences.append(_("Available until %(end)s") % { 'end': self.end_date}) if self.max_discount: sentences.append(_("Available until a discount of %(max)s " "has been awarded") % { 'max': currency(self.max_discount)}) return "<br/>".join(sentences) class Condition(models.Model): COUNT, VALUE, COVERAGE = ("Count", "Value", "Coverage") TYPE_CHOICES = ( (COUNT, _("Depends on number of items in basket that are in " "condition range")), (VALUE, _("Depends on value of items in basket that are in " "condition range")), (COVERAGE, _("Needs to contain a set number of DISTINCT items " "from the condition range"))) range = models.ForeignKey( 'offer.Range', verbose_name=_("Range"), null=True, blank=True) type = models.CharField(_('Type'), max_length=128, choices=TYPE_CHOICES, null=True, blank=True) value = PositiveDecimalField(_('Value'), decimal_places=2, max_digits=12, null=True, blank=True) proxy_class = models.CharField(_("Custom class"), null=True, blank=True, max_length=255, unique=True, default=None) class Meta: verbose_name = _("Condition") verbose_name_plural = _("Conditions") def __unicode__(self): if self.proxy_class: return load_proxy(self.proxy_class).name if self.type == self.COUNT: return _("Basket includes %(count)d item(s) from %(range)s") % { 'count': self.value, 'range': unicode(self.range).lower()} elif self.type == self.COVERAGE: return _("Basket includes %(count)d distinct products from %(range)s") % { 'count': self.value, 'range': unicode(self.range).lower()} return _("Basket includes %(amount)s from %(range)s") % { 'amount': currency(self.value), 'range': unicode(self.range).lower()} description = __unicode__ def consume_items(self, basket, affected_lines): pass def is_satisfied(self, basket): """ Determines whether a given basket meets this condition. This is stubbed in this top-class object. The subclassing proxies are responsible for implementing it correctly. """ return False def is_partially_satisfied(self, basket): """ Determine if the basket partially meets the condition. This is useful for up-selling messages to entice customers to buy something more in order to qualify for an offer. 
""" return False def get_upsell_message(self, basket): return None def can_apply_condition(self, product): """ Determines whether the condition can be applied to a given product """ return (self.range.contains_product(product) and product.is_discountable and product.has_stockrecord) def get_applicable_lines(self, basket, most_expensive_first=True): """ Return line data for the lines that can be consumed by this condition """ line_tuples = [] for line in basket.all_lines(): product = line.product if not self.can_apply_condition(product): continue price = line.unit_price_incl_tax if not price: continue line_tuples.append((price, line)) if most_expensive_first: return sorted(line_tuples, reverse=True) return sorted(line_tuples) class Benefit(models.Model): range = models.ForeignKey( 'offer.Range', null=True, blank=True, verbose_name=_("Range")) # Benefit types PERCENTAGE, FIXED, MULTIBUY, FIXED_PRICE = ( "Percentage", "Absolute", "Multibuy", "Fixed price") SHIPPING_PERCENTAGE, SHIPPING_ABSOLUTE, SHIPPING_FIXED_PRICE = ( 'Shipping percentage', 'Shipping absolute', 'Shipping fixed price') TYPE_CHOICES = ( (PERCENTAGE, _("Discount is a % of the product's value")), (FIXED, _("Discount is a fixed amount off the product's value")), (MULTIBUY, _("Discount is to give the cheapest product for free")), (FIXED_PRICE, _("Get the products that meet the condition for a fixed price")), (SHIPPING_ABSOLUTE, _("Discount is a fixed amount off the shipping cost")), (SHIPPING_FIXED_PRICE, _("Get shipping for a fixed price")), (SHIPPING_PERCENTAGE, _("Discount is a % off the shipping cost")), ) type = models.CharField(_("Type"), max_length=128, choices=TYPE_CHOICES) value = PositiveDecimalField(_("Value"), decimal_places=2, max_digits=12, null=True, blank=True) # If this is not set, then there is no upper limit on how many products # can be discounted by this benefit. 
max_affected_items = models.PositiveIntegerField( _("Max Affected Items"), blank=True, null=True, help_text=_("Set this to prevent the discount consuming all items " "within the range that are in the basket.")) class Meta: verbose_name = _("Benefit") verbose_name_plural = _("Benefits") def __unicode__(self): if self.type == self.PERCENTAGE: desc = _("%(value)s%% discount on %(range)s") % { 'value': self.value, 'range': unicode(self.range).lower()} elif self.type == self.MULTIBUY: desc = _("Cheapest product is free from %s") % ( unicode(self.range).lower(),) elif self.type == self.FIXED_PRICE: desc = _("The products that meet the condition are " "sold for %(amount)s") % { 'amount': currency(self.value)} elif self.type == self.SHIPPING_PERCENTAGE: desc = _("%(value)s%% off shipping cost") % { 'value': self.value} elif self.type == self.SHIPPING_ABSOLUTE: desc = _("%(amount)s off shipping cost") % { 'amount': currency(self.value)} elif self.type == self.SHIPPING_FIXED_PRICE: desc = _("Get shipping for %(amount)s") % { 'amount': currency(self.value)} else: desc = _("%(amount)s discount on %(range)s") % { 'amount': currency(self.value), 'range': unicode(self.range).lower()} if self.max_affected_items: desc += ungettext(" (max %d item)", " (max %d items)", self.max_affected_items) % self.max_affected_items return desc description = __unicode__ def apply(self, basket, condition, offer=None): return D('0.00') def clean(self): if not self.type: raise ValidationError(_("Benefit requires a value")) method_name = 'clean_%s' % self.type.lower().replace(' ', '_') if hasattr(self, method_name): getattr(self, method_name)() def clean_multibuy(self): if not self.range: raise ValidationError( _("Multibuy benefits require a product range")) if self.value: raise ValidationError( _("Multibuy benefits don't require a value")) if self.max_affected_items: raise ValidationError( _("Multibuy benefits don't require a 'max affected items' " "attribute")) def clean_percentage(self): if not self.range: raise ValidationError( _("Percentage benefits require a product range")) if self.value > 100: raise ValidationError( _("Percentage discount cannot be greater than 100")) def clean_shipping_absolute(self): if not self.value: raise ValidationError( _("A discount value is required")) if self.range: raise ValidationError( _("No range should be selected as this benefit does not " "apply to products")) if self.max_affected_items: raise ValidationError( _("Shipping discounts don't require a 'max affected items' " "attribute")) def clean_shipping_percentage(self): if self.value > 100: raise ValidationError( _("Percentage discount cannot be greater than 100")) if self.range: raise ValidationError( _("No range should be selected as this benefit does not " "apply to products")) if self.max_affected_items: raise ValidationError( _("Shipping discounts don't require a 'max affected items' " "attribute")) def clean_shipping_fixed_price(self): if self.range: raise ValidationError( _("No range should be selected as this benefit does not " "apply to products")) if self.max_affected_items: raise ValidationError( _("Shipping discounts don't require a 'max affected items' " "attribute")) def clean_fixed_price(self): if self.range: raise ValidationError( _("No range should be selected as the condition range will " "be used instead.")) def clean_absolute(self): if not self.range: raise ValidationError( _("Percentage benefits require a product range")) def round(self, amount): """ Apply rounding to discount amount """ if hasattr(settings, 
'OSCAR_OFFER_ROUNDING_FUNCTION'): return settings.OSCAR_OFFER_ROUNDING_FUNCTION(amount) return amount.quantize(D('.01'), ROUND_DOWN) def _effective_max_affected_items(self): """ Return the maximum number of items that can have a discount applied during the application of this benefit """ return self.max_affected_items if self.max_affected_items else 10000 def can_apply_benefit(self, product): """ Determines whether the benefit can be applied to a given product """ return product.has_stockrecord and product.is_discountable def get_applicable_lines(self, basket, range=None): """ Return the basket lines that are available to be discounted :basket: The basket :range: The range of products to use for filtering. The fixed-price benefit ignores its range and uses the condition range """ if range is None: range = self.range line_tuples = [] for line in basket.all_lines(): product = line.product if (not range.contains(product) or not self.can_apply_benefit(product)): continue price = line.unit_price_incl_tax if not price: # Avoid zero price products continue if line.quantity_without_discount == 0: continue line_tuples.append((price, line)) # We sort lines to be cheapest first to ensure consistent applications return sorted(line_tuples) def shipping_discount(self, charge): return D('0.00') class Range(models.Model): """ Represents a range of products that can be used within an offer """ name = models.CharField(_("Name"), max_length=128, unique=True) includes_all_products = models.BooleanField(_('Includes All Products'), default=False) included_products = models.ManyToManyField('catalogue.Product', related_name='includes', blank=True, verbose_name=_("Included Products")) excluded_products = models.ManyToManyField('catalogue.Product', related_name='excludes', blank=True, verbose_name=_("Excluded Products")) classes = models.ManyToManyField('catalogue.ProductClass', related_name='classes', blank=True, verbose_name=_("Product Classes")) included_categories = models.ManyToManyField('catalogue.Category', related_name='includes', blank=True, verbose_name=_("Included Categories")) # Allow a custom range instance to be specified proxy_class = models.CharField(_("Custom class"), null=True, blank=True, max_length=255, default=None, unique=True) date_created = models.DateTimeField(_("Date Created"), auto_now_add=True) __included_product_ids = None __excluded_product_ids = None __class_ids = None class Meta: verbose_name = _("Range") verbose_name_plural = _("Ranges") def __unicode__(self): return self.name def contains_product(self, product): """ Check whether the passed product is part of this range """ # We look for shortcircuit checks first before # the tests that require more database queries. 
if settings.OSCAR_OFFER_BLACKLIST_PRODUCT and \ settings.OSCAR_OFFER_BLACKLIST_PRODUCT(product): return False # Delegate to a proxy class if one is provided if self.proxy_class: return load_proxy(self.proxy_class)().contains_product(product) excluded_product_ids = self._excluded_product_ids() if product.id in excluded_product_ids: return False if self.includes_all_products: return True if product.product_class_id in self._class_ids(): return True included_product_ids = self._included_product_ids() if product.id in included_product_ids: return True test_categories = self.included_categories.all() if test_categories: for category in product.categories.all(): for test_category in test_categories: if category == test_category or category.is_descendant_of(test_category): return True return False # Shorter alias contains = contains_product def _included_product_ids(self): if None == self.__included_product_ids: self.__included_product_ids = [row['id'] for row in self.included_products.values('id')] return self.__included_product_ids def _excluded_product_ids(self): if not self.id: return [] if None == self.__excluded_product_ids: self.__excluded_product_ids = [row['id'] for row in self.excluded_products.values('id')] return self.__excluded_product_ids def _class_ids(self): if None == self.__class_ids: self.__class_ids = [row['id'] for row in self.classes.values('id')] return self.__class_ids def num_products(self): if self.includes_all_products: return None return self.included_products.all().count() @property def is_editable(self): """ Test whether this product can be edited in the dashboard """ return self.proxy_class is None # ========== # Conditions # ========== class CountCondition(Condition): """ An offer condition dependent on the NUMBER of matching items from the basket. """ class Meta: proxy = True verbose_name = _("Count Condition") verbose_name_plural = _("Count Conditions") def is_satisfied(self, basket): """ Determines whether a given basket meets this condition """ num_matches = 0 for line in basket.all_lines(): if (self.can_apply_condition(line.product) and line.quantity_without_discount > 0): num_matches += line.quantity_without_discount if num_matches >= self.value: return True return False def _get_num_matches(self, basket): if hasattr(self, '_num_matches'): return getattr(self, '_num_matches') num_matches = 0 for line in basket.all_lines(): if (self.can_apply_condition(line.product) and line.quantity_without_discount > 0): num_matches += line.quantity_without_discount self._num_matches = num_matches return num_matches def is_partially_satisfied(self, basket): num_matches = self._get_num_matches(basket) return 0 < num_matches < self.value def get_upsell_message(self, basket): num_matches = self._get_num_matches(basket) delta = self.value - num_matches return ungettext('Buy %(delta)d more product from %(range)s', 'Buy %(delta)d more products from %(range)s', delta) % { 'delta': delta, 'range': self.range} def consume_items(self, basket, affected_lines): """ Marks items within the basket lines as consumed so they can't be reused in other offers. :basket: The basket :affected_lines: The lines that have been affected by the discount. This should be list of tuples (line, discount, qty) """ # We need to count how many items have already been consumed as part of # applying the benefit, so we don't consume too many items. 
num_consumed = 0 for line, __, quantity in affected_lines: num_consumed += quantity to_consume = max(0, self.value - num_consumed) if to_consume == 0: return for __, line in self.get_applicable_lines(basket, most_expensive_first=True): quantity_to_consume = min(line.quantity_without_discount, to_consume) line.consume(quantity_to_consume) to_consume -= quantity_to_consume if to_consume == 0: break class CoverageCondition(Condition): """ An offer condition dependent on the number of DISTINCT matching items from the basket. """ class Meta: proxy = True verbose_name = _("Coverage Condition") verbose_name_plural = _("Coverage Conditions") def is_satisfied(self, basket): """ Determines whether a given basket meets this condition """ covered_ids = [] for line in basket.all_lines(): if not line.is_available_for_discount: continue product = line.product if (self.can_apply_condition(product) and product.id not in covered_ids): covered_ids.append(product.id) if len(covered_ids) >= self.value: return True return False def _get_num_covered_products(self, basket): covered_ids = [] for line in basket.all_lines(): if not line.is_available_for_discount: continue product = line.product if (self.can_apply_condition(product) and product.id not in covered_ids): covered_ids.append(product.id) return len(covered_ids) def get_upsell_message(self, basket): delta = self.value - self._get_num_covered_products(basket) return ungettext('Buy %(delta)d more product from %(range)s', 'Buy %(delta)d more products from %(range)s', delta) % { 'delta': delta, 'range': self.range} def is_partially_satisfied(self, basket): return 0 < self._get_num_covered_products(basket) < self.value def consume_items(self, basket, affected_lines): """ Marks items within the basket lines as consumed so they can't be reused in other offers. """ # Determine products that have already been consumed by applying the # benefit consumed_products = [] for line, __, quantity in affected_lines: consumed_products.append(line.product) to_consume = max(0, self.value - len(consumed_products)) if to_consume == 0: return for line in basket.all_lines(): product = line.product if not self.can_apply_condition(product): continue if product in consumed_products: continue if not line.is_available_for_discount: continue # Only consume a quantity of 1 from each line line.consume(1) consumed_products.append(product) to_consume -= 1 if to_consume == 0: break def get_value_of_satisfying_items(self, basket): covered_ids = [] value = D('0.00') for line in basket.all_lines(): if (self.can_apply_condition(line.product) and line.product.id not in covered_ids): covered_ids.append(line.product.id) value += line.unit_price_incl_tax if len(covered_ids) >= self.value: return value return value class ValueCondition(Condition): """ An offer condition dependent on the VALUE of matching items from the basket. 
""" class Meta: proxy = True verbose_name = _("Value Condition") verbose_name_plural = _("Value Conditions") def is_satisfied(self, basket): """ Determine whether a given basket meets this condition """ value_of_matches = D('0.00') for line in basket.all_lines(): product = line.product if (self.can_apply_condition(product) and product.has_stockrecord and line.quantity_without_discount > 0): price = line.unit_price_incl_tax value_of_matches += price * int(line.quantity_without_discount) if value_of_matches >= self.value: return True return False def _get_value_of_matches(self, basket): if hasattr(self, '_value_of_matches'): return getattr(self, '_value_of_matches') value_of_matches = D('0.00') for line in basket.all_lines(): product = line.product if (self.can_apply_condition(product) and product.has_stockrecord and line.quantity_without_discount > 0): price = line.unit_price_incl_tax value_of_matches += price * int(line.quantity_without_discount) self._value_of_matches = value_of_matches return value_of_matches def is_partially_satisfied(self, basket): value_of_matches = self._get_value_of_matches(basket) return D('0.00') < value_of_matches < self.value def get_upsell_message(self, basket): value_of_matches = self._get_value_of_matches(basket) return _('Spend %(value)s more from %(range)s') % { 'value': currency(self.value - value_of_matches), 'range': self.range} def consume_items(self, basket, affected_lines): """ Marks items within the basket lines as consumed so they can't be reused in other offers. We allow lines to be passed in as sometimes we want them sorted in a specific order. """ # Determine value of items already consumed as part of discount value_consumed = D('0.00') for line, __, qty in affected_lines: price = line.unit_price_incl_tax value_consumed += price * qty to_consume = max(0, self.value - value_consumed) if to_consume == 0: return for price, line in self.get_applicable_lines(basket, most_expensive_first=True): quantity_to_consume = min( line.quantity_without_discount, (to_consume / price).quantize(D(1), ROUND_UP)) line.consume(quantity_to_consume) to_consume -= price * quantity_to_consume if to_consume == 0: break # ======== # Benefits # ======== class PercentageDiscountBenefit(Benefit): """ An offer benefit that gives a percentage discount """ class Meta: proxy = True verbose_name = _("Percentage discount benefit") verbose_name_plural = _("Percentage discount benefits") def apply(self, basket, condition, offer=None): line_tuples = self.get_applicable_lines(basket) discount = D('0.00') affected_items = 0 max_affected_items = self._effective_max_affected_items() affected_lines = [] for price, line in line_tuples: if affected_items >= max_affected_items: break quantity_affected = min(line.quantity_without_discount, max_affected_items - affected_items) line_discount = self.round(self.value / D('100.0') * price * int(quantity_affected)) line.discount(line_discount, quantity_affected) affected_lines.append((line, line_discount, quantity_affected)) affected_items += quantity_affected discount += line_discount if discount > 0: condition.consume_items(basket, affected_lines) return discount class AbsoluteDiscountBenefit(Benefit): """ An offer benefit that gives an absolute discount """ class Meta: proxy = True verbose_name = _("Absolute discount benefit") verbose_name_plural = _("Absolute discount benefits") def apply(self, basket, condition, offer=None): line_tuples = self.get_applicable_lines(basket) if not line_tuples: return self.round(D('0.00')) discount = D('0.00') 
affected_items = 0 max_affected_items = self._effective_max_affected_items() affected_lines = [] for price, line in line_tuples: if affected_items >= max_affected_items: break remaining_discount = self.value - discount quantity_affected = min( line.quantity_without_discount, max_affected_items - affected_items, int(math.ceil(remaining_discount / price))) line_discount = self.round(min(remaining_discount, quantity_affected * price)) line.discount(line_discount, quantity_affected) affected_lines.append((line, line_discount, quantity_affected)) affected_items += quantity_affected discount += line_discount if discount > 0: condition.consume_items(basket, affected_lines) return discount class FixedPriceBenefit(Benefit): """ An offer benefit that gives the items in the condition for a fixed price. This is useful for "bundle" offers. Note that we ignore the benefit range here and only give a fixed price for the products in the condition range. The condition cannot be a value condition. We also ignore the max_affected_items setting. """ class Meta: proxy = True verbose_name = _("Fixed price benefit") verbose_name_plural = _("Fixed price benefits") def apply(self, basket, condition, offer=None): if isinstance(condition, ValueCondition): return self.round(D('0.00')) line_tuples = self.get_applicable_lines(basket, range=condition.range) if not line_tuples: return self.round(D('0.00')) # Determine the lines to consume num_permitted = int(condition.value) num_affected = 0 value_affected = D('0.00') covered_lines = [] for price, line in line_tuples: if isinstance(condition, CoverageCondition): quantity_affected = 1 else: quantity_affected = min( line.quantity_without_discount, num_permitted - num_affected) num_affected += quantity_affected value_affected += quantity_affected * price covered_lines.append((price, line, quantity_affected)) if num_affected >= num_permitted: break discount = max(value_affected - self.value, D('0.00')) if not discount: return self.round(discount) # Apply discount to the affected lines discount_applied = D('0.00') last_line = covered_lines[-1][0] for price, line, quantity in covered_lines: if line == last_line: # If last line, we just take the difference to ensure that # rounding doesn't lead to an off-by-one error line_discount = discount - discount_applied else: line_discount = self.round( discount * (price * quantity) / value_affected) line.discount(line_discount, quantity) discount_applied += line_discount return discount class MultibuyDiscountBenefit(Benefit): class Meta: proxy = True verbose_name = _("Multibuy discount benefit") verbose_name_plural = _("Multibuy discount benefits") def apply(self, basket, condition, offer=None): line_tuples = self.get_applicable_lines(basket) if not line_tuples: return self.round(D('0.00')) # Cheapest line gives free product discount, line = line_tuples[0] line.discount(discount, 1) affected_lines = [(line, discount, 1)] condition.consume_items(basket, affected_lines) return discount # ================= # Shipping benefits # ================= class ShippingBenefit(Benefit): def apply(self, basket, condition, offer=None): # Attach offer to basket to indicate that it qualifies for a shipping # discount. At this point, we only allow one shipping offer per # basket. 
basket.shipping_offer = offer condition.consume_items(basket, affected_lines=()) return D('0.00') class ShippingAbsoluteDiscountBenefit(ShippingBenefit): class Meta: proxy = True verbose_name = _("Shipping absolute discount benefit") verbose_name_plural = _("Shipping absolute discount benefits") def shipping_discount(self, charge): return min(charge, self.value) class ShippingFixedPriceBenefit(ShippingBenefit): class Meta: proxy = True verbose_name = _("Fixed price shipping benefit") verbose_name_plural = _("Fixed price shipping benefits") def shipping_discount(self, charge): if charge < self.value: return D('0.00') return charge - self.value class ShippingPercentageDiscountBenefit(ShippingBenefit): class Meta: proxy = True verbose_name = _("Shipping percentage discount benefit") verbose_name_plural = _("Shipping percentage discount benefits") def shipping_discount(self, charge): return charge * self.value / D('100.0')
1.96875
2
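A minimal stand-alone sketch (not taken from the offer code above) of the cap arithmetic that ConditionalOffer.get_max_applications performs; the helper name and the numbers in the example are made up for illustration.

# Hypothetical illustration: the effective number of remaining applications is
# the minimum of the remaining global allowance, the per-basket cap and the
# remaining per-user allowance; 10000 stands in for "unlimited".
def remaining_applications(max_global, num_global_used,
                           max_per_basket,
                           max_per_user, num_user_used):
    limits = [10000]
    if max_per_user:
        limits.append(max(0, max_per_user - num_user_used))
    if max_per_basket:
        limits.append(max_per_basket)
    if max_global:
        limits.append(max(0, max_global - num_global_used))
    return min(limits)

# An offer usable 10 times in total, already used 8 times, capped at 1 per basket:
assert remaining_applications(10, 8, 1, None, 0) == 1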
htk-lite/commandlist/help.py
otherbeast/hackers-tool-kit
393
12371
<reponame>otherbeast/hackers-tool-kit #!/usr/local/bin/python # coding: latin-1 #if you use this code give me credit @tuf_unkn0wn #i do not give you permission to show / edit this script without my credit #to ask questions or report a problem message me on instagram @tuf_unkn0wn """ ██░ ██ ▄▄▄ ▄████▄ ██ ▄█▀▓█████ ▓█████▄ ▓██░ ██▒▒████▄ ▒██▀ ▀█ ██▄█▒ ▓█ ▀ ▒██▀ ██▌ ▒██▀▀██░▒██ ▀█▄ ▒▓█ ▄ ▓███▄░ ▒███ ░██ █▌ ░▓█ ░██ ░██▄▄▄▄██ ▒▓▓▄ ▄██▒▓██ █▄ ▒▓█ ▄ ░▓█▄ ▌ ░▓█▒░██▓ ▓█ ▓██▒▒ ▓███▀ ░▒██▒ █▄░▒████▒░▒████▓ ▒ ▒░▒ ▒▒ ▓▒█ ░▒ ▒ ░▒ ▒▒ ▓▒ ▒░ ░ ▒▒▓ ▒ ▒ ░▒░ ░ ▒ ▒▒ ░ ░ ▒ ░ ░▒ ▒░ ░ ░ ░ ░ ▒ ▒ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ """ import os import sys import random lred = '\033[91m' lblue = '\033[94m' lgreen = '\033[92m' yellow = '\033[93m' cyan = '\033[1;36m' purple = '\033[95m' red = '\033[31m' green = '\033[32m' blue = '\033[34m' orange = '\033[33m' colorlist = [red, blue, green, yellow, lblue, purple, cyan, lred, lgreen, orange] randomcolor = random.choice(colorlist) banner3list = [red, blue, green, purple] def helpbanner(): a = os.popen("ls commandlist -1 | wc -l").read() b = a.replace('\n', '') print """ ╔══════════════════════════════════════════════════════════╗ ║ ║ ║ \033[92m ██░ ██ ▓█████ ██▓ ██▓███ \033[0m ║ ║ \033[90m ▓██░ ██▒▓█ ▀ ▓██▒ ▓██░ ██▒ \033[0m ║ ║ \033[92m ▒██▀▀██░▒███ ▒██░ ▓██░ ██▓▒ \033[0m ║ ║ \033[90m ░▓█ ░██ ▒▓█ ▄ ▒██░ ▒██▄█▓▒ ▒ \033[0m ║ ║ \033[92m ░▓█▒░██▓░▒████▒░██████▒▒██▒ ░ ░ \033[0m ║ ║ \033[94m ▒ ░░▒░▒░░ ▒░ ░░ ▒░▓ ░▒▓▒░ ░ ░ \033[0m ║ ║ \033[90m ▒ ░▒░ ░ ░ ░ ░░ ░ ▒ ░░▒ ░ \033[0m ║ ║ \033[94m ░ ░░ ░ ░ ░ ░ ░░ \033[0m ║ ║ \033[90m ░ ░ ░ ░ ░ ░ ░ \033[0m ║ ║ ║ ║══════════════════════════════════════════════════════════║ ║ Commands: [\033[32m{0}\033[0m] Banners: [\033[31m6\033[0m] ║ ║══════════════════════════════════════════════════════════════════════════════════════╗ ║ ? | this menu ║ ║ exit | exit htkl ║ ║ clear | clears screen ║ ║ banner | shows a banner ║ ║ infoscan | gather information on a host [for a more specific scan type infoscan -o] ║ ║ dos | run Denial-Of-Service attacks ║ ║ ║ ║ ║ ║ \033[5m@tuf_unkn0wn\033[0m ║ ╚══════════════════════════════════════════════════════════════════════════════════════╝ \033[0m\n""".format(b) helpbanner()
2.21875
2
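The banner above derives its command count by shelling out to `ls commandlist -1 | wc -l`; a small sketch (not part of the original script) of a shell-free equivalent, assuming the same directory layout:

import os

def count_commands(directory="commandlist"):
    # Same figure as `ls commandlist -1 | wc -l`, without spawning a shell
    # (hidden files, if any were present, would make the two counts differ).
    return len(os.listdir(directory))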
scripts/core/soldier.py
whackashoe/entwinement
1
12372
d_soldiers = [] class Soldier: def __init__(self, id, name, team): self.id = id self.name = name self.team = team self.x = 0 self.y = 0 self.xVelo = 0 self.yVelo = 0 self.kills = 0 self.deaths = 0 self.alive = 'true' self.driving = 'false' self.gun = 0 self.ammo = 0 self.reloading = 'false' def setPosition(self, x, y, xv, yv): self.x = x self.y = y self.xVelo = xv self.yVelo = yv def setName(self, name): self.name = name def setTeam(self, team): self.team = team def setGun(self, gun): self.gun = gun def setGunInfo(self, gun, ammo, reloading): self.gun = gun self.ammo = ammo self.reloading = reloading def die(self): self.alive = 'false' self.driving = 'false' self.deaths += 1 def respawn(self): self.alive = 'true' def teleport(self, x, y): global com self.x = x self.y = y com += 'f_t s '+str(self.id)+' '+str(self.x)+' '+str(self.y)+';' def applyForce(self, xf, yf): global com com += 'f_af s '+str(self.id)+' '+str(xf)+' '+str(yf)+';' def setVelocity(self, xf, yf): global com self.xVelo = xf self.yVelo = yf com += 'f_v s '+str(self.id)+' '+str(self.xVelo)+' '+str(self.yVelo)+';' def changeTeam(self, team): global com self.team = team com += 's_ct '+str(self.id)+' '+str(self.team)+';' def changeGun(self, gun): global com self.gun = gun com += 's_cg '+str(self.id)+' '+str(self.gun)+';' def changeAttachment(self, type, amount): global com com += 's_ca '+str(self.id)+' '+str(type)+' '+str(amount)+';' def killSoldier(self): global com self.alive = false com += 's_ks '+str(id)+';' def respawnSoldier(self, spawn): global com com += 's_rs '+str(self.id)+' '+str(spawn)+';' def enterVehicle(self, vehicleId): global com com += 's_en '+str(self.id)+' '+str(vehicleId)+';' def exitVehicle(self): global com com += 's_ex '+str(self.id)+';' def addKill(self): global com self.kills += 1 com += 's_ak '+str(self.id)+';' def addDeath(self): global com self.deaths += 1 com += 's_ad '+str(self.id)+';' def dropGun(self): global com com += 's_dg '+str(self.id)+';' def addSoldier(team): global com com += 'a s '+str(team)+';' def getSoldier(n): global d_soldiers return d_soldiers[n] def getSoldierById(id): global d_soldiers for n in xrange(len(d_soldiers)): s = d_soldiers[n] if s.id == id: return s def getSoldiers(): global d_soldiers return d_soldiers def getSoldierCount(): global d_soldiers return len(d_soldiers) def getTeamKills(team): amount = 0 for n in xrange(len(d_soldiers)): s = d_soldiers[n] if s.team == team: amount += s.kills return amount def getTeamDeaths(team): amount = 0 for n in xrange(len(d_soldiers)): s = d_soldiers[n] if s.team == team: amount += s.deaths return amount def getTeamSize(team): amount = 0 for n in xrange(len(d_soldiers)): s = d_soldiers[n] if s.team == team: amount += 1 return amount
2.953125
3
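The Soldier methods above queue engine commands by appending to a module-level `com` string whose initialisation is not shown in this file; a minimal sketch of that accumulate-and-flush pattern, with a hypothetical flush helper:

com = ''  # engine command buffer; assumed to be defined elsewhere in the game scripts

def flush_commands(send):
    # Hypothetical helper: deliver every queued command string to the engine
    # callback `send` in one batch, then reset the buffer.
    global com
    if com:
        send(com)
        com = ''

# Example: Soldier.teleport(10, 20) on a soldier with id 3 appends
# "f_t s 3 10 20;" to `com`; flush_commands(print) would then emit the batch.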
dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py
DZAymen/dz-Trafico
0
12373
<reponame>DZAymen/dz-Trafico<filename>dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py from dzTrafico.BusinessEntities.Simulation import Simulation import lxml.etree as etree class DataVisualizationController(object): def __init__(self, simulation): # Initialize necessary file paths self.simulation = simulation def get_emissions_results(self): pass def get_travel_time_results(self): travel_time_results = [] def get_waiting_time_results(self): pass def get_root_node_file(self, filename): tree = etree.parse(Simulation.project_directory + filename) return tree.getroot() class DataVisualization(object): def __init__(self, type, data): self.type = type self.data = data def add_data(self, data): for value in data: self.data.append(value)
2.453125
2
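get_root_node_file above simply parses an XML results file with lxml and returns its root; a tiny usage sketch in which the file name and the element/attribute names are assumptions, only the lxml call pattern mirrors the method:

import lxml.etree as etree

# Hypothetical simulation output file; "tripinfo" and "duration" are made-up names.
root = etree.parse("results/tripinfo.xml").getroot()
durations = [float(trip.get("duration")) for trip in root.iter("tripinfo")]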
bunkai/algorithm/lbd/custom_tokenizers.py
megagonlabs/bunkai
149
12374
#!/usr/bin/env python3 import collections import logging import os import typing import unicodedata from janome.tokenizer import Tokenizer from transformers.file_utils import cached_path from transformers.models.bert.tokenization_bert import BertTokenizer, WordpieceTokenizer, load_vocab import bunkai.constant """ The original source code is from cl-tohoku/bert-japanese. https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py The original source code is under Apache-2.0 License. """ logger = logging.getLogger(__name__) KNOWN_PRETRAINED_VOCABS = { "cl-tohoku/bert-base-japanese", "cl-tohoku/bert-base-japanese-whole-word-masking", "cl-tohoku/bert-base-japanese-char", "cl-tohoku/bert-base-japanese-char-whole-word-masking", } class JanomeTokenizer(object): """Runs basic tokenization with Janome morphological parser.""" def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True): """ Construct a JanomeTokenizer. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. :arg never_split: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. :arg normalize_text: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization. """ self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text self.janome_tokenizer = Tokenizer() def tokenize(self, text: str, *, never_split=None, **kwargs): """Tokenizes a piece of text.""" if self.normalize_text: text = unicodedata.normalize("NFKC", text) never_split = self.never_split + (never_split if never_split is not None else []) tokens = self.janome_tokenizer.tokenize(text) __tokens = [] last_index = 0 for t in tokens: token = t.surface token_start = text.index(token, last_index) if last_index != token_start: __tokens.append(text[last_index:token_start]) if self.do_lower_case and token not in never_split: token = token.lower() __tokens.append(token.lower()) else: __tokens.append(token) last_index = token_start + len(token) if len(text) != last_index: __tokens.append(text[last_index:]) assert text == "".join(__tokens), f"[{text}] != [{''.join(__tokens)}]" return __tokens class CharacterTokenizer(object): """Runs Character tokenziation.""" def __init__(self, vocab, unk_token, normalize_text=True): self.vocab = vocab self.unk_token = unk_token self.normalize_text = normalize_text def tokenize(self, text): """ Tokenize a piece of text into characters. For example: input = "apple" output = ["a", "p", "p", "l", "e"] :arg text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. :return: A list of characters. """ if self.normalize_text: text = unicodedata.normalize("NFKC", text) output_tokens = [] for char in text: if char not in self.vocab: output_tokens.append(self.unk_token) continue output_tokens.append(char) return output_tokens class JanomeSubwordsTokenizer(BertTokenizer): def __init__( self, vocab_file, *, subword_tokenizer_type="wordpiece", do_subword_tokenize: bool = True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ): """ Construct a JanomeSubwordsTokenizer. :arg vocab_file: Path to a one-wordpiece-per-line vocabulary file. :arg do_lower_case: (`optional`) boolean (default True) Whether to lower case the input. 
Only has an effect when do_basic_tokenize=True. :arg do_word_tokenize: (`optional`) boolean (default True) Whether to do word tokenization. :arg do_subword_tokenize: (`optional`) boolean (default True) Whether to do subword tokenization. :arg word_tokenizer_type: (`optional`) string (default "basic") Type of word tokenizer. basic / janome / pre_tokenize :arg subword_tokenizer_type: (`optional`) string (default "wordpiece") Type of subword tokenizer. :arg cls_token: No description. """ super(BertTokenizer, self).__init__( unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) if os.path.isfile(vocab_file): self.vocab = load_vocab(vocab_file) elif vocab_file in KNOWN_PRETRAINED_VOCABS: url: str = f"https://s3.amazonaws.com/models.huggingface.co/bert/{vocab_file}/vocab.txt" self.vocab = load_vocab(cached_path(url)) else: raise ValueError( "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file) ) # add new vocab self.add_tokens([" ", bunkai.constant.METACHAR_LINE_BREAK]) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_word_tokenize = False self.do_subword_tokenize = True if do_subword_tokenize: if subword_tokenizer_type == "wordpiece": self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) elif subword_tokenizer_type == "character": self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token) else: raise ValueError("Invalid subword_tokenizer_type '{}' is specified.".format(subword_tokenizer_type)) self.janome_tokenizer = JanomeTokenizer() def tokenize(self, text: typing.Union[str, typing.List[str]]) -> typing.List[str]: if isinstance(text, str): morphemes = self.janome_tokenizer.tokenize(text) elif isinstance(text, list) and all([isinstance(t, str) for t in text]): morphemes = text else: raise Exception(f"Invalid input-type {text}") if self.do_subword_tokenize: split_tokens = [] for token in morphemes: sts = [sub_token for sub_token in self.subword_tokenizer.tokenize(token)] if len(sts) == 0: split_tokens.append(token) else: split_tokens += sts else: split_tokens = morphemes return split_tokens
2.5625
3
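A short usage sketch for the JanomeTokenizer defined above, assuming the janome package is installed; the Japanese sample sentence is arbitrary:

tokenizer = JanomeTokenizer()
text = "今日は 良い天気です。"
tokens = tokenizer.tokenize(text)
# Whitespace runs between morphemes are kept as their own tokens, so the
# pieces always reassemble into the (NFKC-normalised) input string.
assert "".join(tokens) == text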
tests/exchanges_tests.py
tomwalton78/Crypto-Exchange-API-Aggregator
0
12375
<reponame>tomwalton78/Crypto-Exchange-API-Aggregator<filename>tests/exchanges_tests.py import unittest from datetime import datetime import os import sys from api.exchanges.exchange import ExchangeAPICallFailedException from api.exchanges.gdax_exchange import GdaxExchange from api.exchanges.kraken_exchange import KrakenExchange from api.exchanges.bitstamp_exchange import BitstampExchange from api.exchanges.bitfinex_exchange import BitfinexExchange class HiddenPrints: """Class to disable printing for functions run under its scope. Example: with HiddenPrints() print('hello world') Nothing will print, since anything under the scope of HiddenPrints has its printing output suppressed. """ def __enter__(self): """Disable printing on entering 'with HiddenPrints()' scope """ self._original_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') def __exit__(self, exc_type, exc_val, exc_tb): """Re-enable printing on exiting 'with HiddenPrints()' scope """ sys.stdout.close() sys.stdout = self._original_stdout class GdaxExchangeTests(unittest.TestCase): """ Tests that functions within GdaxExchange class perform as intended. """ def test_initialisation_with_valid_market(self): try: g = GdaxExchange('BTC-EUR') pass except KeyError: self.fail( 'Initialising GdaxExchange with BTC-EUR raised KeyError.' ) def test_initialisation_with_invalid_market(self): with self.assertRaises(KeyError): g = GdaxExchange('REDDDDDDDDDD-BLUEEEEEEEEEE') def test_fetch_l1_quote_on_supported_market(self): try: g = GdaxExchange('BTC-EUR') g.fetch_l1_quote() pass except Exception as e: self.fail( 'Fetch l1 quote on supported market failed: {}'.format( str(e) ) ) def test_fetch_l1_quote_on_unsupported_market(self): with self.assertRaises(ExchangeAPICallFailedException): g = GdaxExchange('LTC-GBP') g.fetch_l1_quote() def test_latest_l1_quote_to_csv(self): g = GdaxExchange('BTC-EUR') g.latest_l1_quote = { "best ask size": 0.65333759, "best bid price": 5780.1, "best ask price": 5781.24, "timestamp": datetime.utcnow(), "best bid size": 0.001006 } g.latest_l1_quote_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/gdax_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) def test_fetch_l1_quote_and_write_to_csv(self): g = GdaxExchange('BTC-EUR') with HiddenPrints(): g.fetch_l1_quote_and_write_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/gdax_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) class KrakenExchangeTests(unittest.TestCase): """ Tests that functions within KrakenExchange class perform as intended. """ def test_initialisation_with_valid_market(self): try: k = KrakenExchange('BTC-EUR') pass except KeyError: self.fail( 'Initialising KrakenExchange with BTC-EUR raised KeyError.' 
) def test_initialisation_with_invalid_market(self): with self.assertRaises(KeyError): k = KrakenExchange('REDDDDDDDDDD-BLUEEEEEEEEEE') def test_fetch_l1_quote_on_supported_market(self): try: k = KrakenExchange('BTC-EUR') k.fetch_l1_quote() pass except Exception as e: self.fail( 'Fetch l1 quote on supported market failed: {}'.format( str(e) ) ) def test_fetch_l1_quote_on_unsupported_market(self): with self.assertRaises(ExchangeAPICallFailedException): k = KrakenExchange('LTC-GBP') k.fetch_l1_quote() def test_latest_l1_quote_to_csv(self): k = KrakenExchange('BTC-EUR') k.latest_l1_quote = { "best ask size": 0.65333759, "best bid price": 5780.1, "best ask price": 5781.24, "timestamp": datetime.utcnow(), "best bid size": 0.001006 } k.latest_l1_quote_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/kraken_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) def test_fetch_l1_quote_and_write_to_csv(self): k = KrakenExchange('BTC-EUR') with HiddenPrints(): k.fetch_l1_quote_and_write_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/kraken_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) class BitstampExchangeTests(unittest.TestCase): """ Tests that functions within BitstampExchange class perform as intended. """ def test_initialisation_with_valid_market(self): try: k = BitstampExchange('BTC-EUR') pass except KeyError: self.fail( 'Initialising BitstampExchange with BTC-EUR raised KeyError.' ) def test_initialisation_with_invalid_market(self): with self.assertRaises(KeyError): k = BitstampExchange('REDDDDDDDDDD-BLUEEEEEEEEEE') def test_fetch_l1_quote_on_supported_market(self): try: k = BitstampExchange('BTC-EUR') k.fetch_l1_quote() pass except Exception as e: self.fail( 'Fetch l1 quote on supported market failed: {}'.format( str(e) ) ) def test_fetch_l1_quote_on_unsupported_market(self): with self.assertRaises(ExchangeAPICallFailedException): k = BitstampExchange('LTC-GBP') k.fetch_l1_quote() def test_latest_l1_quote_to_csv(self): k = BitstampExchange('BTC-EUR') k.latest_l1_quote = { "best ask size": 0.65333759, "best bid price": 5780.1, "best ask price": 5781.24, "timestamp": datetime.utcnow(), "best bid size": 0.001006 } k.latest_l1_quote_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/bitstamp_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) def test_fetch_l1_quote_and_write_to_csv(self): k = BitstampExchange('BTC-EUR') with HiddenPrints(): k.fetch_l1_quote_and_write_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/bitstamp_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) class BitfinexExchangeTests(unittest.TestCase): """ Tests that functions within BitfinexExchange class perform as intended. """ def test_initialisation_with_valid_market(self): try: k = BitfinexExchange('BTC-EUR') pass except KeyError: self.fail( 'Initialising BitfinexExchange with BTC-EUR raised KeyError.' 
) def test_initialisation_with_invalid_market(self): with self.assertRaises(KeyError): k = BitfinexExchange('REDDDDDDDDDD-BLUEEEEEEEEEE') def test_fetch_l1_quote_on_supported_market(self): try: k = BitfinexExchange('BTC-EUR') k.fetch_l1_quote() pass except Exception as e: self.fail( 'Fetch l1 quote on supported market failed: {}'.format( str(e) ) ) def test_fetch_l1_quote_on_unsupported_market(self): with self.assertRaises(ExchangeAPICallFailedException): k = BitfinexExchange('LTC-GBP') k.fetch_l1_quote() def test_latest_l1_quote_to_csv(self): k = BitfinexExchange('BTC-EUR') k.latest_l1_quote = { "best ask size": 0.65333759, "best bid price": 5780.1, "best ask price": 5781.24, "timestamp": datetime.utcnow(), "best bid size": 0.001006 } k.latest_l1_quote_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/bitfinex_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) def test_fetch_l1_quote_and_write_to_csv(self): k = BitfinexExchange('BTC-EUR') with HiddenPrints(): k.fetch_l1_quote_and_write_to_csv( path_to_folder=os.path.dirname(os.path.realpath(__file__)) + '/' ) # Test that csv file exists path = ( os.path.dirname(os.path.realpath(__file__)) + '/bitfinex_BTC-EUR.csv' ) self.assertTrue(os.path.exists(path)) os.remove(path) if __name__ == '__main__': unittest.main(exit=False)
2.171875
2
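The HiddenPrints context manager above is generic; a tiny usage sketch outside the test classes:

with HiddenPrints():
    print("this is suppressed")      # swallowed while inside the block
print("printing is restored here")   # emitted normally again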
stats/clustering.py
KNSI-Golem/assets-generation
0
12376
<gh_stars>0 from sklearn.cluster import KMeans import image_processing import numpy as np import some_analysis from sklearn.manifold import TSNE import matplotlib.pyplot as plt from autoencoder import ConvAutoencoder input_path = './bin' output_shape = (32, 48) processing_output = './processed/results_processing' data = image_processing.get_data_from_images(processing_output) data = data[:, :, :, :-1] encoder, _, _ = ConvAutoencoder.build(32, 48, 3, filters=(32, 64), latentDim=512) encoder.load_weights('encoder.h5') data_encoded = encoder.predict(data) #data_reshaped = data.reshape((data.shape[0], -1)) n_clusters = 200 # Runs in parallel 4 CPUs kmeans = KMeans(n_clusters=n_clusters, n_init=15, n_jobs=8) # Train K-Means. y_pred_kmeans = kmeans.fit_predict(data_encoded) data += 1.0 data *= 127.5 array = np.empty((n_clusters), dtype=object) for i in range(n_clusters): array[i] = [] for cluster, idx in zip(y_pred_kmeans, range(data.shape[0])): array[cluster].append(idx) i = 1 for l in array: cluster = data[l] some_analysis.make_preview(cluster, f'./previews/cluster_v3_{i}.png', n_cols=5) i += 1 ''' data_embedded = TSNE(learning_rate=200).fit_transform(data_reshaped) plt.scatter(data_embedded[:, 0], data_embedded[:, 1]) '''
2.484375
2
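The grouping loop above collects image indices per K-Means label into an object-dtype array; an equivalent sketch using a plain dictionary (illustrative only, not part of the script):

from collections import defaultdict

clusters = defaultdict(list)
for idx, label in enumerate(y_pred_kmeans):
    clusters[label].append(idx)
# clusters[k] now lists the indices of all images assigned to cluster k,
# matching what the object-dtype numpy array in the script accumulates.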
CursoEmVideo/Aula22/ex109/ex109.py
lucashsouza/Desafios-Python
0
12377
<gh_stars>0
"""
Modify the functions created in challenge 107 so that they accept one extra
parameter indicating whether the value they return should or should not be
formatted by the moeda() function developed in challenge 108.
"""
from Aula22.ex109 import moeda
from Aula22.ex109.titulo import titulo

preco = float(input("Preço: R$"))

titulo('Informações Calculadas: ')
print(f"Metade: {moeda.metade(preco, True)}")
print(f"Dobro: {moeda.dobro(preco, True)}")
print(f"10% Acréscimo: {moeda.aumentar(preco, 10, True)}")
print(f"10% Desconto: {moeda.diminuir(preco, 10, True)}")
3.34375
3
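The moeda module called above is not included in this file; a minimal sketch of the kind of helper the exercise describes, where the function names follow the calls above but the implementation itself is an assumption:

def moeda(valor, simbolo='R$'):
    # Render a number as currency text, e.g. 10 -> 'R$10.00'.
    return f'{simbolo}{valor:.2f}'

def metade(valor, formatado=False):
    # Extra flag decides whether to return the raw number or the formatted string.
    resultado = valor / 2
    return moeda(resultado) if formatado else resultado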
mocu/graphical_model/mocu/scripts/visualizetoysystem.py
exalearn/oded
0
12378
<reponame>exalearn/oded from mocu.utils.toysystems import * import matplotlib.pyplot as plt import matplotlib.cm as cm def make_rhs_full_system(a,b,k,c,lam,psi,theta): def rhs_full_system(y,t): C = c(a,b,k,y[0],psi,theta) y1_dot = lam[0] * (y[0] - 1) y2_dot = lam[1] * (y[1] - C) * (y[1] - a) * (y[1] - b) return [y1_dot , y2_dot] return rhs_full_system def plot_points_in_full_space(xy,c,colors): a = 0; b=1; k = 5 y1_lin = np.linspace( 0, 1, 100 ) plt.scatter(xy[0] , xy[1] , c=colors , cmap=plt.cm.coolwarm) plt.plot( y1_lin , c(a,b,k,y1_lin) , 'k') def plot_different_thetas(): a = 0; b=1; theta = [0.45 , 0.5 , 0.55] nsamp = 100 y0 = np.linspace(0.5 - (b-a)/20 , 0.5 + (b-a)/20 , nsamp) tf = 30 dt = 0.05 t = np.arange(0,tf,dt) colors = cm.coolwarm( np.linspace(0,1,len(y0)) ) yfinal = [] fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 4)) for (i,th) in enumerate(theta): f = make_f( a , b , th ) g = make_noise_function(0.03) for (y0_i,c_i) in zip(y0,colors): Y = np.squeeze( sdeint.itoint(f,g,y0_i,t) ) yfinal.append(Y[-1]) axes[i].plot(t , [y for y in Y] , c=c_i) axes[i].set_title(r'Boundary = ' + str(th) , fontsize=20) axes[0].set_xlabel(r'$t$' , fontsize=16) axes[0].set_ylabel(r'$c(t,\theta)$' , fontsize=16) plt.tight_layout() plt.show() def f_input_output_sde( psi,theta,x0 ): tf = 30 dt = 0.05 t = np.arange(0,tf,dt) a = 0; b=1; c = 0.04 * np.abs( psi-theta ) + 0.48 f = make_f( a , b , c ) g = make_noise_function( 0.03 ) y = np.squeeze( sdeint.itoint(f,g,x0,t) ) return y def f_input_output_ode( psi,theta,x0 ): dt = 0.05 tf = 30 t = np.arange(0,tf,dt) lam = [-0.01,-1] k = 5 a = 0; b=1; c = lambda a,b,k,y1,psi,theta : (0.48 + 0.04*np.abs(psi-theta) ) + 0.04*np.abs(b-a)*np.sin(2*np.pi*k*y1) f_real = make_rhs_full_system(a,b,k,c,lam,psi,theta) y = np.squeeze( odeint( f_real , x0 , t ) ) #y = [ yi[1] for yi in y ] return y def estimate_transition_probabilities( f_input_output , psi , theta , y0 ): colors = cm.coolwarm( np.linspace(0,1,len(y0)) ) yfinal = [] #plt.figure() #fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 4)) for (y0_i,c_i) in zip(y0,colors): Y = f_input_output( psi , theta , y0_i ) if (np.size(Y[0]) > 1): Y = [ yi[1] for yi in Y ] yfinal.append(Y[-1]) #axes[2].plot(t , [y for y in Y] , c=c_i) # Only use y-coordinate of "real" system if (np.size(y0[0]) > 1): y0 = np.array( [ yi[1] for yi in y0 ] ) # Estimate IC->final phase probabilities idx0_0 = np.where( y0 < 0.5 )[0] idx0_1 = np.where( y0 >= 0.5 )[0] yfinal = np.array( yfinal ) n_00 = np.sum( yfinal[idx0_0] < 0.5 ) n_01 = np.sum( yfinal[idx0_0] >= 0.5 ) n_10 = np.sum( yfinal[idx0_1] < 0.5 ) n_11 = np.sum( yfinal[idx0_1] >= 0.5 ) n_0 = np.sum( yfinal < 0.5 ) n_1 = np.sum( yfinal >= 0.5 ) rho_ic_to_final = np.array([ [n_00/(n_00+n_01) , n_01/(n_00+n_01) ] , [n_10/(n_10+n_11) , n_11/(n_10+n_11)] ]) print( 'rho( final phase | ic ): ' ) print( rho_ic_to_final ) def plot_real_system( psi , theta , y0_2 ): dt = 0.05 tf = 30 t = np.arange(0,tf,dt) #fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 4)) colors = cm.coolwarm( np.linspace(0,1,len(y0_2)) ) for (y0_i,c_i) in zip(y0_2,colors): y = f_input_output_ode( psi,theta,y0_i ) plt.figure(2) plt.plot( t , [yi[1] for yi in y] , c=c_i ) plt.xlabel(r'$t$',fontsize=20) plt.ylabel(r'$c_2$',fontsize=20) plt.figure(3) plt.plot( [yi[0] for yi in y] , [yi[1] for yi in y] , c=c_i ) plt.xlabel(r'$c_1$',fontsize=20) plt.ylabel(r'$c_2$',fontsize=20) c1 = np.linspace( 0 , 1 , 100 ) C = (0.48 + 0.04 * np.abs(psi-theta) ) + 0.04 * np.sin(2*np.pi*5*c1) plt.plot( c1 , C , 'k--' , lw=3 ) 
plt.tight_layout() plt.show() def main(): nsamp = 1000 y0 = np.random.uniform( 0 , 1 , nsamp ) y1 = np.linspace(0.45 , 0.55 , nsamp) y = tuple( zip( y0 , y1 ) ) psi = 0.0; theta = 0.5 estimate_transition_probabilities( f_input_output_sde , psi , theta , y1 ) estimate_transition_probabilities( f_input_output_ode , psi , theta , y ) #plot_different_thetas() #plot_real_system( psi , theta , y ) if __name__ == '__main__': main()
2.234375
2
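A stripped-down sketch of the sdeint call pattern the script above relies on; this is a toy drift/diffusion pair, not one of the systems defined in mocu.utils.toysystems:

import numpy as np
import sdeint

t = np.arange(0.0, 30.0, 0.05)
f = lambda y, t: -1.0 * (y - 0.5)   # drift pulling the state towards 0.5
g = lambda y, t: 0.03               # constant diffusion, like make_noise_function(0.03)
trajectory = sdeint.itoint(f, g, 0.4, t)   # one state per time step in t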
sbm/stochastic_block_model.py
pmacg/pysbm
1
12379
""" Several methods for generating graphs from the stochastic block model. """ import itertools import math import random import scipy.sparse import numpy as np def _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed): """ Compute the number of possible edges between two clusters. :param c1_size: The size of the first cluster :param c2_size: The size of the second cluster :param same_cluster: Whether these are the same cluster :param self_loops: Whether we will generate self loops :param directed: Whether we are generating a directed graph :return: the number of possible edges between these clusters """ if not same_cluster: # The number is simply the product of the number of vertices return c1_size * c2_size else: # The base number is n choose 2 possible_edges_between_clusters = int((c1_size * (c1_size - 1)) / 2) # If we are allowed self-loops, then add them on if self_loops: possible_edges_between_clusters += c1_size # The number is normally the same for undirected and directed graphs, unless the clusters are the same, in which # case the number for the directed graph is double since we need to consider both directions of each edge. if directed: possible_edges_between_clusters *= 2 # But if we are allowed self-loops, then we shouldn't double them since there is only one 'direction'. if directed and self_loops: possible_edges_between_clusters -= c1_size return possible_edges_between_clusters def _get_number_of_edges(c1_size, c2_size, prob, same_cluster, self_loops, directed): """ Compute the number of edges there will be between two clusters. :param c1_size: The size of the first cluster :param c2_size: The size of the second cluster :param prob: The probability of an edge between the clusters :param same_cluster: Whether these are the same cluster :param self_loops: Whether we will generate self loops :param directed: Whether we are generating a directed graph :return: the number of edges to generate between these clusters """ # We need to compute the number of possible edges possible_edges_between_clusters = _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed) # Sample the number of edges from the binomial distribution return np.random.binomial(possible_edges_between_clusters, prob) def _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=False): """ Given a list of cluster sizes, and a square matrix Q, generates edges for a graph in the following way. For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with probability Q_{i, j}. For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle). For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively. May return self-loops. The calling code can decide what to do with them. Returns edges as pairs (u, v) where u and v are integers giving the index of the respective vertices. :param cluster_sizes: a list giving the number of vertices in each cluster :param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should be symmetric in the undirected case. :param directed: Whether to generate a directed graph (default is false). :return: Edges (u, v). """ # We will iterate over the clusters. This variable keeps track of the index of the first vertex in the current # cluster_1. 
c1_base_index = 0 for cluster_1 in range(len(cluster_sizes)): # Keep track of the index of the first vertex in the current cluster_2 c2_base_index = c1_base_index # If we are constructing a directed graph, we need to consider all values of cluster_2. # Otherwise, we will consider only the clusters with an index >= cluster_1. if directed: second_clusters = range(len(cluster_sizes)) c2_base_index = 0 else: second_clusters = range(cluster_1, len(cluster_sizes)) for cluster_2 in second_clusters: # Compute the number of edges between these two clusters num_edges = _get_number_of_edges(cluster_sizes[cluster_1], cluster_sizes[cluster_2], prob_mat_q[cluster_1][cluster_2], cluster_1 == cluster_2, True, directed) # Sample this number of edges. TODO: correct for possible double-sampling of edges num_possible_edges = (cluster_sizes[cluster_1] * cluster_sizes[cluster_2]) - 1 for i in range(num_edges): edge_idx = random.randint(0, num_possible_edges) u = c1_base_index + int(edge_idx / cluster_sizes[cluster_1]) v = c2_base_index + (edge_idx % cluster_sizes[cluster_1]) yield u, v # Update the base index for the second cluster c2_base_index += cluster_sizes[cluster_2] # Update the base index of this cluster c1_base_index += cluster_sizes[cluster_1] def sbm_adjmat(cluster_sizes, prob_mat_q, directed=False, self_loops=False): """ Generate a graph from the stochastic block model. The list cluster_sizes gives the number of vertices inside each cluster and the matrix Q gives the probability of each edge between pairs of clusters. For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with probability Q_{i, j}. For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle). For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively. Returns the adjacency matrix of the graph as a sparse scipy matrix in the CSR format. :param cluster_sizes: The number of vertices in each cluster. :param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should be symmetric in the undirected case. :param directed: Whether to generate a directed graph (default is false). :param self_loops: Whether to generate self-loops (default is false). :return: The sparse adjacency matrix of the graph. """ # Initialize the adjacency matrix adj_mat = scipy.sparse.lil_matrix((sum(cluster_sizes), sum(cluster_sizes))) # Generate the edges in the graph for (u, v) in _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=directed): if u != v or self_loops: # Add this edge to the adjacency matrix. adj_mat[u, v] = 1 if not directed: adj_mat[v, u] = 1 # Reformat the output matrix to the CSR format return adj_mat.tocsr() def sbm_adjmat_equal_clusters(n, k, prob_mat_q, directed=False): """ Generate a graph from the general stochastic block model. Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of each edge inside a cluster is given by the probability matrix Q. :param n: The number of vertices in the graph. :param k: The number of clusters. :param prob_mat_q: q[i][j] gives the probability of an edge between clusters i and j :param directed: Whether to generate a directed graph. :return: The sparse adjacency matrix of the graph. 
""" return sbm_adjmat([int(n/k)] * k, prob_mat_q, directed=directed) def ssbm_adjmat(n, k, p, q, directed=False): """ Generate a graph from the symmetric stochastic block model. Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of each edge inside a cluster is given by p. The probability of an edge between two different clusters is q. :param n: The number of vertices in the graph. :param k: The number of clusters. :param p: The probability of an edge inside a cluster. :param q: The probability of an edge between clusters. :param directed: Whether to generate a directed graph. :return: The sparse adjacency matrix of the graph. """ # Every cluster has the same size. cluster_sizes = [int(n/k)] * k # Construct the k*k probability matrix Q. The off-diagonal entries are all q and the diagonal entries are all p. prob_mat_q = [] for row_num in range(k): new_row = [q] * k new_row[row_num] = p prob_mat_q.append(new_row) # Call the general sbm method. return sbm_adjmat(cluster_sizes, prob_mat_q, directed=directed)
3.453125
3
py_include/__init__.py
mauro-balades/py-include
2
12380
#!/usr/bin/python3 """ | --------------------- Py include <Mauro Baladés> --------------------- | ___ _ _ _ __ _ _ ___ ____ | | |_) \ \_/ | | | |\ | / /` | | | | | | | \ | |_ | |_| |_| |_| |_| \| \_\_, |_|__ \_\_/ |_|_/ |_|__ | ---------------------------------------------------------------------- | MIT License | | Copyright (c) 2022 <NAME> | | Permission is hereby granted, free of charge, to any person obtaining a copy | of this software and associated documentation files (the "Software"), to deal | in the Software without restriction, including without limitation the rights | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | copies of the Software, and to permit persons to whom the Software is | furnished to do so, subject to the following conditions: | | The above copyright notice and this permission notice shall be included in all | copies or substantial portions of the Software. | | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | SOFTWARE. | """ from pathlib import Path import sys def _exec_modules(*args, **kwargs): # Get locals from kwargs local = kwargs.get("local", None) # Check if local is None, # because user did not define it. if local is None: raise Exception("Need to pass the local variable") # Iterate every path that user gives as # arguments (stored in *args). for arg in args: # Store the path into a # platform specific-path path = Path(arg) # Open the file and get it's # content with open(path, "r") as f: data = f.read() # Execute the file content. exec(data, globals(), local) def _ret_modules(*args, **kwargs): pass def include(*args, **kwargs): """Here is where all the magic ocour. This function takes an infinite amount of paths and they are being executend to feel like user imported it. Note: It can also be used to store it into a variable if user needs it. This can be done by adding the argument `ret` to True (more detail in #Args). Note: Please note how (for the import statement) you will need a `__init__.py` and paths separated by dots. With py-include, you don't need. Py-include will make your path supported by the current platform and it will open it's content and execute it, so you don't need a path divided by `.` or a `__init__.py` Args: files [list(str)]: A list of paths to include. ret [bool]: If it is set to True, return the module (defaults to False). Note: If `ret` is set to `True`, the function will return all modules as user will need to unpack them. """ # Get the value whether user whan't to execute # the module or to return it. (defaults to False) ret = kwargs.get("ret", False) # Check if user inserted `ret` as True. If it not, # we will open the file and execute it's content. # If it is True, we will return the module they # whanted to import. if not ret: _exec_modules(*args, **kwargs) return _ret_modules(*args, **kwargs)
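A hedged usage sketch for the include() helper above. Note that in the code as written the ret=True branch calls _ret_modules(), which is a stub, so only the exec path is exercised here. The file path scripts/helpers.py and the commented call are hypothetical.

# Sketch: executing a neighbouring script with py_include's include().
# `scripts/helpers.py` is a hypothetical file; the `local` argument is required
# by _exec_modules() so the executed names land in the caller's namespace.
from py_include import include

include("scripts/helpers.py", local=locals())
# Any top-level names defined in helpers.py are now usable here, e.g.:
# greet("world")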
1.828125
2
src/scripts/load_data.py
murphycj/agfusionweb-react
1
12381
<filename>src/scripts/load_data.py import pyensembl import sys import sqlite3 import boto3 import pickle dynamodb = boto3.resource('dynamodb') table_agfusion_gene_synonyms = dynamodb.Table('agfusion_gene_synonyms') table_agfusion_genes = dynamodb.Table('agfusion_genes') table_agfusion_sequences = dynamodb.Table('agfusion_sequences') def add_synonym(data, id, ensg): if id != '': if id not in data: data[id] = [ensg] else: data[id].append(ensg) return data def process_gene_synonym(species, release, pyens_db, c): data = {} # get gene synonymes query = c.execute('select * from ' + species + '_' + str(release) + ';').fetchall() for row in query: ensg = row[1] entrez = row[2] symbol = row[3] if ensg!='': data = add_synonym(data, entrez, ensg) data = add_synonym(data, symbol, ensg) else: continue with table_agfusion_gene_synonyms.batch_writer() as batch: for gene_id, ensg in data.items(): batch.put_item( Item={ 'gene_id': gene_id, 'species_release': species + '_' + str(release), 'ensembl_gene_id': ';'.join(ensg) } ) def write(db, species, release): with table_agfusion_sequences.batch_writer() as batch: for gene_id, seq in db.items(): batch.put_item( Item={ 'id': gene_id, 'species_release': species + '_' + str(release), 'sequence': seq } ) def upload_fasta(species, genome, release): # cdna db = pickle.load(open( '/Users/charliemurphy/Library/Caches/pyensembl/{}/ensembl{}/{}.{}.{}cdna.all.fa.gz.pickle'.format( genome, release, species.capitalize(), genome, str(release) + '.' if release <= 75 else '' ))) write(db, species, release) # import pdb; pdb.set_trace() db = pickle.load(open( '/Users/charliemurphy/Library/Caches/pyensembl/{}/ensembl{}/{}.{}.{}ncrna.fa.gz.pickle'.format( genome, release, species.capitalize(), genome, str(release) + '.' if release <= 75 else '' ))) write(db, species, release) # pep db = pickle.load(open( '/Users/charliemurphy/Library/Caches/pyensembl/{}/ensembl{}/{}.{}.{}pep.all.fa.gz.pickle'.format( genome, release, species.capitalize(), genome, str(release) + '.' 
if release <= 75 else '' ))) write(db, species, release) def process_gene_data(species, release, pyens_db, c): protein_db = [ 'pfam', 'smart', 'superfamily', 'tigrfam', 'pfscan', 'tmhmm', 'seg', 'ncoils', 'prints', 'pirsf', 'signalp'] domains = {} for pdb in protein_db: query = c.execute('select * from {}_{}_{}'.format(species, release, pdb)).fetchall() for q in query: ensp = q[1] if ensp not in domains: domains[ensp] = {j:[] for j in protein_db} domains[ensp][pdb].append(list(q[2:])) genes = pyens_db.genes() canonical = c.execute( 'select g.stable_id, t.transcript_stable_id from {}_{} g left join {}_{}_transcript t on g.canonical_transcript_id = t.transcript_id;'.format( species, release, species, release )).fetchall() canonical = dict(canonical) with table_agfusion_genes.batch_writer() as batch: for gene in genes: data = { 'id': gene.id, 'species_release': species + '_' + str(release), 'name': gene.name, 'start': gene.start, 'end': gene.end, 'strand': gene.strand, 'contig': gene.contig, 'biotype': gene.biotype, 'is_protein_coding': gene.is_protein_coding, 'transcripts': {} } for transcript in gene.transcripts: five_prime_utr_len = 0 three_prime_utr_len = 0 if transcript.contains_start_codon: five_prime_utr_len = len(transcript.five_prime_utr_sequence) if transcript.contains_stop_codon: three_prime_utr_len = len(transcript.three_prime_utr_sequence) data['transcripts'][transcript.id] = { 'name': transcript.name, 'start': transcript.start, 'end': transcript.end, 'biotype': transcript.biotype, 'complete': transcript.complete, 'exons': [[i[0], i[1]] for i in transcript.exon_intervals], 'has_start_codon': transcript.contains_start_codon, 'has_stop_codon': transcript.contains_stop_codon, 'five_prime_utr_len': five_prime_utr_len, 'three_prime_utr_len': three_prime_utr_len, 'is_protein_coding': transcript.is_protein_coding, 'protein_id': transcript.protein_id, 'domains': {j: [] for j in protein_db}, 'canonical': True if transcript.id == canonical.get(gene.id, '') else False } if transcript.is_protein_coding: data['transcripts'][transcript.id]['coding'] = \ [[i[0], i[1]] for i in transcript.coding_sequence_position_ranges] if transcript.protein_id in domains: data['transcripts'][transcript.id]['domains'] = domains[transcript.protein_id] # make sure nothing is an empty string, convert to none for pdb in data['transcripts'][transcript.id]['domains'].keys(): for i in range(len(data['transcripts'][transcript.id]['domains'][pdb])): domain = data['transcripts'][transcript.id]['domains'][pdb][i] domain = [j if j else None for j in domain] data['transcripts'][transcript.id]['domains'][pdb][i] = domain try: # table_agfusion_genes.put_item(Item=data) batch.put_item(Item=data) except: import pdb; pdb.set_trace() def process_data(species, release, genome, agfusion): pyens_db = pyensembl.EnsemblRelease(release, species) db = sqlite3.Connection(agfusion) c = db.cursor() # process_gene_synonym(species, release, pyens_db, c) # process_gene_data(species, release, pyens_db, c) upload_fasta(species, genome, release) def put_to_dynamodb(): pass # process_data('homo_sapiens', 94, '/Users/charliemurphy/Downloads/agfusion.homo_sapiens.94.db') # process_data('homo_sapiens', 75, 'GRCh37', '/Users/charliemurphy/Downloads/agfusion.homo_sapiens.75.db') # process_data('mus_musculus', 92, 'GRCm38', '/Users/charliemurphy/Downloads/agfusion.mus_musculus.92.db')
2.296875
2
google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/dataflow/io/multifiles_source.py
bopopescu/searchparty
0
12382
<gh_stars>0 # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Mutiple files/file patterns source. Multiple File source, which reads the union of multiple files and/or file patterns. """ from apache_beam import coders from apache_beam.io import iobase from apache_beam.io.filesystem import CompressionTypes from apache_beam.io.iobase import Read from apache_beam.io.range_trackers import OffsetRangeTracker from apache_beam.io.textio import _TextSource as TextSource from apache_beam.io.tfrecordio import _TFRecordSource as TFRecordSource from apache_beam.transforms import PTransform from apache_beam.transforms.display import DisplayDataItem # pylint: disable=g-import-not-at-top try: from apache_beam.options.value_provider import ValueProvider from apache_beam.options.value_provider import StaticValueProvider except ImportError: from apache_beam.utils.value_provider import ValueProvider from apache_beam.utils.value_provider import StaticValueProvider # pylint: enable=g-import-not-at-top FILE_LIST_SEPARATOR = ',' class MultiFilesSource(iobase.BoundedSource): """Base class for multiple files source. Support to read multiple files or file patterns separated by a comma. Subclass should implement create_source() to actually create sources to use. """ def __init__(self, file_patterns, **kwargs): # Handle the templated values. if not isinstance(file_patterns, (basestring, ValueProvider)): raise TypeError('%s: file_pattern must be of type string' ' or ValueProvider; got %r instead' % (self.__class__.__name__, file_patterns)) if isinstance(file_patterns, basestring): file_patterns = StaticValueProvider(str, file_patterns) self._file_patterns = file_patterns self._sources = [] self._kwargs = kwargs def _populate_sources_lazily(self): # We need to do it lazily because self._file_patterns can be a templated # value and must be evaluated at runtime. if not self._sources: # dedup repeated files or file patterns. 
for file_pattern in list(set(self._file_patterns.get().split( FILE_LIST_SEPARATOR))): self._sources.append(self.create_source(file_pattern.strip(), **self._kwargs)) def estimate_size(self): self._populate_sources_lazily() return sum(s.estimate_size() for s in self._sources) def get_range_tracker(self, start_position, stop_position): self._populate_sources_lazily() if start_position is None: start_position = 0 if stop_position is None: stop_position = len(self._sources) return OffsetRangeTracker(start_position, stop_position) def create_source(self, file_pattern, **kwargs): raise NotImplementedError('MultiFilesSource cannot be used directly.') def read(self, range_tracker): self._populate_sources_lazily() start_source = range_tracker.start_position() stop_source = range_tracker.stop_position() for source_ix in range(start_source, stop_source): if not range_tracker.try_claim(source_ix): break sub_range_tracker = self._sources[source_ix].get_range_tracker(None, None) for record in self._sources[source_ix].read(sub_range_tracker): yield record def split(self, desired_bundle_size, start_position=None, stop_position=None): self._populate_sources_lazily() if start_position or stop_position: raise ValueError( 'Multi-files initial splitting is not supported. Expected start and ' 'stop positions to be None. Received %r and %r respectively.' % (start_position, stop_position)) for source in self._sources: for bundle in source.split(desired_bundle_size): yield bundle def display_data(self): return {'file_patterns': DisplayDataItem(str(self._file_patterns), label='File Patterns')} class _MultiTextSource(MultiFilesSource): """Multiple files source for Text source.""" # TODO(user): Currently liquid sharding is performed on source boundaries. # For text files, a more complicated RangeTracker can be implemented to # support liquid sharding within sub-sources if needed. See ConcatRangeTracker # in concat_source.py for reference. def create_source(self, file_pattern, min_bundle_size=0, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), validate=True, skip_header_lines=0): return TextSource(file_pattern=file_pattern, min_bundle_size=min_bundle_size, compression_type=compression_type, strip_trailing_newlines=strip_trailing_newlines, coder=coders.StrUtf8Coder(), validate=validate, skip_header_lines=skip_header_lines) # TODO(user): currently compression_type is not a ValueProvider valure in # filebased_source, thereby we have to make seperate classes for # non-compressed and compressed version of TFRecord sources. Consider to # make the compression_type a ValueProvider in filebased_source. class _MultiTFRecordSource(MultiFilesSource): """Multiple files source for TFRecord source.""" def create_source(self, file_pattern): return TFRecordSource( file_pattern=file_pattern, coder=coders.BytesCoder(), compression_type=CompressionTypes.AUTO, validate=True) class _MultiTFRecordGZipSource(MultiFilesSource): """Multiple files source for TFRecord source gzipped.""" def create_source(self, file_pattern): return TFRecordSource( file_pattern=file_pattern, coder=coders.BytesCoder(), compression_type=CompressionTypes.GZIP, validate=True) class ReadFromMultiFilesText(PTransform): """A PTransform for reading text files or files patterns. It is a wrapper of ReadFromText but supports multiple files or files patterns. 
""" def __init__( self, file_patterns, min_bundle_size=0, compression_type=CompressionTypes.AUTO, strip_trailing_newlines=True, coder=coders.StrUtf8Coder(), validate=True, skip_header_lines=0, **kwargs): """Initialize the ReadFromText transform. Args: file_patterns: The file paths/patterns to read from as local file paths or GCS files. Paths/patterns seperated by commas. min_bundle_size: Minimum size of bundles that should be generated when splitting this source into bundles. See ``FileBasedSource`` for more details. compression_type: Used to handle compressed input files. Typical value is CompressionTypes.AUTO, in which case the underlying file_path's extension will be used to detect the compression. strip_trailing_newlines: Indicates whether this source should remove the newline char in each line it reads before decoding that line. coder: Coder used to decode each line. validate: flag to verify that the files exist during the pipeline creation time. skip_header_lines: Number of header lines to skip. Same number is skipped from each source file. Must be 0 or higher. Large number of skipped lines might impact performance. **kwargs: optional args dictionary. """ super(ReadFromMultiFilesText, self).__init__(**kwargs) self._source = _MultiTextSource( file_patterns, min_bundle_size=min_bundle_size, compression_type=compression_type, strip_trailing_newlines=strip_trailing_newlines, coder=coder, validate=validate, skip_header_lines=skip_header_lines) def expand(self, pvalue): return pvalue.pipeline | Read(self._source) class ReadFromMultiFilesTFRecord(PTransform): """Transform for reading multiple TFRecord sources. It is a wrapper of ReadFromTFRecord but supports multiple files or files patterns. """ def __init__(self, file_patterns, **kwargs): """Initialize a ReadFromMultiFilesTFRecord transform. Args: file_patterns: file glob patterns to read TFRecords from. **kwargs: optional args dictionary. Returns: A ReadFromTFRecord transform object. """ super(ReadFromMultiFilesTFRecord, self).__init__(**kwargs) self._source = _MultiTFRecordSource(file_patterns) def expand(self, pvalue): return pvalue.pipeline | Read(self._source) class ReadFromMultiFilesTFRecordGZip(PTransform): """Transform for reading multiple TFRecord Gzipped sources. It is a wrapper of ReadFromTFRecord gzipped but supports multiple files or files patterns. """ def __init__(self, file_patterns, **kwargs): """Initialize a ReadFromMultiFilesTFRecordGzip transform. Args: file_patterns: file glob patterns to read TFRecords from. **kwargs: optional args dictionary. Returns: A ReadFromTFRecord transform object. """ super(ReadFromMultiFilesTFRecordGZip, self).__init__(**kwargs) self._source = _MultiTFRecordGZipSource(file_patterns) def expand(self, pvalue): return pvalue.pipeline | Read(self._source)
1.625
2
eureka/S5_lightcurve_fitting/s5_fit.py
evamariaa/Eureka
15
12383
<reponame>evamariaa/Eureka import numpy as np import matplotlib.pyplot as plt import glob, os, time from ..lib import manageevent as me from ..lib import readECF as rd from ..lib import sort_nicely as sn from ..lib import util, logedit from . import parameters as p from . import lightcurve as lc from . import models as m from .utils import get_target_data #FINDME: Keep reload statements for easy testing from importlib import reload reload(p) reload(m) reload(lc) class MetaClass: '''A class to hold Eureka! metadata. ''' def __init__(self): return def fitJWST(eventlabel, s4_meta=None): '''Fits 1D spectra with various models and fitters. Parameters ---------- eventlabel: str The unique identifier for these data. s4_meta: MetaClass The metadata object from Eureka!'s S4 step (if running S4 and S5 sequentially). Returns ------- meta: MetaClass The metadata object with attributes added by S5. Notes ------- History: - November 12-December 15, 2021 <NAME> Original version - December 17-20, 2021 <NAME> Connecting S5 to S4 outputs - December 17-20, 2021 <NAME> Increasing connectedness of S5 and S4 ''' print("\nStarting Stage 5: Light Curve Fitting\n") # Initialize a new metadata object meta = MetaClass() meta.eventlabel = eventlabel # Load Eureka! control file and store values in Event object ecffile = 'S5_' + eventlabel + '.ecf' ecf = rd.read_ecf(ecffile) rd.store_ecf(meta, ecf) # load savefile if s4_meta == None: # Search for the S2 output metadata in the inputdir provided in # First just check the specific inputdir folder rootdir = os.path.join(meta.topdir, *meta.inputdir.split(os.sep)) if rootdir[-1]!='/': rootdir += '/' files = glob.glob(rootdir+'S4_'+meta.eventlabel+'*_Meta_Save.dat') if len(files)==0: # There were no metadata files in that folder, so let's see if there are in children folders files = glob.glob(rootdir+'**/S4_'+meta.eventlabel+'*_Meta_Save.dat', recursive=True) files = sn.sort_nicely(files) if len(files)==0: # There may be no metafiles in the inputdir - raise an error and give a helpful message raise AssertionError('Unable to find an output metadata file from Eureka!\'s S4 step ' +'in the inputdir: \n"{}"!'.format(rootdir)) elif len(files)>1: # There may be multiple runs - use the most recent but warn the user print('WARNING: There are multiple metadata save files in your inputdir: \n"{}"\n'.format(rootdir) +'Using the metadata file: \n{}\n'.format(files[-1]) +'and will consider aperture ranges listed there. If this metadata file is not a part\n' +'of the run you intended, please provide a more precise folder for the metadata file.') fname = files[-1] # Pick the last file name (should be the most recent or only file) fname = fname[:-4] # Strip off the .dat ending s4_meta = me.loadevent(fname) # Need to remove the topdir from the outputdir s4_outputdir = s4_meta.outputdir[len(s4_meta.topdir):] if s4_outputdir[0]=='/': s4_outputdir = s4_outputdir[1:] s4_allapers = s4_meta.allapers # Overwrite the temporary meta object made above to be able to find s4_meta meta = s4_meta # Load Eureka! 
control file and store values in the S4 metadata object ecffile = 'S5_' + eventlabel + '.ecf' ecf = rd.read_ecf(ecffile) rd.store_ecf(meta, ecf) # Overwrite the inputdir with the exact output directory from S4 meta.inputdir = s4_outputdir meta.old_datetime = s4_meta.datetime # Capture the date that the meta.datetime = None # Reset the datetime in case we're running this on a different day meta.inputdir_raw = meta.inputdir meta.outputdir_raw = meta.outputdir if (not s4_allapers) or (not meta.allapers): # The user indicated in the ecf that they only want to consider one aperture # in which case the code will consider only the one which made s4_meta. # Alternatively, S4 was run without allapers, so S5's allapers will only conside that one meta.spec_hw_range = [meta.spec_hw,] meta.bg_hw_range = [meta.bg_hw,] run_i = 0 for spec_hw_val in meta.spec_hw_range: for bg_hw_val in meta.bg_hw_range: t0 = time.time() meta.spec_hw = spec_hw_val meta.bg_hw = bg_hw_val # Do some folder swapping to be able to reuse this function to find S4 outputs tempfolder = meta.outputdir_raw meta.outputdir_raw = meta.inputdir_raw meta.inputdir = util.pathdirectory(meta, 'S4', meta.runs[run_i], old_datetime=meta.old_datetime, ap=spec_hw_val, bg=bg_hw_val) meta.outputdir_raw = tempfolder run_i += 1 if meta.testing_S5: # Only fit a single channel while testing chanrng = [0] else: chanrng = range(meta.nspecchan) for channel in chanrng: # Create directories for Stage 5 processing outputs run = util.makedirectory(meta, 'S5', ap=spec_hw_val, bg=bg_hw_val, ch=channel) meta.outputdir = util.pathdirectory(meta, 'S5', run, ap=spec_hw_val, bg=bg_hw_val, ch=channel) # Copy existing S4 log file and resume log meta.s5_logname = meta.outputdir + 'S5_' + meta.eventlabel + ".log" log = logedit.Logedit(meta.s5_logname, read=meta.s4_logname) log.writelog("\nStarting Channel {} of {}\n".format(channel+1, meta.nspecchan)) log.writelog(f"Input directory: {meta.inputdir}") log.writelog(f"Output directory: {meta.outputdir}") # Copy ecf (and update outputdir in case S5 is being called sequentially with S4) log.writelog('Copying S5 control file') # shutil.copy(ecffile, meta.outputdir) new_ecfname = meta.outputdir + ecffile.split('/')[-1] with open(new_ecfname, 'w') as new_file: with open(ecffile, 'r') as file: for line in file.readlines(): if len(line.strip())==0 or line.strip()[0]=='#': new_file.write(line) else: line_segs = line.strip().split() if line_segs[0]=='inputdir': new_file.write(line_segs[0]+'\t\t/'+meta.inputdir+'\t'+' '.join(line_segs[2:])+'\n') else: new_file.write(line) # Set the intial fitting parameters params = p.Parameters(param_file=meta.fit_par) # Subtract off the zeroth time value to avoid floating point precision problems when fitting for t0 t_offset = int(np.floor(meta.bjdtdb[0])) t_mjdtdb = meta.bjdtdb - t_offset params.t0.value -= t_offset # Get the flux and error measurements for the current channel flux = meta.lcdata[channel,:] flux_err = meta.lcerr[channel,:] # Normalize flux and uncertainties to avoid large flux values flux_err /= flux.mean() flux /= flux.mean() if meta.testing_S5: # FINDME: Use this area to add systematics into the data # when testing new systematics models. In this case, I'm # introducing an exponential ramp to test m.ExpRampModel(). 
log.writelog('****Adding exponential ramp systematic to light curve****') fakeramp = m.ExpRampModel(parameters=params, name='ramp', fmt='r--') fakeramp.coeffs = np.array([-1,40,-3, 0, 0, 0]) flux *= fakeramp.eval(time=t_mjdtdb) # Load the relevant values into the LightCurve model object lc_model = lc.LightCurve(t_mjdtdb, flux, channel, meta.nspecchan, unc=flux_err, name=eventlabel, time_units=f'MJD_TDB = BJD_TDB - {t_offset}') # Make the astrophysical and detector models modellist=[] if 'transit' in meta.run_myfuncs: t_model = m.TransitModel(parameters=params, name='transit', fmt='r--') modellist.append(t_model) if 'polynomial' in meta.run_myfuncs: t_polynom = m.PolynomialModel(parameters=params, name='polynom', fmt='r--') modellist.append(t_polynom) if 'expramp' in meta.run_myfuncs: t_ramp = m.ExpRampModel(parameters=params, name='ramp', fmt='r--') modellist.append(t_ramp) model = m.CompositeModel(modellist) # Fit the models using one or more fitters log.writelog("=========================") if 'lsq' in meta.fit_method: log.writelog("Starting lsq fit.") model.fitter = 'lsq' lc_model.fit(model, meta, fitter='lsq') log.writelog("Completed lsq fit.") log.writelog("-------------------------") if 'emcee' in meta.fit_method: log.writelog("Starting emcee fit.") model.fitter = 'emcee' lc_model.fit(model, meta, fitter='emcee') log.writelog("Completed emcee fit.") log.writelog("-------------------------") if 'dynesty' in meta.fit_method: log.writelog("Starting dynesty fit.") model.fitter = 'dynesty' lc_model.fit(model, meta, fitter='dynesty') log.writelog("Completed dynesty fit.") log.writelog("-------------------------") if 'lmfit' in meta.fit_method: log.writelog("Starting lmfit fit.") model.fitter = 'lmfit' lc_model.fit(model, meta, fitter='lmfit') log.writelog("Completed lmfit fit.") log.writelog("-------------------------") log.writelog("=========================") # Plot the results from the fit(s) if meta.isplots_S5 >= 1: lc_model.plot(meta) return meta, lc_model
2.078125
2
model_compression_toolkit/common/graph/graph_matchers.py
eladc-git/model_optimization
0
12384
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from typing import Any, List from model_compression_toolkit.common.graph.base_node import BaseNode from model_compression_toolkit.common.matchers import node_matcher, walk_matcher, edge_matcher class NodeOperationMatcher(node_matcher.BaseNodeMatcher): """ Class NodeOperationMatcher to check if the layer class of a node matches a specific layer. """ def __init__(self, operation: Any): """ Init for class NodeOperationMathcer. Args: operation: Which layer to check if matches. """ self.operation = operation def apply(self, input_node_object: Any) -> bool: """ Check if input_node_object matches the matcher condition. Args: input_node_object: Node object to check the matcher on. Returns: True if input_node_object is the layer the NodeOperationMatcher holds. Otherwise, return nothing. """ if input_node_object.type == self.operation: return True class NodeFrameworkAttrMatcher(node_matcher.BaseNodeMatcher): """ Class NodeFrameworkAttrMatcher to check if a node's attribute has a specific value. """ def __init__(self, attr_name: str, attr_value: Any): """ Init a NodeFrameworkAttrMatcher object. Args: attr_name: Name of node's attribute to check. attr_value: Value to check if the attribute is equal to. """ self.attr_name = attr_name self.attr_value = attr_value def apply(self, input_node_object: Any) -> bool: """ Check if input_node_object has an attribute with the value the NodeFrameworkAttrMatcher contains. Args: input_node_object: Node object to check for its attribute and value. Returns: True if the node has an attribute with the attribute name and the value that were passed during the initialization of NodeFrameworkAttrMatcher. """ if self.attr_name in input_node_object.framework_attr: if input_node_object.framework_attr[self.attr_name] == self.attr_value: return True class EdgeMatcher(edge_matcher.BaseEdgeMatcher): """ class EdgeMatcher to check if an edge matches an edge that EdgeMatcher contains. """ def __init__(self, source_matcher: BaseNode, target_matcher: BaseNode): """ Init an EdgeMatcher object. Args: source_matcher: Source node to match. target_matcher: Destination node to match. """ super().__init__(source_matcher, target_matcher) def apply(self, input_object: Any) -> bool: """ Check if input_object is a tuple of two nodes and the same nodes that were passed during the EdgeMatcher initialization. Args: input_object: Object to check if equals to the edge EdgeMatcher holds. Returns: Whether input_object is equal to the edge EdgeMatcher holds or not. """ if isinstance(input_object, tuple) and len(input_object) >= 2: return self.source_matcher.apply(input_object[0]) and self.target_matcher.apply(input_object[1]) else: return False class WalkMatcher(walk_matcher.WalkMatcherList): """ Class WalkMatcher to check if a list of nodes matches another list of nodes. 
""" def __init__(self, matcher_list: List[BaseNode]): """ Init a WalkMatcher object. Args: matcher_list: List of nodes to holds for checking. """ super().__init__(matcher_list) def apply(self, input_object: Any) -> bool: # not in use """ Check if a list of nodes matches the list of nodes the WalkMatcher holds. Args: input_object: Object to check. Returns: True if input_object matches the list of nodes the WalkMatcher holds. """ pass # pragma: no cover
2.375
2
initialize_app_db.py
daniel-julio-iglesias/microblog
0
12385
<filename>initialize_app_db.py<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- """ The next steps use just in case to recreate the already existing DB Backup and Delete the folder "migrations" Backup and Delete the file "app.db" Execute the next console commands Linux (venv) $ export FLASK_APP=microblog.py MS Windows (venv) $ set FLASK_APP=microblog.py (venv) $ flask db init (venv) $ flask db migrate -m "initialization" (venv) $ python initialize_app_db.py ### (venv) $ flask shell (venv) $ flask run http://localhost:5000/ http://localhost:5000/index Use the function "initialize_data_into_db()" for data recreation. Use the function "remove_data_from_db()" for data deletion. Then you can simply use again the function "initialize_data_into_db()" for data recreation. """ from datetime import datetime, timedelta from app import create_app, db from app.models import User, Post from config import Config def initialize_data_into_db(): app = create_app(Config) app_context = app.app_context() app_context.push() db.create_all() u1 = User(username='john', email='<EMAIL>') u2 = User(username='susan', email='<EMAIL>') u3 = User(username='mary', email='<EMAIL>') u4 = User(username='david', email='<EMAIL>') u5 = User(username='daniel', email='<EMAIL>') u5.set_password('<PASSWORD>') db.session.add_all([u1, u2, u3, u4, u5]) now = datetime.utcnow() p1 = Post(body="post from john", author=u1, timestamp=now + timedelta(seconds=1)) p2 = Post(body="post from susan", author=u2, timestamp=now + timedelta(seconds=4)) p3 = Post(body="post from mary", author=u3, timestamp=now + timedelta(seconds=3)) p4 = Post(body="post from david", author=u4, timestamp=now + timedelta(seconds=2)) p5 = Post(body="My post number one.", author=u5, timestamp=now + timedelta(seconds=5)) p6 = Post(body="My post number two.", author=u5, timestamp=now + timedelta(seconds=6)) p7 = Post(body="My post number three.", author=u5, timestamp=now + timedelta(seconds=7)) p8 = Post(body="My post number four.", author=u5, timestamp=now + timedelta(seconds=8)) p9 = Post(body="My post number five.", author=u5, timestamp=now + timedelta(seconds=9)) db.session.add_all([p1, p2, p3, p4, p5, p6, p7, p8, p9]) db.session.commit() u1.follow(u2) u1.follow(u4) u2.follow(u3) u3.follow(u4) db.session.commit() users = User.query.all() print(users) """ [<User john>, <User susan>] """ for u in users: print(u.id, u.username) def remove_data_from_db(): """ In case of removing data... """ app = create_app(Config) app_context = app.app_context() app_context.push() db.create_all() db.session.remove() db.drop_all() app_context.pop() if __name__ == '__main__': initialize_data_into_db() # remove_data_from_db()
2.8125
3
gpytorch/kernels/rbf_kernel.py
techshot25/gpytorch
1
12386
<reponame>techshot25/gpytorch #!/usr/bin/env python3 from .kernel import Kernel from ..functions import RBFCovariance def postprocess_rbf(dist_mat): return dist_mat.div_(-2).exp_() class RBFKernel(Kernel): r""" Computes a covariance matrix based on the RBF (squared exponential) kernel between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`: .. math:: \begin{equation*} k_{\text{RBF}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left( -\frac{1}{2} (\mathbf{x_1} - \mathbf{x_2})^\top \Theta^{-2} (\mathbf{x_1} - \mathbf{x_2}) \right) \end{equation*} where :math:`\Theta` is a :attr:`lengthscale` parameter. See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options. .. note:: This kernel does not have an `outputscale` parameter. To add a scaling parameter, decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`. Args: :attr:`ard_num_dims` (int, optional): Set this if you want a separate lengthscale for each input dimension. It should be `d` if :attr:`x1` is a `n x d` matrix. Default: `None` :attr:`batch_shape` (torch.Size, optional): Set this if you want a separate lengthscale for each batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`. :attr:`active_dims` (tuple of ints, optional): Set this if you want to compute the covariance of only a few input dimensions. The ints corresponds to the indices of the dimensions. Default: `None`. :attr:`lengthscale_prior` (Prior, optional): Set this if you want to apply a prior to the lengthscale parameter. Default: `None`. :attr:`lengthscale_constraint` (Constraint, optional): Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`. :attr:`eps` (float): The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`. Attributes: :attr:`lengthscale` (Tensor): The lengthscale parameter. Size/shape of parameter depends on the :attr:`ard_num_dims` and :attr:`batch_shape` arguments. Example: >>> x = torch.randn(10, 5) >>> # Non-batch: Simple option >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()) >>> # Non-batch: ARD (different lengthscale for each input dimension) >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=5)) >>> covar = covar_module(x) # Output: LazyTensor of size (10 x 10) >>> >>> batch_x = torch.randn(2, 10, 5) >>> # Batch: Simple option >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()) >>> # Batch: different lengthscale for each batch >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(batch_shape=torch.Size([2]))) >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 10 x 10) """ def __init__(self, **kwargs): super(RBFKernel, self).__init__(has_lengthscale=True, **kwargs) def forward(self, x1, x2, diag=False, **params): if ( x1.requires_grad or x2.requires_grad or (self.ard_num_dims is not None and self.ard_num_dims > 1) or diag ): x1_ = x1.div(self.lengthscale) x2_ = x2.div(self.lengthscale) return self.covar_dist(x1_, x2_, square_dist=True, diag=diag, dist_postprocess_func=postprocess_rbf, postprocess=True, **params) return RBFCovariance().apply(x1, x2, self.lengthscale, lambda x1, x2: self.covar_dist(x1, x2, square_dist=True, diag=False, dist_postprocess_func=postprocess_rbf, postprocess=False, **params))
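A short numerical sketch of what postprocess_rbf computes: forward() divides the inputs by the lengthscale, takes squared distances, and postprocess_rbf then applies exp(-d^2 / 2), which is the familiar exp(-||x1 - x2||^2 / (2 * lengthscale^2)). The numpy re-implementation below only mirrors that math for illustration; it is not part of gpytorch, and the values are arbitrary.

# Sketch: the RBF value produced by postprocess_rbf, reproduced with plain numpy.
import numpy as np

lengthscale = 2.0
x1 = np.array([[0.0], [1.0], [3.0]]) / lengthscale   # 3 points, 1 feature, scaled
x2 = np.array([[0.5], [2.0]]) / lengthscale          # 2 points, 1 feature, scaled

sq_dist = (x1 - x2.T) ** 2        # pairwise squared distances, shape (3, 2)
k = np.exp(-sq_dist / 2)          # postprocess_rbf: dist_mat.div_(-2).exp_()

print(k)
# Entry (0, 0) equals exp(-(0.0 - 0.5)**2 / (2 * lengthscale**2)).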
2.9375
3
src/dependenpy/finder.py
gitter-badger/dependenpy
10
12387
# -*- coding: utf-8 -*- """dependenpy finder module.""" from importlib.util import find_spec from os.path import basename, exists, isdir, isfile, join, splitext class PackageSpec(object): """Holder for a package specification (given as argument to DSM).""" def __init__(self, name, path, limit_to=None): """ Initialization method. Args: name (str): name of the package. path (str): path to the package. limit_to (list of str): limitations. """ self.name = name self.path = path self.limit_to = limit_to or [] def __hash__(self): return hash((self.name, self.path)) @property def ismodule(self): """Property to tell if the package is in fact a module (a file).""" return self.path.endswith(".py") def add(self, spec): """ Add limitations of given spec to self's. Args: spec (PackageSpec): another spec. """ for limit in spec.limit_to: if limit not in self.limit_to: self.limit_to.append(limit) @staticmethod def combine(specs): """ Combine package specifications' limitations. Args: specs (list of PackageSpec): the package specifications. Returns: list of PackageSpec: the new, merged list of PackageSpec. """ new_specs = {} for spec in specs: if new_specs.get(spec, None) is None: new_specs[spec] = spec else: new_specs[spec].add(spec) return list(new_specs.values()) class PackageFinder(object): """Abstract package finder class.""" def find(self, package, **kwargs): """ Find method. Args: package (str): package to find. **kwargs (): additional keyword arguments. Returns: PackageSpec: the PackageSpec corresponding to the package, or None. """ raise NotImplementedError class LocalPackageFinder(PackageFinder): """Finder to find local packages (directories on the disk).""" def find(self, package, **kwargs): """ Find method. Args: package (str): package to find. **kwargs (): additional keyword arguments. Returns: PackageSpec: the PackageSpec corresponding to the package, or None. """ if not exists(package): return None name, path = None, None enforce_init = kwargs.pop("enforce_init", True) if isdir(package): if isfile(join(package, "__init__.py")) or not enforce_init: name, path = basename(package), package elif isfile(package) and package.endswith(".py"): name, path = splitext(basename(package))[0], package if name and path: return PackageSpec(name, path) return None class InstalledPackageFinder(PackageFinder): """Finder to find installed Python packages using importlib.""" def find(self, package, **kwargs): """ Find method. Args: package (str): package to find. **kwargs (): additional keyword arguments. Returns: PackageSpec: the PackageSpec corresponding to the package, or None. """ spec = find_spec(package) if spec is None: return None limit = [] if "." in package: package, limit = package.split(".", 1) limit = [limit] spec = find_spec(package) if spec is not None: if spec.submodule_search_locations: path = spec.submodule_search_locations[0] elif spec.origin and spec.origin != "built-in": path = spec.origin else: return None return PackageSpec(spec.name, path, limit) return None class Finder(object): """ Main package finder class. Initialize it with a list of package finder classes (not instances). """ def __init__(self, finders=None): """ Initialization method. Args: finders (list of classes): list of package finder classes (not instances) in a specific order. Default: [LocalPackageFinder, InstalledPackageFinder]. 
""" if finders is None: self.finders = [LocalPackageFinder(), InstalledPackageFinder()] else: self.finders = [f() for f in finders] def find(self, package, **kwargs): """ Find a package using package finders. Return the first package found. Args: package (str): package to find. **kwargs (): additional keyword arguments used by finders. Returns: PackageSpec: if package found, else None """ for finder in self.finders: package_spec = finder.find(package, **kwargs) if package_spec: return package_spec return None
2.625
3
pearsonr/beta.py
rkhullar/pearsonr-pure-python
0
12388
import math def contfractbeta(a: float, b: float, x: float, itmax: int = 200) -> float: # https://malishoaib.wordpress.com/2014/04/15/the-beautiful-beta-functions-in-raw-python/ # evaluates the continued fraction form of the incomplete Beta function; incompbeta() # code translated from: Numerical Recipes in C eps = 3.0e-7 bm = az = am = 1.0 qab = a + b qap = a + 1.0 qam = a - 1.0 bz = 1.0 - qab * x / qap for i in range(itmax + 1): em = float(i + 1) tem = em + em d = em * (b - em) * x / ((qam + tem) * (a + tem)) ap = az + d * am bp = bz + d * bm d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem)) app = ap + d * az bpp = bp + d * bz aold = az am = ap / bpp bm = bp / bpp az = app / bpp bz = 1.0 if abs(az - aold) < (eps * abs(az)): return az message = 'a or b too large or given itmax too small for computing incomplete beta function.' raise ValueError(message) def incompbeta(a: float, b: float, x: float) -> float: # https://malishoaib.wordpress.com/2014/04/15/the-beautiful-beta-functions-in-raw-python/ # evaluates incomplete beta function, here a, b > 0 and 0 <= x <= 1 # this function requires contfractbeta(a,b,x, itmax = 200) # code translated from: Numerical Recipes in C if x == 0 or x == 1: return x else: lbeta = math.lgamma(a + b) - math.lgamma(a) - math.lgamma(b) + a * math.log(x) + b * math.log(1 - x) if x < (a + 1) / (a + b + 2): return math.exp(lbeta) * contfractbeta(a, b, x) / a else: return 1 - math.exp(lbeta) * contfractbeta(b, a, 1 - x) / b
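The incompbeta() above is the regularized incomplete beta function I_x(a, b), so it can be sanity-checked against scipy.special.betainc. The sketch below does exactly that; scipy is used only for the cross-check and is not a dependency of this module, and the test points are arbitrary.

# Sketch: cross-checking incompbeta() against scipy's regularized incomplete beta.
from scipy.special import betainc

for a, b, x in [(2.0, 3.0, 0.4), (0.5, 0.5, 0.9), (10.0, 2.0, 0.75)]:
    ours = incompbeta(a, b, x)
    ref = betainc(a, b, x)
    print(f"I_{x}({a}, {b}): ours={ours:.10f} scipy={ref:.10f} diff={abs(ours - ref):.2e}")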
3.15625
3
hisim/components/generic_pv_system.py
FZJ-IEK3-VSA/HiSim
12
12389
<reponame>FZJ-IEK3-VSA/HiSim # Generic/Built-in import datetime import math import os import numpy as np import matplotlib.pyplot as plt import pandas as pd import pvlib from dataclasses_json import dataclass_json from typing import Optional from dataclasses import dataclass from functools import lru_cache from hisim.simulationparameters import SimulationParameters # Owned from hisim import component as cp from hisim import loadtypes as lt from hisim import utils from hisim import log from hisim.components.weather import Weather __authors__ = "<NAME>" __copyright__ = "Copyright 2021, the House Infrastructure Project" __credits__ = ["<NAME>"] __license__ = "MIT" __version__ = "0.1" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "development" """ The functions cited in this module are at some degree based on the tsib project: [tsib-kotzur]: <NAME>, <NAME>, and <NAME>. Future grid load of the residential building sector. No. RWTH-2018-231872. Lehrstuhl für Brennstoffzellen (FZ Jülich), 2019. ID: http://hdl.handle.net/2128/21115 http://nbn-resolving.org/resolver?verb=redirect&identifier=urn:nbn:de:0001-2019020614 The implementation of the tsib project can be found under the following repository: https://github.com/FZJ-IEK3-VSA/tsib """ temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"] @lru_cache(maxsize=16) def simPhotovoltaicFast( dni_extra=None, DNI=None, DHI=None, GHI=None, azimuth=None, apparent_zenith=None, temperature=None, wind_speed=None, surface_azimuth : float = 180, surface_tilt : float = 30 ): """ Simulates a defined PV array with the Sandia PV Array Performance Model. The implementation is done in accordance with following tutorial: https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb Parameters ---------- surface_tilt: int or float, optional (default:30) Tilt angle of of the array in degree. surface_azimuth: int or float, optional (default:180) Azimuth angle of of the array in degree. 180 degree means south, 90 degree east and 270 west. losses: float, optional (default: 0.1) Losses due to soiling, mismatch, diode connections, dc wiring etc. Returns -------- """ poa_irrad = pvlib.irradiance.get_total_irradiance( surface_tilt, surface_azimuth, apparent_zenith, azimuth, DNI, GHI, DHI, dni_extra ) pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **temp_model) pv_dc = pvlib.pvsystem.pvwatts_dc( poa_irrad[ "poa_global" ], temp_cell = pvtemps, pdc0 = 1, gamma_pdc = -0.002, temp_ref = 25.0 ) if math.isnan(pv_dc): pv_dc = 0 return pv_dc def simPhotovoltaicSimple( dni_extra=None, DNI=None, DHI=None, GHI=None, azimuth=None, apparent_zenith=None, temperature=None, wind_speed=None, surface_tilt=30, surface_azimuth=180, albedo=0.2): """ Simulates a defined PV array with the Sandia PV Array Performance Model. The implementation is done in accordance with following tutorial: https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb Based on the tsib project @[tsib-kotzur] (Check header) Parameters ---------- tmy_data: pandas.DataFrame(), required Weatherfile in the format of a tmy file. surface_tilt: int or float, optional (default:30) Tilt angle of of the array in degree. surface_azimuth: int or float, optional (default:180) Azimuth angle of of the array in degree. 180 degree means south, 90 degree east and 270 west. albedo: float, optional (default: 0.2) Reflection coefficient of the surrounding area. 
losses: float, optional (default: 0.1) Losses due to soiling, mismatch, diode connections, dc wiring etc. load_module_data: Boolean, optional (default: False) If True the module data base is loaded from the Sandia Website. Otherwise it is loaded from this relative path '\\profiles\\PV-Modules\\sandia_modules.csv'. module_name: str, optional (default:'Hanwha_HSL60P6_PA_4_250T__2013_') Module name. The string must be existens in Sandia Module database. integrateInverter: bool, optional (default: True) If an inverter shall be added to the simulation, providing the photovoltaic output after the inverter. inverter_name: str, optional (default: 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_') Type of inverter. Returns -------- """ # automatic pd time series in future pvlib version # calculate airmass airmass = pvlib.atmosphere.get_relative_airmass(apparent_zenith) # use perez model to calculate the plane of array diffuse sky radiation poa_sky_diffuse = pvlib.irradiance.perez( surface_tilt, surface_azimuth, DHI, np.float64(DNI), dni_extra, apparent_zenith, azimuth, airmass, ) # calculate ground diffuse with specified albedo poa_ground_diffuse = pvlib.irradiance.get_ground_diffuse( surface_tilt, GHI, albedo=albedo ) # calculate angle of incidence aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth, apparent_zenith, azimuth) # calculate plane of array irradiance poa_irrad = pvlib.irradiance.poa_components(aoi, np.float64(DNI), poa_sky_diffuse, poa_ground_diffuse) # calculate pv cell and module temperature temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"] pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **temp_model) pv_dc = pvlib.pvsystem.pvwatts_dc(poa_irrad["poa_global"], temp_cell=pvtemps, pdc0=1, gamma_pdc=-0.002, temp_ref=25.0) if math.isnan(pv_dc): pv_dc = 0 return pv_dc @dataclass_json @dataclass class PVSystemConfig: parameter_string: str time: int location: str module_name:str integrate_inverter: bool inverter_name:str power: float def __init__(self, my_simulation_parameters: SimulationParameters, time:int, location:str, power:float, module_name:str, integrate_inverter:bool, inverter_name:str ): self.parameter_string = my_simulation_parameters.get_unique_key() self.time = time self.location = location self.module_name = module_name self.integrate_inverter = integrate_inverter self.inverter_name = inverter_name self.power = power class PVSystem(cp.Component): """ Parameters: ----------------------------------------------------- time: simulation timeline location: Location object Location with temperature and solar data power: float Power in kWp to be provided by the PV System Returns: ----------------------------------------------------- pass """ # Inputs TemperatureOutside = "TemperatureOutside" DirectNormalIrradiance = "DirectNormalIrradiance" DirectNormalIrradianceExtra = "DirectNormalIrradianceExtra" DiffuseHorizontalIrradiance = "DiffuseHorizontalIrradiance" GlobalHorizontalIrradiance = "GlobalHorizontalIrradiance" Azimuth = "Azimuth" ApparentZenith = "ApparentZenith" WindSpeed = "WindSpeed" # Outputs ElectricityOutput = "ElectricityOutput" #Forecasts PV_Forecast_24h = "PV_Forecast_24h" # Similar components to connect to: # 1. 
Weather @utils.measure_execution_time def __init__(self, my_simulation_parameters: SimulationParameters, my_simulation_repository : Optional[ cp.SimRepository ] = None, time : int = 2019, location : str = "Aachen", power : float = 10E3, load_module_data : bool = False, module_name : str = "Hanwha_HSL60P6_PA_4_250T__2013_", integrateInverter : bool = True, inverter_name : str = "ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_" ): super().__init__( "PVSystem", my_simulation_parameters = my_simulation_parameters ) self.pvconfig = PVSystemConfig(my_simulation_parameters=my_simulation_parameters, location=location, power = power, module_name=module_name, integrate_inverter=integrateInverter, inverter_name=inverter_name, time=time) self.build( load_module_data, my_simulation_repository ) self.t_outC : cp.ComponentInput = self.add_input(self.ComponentName, self.TemperatureOutside, lt.LoadTypes.Temperature, lt.Units.Celsius, True) self.DNIC : cp.ComponentInput = self.add_input(self.ComponentName, self.DirectNormalIrradiance, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.DNIextraC : cp.ComponentInput = self.add_input(self.ComponentName, self.DirectNormalIrradianceExtra, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.DHIC: cp.ComponentInput = self.add_input(self.ComponentName, self.DiffuseHorizontalIrradiance, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.GHIC: cp.ComponentInput = self.add_input(self.ComponentName, self.GlobalHorizontalIrradiance, lt.LoadTypes.Irradiance, lt.Units.Wm2, True) self.azimuthC : cp.ComponentInput = self.add_input(self.ComponentName, self.Azimuth, lt.LoadTypes.Any, lt.Units.Degrees, True) self.apparent_zenithC : cp.ComponentInput = self.add_input(self.ComponentName, self.ApparentZenith, lt.LoadTypes.Any, lt.Units.Degrees, True) self.wind_speedC: cp.ComponentInput = self.add_input(self.ComponentName, self.WindSpeed, lt.LoadTypes.Speed, lt.Units.MeterPerSecond, True) self.electricity_outputC : cp.ComponentOutput = self.add_output(self.ComponentName, PVSystem.ElectricityOutput, lt.LoadTypes.Electricity, lt.Units.Watt, False) self.add_default_connections(Weather, self.get_weather_default_connections()) def get_weather_default_connections(self): log.information("setting weather default connections") connections = [] weather_classname = Weather.get_classname() connections.append(cp.ComponentConnection(PVSystem.TemperatureOutside,weather_classname, Weather.TemperatureOutside)) connections.append(cp.ComponentConnection(PVSystem.DirectNormalIrradiance,weather_classname, Weather.DirectNormalIrradiance)) connections.append(cp.ComponentConnection(PVSystem.DirectNormalIrradianceExtra,weather_classname, Weather.DirectNormalIrradianceExtra)) connections.append(cp.ComponentConnection(PVSystem.DiffuseHorizontalIrradiance,weather_classname, Weather.DiffuseHorizontalIrradiance)) connections.append(cp.ComponentConnection(PVSystem.GlobalHorizontalIrradiance,weather_classname, Weather.GlobalHorizontalIrradiance)) connections.append(cp.ComponentConnection(PVSystem.Azimuth,weather_classname, Weather.Azimuth)) connections.append(cp.ComponentConnection(PVSystem.ApparentZenith,weather_classname, Weather.ApparentZenith)) connections.append(cp.ComponentConnection(PVSystem.WindSpeed,weather_classname, Weather.WindSpeed)) return connections def i_restore_state(self): pass def write_to_report(self): lines = [] lines.append("Name: {}".format(self.ComponentName)) lines.append("Power: {:3.0f} kWp".format(self.pvconfig.power*1E-3)) lines.append("Module: {}".format(self.pvconfig.module_name)) 
lines.append("Inverter: {}".format(self.pvconfig.inverter_name)) return lines def i_simulate(self, timestep: int, stsv: cp.SingleTimeStepValues, force_convergence: bool): if hasattr(self, "output"): #if(len(self.output) < timestep) # raise Exception("Somehow the precalculated list of values for the PV system seems to be incorrect. Please delete the cache.") stsv.set_output_value(self.electricity_outputC, self.output[timestep] * self.pvconfig.power) else: DNI = stsv.get_input_value(self.DNIC) dni_extra = stsv.get_input_value(self.DNIextraC) DHI = stsv.get_input_value(self.DHIC) GHI = stsv.get_input_value(self.GHIC) azimuth = stsv.get_input_value(self.azimuthC) temperature = stsv.get_input_value(self.t_outC) wind_speed = stsv.get_input_value(self.wind_speedC) apparent_zenith = stsv.get_input_value(self.apparent_zenithC) #ac_power = self.simPhotovoltaic2(dni_extra=dni_extra, # DNI=DNI, # DHI=DHI, # GHI=GHI, # azimuth=azimuth, # apparent_zenith=apparent_zenith, # temperature=temperature, # wind_speed=wind_speed) #ac_power = simPhotovoltaicSimple( # dni_extra=dni_extra, # DNI=DNI, # DHI=DHI, # GHI=GHI, # azimuth=azimuth, # apparent_zenith=apparent_zenith, # temperature=temperature, # wind_speed=wind_speed) ac_power = simPhotovoltaicFast( dni_extra=dni_extra, DNI=DNI, DHI=DHI, GHI=GHI, azimuth=azimuth, apparent_zenith=apparent_zenith, temperature=temperature, wind_speed=wind_speed) resultingvalue = ac_power * self.pvconfig.power # if you wanted to access the temperature forecast from the weather component: # val = self.simulation_repository.get_entry(Weather.Weather_Temperature_Forecast_24h) stsv.set_output_value(self.electricity_outputC, resultingvalue) self.data[timestep] = ac_power if timestep + 1 == self.data_length: database = pd.DataFrame(self.data, columns=["output"]) database.to_csv(self.cache_filepath, sep=",", decimal=".", index=False) if self.my_simulation_parameters.system_config.predictive == True: last_forecast_timestep = int( timestep + 24 * 3600 / self.my_simulation_parameters.seconds_per_timestep ) if ( last_forecast_timestep > len( self.output ) ): last_forecast_timestep = len( self.output ) pvforecast = [ self.output[ t ] * self.pvconfig.power for t in range( timestep, last_forecast_timestep ) ] self.simulation_repository.set_entry( self.PV_Forecast_24h, pvforecast ) def get_coordinates(self, location="Aachen", year=2019): """ Reads a test reference year file and gets the GHI, DHI and DNI from it. Based on the tsib project @[tsib-kotzur] (Check header) Parameters ------- try_num: int (default: 4) The region number of the test reference year. year: int (default: 2010) The year. 
Only data for 2010 and 2030 available """ # get the correct file path filepath = os.path.join(utils.HISIMPATH["weather"][location]) # get the geoposition with open(filepath + ".dat", encoding="utf-8") as fp: lines = fp.readlines() location_name = lines[0].split(maxsplit=2)[2].replace('\n', '') lat = float(lines[1][20:37]) lon = float(lines[2][15:30]) self.location = {"name": location_name, "latitude": lat, "longitude": lon} self.index = pd.date_range( "{}-01-01 00:00:00".format(year), periods=60*24*365, freq="T", tz="Europe/Berlin" ) def i_save_state(self): pass def i_doublecheck(self, timestep: int, stsv: cp.SingleTimeStepValues): pass def build( self, load_module_data : bool, my_simulation_repository : Optional[ cp.SimRepository ] ): log.information(self.pvconfig.to_json()) # type: ignore file_exists, self.cache_filepath = utils.get_cache_file("PVSystem", self.pvconfig) if file_exists: self.output = pd.read_csv(self.cache_filepath, sep=',', decimal='.')['output'].tolist() if len(self.output) != self.my_simulation_parameters.timesteps: raise Exception("Reading the cached PV values seems to have failed. Expected " + str(self.my_simulation_parameters.timesteps) + " values, but got " + str(len(self.output ))) else: self.get_coordinates(location = self.pvconfig.location, year = self.pvconfig.time) # Factor to guarantee peak power based on module with 250 Wh self.ac_power_factor = math.ceil( ( self.pvconfig.power * 1e3 ) / 250 ) #when predictive control is activated, the PV simulation is run beforhand to make forecasting easier if self.my_simulation_parameters.system_config.predictive and my_simulation_repository is not None: #get yearly weather data from dictionary dni_extra = my_simulation_repository.get_entry( Weather.Weather_DirectNormalIrradianceExtra_yearly_forecast ) DNI = my_simulation_repository.get_entry( Weather.Weather_DirectNormalIrradiance_yearly_forecast ) DHI = my_simulation_repository.get_entry( Weather.Weather_DiffuseHorizontalIrradiance_yearly_forecast ) GHI = my_simulation_repository.get_entry( Weather.Weather_GlobalHorizontalIrradiance_yearly_forecast ) azimuth = my_simulation_repository.get_entry( Weather.Weather_Azimuth_yearly_forecast ) apparent_zenith = my_simulation_repository.get_entry( Weather.Weather_ApparentZenith_yearly_forecast ) temperature = my_simulation_repository.get_entry( Weather.Weather_TemperatureOutside_yearly_forecast ) wind_speed = my_simulation_repository.get_entry( Weather.Weather_WindSpeed_yearly_forecast ) x= [ ] for i in range( len( dni_extra ) ): x.append( simPhotovoltaicFast( dni_extra[ i ], DNI[ i ], DHI[ i ], GHI[ i ], azimuth[ i ], apparent_zenith[ i ], temperature[ i ], wind_speed[ i ] ) ) self.output = x database = pd.DataFrame( self.output, columns = [ "output" ] ) database.to_csv( self.cache_filepath, sep=",", decimal=".", index=False ) else: self.data = [0] * self.my_simulation_parameters.timesteps self.data_length = self.my_simulation_parameters.timesteps if self.my_simulation_parameters.system_config.predictive and my_simulation_repository is not None: my_simulation_repository.delete_entry( Weather.Weather_DirectNormalIrradianceExtra_yearly_forecast ) my_simulation_repository.delete_entry( Weather.Weather_DirectNormalIrradiance_yearly_forecast ) my_simulation_repository.delete_entry( Weather.Weather_DiffuseHorizontalIrradiance_yearly_forecast ) my_simulation_repository.delete_entry( Weather.Weather_GlobalHorizontalIrradiance_yearly_forecast ) my_simulation_repository.delete_entry( Weather.Weather_Azimuth_yearly_forecast ) 
my_simulation_repository.delete_entry( Weather.Weather_ApparentZenith_yearly_forecast ) my_simulation_repository.delete_entry( Weather.Weather_TemperatureOutside_yearly_forecast ) my_simulation_repository.delete_entry( Weather.Weather_WindSpeed_yearly_forecast ) self.modules = pd.read_csv( os.path.join(utils.HISIMPATH["photovoltaic"]["modules"]), index_col=0, ) self.inverters = pd.read_csv( os.path.join(utils.HISIMPATH["photovoltaic"]["inverters"]), index_col=0, ) self.temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"] # load the sandia data if load_module_data: # load module data online modules = pvlib.pvsystem.retrieve_sam(name="SandiaMod") self.module = modules[self.pvconfig.module_name] # get inverter data inverters = pvlib.pvsystem.retrieve_sam("cecinverter") self.inverter = inverters[self.pvconfig.inverter_name] else: # load module and inverter data from csv module = self.modules[self.pvconfig.module_name] self.module = pd.to_numeric(module, errors="coerce") inverter = self.inverters[self.pvconfig.inverter_name] self.inverter = pd.to_numeric(inverter, errors="coerce") #self.power = self.power #self.module_name = module_name #self.inverter_name = inverter_name #self.integrateInverter = integrateInverter #self.simPhotovoltaicSimpleJit = simPhotovoltaicSimple def plot(self): # Plots ac_power. One day is represented by 1440 steps. #self.ac_power.iloc[0:7200].plot() plt.plot(self.data) plt.ylabel("Power [W]") plt.xlabel("Time") plt.show() def interpolate(self,pd_database,year): firstday = pd.Series([0.0], index=[ pd.to_datetime(datetime.datetime(year-1, 12, 31, 23, 0), utc=True).tz_convert("Europe/Berlin")]) lastday = pd.Series(pd_database[-1], index=[ pd.to_datetime(datetime.datetime(year, 12, 31, 22, 59), utc=True).tz_convert("Europe/Berlin")]) #pd_database = pd_database.append(firstday) pd_database = pd_database.append(lastday) pd_database = pd_database.sort_index() return pd_database.resample('1T').asfreq().interpolate(method='linear').tolist() def simPhotovoltaic2( self, dni_extra=None, DNI=None, DHI=None, GHI=None, azimuth=None, apparent_zenith=None, temperature=None, wind_speed=None, surface_tilt=30, surface_azimuth=180, albedo=0.2): """ Simulates a defined PV array with the Sandia PV Array Performance Model. The implementation is done in accordance with following tutorial: https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb Based on the tsib project @[tsib-kotzur] (Check header) Parameters ---------- tmy_data: pandas.DataFrame(), required Weatherfile in the format of a tmy file. surface_tilt: int or float, optional (default:30) Tilt angle of of the array in degree. surface_azimuth: int or float, optional (default:180) Azimuth angle of of the array in degree. 180 degree means south, 90 degree east and 270 west. albedo: float, optional (default: 0.2) Reflection coefficient of the surrounding area. losses: float, optional (default: 0.1) Losses due to soiling, mismatch, diode connections, dc wiring etc. load_module_data: Boolean, optional (default: False) If True the module data base is loaded from the Sandia Website. Otherwise it is loaded from this relative path '\\profiles\\PV-Modules\\sandia_modules.csv'. module_name: str, optional (default:'Hanwha_HSL60P6_PA_4_250T__2013_') Module name. The string must be existens in Sandia Module database. integrateInverter: bool, optional (default: True) If an inverter shall be added to the simulation, providing the photovoltaic output after the inverter. 
inverter_name: str, optional (default: 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_') Type of inverter. Returns -------- """ # automatic pd time series in future pvlib version # calculate airmass airmass = pvlib.atmosphere.get_relative_airmass(apparent_zenith) # use perez model to calculate the plane of array diffuse sky radiation poa_sky_diffuse = pvlib.irradiance.perez( surface_tilt, surface_azimuth, DHI, np.float64(DNI), dni_extra, apparent_zenith, azimuth, airmass, ) # calculate ground diffuse with specified albedo poa_ground_diffuse = pvlib.irradiance.get_ground_diffuse( surface_tilt, GHI, albedo=albedo ) # calculate angle of incidence aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth, apparent_zenith, azimuth) # calculate plane of array irradiance poa_irrad = pvlib.irradiance.poa_components(aoi, np.float64(DNI), poa_sky_diffuse, poa_ground_diffuse) # calculate pv cell and module temperature #temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"] pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **self.temp_model) # calculate effective irradiance on pv module sapm_irr = pvlib.pvsystem.sapm_effective_irradiance( module=self.module, poa_direct=poa_irrad["poa_direct"], poa_diffuse=poa_irrad["poa_diffuse"], airmass_absolute=airmass, aoi=aoi, ) # calculate pv performance sapm_out = pvlib.pvsystem.sapm( sapm_irr, module=self.module, temp_cell=pvtemps, ) # calculate peak load of single module [W] peak_load = self.module.loc["Impo"] * self.module.loc["Vmpo"] ac_power = pd.DataFrame() if self.pvconfig.integrate_inverter: # calculate load after inverter iv_load = pvlib.inverter.sandia(inverter=self.inverter, v_dc=sapm_out["v_mp"], p_dc=sapm_out["p_mp"]) ac_power = iv_load / peak_load else: # load in [kW/kWp] ac_power = sapm_out["p_mp"] / peak_load if math.isnan(ac_power): ac_power = 0.0 #ac_power = ac_power * self.time_correction_factor #ac_power = ac_power #data = [DHI, # DNI, # GHI, # dni_extra, # aoi, # apparent_zenith, # azimuth, # airmass, # wind_speed] #if timestep % 60 == 0 and timestep < 1442: # log.information(data) # log.information("Timestep:{} , AcPower: {}".format(timestep, ac_power)) return ac_power def readTRY(location="Aachen", year=2010): """ Reads a test reference year file and gets the GHI, DHI and DNI from it. Based on the tsib project @[tsib-kotzur] (Check header) Parameters ------- try_num: int (default: 4) The region number of the test reference year. year: int (default: 2010) The year. Only data for 2010 and 2030 available """ # get the correct file path filepath = os.path.join(utils.HISIMPATH["weather"][location]) # get the geoposition with open(filepath + ".dat", encoding="utf-8") as fp: lines = fp.readlines() location_name = lines[0].split(maxsplit=2)[2].replace('\n', '') lat = float(lines[1][20:37]) lon = float(lines[2][15:30]) location = {"name": location_name, "latitude": lat, "longitude": lon} # check if time series data already exists as .csv with DNI if os.path.isfile(filepath + ".csv"): data = pd.read_csv(filepath + ".csv", index_col=0, parse_dates=True,sep=";",decimal=",") data.index = pd.to_datetime(data.index, utc=True).tz_convert("Europe/Berlin") # else read from .dat and calculate DNI etc. 
else: # get data data = pd.read_csv( filepath + ".dat", sep=r"\s+", skiprows=([i for i in range(0, 31)]) ) data.index = pd.date_range( "{}-01-01 00:00:00".format(year), periods=8760, freq="H", tz="Europe/Berlin" ) data["GHI"] = data["D"] + data["B"] data = data.rename(columns={"D": "DHI", "t": "T", "WG": "WS"}) # calculate direct normal data["DNI"] = calculateDNI(data["B"], lon, lat) # data["DNI"] = data["B"] # save as .csv #data.to_csv(filepath + ".csv",sep=";",decimal=",") return data, location def calculateDNI(directHI, lon, lat, zenith_tol=87.0): """ Calculates the direct NORMAL irradiance from the direct horizontal irradiance with the help of the PV lib. Based on the tsib project @[tsib-kotzur] (Check header) Parameters ---------- directHI: pd.Series with time index Direct horizontal irradiance lon: float Longitude of the location lat: float Latitude of the location zenith_tol: float, optional Avoid cosines of values above a certain zenith angle of in order to avoid division by zero. Returns ------- DNI: pd.Series """ solarPos = pvlib.solarposition.get_solarposition(directHI.index, lat, lon) solarPos["apparent_zenith"][solarPos.apparent_zenith > zenith_tol] = zenith_tol DNI = directHI.div(solarPos["apparent_zenith"].apply(math.radians).apply(math.cos)) DNI = DNI.fillna(0) if DNI.isnull().values.any(): raise ValueError("Something went wrong...") return DNI
1.976563
2
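The PVSystem code above follows a compute-once, cache-to-CSV pattern: if a cached "output" column exists it is replayed per timestep, otherwise the per-timestep AC power is accumulated and written out on the final timestep. A minimal, self-contained sketch of that pattern follows; the cache path, column name and compute_step callable are illustrative assumptions, not the HiSim API.

import os
import pandas as pd

def run_with_cache(cache_path, n_timesteps, compute_step):
    """Replay cached per-timestep values if present, else compute and cache them."""
    if os.path.exists(cache_path):
        output = pd.read_csv(cache_path, sep=",", decimal=".")["output"].tolist()
        if len(output) != n_timesteps:
            raise Exception("Cached values do not match the expected number of timesteps.")
        return output
    data = [0.0] * n_timesteps
    for t in range(n_timesteps):
        data[t] = compute_step(t)  # e.g. one simPhotovoltaicFast call per timestep
    pd.DataFrame(data, columns=["output"]).to_csv(cache_path, sep=",", decimal=".", index=False)
    return data

# Example with a hypothetical compute function:
# values = run_with_cache("pv_cache.csv", 8760, lambda t: 0.0)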
gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py
t-triobox/gQuant
0
12390
<filename>gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py<gh_stars>0
"""
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from .loadCsvNode import LoadCsvNode
from .bootstrapNode import BootstrapNode
from .logReturnNode import LogReturnNode
from .distanceNode import DistanceNode
from .hierarchicalClusteringNode import HierarchicalClusteringNode
from .hrpWeight import HRPWeightNode
from .portfolioNode import PortfolioNode
from .performanceMetricNode import PerformanceMetricNode
from .nrpWeightNode import NRPWeightNode
from .maxDrawdownNode import MaxDrawdownNode
from .featureNode import FeatureNode
from .aggregateTimeFeature import AggregateTimeFeatureNode
from .mergeNode import MergeNode
from .diffNode import DiffNode
from .rSquaredNode import RSquaredNode
from .shapSummaryPlotNode import ShapSummaryPlotPlotNode
from .leverageNode import LeverageNode
from .rawDataNode import RawDataNode
from .transactionCostNode import TransactionCostNode

__all__ = ["LoadCsvNode", "BootstrapNode", "LogReturnNode", "DistanceNode",
           "HierarchicalClusteringNode", "HRPWeightNode", "PortfolioNode",
           "PerformanceMetricNode", "NRPWeightNode", "MaxDrawdownNode",
           "FeatureNode", "AggregateTimeFeatureNode", "MergeNode", "DiffNode",
           "RSquaredNode", "ShapSummaryPlotPlotNode", "LeverageNode",
           "RawDataNode", "TransactionCostNode"]
1.328125
1
pythran/tests/rosetta/greatest_subsequential_sum.py
davidbrochart/pythran
1,647
12391
<gh_stars>1000+
#from http://rosettacode.org/wiki/Greatest_subsequential_sum#Python
#pythran export maxsum(int list)
#pythran export maxsumseq(int list)
#pythran export maxsumit(int list)
#runas maxsum([0, 1, 0])
#runas maxsumseq([-1, 2, -1, 3, -1])
#runas maxsumit([-1, 1, 2, -5, -6])


def maxsum(sequence):
    """Return maximum sum."""
    maxsofar, maxendinghere = 0, 0
    for x in sequence:
        # invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]``
        maxendinghere = max(maxendinghere + x, 0)
        maxsofar = max(maxsofar, maxendinghere)
    return maxsofar


def maxsumseq(sequence):
    start, end, sum_start = -1, -1, -1
    maxsum_, sum_ = 0, 0
    for i, x in enumerate(sequence):
        sum_ += x
        if maxsum_ < sum_:  # found maximal subsequence so far
            maxsum_ = sum_
            start, end = sum_start, i
        elif sum_ < 0:  # start new sequence
            sum_ = 0
            sum_start = i
    assert maxsum_ == maxsum(sequence)
    assert maxsum_ == sum(sequence[start + 1:end + 1])
    return sequence[start + 1:end + 1]


def maxsumit(iterable):
    maxseq = seq = []
    start, end, sum_start = -1, -1, -1
    maxsum_, sum_ = 0, 0
    for i, x in enumerate(iterable):
        seq.append(x); sum_ += x
        if maxsum_ < sum_:
            maxseq = seq; maxsum_ = sum_
            start, end = sum_start, i
        elif sum_ < 0:
            seq = []; sum_ = 0
            sum_start = i
    assert maxsum_ == sum(maxseq[:end - start])
    return maxseq[:end - start]
2.921875
3
src/binwalk/__init__.py
dotysan/binwalk
1
12392
__all__ = ["Binwalk"] import os import re import time import magic from binwalk.compat import * from binwalk.config import * from binwalk.update import * from binwalk.filter import * from binwalk.parser import * from binwalk.plugins import * from binwalk.plotter import * from binwalk.hexdiff import * from binwalk.entropy import * from binwalk.extractor import * from binwalk.prettyprint import * from binwalk.smartstrings import * from binwalk.smartsignature import * from binwalk.common import file_size, unique_file_name, BlockFile class Binwalk(object): ''' Primary Binwalk class. Useful class objects: self.filter - An instance of the MagicFilter class. self.extractor - An instance of the Extractor class. self.parser - An instance of the MagicParser class. self.display - An instance of the PrettyPrint class. self.magic_files - A list of magic file path strings to use whenever the scan() method is invoked. self.scan_length - The total number of bytes to be scanned. self.total_scanned - The number of bytes that have already been scanned. self.scan_type - The type of scan being performed, one of: BINWALK, BINCAST, BINARCH, STRINGS, ENTROPY. Performing a simple binwalk scan: from binwalk import Binwalk scan = Binwalk().scan(['firmware1.bin', 'firmware2.bin']) for (filename, file_results) in scan.iteritems(): print "Results for %s:" % filename for (offset, results) in file_results: for result in results: print offset, result['description'] ''' # Default libmagic flags. Basically disable anything we don't need in the name of speed. DEFAULT_FLAGS = magic.MAGIC_NO_CHECK_TEXT | magic.MAGIC_NO_CHECK_ENCODING | magic.MAGIC_NO_CHECK_APPTYPE | magic.MAGIC_NO_CHECK_TOKENS # Maximum magic bytes length MAX_SIGNATURE_SIZE = 128 # Minimum verbosity level at which to enable extractor verbosity. VERY_VERBOSE = 2 # Scan every byte by default. DEFAULT_BYTE_ALIGNMENT = 1 # Valid scan_type values. # ENTROPY must be the largest value to ensure it is performed last if multiple scans are performed. # REHASH must also be larger than any scans that would generate extracted files. BINWALK = 0x01 BINARCH = 0x02 BINCAST = 0x03 STRINGS = 0x04 COMPRESSION = 0x05 HEXDIFF = 0x06 CUSTOM = 0x07 REHASH = 0x08 BINVIS = 0x09 ENTROPY = 0x0A def __init__(self, magic_files=[], flags=magic.MAGIC_NONE, log=None, quiet=False, verbose=0, ignore_smart_keywords=False, ignore_time_skews=False, load_extractor=False, load_plugins=True, exec_commands=True, max_extract_size=None): ''' Class constructor. @magic_files - A list of magic files to use. @flags - Flags to pass to magic_open. [TODO: Might this be more appropriate as an argument to load_signaures?] @log - Output PrettyPrint data to log file as well as to stdout. @quiet - If set to True, supress PrettyPrint output to stdout. @verbose - Verbosity level. @ignore_smart_keywords - Set to True to ignore smart signature keywords. @ignore_time_skews - Set to True to ignore file results with timestamps in the future. @load_extractor - Set to True to load the default extraction rules automatically. @load_plugins - Set to False to disable plugin support. @exec_commands - Set to False to disable the execution of external utilities when extracting data from files. @max_extract_size - Limit the size of extracted files. Returns None. 
''' self.flags = self.DEFAULT_FLAGS | flags self.last_extra_data_section = '' self.load_plugins = load_plugins self.magic_files = magic_files self.verbose = verbose self.total_scanned = 0 self.scan_length = 0 self.total_read = 0 self.matryoshka = 1 self.epoch = 0 self.year = 0 self.plugins = None self.magic = None self.mfile = None self.entropy = None self.strings = None self.scan_type = self.BINWALK if not ignore_time_skews: # Consider timestamps up to 1 year in the future valid, # to account for any minor time skew on the local system. self.year = time.localtime().tm_year + 1 self.epoch = int(time.time()) + (60 * 60 * 24 * 365) # Instantiate the config class so we can access file/directory paths self.config = Config() # Use the system default magic file if no other was specified if not self.magic_files or self.magic_files is None: # Append the user's magic file first so that those signatures take precedence self.magic_files = [ self.config.paths['user'][self.config.BINWALK_MAGIC_FILE], self.config.paths['system'][self.config.BINWALK_MAGIC_FILE], ] # Only set the extractor verbosity if told to be very verbose if self.verbose >= self.VERY_VERBOSE: extractor_verbose = True else: extractor_verbose = False # Create an instance of the PrettyPrint class, which can be used to print results to screen/file. self.display = PrettyPrint(self, log=log, quiet=quiet, verbose=verbose) # Create MagicFilter and Extractor class instances. These can be used to: # # o Create include/exclude filters # o Specify file extraction rules to be applied during a scan # self.filter = MagicFilter() self.extractor = Extractor(verbose=extractor_verbose, exec_commands=exec_commands, max_size=max_extract_size) if load_extractor: self.extractor.load_defaults() # Create SmartSignature and MagicParser class instances. These are mostly for internal use. self.smart = SmartSignature(self.filter, ignore_smart_signatures=ignore_smart_keywords) self.parser = MagicParser(self.filter, self.smart) def __del__(self): self.cleanup() def __enter__(self): return self def __exit__(self, t, v, traceback): self.cleanup() def cleanup(self): ''' Close magic and cleanup any temporary files generated by the internal instance of MagicParser. Returns None. ''' try: self.magic.close() except: pass try: self.parser.cleanup() except: pass def load_signatures(self, magic_files=[]): ''' Load signatures from magic file(s). Called automatically by Binwalk.scan() with all defaults, if not already called manually. @magic_files - A list of magic files to use (default: self.magic_files). Returns None. ''' # The magic files specified here override any already set if magic_files and magic_files is not None: self.magic_files = magic_files # Parse the magic file(s) and initialize libmagic self.mfile = self.parser.parse(self.magic_files) self.magic = magic.open(self.flags) self.magic.load(str2bytes(self.mfile)) # Once the temporary magic file is loaded into libmagic, we don't need it anymore; delete the temp file self.parser.rm_magic_file() def hexdiff(self, file_names, length=0x100, offset=0, block=16, first=False): if not length and len(file_names) > 0: length = file_size(file_names[0]) if not block: block = 16 HexDiff(self).display(file_names, offset=offset, size=length, block=block, show_first_only=first) def analyze_strings(self, file_names, length=0, offset=0, n=0, block=0, load_plugins=True, whitelist=[], blacklist=[]): ''' Performs a strings analysis on the specified file(s). @file_names - A list of files to analyze. 
@length - The number of bytes in the file to analyze. @offset - The starting offset into the file to begin analysis. @n - The minimum valid string length. @block - The block size to use when performing entropy analysis. @load_plugins - Set to False to disable plugin callbacks. @whitelist - A list of whitelisted plugins. @blacklist - A list of blacklisted plugins. Returns a dictionary compatible with other classes and methods (Entropy, Binwalk, analyze_entropy, etc): { 'file_name' : (offset, [{ 'description' : 'Strings', 'string' : 'found_string' }] ) } ''' data = {} self.strings = Strings(file_names, self, length=length, offset=offset, n=n, block=block, algorithm='gzip', # Use gzip here as it is faster and we don't need the detail provided by shannon load_plugins=load_plugins, whitelist=whitelist, blacklist=blacklist) data = self.strings.strings() del self.strings self.strings = None return data def analyze_entropy(self, files, offset=0, length=0, block=0, plot=True, legend=True, save=False, algorithm=None, load_plugins=True, whitelist=[], blacklist=[], compcheck=False): ''' Performs an entropy analysis on the specified file(s). @files - A dictionary containing file names and results data, as returned by Binwalk.scan. @offset - The offset into the data to begin analysis. @length - The number of bytes to analyze. @block - The size of the data blocks to analyze. @plot - Set to False to disable plotting. @legend - Set to False to exclude the legend and custom offset markers from the plot. @save - Set to True to save plots to disk instead of displaying them. @algorithm - Set to 'gzip' to use the gzip entropy "algorithm". @load_plugins - Set to False to disable plugin callbacks. @whitelist - A list of whitelisted plugins. @blacklist - A list of blacklisted plugins. @compcheck - Set to True to perform heuristic compression detection. Returns a dictionary of: { 'file_name' : ([list, of, offsets], [list, of, entropy], average_entropy) } ''' data = {} self.entropy = Entropy(files, self, offset, length, block, plot, legend, save, algorithm=algorithm, load_plugins=plugins, whitelist=whitelist, blacklist=blacklist, compcheck=compcheck) data = self.entropy.analyze() del self.entropy self.entropy = None return data def plot3d(self, target_files, offset=0, length=0, max_points=None, show_grids=False, verbose=False): ''' Generates a 3D data plot of the specified target files. @target_files - File or list of files to scan. @offset - Starting offset at which to start the scan. @length - Number of bytes to scan. Specify 0 to scan the entire file(s). @max_points - Set the maximum number of data points to plot. @show_grids - Set to True to show axis grids in the 3D plot. @verbose - Set to True to enable verbose output. Returns None. ''' if not isinstance(target_files, type([])): target_files = [target_files] Plotter3D(target_files, offset=offset, length=length, max_points=max_points, show_grids=show_grids, verbose=verbose).plot() def plot2d(self, target_files, offset=0, length=0, max_points=None, show_grids=False, verbose=False): ''' Generates a 2D data plot of the specified target files. @target_files - File or list of files to scan. @offset - Starting offset at which to start the scan. @length - Number of bytes to scan. Specify 0 to scan the entire file(s). @max_points - Set the maximum number of data points to plot. @show_grids - Set to True to show axis grids in the 3D plot. @verbose - Set to True to enable verbose output. Returns None. 
''' if not isinstance(target_files, type([])): target_files = [target_files] Plotter2D(target_files, offset=offset, length=length, max_points=max_points, show_grids=show_grids, verbose=verbose).plot() def scan(self, target_files, offset=0, length=0, show_invalid_results=False, callback=None, start_callback=None, end_callback=None, base_dir=None, matryoshka=1, plugins_whitelist=[], plugins_blacklist=[]): ''' Performs a binwalk scan on a file or list of files. @target_files - File or list of files to scan. @offset - Starting offset at which to start the scan. @length - Number of bytes to scan. Specify -1 for streams. @show_invalid_results - Set to True to display invalid results. @callback - Callback function to be invoked when matches are found. @start_callback - Callback function to be invoked prior to scanning each file. @end_callback - Callback function to be invoked after scanning each file. @base_dir - Base directory for output files. @matryoshka - Number of levels to traverse into the rabbit hole. @plugins_whitelist - A list of plugin names to load. If not empty, only these plugins will be loaded. @plugins_blacklist - A list of plugin names to not load. Returns a dictionary of : { 'target file name' : [ (0, [{description : "LZMA compressed data..."}]), (112, [{description : "gzip compressed data..."}]) ] } ''' # Prefix all directory names with an underscore. This prevents accidental deletion of the original file(s) # when the user is typing too fast and is trying to deleted the extraction directory. prefix = '_' dir_extension = 'extracted' i = 0 total_results = {} self.matryoshka = matryoshka # For backwards compatibility if not isinstance(target_files, type([])): target_files = [target_files] if base_dir is None: base_dir = '' # Instantiate the Plugins class and load all plugins, if not disabled self.plugins = Plugins(self, whitelist=plugins_whitelist, blacklist=plugins_blacklist) if self.load_plugins: self.plugins._load_plugins() # Load the magic signatures. This must be done for every scan, as some signature scans # may use a different list of magic signatures. self.load_signatures() while i < self.matryoshka: new_target_files = [] # Scan each target file for target_file in target_files: ignore_files = [] # On the first scan, add the base_dir value to dir_prefix. Subsequent target_file values will have this value prepended already. if i == 0: dir_prefix = os.path.join(base_dir, prefix + os.path.basename(target_file)) else: dir_prefix = os.path.join(os.path.dirname(target_file), prefix + os.path.basename(target_file)) output_dir = unique_file_name(dir_prefix, dir_extension) # Set the output directory for extracted files to go to self.extractor.output_directory(output_dir) if start_callback is not None: start_callback(target_file) results = self.single_scan(target_file, offset=offset, length=length, show_invalid_results=show_invalid_results, callback=callback) if end_callback is not None: end_callback(target_file) # Get a list of extracted file names; don't scan them again. 
for (index, results_list) in results: for result in results_list: if result['extract']: ignore_files.append(result['extract']) # Find all newly created files and add them to new_target_files / new_target_directories for (dir_path, sub_dirs, files) in os.walk(output_dir): for fname in files: fname = os.path.join(dir_path, fname) if fname not in ignore_files: new_target_files.append(fname) # Don't worry about sub-directories break total_results[target_file] = results target_files = new_target_files i += 1 # Be sure to delete the Plugins instance so that there isn't a lingering reference to # this Binwalk class instance (lingering handles to this Binwalk instance cause the # __del__ deconstructor to not be called). if self.plugins is not None: del self.plugins self.plugins = None return total_results def single_scan(self, target_file='', fd=None, offset=0, length=0, show_invalid_results=False, callback=None, plugins_whitelist=[], plugins_blacklist=[]): ''' Performs a binwalk scan on one target file or file descriptor. @target_file - File to scan. @fd - A common.BlockFile object. @offset - Starting offset at which to start the scan. @length - Number of bytes to scan. Specify -1 for streams. @show_invalid_results - Set to True to display invalid results. @callback - Callback function to be invoked when matches are found. @plugins_whitelist - A list of plugin names to load. If not empty, only these plugins will be loaded. @plugins_blacklist - A list of plugin names to not load. The callback function is passed two arguments: a list of result dictionaries containing the scan results (one result per dict), and the offset at which those results were identified. Example callback function: def my_callback(offset, results): print "Found %d results at offset %d:" % (len(results), offset) for result in results: print "\t%s" % result['description'] binwalk.Binwalk(callback=my_callback).scan("firmware.bin") Upon completion, the scan method returns a sorted list of tuples containing a list of results dictionaries and the offsets at which those results were identified: scan_results = [ (0, [{description : "LZMA compressed data..."}]), (112, [{description : "gzip compressed data..."}]) ] See SmartSignature.parse for a more detailed description of the results dictionary structure. ''' scan_results = {} fsize = 0 jump_offset = 0 i_opened_fd = False i_loaded_plugins = False plugret = PLUGIN_CONTINUE plugret_start = PLUGIN_CONTINUE self.total_read = 0 self.total_scanned = 0 self.scan_length = length self.filter.show_invalid_results = show_invalid_results self.start_offset = offset # Check to make sure either a target file or a file descriptor was supplied if not target_file and fd is None: raise Exception("Must supply Binwalk.single_scan with a valid file path or BlockFile object") # Need the total size of the target file, even if we aren't scanning the whole thing if target_file: fsize = file_size(target_file) # If no length was specified, make the length the size of the target file minus the starting offset if self.scan_length == 0: self.scan_length = fsize - offset # Open the target file and seek to the specified start offset if fd is None: fd = BlockFile(target_file, length=self.scan_length, offset=offset) i_opened_fd = True # If offset is negative (bytes from EOF), BlockFile class will autmoatically calculate the right offset offset = fd.offset # Seek to the starting offset. #fd.seek(offset) # If the Plugins class has not already been instantitated, do that now. 
if self.plugins is None: self.plugins = Plugins(self, blacklist=plugins_blacklist, whitelist=plugins_whitelist) i_loaded_plugins = True if self.load_plugins: self.plugins._load_plugins() # Invoke any pre-scan plugins plugret_start = self.plugins._pre_scan_callbacks(fd) # Load the magic signatures if they weren't already loaded. if not self.magic: self.load_signatures() # Main loop, scan through all the data while not ((plugret | plugret_start) & PLUGIN_TERMINATE): i = 0 # Read in the next block of data from the target file and make sure it's valid (data, dlen) = fd.read_block() if not data or dlen == 0: break # The total number of bytes scanned could be bigger than the total number # of bytes read from the file if the previous signature result specified a # jump offset that was beyond the end of the then current data block. # # If this is the case, we need to index into this data block appropriately in order to # resume the scan from the appropriate offset. # # Don't update dlen though, as it is the literal offset into the data block that we # are to scan up to in this loop iteration. It is also appended to self.total_scanned, # which is what we want (even if we have been told to skip part of the block, the skipped # part is still considered part of the total bytes scanned). if jump_offset > 0: total_check = self.total_scanned + dlen # Is the jump offset beyond the total amount of data that we've currently read in (i.e., in a future data block)? if jump_offset >= total_check: i = -1 # Try to seek to the jump offset; this won't work if fd == sys.stdin try: fd.seek(jump_offset) self.total_read = jump_offset self.total_scanned = jump_offset - dlen except: pass # Is the jump offset inside this block of data? elif jump_offset > self.total_scanned and jump_offset < total_check: # Index into this block appropriately; jump_offset is the file offset that # we need to jump to, and self.total_scanned is the file offset that starts # the beginning of the current block i = jump_offset - self.total_scanned # We're done with jump_offset, zero it out for the next round jump_offset = 0 # Scan through each block of data looking for signatures if i >= 0 and i < dlen: # Scan this data block for a list of offsets which are candidates for possible valid signatures. # Signatures could be split across the block boundary; since data conatins 1KB more than dlen, # pass up to dlen+MAX_SIGNATURE_SIZE to find_signature_candidates, but don't accept signatures that # start after the end of dlen. for candidate in self.parser.find_signature_candidates(data[i:dlen+self.MAX_SIGNATURE_SIZE], (dlen-i)): # If a previous signature specified a jump offset beyond this candidate signature offset, ignore it if (i + candidate + self.total_scanned) < jump_offset: continue # Reset these values on each loop smart = {} results = [] results_offset = -1 # In python3 we need a bytes object to pass to magic.buffer candidate_data = str2bytes(data[i+candidate:i+candidate+fd.MAX_TRAILING_SIZE]) # Pass the data to libmagic, and split out multiple results into a list for magic_result in self.parser.split(self.magic.buffer(candidate_data)): i_set_results_offset = False # Some signatures need to take into account the length of a given string # when specifying additional offsets. Parse the string-len keyword to adjust # for this prior to calling self.smart.parse. magic_result = self.smart._parse_string_len(magic_result) # Some file names are not NULL byte terminated, but rather their length is # specified in a size field. 
To ensure these are not marked as invalid due to # non-printable characters existing in the file name, parse the filename(s) and # trim them to the specified filename length, if one was specified. magic_result = self.smart._parse_raw_strings(magic_result) # Invoke any pre-parser callback plugin functions if not (plugret_start & PLUGIN_STOP_PLUGINS): raw_result = {'description' : magic_result} plugret = self.plugins._scan_pre_parser_callbacks(raw_result) magic_result = raw_result['description'] if (plugret & PLUGIN_TERMINATE): break # Make sure this is a valid result before further processing if not self.filter.invalid(magic_result): # The smart filter parser returns a dictionary of keyword values and the signature description. smart = self.smart.parse(magic_result) # Validate the jump value and check if the response description should be displayed if self._is_valid(smart, candidate+i, fsize): # If multiple results are returned and one of them has smart['jump'] set to a non-zero value, # the calculated results offset will be wrong since i will have been incremented. Only set the # results_offset value when the first match is encountered. if results_offset < 0: results_offset = offset + i + candidate + smart['adjust'] + self.total_scanned i_set_results_offset = True # Double check to make sure the smart['adjust'] value is sane. # If it makes results_offset negative, then it is not sane. if results_offset >= 0: smart['offset'] = results_offset # Invoke any scan plugins if not (plugret_start & PLUGIN_STOP_PLUGINS): plugret = self.plugins._scan_callbacks(smart) results_offset = smart['offset'] if (plugret & PLUGIN_TERMINATE): break # Extract the result, if it matches one of the extract rules and is not a delayed extract. if self.extractor.enabled and not (self.extractor.delayed and smart['delay']) and not ((plugret | plugret_start) & PLUGIN_NO_EXTRACT): # If the signature did not specify a size, extract to the end of the file. if not smart['size']: smart['size'] = fsize-results_offset smart['extract'] = self.extractor.extract( results_offset, smart['description'], target_file, smart['size'], name=smart['name']) if not ((plugret | plugret_start) & PLUGIN_NO_DISPLAY): # This appears to be a valid result, so append it to the results list. results.append(smart) elif i_set_results_offset: results_offset = -1 # Did we find any valid results? if results_offset >= 0: scan_results[results_offset] = results if callback is not None: callback(results_offset, results) # If a relative jump offset was specified, update the absolute jump_offset variable if has_key(smart, 'jump') and smart['jump'] > 0: jump_offset = results_offset + smart['jump'] # Track the total number of bytes scanned self.total_scanned += dlen # The starting offset only affects the reported offset for results # in the first block of data. Zero it out after the first block has # been processed. offset = 0 # Sort the results before returning them scan_items = list(scan_results.items()) scan_items.sort() # Do delayed extraction, if specified. if self.extractor.enabled and self.extractor.delayed: scan_items = self.extractor.delayed_extract(scan_items, target_file, fsize) # Invoke any post-scan plugins #if not (plugret_start & PLUGIN_STOP_PLUGINS): self.plugins._post_scan_callbacks(fd) # Be sure to delete the Plugins instance so that there isn't a lingering reference to # this Binwalk class instance (lingering handles to this Binwalk instance cause the # __del__ deconstructor to not be called). 
if i_loaded_plugins: del self.plugins self.plugins = None if i_opened_fd: fd.close() return scan_items def concatenate_results(self, results, new): ''' Concatenate multiple Binwalk.scan results into one dictionary. @results - Binwalk results to append new results to. @new - New data to append to results. Returns None. ''' for (new_file_name, new_data) in iterator(new): if not has_key(results, new_file_name): results[new_file_name] = new_data else: for i in range(0, len(new_data)): found_offset = False (new_offset, new_results_list) = new_data[i] for j in range(0, len(results[new_file_name])): (offset, results_list) = results[new_file_name][j] if offset == new_offset: results_list += new_results_list results[new_file_name][j] = (offset, results_list) found_offset = True break if not found_offset: results[new_file_name] += new_data def _is_valid(self, result, location, file_size): ''' Determines if a result string is valid and should be displayed to the user or not. @result - Result dictionary, as returned by self.smart.parse. @location - The file offset of the result. @file_size - The total size of the file. Returns True if the string should be displayed. Returns False if the string should not be displayed. ''' if self.filter.show_invalid_results: return True if result['invalid'] or result['jump'] < 0 or result['size'] < 0: return False if ((location + result['size']) > file_size) or (self.year and result['year'] > self.year) or (self.epoch and result['epoch'] > self.epoch): return False desc = result['description'] return (desc and desc is not None and not self.filter.invalid(desc) and self.filter.filter(desc) != self.filter.FILTER_EXCLUDE)
2.453125
2
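The single_scan docstring above describes a callback hook that receives each result offset together with its list of result dictionaries. A sketch of wiring such a callback through scan(), based only on the legacy module shown above (the modern binwalk package exposes a different API, and the firmware path here is a placeholder):

import binwalk

def my_callback(offset, results):
    # each result is a dict produced by SmartSignature.parse
    for result in results:
        print("0x%08X  %s" % (offset, result['description']))

with binwalk.Binwalk() as bw:
    scan_results = bw.scan("firmware.bin", callback=my_callback)  # placeholder path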
dodo.py
Ublimjo/nwt
1
12393
<reponame>Ublimjo/nwt
def task_clean_junk():
    """Remove junk files (cached __pycache__ directories)."""
    return {
        'actions': ['rm -rdf $(find . | grep pycache)'],
        'clean': True,
    }
1.867188
2
greedy_algorithms/6_maximum_salary/largest_number.py
Desaiakshata/Algorithms-problems
0
12394
<reponame>Desaiakshata/Algorithms-problems
# Uses python3
import sys


def largest_number(a):
    # Greedy: repeatedly pick the digit string that maximizes the concatenated
    # value (compare x + maxa against maxa + x) and append it to the result.
    res = ""
    while len(a) != 0:
        maxa = a[0]
        for x in a:
            if int(str(x) + str(maxa)) > int(str(maxa) + str(x)):
                maxa = x
        res += str(maxa)
        a.remove(str(maxa))
    return res


if __name__ == '__main__':
    # input = sys.stdin.read()
    data = input().split(' ')
    a = data[1:]  # first token is the count; the remaining tokens are the numbers
    print(largest_number(a))
3.8125
4
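The greedy loop in largest_number compares candidates by which concatenation order yields the larger number. The same rule can be expressed as a sort with a custom comparator, which avoids the repeated linear scans; this is an alternative sketch, not the repository's code.

from functools import cmp_to_key

def largest_number_sorted(numbers):
    # x should come before y when the concatenation x + y is the larger number
    key = cmp_to_key(lambda x, y: int(y + x) - int(x + y))
    return ''.join(sorted(numbers, key=key))

assert largest_number_sorted(["21", "2"]) == "221"
assert largest_number_sorted(["9", "4", "6", "1", "9"]) == "99641"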
screenblankmgr.py
nsw42/pijuui
1
12395
<filename>screenblankmgr.py
import logging
import subprocess


class PlayingState:
    Inactive = 0
    Active = 1


class ProfileBase:
    def __init__(self):
        raise NotImplementedError()

    def on_start_playing(self):
        raise NotImplementedError()

    def on_stop_playing(self):
        raise NotImplementedError()

    def on_playing_tick(self):
        raise NotImplementedError()

    def _set_timeout(self, timeout):
        self._run_xset(str(timeout))

    def _run_xset(self, s_arg):
        cmd = ['xset', 's', s_arg]
        logging.debug(cmd)
        subprocess.run(cmd)


class ScreenBlankProfileNone(ProfileBase):
    def __init__(self):
        pass

    def on_start_playing(self):
        pass

    def on_stop_playing(self):
        pass

    def on_playing_tick(self):
        pass


class ScreenBlankProfileBalanced(ProfileBase):
    def __init__(self):
        pass

    def on_start_playing(self):
        self._set_timeout(300)

    def on_stop_playing(self):
        self._set_timeout(30)

    def on_playing_tick(self):
        pass


class ScreenBlankProfileOnWhenPlaying(ProfileBase):
    def __init__(self):
        pass

    def on_start_playing(self):
        self._set_timeout(60 * 60)

    def on_stop_playing(self):
        self._run_xset('on')
        self._set_timeout(10)

    def on_playing_tick(self):
        self._run_xset('off')
        self._run_xset('reset')


class ScreenBlankMgr:
    def __init__(self, profile: ProfileBase):
        self.state = None
        self.profile = profile
        self.tick_countdown = 5

    def set_state(self, new_state: str):
        """
        new_state in ('playing', 'paused', 'stopped')
        """
        new_state = PlayingState.Active if (new_state == 'playing') else PlayingState.Inactive
        if self.state == new_state:
            if self.state == PlayingState.Active:
                self.tick_countdown -= 1
                if self.tick_countdown <= 0:
                    self.profile.on_playing_tick()
                    self.tick_countdown = 5
        else:
            self.state = new_state
            if self.state == PlayingState.Active:
                self.profile.on_start_playing()
            else:
                self.profile.on_stop_playing()


profiles = {
    'none': ScreenBlankProfileNone(),
    'balanced': ScreenBlankProfileBalanced(),
    'onoff': ScreenBlankProfileOnWhenPlaying()
}
2.3125
2
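ScreenBlankMgr above only acts on state transitions and throttles on_playing_tick to every fifth repeated call. A usage sketch, assuming the module above is importable as screenblankmgr and that xset is available on the PATH (the profile methods shell out to it):

from screenblankmgr import ScreenBlankMgr, profiles

mgr = ScreenBlankMgr(profiles['onoff'])
mgr.set_state('playing')        # transition: on_start_playing() sets the long timeout
for _ in range(5):
    mgr.set_state('playing')    # same state: every 5th call fires on_playing_tick()
mgr.set_state('paused')         # transition: on_stop_playing() forces the screen on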
warmmail/subscribe/tasks_send.py
sahilsakhuja/warmmail
0
12396
# -*- coding: utf-8 -*- import os import urllib.parse from datetime import date, datetime from functools import partial from urllib.parse import quote_plus import pandas as pd import plotly.express as px import pytz from csci_utils.luigi.requires import Requirement, Requires from csci_utils.luigi.target import TargetOutput from django.template.loader import render_to_string from luigi import ( DateParameter, ExternalTask, ListParameter, LocalTarget, Parameter, Target, Task, ) from plotly.io import to_image from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import Mail from .models import Subscription from .tasks_fetch import ConvertAQIFileToParquet class UrlParameter(Parameter): """Descriptor to ensure that a file name is url safe i.e. quoted""" def normalize(self, x): return quote_plus(x) class RowFilterTarget(Target): """A target class for filters on rows Checks to see if any rows exist that satisfy the given filter If no results found, return True (i.e. task is complete), else False False - causes Luigi to think that task is pending and runs it + check requirements """ def __init__(self, model, **kwargs): self.model = model self.kwargs = kwargs def exists(self): vals = self.model.objects.filter(**self.kwargs) if not vals: return True return False class RowFilterOutput: """Descriptor for the output method Returns a "RowFilterTarget" for the Luigi task Additional feature: in case there are values returned from the filter, descriptor can accept name of fields and parameters on the parent class and update the parent class parameters - this ensures that downstream tasks do not need to call the database again """ def __init__(self, model, entries_param=None, field=None, **kwargs): self.model = model entries_param = ( entries_param if isinstance(entries_param, list) else [entries_param] ) field = field if isinstance(field, list) else [field] self.parent_updates = dict(zip(entries_param, field)) self.kwargs = kwargs def __get__(self, task, cls): if not task: return self return partial(self.__call__, task) def __call__(self, task): vals = self.model.objects.filter(**self.kwargs) if vals and self.parent_updates: for entry, field in self.parent_updates.items(): setattr(task, entry, tuple(set(getattr(v, field) for v in vals))) return RowFilterTarget(self.model, **self.kwargs) class GenerateEmails(ExternalTask): """ Task to generate the html content to be sent via email. Uses Django's render to string functionality. 
:param city: name of the city for which report has to be generated :param pol: name of the dominant pollutant for that city :param date: the date for which report has to be generated """ city = UrlParameter(default=None) pol = Parameter(default="pm25") date = DateParameter(default=date.today()) requires = Requires() historical = Requirement(ConvertAQIFileToParquet) output = TargetOutput( factory=LocalTarget, file_pattern="emails/{task.city}-{task.date}", ext=".html", ) def run(self): city = urllib.parse.unquote(self.city) df = pd.read_parquet(self.historical.output().path) df = df[df["City"] == city].sort_index(ascending=False) df = df[df["Specie"].isin(["pm10", "pm25"])] df = df.pivot(index=None, columns="Specie", values="median") df.fillna(0, inplace=True) df.sort_index(inplace=True, ascending=False) last_7_days = df.iloc[:6] data = {"aqi": df.iloc[0][self.pol]} df["month"] = df.index.strftime("%Y-%m") df_month = df.groupby("month").agg("mean") last_7_days_bar = px.bar(last_7_days, title="Last 7 Days", barmode="group") month_bar = px.bar(df_month, title="Monthly", barmode="group") from base64 import b64encode data["image_last_7_days"] = b64encode( to_image(last_7_days_bar, format="png", engine="kaleido") ).decode() data["image_months"] = b64encode( to_image(month_bar, format="png", engine="kaleido") ).decode() html = render_to_string( "subscribe/newsletter_email_template.html", {"data": data} ) with open(self.output().path, "w") as f: f.write(html) class CheckForPendingEmails(Task): """ Task to check for pending emails. This uses a "RowFilterOutput" which checks for rows in the database which have the "next_email_date" in the past. For each such row found (city + dominent pollutant fetched frm the DB), the task requires a GenerateEmails task. """ cities = ListParameter(default=None) pols = ListParameter(default=None) date = DateParameter(default=date.today()) def requires(self): return { k: self.clone(GenerateEmails, city=k, pol=self.pols[i]) for i, k in enumerate(self.cities) } output = RowFilterOutput( model=Subscription, entries_param=["cities", "pols"], field=["city", "dominentpol"], next_email_date__lte=datetime.now(tz=pytz.utc), ) def run(self): for city in self.cities: vals = Subscription.objects.filter( next_email_date__lte=datetime.now(tz=pytz.utc), city__exact=city ) emails = list(map(lambda x: x.email, vals)) html = open(self.input()[city].path).read() message = Mail( from_email="<EMAIL>", to_emails=emails[0], subject=f"Daily AQI Update for {city} from WarmMail", html_content=html, ) try: sg = SendGridAPIClient(os.environ.get("SENDGRID_API_KEY")) sg.send(message) except Exception as e: print(e.message)
2.390625
2
cno/chrutils.py
CherokeeLanguage/cherokee-audio-data
2
12397
#!/usr/bin/env python3 def test(): cedTest = ["U²sgal²sdi ạ²dv¹ne²³li⁴sgi.", "Ụ²wo²³dị³ge⁴ɂi gi²hli a¹ke²³he³²ga na ạ²chu⁴ja.", "Ạ²ni²³tạɂ³li ạ²ni²sgạ²ya a¹ni²no²hạ²li²³do³²he, ạ²hwi du¹ni²hyọ²he.", "Sa¹gwu⁴hno ạ²sgạ²ya gạ²lo¹gwe³ ga²ne²he sọ³ɂị³hnv³ hla².", "Na³hnv³ gạ²lo¹gwe³ ga²ne⁴hi u²dlv²³kwsạ²ti ge¹se³, ạ²le go²hu⁴sdi yu²³dv³²ne⁴la a¹dlv²³kwsge³.", "A¹na³ɂi²sv⁴hnv go²hu⁴sdi wu²³ni³go²he do²jụ²wạ³ɂị²hlv,", "na³hnv³ gạ²lo¹gwe³ ga²ne⁴hi kị²lạ²gwu ị²yv⁴da wị²du²³sdạ³yo²hle³ o²³sdạ²gwu nu²³ksẹ²stạ²nv⁴na ị²yu³sdi da¹sdạ²yo²hị²hv⁴.", "U²do²hị²yu⁴hnv³ wu²³yo³hle³ ạ²le u¹ni²go²he³ gạ²nv³gv⁴.", "Na³hnv³ gạ²lo¹gwe³ nị²ga²³ne³hv⁴na \"ạ²hwi e¹ni²yo³ɂa!\" u¹dv²hne.", "\"Ji²yo³ɂe³²ga\" u¹dv²hne na³ gạ²lo¹gwe³ ga²ne⁴hi, a¹dlv²³kwsgv³.", "U¹na³ne²lu²³gi³²se do²jụ²wạ³ɂị²hlv³ di³dla, nạ²ɂv²³hnị³ge⁴hnv wu²³ni³luh²ja u¹ni²go²he³ so²³gwị³li gạɂ³nv⁴.", "\"So²³gwị³lị³le³² i¹nạ²da²hị³si\" u¹dv²hne³ na³ u²yo²hlv⁴.", "\"Hạ²da²hị³se³²ga³\" a¹go¹se²³le³."] for a in cedTest: print("_______________"); print(); print(a); print(ced2mco(a)); asciiCedText = ["ga.2da.2de3ga", "ha.2da.2du1ga", "u2da.2di23nv32di", "u1da.2di23nv32sv23?i", "a1da.2de3go3?i"] for a in asciiCedText: print("_______________"); print(); print(a); print(ascii_ced2mco(a)); return # Converts MCO annotation into pseudo English phonetics for use by the aeneas alignment package # lines prefixed with '#' are returned with the '#' removed, but otherwise unchanged. def mco2espeak(text: str): import unicodedata as ud import re if (len(text.strip()) == 0): return "" # Handle specially flagged text if (text[0].strip() == "#"): if text[1] != "!": return text.strip()[1:] else: text = text[2:] newText = ud.normalize('NFD', text.strip()).lower() if (newText[0] == ""): newText = newText[1:] # remove all tone indicators newText = re.sub("[\u030C\u0302\u0300\u0301\u030b]", "", newText) newText = "[[" + newText.strip() + "]]" newText = newText.replace(" ", "]] [[") newText = newText.replace("'", "]]'[[") newText = newText.replace(".]]", "]].") newText = newText.replace(",]]", "]],") newText = newText.replace("!]]", "]]!") newText = newText.replace("?]]", "]]?") newText = newText.replace(":]]", "]]:") newText = newText.replace(";]]", "]];") newText = newText.replace("\"]]", "]]\"") newText = newText.replace("']]", "]]'") newText = newText.replace(" ]]", "]] ") newText = newText.replace("[[ ", " [[") newText = re.sub("(?i)([aeiouv]):", "\\1", newText) # convert all vowels into approximate espeak x-sampa escaped forms newText = newText.replace("A", "0") newText = newText.replace("a", "0") newText = newText.replace("v", "V") newText = newText.replace("tl", "tl#") newText = newText.replace("hl", "l#") newText = newText.replace("J", "dZ") newText = newText.replace("j", "dZ") newText = newText.replace("Y", "j") newText = newText.replace("y", "j") newText = newText.replace("Ch", "tS") newText = newText.replace("ch", "tS") newText = newText.replace("ɂ", "?") return newText def ced2mco(text: str): import unicodedata as ud import re tones2mco = [("²³", "\u030C"), ("³²", "\u0302"), ("¹", "\u0300"), ("²", ""), ("³", "\u0301"), ("⁴", "\u030b")] text = ud.normalize('NFD', text) text = re.sub("(?i)([aeiouv])([^¹²³⁴\u0323]+)", "\\1\u0323\\2", text) text = re.sub("(?i)([aeiouv])([¹²³⁴]+)$", "\\1\u0323\\2", text) text = re.sub("(?i)([aeiouv])([¹²³⁴]+)([^¹²³⁴a-zɂ])", "\\1\u0323\\2\\3", text) text = re.sub("(?i)([^aeiouv\u0323¹²³⁴]+)([¹²³⁴]+)", "\\2\\1", text) text = re.sub("(?i)([aeiouv])([¹²³⁴]+)", "\\1\\2:", text) text = text.replace("\u0323", "") text = re.sub("(?i)([aeiouv])²$", 
"\\1\u0304", text) text = re.sub("(?i)([aeiouv])²([^a-zɂ¹²³⁴:])", "\\1\u0304\\2", text) for ced2mcotone in tones2mco: text = text.replace(ced2mcotone[0], ced2mcotone[1]) # return ud.normalize('NFC', text) def ascii_ced2mco(text: str): import unicodedata as ud text = ud.normalize('NFD', text) return ced2mco(ascii_ced2ced(text)) def ascii_ced2ced(text: str): import unicodedata as ud text = ud.normalize('NFD', text) text = text.replace(".", "\u0323") text = text.replace("1", "¹") text = text.replace("2", "²") text = text.replace("3", "³") text = text.replace("4", "⁴") text = text.replace("?", "ɂ") return text if __name__ == "__main__": test()
1.710938
2
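The converters in chrutils.py chain NFD normalization with regex rewrites, so call order matters: ascii_ced2ced maps the ASCII digit/dot notation to CED tone superscripts and the dot-below, and ascii_ced2mco then feeds that result through ced2mco. A small usage sketch with inputs taken from the module's own test() lists, assuming the module is importable as chrutils; outputs are not asserted here because they consist of combining marks.

from chrutils import ascii_ced2ced, ascii_ced2mco, ced2mco

ascii_form = "ga.2da.2de3ga"           # ASCII CED notation from test()
ced_form = ascii_ced2ced(ascii_form)   # digits -> tone superscripts, '.' -> dot-below
mco_form = ascii_ced2mco(ascii_form)   # same as ced2mco(ascii_ced2ced(ascii_form))
print(ced_form)
print(mco_form)
print(ced2mco("U²sgal²sdi ạ²dv¹ne²³li⁴sgi."))  # CED sentence from test()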
molo/usermetadata/tests/test_tags.py
praekelt/molo.usermetadata
0
12398
<reponame>praekelt/molo.usermetadata<filename>molo/usermetadata/tests/test_tags.py
import pytest

from django.test import TestCase, Client
from django.core.urlresolvers import reverse

from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import Main, SiteLanguageRelation, Languages
from molo.usermetadata.models import PersonaIndexPage, PersonaPage

from wagtail.wagtailcore.models import Site
from wagtail.contrib.settings.context_processors import SettingsProxy


@pytest.mark.django_db
class TestPages(TestCase, MoloTestCaseMixin):

    def setUp(self):
        self.mk_main()
        self.main = Main.objects.all().first()
        self.english = SiteLanguageRelation.objects.create(
            language_setting=Languages.for_site(self.main.get_site()),
            locale='en',
            is_active=True
        )

        self.index = PersonaIndexPage(title='Personae', slug="personae")
        self.main.add_child(instance=self.index)
        self.index.save_revision().publish()

        self.page = PersonaPage(title="child", slug="child")
        self.index.add_child(instance=self.page)
        self.page.save_revision().publish()

        self.client = Client()
        # Login
        self.user = self.login()

        site = Site.objects.get(is_default_site=True)
        setting = SettingsProxy(site)
        self.persona_settings = setting['usermetadata']['PersonaeSettings']
        self.persona_settings.persona_required = True
        self.persona_settings.save()

        self.site_settings = setting['core']['SiteSettings']
        self.site_settings.ga_tag_manager = 'GTM-xxxx'
        self.site_settings.save()

    def test_persona_selected_tag(self):
        response = self.client.get('/')
        self.assertRedirects(
            response, reverse('molo.usermetadata:persona') + '?next=/')

        response = self.client.get('%s?next=%s' % ((
            reverse(
                'molo.usermetadata:set_persona',
                kwargs={'persona_slug': self.page.slug})), '/'))

        self.assertTrue(self.client.session['MOLO_PERSONA_SELECTED'])

        response = self.client.get('/')
        self.assertContains(response, 'persona=child')

    def test_skip_persona_selected_tag(self):
        response = self.client.get('/')
        self.assertRedirects(
            response, reverse('molo.usermetadata:persona') + '?next=/')

        response = self.client.get('%s?next=%s' % ((
            reverse('molo.usermetadata:skip_persona')), '/'))

        self.assertTrue(self.client.session['MOLO_PERSONA_SELECTED'])

        response = self.client.get('/')
        self.assertContains(response, 'persona=skip')
1.796875
2
app/db_con.py
bmugenya/Zup
0
12399
import psycopg2

url = "dbname='da43n1slakcjkc' user='msqgxzgmcskvst' host='ec2-54-80-184-43.compute-1.amazonaws.com' port=5432 password='<PASSWORD>'"


class database_setup(object):

    def __init__(self):
        self.conn = psycopg2.connect(url)
        self.cursor = self.conn.cursor()

    def destroy_tables(self):
        # Drop the application tables; CASCADE removes dependent objects.
        self.cursor.execute("""DROP TABLE IF EXISTS Config, Report, Users CASCADE;""")
        self.conn.commit()

    def create_tables(self):
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS Users (
            user_id SERIAL NOT NULL,
            fname VARCHAR(25) NOT NULL,
            lname VARCHAR(25) NOT NULL,
            post_date DATE NOT NULL DEFAULT CURRENT_DATE,
            email VARCHAR(50) UNIQUE NOT NULL,
            password VARCHAR(256) NOT NULL,
            photo VARCHAR(255) NOT NULL,
            PRIMARY KEY (email)
            );""")

        self.cursor.execute("""CREATE TABLE IF NOT EXISTS Report (
            report_id SERIAL NOT NULL,
            num_tweet INT NOT NULL,
            tweet VARCHAR(255) NOT NULL,
            plot_bar VARCHAR(255) NOT NULL,
            plot_pie VARCHAR(255) NOT NULL,
            post_date DATE NOT NULL DEFAULT CURRENT_DATE,
            email VARCHAR(50) REFERENCES Users(email) NOT NULL,
            PRIMARY KEY (report_id)
            );""")

        self.cursor.execute("""CREATE TABLE IF NOT EXISTS Config (
            config_id SERIAL NOT NULL,
            consumerKey TEXT NOT NULL,
            consumerSecret TEXT NOT NULL,
            accessToken TEXT NOT NULL,
            accessSecret TEXT NOT NULL,
            email VARCHAR(50) REFERENCES Users(email) NOT NULL,
            PRIMARY KEY (config_id)
            );""")

        self.conn.commit()
2.78125
3
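database_setup above connects eagerly in __init__, so constructing it immediately opens a connection to the hard-coded url. A short usage sketch; it assumes the module is importable as app.db_con and that the Postgres instance named in url is reachable, and it does not address pooling or credential handling.

from app.db_con import database_setup

db = database_setup()    # opens the psycopg2 connection immediately
db.create_tables()       # idempotent: CREATE TABLE IF NOT EXISTS
# db.destroy_tables()    # drops Users/Report/Config when a clean slate is needed
db.conn.close()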