content: string column, lengths 5 to 1.05M
from math import floor

import ansiscape as a
from ansiscape.types import SequenceType


def make_example() -> SequenceType:
    foreground_rgb = a.sequence()
    background_rgb = a.sequence()

    square = 10

    for row in range(0, square):
        for column in range(0, (square * square)):
            block = floor(column / square)
            r = (1 / square) * row
            g = (1 / square) * block
            b = (1 / square) * (column - (square * block))
            foreground_rgb.extend(a.foreground((r, g, b, 1), "X"))
            background_rgb.extend(a.background((r, g, b, 1), " "))
        foreground_rgb.extend("\n")
        background_rgb.extend("\n")

    return a.sequence(
        a.heavy(a.double_underline("ansiscape")),
        "\n\n",
        "Welcome to the ", a.heavy("ansiscape"), " example!\n\n",
        "These are ", a.heavy("heavy"), " and ", a.light("light"),
        ".\n\nThese are ", a.italic("italic"), " and ", a.blackletter("blackletter"),
        ".\n\nThese are ", a.single_underline("single underlined"), ", ",
        a.double_underline("double underlined"), " and ", a.overline("overlined"),
        ".\n\nThese are ", a.slow_blink("blinking slowly"), " and ", a.fast_blink("blinking fast"),
        ".\n\nThese are ", a.invert("inverted"), ", ", a.conceal("concealed"),
        " (that's ", a.italic("concealed"), ") and ", a.strike("struck"),
        ".\n\nThese are the ", a.alternative_font_0("first alternative font"), ", the ",
        a.alternative_font_1("second alternative font"), ", the ",
        a.alternative_font_2("third alternative font"), ", the ",
        a.alternative_font_3("fourth alternative font"), ", the ",
        a.alternative_font_4("fifth alternative font"), ", the ",
        a.alternative_font_5("sixth alternative font"), ", the ",
        a.alternative_font_6("seventh alternative font"), ", the ",
        a.alternative_font_7("eighth alternative font"), " and the ",
        a.alternative_font_8("ninth alternative font"), ".\n\n",
        a.proportional_spacing("This entire line uses proportional spacing."),
        "\n\nThese are ", a.black("black"), ", ", a.red("red"), ", ", a.green("green"), ", ",
        a.yellow("yellow"), ", ", a.blue("blue"), ", ", a.magenta("magenta"), ", ",
        a.cyan("cyan"), ", ", a.white("white"), ", ", a.bright_black("bright black"), ", ",
        a.bright_red("bright red"), ", ", a.bright_green("bright green"), ", ",
        a.bright_yellow("bright yellow"), ", ", a.bright_blue("bright blue"), ", ",
        a.bright_magenta("bright magenta"), ", ", a.bright_cyan("bright cyan"), " and ",
        a.bright_white("bright white"), " foreground.\n\nThese are ",
        a.black_background("black"), ", ", a.red_background("red"), ", ",
        a.green_background("green"), ", ", a.yellow_background("yellow"), ", ",
        a.blue_background("blue"), ", ", a.magenta_background("magenta"), ", ",
        a.cyan_background("cyan"), ", ", a.white_background("white"), ", ",
        a.bright_black_background("bright black"), ", ", a.bright_red_background("bright red"), ", ",
        a.bright_green_background("bright green"), ", ", a.bright_yellow_background("bright yellow"), ", ",
        a.bright_blue_background("bright blue"), ", ", a.bright_magenta_background("bright magenta"), ", ",
        a.bright_cyan_background("bright cyan"), " and ", a.bright_white_background("bright white"),
        " background.\n\nHere's some foreground RGB:\n\n", foreground_rgb,
        "\nAnd some background RGB:\n\n", background_rgb,
        "\nThese are ", a.frame("framed"), " and ", a.circle("encircled"),
        ".\n\nThese are the ", a.single_line_under_or_right("single line under/right"), ", ",
        a.double_line_under_or_right("double line under/right"), ", ",
        a.single_line_over_or_left("single line over/left"), ", ",
        a.double_line_over_or_left("double line over/left"), " and ", a.stress("stress"),
        " ideograms.\n\nNot all terminals support all codes, so please don't ",
        "be too sad if some of the examples didn't work for you.\n\n",
    )
""" pyPESTO ======= Parameter Estimation TOolbox for python. """ from .version import __version__ # noqa: F401 from .objective import (ObjectiveOptions, Objective, AmiciObjective, PetabImporter) from .problem import Problem from .result import (Result, OptimizeResult, ProfileResult, SampleResult) from .optimize import (minimize, OptimizeOptions, OptimizerResult, Optimizer, ScipyOptimizer, DlibOptimizer, GlobalOptimizer) from .profile import (parameter_profile, ProfileOptions, ProfilerResult) from .engine import (SingleCoreEngine, MultiProcessEngine) from . import visualize __all__ = [ # objective "ObjectiveOptions", "Objective", "AmiciObjective", "PetabImporter", # problem "Problem", # result "Result", "OptimizeResult", "ProfileResult", "SampleResult", # optimize "minimize", "OptimizeOptions", "OptimizerResult", "Optimizer", "ScipyOptimizer", "DlibOptimizer", "GlobalOptimizer", # profile "parameter_profile", "ProfileOptions", "ProfilerResult", # engine "SingleCoreEngine", "MultiProcessEngine", "visualize", ]
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Wav2Vec2 """ import os from contextlib import contextmanager from dataclasses import dataclass from multiprocessing import get_context from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Union import numpy as np from ...file_utils import ModelOutput, requires_backends from ...processing_utils import ProcessorMixin if TYPE_CHECKING: from pyctcdecode import BeamSearchDecoderCTC from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils import PreTrainedTokenizerBase ListOfDict = List[Dict[str, Union[int, str]]] @dataclass class Wav2Vec2DecoderWithLMOutput(ModelOutput): """ Output type of [`Wav2Vec2DecoderWithLM`], with transcription. Args: text (list of `str` or `str`): Decoded logits in text from. Usually the speech transcription. logit_score (list of `float` or `float`): Total logit score of the beam associated with produced text. lm_score (list of `float`): Fused lm_score of the beam associated with produced text. word_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`): Offsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets can be used to compute time stamps for each word. """ text: Union[List[str], str] logit_score: Union[List[float], float] = None lm_score: Union[List[float], float] = None word_offsets: Union[List[ListOfDict], ListOfDict] = None class Wav2Vec2ProcessorWithLM(ProcessorMixin): r""" Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor, a Wav2Vec2 CTC tokenizer and a decoder with language model support into a single processor for language model boosted speech recognition decoding. Args: feature_extractor ([`Wav2Vec2FeatureExtractor`]): An instance of [`Wav2Vec2FeatureExtractor`]. The feature extractor is a required input. tokenizer ([`Wav2Vec2CTCTokenizer`]): An instance of [`Wav2Vec2CTCTokenizer`]. The tokenizer is a required input. decoder (`pyctcdecode.BeamSearchDecoderCTC`): An instance of [`pyctcdecode.BeamSearchDecoderCTC`]. The decoder is a required input. """ feature_extractor_class = "Wav2Vec2FeatureExtractor" tokenizer_class = "Wav2Vec2CTCTokenizer" def __init__( self, feature_extractor: "FeatureExtractionMixin", tokenizer: "PreTrainedTokenizerBase", decoder: "BeamSearchDecoderCTC", ): from pyctcdecode import BeamSearchDecoderCTC super().__init__(feature_extractor, tokenizer) if not isinstance(decoder, BeamSearchDecoderCTC): raise ValueError(f"`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}") # make sure that decoder's alphabet and tokenizer's vocab match in content missing_decoder_tokens = self.get_missing_alphabet_tokens(decoder, tokenizer) if len(missing_decoder_tokens) > 0: raise ValueError( f"The tokens {missing_decoder_tokens} are defined in the tokenizer's " "vocabulary, but not in the decoder's alphabet. 
" f"Make sure to include {missing_decoder_tokens} in the decoder's alphabet." ) self.decoder = decoder self.current_processor = self.feature_extractor def save_pretrained(self, save_directory): super().save_pretrained(save_directory) self.decoder.save_to_dir(save_directory) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a [`Wav2Vec2ProcessorWithLM`] from a pretrained Wav2Vec2 processor. <Tip> This class method is simply calling Wav2Vec2FeatureExtractor's [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], Wav2Vec2CTCTokenizer's [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`], and [`pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub`]. Please refer to the docstrings of the methods above for more information. </Tip> Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a feature extractor file saved using the [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. **kwargs Additional keyword arguments passed along to both [`SequenceFeatureExtractor`] and [`PreTrainedTokenizer`] """ requires_backends(cls, "pyctcdecode") from pyctcdecode import BeamSearchDecoderCTC feature_extractor, tokenizer = super()._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) if os.path.isdir(pretrained_model_name_or_path) or os.path.isfile(pretrained_model_name_or_path): decoder = BeamSearchDecoderCTC.load_from_dir(pretrained_model_name_or_path) else: # BeamSearchDecoderCTC has no auto class kwargs.pop("_from_auto", None) # snapshot_download has no `trust_remote_code` flag kwargs.pop("trust_remote_code", None) # make sure that only relevant filenames are downloaded language_model_filenames = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*") alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME allow_regex = [language_model_filenames, alphabet_filename] decoder = BeamSearchDecoderCTC.load_from_hf_hub( pretrained_model_name_or_path, allow_regex=allow_regex, **kwargs ) # set language model attributes for attribute in ["alpha", "beta", "unk_score_offset", "score_boundary"]: value = kwargs.pop(attribute, None) if value is not None: cls._set_language_model_attribute(decoder, attribute, value) # make sure that decoder's alphabet and tokenizer's vocab match in content missing_decoder_tokens = cls.get_missing_alphabet_tokens(decoder, tokenizer) if len(missing_decoder_tokens) > 0: raise ValueError( f"The tokens {missing_decoder_tokens} are defined in the tokenizer's " "vocabulary, but not in the decoder's alphabet. " f"Make sure to include {missing_decoder_tokens} in the decoder's alphabet." 
) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer, decoder=decoder) @staticmethod def _set_language_model_attribute(decoder: "BeamSearchDecoderCTC", attribute: str, value: float): setattr(decoder.model_container[decoder._model_key], attribute, value) @property def language_model(self): return self.decoder.model_container[self.decoder._model_key] @staticmethod def get_missing_alphabet_tokens(decoder, tokenizer): from pyctcdecode.alphabet import BLANK_TOKEN_PTN, UNK_TOKEN, UNK_TOKEN_PTN # we need to make sure that all of the tokenizer's except the special tokens # are present in the decoder's alphabet. Retrieve missing alphabet token # from decoder tokenizer_vocab_list = list(tokenizer.get_vocab().keys()) # replace special tokens for i, token in enumerate(tokenizer_vocab_list): if BLANK_TOKEN_PTN.match(token): tokenizer_vocab_list[i] = "" if token == tokenizer.word_delimiter_token: tokenizer_vocab_list[i] = " " if UNK_TOKEN_PTN.match(token): tokenizer_vocab_list[i] = UNK_TOKEN # are any of the extra tokens no special tokenizer tokens? missing_tokens = set(tokenizer_vocab_list) - set(decoder._alphabet.labels) return missing_tokens def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor's [`~Wav2Vec2FeatureExtractor.__call__`] and returns its output. If used in the context [`~Wav2Vec2ProcessorWithLM.as_target_processor`] this method forwards all its arguments to Wav2Vec2CTCTokenizer's [`~Wav2Vec2CTCTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ return self.current_processor(*args, **kwargs) def pad(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor's [`~Wav2Vec2FeatureExtractor.pad`] and returns its output. If used in the context [`~Wav2Vec2ProcessorWithLM.as_target_processor`] this method forwards all its arguments to Wav2Vec2CTCTokenizer's [`~Wav2Vec2CTCTokenizer.pad`]. Please refer to the docstring of the above two methods for more information. """ return self.current_processor.pad(*args, **kwargs) def batch_decode( self, logits: np.ndarray, num_processes: Optional[int] = None, beam_width: Optional[int] = None, beam_prune_logp: Optional[float] = None, token_min_logp: Optional[float] = None, hotwords: Optional[Iterable[str]] = None, hotword_weight: Optional[float] = None, alpha: Optional[float] = None, beta: Optional[float] = None, unk_score_offset: Optional[float] = None, lm_score_boundary: Optional[bool] = None, output_word_offsets: bool = False, ): """ Batch decode output logits to audio transcription with language model support. <Tip> This function makes use of Python's multiprocessing. </Tip> Args: logits (`np.ndarray`): The logits output vector of the model representing the log probabilities for each token. num_processes (`int`, *optional*): Number of processes on which the function should be parallelized over. Defaults to the number of available CPUs. beam_width (`int`, *optional*): Maximum number of beams at each step in decoding. Defaults to pyctcdecode's DEFAULT_BEAM_WIDTH. beam_prune_logp (`int`, *optional*): Beams that are much worse than best beam will be pruned Defaults to pyctcdecode's DEFAULT_PRUNE_LOGP. token_min_logp (`int`, *optional*): Tokens below this logp are skipped unless they are argmax of frame Defaults to pyctcdecode's DEFAULT_MIN_TOKEN_LOGP. 
hotwords (`List[str]`, *optional*): List of words with extra importance, can be OOV for LM hotword_weight (`int`, *optional*): Weight factor for hotword importance Defaults to pyctcdecode's DEFAULT_HOTWORD_WEIGHT. alpha (`float`, *optional*): Weight for language model during shallow fusion beta (`float`, *optional*): Weight for length score adjustment of during scoring unk_score_offset (`float`, *optional*): Amount of log score offset for unknown tokens lm_score_boundary (`bool`, *optional*): Whether to have kenlm respect boundaries when scoring output_word_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words. <Tip> Please take a look at the Example of [`~model.wav2vec2_with_lm.processing_wav2vec2_with_lm.decode`] to better understand how to make use of `output_word_offsets`. [`~model.wav2vec2_with_lm.processing_wav2vec2_with_lm.batch_decode`] works the same way with batched output. </Tip> Returns: [`~models.wav2vec2.Wav2Vec2DecoderWithLMOutput`] or `tuple`. """ from pyctcdecode.constants import ( DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP, ) # set defaults beam_width = beam_width if beam_width is not None else DEFAULT_BEAM_WIDTH beam_prune_logp = beam_prune_logp if beam_prune_logp is not None else DEFAULT_PRUNE_LOGP token_min_logp = token_min_logp if token_min_logp is not None else DEFAULT_MIN_TOKEN_LOGP hotword_weight = hotword_weight if hotword_weight is not None else DEFAULT_HOTWORD_WEIGHT # reset params at every forward call. It's just a `set` method in pyctcdecode self.decoder.reset_params( alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary ) # create multiprocessing pool and list numpy arrays # filter out logits padding logits_list = [array[(array != -100.0).all(axis=-1)] for array in logits] pool = get_context("fork").Pool(num_processes) # pyctcdecode decoded_beams = self.decoder.decode_beams_batch( pool, logits_list=logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight, ) # clone multi-processing pool pool.close() # extract text and scores batch_texts, logit_scores, lm_scores, word_offsets = [], [], [], [] for d in decoded_beams: batch_texts.append(d[0][0]) logit_scores.append(d[0][-2]) lm_scores.append(d[0][-1]) word_offsets.append([{"word": t[0], "start_offset": t[1][0], "end_offset": t[1][1]} for t in d[0][1]]) word_offsets = word_offsets if output_word_offsets else None return Wav2Vec2DecoderWithLMOutput( text=batch_texts, logit_score=logit_scores, lm_score=lm_scores, word_offsets=word_offsets ) def decode( self, logits: np.ndarray, beam_width: Optional[int] = None, beam_prune_logp: Optional[float] = None, token_min_logp: Optional[float] = None, hotwords: Optional[Iterable[str]] = None, hotword_weight: Optional[float] = None, alpha: Optional[float] = None, beta: Optional[float] = None, unk_score_offset: Optional[float] = None, lm_score_boundary: Optional[bool] = None, output_word_offsets: bool = False, ): """ Decode output logits to audio transcription with language model support. Args: logits (`np.ndarray`): The logits output vector of the model representing the log probabilities for each token. beam_width (`int`, *optional*): Maximum number of beams at each step in decoding. Defaults to pyctcdecode's DEFAULT_BEAM_WIDTH. 
beam_prune_logp (`int`, *optional*): A threshold to prune beams with log-probs less than best_beam_logp + beam_prune_logp. The value should be <= 0. Defaults to pyctcdecode's DEFAULT_PRUNE_LOGP. token_min_logp (`int`, *optional*): Tokens with log-probs below token_min_logp are skipped unless they are have the maximum log-prob for an utterance. Defaults to pyctcdecode's DEFAULT_MIN_TOKEN_LOGP. hotwords (`List[str]`, *optional*): List of words with extra importance which can be missing from the LM's vocabulary, e.g. ["huggingface"] hotword_weight (`int`, *optional*): Weight multiplier that boosts hotword scores. Defaults to pyctcdecode's DEFAULT_HOTWORD_WEIGHT. alpha (`float`, *optional*): Weight for language model during shallow fusion beta (`float`, *optional*): Weight for length score adjustment of during scoring unk_score_offset (`float`, *optional*): Amount of log score offset for unknown tokens lm_score_boundary (`bool`, *optional*): Whether to have kenlm respect boundaries when scoring output_word_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words. <Tip> Please take a look at the example of [`~models.wav2vec2_with_lm.processing_wav2vec2_with_lm.decode`] to better understand how to make use of `output_word_offsets`. </Tip> Returns: [`~models.wav2vec2.Wav2Vec2DecoderWithLMOutput`] or `tuple`. Example: ```python >>> # Let's see how to retrieve time steps for a model >>> from transformers import AutoTokenizer, AutoProcessor, AutoModelForCTC >>> from datasets import load_dataset >>> import datasets >>> import torch >>> # import model, feature extractor, tokenizer >>> model = AutoModelForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") >>> processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") >>> # load first sample of English common_voice >>> dataset = load_dataset("common_voice", "en", split="train", streaming=True) >>> dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000)) >>> dataset_iter = iter(dataset) >>> sample = next(dataset_iter) >>> # forward sample through model to get greedily predicted transcription ids >>> input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values >>> with torch.no_grad(): ... logits = model(input_values).logits[0].cpu().numpy() >>> # retrieve word stamps (analogous commands for `output_char_offsets`) >>> outputs = processor.decode(logits, output_word_offsets=True) >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate >>> time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate >>> word_offsets = [ ... { ... "word": d["word"], ... "start_time": round(d["start_offset"] * time_offset, 2), ... "end_time": round(d["end_offset"] * time_offset, 2), ... } ... for d in outputs.word_offsets ... 
] >>> # compare word offsets with audio `common_voice_en_100038.mp3` online on the dataset viewer: >>> # https://huggingface.co/datasets/common_voice/viewer/en/train >>> word_offsets[:4] [{'word': 'WHY', 'start_time': 1.42, 'end_time': 1.54}, {'word': 'DOES', 'start_time': 1.64, 'end_time': 1.88}, {'word': 'A', 'start_time': 2.12, 'end_time': 2.14}, {'word': 'MILE', 'start_time': 2.26, 'end_time': 2.46}] ```""" from pyctcdecode.constants import ( DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP, ) # set defaults beam_width = beam_width if beam_width is not None else DEFAULT_BEAM_WIDTH beam_prune_logp = beam_prune_logp if beam_prune_logp is not None else DEFAULT_PRUNE_LOGP token_min_logp = token_min_logp if token_min_logp is not None else DEFAULT_MIN_TOKEN_LOGP hotword_weight = hotword_weight if hotword_weight is not None else DEFAULT_HOTWORD_WEIGHT # reset params at every forward call. It's just a `set` method in pyctcdecode self.decoder.reset_params( alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary ) # pyctcdecode decoded_beams = self.decoder.decode_beams( logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight, ) word_offsets = None if output_word_offsets: word_offsets = [ {"word": word, "start_offset": start_offset, "end_offset": end_offset} for word, (start_offset, end_offset) in decoded_beams[0][2] ] # more output features will be added in the future return Wav2Vec2DecoderWithLMOutput( text=decoded_beams[0][0], logit_score=decoded_beams[0][-2], lm_score=decoded_beams[0][-1], word_offsets=word_offsets, ) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Wav2Vec2. """ self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor
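Since `decode`'s docstring already walks through the single-sample case, here is a rough companion sketch for `batch_decode`; it assumes the same checkpoint as that docstring plus working pyctcdecode/kenlm installs, and uses silent dummy audio purely so the snippet is self-contained:

import numpy as np
import torch
from transformers import AutoModelForCTC, AutoProcessor

model = AutoModelForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

# two dummy one-second clips at 16 kHz; replace with real audio arrays
audio_batch = [np.zeros(16_000, dtype=np.float32), np.zeros(16_000, dtype=np.float32)]

inputs = processor(audio_batch, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()

# language-model-boosted beam search over the whole batch (uses multiprocessing internally)
transcriptions = processor.batch_decode(logits).text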
from typing import List, Optional import aiosqlite from lotus.types.blockchain_format.coin import Coin from lotus.types.blockchain_format.sized_bytes import bytes32 from lotus.types.coin_record import CoinRecord from lotus.types.full_block import FullBlock from lotus.util.db_wrapper import DBWrapper from lotus.util.ints import uint32, uint64 from lotus.util.lru_cache import LRUCache class CoinStore: """ This object handles CoinRecords in DB. A cache is maintained for quicker access to recent coins. """ coin_record_db: aiosqlite.Connection coin_record_cache: LRUCache cache_size: uint32 db_wrapper: DBWrapper @classmethod async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)): self = cls() self.cache_size = cache_size self.db_wrapper = db_wrapper self.coin_record_db = db_wrapper.db await self.coin_record_db.execute("pragma journal_mode=wal") await self.coin_record_db.execute("pragma synchronous=2") await self.coin_record_db.execute( ( "CREATE TABLE IF NOT EXISTS coin_record(" "coin_name text PRIMARY KEY," " confirmed_index bigint," " spent_index bigint," " spent int," " coinbase int," " puzzle_hash text," " coin_parent text," " amount blob," " timestamp bigint)" ) ) # Useful for reorg lookups await self.coin_record_db.execute( "CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)" ) await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)") await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)") await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)") await self.coin_record_db.commit() self.coin_record_cache = LRUCache(cache_size) return self async def new_block(self, block: FullBlock, tx_additions: List[Coin], tx_removals: List[bytes32]): """ Only called for blocks which are blocks (and thus have rewards and transactions) """ if block.is_transaction_block() is False: return None assert block.foliage_transaction_block is not None for coin in tx_additions: record: CoinRecord = CoinRecord( coin, block.height, uint32(0), False, False, block.foliage_transaction_block.timestamp, ) await self._add_coin_record(record, False) included_reward_coins = block.get_included_reward_coins() if block.height == 0: assert len(included_reward_coins) == 0 else: assert len(included_reward_coins) >= 2 for coin in included_reward_coins: reward_coin_r: CoinRecord = CoinRecord( coin, block.height, uint32(0), False, True, block.foliage_transaction_block.timestamp, ) await self._add_coin_record(reward_coin_r, False) total_amount_spent: int = 0 for coin_name in tx_removals: total_amount_spent += await self._set_spent(coin_name, block.height) # Sanity check, already checked in block_body_validation assert sum([a.amount for a in tx_additions]) <= total_amount_spent # Checks DB and DiffStores for CoinRecord with coin_name and returns it async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]: cached = self.coin_record_cache.get(coin_name) if cached is not None: return cached cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),)) row = await cursor.fetchone() await cursor.close() if row is not None: coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7])) record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]) self.coin_record_cache.put(record.coin.name(), record) return record return None async def 
get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]: cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,)) rows = await cursor.fetchall() await cursor.close() coins = [] for row in rows: coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7])) coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])) return coins async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]: cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,)) rows = await cursor.fetchall() await cursor.close() coins = [] for row in rows: spent: bool = bool(row[3]) if spent: coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7])) coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8]) coins.append(coin_record) return coins # Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them async def get_coin_records_by_puzzle_hash( self, include_spent_coins: bool, puzzle_hash: bytes32, start_height: uint32 = uint32(0), end_height: uint32 = uint32((2 ** 32) - 1), ) -> List[CoinRecord]: coins = set() cursor = await self.coin_record_db.execute( f"SELECT * from coin_record WHERE puzzle_hash=? AND confirmed_index>=? AND confirmed_index<? " f"{'' if include_spent_coins else 'AND spent=0'}", (puzzle_hash.hex(), start_height, end_height), ) rows = await cursor.fetchall() await cursor.close() for row in rows: coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7])) coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])) return list(coins) async def get_coin_records_by_puzzle_hashes( self, include_spent_coins: bool, puzzle_hashes: List[bytes32], start_height: uint32 = uint32(0), end_height: uint32 = uint32((2 ** 32) - 1), ) -> List[CoinRecord]: if len(puzzle_hashes) == 0: return [] coins = set() puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes]) cursor = await self.coin_record_db.execute( f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes_db) - 1)}?) ' f"AND confirmed_index>=? AND confirmed_index<? 
" f"{'' if include_spent_coins else 'AND spent=0'}", puzzle_hashes_db + (start_height, end_height), ) rows = await cursor.fetchall() await cursor.close() for row in rows: coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7])) coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])) return list(coins) async def rollback_to_block(self, block_index: int): """ Note that block_index can be negative, in which case everything is rolled back """ # Update memory cache delete_queue: bytes32 = [] for coin_name, coin_record in list(self.coin_record_cache.cache.items()): if int(coin_record.spent_block_index) > block_index: new_record = CoinRecord( coin_record.coin, coin_record.confirmed_block_index, uint32(0), False, coin_record.coinbase, coin_record.timestamp, ) self.coin_record_cache.put(coin_record.coin.name(), new_record) if int(coin_record.confirmed_block_index) > block_index: delete_queue.append(coin_name) for coin_name in delete_queue: self.coin_record_cache.remove(coin_name) # Delete from storage c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,)) await c1.close() c2 = await self.coin_record_db.execute( "UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?", (block_index,), ) await c2.close() # Store CoinRecord in DB and ram cache async def _add_coin_record(self, record: CoinRecord, allow_replace: bool) -> None: if self.coin_record_cache.get(record.coin.name()) is not None: self.coin_record_cache.remove(record.coin.name()) cursor = await self.coin_record_db.execute( f"INSERT {'OR REPLACE ' if allow_replace else ''}INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)", ( record.coin.name().hex(), record.confirmed_block_index, record.spent_block_index, int(record.spent), int(record.coinbase), str(record.coin.puzzle_hash.hex()), str(record.coin.parent_coin_info.hex()), bytes(record.coin.amount), record.timestamp, ), ) await cursor.close() # Update coin_record to be spent in DB async def _set_spent(self, coin_name: bytes32, index: uint32) -> uint64: current: Optional[CoinRecord] = await self.get_coin_record(coin_name) if current is None: raise ValueError(f"Cannot spend a coin that does not exist in db: {coin_name}") assert not current.spent # Redundant sanity check, already checked in block_body_validation spent: CoinRecord = CoinRecord( current.coin, current.confirmed_block_index, index, True, current.coinbase, current.timestamp, ) # type: ignore # noqa await self._add_coin_record(spent, True) return current.coin.amount
import time
import os
from uuid import uuid4

from flask import Flask, request, Response
from flask_restful import Api, Resource
from waitress import serve

MY_DIR = os.path.dirname(os.path.realpath(__file__))
RES_DIR = os.path.join(MY_DIR, "res")

items = []
ditems = dict()


class Item(Resource):

    def __get_item_if_exists(self, name):
        return next(iter(filter(lambda x: x['name'] == name, items)), None)

    def get(self, name):
        item = self.__get_item_if_exists(name)
        return item, item and 200 or 404

    def post(self):
        rdata = request.get_json()  # force=True -> now it does not need content-type header
        name = rdata['name']
        item = self.__get_item_if_exists(name)
        if item:
            return {'code': 'error', 'message': 'item already exists for name: ' + name}, 400  # Bad Request
        item = {'name': name, 'price': rdata['price']}
        items.append(item)
        return {'code': 'success'}, 200

    def delete(self, name):
        global items
        items = list(filter(lambda x: x['name'] != name, items))
        return {'code': 'success'}

    def put(self):
        rdata = request.get_json()
        name = rdata['name']
        item = self.__get_item_if_exists(name)
        if item:
            item.update(rdata)
            return {'code': 'success'}, 200
        else:
            item = {'name': name, 'price': rdata['price']}
            items.append(item)
            return {'code': 'success'}, 201


class DynamicItem(Resource):
    '''
    Generates an ID with post request and get request will work with that ID and not name.
    '''

    def get(self, iid):
        try:
            return ditems[iid], 200
        except KeyError:
            return None, 404

    def post(self):
        rdata = request.get_json()  # force=True -> now it does not need content-type header
        iid = str(uuid4())
        rdata["iid"] = iid
        ditems[iid] = rdata
        return ditems[iid], 200


class ItemList(Resource):

    def get(self):
        return {'items': items}

    def delete(self):
        global items
        items = list()
        return {'code': 'success'}


class DynamicItemList(Resource):

    def get(self):
        return {'ditems': ditems}

    def delete(self):
        global ditems
        ditems = dict()
        return {'code': 'success'}


class Incrementer(Resource):

    def get(self, value):
        return {'value': value + 1}, 200


class NaradaSvc(Resource):

    def get(self, path):
        f = open(os.path.join(RES_DIR, path), "r")
        res = f.read().replace("${BODY}", "Hello there")
        return Response(res, mimetype="text/html")


def __launch_setu_svc(port):
    app = Flask(__name__)
    api = Api(app)
    api.add_resource(NaradaSvc, '/narada', '/narada/<path:path>')
    api.add_resource(Item, '/item', '/item/<string:name>', endpoint='item')
    api.add_resource(ItemList, '/items', endpoint='items')
    api.add_resource(Incrementer, '/inc', '/inc/<int:value>', endpoint='inc')
    api.add_resource(DynamicItem, '/ditem', '/ditem/<string:iid>', endpoint='ditem')
    api.add_resource(DynamicItemList, '/ditems', endpoint='ditems')
    # api.add_resource(ItemList, '/items', endpoint='items')
    # app.run(port=port, use_evalex=False)  # , debug=True)
    serve(app, host="localhost", port=port, _quiet=True)


def wait_for_port(port):
    import socket
    server_address = ('localhost', port)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ct = time.time()
    while time.time() - ct < 60:
        try:
            sock.bind(server_address)
            sock.close()
            return
        except Exception as e:
            time.sleep(1)
    print("Port is not open. Timeout after 60 seconds.")
    raise RuntimeError("Another service is running at port {}. Narada could not be launched. Message: ".format(port))


def launch_service(port):
    try:
        wait_for_port(port)
        __launch_setu_svc(port)
    except Exception as e:
        raise RuntimeError("Not able to launch Narada Service. Got response: ", e)
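A hedged client-side sketch of the routes registered in `__launch_setu_svc`; the port number is arbitrary, `requests` is an extra dependency, and running the server in a daemon thread is only a convenience for the example:

import threading
import time
import requests

PORT = 9009  # arbitrary free port
threading.Thread(target=launch_service, args=(PORT,), daemon=True).start()
time.sleep(2)  # crude wait for waitress to come up

base = "http://localhost:{}".format(PORT)
requests.post(base + "/item", json={"name": "pen", "price": 2.5})
print(requests.get(base + "/items").json())   # {'items': [{'name': 'pen', 'price': 2.5}]}
print(requests.get(base + "/inc/41").json())  # {'value': 42}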
import warnings

import matplotlib
import matplotlib.pyplot as plt

import pyDeltaRCM

# filter out the warning raised about no netcdf being found
warnings.filterwarnings("ignore", category=UserWarning)

n = 1
cm = matplotlib.cm.get_cmap('tab10')

# init delta model
with pyDeltaRCM.shared_tools._docs_temp_directory() as output_dir:
    delta = pyDeltaRCM.DeltaModel(
        out_dir=output_dir,
        resume_checkpoint='../../_resources/checkpoint')

_shp = delta.eta.shape

delta.init_water_iteration()
delta.run_water_iteration()

fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(9, 3))

# fill in axis
pyDeltaRCM.debug_tools.plot_domain(
    delta.eta, ax=ax[0], grid=False, cmap='cividis', label='bed elevation (m)')
pyDeltaRCM.debug_tools.plot_domain(
    delta.eta, ax=ax[1], grid=False, cmap='cividis', label='bed elevation (m)')
delta.show_line(
    delta.free_surf_walk_inds[::10, :].T, 'k-',
    ax=ax[1], alpha=0.1, multiline=True, nozeros=True)

fig.show()
from django.conf.urls import url, include
from django.conf.urls.static import static

from src import settings

from . import views

urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^dashboard/', views.dashboard, name='dashboard'),
    url(r'^delete/(?P<file_id>[\d ]+)/$', views.delete_file, name='delete_file'),
    url(r'^download_file/(?P<file_id>[\d ]+)/$', views.download_file, name='download_file'),
    url(r'^accounts/', include('registration.backends.default.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import argparse

import pandas
from fuzzywuzzy import process


def colFix(x):
    """Convert column name to string, removing dots and spaces"""
    if type(x) is int:
        return 'd{}'.format(x)
    else:
        return x.replace('.', '').replace(' ', '').replace('-', '')


def prepareTable(excel_file, feather_file):
    """Read excel table and save it to feather for faster access"""
    table = pandas.read_excel(excel_file, skiprows=3)
    table.columns = table.columns.map(colFix)
    table.to_feather(feather_file)


def findJournal(title, table):
    """Perform fuzzy search on 1st title column"""
    match = process.extractOne(title, table.Tytuł1)
    row_id = match[-1]
    match = table.loc[row_id]
    return match


def printFormatted(match):
    """Print relevant information from table row"""
    isBio = 'TAK' if match.d604 == 'x' else 'NIE'
    print('ISSN', match.issn)
    print('eISSN', match.eissn)
    print('Tytuł 1:', match.Tytuł1)
    print('Tytuł 2:', match.Tytuł2)
    print('Punkty:', match.Punkty)
    print('Nauki biologiczne:', isBio)


if __name__ == '__main__':
    # argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--prepare-db', help='Load data from excel table')
    parser.add_argument('name', nargs='?', default=None, help='Journal name')
    args = parser.parse_args()

    # constants
    DB_FILE = 'czasopisma.feather'

    # convert from excel to feather if --prepare-db specified
    if args.prepare_db is not None:
        prepareTable(args.prepare_db, DB_FILE)

    # search for journal if name specified
    if args.name is not None:
        # read the feather file
        try:
            table = pandas.read_feather(DB_FILE)
        except IOError:
            print('Cannot find {}. Run --prepare-db first'.format(DB_FILE))
            exit()

        # find and display the match
        best_match = findJournal(args.name, table)
        printFormatted(best_match)
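For use from another script instead of the CLI, the same helpers can be called directly; a small sketch, assuming `prepareTable` has already produced `czasopisma.feather` and using an arbitrary journal title:

import pandas

# query the prepared feather table programmatically
table = pandas.read_feather('czasopisma.feather')
printFormatted(findJournal('Nature Communications', table))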
""" The :mod:`view` module contains the classes used to represent a MMIF view as a live Python object. In MMIF, views are created by apps in a pipeline that are annotating data that was previously present in the MMIF file. """ from datetime import datetime from typing import Dict, Union, Optional, Generator, List, cast import dateutil.parser from pyrsistent import pmap, pvector from mmif.vocabulary import ThingTypesBase from .annotation import Annotation, Document from .model import FreezableMmifObject, FreezableDataList, FreezableDataDict, MmifObject __all__ = ['View', 'ViewMetadata', 'Contain'] from .. import DocumentTypes class View(FreezableMmifObject): """ View object that represents a single view in a MMIF file. A view is identified by an ID, and contains certain metadata, a list of annotations, and potentially a JSON-LD ``@context`` IRI. If ``view_obj`` is not provided, an empty View will be generated. :param view_obj: the JSON data that defines the view """ def __init__(self, view_obj: Union[bytes, str, dict] = None) -> None: self._id_counts = {} self.id: str = '' self.metadata: ViewMetadata = ViewMetadata() self.annotations: AnnotationsList = AnnotationsList() self.disallow_additional_properties() self._attribute_classes = pmap({ 'metadata': ViewMetadata, 'annotations': AnnotationsList }) self._required_attributes = pvector(["id", "metadata", "annotations"]) super().__init__(view_obj) for item in self.annotations: if isinstance(item, Document): item.parent = self.id def new_contain(self, at_type: Union[str, ThingTypesBase], **contains_metadata) -> Optional['Contain']: """ Adds a new element to the ``contains`` metadata. :param at_type: the ``@type`` of the annotation type being added :param contains_metadata: any metadata associated with the annotation type :return: the generated :class:`Contain` object """ if MmifObject.is_empty(at_type): raise ValueError("@type must not be empty.") else: return self.metadata.new_contain(at_type, **contains_metadata) def _set_id(self, annotation: Annotation, identifier): if identifier is not None: annotation.id = identifier else: prefix = annotation.at_type.get_prefix() new_num = self._id_counts.get(prefix, 0) + 1 new_id = f'{prefix}_{new_num}' self._id_counts[prefix] = new_num annotation.id = new_id def new_annotation(self, at_type: Union[str, ThingTypesBase], aid: Optional[str] = None, overwrite=False, **properties) -> 'Annotation': """ Generates a new :class:`mmif.serialize.annotation.Annotation` object and adds it to the current view. Fails if there is already an annotation with the same ID in the view, unless ``overwrite`` is set to True. :param at_type: the desired ``@type`` of the annotation. :param aid: the desired ID of the annotation, when not given, the mmif SDK tries to automatically generate an ID based on Annotation type and existing annotations in the view. :param overwrite: if set to True, will overwrite an existing annotation with the same ID. :raises KeyError: if ``overwrite`` is set to False and an annotation with the same ID exists in the view. :return: the generated :class:`mmif.serialize.annotation.Annotation` """ new_annotation = Annotation() new_annotation.at_type = at_type self._set_id(new_annotation, aid) for propk, propv in properties.items(): new_annotation.add_property(propk, propv) return self.add_annotation(new_annotation, overwrite) def add_annotation(self, annotation: 'Annotation', overwrite=False) -> 'Annotation': """ Adds an annotation to the current view. 
Fails if there is already an annotation with the same ID in the view, unless ``overwrite`` is set to True. :param annotation: the :class:`mmif.serialize.annotation.Annotation` object to add :param overwrite: if set to True, will overwrite an existing annotation with the same ID :raises KeyError: if ``overwrite`` is set to False and an annotation with the same ID exists in the view :return: the same Annotation object passed in as ``annotation`` """ if self.is_frozen(): raise TypeError("MMIF object is frozen") self.annotations.append(annotation, overwrite) self.new_contain(annotation.at_type) return annotation def new_textdocument(self, text: str, lang: str = "en", did: Optional[str] = None, overwrite=False, **properties) -> 'Document': """ Generates a new :class:`mmif.serialize.annotation.Document` object, particularly typed as TextDocument and adds it to the current view. Fails if there is already a text document with the same ID in the view, unless ``overwrite`` is set to True. :param text: text content of the new document :param lang: ISO 639-1 code of the language used in the new document :param did: the desired ID of the document, when not given, the mmif SDK tries to automatically generate an ID based on Annotation type and existing documents in the view. :param overwrite: if set to True, will overwrite an existing document with the same ID :raises KeyError: if ``overwrite`` is set to False and an document with the same ID exists in the view :return: the generated :class:`mmif.serialize.annotation.Document` """ new_document = Document() new_document.at_type = DocumentTypes.TextDocument self._set_id(new_document, did) new_document.text_language = lang new_document.text_value = text for propk, propv in properties.items(): new_document.add_property(propk, propv) self.add_document(new_document, overwrite) return new_document def add_document(self, document: Document, overwrite=False) -> Annotation: """ Appends a Document object to the annotations list. Fails if there is already a document with the same ID in the annotations list. :param document: the Document object to add :param overwrite: if set to True, will overwrite an existing view with the same ID :return: None """ document.parent = self.id return self.add_annotation(document, overwrite) def get_annotations(self, at_type: Union[str, ThingTypesBase] = None, **properties) -> Generator[Annotation, None, None]: """ Look for certain annotations in this view, specified by parameters :param at_type: @type of the annotations to look for. When this is None, any @type will match. :param properties: properties of the annotations to look for. When given more than one property, all properties \ must match. Note that annotation type metadata are specified in the `contains` view metadata, not in individual \ annotation objects. 
""" def prop_check(k, v, *props): return any(k in prop and prop[k] == v for prop in props) for annotation in self.annotations: at_type_metadata = self.metadata.contains.get(annotation.at_type, {}) if not at_type or (at_type and annotation.at_type == at_type): if all(map(lambda kv: prop_check(kv[0], kv[1], annotation.properties, at_type_metadata), properties.items())): yield annotation def get_annotation_by_id(self, ann_id): ann_found = self.annotations.get(ann_id) if ann_found is None or not isinstance(ann_found, Annotation): raise KeyError(f"Annotation \"{ann_id}\" is not found in view {self.id}.") else: return ann_found def get_documents(self) -> List[Document]: return [cast(Document, annotation) for annotation in self.annotations if annotation.is_document()] def get_document_by_id(self, doc_id) -> Document: doc_found = self.annotations.get(doc_id) if doc_found is None or not isinstance(doc_found, Document): raise KeyError(f"Document \"{doc_id}\" not found in view {self.id}.") else: return doc_found def __getitem__(self, key: str) -> 'Annotation': """ getitem implementation for View. >>> obj = View('''{"id": "v1","metadata": {"contains": {"BoundingBox": {}},"document": "m1","tool": "http://tools.clams.io/east/1.0.4"},"annotations": [{"@type": "BoundingBox","properties": {"id": "bb1","coordinates": [[90,40], [110,40], [90,50], [110,50]] }}]}''') >>> type(obj['bb1']) <class 'mmif.serialize.annotation.Annotation'> >>> obj['asdf'] Traceback (most recent call last): ... KeyError: 'Annotation ID not found: asdf' :raises KeyError: if the key is not found :param key: the search string. :return: the :class:`mmif.serialize.annotation.Annotation` object searched for """ if key in self._named_attributes(): return self.__dict__[key] anno_result = self.annotations.get(key) if not anno_result: raise KeyError("Annotation ID not found: %s" % key) return anno_result def set_error(self, err_message: str, err_trace: str) -> None: self.metadata.set_error(err_message, err_trace) self.annotations.empty() class ViewMetadata(FreezableMmifObject): """ ViewMetadata object that represents the ``metadata`` object within a MMIF view. :param viewmetadata_obj: the JSON data that defines the metadata """ def __init__(self, viewmetadata_obj: Union[bytes, str, dict] = None) -> None: self.document: str = '' self.timestamp: Optional[datetime] = None self.app: str = '' self.contains: ContainsDict = ContainsDict() self.parameters: dict = {} self.error: Union[dict, ErrorDict] = {} self._required_attributes = pvector(["app"]) self._attribute_classes = pmap( {'error': ErrorDict, 'contains': ContainsDict} ) # in theory, either `contains` or `error` should appear in a `view` # but with current implementation, there's no easy way to set a condition # for `oneOf` requirement # see MmifObject::_required_attributes in model.py super().__init__(viewmetadata_obj) def new_contain(self, at_type: Union[str, ThingTypesBase], **contains_metadata) -> Optional['Contain']: """ Adds a new element to the ``contains`` dictionary. 
:param at_type: the ``@type`` of the annotation type being added :param contains_metadata: any metadata associated with the annotation type :return: the generated :class:`Contain` object """ if isinstance(at_type, str): at_type = ThingTypesBase.from_str(at_type) if at_type not in self.contains: new_contain = Contain(contains_metadata) self.contains[at_type] = new_contain return new_contain def add_parameters(self, **runtime_params): self.parameters.update(dict(runtime_params)) def add_parameter(self, param_key, param_value): self.parameters[param_key] = param_value def get_parameter(self, param_key): try: return self.parameters[param_key] except KeyError: raise KeyError(f"parameter \"{param_key}\" is not set in the view: {self.serialize()}") def set_error(self, message: str, stack_trace: str): self.error = ErrorDict({"message": message, "stackTrace": stack_trace}) self.contains.empty() class ErrorDict(FreezableMmifObject): """ Error object that stores information about error occurred during processing. """ def __init__(self, error_obj: Union[bytes, str, dict] = None) -> None: self.message: str = '' self.stackTrace: str = '' super().__init__(error_obj) class Contain(FreezableMmifObject): """ Contain object that represents the metadata of a single annotation type in the ``contains`` metadata of a MMIF view. """ class AnnotationsList(FreezableDataList[Union[Annotation, Document]]): """ AnnotationsList object that implements :class:`mmif.serialize.model.DataList` for :class:`mmif.serialize.annotation.Annotation`. """ _items: Dict[str, Union[Annotation, Document]] def _deserialize(self, input_list: list) -> None: """ Extends base ``_deserialize`` method to initialize ``items`` as a dict from annotation IDs to :class:`mmif.serialize.annotation.Annotation` objects. :param input_list: the JSON data that defines the list of annotations :return: None """ self._items = {item['properties']['id']: Document(item) if item['_type'].endswith("Document") else Annotation(item) for item in input_list} def append(self, value: Union[Annotation, Document], overwrite=False) -> None: """ Appends an annotation to the list. Fails if there is already an annotation with the same ID in the list, unless ``overwrite`` is set to True. :param value: the :class:`mmif.serialize.annotation.Annotation` object to add :param overwrite: if set to True, will overwrite an existing annotation with the same ID :raises KeyError: if ``overwrite`` is set to False and an annotation with the same ID exists in the list :return: None """ super()._append_with_key(value.id, value, overwrite) class ContainsDict(FreezableDataDict[ThingTypesBase, Contain]): def _deserialize(self, input_dict: dict) -> None: self._items = {ThingTypesBase.from_str(key): Contain(value) for key, value in input_dict.items()} def update(self, other: Union[dict, 'ContainsDict'], overwrite=False): for k, v in other.items(): if isinstance(k, str): k = ThingTypesBase.from_str(k) self._append_with_key(k, v, overwrite=overwrite) def get(self, key: Union[str, ThingTypesBase], default=None): if isinstance(key, str): key = ThingTypesBase.from_str(key) return self._items.get(key, default) def __contains__(self, item: Union[str, ThingTypesBase]): return item in list(self._items.keys())
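A rough usage sketch for the View API above; the bare "BoundingBox" type string mirrors the doctest in `__getitem__`, while the app URL and property values are placeholders rather than real CLAMS vocabulary entries:

from mmif.serialize.view import View

view = View()
view.id = 'v1'
view.metadata.app = 'http://example.org/apps/demo/0.1'  # placeholder app identifier

# IDs are auto-generated from the @type prefix; @type registration in
# `contains` happens inside add_annotation via new_contain
doc = view.new_textdocument('Hello, world.', lang='en')
box = view.new_annotation('BoundingBox', coordinates=[[90, 40], [110, 50]])
print(doc.id, box.id)
print(view.serialize(pretty=True))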
# -*- coding: utf-8 -*-
from datetime import datetime

from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash

from app import db


class Baby(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(32), unique=True)
    birthday = db.Column(db.Date)
    blogs = db.relationship('Blog', backref='baby', lazy='dynamic')
    healthys = db.relationship('Healthy', backref='baby', lazy='dynamic')

    def __init__(self, name, birthday):
        self.name = name
        self.birthday = birthday

    def __repr__(self):
        return '<Baby {}>'.format(self.name)


class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(32), unique=True)
    password = db.Column(db.String(128))
    gm = db.Column(db.Integer)
    familymembers = db.Column(db.String(32), unique=True)
    blogs = db.relationship('Blog', backref='user', lazy='dynamic')

    def set_password(self, password):  # method for setting the password; takes the password as an argument
        self.password = generate_password_hash(password)  # store the generated hash in the corresponding field

    def validate_password(self, password):  # method for validating the password; takes the password as an argument
        return check_password_hash(self.password, password)  # returns a boolean

    def __repr__(self):
        return '<User {}:{}>'.format(self.username, self.familymembers)


class Blog(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    first = db.Column(db.String(200))
    language = db.Column(db.String(200))
    cognitive = db.Column(db.String(200))
    blog = db.Column(db.Text)
    create_time = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    update_time = db.Column(db.DateTime, default=datetime.utcnow)
    baby_id = db.Column(db.Integer, db.ForeignKey('baby.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __init__(self, first='', language='', cognitive='', blog=''):
        self.first = first
        self.language = language
        self.cognitive = cognitive
        self.blog = blog


class Healthy(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    height = db.Column(db.Integer)
    weight = db.Column(db.Float)
    create_time = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    baby_id = db.Column(db.Integer, db.ForeignKey('baby.id'))
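A short sketch of exercising these models; it assumes the `app` package also exposes a configured Flask `app` object next to `db`, which is not shown in this file:

from datetime import date
from app import app, db  # assumption: `app` exports the Flask instance as well as `db`

with app.app_context():
    db.create_all()

    user = User(username='mum', gm=1, familymembers='mother')
    user.set_password('s3cret')

    baby = Baby('Tian Tian', date(2020, 5, 17))

    db.session.add_all([user, baby])
    db.session.commit()

    assert user.validate_password('s3cret')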
def foo(a, b):
    """
    :param a:
    :param b:
    """
    pass
import turtle
import random

import constants


class Player(turtle.Turtle):

    CRASHED = "crashed"
    READY = "ready"
    DEAD = "dead"

    def __init__(self, name, start_x, start_y, color):
        super(Player, self).__init__()
        self.penup()
        self.shape("square")
        self.color(color)
        self.shapesize(stretch_wid=0.2, stretch_len=0.8, outline=None)
        self.name = name
        self.speed(0)
        self.fwd_speed = 1
        self.pensize(3)
        self.setheading(random.randrange(0, 360, 90))
        self.setposition(start_x, start_y)
        self.prev_pos = (start_x, start_y)
        self.lives = 3
        self.status = self.READY
        self.is_ai = False
        self.pendown()

    def turn_left(self):
        self.left(90)

    def turn_right(self):
        self.right(90)

    def go_east(self):
        if self.heading() != constants.WEST:
            self.setheading(constants.EAST)

    def go_north(self):
        if self.heading() != constants.SOUTH:
            self.setheading(constants.NORTH)

    def go_west(self):
        if self.heading() != constants.EAST:
            self.setheading(constants.WEST)

    def go_south(self):
        if self.heading() != constants.NORTH:
            self.setheading(constants.SOUTH)

    def accelerate(self):
        """Min. speed = 1, Max. speed = 3."""
        if self.fwd_speed < 3:
            self.fwd_speed += 1
        self.forward(self.fwd_speed)

    def decelerate(self):
        """Min. speed = 1, therefore player can never stop"""
        if self.fwd_speed > 1:
            self.fwd_speed -= 1
        self.forward(self.fwd_speed)

    def set_prev_coord(self):
        """Sets prev coordinates."""
        prev_x = int(self.xcor())
        prev_y = int(self.ycor())
        self.prev_pos = (prev_x, prev_y)

    def is_collision(self, grid, x, y):
        """Checks for any visited coordinate and if the coordinate is out of bounds."""
        if x < 0 or y < 0:
            return True
        try:
            return grid.matrix[y][x]
        except IndexError:  # Out of Bounds
            return True

    def clear_lightcycle(self):
        """Removes light cycle from screen"""
        self.hideturtle()
        self.penup()
        self.clear()

    def lose_life(self):
        """Take away one life from player"""
        self.lives -= 1

    def has_lives(self):
        return self.lives > 0

    def respawn(self, x, y):
        """Respawns light cycle to random coord passed as args, resets speed to 1,
        and resets the position list."""
        self.status = self.READY
        self.setposition(x, y)
        self.setheading(random.randrange(0, 360, 90))
        self.set_prev_coord()
        self.fwd_speed = 1
        self.showturtle()
        self.pendown()
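A rough sketch of wiring a Player to key bindings on a turtle screen; the `constants` headings (EAST/NORTH/WEST/SOUTH) and the absence of the real game loop are assumptions about the rest of the project, which is not shown here:

import turtle

screen = turtle.Screen()
screen.setup(width=600, height=600)

p1 = Player("blue", start_x=-200, start_y=0, color="blue")

screen.listen()
screen.onkey(p1.go_north, "Up")
screen.onkey(p1.go_south, "Down")
screen.onkey(p1.go_east, "Right")
screen.onkey(p1.go_west, "Left")

# drive forward for a fixed number of frames; collision and respawn
# handling live in the real game loop, which is not shown here
for _ in range(500):
    p1.set_prev_coord()
    p1.forward(p1.fwd_speed)

turtle.done()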
import logging from collections import namedtuple import geopandas as gp import osmium import pandas as pd import shapely.wkb as wkblib from pyproj import Transformer, CRS from shapely.geometry import MultiPoint from shapely.ops import transform, nearest_points from osmox import helpers OSMTag = namedtuple('OSMtag', 'key value') OSMObject = namedtuple('OSMobject', 'idx, activity_tags, geom') AVAILABLE_FEATURES = [ "area", "levels", "floor_area", "units", "transit_distance" ] class Object: DEFAULT_LEVELS = { # for if a level tag is required but not found "apartments": 4, "bungalow": 1, "detached": 2, "dormitory": 4, "hotel": 3, "house": 2, "residential": 2, "semidetached_house": 2, "terrace": 2, "commercial": 1, "retail": 1, "supermarket": 1, "industrial": 1, "office": 4, "warehouse": 1, "bakehouse": 1, "firestation": 2, "government": 2, "cathedral": 1, "chapel": 1, "church": 1, "mosque": 1, "religous": 1, "shrine": 1, "synagogue": 1, "temple": 1, "hospital": 4, "kindergarden": 2, "school": 2, "university": 3, "college": 3, "sports_hall": 1, "stadium": 1 } def __init__(self, idx, osm_tags, activity_tags, geom) -> None: self.idx = idx self.osm_tags = dict(osm_tags) self.activity_tags = activity_tags self.geom = geom self.activities = None self.features = {} def add_features(self, features): available = { "area": self.area, "levels": self.levels, "floor_area": self.floor_area, "units": self.units, } for f in features: self.features[f] = available[f]() def area(self): return int(self.geom.area) def levels(self): if 'building:levels' in self.osm_tags: levels = self.osm_tags['building:levels'] if levels.isnumeric(): return float(levels) # todo ensure integer if 'height' in self.osm_tags: height = helpers.height_to_m(self.osm_tags['height']) if height: return float(height / 4) if self.osm_tags.get("building"): if self.osm_tags["building"] in self.DEFAULT_LEVELS: return self.DEFAULT_LEVELS[self.osm_tags["building"]] return 2 return 1 def floor_area(self): return self.area() * self.levels() def units(self): if 'building:flats' in self.osm_tags: flats = self.osm_tags['building:flats'] if flats.isnumeric(): return float(flats) # todo ensure integer return 1 def __str__(self): return f""" {self.__class__}: id: {self.idx} osm_tags: {self.osm_tags} activity_tags: {self.activity_tags} activities: {self.activities} geom: {self.geom} """ def add_tags(self, osm_objects): for o in osm_objects: if o.activity_tags: self.activity_tags.extend(o.activity_tags) def apply_default_tag(self, tag): self.activity_tags = [OSMTag(tag[0], tag[1])] def assign_points(self, points): snaps = [c for c in points.intersection(self.geom.bounds)] if snaps: self.add_tags(snaps) return True def assign_areas(self, areas): snaps = [c for c in areas.intersection(self.geom.bounds)] snaps = [c for c in snaps if c.geom.contains(self.geom.centroid)] if snaps: self.add_tags(snaps) return True def assign_activities(self, activity_lookup, weight_calculations=None): """ Create a list of unique activities based on activity tags. This method is currently kept here incase we want to deal with duplicate assignments differently in future. 
""" act_set = set() for tag in self.activity_tags: act_set |= set(activity_lookup.get(tag.key, {}).get(tag.value, [])) self.activities = list(act_set) def get_closest_distance(self, targets, name): """ Calculate euclidean distance to nearest target :params Multipoint targets: A Shapely Multipoint object of all targets """ if not targets: self.features[f"distance_to_nearest_{name}"] = None else: nearest = nearest_points(self.geom.centroid, targets) self.features[f"distance_to_nearest_{name}"] = helpers.get_distance(nearest) # @property def transit_distance(self): return self._transit_distance def summary(self): """ Returbn a dict summary. """ fixed = { "id": self.idx, "activities": ','.join(self.activities), "geometry": self.geom.centroid } return {**fixed, **self.features} def single_activity_summaries(self): """ Yield (dict) summaries for each each activity of an object. """ for act in self.activities: fixed = { "id": self.idx, "activity": act, "geometry": self.geom.centroid } yield {**fixed, **self.features} class ObjectHandler(osmium.SimpleHandler): wkbfab = osmium.geom.WKBFactory() logger = logging.getLogger(__name__) def __init__( self, config, crs='epsg:27700', from_crs='epsg:4326', lazy=False, level=logging.DEBUG ): super().__init__() logging.basicConfig(level=level) self.cnfg = config self.crs = crs self.lazy = lazy self.filter = self.cnfg["filter"] self.object_features = self.cnfg["object_features"] self.default_tags = self.cnfg["default_tags"] self.activity_config = self.cnfg["activity_mapping"] self.transformer = Transformer.from_crs(CRS(from_crs), CRS(crs), always_xy=True) self.objects = helpers.AutoTree() self.points = helpers.AutoTree() self.areas = helpers.AutoTree() self.log = { "existing": 0, "points": 0, "areas": 0, "defaults": 0 } """ On handler.apply_file() method; parse through all nodes and areas: (i) add them to self.objects if they are within the filter_config (ii) else, add them to self.areas or self.points if they are within the activity_mapping """ def selects(self, tags): if tags: tags = dict(tags) return helpers.dict_list_match(tags, self.filter) def get_filtered_tags(self, tags): """ Return configured activity tags for an OSM object as list of OSMtags. 
""" if tags: tags = dict(tags) found = [] for osm_key, osm_val in tags.items(): if osm_key in self.activity_config: if osm_val in self.activity_config[osm_key] or self.activity_config[osm_key] == "*": found.append(OSMTag(key=osm_key, value=osm_val)) return found def add_object(self, idx, activity_tags, osm_tags, geom): if geom: geom = transform(self.transformer.transform, geom) self.objects.auto_insert(Object(idx=idx, osm_tags=osm_tags, activity_tags=activity_tags, geom=geom)) def add_point(self, idx, activity_tags, geom): if geom: geom = transform(self.transformer.transform, geom) self.points.auto_insert(OSMObject(idx=idx, activity_tags=activity_tags, geom=geom)) def add_area(self, idx, activity_tags, geom): if geom: geom = transform(self.transformer.transform, geom) self.areas.auto_insert(OSMObject(idx=idx, activity_tags=activity_tags, geom=geom)) def fab_point(self, n): try: wkb = self.wkbfab.create_point(n) return wkblib.loads(wkb, hex=True) except RuntimeError: self.logger.warning(f' RuntimeError encountered for point: {n}') return None def fab_area(self, a): try: wkb = self.wkbfab.create_multipolygon(a) return wkblib.loads(wkb, hex=True) except RuntimeError: self.logger.warning(f' RuntimeError encountered for polygon: {a}') return None def node(self, n): activity_tags = self.get_filtered_tags(n.tags) # todo consider renaming activiity tags to filtered or selected tags if self.selects(n.tags): self.add_object(idx=n.id, osm_tags=n.tags, activity_tags=activity_tags, geom=self.fab_point(n)) elif activity_tags: self.add_point(idx=n.id, activity_tags=activity_tags, geom=self.fab_point(n)) def area(self, a): activity_tags = self.get_filtered_tags(a.tags) if self.selects(a.tags): self.add_object(idx=a.id, osm_tags=a.tags, activity_tags=activity_tags, geom=self.fab_area(a)) elif activity_tags: self.add_area(idx=a.id, activity_tags=activity_tags, geom=self.fab_area(a)) def assign_tags(self): """ Assign unknown tags to buildings spatially. """ if not self.lazy: self.assign_tags_full() else: self.assign_tags_lazy() def assign_tags_full(self): """ Assign unknown tags to buildings spatially. 
""" for obj in helpers.progressBar(self.objects, prefix='Progress:', suffix='Complete', length=50): if obj.activity_tags: # if an onject already has activity tags, continue self.log["existing"] += 1 if obj.assign_points(self.points): # else try to assign activity tags based on contained point objects self.log["points"] += 1 continue if obj.assign_areas(self.areas): # else try to assign activity tags based on containing area objects self.log["areas"] += 1 continue if self.default_tags and not obj.activity_tags: # otherwise apply defaults if set self.log["defaults"] += 1 for a in self.default_tags: obj.apply_default_tag(a) def assign_tags_lazy(self): """Assign tags if filtered object does not already have useful tags.""" for obj in helpers.progressBar(self.objects, prefix='Progress:', suffix='Complete', length=50): if obj.activity_tags: # if an onject already has activity tags, continue self.log["existing"] += 1 continue if obj.assign_points(self.points): # else try to assign activity tags based on contained point objects self.log["points"] += 1 continue if obj.assign_areas(self.areas): # else try to assign activity tags based on containing area objects self.log["areas"] += 1 continue if self.default_tags: # otherwise apply defaults if set self.log["defaults"] += 1 for a in self.default_tags: obj.apply_default_tag(a) def assign_activities(self): for obj in helpers.progressBar(self.objects, prefix='Progress:', suffix='Complete', length=50): obj.assign_activities(self.activity_config) def fill_missing_activities( self, area_tags=("landuse", "residential"), required_acts="home", new_tags=("building", "house"), size=(10, 10), spacing=(25, 25) ): """ Fill "empty" areas with new objects. Empty areas are defined as areas with the select_tags but not containing any objects of the required_acts. An example of such missing objects would be missing home facilities in a residential area. Empty areas are filled with new objects of given size at given spacing. :param area_tags: Optional tuple to define (any) osm tags of areas to be considered. Defaults to ("landuse", "residential") :param required_acts: Optional string value representing expected (any) object activity types to be found in areas.Defaults to "home" :param new_tags: Optional tuple of tags for new objects. Defaults to ("building", "house"). :param size: Optional tuple of x,y dimensions of new object polygon. Defaults to (10, 10) :param spacing: Optional tuple of x,y dimensions of new objects spacing. 
Defaults to (25, 25) :returns: A tuple of two ints representing number of empty zones, number of new objects """ empty_zones = 0 # conuter for fill zones i = 0 # counter for object id new_osm_tags = [OSMTag(key=k, value=v) for k, v in area_tags] new_tags = [OSMTag(key=k, value=v) for k, v in new_tags] for area in helpers.progressBar(self.areas, prefix='Progress:', suffix='Complete', length=50): if not helpers.tag_match(a=area_tags, b=area.activity_tags): continue if self.required_activities_in_target(required_acts, area.geom): continue empty_zones += 1 # increment another empty zone # sample a grid points = helpers.area_grid(area=area.geom, spacing=spacing) for point in points: # add objects built from grid self.objects.auto_insert(helpers.fill_object(i, point, size, new_osm_tags, new_tags, required_acts)) i += 1 return empty_zones, i def required_activities_in_target(self, required_activities, target): found_activities = self.activities_from_area_intersection(target) return set(required_activities) & found_activities # in both def activities_from_area_intersection(self, target): objects = self.objects.intersection(target.bounds) objects = [o for o in objects if target.contains(o.geom)] return set([act for object in objects for act in object.activities]) def add_features(self): """ ["units", "floors", "area", "floor_area"] """ for obj in helpers.progressBar(self.objects, prefix='Progress:', suffix='Complete', length=50): obj.add_features(self.object_features) def assign_nearest_distance(self, target_act): """ For each facility, calculate euclidean distance to targets of given activity type. """ targets = self.extract_targets(target_act) for obj in helpers.progressBar(self.objects, prefix='Progress:', suffix='Complete', length=50): obj.get_closest_distance(targets, target_act) def extract_targets(self, target_act): """ Find targets """ targets = [] for obj in self.objects: if target_act in obj.activities: targets.append(obj.geom.centroid) return MultiPoint(targets) def geodataframe(self, single_use=False): if single_use: df = pd.DataFrame( (summary for o in self.objects for summary in o.single_activity_summaries()) ) return gp.GeoDataFrame(df, geometry='geometry', crs=self.crs) df = pd.DataFrame( (o.summary() for o in self.objects) ) return gp.GeoDataFrame(df, geometry='geometry', crs=self.crs) # def extract(self): # df = pd.DataFrame.from_records( # ((b.idx, b.geom.centroid) for b in self.objects), # columns=['idx', 'tags', 'geom'] # ) # return gp.GeoDataFrame(df, geometry='geom')
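# Hedged usage sketch for ObjectHandler above. The config keys mirror what
# __init__ reads ("filter", "object_features", "default_tags",
# "activity_mapping"), but the tag values and the .osm.pbf path are
# assumptions; apply_file() comes from osmium.SimpleHandler.
config = {
    "filter": {"building": ["*"]},
    "object_features": ["area", "levels", "floor_area", "units"],
    "default_tags": [("building", "house")],
    "activity_mapping": {"building": {"house": ["home"]}},
}

handler = ObjectHandler(config, crs="epsg:27700")
handler.apply_file("suburb.osm.pbf", locations=True, idx="flex_mem")
handler.assign_tags()
handler.assign_activities()
handler.add_features()
gdf = handler.geodataframe()
print(gdf.head())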
# %load penguinsimple.py from bokeh.models import ColumnDataSource, Range1d from bokeh.plotting import figure from bokeh.io import export_png, show, output_notebook import pandas as pd import numpy as np def line(x,b): return (250/200)*x+b def margin(x,y): return (250/200)*x-y+(400/200) penguins = '../../data/penguins/penguins_raw.csv' df = pd.read_csv(penguins) df['type'] = df['Species'].apply(lambda x: x.split()[0]) df['color'] = df['type'].map({'Adelie':'blue','Gentoo':'green','Chinstrap':'red'}) f = figure(toolbar_location=None,match_aspect=True) f.xaxis.axis_label = 'Culmen Depth (mm)' f.yaxis.axis_label = 'Body Mass (x200 g)' f.title.text = 'Penguin Features' f.x_range=Range1d(12.5,22) f.y_range=Range1d(12.5,33) df['Body Mass (x200 g)'] = df['Body Mass (g)']/200 df['Body Mass (x200 g)'] = df['Body Mass (g)']/200 adelie = df[df['type']=='Adelie'] gentoo = df[df['type']=='Gentoo'] f.circle(x='Culmen Depth (mm)',y='Body Mass (x200 g)',fill_color='color',legend_label = 'Adelie',source=ColumnDataSource(adelie)) f.circle(x='Culmen Depth (mm)',y='Body Mass (x200 g)',fill_color='color',legend_label = 'Gentoo',source=ColumnDataSource(gentoo)) x = np.linspace(12.5,25,100) #y = line(x,400) #f.line(x,y, color='red',line_width=2,line_dash='dotted',legend_label='y=250x+400') adelie_spot = margin(adelie['Culmen Depth (mm)'],adelie['Body Mass (x200 g)']).argmin() adelie_b = margin(adelie['Culmen Depth (mm)'],adelie['Body Mass (x200 g)']).min() gentoo_spot = margin(gentoo['Culmen Depth (mm)'],gentoo['Body Mass (x200 g)']).argmax() gentoo_b = margin(gentoo['Culmen Depth (mm)'],gentoo['Body Mass (x200 g)']).max() print(adelie_b,gentoo_b) f.diamond(x=[adelie.iloc[adelie_spot]['Culmen Depth (mm)']],y=[adelie.iloc[adelie_spot]['Body Mass (x200 g)']],size=15,fill_color='blue') f.diamond(x=[gentoo.iloc[gentoo_spot]['Culmen Depth (mm)']],y=[gentoo.iloc[gentoo_spot]['Body Mass (x200 g)']],size=15,fill_color='green') f.line(x,line(x,(2-adelie_b)),line_width=2,legend_label='y=1.25x+{:.3f}'.format((400-adelie_b)/200),color='blue') f.line(x,line(x,(2-gentoo_b)),line_width=2,legend_label='y=1.25x+{:.3f}'.format((400-gentoo_b)/200),color='green') adelie_bs = margin(adelie['Culmen Depth (mm)'],adelie['Body Mass (x200 g)']) gentoo_bs = margin(gentoo['Culmen Depth (mm)'],gentoo['Body Mass (x200 g)']) for b in adelie_bs: f.line(x,line(x,(2-b)),line_width=1,color='blue',alpha=.1) for b in gentoo_bs: f.line(x,line(x,(2-b)),line_width=1,color='green',alpha=.1) show(f) export_png(f,filename='../img/penguinhwy2.png')
# class A: # def __init__(self, s = "welcome"): # self.s = s # def print(self): # print(self.s) # a = A() # a.print() # def main(): # myCount = Count() # times = 0 # for i in range(0, 100): # increment(myCount, times) # print("myCount.count =", myCount.count, "times =", times) # def increment(c, times): # c.count += 1 # times += 1 # class Count: # def __init__(self): # self.count = 0 # main() # class A: # def __init__(self, i = 0): # self.i = i # class B(A): # def __init__(self, j = 0): # self.j = j # b = B() # print(b.i) # print(b.j) # class A: # def __init__(self, i = 1): # self.i = i # class B(A): # def __init__(self, j = 2): # super().__init__() # self.j = j # def main(): # b = B() # print(b.i, b.j) # main() # class A: # def __init__(self): # self.i = 1 # def m(self): # self.i = 10 # class B(A): # def m(self): # self.i += 1 # return self.i # def main(): # b = B() # print(b.m()) # main() # class A: # def __str__(self): # return "A" # class B(A): # def __init__(self): # super().__init__() # class C(B): # def __init__(self): # super().__init__() # def main(): # b = B() # a = A() # c = C() # print(a, b, c) # main() # class A: # def __init__(self, i = 2, j = 3): # self.i = i # self.j = j # def __str__(self): # return "A" # def __eq__(self, other): # return self.i * self.j == other.i * other.j # def main(): # x = A(1, 2) # y = A(2, 1) # print(x == y) # main() # class A: # def __init__(self): # self.setI(20) # def setI(self, i): # self.i = 2 * i; # class B(A): # def __init__(self): # super().__init__() # print("i from B is", self.i) # def setI(self, i): # self.i = 3 * i; # b = B()
from django.urls import path

from . import views

app_name = "frontend"

urlpatterns = [
    path('', views.homepage, name="index"),
]
''' Created on Jul 1, 2009 This module contains tests for the face recognition algorithms. @author: bolme ''' import unittest import pyvision as pv import numpy as np pv.disableCommercialUseWarnings() from pyvision.analysis.FaceAnalysis.FaceDatabase import ScrapShotsDatabase from pyvision.analysis.FaceAnalysis.EyeDetectionTest import EyeDetectionTest from pyvision.face.CascadeDetector import CascadeDetector from pyvision.face.FilterEyeLocator import FilterEyeLocator from pyvision.analysis.roc import ROC class TestFilterEyeLocator(unittest.TestCase): def test_ASEFEyeLocalization(self): '''FilterEyeLocator: Scrapshots Both10 rate == 0.4800...............''' ilog = None if 'ilog' in globals().keys(): ilog = globals()['ilog'] # Load a face database ssdb = ScrapShotsDatabase() # Create a face detector face_detector = CascadeDetector() # Create an eye locator eye_locator = FilterEyeLocator() # Create an eye detection test edt = EyeDetectionTest(name='asef_scraps') #print "Testing..." for face_id in ssdb.keys()[:25]: face = ssdb[face_id] im = face.image dist = face.left_eye.l2(face.right_eye) dist = np.ceil(0.1*dist) im.annotateCircle(face.left_eye,radius=dist,color='white') im.annotateCircle(face.right_eye,radius=dist,color='white') # Detect the faces faces = face_detector.detect(im) # Detect the eyes pred_eyes = eye_locator(im,faces) for rect,leye,reye in pred_eyes: im.annotateRect(rect) im.annotateCircle(leye,radius=1,color='red') im.annotateCircle(reye,radius=1,color='red') truth_eyes = [[face.left_eye,face.right_eye]] pred_eyes = [ [leye,reye] for rect,leye,reye in pred_eyes] # Add to eye detection test edt.addSample(truth_eyes, pred_eyes, im=im, annotate=True) if ilog != None: ilog.log(im,label='test_ASEFEyeLocalization') edt.createSummary() # Very poor accuracy on the scrapshots database self.assertAlmostEqual( edt.face_rate , 1.0000, places = 3 ) self.assertAlmostEqual( edt.both25_rate , 0.8800, places = 3 ) self.assertAlmostEqual( edt.both10_rate , 0.5200, places = 3 ) self.assertAlmostEqual( edt.both05_rate , 0.2800, places = 3 ) def test(): '''Run the face test suite.''' pv.disableCommercialUseWarnings() fel_suite = unittest.TestLoader().loadTestsFromTestCase(TestFilterEyeLocator) test_suites = [ fel_suite, ] pyvision_suite = unittest.TestSuite(test_suites) unittest.TextTestRunner(verbosity=2).run(pyvision_suite) if __name__ == '__main__': # By default run the test suite #ilog = pv.ImageLog() test() #ilog.show()
import apache_beam as beam import kubeflow_batch_predict.dataflow.batch_prediction as batch_prediction import code_search.do_fns.embeddings as embeddings import code_search.transforms.github_bigquery as github_bigquery class GithubBatchPredict(beam.PTransform): """Batch Prediction for Github dataset""" def __init__(self, project, problem, data_dir, saved_model_dir): super(GithubBatchPredict, self).__init__() self.project = project self.problem = problem self.data_dir = data_dir self.saved_model_dir = saved_model_dir ## # Target dataset and table to store prediction outputs. # Non-configurable for now. # self.index_dataset = 'code_search' self.index_table = 'search_index' self.batch_size = 100 def expand(self, input_or_inputs): rows = (input_or_inputs | "Read Processed Github Dataset" >> github_bigquery.ReadProcessedGithubData(self.project) ) batch_predict = (rows | "Prepare Encoded Input" >> beam.ParDo(embeddings.EncodeExample(self.problem, self.data_dir)) | "Execute Predictions" >> beam.ParDo(batch_prediction.PredictionDoFn(), self.saved_model_dir).with_outputs("errors", main="main") ) predictions = batch_predict.main formatted_predictions = (predictions | "Process Predictions" >> beam.ParDo(embeddings.ProcessPrediction()) ) (formatted_predictions # pylint: disable=expression-not-assigned | "Save Index Data" >> github_bigquery.WriteGithubIndexData(self.project, self.index_dataset, self.index_table, batch_size=self.batch_size) ) return formatted_predictions
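# Hedged sketch of wiring the GithubBatchPredict transform above into a
# pipeline; the project, problem name, and GCS paths are placeholder
# assumptions, not values from the original module.
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions

options = PipelineOptions()
with beam.Pipeline(options=options) as pipeline:
    (pipeline
     | "Batch Predict Github" >> GithubBatchPredict(
         project="my-gcp-project",
         problem="github_function_docstring",   # placeholder problem name
         data_dir="gs://my-bucket/data",
         saved_model_dir="gs://my-bucket/export"))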
from zahlabut.LogTool import * import os from configparser import ConfigParser from datetime import datetime import unittest conf_file = 'conf_unittest.ini' class NginxTestCases(unittest.TestCase): def setUp(self): test_start_time = datetime.today().strftime('%Y-%m-%d %H:%M:%S') parser = ConfigParser() parser.read('conf_unittest.ini') parser.set('Settings', 'time_grep', test_start_time) parser.set('Settings', 'log_tool_result_file', 'Test_Restart_NGINX_Errors.log') with open(conf_file, 'w') as configfile: parser.write(configfile) configfile.close() def test_restart_nginx(self): for x in range(0, 5): # os.system('nginx stop') os.system('nginx') def tearDown(self): load_conf_file(conf_file) result = start_analyzing() LogTool.print_in_color(result, 'green')
'Build the bundled capnp distribution' import subprocess import os import tempfile def build_libcapnp(bundle_dir, build_dir, verbose=False): bundle_dir = os.path.abspath(bundle_dir) capnp_dir = os.path.join(bundle_dir, 'capnproto-c++') build_dir = os.path.abspath(build_dir) with tempfile.TemporaryFile() as f: stdout = f if verbose: stdout = None cxxflags = os.environ.get('CXXFLAGS', None) os.environ['CXXFLAGS'] = (cxxflags or '') + ' -fPIC -O2 -DNDEBUG' conf = subprocess.Popen(['./configure', '--disable-shared', '--prefix', build_dir], cwd=capnp_dir, stdout=stdout) returncode = conf.wait() if returncode != 0: raise RuntimeError('Configure failed') make = subprocess.Popen(['make', '-j4', 'install'], cwd=capnp_dir, stdout=stdout) returncode = make.wait() if cxxflags is None: del os.environ['CXXFLAGS'] else: os.environ['CXXFLAGS'] = cxxflags if returncode != 0: raise RuntimeError('Make failed')
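# Minimal usage sketch for build_libcapnp above. The directory layout is an
# assumption: a bundled capnproto-c++ source tree under ./bundled and a
# scratch ./build prefix next to this script.
import os

bundle_dir = os.path.join(os.path.dirname(__file__), 'bundled')
build_dir = os.path.join(os.path.dirname(__file__), 'build')

build_libcapnp(bundle_dir, build_dir, verbose=True)
print('installed static libcapnp into', build_dir)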
from typing import List


class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """Remove duplicates from a sorted list in place and return the new length."""
        len_nums = len(nums)
        if len_nums == 0 or len_nums == 1:
            return len_nums
        j = 0
        for i in range(len_nums - 1):
            if nums[i] != nums[i + 1]:
                nums[j] = nums[i]
                j += 1
        nums[j] = nums[len_nums - 1]
        return j + 1  # length of the de-duplicated prefix, matching the int return type


if __name__ == '__main__':
    solution = Solution()
    print(solution.removeDuplicates([0, 0, 1, 1, 1, 2, 2, 3, 3, 4]))
"""add BotwSettings.renomination_cooldown Revision ID: 33ab173d34a2 Revises: 42180b951b16 Create Date: 2021-02-19 00:00:34.097366 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "33ab173d34a2" down_revision = "42180b951b16" branch_labels = None depends_on = None def upgrade(): op.add_column( "botw_settings", sa.Column("renomination_cooldown", sa.Integer(), nullable=True) ) def downgrade(): op.drop_column("botw_settings", "renomination_cooldown")
from vapory import * import numpy as np, PIL, PIL.ImageFont, PIL.Image def letter_pixels(letter): font = PIL.ImageFont.load_default() (width, height) = font.getsize(letter) txt_image = np.array(font.getmask(letter)).reshape((height, width)) xx,yy = np.logical_not(np.pad(txt_image, 1, 'constant')).nonzero()[::-1] return zip([width + 2.0] * len(xx), [height + 2.0] * len(yy), xx, yy) # Spec the cube # >>> " ".join(["L R U D F B M E L' R' U' D' F' B' M' E'".split()[int(d, 16)] for d in "".join([str(hex(ord(c)))[2:] for c in ("AI SAFETY")])]) # "F R F R' U L B D F R F M F B B F B R'" spec = \ [(i % 3, 2 - i // 3, 0, (0.05, 0.95), (0.05, 0.95), (0.05, 0.01), 'RGWYBWGYG'[i]) for i in range(9)] \ + [(0, 2 - i // 3, 2 - i % 3, (0.05, 0.01), (0.05, 0.95), (0.05, 0.95), 'ROYWYBRBW'[i]) for i in range(9)] \ + [(2 - i // 3, 2, 2 - i % 3, (0.05, 0.95), (0.95, 0.98), (0.05, 0.95), 'BYORROYBG'[i]) for i in range(9)] \ textures = { '0' : Texture(Pigment('color', [0.01, 0.01, 0.01])), '1' : Texture(Pigment('color', [1, 1, 1])), 'G' : Texture("Jade"), 'R' : Texture("Red_Marble"), 'W' : Texture("White_Marble"), 'B' : Texture("Blue_Agate"), 'Y' : Texture("Yellow_Pine"), 'O' : Texture("Tom_Wood"), 'Gold' : Texture("Gold_Texture"), } cubes = [([x + 0.03,y + 0.03,z + 0.03], [x + 0.97,y + 0.97,z + 0.97], textures['0']) for x,y,z,dx,dy,dz,c in spec] stickers = [([x + dx[0], y + dy[0], z + dz[0]], [x + dx[1],y + dy[1],z + dz[1]], textures[c]) for x,y,z,dx,dy,dz,c in spec][-9:] text = [] for letter, (x,y,z,dx,dy,dz,c) in zip('dexle./avharamporg', spec): for width, height, i, j in letter_pixels(letter): if dz == (0.05, 0.01): xx, yy, zz = i, height - j, width - 3 elif dx == (0.05, 0.01): xx, yy, zz = width / 2, height - j, width - i text.append(([ x + dx[0] + (dx[1] - dx[0]) * xx / width, y + dy[0] + (dy[1] - dy[0]) * yy / height, z + dz[0] + (dz[1] - dz[0]) * zz / width ], [x + dx[0] + (dx[1] - dx[0]) * (xx + 1) / width, y + dy[0] + (dy[1] - dy[0]) * (yy - 1) / height, z + dz[0] + (dz[1] - dz[0]) * (zz - 1) / width], textures[c] )) boxes = cubes + stickers + text scale = 0.5 boxes = [Box([x0 * scale, y0 * scale, z0 * scale],[x1 * scale, y1 * scale, z1 * scale], c, Interior('ior', 4)) for ((x0, y0, z0),(x1, y1, z1), c) in boxes] lights = LightSource([-20, 50, -20], 'color', 1) camera = Camera( 'location', [-10, 10, -4], 'look_at', [1.5 * scale, 1.5 * scale, 1.5 * scale], 'angle', 10) ground = Plane( [0, 1, 0], 0, Texture( Pigment( 'color', [1, 1, 1]), Finish( 'phong', 0.1, 'reflection',0.0, 'metallic', 0.3))) scene = Scene(camera, [lights, Background("White"), ground] + boxes, included=["colors.inc", "textures.inc", "glass.inc"]) #scene.render("brick.png", width=2000, height=2000, antialiasing=0.1) img = PIL.Image.open("brick.png") width, height = img.size def find_coeffs(pa, pb): matrix = [] for p1, p2 in zip(pa, pb): matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]]) matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]]) A = np.matrix(matrix, dtype=np.float) B = np.array(pb).reshape(8) res = np.dot(np.linalg.inv(A.T * A) * A.T, B) return np.array(res).reshape(8) scale = 2000 / 640.0 pa = [(0, 0), (width, 0), (width, height), (0, height)] pb = [(0, 0), (640 * scale, 0), (595 * scale, 2500 * scale), (50 * scale, 2500 * scale)] print pa, pb coeffs = find_coeffs(pa, pb) img.transform((width, height), PIL.Image.PERSPECTIVE, coeffs, PIL.Image.BICUBIC).save("brick-perspective.png")
"""Export remote accesses in file xlsx""" from cbw_api_toolbox.cbw_file_xlsx import CBWXlsx API_KEY = '' SECRET_KEY = '' API_URL = '' FILE_XLSX = "" #Optional parameter XLSX = CBWXlsx(API_URL, API_KEY, SECRET_KEY) print(XLSX.export_remote_accesses_xlsx())
from setuptools import setup, find_packages setup( name='scaleout-cli', version='0.0.1', description="""Scaleout CLI""", author='Morgan Ekmefjord', author_email='morgan@scaleout.se', url='https://www.scaleoutsystems.com', include_package_data=True, py_modules=['scaleout'], python_requires='>=3.5,<4', install_requires=[ "attrdict>=2.0.1", "certifi>=2018.11.29", "chardet>=3.0.4", "Click>6.6", "cytoolz", "PyYAML>=4.2b1", "requests==2.21.0", "urllib3==1.24.2", "minio==5.0.6", "six>=1.14.0", "python-slugify", "prettytable", ], license="Copyright Scaleout Systems AB. See license for details", zip_safe=False, entry_points={ 'console_scripts': ["stackn=scaleout.cli:main"] }, keywords='', packages=find_packages(exclude=["tests", "tests.*"]), classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'Natural Language :: English', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], )
# -*- coding: utf-8 -*-
#
# Undent - Dedent and format multiline strings into human-readable output
#
# Ansgar Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: MIT
#

from undent.api import undent
from undent.__version__ import (
    __title__, __version__, __license__, __author__, __contact__, __url__,
    __description__)
from django.db import models # Create your models here. class Maker(models.Model): name = models.CharField(max_length=10) country = models.CharField(max_length=10) def __str__(self): return self.name class PModel(models.Model): maker = models.ForeignKey(Maker, on_delete=models.CASCADE) name = models.CharField(max_length=20) url = models.URLField(default="http://mobiles.com") def __str__(self): return self.name class Product(models.Model): pmodel = models.ForeignKey(PModel, on_delete=models.SET_NULL, null=True, verbose_name="型號") nickname = models.CharField(max_length=15, default="暱稱") description = models.TextField(default="無") year = models.PositiveIntegerField(default=2022) price = models.PositiveIntegerField(default=0) def __str__(self): return self.nickname class PPhoto(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) description = models.CharField(max_length=20, default="圖片") url = models.URLField(default="http://mobiles.com") def __str__(self): return self.description
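# Hedged ORM sketch for the models above (e.g. in a Django shell or a data
# migration); the maker, model, and product values are illustrative only.
maker = Maker.objects.create(name="hTC", country="TW")
pmodel = PModel.objects.create(maker=maker, name="One A9", url="http://mobiles.com/a9")
product = Product.objects.create(
    pmodel=pmodel,
    nickname="A9",
    description="mid-range phone",
    year=2016,
    price=390,
)
PPhoto.objects.create(product=product, description="front view", url="http://mobiles.com/a9.jpg")

print(product, product.pmodel.maker)  # uses the __str__ methods defined above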
""" Well Registry ORM object. """ from django.conf import settings from django.core.validators import RegexValidator from django.db import models from smart_selects.db_fields import ChainedForeignKey class AgencyLookup(models.Model): """Model definition for the agency table, lookup only""" agency_cd = models.CharField(max_length=50, unique=True) agency_nm = models.CharField(max_length=150, blank=True, null=True) agency_med = models.CharField(max_length=200, blank=True, null=True) class Meta: db_table = 'agency' ordering = ['agency_nm'] def __str__(self): return self.agency_nm class AltitudeDatumLookup(models.Model): """Model definition for the altitude_datum table, lookup only""" adatum_cd = models.CharField(max_length=10, unique=True) adatum_desc = models.CharField(max_length=100, blank=True, null=True) class Meta: db_table = 'altitude_datum' ordering = ['adatum_cd'] def __str__(self): return self.adatum_cd class CountryLookup(models.Model): """Model definition for the country table, lookup only""" country_cd = models.CharField(unique=True, max_length=2) country_nm = models.CharField(max_length=48) class Meta: db_table = 'country' ordering = ['country_nm'] def __str__(self): return self.country_nm class CountyLookup(models.Model): """Model definition for the county table, lookup only""" country_cd = models.ForeignKey('CountryLookup', on_delete=models.PROTECT, db_column='country_cd', to_field='country_cd') state_id = models.ForeignKey('StateLookup', on_delete=models.PROTECT, db_column='state_id') county_cd = models.CharField(max_length=3) county_nm = models.CharField(max_length=48) class Meta: db_table = 'county' ordering = ['county_nm'] unique_together = (('country_cd', 'state_id', 'county_cd'),) def __str__(self): return self.county_nm class HorizontalDatumLookup(models.Model): """Model definition for the horizontal_datum table, lookup only""" hdatum_cd = models.CharField(max_length=10, unique=True) hdatum_desc = models.CharField(max_length=100, blank=True, null=True) class Meta: db_table = 'horizontal_datum' ordering = ['hdatum_cd'] def __str__(self): return self.hdatum_cd class NatAqfrLookup(models.Model): """Model definition for the nat_aqfr table, lookup only""" nat_aqfr_cd = models.CharField(unique=True, max_length=10) nat_aqfr_desc = models.CharField(blank=True, null=True, max_length=100) class Meta: db_table = 'nat_aqfr' ordering = ['nat_aqfr_desc'] def __str__(self): return self.nat_aqfr_desc class StateLookup(models.Model): """Model definition for the state table, lookup only""" country_cd = models.ForeignKey('CountryLookup', on_delete=models.PROTECT, db_column='country_cd', to_field='country_cd') state_cd = models.CharField(max_length=2) state_nm = models.CharField(max_length=53) class Meta: db_table = 'state' ordering = ['state_nm'] unique_together = (('country_cd', 'state_cd'),) def __str__(self): return self.state_nm class UnitsLookup(models.Model): """Model definition for the units_dim table, lookup only""" unit_id = models.IntegerField(unique=True) unit_desc = models.CharField(max_length=20, blank=True, null=True) class Meta: db_table = 'units' ordering = ['unit_desc'] def __str__(self): return self.unit_desc WELL_TYPES = [('Surveillance', 'Surveillance'), ('Trend', 'Trend'), ('Special', 'Special')] WELL_CHARACTERISTICS = [('Background', 'Background'), ('Suspected/Anticipated Changes', 'Suspected/Anticipated Changes'), ('Known Changes', 'Known Changes')] WELL_PURPOSES = [('Dedicated Monitoring/Observation', 'Dedicated Monitoring/Observation'), ('Other', 'Other')] 
non_blank_validator = RegexValidator( r'\S[\s\S]*', message='Field must not be blank') class MonitoringLocation(models.Model): """ Django Registry Model. # python manage.py makemigrations and migrate """ display_flag = models.BooleanField(default=False, verbose_name='Display Site?') agency = models.ForeignKey(AgencyLookup, on_delete=models.PROTECT, db_column='agency_cd', null=True, to_field='agency_cd') site_no = models.CharField(max_length=16) site_name = models.CharField(max_length=300, validators=[non_blank_validator,]) country = models.ForeignKey(CountryLookup, on_delete=models.PROTECT, db_column='country_cd', null=True, blank=True, to_field='country_cd') state = ChainedForeignKey(StateLookup, chained_field="country", chained_model_field="country_cd", show_all=False, auto_choose=True, sort=True, on_delete=models.PROTECT, db_column='state_id', null=True, blank=True) county = ChainedForeignKey(CountyLookup, chained_field="state", chained_model_field="state_id", show_all=False, auto_choose=True, sort=True, on_delete=models.PROTECT, db_column='county_id', null=True, blank=True) dec_lat_va = models.DecimalField(max_digits=11, decimal_places=8, null=True, blank=True, verbose_name='Latitude(decimal degrees)') dec_long_va = models.DecimalField(max_digits=11, decimal_places=8, null=True, blank=True, verbose_name='Longitude(decimal degrees)') horizontal_datum = models.ForeignKey(HorizontalDatumLookup, on_delete=models.PROTECT, db_column='horizontal_datum_cd', null=True, blank=True, to_field='hdatum_cd') horz_method = models.CharField(max_length=300, blank=True, verbose_name='Lat/Long method') horz_acy = models.CharField(max_length=300, blank=True, verbose_name='Lat/Long accuracy') alt_va = models.DecimalField(max_digits=10, decimal_places=6, null=True, blank=True, verbose_name='Altitude') altitude_units = models.ForeignKey(UnitsLookup, on_delete=models.PROTECT, db_column='altitude_units', to_field='unit_id', null=True, blank=True) altitude_datum = models.ForeignKey(AltitudeDatumLookup, on_delete=models.PROTECT, db_column='altitude_datum_cd', null=True, blank=True, to_field='adatum_cd') alt_method = models.CharField(max_length=300, blank=True, verbose_name='Altitude method') alt_acy = models.CharField(max_length=300, blank=True, verbose_name='Altitude accuracy') well_depth = models.DecimalField(max_digits=11, decimal_places=8, null=True, blank=True) well_depth_units = models.ForeignKey(UnitsLookup, related_name='+', db_column='well_depth_units', on_delete=models.PROTECT, to_field='unit_id', null=True, blank=True) nat_aqfr = models.ForeignKey(NatAqfrLookup, on_delete=models.PROTECT, db_column='nat_aqfr_cd', to_field='nat_aqfr_cd', null=True, blank=True, verbose_name='National aquifer') local_aquifer_name = models.CharField(max_length=100, blank=True) site_type = models.CharField(max_length=10, blank=True, choices=[('WELL', 'Well'), ('SPRING', 'Spring')]) aqfr_type = models.CharField(max_length=10, blank=True, db_column='aqfr_char', choices=[('CONFINED', 'Confined'), ('UNCONFINED', 'Unconfined')], verbose_name='Aquifer type') wl_sn_flag = models.BooleanField(default=False, verbose_name='In WL sub-network?') wl_network_name = models.CharField(max_length=50, blank=True, db_column='wl_sys_name', verbose_name='WL network name') wl_baseline_flag = models.BooleanField(default=False, verbose_name='WL baseline?') wl_well_type = models.CharField(max_length=32, blank=True, choices=WELL_TYPES, verbose_name='WL well type') wl_well_chars = models.CharField(max_length=32, blank=True, 
choices=WELL_CHARACTERISTICS, verbose_name='WL well characteristics') wl_well_purpose = models.CharField(max_length=32, blank=True, choices=WELL_PURPOSES, verbose_name='WL well purpose') wl_well_purpose_notes = models.CharField(max_length=4000, blank=True, verbose_name='WL well purpose notes') qw_sn_flag = models.BooleanField(default=False, verbose_name='In QW sub-network?') qw_network_name = models.CharField(max_length=50, blank=True, db_column='qw_sys_name', verbose_name='QW network name') qw_baseline_flag = models.BooleanField(default=False, verbose_name='QW baseline?') qw_well_type = models.CharField(max_length=32, blank=True, choices=WELL_TYPES, verbose_name='QW well type') qw_well_chars = models.CharField(max_length=32, blank=True, choices=WELL_CHARACTERISTICS, verbose_name='QW well characteristics') qw_well_purpose = models.CharField(max_length=32, blank=True, choices=WELL_PURPOSES, verbose_name='QW well purpose') qw_well_purpose_notes = models.CharField(max_length=4000, blank=True, verbose_name='QW well purpose notes') link = models.CharField(max_length=500, blank=True) insert_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.PROTECT, editable=False, related_name='+') update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.PROTECT, editable=False, related_name='+') insert_date = models.DateTimeField(auto_now_add=True, editable=False) update_date = models.DateTimeField(auto_now=True, editable=False) class Meta: unique_together = (('site_no', 'agency'),) def __str__(self): """Default string.""" str_rep = f'{self.agency}:{self.site_no}' return str_rep
import asyncio import contextlib import discord from redbot.core import commands from redbot.core.commands import Context from redbot.core.i18n import Translator from redbot.core.utils.menus import start_adding_reactions from redbot.core.utils.predicates import ReactionPredicate, MessagePredicate from ...utils.parser import RaffleManager from ...utils.formatting import tick, cross from ...mixins.abc import RaffleMixin from ...utils.exceptions import RaffleError from ...utils.helpers import ( validator, cleanup_code, format_traceback ) _ = Translator("Raffle", __file__) class MiscCommands(RaffleMixin): """All the rest of the commands, such as guildowner-only, and ``[p]raffle parse``.""" @commands.group() async def raffle(self, ctx: Context): pass @raffle.command() async def parse(self, ctx: Context): """Parse a complex raffle without actually creating it.""" await ctx.trigger_typing() check = lambda x: x.author == ctx.author and x.channel == ctx.channel message = _( "Paste your YAML here. It will be validated, and if there is " "an exception, it will be returned to you." ) await ctx.send(message) try: content = await self.bot.wait_for("message", timeout=500, check=check) except asyncio.TimeoutError: with contextlib.suppress(discord.NotFound): await message.delete() content = content.content valid = validator(cleanup_code(content)) if not valid: return await ctx.send(_("This YAML is invalid.")) try: parser = RaffleManager(valid) parser.parser(ctx) except RaffleError as e: exc = _("An exception occured whilst parsing your data.") return await ctx.send(cross(exc) + format_traceback(e)) await ctx.send(tick(_("This YAML is good to go! No errors were found."))) await self.replenish_cache(ctx) @raffle.command() @commands.guildowner() async def refresh(self, ctx: Context): """Refresh all of the raffle caches.""" cleaner = await self.replenish_cache(ctx) if cleaner: return await ctx.send(_("Raffles updated.")) else: return await ctx.send(_("Everything was already up to date.")) @raffle.command() @commands.guildowner() async def teardown(self, ctx: Context): """End ALL ongoing raffles.""" raffles = await self.config.guild(ctx.guild).raffles() if not raffles: await ctx.send(_("There are no ongoing raffles in this guild.")) return message = _("Are you sure you want to tear down all ongoing raffles in this guild?") can_react = ctx.channel.permissions_for(ctx.me).add_reactions if not can_react: message += " (y/n)" message = await ctx.send(message) if can_react: start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS) predicate = ReactionPredicate.yes_or_no(message, ctx.author) event_type = "reaction_add" else: predicate = MessagePredicate.yes_or_no(ctx) event_type = "message" try: await self.bot.wait_for(event_type, check=predicate, timeout=30) except asyncio.TimeoutError: await ctx.send(_("You took too long to respond.")) return with contextlib.suppress(discord.NotFound): await message.delete() if predicate.result: async with self.config.guild(ctx.guild).raffles() as r: r.clear() await ctx.send(_("Raffles cleared.")) else: await ctx.send(_("No changes have been made.")) await self.replenish_cache(ctx)
import os import hashlib import hmac import time from adminapi.cmduser import get_auth_token from adminapi.filters import BaseFilter try: from urllib.request import urlopen, Request from urllib.error import HTTPError, URLError except ImportError: from urllib2 import urlopen, Request, HTTPError, URLError try: import simplejson as json except ImportError: import json BASE_URL = os.environ.get( 'SERVERADMIN_BASE_URL', 'https://serveradmin.innogames.de/api' ) def calc_security_token(auth_token, timestamp, content): message = str(timestamp) + ':' + str(content) return hmac.new( auth_token.encode('utf8'), message.encode('utf8'), hashlib.sha1 ).hexdigest() def send_request(endpoint, data, auth_token, timeout=None): if not auth_token: auth_token = get_auth_token() data_json = json.dumps(data, default=json_encode_extra) for retry in reversed(range(3)): try: req = _build_request(endpoint, auth_token, data_json) return json.loads( urlopen(req, timeout=timeout).read().decode('utf8')) except HTTPError as error: if error.code not in (500, 502): raise if retry == 0: raise except URLError: if retry == 0: raise # In case of an api error, sleep 5 seconds and try again three times time.sleep(5) def _build_request(endpoint, auth_token, data_json): timestamp = int(time.time()) application_id = hashlib.sha1(auth_token.encode('utf8')).hexdigest() security_token = calc_security_token(auth_token, timestamp, data_json) headers = { 'Content-Encoding': 'application/x-json', 'X-Timestamp': str(timestamp), 'X-Application': application_id, 'X-SecurityToken': security_token, } url = BASE_URL + endpoint return Request(url, data_json.encode('utf8'), headers) def json_encode_extra(obj): if isinstance(obj, BaseFilter): return obj.serialize() if isinstance(obj, set): return list(obj) return str(obj)
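# Hedged example of calling send_request above; the endpoint path and payload
# shape are assumptions for illustration, not a documented serveradmin API
# contract.
if __name__ == '__main__':
    response = send_request(
        endpoint='/dataset/query',      # hypothetical endpoint
        data={'filters': {'hostname': 'example.innogames.de'}},
        auth_token=None,                # falls back to get_auth_token()
        timeout=30,
    )
    print(response)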
# -*- coding: utf-8 -*-
"""
Created on Mon May 01 23:02:52 2017

@author: dom
"""

from sys import argv


def isvalid(line):
    # Checks if a read line can be used
    if line[0] == '#':
        return False
    elem = line.split(';')
    if not len(elem) == 3:
        return False
    if not float(elem[2]) > 0:
        return False
    return elem


# Input
#_, filename = argv
filename = 'ex1-100.dat'
txt = open(filename)

# Params
i = 0             # total line count
n = [0, 0]        # valid lines per location
gmean = [1., 1.]  # geometric mean per location

# Loop over lines
for line in txt:
    i += 1
    try:
        elem = isvalid(line)
        if elem is not False:
            loc = int(elem[1]) - 1
            value = float(elem[2])
            # Running geometric mean: fold the new value into the previous mean
            gmean[loc] = gmean[loc]**(n[loc] / (n[loc] + 1.)) * value**(1. / (n[loc] + 1.))
            n[loc] += 1
    except (ValueError, IndexError):
        pass

# Output
print('File: %s with %i lines read' % (filename, i))
print('Valid values Loc1: %i with GeoMean: %.3f' % (n[0], gmean[0]))
print('Valid values Loc2: %i with GeoMean: %.3f' % (n[1], gmean[1]))

txt.close()
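# Hedged illustration of the line format isvalid() above accepts: three
# ';'-separated fields, where the second is a location (1 or 2) and the third
# a positive value; '#' lines are comments. The sample values are made up.
sample_lines = [
    "# timestamp;location;value",
    "2017-05-01 23:00;1;4.20",
    "2017-05-01 23:05;2;3.85",
    "2017-05-01 23:10;1;-1.0",   # rejected: value not > 0
]
for sample in sample_lines:
    print(repr(sample), '->', isvalid(sample))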
# Copyright 2021-xx iiPython # Prism Internal Engine # Modules import os import random import secrets import discord from typing import Union from prism.config import config from discord.ext import commands, tasks from .utils import Utils from ..logging import logger from ..database import Database # Bot class class PrismBot(commands.Bot): def __init__(self, intents: Union[discord.Intents, None] = None, **kwargs) -> None: if intents is None: intents = discord.Intents.default() intents.members = True super().__init__( intents = intents, help_command = None, **kwargs ) # Initialize logging self.logger = logger self.log = logger.log # Load core self.db = Database() self.core = Utils(self) self.config = config self.cooldowns = self.core.cooldowns self.owner = config.get(["admins", "owner"]) def launch_bot(self) -> None: self.log("info", "Launching bot...") # Grab token token = os.getenv("TOKEN") if not token: return self.log("crash", "No token environment variable exists.") # Load commands self.load_cmds() # Launch bot self.run(token, reconnect = True) def load_cmds(self, cmd_path: str = None) -> None: tid = self.core.timer.start() if cmd_path is None: try: cmd_path = config.get(["paths", "cmd_path"]) except IndexError: pass if not cmd_path: return self.log("crash", "No command directory specified to load from.") elif not os.path.exists(cmd_path): return self.log("crash", "Command directory does not exist.") self.core.storage["cmd_path"] = cmd_path # Load commands for path, _, files in os.walk(cmd_path): for file in files: if not file.endswith(".py"): continue # Ignore __pycache__ and etc cmd_path = os.path.join(path, file) relpath = cmd_path.replace("\\", "/").replace(os.getcwd().replace("\\", "/"), "").lstrip("/") # Convert to unix-like path modpath = relpath[:-3].replace("/", ".") # Convert to Python dot-path self.load_extension(modpath) self.log("success", "Loaded commands in {} second(s).".format(self.core.timer.end(tid))) # Main events async def on_ready(self) -> None: # Start status status_data = config.get("status") if status_data is not None and status_data["enabled"]: self.update_status.start() # Log self.log("success", "Logged in as {}.".format(str(self.user))) async def on_application_command_error(self, ctx: commands.Context, error: Exception) -> any: error_map = { commands.BadUnionArgument: lambda e: "Invalid arguments provided.", commands.MemberNotFound: lambda e: "No such user exists.", commands.MissingPermissions: lambda e: "You need the following permissions to run this:\n" + ", ".join([_ for _ in " ".join(e.replace(",", "").split(" ")[3:][:-5]).split(" and ")]), commands.NSFWChannelRequired: lambda e: "This command only works in NSFW channels." 
} if type(error) in error_map: return await ctx.send(embed = self.core.error(error_map[type(error)](str(error)))) error_code = secrets.token_hex(8) self.log("error", f"{error_code} | {ctx.command.name} | {':'.join(str(error).split(':')[1:]).lstrip(' ')}") return await ctx.send( embed = self.core.error( f"An unexpected error has occured, please report this to {self.owner}.\nError code: `{error_code}`", syserror = True ) ) # Status handler @tasks.loop(minutes = 5) async def update_status(self) -> None: status_data = config.get("status") # Handle choices status_name = random.choice(["watching", "playing"]) status_type = { "watching": discord.ActivityType.watching, "playing": discord.ActivityType.playing }[status_name] # Handle updating await self.change_presence( status = { "online": discord.Status.online, "idle": discord.Status.idle, "dnd": discord.Status.dnd, }[status_data.get("status") or "online"], activity = discord.Activity( type = status_type, name = random.choice(status_data.get(status_name)) ) )
# -*- coding:utf-8 -*- # Author:lixuecheng import asyncio import time import requests import concurrent.futures import os import random class A: async def a(self, a1): time.sleep(0.1) # print(a1) return a1 + 1 async def b(self, b1): d = await self.a(b1) return d + 1 class B: def req(self, method,url, path,dict): a=time.time() r = requests.session().request(method, url + path) r.close() print(os.getpid(),'cost:',time.time()-a) return r class C: def ppp(self, k): time.sleep(0.05) # time.sleep(random.random()) print(k, os.getpid()) return k + 1 def fan(a): if a < 200: a += 1 a*=1.02 a/=0.999 if 'aaa'=='aaa': fan(a) async def main(): # cc = C() # bb=B() # l = [i for i in range(100)] # pool = concurrent.futures.ProcessPoolExecutor() loop = asyncio.get_event_loop() # a=time.time() # print('start_time:',a) # for _ in range(100): # res = await loop.run_in_executor(pool, bb.req, 'http://172.16.32.40:8082', '/webapi/api/token/gettoken?openid=f14f531c-2eef-4550-828b-0bdda49ae9dd') # res = await loop.run_in_executor(pool, time.sleep, 0.1) res = await loop.run_in_executor(None, time.sleep, 0.1) # res = await loop.run_in_executor(None, cc.ppp, l.pop()) # fan(res) # print(res.status_code,time.time()-a) # pool.shutdown() # print(time.time()-a,1) # async def aa(): # await main() if __name__ == '__main__': print(os.getpid()) st = time.time() loop = asyncio.get_event_loop() tasks=[main() for _ in range(200)] loop.run_until_complete(asyncio.wait(tasks)) print(time.time() - st) loop.close() # aa = A() # bb = B() # 1.3919999599456787 1.0190000534057617 # 10.760999917984009 10.152999877929688 #5.644999980926514 5.0950000286102295 # 5.0899999141693115 # tasks = loop.create_task(aa.b(1)) # tasks = loop.create_task( # bb.req('http://172.16.32.40:8082', '/webapi/api/token/gettoken?openid=f14f531c-2eef-4550-828b-0bdda49ae9dd')) # v = loop.run_until_complete(tasks) # print(tasks.result()) # print(v)
from heapq import nsmallest


class KNearestDecks(object):
    """
    Finds the decks that are most similar to a set of cards.
    """

    MAX_CARDS_IN_DECK = 30

    def __init__(self):
        self.decks = {}
        self.card_count = {}

    def update_deck(self, deck_entry):
        """
        Adds a card to its corresponding deck.
        """
        archetype = deck_entry['class'] + '##' + deck_entry['archetype']
        if archetype not in self.decks:
            self.decks[archetype] = {}

        title = deck_entry['title']
        if title not in self.decks[archetype]:
            self.decks[archetype][title] = []
        self.decks[archetype][title].append(deck_entry)

        card_name = deck_entry['card-name']
        deck_entry_id = archetype + '##' + title + '##' + card_name
        self.card_count[deck_entry_id] = int(deck_entry['card-count'])

    def get_nearest_decks(self, k, hero_class, archetypes, cards):
        """
        Finds the k decks that are closest to the passed set of cards.
        """
        candidate_decks = []
        input_card_count = KNearestDecks.__get_count(cards)
        for archetype in archetypes:
            archetype_id = hero_class + '##' + archetype
            for deck_title, deck in self.decks[archetype_id].items():
                deck_id = archetype_id + '##' + deck_title
                dist = self.__get_distance(deck_id, cards)
                if dist == KNearestDecks.MAX_CARDS_IN_DECK:
                    continue
                is_complete_match = KNearestDecks.MAX_CARDS_IN_DECK - dist == input_card_count
                candidate_decks.append((dist, {
                    'deck': deck,
                    'archetype': archetype,
                    'complete_match': is_complete_match
                }))
        # Select the k candidates with the smallest distance (slicing a raw
        # heap does not return the k smallest elements).
        return nsmallest(k, candidate_decks, key=lambda candidate: candidate[0])

    def __get_distance(self, deck_id, cards):
        dist = KNearestDecks.MAX_CARDS_IN_DECK
        for card in cards:
            card_name = card['card-name']
            card_count = int(card['card-count'])
            deck_entry_id = deck_id + '##' + card_name
            if deck_entry_id in self.card_count:
                count_in_deck = self.card_count[deck_entry_id]
                dist -= min(count_in_deck, card_count)
        return dist

    @staticmethod
    def __get_count(cards):
        count = 0
        for card in cards:
            count += int(card['card-count'])
        return count
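# Hedged usage sketch for KNearestDecks above; the deck entries mirror the
# dict keys the class reads ('class', 'archetype', 'title', 'card-name',
# 'card-count'), with made-up card data.
knn = KNearestDecks()
knn.update_deck({'class': 'Mage', 'archetype': 'Tempo', 'title': 'Tempo Mage v1',
                 'card-name': 'Fireball', 'card-count': '2'})
knn.update_deck({'class': 'Mage', 'archetype': 'Tempo', 'title': 'Tempo Mage v1',
                 'card-name': 'Frostbolt', 'card-count': '2'})

nearest = knn.get_nearest_decks(
    k=1,
    hero_class='Mage',
    archetypes=['Tempo'],
    cards=[{'card-name': 'Fireball', 'card-count': '2'}],
)
print(nearest)  # e.g. [(28, {'deck': [...], 'archetype': 'Tempo', 'complete_match': True})]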
import os

curr_dir = os.path.dirname(__file__)
data_dir = curr_dir + '/../COVID-19/csse_covid_19_data/csse_covid_19_time_series/'

confirmed_covid_jhu_time_series = data_dir + 'time_series_covid19_confirmed_global.csv'
deaths_covid_jhu_time_series = data_dir + 'time_series_covid19_deaths_global.csv'
recovered_covid_jhu_time_series = data_dir + 'time_series_covid19_recovered_global.csv'
import re import pytest from reddiggit import post @pytest.mark.usefixtures('client') class TestNegative: def test_null_topic(self, client): """Test if error message shows when topic is null while posting.""" assert len(post.posts) == 0 response = client.post('/post/submit', data=dict( author='test_empty_topic' ), follow_redirects=True) assert response.status_code == 200 assert len(post.posts) == 0 assert "Post failed" in response.data def test_null_author(self, client): """Test if error message shows when author is null while posting.""" assert len(post.posts) == 0 response = client.post('/post/submit', data=dict( topic='neg_test' ), follow_redirects=True) assert response.status_code == 200 assert len(post.posts) == 0 assert "Post failed" in response.data def test_empty_topic(self, client): """Test if error message shows when topic is empty while posting.""" assert len(post.posts) == 0 response = client.post('/post/submit', data=dict( author='test_empty_topic', topic='' ), follow_redirects=True) assert response.status_code == 200 assert len(post.posts) == 0 assert "Post failed" in response.data def test_empty_author(self, client): """Test if error message shows when author is empty while posting.""" assert len(post.posts) == 0 response = client.post('/post/submit', data=dict( author='', topic='neg_test' ), follow_redirects=True) assert response.status_code == 200 assert len(post.posts) == 0 assert "Post failed" in response.data def test_both_empty(self, client): """Test if error message shows when both attribute are empty string.""" assert len(post.posts) == 0 response = client.post('/post/submit', data=dict( author='', topic='' ), follow_redirects=True) assert response.status_code == 200 assert len(post.posts) == 0 assert "Post failed" in response.data def test_both_null(self, client): """Test if error message shows when both attribute are Null.""" assert len(post.posts) == 0 response = client.post('/post/submit', follow_redirects=True) assert response.status_code == 200 assert len(post.posts) == 0 assert "Post failed" in response.data def test_topic_exceed_255(self, client): """Should return '400 Bad Request' when topic exceeds 255 chars""" assert len(post.posts) == 0 response = client.post('/post/submit', data=dict( author='neg_test', # A 256 chars string topic='T'*256 ), follow_redirects=True) assert response.status_code == 400 assert len(post.posts) == 0 def test_upvote_post_not_exist(self, client): """Should return '400 Bad Request' when upvote a post not exist""" response = client.get('/post/0/upvote') assert response.status_code == 400 def test_downvote_post_not_exist(self, client): """Should return '400 Bad Request' when downvote a post not exist""" response = client.get('/post/0/downvote') assert response.status_code == 400
#!/usr/bin/env python import glob import os import signal import six import sys import logging import tempfile import time import re from redis import StrictRedis from datetime import datetime if os.name == 'posix' and sys.version_info[0] < 3: import subprocess32 as subprocess else: import subprocess try: from lz4.block import compress except ImportError: from lz4 import compress logging.basicConfig() LOG = logging.getLogger(__name__) LOG.setLevel(logging.DEBUG) LOG.info("Starting up") db = StrictRedis(host=os.getenv('FLAMEGRAPH_REDIS_SERVICE_HOST'), password=os.getenv('FLAMEGRAPH_REDIS_SERVICE_PASSWORD'), port=os.getenv('FLAMEGRAPH_REDIS_SERVICE_PORT'), socket_timeout=1.0, socket_connect_timeout=1.0, socket_keepalive=True ) db.ping() # PYFLAME_REDIS_SERVICE_HOST # PYFLAME_REDIS_SERVICE_PORT_PUBLIC pod_name = os.getenv('MY_POD_NAME', None) node_name = os.getenv('MY_NODE_NAME', None) pod_filter = re.compile(os.getenv('POD_FILTER', '.*')) DURATION=1 running = True monitors = {} EPOCH = datetime(1970, 1, 1) def timestamp(now = None): now = now or datetime.utcnow() return (now - EPOCH).total_seconds() def stop_handler(signum, frame): global running running = False for _, monitor in six.iteritems(monitors): proc = monitor['process'] proc.send_signal(signum) def start_monitor(pid, monitor=None): if not running: return monitor = monitor or monitors[pid] now = datetime.utcnow() tmp_file = tempfile.mktemp(prefix=str(pid)) monitor['output'] = tmp_file proc = subprocess.Popen(["/usr/bin/pyflame", "-s", str(DURATION), "--threads", "-p", str(pid), "-o", tmp_file]) monitor['process'] = proc monitor['timestamp'] = timestamp(now) # LOG.debug("Started %d for %d", proc.pid, pid) def reaper(signum, _): for pid, monitor in six.iteritems(monitors): # LOG.debug("Checking %d", proc.pid) try: proc = monitor['process'] proc.wait(timeout=0) LOG.debug("Terminated %d", proc.pid) try: outs, _ = proc.communicate(timeout=1) except ValueError: outs = None output_file = monitor['output'] if running: start_monitor(pid, monitor) with open(output_file, 'r') as f: samples = f.read() os.remove(output_file) key = ':'.join([node_name, monitor['hostname'], monitor['name'], monitor['cgroup'], str(monitor['cgroup_pid'])]) LOG.debug("%s:%f: '%s'", key, monitor['timestamp'], len(samples)) with db.pipeline() as pipe: pipe.zadd(key, monitor['timestamp'], compress(samples.replace('/var/lib/kolla/venv/local/lib/python2.7/site-packages', ''))) # pipe.zadd() old = monitor['timestamp'] - 3600 * 24 if old > 0: pipe.zremrangebyscore(key, 0, old) pipe.execute() except subprocess.TimeoutExpired, KeyError: pass signal.signal(signal.SIGTERM, stop_handler) signal.signal(signal.SIGQUIT, stop_handler) # signal.signal(signal.SIGCHLD, reaper) if pod_name: own_ns_path = '/proc/{}/ns/net'.format(os.getpid()) own_ns = os.stat(own_ns_path) own_pid = os.getpid() current_pids = set([own_pid]) while running: reaper(signal.SIGCHLD, None) known_pids = current_pids current_pids = set() for ns_path in glob.glob('/proc/[1-9]*/ns/net'): try: pid_path = ns_path[:-6] _, _, pid, _ = pid_path.split('/', 3) pid = int(pid) current_pids.add(pid) if pid in known_pids or \ not 'python' in os.readlink(pid_path + 'exe') or \ pod_name and (ns_path == own_ns_path or os.stat(ns_path) != own_ns): continue cgroup_pid = pid with open(pid_path + 'cgroup', 'r') as f: cgroup = f.readline().split(':')[-1] with open(pid_path + 'status', 'r') as f: for line in f: if line.startswith('Name:\t'): name = line[6:] if line.startswith('NSpid:\t'): for ppid in line[6:].split(None): ppid = 
int(ppid) if ppid != pid: cgroup_pid = ppid hostname='unknown' with open(pid_path + 'environ', 'r') as f: for env in f.read().split('\0'): if env.startswith('HOSTNAME='): _, hostname = env.split("=", 2) break monitor = { 'host_pid': pid, 'cgroup': cgroup.strip(), 'cgroup_pid': cgroup_pid, 'hostname': hostname.strip(), 'name': name.strip() } # LOG.debug("Discovered %s", monitor) if pod_filter.search(hostname): LOG.info("Monitoring %s", monitor) monitors[pid] = monitor start_monitor(pid, monitor) except OSError as e: pass sys.stdout.flush() time.sleep(0.01)
from pytest import fixture from linkedin_scraper.parsers import EmploymentParser @fixture() def employment_parser(): parser = EmploymentParser() parser.professions_list = [] return parser def test_regular_linkedin_format(employment_parser): assert ('Python Developer', 'Foo Software') == employment_parser.parse( 'Python Developer at Foo Software') def test_unknown_format(employment_parser): assert ('Python Developer', '') == employment_parser.parse( 'Python Developer') def test_multiple_at_in_employment_str(employment_parser): assert ('Developer', 'Foo at Bar') == employment_parser.parse( 'Developer at Foo at Bar') def test_position_found_in_profession_list(employment_parser): employment_parser.professions_list = {'developer'} assert ('Senior Python Developer', 'Foo Software') == \ employment_parser.parse('Senior Python Developer, Foo Software') def test_position_found_in_profession_list_without_company(employment_parser): employment_parser.professions_list = {'developer'} assert ('Senior Python Developer', '') == employment_parser.parse( 'Senior Python Developer')
from helper import *


### 1a
def grader1a(A, B, C, T, V, W):
    try:
        aae(A@B@C, H@T@V@W@H)
    except:
        return 'T, V, W are not correct'
    else:
        if np.all(V == W):
            return 'Unitary matrices are not unique'
        elif np.all(V == T):
            return 'Unitary matrices are not unique'
        else:
            return 'Congratulations, your answer is correct \U0001F389'


### 1b
def grader1b(A, B, C, D, E, F, H, I, J, K, L, M):
    V = np.array([[0, -np.sqrt(2)/2 - np.sqrt(2)/2 * 1j], [1j, 0]])
    try:
        aae(A@B@C@D@E@F, V@H@I@J@K@L@M@dagger(V))
    except:
        return 'H, I, J, K, L, M are not correct'
    else:
        return 'Congratulations, your answer is correct \U0001F389'


### 2
def grader2(circ):
    correct_mat = np.array([[0], [0], [0], [1/np.sqrt(2)], [1/np.sqrt(2)], [0], [0], [0]])
    circ_mat = get(circ, types = 'statevector', nice = False).reshape(8, 1)
    try:
        aae(np.abs(correct_mat), np.abs(circ_mat))
    except:
        return 'Circuit is not correct'
    else:
        circ = transpile(circ, basis_gates = ['u', 'cx'])
        no_u = circ.count_ops()['u']
        no_cx = circ.count_ops()['cx']
        cost = 10*no_cx + no_u
        print(f"Congratulations \U0001F389! Your answer is correct. \n\nYour cost is {cost}.\n\nFeel free to submit your answer")


def grader3(circ):
    no_qubits = circ.num_qubits
    correct_list = ['01010', '11010', '00010', '10010', '01110', '11110', '00110', '10110',
                    '01000', '11000', '10000', '00000', '01100', '11100', '00100', '10100',
                    '01011', '11011', '00011', '10011', '01111', '11111', '00111', '10111',
                    '01001', '11001', '00001', '10001', '01101', '11101', '00101', '10101']
    out_list = []
    ind_list = np.arange(5)
    mask_list = list(product([False, True], repeat = 5))
    qc = QuantumCircuit(no_qubits, 5)
    qc = qc.compose(circ, range(no_qubits))
    qc.measure(list(range(5)), list(range(5)))
    out_list.append(list(simul(qc).keys())[0])
    for mask in mask_list[1:]:
        ind = ind_list[np.array(mask)]
        qc = QuantumCircuit(no_qubits, 5)
        qc.x(list(ind))
        qc = qc.compose(circ, range(no_qubits))
        qc.measure(list(range(5)), list(range(5)))
        out_list.append(list(simul(qc).keys())[0])
    if np.all(np.array(out_list) == np.array(correct_list)):
        circ = transpile(circ, basis_gates = ['u', 'cx'])
        no_u = circ.count_ops()['u']
        no_cx = circ.count_ops()['cx']
        cost = 20*(no_qubits - 5) + 10*no_cx + no_u
        print(f"Congratulations \U0001F389! Your answer is correct. \n\nYour cost is {cost}.\n\nFeel free to submit your answer")
    else:
        print('Circuit is not correct. Please try again')


def grader4a(circ):
    correct_mat = np.array([[0., 0., 0., 0., 1., 0., 0., 0.],
                            [0., 0., 0., 1., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 1., 0.],
                            [0., 1., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 1.],
                            [0., 0., 1., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 1., 0., 0.]])
    circ_mat = get(circ, nice = False)
    try:
        aae(correct_mat, np.abs(circ_mat))
    except:
        return 'Circuit is not correct'
    else:
        circ = transpile(circ, basis_gates = ['u', 'cx'])
        no_cx = circ.count_ops()['cx']
        no_u = circ.count_ops()['u']
        cost = 10*no_cx + no_u
        print(f"Congratulations \U0001F389! Your answer is correct. \n\nYour cost is {cost}.\n\nFeel free to submit your answer")


def grader4b(circ):
    correct_mat = np.array([[0., 0., 0., 0., 0., 0., 0., 1.],
                            [0., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 1., 0., 0.],
                            [0., 0., 0., 1., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 1., 0.],
                            [0., 0., 1., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 1., 0., 0., 0.]])
    circ_mat = get(circ, nice = False)
    try:
        aae(correct_mat, np.abs(circ_mat))
    except:
        return 'Circuit is not correct'
    else:
        circ = transpile(circ, basis_gates = ['u', 'cx'])
        no_cx = circ.count_ops()['cx']
        no_u = circ.count_ops()['u']
        cost = 10*no_cx + no_u
        print(f"Congratulations \U0001F389! Your answer is correct. \n\nYour cost is {cost}.\n\nFeel free to submit your answer")


def grader4c(circ):
    correct_mat = np.array([[0., 0., 0., 0., 0., 0., 1., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 1., 0., 0., 0.],
                            [0., 0., 0., 1., 0., 0., 0., 0.],
                            [0., 0., 1., 0., 0., 0., 0., 0.],
                            [0., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 1., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 1.]])
    circ_mat = get(circ, nice = False)
    try:
        aae(correct_mat, np.abs(circ_mat))
    except:
        return 'Circuit is not correct'
    else:
        circ = transpile(circ, basis_gates = ['u', 'cx'])
        no_cx = circ.count_ops()['cx']
        no_u = circ.count_ops()['u']
        cost = 10*no_cx + no_u
        print(f"Congratulations \U0001F389! Your answer is correct. \n\nYour cost is {cost}.\n\nFeel free to submit your answer")
'''
do nothing, for debug
'''


class I2c():
    def __init__(self, scl, sda):
        pass

    def address(self, addr):
        pass

    def writeWordReg(self, reg, data):
        pass
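# --- Hypothetical usage sketch (not part of the original file): the no-op I2c
# --- class above can stand in for a real I2C driver while debugging code that
# --- writes registers, so the calling logic runs without touching hardware.
# --- The pin numbers and register values below are placeholders.
bus = I2c(scl=5, sda=4)
bus.address(0x3C)
bus.writeWordReg(0x00, 0xFFFF)  # silently ignored by the debug stub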
import fgrequests

urls = ['https://www.google.com']

response = fgrequests.build(urls, verify=False)

assert type(response) == list
# -*- coding: utf-8 -*- from collections import defaultdict, namedtuple from datetime import datetime import logging import os import re from google.appengine.api import taskqueue from google.appengine.ext import ndb from flask import has_request_context, request from roger import auth, config, files, models, notifs, report, slack_api, streams, strings from roger import threads from roger_common import errors, identifiers, random, security class HookCollection(dict): def trigger(self, *args, **kwargs): # Note: Generally the first positional argument should be an account handler. for hook_name, hook in self.iteritems(): try: hook(*args, **kwargs) except: logging.exception('Failed run hook "%s"' % (hook_name,)) activation_hooks = HookCollection() create_hooks = HookCollection() _static_handlers = {} _status_handlers = {} def create(identifier=None, display_name=None, image=None, status='temporary', status_reason=None, **kwargs): _validate_status_transition(None, status) if status == 'voicemail' and not display_name: # Special rule for voicemail display phone number. display_name = identifier if image and not isinstance(image, basestring): image = files.upload(image.filename, image.stream, persist=True) # Create the account entity and get a handler wrapping it. account = models.Account.create(status, display_name=display_name, identifier=identifier, image_url=image, **kwargs) handler = get_handler(account) # Run all create hooks (generally used to add default streams to the user). create_hooks.trigger(handler) # Report account activation. if status == 'active': if not status_reason: status_reason = 'created_active' report.account_activated(handler.account_id, 'none', status_reason) logging.debug('Account activated due to %s', status_reason) activation_hooks.trigger(handler) return handler def connect_email(handler): for identifier in handler.identifiers: identifier, identifier_type = identifiers.parse(identifier) if identifier_type != identifiers.EMAIL: continue _, team, user = identifiers.parse_service(identifier) handler.connect_service('email', team, user) activation_hooks['connect_email'] = connect_email def generate_username(handler): if handler.username: return if not handler.account.display_name_set: handler.generate_username() return base = identifiers.clean_username(handler.account.display_name) handler.generate_username(base) activation_hooks['generate_username'] = generate_username def get_account(identifier): """Utility function for getting an Account instance.""" account = models.Account.resolve(identifier) if not account: raise errors.ResourceNotFound('That account does not exist') return account def get_handler(identifier): """Returns an account handler for the given identifier.""" if isinstance(identifier, AccountHandler): # Assume that account handlers have already been resolved before. return identifier # Parse and normalize the identifier. identifier_type = 'unknown' if isinstance(identifier, basestring): identifier, identifier_type = identifiers.parse(identifier) # Attempt to find a Roger account for the provided identifier. account = models.Account.resolve(identifier) if account: # First check if there is a static handler for the account. for identity in account.identifiers: cls = _static_handlers.get(identity.id()) if cls: return cls.handler # Support custom handlers for specific statuses. if account.status in _status_handlers: return _status_handlers[account.status](account) # No special handling for this account. 
return AccountHandler(account) # Finally check if the identifier has a static handler. if identifier in _static_handlers: # Use a static handler for this identifier. return _static_handlers[identifier].handler # Give up. logging.info('Could not get a handler for "%s" (%s)', identifier, identifier_type) raise errors.ResourceNotFound('That account does not exist') def get_or_create(*args, **kwargs): if not args: raise ValueError('Need at least one identifier') args = map(identifiers.clean, args) notify_change = kwargs.pop('notify_change', True) # Find the first account matching an identifier and attempt to add the remaining # identifiers to it. If no account was found, create a new one with the available # identifiers added to it. Identifiers belonging to other accounts are ignored. identity_keys = [ndb.Key('Identity', identifier) for identifier in args] account_key = None claimables = [] for identifier, identity in zip(args, ndb.get_multi(identity_keys)): if not identity or not identity.account: claimables.append(identifier) continue if account_key and identity.account != account_key: logging.warning('%r does not belong to account %d', identifier, account_key.id()) continue account_key = identity.account # TODO: Transaction? if account_key: logging.debug('Found account %d for %r', account_key.id(), args) handler = get_handler(account_key) if not handler.image_url and 'image' in kwargs: logging.debug('Updating image for %d', account_key.id()) handler.set_image(kwargs['image']) else: handler = create(claimables.pop(0), **kwargs) logging.debug('Created account %d for %r', handler.account_id, args) for identifier in claimables: # TODO: Decide if/when to notify on connect for additional identifiers. handler.add_identifier(identifier, notify_change=False, notify_connect=False) # Only trigger one change notif, and only for existing accounts. if account_key and claimables and notify_change: handler._notify_account_change() return handler # Runs additional logic when an account receives a chunk. def handle_stream_chunk(e): receiver = get_handler(e.event_account) sender = get_handler(e.stream.lookup_account(e.chunk.sender)) receiver.on_new_chunk(sender, e.stream, e.chunk, mute_notification=e.mute_notification) notifs.add_handler(notifs.ON_STREAM_CHUNK, handle_stream_chunk) def handle_new_stream(e): if e.sender_id == e.event_account_key.id(): # Don't send the stream creator's greeting. 
return if len(e.stream.participants) != 2 or not e.event_account.greeting: return e.stream.send(e.event_account.greeting, e.event_account.greeting_duration, show_for_sender=False, start_now=True) notifs.add_handler(notifs.ON_STREAM_NEW, handle_new_stream) def notify_others_on_activation(handler): @ndb.tasklet def callback(stream): events = defaultdict(list) for account_key in stream.visible_by: if account_key == handler.account.key: continue events[account_key].append({ 'participant': handler.account, 'stream_id': stream.key.id() }) account_keys = list(events) accounts = yield ndb.get_multi_async(account_keys) futures = [] for i, (key, account) in enumerate(zip(account_keys, accounts)): hub = notifs.Hub(account) for event_data in events[key]: futures.append(hub.emit_async(notifs.ON_STREAM_PARTICIPANT_CHANGE, **event_data)) yield tuple(futures) q = models.Stream.query(models.Stream.participants.account == handler.account.key) q.map(callback) activation_hooks['notify_others_on_activation'] = notify_others_on_activation def static_handler(identifier, **kwargs): """ A decorator that registers a custom handler for the account with the specified identifier. If such an account doesn't exist, it will be created. """ def wrap(cls): # Static handlers default to creating active accounts. kwargs.setdefault('status', 'active') cls.handler = StaticHandlerDescriptor(identifier, **kwargs) _static_handlers[identifier] = cls return cls return wrap def status_handler(status): def wrap(cls): _status_handlers[status] = cls return cls return wrap def unregister_static_handler(identifier): cls = _static_handlers.pop(identifier, None) if cls: delattr(cls, 'handler') def _validate_status_transition(old_status, new_status): """Validates that a status may change from a certain value to another.""" can_change = False if old_status is not None else True for tier in config.VALID_STATUS_TRANSITIONS: if old_status in tier: can_change = True if new_status in tier: if not can_change: raise errors.ForbiddenAction('Cannot change status from "%s" to "%s"' % ( old_status, new_status)) break else: raise errors.InvalidArgument('Invalid status') class AccountHandler(object): # Avoid accidentally setting unsupported attributes. __slots__ = ['account', 'identifier', 'notifs'] def __eq__(self, other): if not isinstance(other, AccountHandler): return False return self.account == other.account def __getattr__(self, name): # By default, proxy to the underlying Account entity. return getattr(self.account, name) def __init__(self, account=None, identifier=None, **kwargs): if not account: account = get_account(identifier) self.account = account self.identifier = identifier self.notifs = notifs.Hub(account) @property def account_age(self): return datetime.utcnow() - self.created def add_identifier(self, identifier, notify_change=True, notify_connect=True, **kwargs): identifier, identifier_type = identifiers.parse(identifier) identity, account = models.Identity.add(identifier, self.account.key, **kwargs) if not identity: if self.has_identifier(identifier): # Just assume the identifier is already owned by the current account. return raise errors.AlreadyExists('That identifier is already in use') # Brazil can use two formats for one phone number. 
equivalent = self._get_alternate_identifier(identity.key.id()) if equivalent: i, a = models.Identity.add(equivalent, self.account.key) if i and a: identity, account = i, a else: logging.warning('Failed to reserve %r (based on %r) for %d', equivalent, identifier, self.account.key.id()) # Update in-memory instance to reflect reality. self.account.populate(**account.to_dict()) if identifier_type == identifiers.EMAIL and self.account.is_activated: # Connect the email "service". _, team, user = identifiers.parse_service(identifier) self.connect_service('email', team, user, notify=notify_connect) elif identifier_type == identifiers.SERVICE_ID: service, team, user = identifiers.parse_service(identifier) if service == 'fika': # fika.io "service" always gets connected. self.connect_service('fika', team, user, notify=notify_connect) if notify_change: self._notify_account_change() @ndb.tasklet def add_vote_async(self): account = yield models.Account.add_vote_async(self.account.key) if account: self.account.populate(**account.to_dict()) raise ndb.Return(True) raise ndb.Return(False) def block(self, identifier): blocked_account = models.Account.resolve_key(identifier) if blocked_account == self.account.key: raise errors.InvalidArgument('You cannot block yourself') models.Account.add_block(blocked_account, self.account.key) f1 = models.AccountFollow.unfollow_async(self.account.key, blocked_account) f2 = models.AccountFollow.unfollow_async(blocked_account, self.account.key) stream = self.streams.get([blocked_account]) if stream: stream.hide() ndb.Future.wait_all([f1, f2]) def change_identifier(self, old, new, notify_connect=True, primary=False): new, identifier_type = identifiers.parse(new) if not new: logging.warning('%r is invalid', new) raise errors.InvalidArgument('That identifier is not valid') if old not in self.identifiers: raise errors.ForbiddenAction('That identifier belongs to another account') if old == new: return # Get the service, team, resource from the new identifier. try: service, team, resource = identifiers.parse_service(new) new_team = not self.is_on_team(service, team) except: service, team, resource = (None, None, None) new_team = True identity, account = models.Identity.change( old, new, assert_account_key=self.account.key, primary=primary) if not identity: raise errors.AlreadyExists('That identifier is already in use') # Update in-memory instance to reflect reality. if account: self.account.populate(**account.to_dict()) if self.account.is_activated and service == 'email' and new_team: # Connect the email "service" (if the user is not already on this domain). self.connect_service(service, team, resource, notify=notify_connect) # TODO: We should also disconnect service if the old identifier was a service. self._notify_account_change() def change_status(self, status, status_reason=None): if self.account.status == status: # Don't do anything if there is no change. return # TODO: Report all status changes. old_status = self.account.status was_activated = self.account.is_activated _validate_status_transition(old_status, status) # Update the account and its related Identity entities. account = models.Account.set_status(self.account.key, status) self.account.populate(**account.to_dict()) if self.account.is_activated and not was_activated: # Report account activation. if not status_reason: status_reason = 'set_active' report.account_activated(self.account_id, old_status, status_reason) # Run activation hooks. 
logging.debug('Account activated due to %s', status_reason) activation_hooks.trigger(self) self._notify_account_change() def connect_service(self, service, team, identifier, client=None, notify=True, team_properties={}, **kwargs): key = models.ServiceAuth.resolve_key((self.account.key, service, team)) _, service_key, team_key = models.ServiceAuth.split_key(key) if team_key: # Ensure that the team exists. # TODO: In a transaction together with ServiceAuth? models.ServiceTeam.create_or_update(team_key, **team_properties) # Get or create the service authentication entry. auth = key.get() if not auth: auth = models.ServiceAuth(key=key, service=service_key, service_team=team_key) auth.service_identifier = identifier auth.populate(**kwargs) # Store the clients that have been used to connect to this service. client_key = ndb.Key('ServiceClient', client or 'api') if client_key not in auth.service_clients: auth.service_clients.append(client_key) new_client = True else: new_client = False auth.put() # Notify the first time a user connects with a new client. if new_client: logging.debug('New client %s for %s', client_key.id(), key.id()) self.on_new_service(auth, client_key, notify=notify) self._notify_account_change() return auth def create_access_token(self, **kwargs): return self.create_session(**kwargs).to_access_token() def create_session(self, skip_activation=False, **kwargs): # Activate the user when they log in. if not (self.is_activated or skip_activation) or self.is_inactive: if not self.can_activate: raise errors.ForbiddenAction('Account may not be activated at this time') self.change_status('active', status_reason='logged_in') scopes = {auth.SCOPE_REFRESH_TOKEN} return auth.Session(self.account, scopes=scopes, **kwargs) def disconnect_service(self, service, team, resource): # Remove the service authentication entry. key = models.ServiceAuth.resolve_key((self.account.key, service, team)) key.delete() identifier = models.Service.build_identifier(service, team, resource) logging.debug('Disconnected service %s for %d', identifier, self.account_id) if self.has_identifier(identifier): logging.warning('Account still has identifier %s', identifier) def generate_username(self, base=None): """Generates a new username based on display name.""" identifier = None new_username = base or random.username_generator() while True: identifier, account = models.Identity.add(new_username, self.account.key, primary=bool(base)) if identifier: logging.debug('Successfully added username %r', new_username) self.account.populate(**account.to_dict()) self._notify_account_change() return logging.debug('Could not add username %r', new_username) new_username = random.username_generator(base) def has_identifier(self, identifier): return identifiers.clean(identifier) in self.identifiers @property def has_password(self): """Checks if the account has a password associated with it.""" auth = models.PasswordAuth.get_by_id(str(self.account_id)) return bool(auth) @property def identifiers(self): return [identifier.id() for identifier in self.account.identifiers] def load(self): self.account.populate(**self.account.key.get().to_dict()) def on_new_chunk(self, sender, stream, chunk, mute_notification=False): """Handler for when an account has received a chunk.""" # When a temporary account receives something, it becomes invited. 
if self.is_temporary and not sender.is_bot: self.change_status('invited', status_reason='incoming_stream') report.invite(sender.account_id, self.account_id) def on_new_service(self, auth, client_key, notify=True): params = { 'account_id': self.account_id, 'client_id': client_key.id(), 'notify': 'true' if notify else 'false', 'service_id': auth.service.id(), 'team_id': auth.service_team.id() if auth.service_team else '', 'resource': auth.service_identifier, } logging.debug('Queueing job to set up %s for %d', auth.key.id(), self.account_id) taskqueue.add(method='GET', url='/_ah/jobs/set_up_new_service', params=params, queue_name=config.SERVICE_QUEUE_NAME) def remove_identifier(self, identifier): if identifier not in self.identifiers: raise errors.ForbiddenAction('That identifier belongs to another account') if len(self.identifiers) < 2: raise errors.ForbiddenAction('Can not remove last identifier') account = models.Identity.release(identifier, assert_account_key=self.account.key) # Update in-memory instance to reflect reality. if account: self.account.populate(**account.to_dict()) # Disconnect service if the identifier is a service identifier. identifier, identifier_type = identifiers.parse(identifier) if identifier_type in (identifiers.EMAIL, identifiers.SERVICE_ID): service, team, resource = identifiers.parse_service(identifier) self.disconnect_service(service, team, resource) self._notify_account_change() def send_greeting(self, account, mute_notification=True): if not self.greeting: logging.warning('Attempted to send greeting but there is no greeting') return self.streams.send( [account], self.greeting, duration=self.greeting_duration, mute_notification=mute_notification, reason='greeting') def set_display_name(self, display_name): if not isinstance(display_name, basestring): raise TypeError('Display name must be a string') # TODO: Validate display name more. display_name = display_name.strip() if not display_name: raise errors.InvalidArgument('Invalid display name') if display_name == self.account.display_name: return self.account.display_name = display_name self.account.put() if not self.account.primary_set: base = identifiers.clean_username(self.account.display_name) if base: logging.debug('User has no username, autosetting one') self.generate_username(base) self._notify_account_change() def set_greeting(self, payload, duration): if not files.is_persistent(payload): payload = files.make_persistent(payload) self.account.greeting = payload self.account.greeting_duration = duration self.account.put() def set_image(self, image): if image and not isinstance(image, basestring): image = files.upload(image.filename, image.stream, persist=True) self.account.image_url = image self.account.put() self._notify_account_change() def set_password(self, password): """Sets the password that is used to authenticate the account.""" auth = models.PasswordAuth(id=str(self.account_id)) auth.salt = os.urandom(32) auth.hash = security.salted_sha256(password, auth.salt) auth.put() def set_primary_identifier(self, identifier): """Moves an identifier to the top of the list of identifier, making it the primary one. 
""" account = models.Account.set_primary_identifier(self.account.key, identifier) # Update in-memory instance to reflect reality if account: self.account.populate(**account.to_dict()) self._notify_account_change() def set_username(self, new_username): new_username, identifier_type = identifiers.parse(new_username) if identifier_type != identifiers.USERNAME: # A user may not use this endpoint to add a phone number/e-mail. raise errors.InvalidArgument('A valid username must be provided') # Switch out the old username if it exists, otherwise just add the new one. old_username = self.username if old_username and self.account.primary_set: self.change_identifier(old_username, new_username, primary=True) else: self.add_identifier(new_username, primary=True) @property def streams(self): return streams.get_handler(self.account) @property def threads(self): return threads.Handler(self.account.key) def unblock(self, identifier): blocked_account = models.Account.resolve_key(identifier) models.Account.remove_block(blocked_account, self.account.key) def update_demographics(self, birthday, gender): changed = False if birthday is not None: birthday = models.Account.parse_birthday(birthday) models.Account.validate_birthday(birthday) if birthday != self.account.birthday: self.account.birthday = birthday changed = True if gender is not None: models.Account.validate_gender(gender) if gender != self.account.gender: self.account.gender = gender changed = True if not changed: return self.account.put() self._notify_account_change() def validate_password(self, password): """ Checks if the provided password matches the password that is used to authenticate the account. """ auth = models.PasswordAuth.get_by_id(str(self.account_id)) if not auth or auth.hash != security.salted_sha256(password, auth.salt): return False return True def _get_alternate_identifier(self, identifier): # For Brazil, support multiple valid formats for one number. if not identifier.startswith('+55'): return if len(identifier) < 13 or identifier[-8] not in '6789': # Only legacy numbers have this special logic. return if len(identifier) == 13: # Legacy format -> new format. return identifier[:5] + '9' + identifier[5:] if len(identifier) == 14: # New format -> legacy format. return identifier[:5] + identifier[6:] def _notify_account_change(self): self.notifs.emit(notifs.ON_ACCOUNT_CHANGE, account=self.account, public_options={'include_extras': True, 'view_account': self.account}) class Resolver(object): """Helper class for dealing with destinations of a stream.""" Route = namedtuple('Route', 'type value label') def __init__(self, routes=None): self.routes = routes or [] def __repr__(self): return 'Resolver(%r)' % (self.routes,) def add_route(self, route_type, route, label=None): route = self.Route(route_type, route, label) score = self.route_rank(route) for index, other_route in enumerate(self.routes): if self.route_rank(other_route) < score: self.routes.insert(index, route) break else: self.routes.append(route) def get_or_create_account_key(self, create_status='temporary', origin_account=None): # Identifier types that may have an account created. creatable_types = (identifiers.EMAIL, identifiers.PHONE, identifiers.SERVICE_ID) # Try to match the destination to an existing account. best_route = None for route in self.routes: key = models.Account.resolve_key(route.value) if key: return key if route.type in creatable_types and not best_route: best_route = route # Account not found, create one based on first usable contact detail. 
# TODO: This should check properly if route is externally verifiable (e.g., SMS). if not best_route: logging.warning('Failed to create an account for one of %s', self.routes) return None identifier = best_route.value # Locally verified accounts can be created immediately. if best_route.type != identifiers.SERVICE_ID: handler = create(identifier, status=create_status) return handler.account.key # Verify that this user is on the same service/team as the origin account. if not origin_account: raise errors.InvalidArgument('Cannot use third-party accounts') service_key, team_key, resource = models.Service.parse_identifier(identifier) if not origin_account.is_on_team(service_key, team_key): raise errors.InvalidArgument('Invalid third-party account') # Look up the third-party account. # TODO: Support multiple types of services dynamically. if service_key.id() != 'slack': raise errors.NotSupported('Only Slack accounts are supported') auth = origin_account.get_auth_key(service_key, team_key).get() # TODO: Put this API call elsewhere! info = slack_api.get_user_info(resource, auth.access_token) ids = [identifier, info['user']['profile']['email']] handler = get_or_create( *ids, display_name=info['user']['real_name'] or info['user']['name'], image=info['user']['profile'].get('image_original'), status=create_status) return handler.account.key @classmethod def parse(cls, value): if not isinstance(value, basestring): raise TypeError('Destination value must be a string') destination = cls() # Value is a comma separated list of routes. for route in value.split(','): # Route can contain a label prefix (with a colon to separate it). label_and_route = route.split(':', 1) if len(label_and_route) == 1: label = None route = label_and_route[0] else: label = re.sub(r'[\W_]', '', label_and_route[0].lower()) route = label_and_route[1] # Clean up route and get its type. route, route_type = identifiers.parse(route) destination.add_route(route_type, route, label) return destination @property def primary_route(self): return self.routes[0] @staticmethod def route_rank(route): # Score the route based on certain values. score = 0 if route.type in (identifiers.ACCOUNT_ID, identifiers.USERNAME): score += 500 elif route.type == identifiers.SERVICE_ID: score += 400 elif route.type == identifiers.EMAIL: score += 10 elif route.type == identifiers.PHONE: # TODO: Use Twilio API to look up numbers? if route.value.startswith('+3519') or route.value.startswith('+467'): # Swedish and Spanish cellular phone numbers are predictable. score += 5 score += 100 else: logging.debug('Unhandled route type: %s', route.type) if route.label == 'backend': score += 75 elif route.label == 'iphone': score += 55 elif route.label == 'mobile': score += 50 elif route.label == 'main': score += 10 elif route.label in ('home', 'work'): # These are unlikely to be cell phones, so penalize their score. score -= 10 elif route.label: logging.debug('Unhandled label: %s', route.label) return score class StaticHandlerDescriptor(object): """ A lazy evaluator of a single instance for the specified identifier. The type of the instance will be the type that this descriptor is assigned to. Important: The account will be created or loaded immediately when this descriptor is created. 
""" def __init__(self, identifier, **kwargs): self.account = models.Account.resolve(identifier) if not self.account: self.account = create(identifier, **kwargs).account self.identifier = identifier self._instance = None def __get__(self, obj, cls): if not self._instance: self._instance = cls(account=self.account, identifier=self.identifier) return self._instance
from folium.map import Marker
from folium.utilities import parse_options
from branca.element import MacroElement
from jinja2 import Template


class JavascriptMarker(Marker):
    """A folium Marker with a JavaScript click handler attached.

    When the marker is clicked, the handler reads the marker's popup text
    and redirects the top-level window (Leaflet is rendered inside an
    iframe) to http://localhost:5000/ with the popup text passed as the
    ``start`` query parameter.

    Parameters
    ----------
    location: tuple of length 2, default None
        The latitude and longitude of the marker. If None, then the middle
        of the map is used.
    popup: Popup or str, default None
        Popup attached to the marker; its text is used by the click handler.
    icon: Icon, default None
        Icon to use for the marker.
    """

    _template = Template(u"""
        {% macro script(this, kwargs) %}
        function markerClick(e){
            console.log(this);
            let name = this._popup._content.innerText.trim()
            let url = new URL("http://localhost:5000/");
            url.searchParams.set("start", name);
            console.log(url.href);
            //top because leaflet is loaded in an iFrame
            window.top.location.href = url.href;
            /*fetch("http://localhost:5000/?location="+e.latlng)*/
        }
        var {{ this.get_name() }} = L.marker(
            {{ this.location|tojson }},
            {{ this.options|tojson }}
        ).on("click", markerClick).addTo({{ this._parent.get_name() }});
        {% endmacro %}
        """)

    def __init__(self, location, popup=None, icon=None, **kwargs):
        super().__init__(
            location,
            popup=popup,
            icon=icon
        )
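# --- Hypothetical usage sketch (not part of the original file): assumes a
# --- folium installation and a local web app listening on
# --- http://localhost:5000/ to receive the "start" parameter. The coordinates
# --- and popup text are illustrative only.
import folium

m = folium.Map(location=[48.85, 2.35], zoom_start=12)
JavascriptMarker(
    location=[48.8584, 2.2945],
    popup=folium.Popup("Eiffel Tower"),  # popup text becomes the "start" query parameter
).add_to(m)
m.save("map.html")  # clicking the marker redirects the top window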
# -*- coding: utf-8 -*- # @Author: XP import logging import os import jittor import utils.data_loaders as dataloader_jt from jittor import nn from datetime import datetime from tqdm import tqdm from time import time from tensorboardX import SummaryWriter from core.test_c3d import test_net from utils.average_meter import AverageMeter from models.model import PMPNetPlus as Model from core.chamfer import chamfer_loss_bidirectional as chamfer def lr_lambda(epoch): if 0 <= epoch <= 100: return 1 elif 100 < epoch <= 150: return 0.5 elif 150 < epoch <= 250: return 0.1 else: return 0.5 def train_net(cfg): # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use # train_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TRAIN_DATASET](cfg) # test_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg) train_dataset_loader = dataloader_jt.DATASET_LOADER_MAPPING[cfg.DATASET.TRAIN_DATASET](cfg) test_dataset_loader = dataloader_jt.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg) train_data_loader = train_dataset_loader.get_dataset(dataloader_jt.DatasetSubset.TRAIN, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=True) val_data_loader = test_dataset_loader.get_dataset(dataloader_jt.DatasetSubset.VAL, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=False) # Set up folders for logs and checkpoints output_dir = os.path.join(cfg.DIR.OUT_PATH, '%s', datetime.now().isoformat()) cfg.DIR.CHECKPOINTS = output_dir % 'checkpoints' cfg.DIR.LOGS = output_dir % 'logs' if not os.path.exists(cfg.DIR.CHECKPOINTS): os.makedirs(cfg.DIR.CHECKPOINTS) # Create tensorboard writers train_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'train')) val_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'test')) model = Model(dataset=cfg.DATASET.TRAIN_DATASET) init_epoch = 0 best_metrics = float('inf') optimizer = nn.Adam(model.parameters(), lr=cfg.TRAIN.LEARNING_RATE, weight_decay=cfg.TRAIN.WEIGHT_DECAY, betas=cfg.TRAIN.BETAS) lr_scheduler = jittor.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.TRAIN.LR_MILESTONES, gamma=cfg.TRAIN.GAMMA, last_epoch=init_epoch) # Training/Testing the network for epoch_idx in range(init_epoch + 1, cfg.TRAIN.N_EPOCHS + 1): epoch_start_time = time() # cd_eval = test_net(cfg, epoch_idx, val_data_loader, val_writer, model) batch_time = AverageMeter() data_time = AverageMeter() # cd_eval = test_net(cfg, epoch_idx, val_data_loader, val_writer, model) model.train() total_cd1 = 0 total_cd2 = 0 total_cd3 = 0 total_pmd = 0 batch_end_time = time() n_batches = len(train_data_loader) print('epoch: ', epoch_idx, 'optimizer: ', lr_scheduler.get_lr()) with tqdm(train_data_loader) as t: for batch_idx, (taxonomy_ids, model_ids, data) in enumerate(t): partial = jittor.array(data['partial_cloud']) gt = jittor.array(data['gtcloud']) pcds, deltas = model(partial) cd1 = chamfer(pcds[0], gt) cd2 = chamfer(pcds[1], gt) cd3 = chamfer(pcds[2], gt) loss_cd = cd1 + cd2 + cd3 delta_losses = [] for delta in deltas: delta_losses.append(jittor.sum(delta ** 2)) loss_pmd = jittor.sum(jittor.stack(delta_losses)) / 3 loss = loss_cd * cfg.TRAIN.LAMBDA_CD + loss_pmd * cfg.TRAIN.LAMBDA_PMD cd1_item = cd1.item() * 1e3 total_cd1 += cd1_item cd2_item = cd2.item() * 1e3 total_cd2 += cd2_item cd3_item = cd3.item() * 1e3 total_cd3 += cd3_item pmd_item = loss_pmd.item() total_pmd += pmd_item optimizer.step(loss) n_itr = (epoch_idx - 1) * n_batches + batch_idx train_writer.add_scalar('Loss/Batch/cd1', cd1_item, n_itr) train_writer.add_scalar('Loss/Batch/cd2', cd2_item, n_itr) 
train_writer.add_scalar('Loss/Batch/cd3', cd3_item, n_itr) train_writer.add_scalar('Loss/Batch/pmd', pmd_item, n_itr) batch_time.update(time() - batch_end_time) batch_end_time = time() t.set_description( '[Epoch %d/%d][Batch %d/%d]' % (epoch_idx, cfg.TRAIN.N_EPOCHS, batch_idx + 1, n_batches)) t.set_postfix(loss='%s' % ['%.4f' % l for l in [cd1_item, cd2_item, cd3_item, pmd_item]]) avg_cd1 = total_cd1 / n_batches avg_cd2 = total_cd2 / n_batches avg_cd3 = total_cd3 / n_batches avg_pmd = total_pmd / n_batches lr_scheduler.step() epoch_end_time = time() train_writer.add_scalar('Loss/Epoch/cd1', avg_cd1, epoch_idx) train_writer.add_scalar('Loss/Epoch/cd2', avg_cd2, epoch_idx) train_writer.add_scalar('Loss/Epoch/cd3', avg_cd3, epoch_idx) train_writer.add_scalar('Loss/Epoch/pmd', avg_pmd, epoch_idx) logging.info( '[Epoch %d/%d] EpochTime = %.3f (s) Losses = %s' % (epoch_idx, cfg.TRAIN.N_EPOCHS, epoch_end_time - epoch_start_time, ['%.4f' % l for l in [avg_cd1, avg_cd2, avg_cd3, avg_pmd]])) # Validate the current model cd_eval = test_net(cfg, epoch_idx, val_data_loader, val_writer, model) # Save checkpoints if epoch_idx % cfg.TRAIN.SAVE_FREQ == 0 or cd_eval < best_metrics: file_name = 'ckpt-best.pkl' if cd_eval < best_metrics else 'ckpt-epoch-%03d.pkl' % epoch_idx output_path = os.path.join(cfg.DIR.CHECKPOINTS, file_name) model.save(output_path) logging.info('Saved checkpoint to %s ...' % output_path) if cd_eval < best_metrics: best_metrics = cd_eval train_writer.close() val_writer.close()
'''The ``methods.account`` module provides simple implementations of common
methods around account creation and management.
'''

import create
import login
import logout
import update
""" Takes a preprocessed file and generate a file which contaitn the name of the biffile, read name and percent value, and a possible metadata """ from scipy.optimize import curve_fit def get_target_percent(percent_file, g_percent_value,nbin=101,mixture=True,minimum_percent_highsample=0.5): #print(percent_file) target_percent = pd.read_csv(percent_file, sep=" ", names=["readname", "percent", "error", "mod"]) target_percent.readname = [standardize_name(name) for name in target_percent.readname] target_percent.percent /= 100 if not mixture: return target_percent,None,None,base target_percent_sub = target_percent[target_percent.error < args.threshold] print("Nselected",len(target_percent_sub)) print(f"Actual percent {np.nanmean(target_percent.percent)}, ground truth {g_percent_value}") #print(target_percent_sub) h, e = np.histogram(target_percent_sub.percent, bins=nbin, range=[0, 1],density=True) base = target_percent["mod"][0] m = np.max(h) p = np.argmax(h) print(m,p) width = 0 # print(p) i = 0 for i in range(p, 0, -1): if h[i] < m / 4: break separable = False pylab.clf() try: def gaus(x, a, x0, sigma): return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) def two_gauss(x,a1,x01,sigma1,a2,x02,sigma2): if x01>x02 or x01< -0.1: return np.zeros_like(x) + 1000 if not(0<sigma1<0.42) or not(0<sigma2<0.4) or a1 < 0 or a2 < 0: return np.zeros_like(x)+1000 return gaus(x,a1,x01,sigma1) + gaus(x,a2,x02,sigma2) popt, pcov = curve_fit(two_gauss, e[:-1], h, p0=[m/2, 0.0, 0.1] + [m/2,g_percent_value / 100,0.1]) #pylab.plot(e[:-1], h, 'b+:', label='data') #print(g_percent_value) print(popt) #print(pcov) error = np.mean((two_gauss(e[:-1], *popt)-h)**2) print("error",error) p = 100*popt[-2] if error < 0.40 and (popt[-2] - popt[1] > 0.05): # to account for the fact that histo is not normalised separable = True if error < 100: pylab.plot(e[:-1], two_gauss(e[:-1], *popt), 'ro:', label='fit') except: #fit error pass if not separable and i * 100 / nbin > 10: # 10 is 10 percent separable=True threshold = p / nbin / 2 if np.mean(target_percent_sub.percent > threshold) < minimum_percent_highsample: print("Not separable because higher sample proportion is too low (minimum percent high sample %f)"%minimum_percent_highsample) print("Actual percent %.2f"%np.mean(target_percent_sub.percent > threshold)) separable = False if separable: # Separable threshold = p / nbin / 2 n_high = np.sum(target_percent_sub.percent > threshold) p_high = n_high / len(target_percent_sub) target_percent_value = min(g_percent_value / p_high, 100) print(f"threshold {threshold:.2f}, p read high {p_high:.2f} , target value high {target_percent_value:.2f}") pylab.hist(np.array(target_percent.percent), range=[0, 1], bins=nbin,density=True, label=f"thres {threshold:.2f}, p read high {p_high:.2f} , target value high {target_percent_value:.2f}") pylab.plot([p / nbin / 2, p / nbin / 2], [0, m]) pylab.legend() nf = args.output[:-4] + f"{base}_histo.png" print("Writing ", nf) pylab.savefig(nf) else: print("Not separable") pylab.hist(np.array(target_percent.percent), range=[0, 1], label="Not separable", bins=nbin,density=True) pylab.legend() nf = args.output[:-4] + f"{base}_histo.png" print("Writing ", nf) pylab.savefig(nf) target_percent_value = g_target threshold=0 return target_percent, target_percent_value, threshold, base if __name__ == "__main__": import h5py import pandas as pd import argparse import os import numpy as np import pylab import matplotlib as mpl from repnano.models.train_simple import iter_keys, get_type , standardize_name 
mpl.use("Agg") parser = argparse.ArgumentParser() parser.add_argument('--input', type=str ) parser.add_argument('--output', type=str) parser.add_argument('--percent', nargs="+",type=float) parser.add_argument('--type', type=str,help="Raw or Events",default="Events") parser.add_argument('--metadata', type=str,default="") parser.add_argument('--exclude', type=str,default="") parser.add_argument('--percent_file', nargs='+',type=str ,default=[""]) parser.add_argument('--mods', nargs='+',type=str ,default=[""]) parser.add_argument('--plot', action="store_true") parser.add_argument('--mixture', action="store_true") parser.add_argument('--threshold', type=float,default=0.17) parser.add_argument('--minimum_percent_highsample', type=float,default=0.5) args = parser.parse_args() #create output directory p,_ = os.path.split(args.output) os.makedirs(p,exist_ok=True) pf={} if args.percent_file != [""] and args.mixture: for pfile,g_target,mod in zip(args.percent_file,args.percent,args.mods): pf[mod] = get_target_percent(pfile,g_target,mixture=args.mixture,minimum_percent_highsample=args.minimum_percent_highsample) #print(pf[mod][0]) assert(mod==pf[mod][-1]) assert(len(args.mods) == len(args.percent)) data = [] file_path = os.path.abspath(args.input) h5 = h5py.File(file_path, "r") #print(target_percent) skip=0 typef = get_type(h5) #print(target_percent[:10]) for read_name in iter_keys(h5,typef=typef): #print(read_name) percent = [] error = [] for mod,initial_value in zip(args.mods,args.percent): if mod in pf.keys(): target_percent, target_percent_value, threshold,mod = pf[mod] selec = target_percent[target_percent.readname == standardize_name(read_name)] #print("Found",target_percent_value,threshold,np.array(selec.percent)[0]) #print(selec) if args.mixture: if len(selec) != 0 and (np.array(selec.percent)[0] > threshold): percent.append(target_percent_value) else: percent.append(0) else: percent.append(initial_value) if len(selec) != 0: error.append(np.array(selec.error)[0]) else: error.append(0) else: percent.append(initial_value) error.append(0) info = {"file_name":file_path,"readname":standardize_name(read_name), "type":args.type,"metadata":args.metadata,"exclude":args.exclude} for mod,p,e in zip(args.mods,percent,error): info[f"percent_{mod}"] = p info[f"error_{mod}"] = e #break data.append(info) #np.savetxt(args.output, pd.DataFrame(data), delimiter=';') pd.DataFrame(data).to_csv(args.output,index=False)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import MultipleLocator

import importlib
import sys
import os

if not '../aux/' in sys.path:
    sys.path.append('../aux/')

import paths; importlib.reload(paths)
import spec; importlib.reload(spec)
import nessy; importlib.reload(nessy)
import auxsys; importlib.reload(auxsys)
import auxplt; importlib.reload(auxplt)
import phys; importlib.reload(phys)

from tqdm import tqdm

#wvln, opan = nessy.read_lopa(paths.it0f + 'runtime/def', wvl1 = 1005., wvl2 = 2100)
#wvln /= 10.0
#np.savez(paths.npz + 'unsorted_opac_100_200', w = wvln, o = opan)

opac = np.load(paths.npz + 'unsorted_opac_100_200.npz')

wvln = opac['w']
opan = opac['o']

wvla, opaa = np.loadtxt('/mnt/SSD/sim/nessy/inp/odf/high_res_100_210/fort.92', unpack = True)

n = np.loadtxt(paths.it0f + '/runtime/def/atm.inp', usecols = [3])

apm = 2.137995438028139e-024

opaa *= n[54] * apm

wvln_s, opan_s = spec.mean_within_delta(wvln, opan[54, :], 0.05)
wvla_s, opaa_s = spec.mean_within_delta(wvla, opaa, 0.05)

#wvl_n = wvln
#wvl_a = wvla
#opa_n = opan[54, :]
#opa_a = opaa

wvl_n = wvln_s
wvl_a = wvla_s
opa_n = opan_s
opa_a = opaa_s

plt.close('all')

xlim = [[100, 110], [110, 120], [120, 130], [130, 140], [140, 150], [150, 160],
        [160, 170], [170, 180], [180, 190], [190, 200], [200, 210]]

fig, ax = plt.subplots(nrows = len(xlim), ncols = 2, figsize = (12.0, 22.0))

for i in range(len(ax)):

    ax[i, 0].plot(wvl_n, opa_n, color = 'k')
    ax[i, 0].plot(wvl_a, opa_a, color = 'r')

    wvln_s = np.sort(wvl_n[np.where((wvl_n >= xlim[i][0]) & (wvl_n <= xlim[i][1]))])
    wvla_s = np.sort(wvl_a[np.where((wvl_a >= xlim[i][0]) & (wvl_a <= xlim[i][1]))])

    opan_s = np.sort(opa_n[np.where((wvl_n >= xlim[i][0]) & (wvl_n <= xlim[i][1]))])
    opaa_s = np.sort(opa_a[np.where((wvl_a >= xlim[i][0]) & (wvl_a <= xlim[i][1]))])

    ax[i, 1].plot(wvln_s, opan_s, color = 'k', label = 'NESSY')
    ax[i, 1].plot(wvla_s, opaa_s, color = 'r', label = 'ATLAS')

    ax[i, 0].set_yscale('log')
    ax[i, 1].set_yscale('log')

    ax[i, 0].set_xlim(xlim[i][0], xlim[i][1])
    ax[i, 1].set_xlim(xlim[i][0], xlim[i][1])

    ax[i, 0].set_ylabel('Opacity')

ax[len(ax) - 1, 0].set_xlabel('Wavelength, nm')
ax[len(ax) - 1, 1].set_xlabel('Wavelength, nm')

#leg = ax[0, 1].legend(framealpha = 1, loc = 2, handletextpad = 1, prop = {'size': 7.5}, bbox_to_anchor=(0, 1.08))
leg = ax[0, 1].legend(framealpha = 1, loc = 2, handletextpad = 1, prop = {'size': 7.5})

for obj in leg.legendHandles:
    obj.set_linewidth(3.0)

auxplt.savepdf('unsorted_100_200', paths.figdir + 'plt_opac/')
#!/usr/bin/python
import hashlib
import sys, getopt


def main(argv):
    inputfile = ''
    logfile = ''
    try:
        opts, args = getopt.getopt(argv, "hi:l:", ["ifile=", "lfile="])
    except getopt.GetoptError:
        print 'FeelingPhishyEmailParser.py -i <inputfile> -l <logfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'FeelingPhishyEmailParser.py -i <emaillistfile> -l <logfile>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-l", "--lfile"):
            logfile = arg
    print 'Input Email file is:', inputfile
    print 'Log file is:', logfile

    m = hashlib.md5()
    salt = ""
    f = open(inputfile, 'r')
    for l in f.readlines():
        line = l.rstrip('\n')
        lineWithSalts = salt + line + salt
        print lineWithSalts
        hash = hashlib.md5(lineWithSalts).hexdigest()
        #print "Attempting to find email named " + line + " with hash " + hash
        ff = open(logfile, 'r')
        for ll in ff.readlines():
            csvLine = ll.split(',')
            #print "test hash:" + csvLine[0]
            if hash == csvLine[0]:
                print line + "," + ll + "," + csvLine[2] + "," + csvLine[3]
        ff.close()
    f.close()


if __name__ == "__main__":
    main(sys.argv[1:])
'''
This is the primary touchpaper module; the `main` function is called
when you run `zlist` on the command line.
'''

import argparse
import cssutils
import logging
import sys

from ._version import get_version

cssutils.log.setLevel(logging.CRITICAL)

'''
Main package routine

Parse CSS files for elements with a defined z-index and list them

Usage:
$ python zlist.py <file.css> <file2.css> ..
'''
def main():
    ''' Argument parser init '''
    parser = argparse.ArgumentParser(description='Parse CSS files for elements '
                                                 'with a defined z-index and '
                                                 'list them')
    parser.add_argument('-v', '--version', dest='version', action='store_true',
                        help='show package version information and exit')
    parser.add_argument('files', nargs='*')
    args = parser.parse_args()

    if args.version:
        print "zlist v%s" % get_version()
        sys.exit(0)

    ''' Iterate files supplied as args and parse them '''
    for filename in args.files:
        sheet = cssutils.parseFile(filename)
        zlist = []

        for rule in sheet:
            if rule.type == rule.STYLE_RULE:
                z = None
                for prop in rule.style:
                    if prop.name == 'z-index':
                        z = prop.value
                if z:
                    zlist.append([z, rule.selectorList])

        if zlist:
            print "%s: %d z-index declaration(s) found\n" % (filename, len(zlist))
            print "index |".rjust(13), " selector\n", "".rjust(30, '-')

            zlist.sort(key=lambda entry: int(entry[0]) if entry[0].isdigit() else entry[0])

            for entry in zlist:
                print entry[0].rjust(10), "".rjust(3),
                for selector in entry[1]:
                    if selector != entry[1][0]:
                        print "".rjust(14),
                    print selector.selectorText
                print ""
        else:
            print "%s: No z-index declarations found" % filename


if __name__ == "__main__":
    main()
from . import fast_nn from . import nn import tensorflow as tf from tensorflow.contrib.framework.python.ops import arg_scope import numpy as np UPDATE_V_STACK = 'update_v_stack' def undo_zeroth_row_bias_when_downshifting(row_output, row): '''The down_shifted_conv2d adds a bias to the row of all zeros. This removes that bias.''' return tf.cond( tf.equal(row, 0), lambda: tf.zeros_like(row_output), lambda: row_output) def undo_zeroth_column_bias_when_rightshifting(pixel_output, col): '''The down_shifted_conv2d adds a bias to the column of all zeros. This removes that bias.''' return tf.cond( tf.equal(col, 0), lambda: tf.zeros_like(pixel_output), lambda: pixel_output) def cache_v_stack_variable(v_stack_variable): '''Caches vertical stack hidden states. This avoids the need to pass the computed vertical stack in the feed_dict, which would involve CPU to GPU transfers.''' cache = tf.Variable( initial_value=np.zeros(v_stack_variable.get_shape().as_list()), name='v_stack_cache', dtype=tf.float32) update_v_stack_cache = cache.assign(v_stack_variable) tf.add_to_collection(UPDATE_V_STACK, update_v_stack_cache) reset_cache = cache.assign(tf.zeros_like(cache)) tf.add_to_collection(fast_nn.RESET_CACHE_COLLECTION, reset_cache) return cache def model_spec(row_input, pixel_input, row, col, image_size, h=None, nr_resnet=5, nr_filters=160, nr_logistic_mix=10, resnet_nonlinearity='concat_elu', seed=None): '''Creates the model. Follows the same model_spec structure as the original PixelCNN++.''' counters = {} with arg_scope( [ fast_nn.down_shifted_conv2d, fast_nn.down_right_shifted_conv2d, fast_nn.down_shifted_deconv2d, fast_nn.down_right_shifted_deconv2d, fast_nn.gated_resnet_vstack_only, fast_nn.gated_resnet_hstack, nn.dense ], counters=counters): # Parse resnet nonlinearity argument. if resnet_nonlinearity == 'concat_elu': resnet_nonlinearity = nn.concat_elu elif resnet_nonlinearity == 'elu': resnet_nonlinearity = tf.nn.elu elif resnet_nonlinearity == 'relu': resnet_nonlinearity = tf.nn.relu else: raise ('resnet nonlinearity ' + resnet_nonlinearity + ' is not supported') with arg_scope( [fast_nn.gated_resnet_vstack_only, fast_nn.gated_resnet_hstack], nonlinearity=resnet_nonlinearity, h=h): u_filter = [2, 3, nr_filters] ul_filter = [2, 2, nr_filters] cache_every, run_every = 1, 1 ## Downsampling pass. # The initial computation to the network. Importantly, it is assumed that the # vertical stack inputs are already downshifted, and the horizontal stack inputs # are already rightshifted. v_stack = [] u_list_input = fast_nn.down_shifted_conv2d( row_input, (image_size, u_filter), stride=1, row=row, cache_every=cache_every, run_every=run_every) u_list = [ undo_zeroth_row_bias_when_downshifting(u_list_input, row) ] v_stack.append(u_list[-1]) downshift_hstack_input = fast_nn.down_shifted_conv2d( row_input, (image_size, [1, 3, nr_filters]), stride=1, row=row, cache_every=cache_every, run_every=run_every) downshift_hstack_input = undo_zeroth_row_bias_when_downshifting( downshift_hstack_input, row) downshift_hstack_input = cache_v_stack_variable( downshift_hstack_input) v_stack.append(downshift_hstack_input) rightshift_hstack_input = fast_nn.down_right_shifted_conv2d( pixel_input, (image_size, [2, 1, nr_filters]), row=row, col=col, cache_every=cache_every, run_every=run_every) rightshift_hstack_input = undo_zeroth_column_bias_when_rightshifting( rightshift_hstack_input, col) ul_list = [ fast_nn.sum_rightshift_downshift(rightshift_hstack_input, downshift_hstack_input, col) ] # Gated resnet layers. 
image_size = (image_size[0], image_size[1], image_size[2], nr_filters) for rep in range(nr_resnet): u_list.append( fast_nn.gated_resnet_vstack_only( u_list[-1], (image_size, u_filter), row=row, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity)) v_stack.append(u_list[-1]) ul_list.append( fast_nn.gated_resnet_hstack( ul_list[-1], cache_v_stack_variable(u_list[-1]), (image_size, ul_filter), row=row, col=col, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity)) # Downsample. cache_every, run_every = 1, 2 u_list.append( fast_nn.down_shifted_conv2d( u_list[-1], (image_size, u_filter), stride=2, row=row, cache_every=cache_every, run_every=run_every)) v_stack.append(u_list[-1]) ul_list.append( fast_nn.down_right_shifted_conv2d( ul_list[-1], (image_size, ul_filter), row=row, col=col, cache_every=cache_every, run_every=run_every)) cache_every, run_every = 2, 2 image_size = (image_size[0], image_size[1] // 2, image_size[2] // 2, nr_filters) # Gated resnet layers. for rep in range(nr_resnet): u_list.append( fast_nn.gated_resnet_vstack_only( u_list[-1], (image_size, u_filter), row=row, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity)) v_stack.append(u_list[-1]) ul_list.append( fast_nn.gated_resnet_hstack( ul_list[-1], cache_v_stack_variable(u_list[-1]), (image_size, ul_filter), row=row, col=col, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity)) # Downsample. cache_every, run_every = 2, 4 u_list.append( fast_nn.down_shifted_conv2d( u_list[-1], (image_size, u_filter), stride=2, row=row, cache_every=cache_every, run_every=run_every)) v_stack.append(u_list[-1]) ul_list.append( fast_nn.down_right_shifted_conv2d( ul_list[-1], (image_size, ul_filter), row=row, col=col, cache_every=cache_every, run_every=run_every)) cache_every, run_every = 4, 4 image_size = (image_size[0], image_size[1] // 2, image_size[2] // 2, nr_filters) # Gated resnet layers. for rep in range(nr_resnet): u_list.append( fast_nn.gated_resnet_vstack_only( u_list[-1], (image_size, u_filter), row=row, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity)) v_stack.append(u_list[-1]) ul_list.append( fast_nn.gated_resnet_hstack( ul_list[-1], cache_v_stack_variable(u_list[-1]), (image_size, ul_filter), row=row, col=col, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity)) # Upsampling pass. u = u_list.pop() ul = ul_list.pop() for rep in range(nr_resnet): u = fast_nn.gated_resnet_vstack_only( u, (image_size, u_filter), extra_row_input=u_list.pop(), row=row, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity) v_stack.append(u) ul = fast_nn.gated_resnet_hstack( ul, cache_v_stack_variable(u), (image_size, ul_filter), extra_pixel_input=ul_list.pop(), row=row, col=col, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity) # Upsample. cache_every, run_every = 4, 2 u = fast_nn.down_shifted_deconv2d( u, (image_size, u_filter), stride=2, row=row, cache_every=cache_every, run_every=run_every) v_stack.append(u) ul = fast_nn.down_right_shifted_deconv2d( ul, (image_size, ul_filter), row=row, col=col, cache_every=cache_every, run_every=run_every) cache_every, run_every = 2, 2 image_size = (image_size[0], image_size[1] * 2, image_size[2] * 2, nr_filters) # Gated resnet layers. 
for rep in range(nr_resnet + 1): u = fast_nn.gated_resnet_vstack_only( u, (image_size, u_filter), extra_row_input=u_list.pop(), row=row, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity) v_stack.append(u) ul = fast_nn.gated_resnet_hstack( ul, cache_v_stack_variable(u), (image_size, ul_filter), extra_pixel_input=ul_list.pop(), row=row, col=col, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity) # Upsample. cache_every, run_every = 2, 1 u = fast_nn.down_shifted_deconv2d( u, (image_size, u_filter), stride=2, row=row, cache_every=cache_every, run_every=run_every) v_stack.append(u) ul = fast_nn.down_right_shifted_deconv2d( ul, (image_size, ul_filter), row=row, col=col, cache_every=cache_every, run_every=run_every) cache_every, run_every = 1, 1 image_size = (image_size[0], image_size[1] * 2, image_size[2] * 2, nr_filters) # Gated resnet layers. for rep in range(nr_resnet + 1): u = fast_nn.gated_resnet_vstack_only( u, (image_size, u_filter), extra_row_input=u_list.pop(), row=row, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity) v_stack.append(u) ul = fast_nn.gated_resnet_hstack( ul, cache_v_stack_variable(u), (image_size, ul_filter), extra_pixel_input=ul_list.pop(), row=row, col=col, cache_every=cache_every, run_every=run_every, nonlinearity=resnet_nonlinearity) assert len(u_list) == 0 assert len(ul_list) == 0 x_out = nn.nin(tf.nn.elu(ul), 10 * nr_logistic_mix) sample = nn.sample_from_discretized_mix_logistic( x_out, nr_logistic_mix, seed=seed) cache_v_stack = tf.group(*tf.get_collection(UPDATE_V_STACK)) return sample, x_out, cache_v_stack
from docassemble.webapp.db_object import db
from docassemble.webapp.core.models import Uploads


def get_new_file_number(user_code, file_name, yaml_file_name=None):
    new_upload = Uploads(key=user_code, filename=file_name, yamlfile=yaml_file_name)
    db.session.add(new_upload)
    db.session.commit()
    return new_upload.indexno
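# --- Hypothetical usage sketch (not part of the original file): assumes this is
# --- called inside a running docassemble web app, where `db.session` is bound
# --- to the application's database. The user code and filenames are placeholders.
file_number = get_new_file_number('user_abc123', 'interview_answers.json')
print(file_number)  # the auto-generated index number of the new Uploads row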
import unittest

from parse import *


class ParseTestClass(unittest.TestCase):
    def setUp(self):
        # Load source code for test library
        self.library = LibrarySource('tests/example_library')
        self.routines = dict(
            (r.name, r) for r in self.library.public_subroutines)
        self.test_routine = (
            self.routines["cmfe_Example_SomeInterfaceObj"])

    def test_string_routines(self):
        """Only routines that use C strings should be included

        This is based purely on the routine name
        """
        self.assertFalse("cmfe_StringRoutineVSObj" in self.routines)
        self.assertFalse("cmfe_StringRoutineVSNumber" in self.routines)
        self.assertTrue("cmfe_StringRoutineCObj" in self.routines)
        self.assertTrue("cmfe_StringRoutineCNumber" in self.routines)

    def test_public(self):
        """Check we don't get any non-public routines"""
        self.assertFalse("cmfe_NonPublicRoutine" in self.routines)

    def test_array_routines(self):
        """Routine that takes an array rather than scalar should be used

        This is based purely on the routine name
        """
        self.assertFalse("cmfe_ArrayRoutine0" in self.routines)
        self.assertTrue("cmfe_ArrayRoutine1" in self.routines)

    def test_parameter_intent(self):
        """Check parameter intents are correct"""
        for param in self.test_routine.parameters:
            if param.name.startswith("Input"):
                assert param.intent == 'IN'
            elif param.name.endswith("Output"):
                assert param.intent == 'OUT'

    def test_parameter_arrays(self):
        """Check array dimensions are correct"""
        for param in self.test_routine.parameters:
            if param.name.endswith("Array2D"):
                self.assertEqual(param.array_dims, 2)
                self.assertEqual(param.array_spec, [':', ':'])
                self.assertEqual(param.required_sizes, 2)
            elif param.name.endswith("Array"):
                self.assertEqual(param.array_dims, 1)
                self.assertEqual(param.array_spec, [':'])
                self.assertEqual(param.required_sizes, 1)
            elif param.var_type == Parameter.CHARACTER:
                self.assertEqual(param.array_dims, 1)
                self.assertEqual(param.array_spec, [':'])
                self.assertEqual(param.required_sizes, 1)
            elif param.name == "ArrayWithSize":
                self.assertEqual(param.array_dims, 1)
                self.assertEqual(param.array_spec, ['2'])
                self.assertEqual(param.required_sizes, 0)
            else:
                self.assertEqual(param.array_dims, 0)
                self.assertEqual(param.array_spec, [])

    def test_parameter_comments(self):
        """Check doxygen comments are correct"""
        for param in self.test_routine.parameters:
            self.assertEqual(param.comment, "Comment for %s" % param.name)

    def test_parameters(self):
        """Check number of parameters and that Err isn't included"""
        self.assertEqual(len(self.test_routine.parameters), 10)
        self.assertFalse(
            "Err" in [p.name for p in self.test_routine.parameters])

    def test_enum(self):
        """Check enum has correct parameters"""
        (enums, ungrouped_constants) = self.library.group_constants()
        self.assertEqual(len(ungrouped_constants), 1)
        self.assertEqual(len(enums), 1)
        enum = enums[0]
        self.assertEqual(len(enum.constants), 3)
        self.assertEqual(enum.comment, "Example of an enum")
        self.assertEqual(enum.name, "ExampleEnum")
        self.assertEqual(enum.constants[0].value, 1)
        self.assertFalse("NON_PUBLIC_CONSTANT" in ungrouped_constants)

    def test_method(self):
        """Test that correct routines are set as methods of type"""
        type = self.library.lib_source.types["cmfe_ExampleType"]
        # Make sure we get Initialise and CreateStart
        self.assertEqual(len(type.methods), 3)


if __name__ == '__main__':
    unittest.main()
import pickle


def output_file(obj, filename):
    with open(filename, "wb") as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)


def input_file(filename):
    with open(filename, "rb") as f:
        return pickle.load(f)
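# Usage sketch (illustrative only, not part of the original helpers): round-trip an
# object through output_file/input_file. The filename 'cache.pkl' is hypothetical.
if __name__ == '__main__':
    data = {"ids": [1, 2, 3], "label": "example"}
    output_file(data, "cache.pkl")
    restored = input_file("cache.pkl")
    assert restored == data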
# Import all the models, so that Base has them before being # imported by Alembic from app.db.base_class import Base # noqa from app.db.base_class import FLBase # noqa from app.models.audit import Audit # noqa from app.models.item import Item # noqa from app.models.need import Need # noqa from app.models.need_note import NeedNote # noqa from app.models.note import Note # noqa from app.models.user import User # noqa
from dataclasses import dataclass from typing import Generic, Mapping, TypeVar __all__ = ["Curve"] T = TypeVar("T") U = TypeVar("U") @dataclass class _Curve(Generic[T, U]): mapping: Mapping[T, U]
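# Minimal usage sketch (assumption: callers either construct the private dataclass
# directly, or the package aliases it to the exported name "Curve" elsewhere).
if __name__ == '__main__':
    rate_curve = _Curve(mapping={0.5: 0.011, 1.0: 0.013, 2.0: 0.017})  # tenor -> rate
    print(rate_curve.mapping[1.0])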
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-07-21 12:46 from __future__ import unicode_literals import datetime from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('loans', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='loan', name='expected_end', ), migrations.AddField( model_name='loan', name='time', field=models.IntegerField(default=1), preserve_default=False, ), migrations.AlterField( model_name='loan', name='start', field=models.DateField(default=datetime.datetime(2016, 7, 21, 12, 46, 32, 256425, tzinfo=utc)), preserve_default=False, ), ]
#!/bin/env python3 # pylint: disable=bad-builtin,deprecated-lambda import string def is_ascii(s): return all(map(lambda c: ord(c) < 127, s)) def is_ascii_punctuation(s): return all(map(lambda c: c in string.punctuation, s)) def is_ascii_printable(s): return all(map(lambda c: c in string.printable, s)) def test(): assert is_ascii("1235") is True assert is_ascii("1235\xaa") is False assert is_ascii_punctuation(",.;") is True assert is_ascii_punctuation("foo") is False assert is_ascii_printable("foo") is True assert is_ascii_printable("foo\xaa") is False if __name__ == '__main__': test()
# Authors: Hyunwoo Lee <hyunwoo9301@naver.com> # Released under the MIT license. from api.common import * synonym_data_file = 'synonym.txt' synonym_data = read_data(synonym_data_file) synonym_dictionary = {} for row in synonym_data: for word in row[1].split(','): synonym_dictionary[word] = row[0] def transform_synonym(text): for word in synonym_dictionary.keys(): text = text.replace(word, synonym_dictionary[word]) return text
from typing import Union import tkinter as tk import tkinter.ttk as ttk from matplotlib.figure import Figure from matplotlib.backends.backend_tkagg import ( FigureCanvasTkAgg, NavigationToolbar2Tk ) # Implement the default Matplotlib key bindings. from matplotlib.backend_bases import key_press_handler def createPlot(master: Union[tk.Frame, ttk.Frame], fig: Figure, canvasMaster: Union[tk.Frame, ttk.Frame]=None): """Creates the plot canvas and connects the toolbar to it. You have to place the canvas and toolbar yourself via `.pack()` or `.grid()`. To place the canvas call `canvas.get_tk_widget().pack()` """ canvas = FigureCanvasTkAgg(fig, master=canvasMaster or master) canvas.draw() canvas.mpl_connect("key_press_event", key_press_handler) toolbar = NavigationToolbar2Tk(canvas, master, pack_toolbar=False) toolbar.update() return canvas, toolbar
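# Usage sketch (not part of the original module; assumes a standard Tk/Matplotlib setup):
# build a figure, attach it with createPlot, then place canvas and toolbar yourself as
# the docstring requires.
if __name__ == '__main__':
    root = tk.Tk()
    frame = ttk.Frame(root)
    frame.pack(fill=tk.BOTH, expand=True)
    fig = Figure(figsize=(4, 3))
    fig.add_subplot(111).plot([0, 1, 2], [0, 1, 4])
    canvas, toolbar = createPlot(frame, fig)
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    toolbar.pack(side=tk.BOTTOM, fill=tk.X)
    root.mainloop()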
from .typeo import spoof, typeo
from setuptools import setup setup( name='property-caching', version='1.1.0', description='Property caching', author='Yola', author_email='engineers@yola.com', license='MIT (Expat)', url='https://github.com/yola/property-caching', packages=['property_caching'], test_suite='tests', classifiers=[ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', ] )
#!/usr/bin/python from pygame import camera import pygame import time from HeadsUpDisplay import HUD, Locations def main_loop(): camera.init() cam = camera.Camera(camera.list_cameras()[0]) (width, height) = cam.get_size() screen = pygame.display.set_mode((width, height)) pygame.display.set_caption('Webcam Thing') pygame.init() cam.start() clock = pygame.time.Clock() hud = HUD((width, height)) countdown = -1 running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False break if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: running = False break elif event.key == pygame.K_SPACE: countdown = int(time.time()) + 3 print 'set countdown to {t}'.format(t=countdown) now = time.time() cam_surf = cam.get_image() screen.blit(cam_surf, (0,0)) if countdown > 0: hud.blit_to_surface(screen) pygame.display.flip() if countdown > now: hud.set_text(Locations.TOPCENTER, str(countdown - int(time.time()))) elif countdown > -1: filename = 'images/webcam_{t}.png'.format(t=time.time()) print 'saving image "{f}"'.format(f=filename) countdown = -1 pygame.image.save(cam_surf, filename) clock.tick(30) cam.stop() pygame.quit() exit() if __name__ == '__main__': main_loop()
import label_wav
import csv
import os

# from input_data import AudioProcessor
# audio_processor = AudioProcessor(data_dir='~/projects/tensorflow_data/test.7z')


def prepare_csv_file(csv_dir, csv_headers):
    # Delete csv file if it exists
    try:
        os.remove(csv_dir)
    except OSError:
        pass
    # Add CSV header
    with open(csv_dir, 'w') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|',
                               quoting=csv.QUOTE_MINIMAL)
        csvwriter.writerow(csv_headers)


def label_test_files(data_dir, graph, labels, input_name, output_name,
                     how_many_labels, csv_dir=None, csv_headers=None):
    test_outputs = []
    # Loop through files in all the directories to get the audio files
    # for testing
    if csv_dir:
        prepare_csv_file(csv_dir, csv_headers)
    count = 0
    for subdir, dirs, files in os.walk(data_dir):
        for file in files:
            # print(os.path.join(subdir, file))
            filepath = subdir + os.sep + file
            if filepath.endswith(".wav"):
                count += 1
                print(count, ". File: ", file)
                test_outputs.append(
                    label_wav.label_wav(
                        wav=filepath, labels=labels, graph=graph,
                        input_name=input_name, output_name=output_name,
                        how_many_labels=how_many_labels))
    return test_outputs


def print_to_csv(data, csv_dir):
    # print ("CSV Here")
    with open(csv_dir, 'a+') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|',
                               quoting=csv.QUOTE_MINIMAL)
        csvwriter.writerows(data)


graph = '/tmp/my_frozen_graph.pb'
labels = '/Users/wcyn/venv-projects/tensor-f-tut/tensorflow/tensorflow/examples/speech_commands/speech_labels.txt'
input_name, output_name, how_many_labels = ('wav_data:0', 'labels_softmax:0', 3)
# data_dir = '/Users/wcyn/Documents/other'
data_dir = '/Users/wcyn/projects/tensorflow_data/test'
csv_dir = '/Users/wcyn/projects/tensorflow_data/test_outputs.csv'
csv_headers = ['fname', 'label']

print_to_csv(label_test_files(data_dir, graph, labels, input_name,
                              output_name, how_many_labels), csv_dir)

# Shell command (not Python) used to create the frozen graph referenced above;
# kept as a comment so the module remains valid Python:
# python tensorflow/examples/speech_commands/freeze.py \
#     --start_checkpoint=/Users/wcyn/projects/tensorflow_data/speech_commands_train/conv.ckpt-18000 \
#     --output_file=/Users/wcyn/projects/tensorflow_data/my_frozen_graph.pb
""" pipe_base.py Unify DriverTask and PipeTask with one abstract base class. """ # Using print as a function makes it easier to switch between printing # during development and using logging.{debug, info, ...} in production. from __future__ import print_function from abc import ABCMeta, abstractmethod import os import sys import shutil import getpass import subprocess import inspect import collections import luigi import six from six.moves import urllib import numpy as np import pandas as pd import disdat.common as common from disdat.fs import DisdatFS from disdat.data_context import DataContext from disdat.hyperframe import LineageRecord, HyperFrameRecord, FrameRecord import disdat.hyperframe_pb2 as hyperframe_pb2 from disdat import logger as _logger CodeVersion = collections.namedtuple('CodeVersion', 'semver hash tstamp branch url dirty') class PipeBase(object): __metaclass__ = ABCMeta BUNDLE_META = 'bundle_meta' BUNDLE_LINEAGE = 'bundle_lineage' HFRAME = 'hframe' FRAME = 'frame' AUTH = 'auth' @property def pfs(self): return DisdatFS() @abstractmethod def bundle_outputs(self): """ Given this pipe, return the set of bundles created by this pipe. Mirrors Luigi task.outputs() :param pipe_task: A PipeTask or a DriverTask (both implement PipeBase) :return: list of bundle names """ pass @abstractmethod def bundle_inputs(self): """ Given this pipe, return the set of bundles created by the input pipes. Mirrors Luigi task.inputs() :param pipe_task: A PipeTask or a DriverTask (both implement PipeBase) Returns [(bundle_name, uuid), ... ] """ pass @abstractmethod def pipe_id(self): """ Given a pipe instance, return a unique string based on the class name and the parameters. Bundle Tag: Used to fill in bundle.processing_name """ pass @abstractmethod def pipeline_id(self): """ This is a "less unique" id than the unique id. It is supposed to be the "human readable" name of the stage this pipe occupies in the pipesline. Bundle Tag: Used to fill in bundle.bundle_name """ pass @staticmethod def add_bundle_meta_files(pipe_task): """ Given a pipe or driver task, create the bundle metaoutput files and Luigi output targets for them. Use the pipe_task (or driver task) to get the name of the bundle. Use the name of the bundle to look up the output path in the pipe cache in the PipeFS class object. Create an hframe. The individual frame records have to be written out before hand. 
Args: pipe_task: The pipe task that will use these outputs Returns: [ luigi output for meta file, luigi output for lineage file ] """ pce = DisdatFS.get_path_cache(pipe_task) if pce is None: # This can happen when the pipe has been created with non-deterministic parameters _logger.error("add_bundle_meta_files: could not find pce for task {}".format(pipe_task.pipe_id())) _logger.error("It is possible one of your tasks is parameterized in a non-deterministic fashion.") raise Exception("add_bundle_meta_files: Unable to find pce for task {}".format(pipe_task.pipe_id())) hframe = {PipeBase.HFRAME: luigi.LocalTarget(os.path.join(pce.path, HyperFrameRecord.make_filename(pce.uuid)))} return hframe @staticmethod def make_hframe(output_frames, output_bundle_uuid, depends_on, human_name, processing_name, class_to_version, start_ts=0, stop_ts=0, tags=None, presentation=hyperframe_pb2.DEFAULT): """ Create HyperFrameRecord or HFR HFR contains a LineageRecord HFR contains UUIDs of FrameRecords or FRs FR contains data or LinkRecords Use the pipe_task to look in the path cache for the output directory Use the pipe_task outputs to find the named file for the final HF proto buf file. Write out all Frames, and at the very last moment, write out the HF proto buff. Args: output_frames (:list:`FrameRecord`): List of frames to be placed in bundle / hframe output_bundle_uuid: depends_on (:list:tuple): must be the processing_name, uuid of the upstream pipes / base bundles human_name: processing_name: class_to_version: A python class whose file is under git control start_ts (float): timestamp of task start time stop_ts (float): timestamp of task stop time tags: presentation (enum): how to present this hframe when we use it as input to a function -- default None That default means it will be a HF, but it wasn't a "presentable" hyperframe. Returns: `HyperFrameRecord` """ # Grab code version and path cache entry -- only called if we ran pipeline_path = os.path.dirname(sys.modules[class_to_version.__module__].__file__) cv = DisdatFS().get_pipe_version(pipeline_path) lr = LineageRecord(hframe_name=processing_name, hframe_uuid=output_bundle_uuid, code_repo=cv.url, code_name='unknown', code_semver=cv.semver, code_hash=cv.hash, code_branch=cv.branch, depends_on=depends_on, start_ts=start_ts, stop_ts=stop_ts) hfr = HyperFrameRecord(owner=getpass.getuser(), human_name=human_name, processing_name=processing_name, uuid=output_bundle_uuid, frames=output_frames, lin_obj=lr, tags=tags, presentation=presentation) return hfr @staticmethod def _interpret_scheme(full_path): scheme = urllib.parse.urlparse(full_path).scheme if scheme == '' or scheme == 'file': ''' LOCAL FILE ''' return luigi.LocalTarget(full_path) elif scheme == 's3': ''' S3 FILE ''' return luigi.s3.S3Target(full_path) assert False def make_luigi_targets_from_fqp(self, output_value): """ Given Fully Qualified Path -- Determine the Luigi objects This is called from the output of PipeExternalBundle. Given [], return [] of Luigi targets. If len([]) == 1, return without [] Args: output_value: Returns: """ if isinstance(output_value, list) or isinstance(output_value, tuple) or isinstance(output_value, dict): assert False else: # This is principally for PipesExternalBundle, in which there is no index. 
luigi_outputs = self._interpret_scheme(output_value) print("OUTPUT VAL {} output {}".format(output_value, luigi_outputs)) return luigi_outputs @staticmethod def filename_to_luigi_targets(output_dir, output_value): """ Create Luigi file objects from a file name, dictionary of file names, or list of file names. Return the same object type as output_value, but with Luigi.Targets instead. Args: output_dir (str): Managed output path. output_value (str, dict, list): A basename, dictionary of basenames, or list of basenames. Return: (`luigi.LocalTarget`, `luigi.S3Target`): Singleton, list, or dictionary of Luigi Target objects. """ if isinstance(output_value, list) or isinstance(output_value, tuple): luigi_outputs = [] for i in output_value: full_path = os.path.join(output_dir, i) luigi_outputs.append(PipeBase._interpret_scheme(full_path)) if len(luigi_outputs) == 1: luigi_outputs = luigi_outputs[0] elif isinstance(output_value, dict): luigi_outputs = {} for k, v in output_value.items(): full_path = os.path.join(output_dir, v) luigi_outputs[k] = PipeBase._interpret_scheme(full_path) else: full_path = os.path.join(output_dir, output_value) luigi_outputs = PipeBase._interpret_scheme(full_path) return luigi_outputs def make_luigi_targets_from_basename(self, output_value): """ Determine the output paths AND create the Luigi objects. Return the same object type as output_value, but with Luigi.Targets instead. Note that we get the path from the DisdatFS Path Cache. The path cache is a dictionary from pipe.unique_id() to a path_cache_entry, which contains the fields: instance uuid path rerun Args: output_value (str, dict, list): A basename, dictionary of basenames, or list of basenames. Return: (`luigi.LocalTarget`, `luigi.S3Target`): Singleton, list, or dictionary of Luigi Target objects. """ # Find the path cache entry for this pipe to find its output path pce = self.pfs.get_path_cache(self) assert(pce is not None) return self.filename_to_luigi_targets(pce.path, output_value) @staticmethod def rm_bundle_dir(output_path, uuid, db_targets): """ We created a directory (managed path) to hold the bundle and any files. The files have been copied in. Removing the directory removes any created files. If the user has told us about any DBTargets, also call rm() on those. TODO: Integrate with data_context bundle remove. That deals with information already stored in the local DB. ASSUMES: That we haven't actually updated the local DB with information on this bundle. Args: output_path (str): uuid (str): db_targets (list(DBTarget)): Returns: None """ try: shutil.rmtree(output_path) # if people create s3 files, s3 file targets, inside of an s3 context, # then we will have to clean those up as well. for t in db_targets: t.rm() except IOError as why: _logger.error("Removal of hyperframe directory {} failed with error {}. Continuing removal...".format( uuid, why)) @staticmethod def parse_return_val(hfid, val, data_context): """ Interpret the return values and create an HFrame to wrap them. This means setting the correct presentation bit in the HFrame so that we call downstream tasks with parameters as the author intended. POLICY / NOTE: An non-HF output is a Presentable. NOTE: For now, a task output is *always* presentable. NOTE: No other code should set presentation in a HyperFrame. 
The mirror to this function (that unpacks a presentable is disdat.fs.present_hfr() Args: hfid (str): UUID val (object): A scalar, dict, tuple, list, dataframe data_context (DataContext): The data context into which to place this value Returns: (presentation, frames[]) """ possible_scalar_types = ( int, float, str, bool, np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, six.binary_type, six.text_type, np.unicode_, np.string_ ) frames = [] managed_path = os.path.join(data_context.get_object_dir(), hfid) if val is None: presentation = hyperframe_pb2.HF elif isinstance(val, HyperFrameRecord): presentation = hyperframe_pb2.HF frames.append(FrameRecord.make_hframe_frame(hfid, pipe.pipeline_id(), [val])) elif isinstance(val, np.ndarray) or isinstance(val, list): presentation = hyperframe_pb2.TENSOR if isinstance(val, list): val = np.array(val) frames.append(DataContext.convert_serieslike2frame(hfid, common.DEFAULT_FRAME_NAME + ':0', val, managed_path)) elif isinstance(val, tuple): presentation = hyperframe_pb2.ROW for i, _ in enumerate(val): frames.append(DataContext.convert_serieslike2frame(hfid, common.DEFAULT_FRAME_NAME + ':{}'.format(i), val, managed_path)) elif isinstance(val, dict): presentation = hyperframe_pb2.ROW for k, v in val.items(): if not isinstance(v, (list, tuple, pd.core.series.Series, np.ndarray, collections.Sequence)): # assuming this is a scalar assert isinstance(v, possible_scalar_types), 'Disdat requires dictionary values to be one of {} not {}'.format(possible_scalar_types, type(v)) frames.append(DataContext.convert_scalar2frame(hfid, k, v, managed_path)) else: assert isinstance(v, (list, tuple, pd.core.series.Series, np.ndarray, collections.Sequence)) frames.append(DataContext.convert_serieslike2frame(hfid, k, v, managed_path)) elif isinstance(val, pd.DataFrame): presentation = hyperframe_pb2.DF frames.extend(DataContext.convert_df2frames(hfid, val, managed_path)) else: presentation = hyperframe_pb2.SCALAR frames.append(DataContext.convert_scalar2frame(hfid, common.DEFAULT_FRAME_NAME + ':0', val, managed_path)) return presentation, frames
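# Illustrative sketch (not part of Disdat): filename_to_luigi_targets is a staticmethod,
# so it can be exercised directly to see how basenames map onto Luigi targets under a
# managed output directory. '/tmp/example_bundle' and the basenames are hypothetical.
if __name__ == '__main__':
    targets = PipeBase.filename_to_luigi_targets('/tmp/example_bundle',
                                                 {'train': 'train.csv', 'test': 'test.csv'})
    for key, target in targets.items():
        print(key, '->', target.path)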
# encoding: utf-8 """ Some functions that cannot be in pdf.py file to prevent import loop. """ from cStringIO import StringIO import logging from tempfile import NamedTemporaryFile from django.conf import settings from django.core.files import File from membership.billing import pdf logger = logging.getLogger("membership.billing.pdf") def create_reminder_pdf(cycles, output_file, payments=None): """ Generate reminder pdf with billing cycles `cycles` to file `output_file` :param cycles: list of billingcycles :param output_file: File-like object :return: None """ p = pdf.PDFReminder(output_file) try: p.addCycles(cycles, payments=payments) p.generate() return None except Exception as e: logger.exception(e) logger.error("Failed to generate reminder pdf") raise def get_bill_pdf(bill, payments=None): """ Get from pdf_file field or generate pdf for Bill :param bill: Bill :return: pdf file content """ if not bill.pdf_file: buffer = StringIO() if bill.is_reminder(): # This is reminder p = pdf.PDFReminder(buffer) else: p = pdf.PDFInvoice(buffer) p.addBill(bill, payments=payments) p.generate() buffer.seek(0) myfile = File(buffer) bill.pdf_file.save('bill_%d.pdf' % bill.id, myfile) buffer.close() return bill.pdf_file.read()
#!/usr/bin/python # -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <info@cloudrunner.io> # * # * Proprietary and confidential # * This file is part of CloudRunner Server. # * # * CloudRunner Server can not be copied and/or distributed # * without the express permission of CloudRunner.io # *******************************************************/ import json import logging from pecan import expose, request, conf import threading from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.util import JsonOutput as O from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.model.nodes import (Node, NodeGroup, Org, Reservation) from cloudrunner_server.master.functions import CertController LOG = logging.getLogger() def _serialize(nodes): return [node.name for node in nodes] class Nodes(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Node, model_name='Node') def nodes(self, name=None, **kwargs): if name: node = Node.visible(request).filter(Node.name == name).first() return O.node(node.serialize(skip=['id', 'org_id'])) else: nodes = sorted(Node.visible(request).all(), key=lambda n: n.name) groups = sorted(NodeGroup.visible(request).all(), key=lambda g: g.name) return O._anon(nodes=[n.serialize( skip=['id', 'org_id'], rel=[('meta', 'meta', lambda n: json.loads(n) if n else {}), ('tags', 'tags', lambda lst: [x.value for x in lst] if lst else [])]) for n in nodes], groups=[g.serialize( skip=['id', 'org_id'], rel=[('nodes', 'members', _serialize)] ) for g in groups], quota=dict(allowed=request.user.tier.nodes)) @nodes.when(method='POST', template='json') @check_policy('is_admin') @nodes.wrap_create() def register(self, node=None, **kwargs): return O.error(msg="Not implemented") node = node or kwargs['node'] org = request.db.query(Org).filter( Org.name == request.user.org).one() n = Node(name=node, org=org) r = Reservation(node=n, username=kwargs['username'], password=kwargs['password'], ssh_pubkey=kwargs['ssh_pubkey'], disable_pass=kwargs.get('disable_pass') in ['1', 'true', 'True']) request.db.add(n) request.db.add(r) request.db.commit() t = threading.Thread(target=node_registration, args=n.serialize( rel=['reservations', 'reservations'])) t.start() return O.success(msg="Node registration started") @nodes.when(method='PUT', template='json') @check_policy('is_admin') @nodes.wrap_modify() def approve(self, node=None, **kwargs): node = node or kwargs['node'] n = Node.visible(request).filter(Node.name == node, Node.approved == False).first() # noqa if not n: return O.error(msg="Node not found") cert = CertController(conf.cr_config) msg, crt_file = cert.sign_node(n.name, ca=request.user.org) if not crt_file: LOG.error(msg) return O.error(msg="Cannot sign node") @nodes.when(method='DELETE', template='json') @check_policy('is_admin') @nodes.wrap_modify() def revoke(self, node): n = Node.visible(request).filter(Node.name == node).first() if not n: return O.error(msg="Node not found") cert = CertController(conf.cr_config) if n.approved: [m[1] for m in cert.revoke(n.name, ca=request.user.org)] request.db.delete(n) else: [m[1] for m in cert.clear_req(n.name, ca=request.user.org)] request.db.delete(n) @expose('json', generic=True) @check_policy('is_admin') @wrap_command(NodeGroup, model_name='Group') def nodegroups(self, name=None, **kwargs): if name: group = NodeGroup.visible(request).filter( 
NodeGroup.name == name).first() return O.group(group.serialize(skip=['id', 'org_id'])) else: groups = NodeGroup.visible(request).all() return O.groups(_list=[g.serialize( skip=['id', 'org_id'], rel=[('nodes', 'members', _serialize)] ) for g in groups]) @nodegroups.when(method='POST', template='json') @check_policy('is_admin') @nodegroups.wrap_modify() @wrap_command(NodeGroup, model_name='Group') def groups_create(self, name=None, **kwargs): if not name: return O.error(msg="Name not provided") org = request.db.query(Org).filter( Org.name == request.user.org).one() group = NodeGroup(name=name, org=org) request.db.add(group) @nodegroups.when(method='PATCH', template='json') @check_policy('is_admin') @nodegroups.wrap_modify() @wrap_command(NodeGroup, model_name='Group') def groups_modify(self, name=None, **kwargs): group = NodeGroup.visible(request).filter( NodeGroup.name == name).first() if not group: return O.error(msg="Group not found") nodes = request.POST.getall('nodes') if nodes: to_remove = [n for n in group.nodes if n.name not in nodes] for n in to_remove: group.nodes.remove(n) new_nodes = Node.visible(request).filter( Node.name.in_(nodes)).all() for n in new_nodes: group.nodes.append(n) else: group.nodes[:] = [] request.db.add(group) @nodegroups.when(method='DELETE', template='json') @check_policy('is_admin') @nodegroups.wrap_delete() @wrap_command(NodeGroup, model_name='Group') def groups_delete(self, name=None, **kwargs): if not name: return O.error(msg="Name not provided") group = NodeGroup.visible(request).filter( NodeGroup.name == name).first() if not group: return O.error(msg="Group not found") request.db.delete(group) def node_registration(data): print data
""" XRay Image Container """ import os.path class Image: #pylint: disable=too-few-public-methods """ Image file data """ def __init__(self, **kwargs): self.filename = kwargs.get('filename') abspath_file = os.path.abspath(self.filename) self.path, _imagename = os.path.split(abspath_file) self.imagename, self.extension = os.path.splitext(_imagename) if self.extension[0] == '.': self.extension = self.extension[1:] def __str__(self): return f"Image: {self.imagename} - Format: {self.extension}"
class Node: def __init__(self, data): self.data = data self.right_child = None self.left_child = None
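# Tiny usage sketch: hand-building a three-node binary tree with the class above.
if __name__ == '__main__':
    root = Node(10)
    root.left_child = Node(5)
    root.right_child = Node(15)
    print(root.data, root.left_child.data, root.right_child.data)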
from utils import train, Pars, create_image, create_outputfolder, init_textfile, vector_quantize, clamp_with_grad, replace_grad from dall_e import map_pixels, unmap_pixels, load_model from stylegan import g_synthesis from biggan import BigGAN from tqdm import tqdm from omegaconf import OmegaConf import sys sys.path.append("/") import create_video import tempfile import argparse import torch # import clip from CLIP import clip import glob import os import math from torch.nn import functional as F sys.path.append('/taming-transformers') # sys.path.append('./taming-transformers') from taming.models import cond_transformer, vqgan # Argsparse for commandline options parser = argparse.ArgumentParser(description='BigGan_Clip') parser.add_argument('--epochs', default = 100, type = int, help ='Number of Epochs') parser.add_argument('--generator', default = 'biggan', type = str, choices = ['biggan', 'dall-e', 'stylegan', 'vqgan'], help = 'Choose what type of generator you would like to use BigGan or Dall-E') parser.add_argument('--textfile', type = str, required= True, help ='Path for the text file') parser.add_argument('--audiofile', default = None, type = str, required= True, help ='Path for the mp3 file') parser.add_argument('--lyrics', default = True, type = bool, help ='Include lyrics') parser.add_argument('--interpolation', default = 10, type = int, help ='Number of elements to be interpolated per second and feed to the model') args = parser.parse_args() epochs = args.epochs generator = args.generator textfile = args.textfile audiofile = args.audiofile interpol = args.interpolation lyrics = args.lyrics sideX = 512 sideY = 512 def load_vqgan_model(config_path, checkpoint_path): config = OmegaConf.load(config_path) if config.model.target == 'taming.models.vqgan.VQModel': model = vqgan.VQModel(**config.model.params) model.eval().requires_grad_(False) model.init_from_ckpt(checkpoint_path) elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer': parent_model = cond_transformer.Net2NetTransformer(**config.model.params) parent_model.eval().requires_grad_(False) parent_model.init_from_ckpt(checkpoint_path) model = parent_model.first_stage_model else: raise ValueError(f'unknown model type: {config.model.target}') del model.loss return model def main(): # Automatically creates 'output' folder create_outputfolder() # Initialize Clip # perceptor, preprocess = clip.load('ViT-B/32') perceptor, preprocess = clip.load('ViT-B/32', jit=False) perceptor = perceptor.eval() # Load the model if generator == 'biggan': model = BigGAN.from_pretrained('biggan-deep-512') model = model.cuda().eval() elif generator == 'dall-e': model = load_model("decoder.pkl", 'cuda') elif generator == 'stylegan': model = g_synthesis.eval().cuda() elif generator == 'vqgan': model = load_vqgan_model( 'vqgan_imagenet_f16_1024.yaml', 'vqgan_imagenet_f16_1024.ckpt').cuda() # Read the textfile # descs - list to append the Description and Timestamps descs = init_textfile(textfile) # list of temporary PTFiles templist = [] # Loop over the description list for d in tqdm(descs): timestamp = d[0] line = d[1] # stamps_descs_list.append((timestamp, line)) lats = Pars(gen=generator, model=model).cuda() # Init Generator's latents if generator == 'biggan': par = lats.parameters() lr = 0.1#.07 elif generator == 'stylegan': par = [lats.normu] lr = .01 elif generator == 'dall-e': par = [lats.normu] lr = .1 elif generator == 'vqgan': par = [lats.normu] lr = 0.05 # Init optimizer optimizer = torch.optim.Adam(par, lr) # tokenize the 
current description with clip and encode the text txt = clip.tokenize(line + ", unreal engine") percep = perceptor.encode_text(txt.cuda()).detach().clone() # Training Loop for i in range(epochs): zs = train(i, model, lats, sideX, sideY, perceptor, percep, optimizer, line, txt, epochs=epochs, gen=generator) # save each line's last latent to a torch file temporarily latent_temp = tempfile.NamedTemporaryFile() torch.save(zs, latent_temp) #f'./output/pt_folder/{line}.pt') latent_temp.seek(0) #append it to templist so it can be accessed later templist.append(latent_temp) return templist, descs, model def sigmoid(x): x = x * 2. - 1. return math.tanh(1.5*x/(math.sqrt(1.- math.pow(x, 2.)) + 1e-6)) / 2 + .5 def interpolate(templist, descs, model, audiofile): video_temp_list = [] # interpole elements between each image for idx1, pt in enumerate(descs): # get the next index of the descs list, # if it z1_idx is out of range, break the loop z1_idx = idx1 + 1 if z1_idx >= len(descs): break current_lyric = pt[1] # get the interval betwee 2 lines/elements in seconds `ttime` d1 = pt[0] d2 = descs[z1_idx][0] ttime = d2 - d1 # if it is the very first index, load the first pt temp file # if not assign the previous pt file (z1) to zs variable if idx1 == 0: zs = torch.load(templist[idx1]) else: zs = z1 # compute for the number of elements to be insert between the 2 elements N = round(ttime * interpol) print(z1_idx) # the codes below determine if the output is list (for biggan) # if not insert it into a list if not isinstance(zs, list): z0 = [zs] z1 = [torch.load(templist[z1_idx])] else: z0 = zs z1 = torch.load(templist[z1_idx]) # loop over the range of elements and generate the images image_temp_list = [] for t in range(N): azs = [] for r in zip(z0, z1): z_diff = r[1] - r[0] inter_zs = r[0] + sigmoid(t / (N-1)) * z_diff azs.append(inter_zs) # Generate image with torch.no_grad(): if generator == 'biggan': img = model(azs[0], azs[1], 1).cpu().numpy() img = img[0] elif generator == 'dall-e': img = unmap_pixels(torch.sigmoid(model(azs[0])[:, :3]).cpu().float()).numpy() img = img[0] elif generator == 'stylegan': img = model(azs[0]) elif generator == 'vqgan': z_q = vector_quantize(azs[0].movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1) img = clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1).cpu() img = img[0] # img = synth(azs[0]) image_temp = create_image(img, t, current_lyric, generator) image_temp_list.append(image_temp) video_temp = create_video.createvid(f'{current_lyric}', image_temp_list, duration=ttime / N) video_temp_list.append(video_temp) # Finally create the final output and save to output folder create_video.concatvids(descs, video_temp_list, audiofile, lyrics=lyrics) if __name__ == '__main__': templist, descs, model = main() interpolate(templist, descs, model, audiofile)
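# Note on the easing above (added comment, not part of the original script): despite its
# name, sigmoid() is a tanh-based ease-in-out on [0, 1], so each interpolation segment
# starts and ends exactly at the two latents being blended:
# sigmoid(0.0) ~= 0.0, sigmoid(0.5) == 0.5, sigmoid(1.0) ~= 1.0.
# A quick check, assuming the function is importable as defined above:
#
#     for t in (0.0, 0.25, 0.5, 0.75, 1.0):
#         print(t, round(sigmoid(t), 4))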
# Foremast - Pipeline Tooling # # Copyright 2016 Gogo, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Lookup AMI ID from a simple name.""" import json import logging from base64 import b64decode import gitlab import requests from ..consts import GIT_URL, GITLAB_TOKEN, AMI_JSON_URL LOG = logging.getLogger(__name__) def ami_lookup(region='us-east-1', name='tomcat8'): """Use _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided, _name_ is returned as the ami id Args: region (str): AWS Region to find AMI ID. name (str): Simple AMI base name to lookup. Returns: str: AMI ID for _name_ in _region_. """ if AMI_JSON_URL: LOG.info("Getting AMI from %s", AMI_JSON_URL) response = requests.get(AMI_JSON_URL) assert response.ok, "Error getting ami info from {}".format( AMI_JSON_URL) ami_dict = response.json() LOG.debug('Lookup AMI table: %s', ami_dict) ami_id = ami_dict[region][name] elif GITLAB_TOKEN: # TODO: Remove GitLab repository in favour of JSON URL option. LOG.info("Getting AMI from Gitlab") server = gitlab.Gitlab(GIT_URL, token=GITLAB_TOKEN) project_id = server.getproject('devops/ansible')['id'] ami_blob = server.getfile(project_id, 'scripts/{0}.json'.format(region), 'master') ami_contents = b64decode(ami_blob['content']).decode() ami_dict = json.loads(ami_contents) LOG.debug('Lookup AMI table: %s', ami_dict) ami_id = ami_dict[name] else: ami_id = name LOG.info('Using AMI: %s', ami_id) return ami_id
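# Usage sketch (illustrative): with neither AMI_JSON_URL nor GITLAB_TOKEN configured,
# the lookup falls through and simply returns the given name unchanged.
if __name__ == '__main__':
    print(ami_lookup(region='us-east-1', name='tomcat8'))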
#!/usr/bin/python3 # -*- coding: utf-8 -*- __author__ = 'Richard J. Sears' VERSION = "0.98 (2021-10-15)" # This script is part of my plot management set of tools. This # script is used to move plots from one location to another on # the same physical machine. # # If you are using a drive as a temp drive that falls within your # scope of drives used for drive_manager.py, it is recommended that # you offline that drive `./drive_manager -off drive43` to prevent # drive_manager.py from placing plots on that drive. import os import logging from system_logging import setup_logging import shutil from timeit import default_timer as timer from drive_manager import notify, check_space_available, get_all_available_system_space, get_internal_plot_drive_to_use, get_drive_info import subprocess import pathlib from drivemanager_classes import DriveManager, config_file, PlotManager chianas = DriveManager.read_configs() chiaplots = PlotManager.get_plot_info() # Some Housekeeping script_path = pathlib.Path(__file__).parent.resolve() # Are we testing? testing = False if testing: plot_dir = script_path.joinpath('test_plots/') plot_size = 10000000 status_file = script_path.joinpath('local_transfer_job_running_testing') drive_check = script_path.joinpath('drive_stats.sh') drive_check_output = script_path.joinpath('drive_stats.io') else: plot_dir = chianas.dst_dirs[0] # Where do you hold your plots before they are moved? plot_size = 108644374730 # Based on K32 plot size status_file = script_path.joinpath('local_transfer_job_running') drive_check = script_path.joinpath('drive_stats.sh') drive_check_output = script_path.joinpath('drive_stats.io') def are_we_configured(): #Check to see if we are configured and if there are any existing "move" errors: if not chianas.configured: log.debug('We have not been configured! Please edit the main config file') log.debug(f'{config_file} and try again!') exit() else: pass # Setup Module logging. Main logging is configured in system_logging.py setup_logging() level = logging._checkLevel(chianas.log_level) log = logging.getLogger('move_local_plots.py') log.setLevel(level) # Let's Get Started # Look in our plot directory and get a list of plots. Do a basic # size check for sanity's sake. def get_list_of_plots(): log.debug('get_list_of_plots() Started') try: plot_to_process = [plot for plot in pathlib.Path(plot_dir).glob("*.plot") if plot.stat().st_size > plot_size] log.debug(f'This is the next plot to process: {plot_to_process[0].name}') return (plot_to_process[0].name) except IndexError: log.debug(f'{plot_dir} is Empty: No Plots to Process. Will check again soon!') return False def update_move_local_plot(): """ This function just keeps our local plot moves off the same drive as our remote plot moves so we don't saturate a single drive with multiple inbound plots. Updated in 0.93.1 to support old style plot replacement as well as filling_local_drives_first. 
""" log.debug("update_move_local_plot() Started") internal_plot_drive_to_use = get_internal_plot_drive_to_use()[0] if internal_plot_drive_to_use == '/': internal_plot_drive_to_use = get_internal_plot_drive_to_use() if not chianas.replace_non_pool_plots: # If we are not replacing old plots with new portable plots, run the following code log.debug('Replace Plots has NOT been set in config, will build update_move_local_plot script for normal operation.') try: if chianas.current_internal_drive == get_internal_plot_drive_to_use()[0]: log.debug(f'Currently Configured Internal Plot Drive: {chianas.current_internal_drive}') log.debug(f'System Selected Internal Plot Drive: {internal_plot_drive_to_use}') log.debug('Configured and Selected Drives Match!') log.debug(f'No changes necessary to Internal Plotting Drive') log.debug( f'Plots left available on configured Internal plotting drive: {get_drive_info("space_free_plots_by_mountpoint", chianas.current_internal_drive)}') else: notify('Internal Plot Drive Updated', f'Internal Plot Drive Updated: Was: {chianas.current_internal_drive}, Now: {internal_plot_drive_to_use}') chianas.update_current_internal_drive(internal_plot_drive_to_use) log.info(f'Updated Internal Plot Drive, Was: {chianas.current_internal_drive}, Now: {internal_plot_drive_to_use}') except TypeError: log.debug ('No Additional Drives found to be used as internal plot drives!') log.debug('Please add additional drive manually or via auto_drive.py and try again!') else: log.debug('Replace Plots Set, will build update_move_local_plot script for plot replacement!') log.debug('Checking to see if we need to fill empty drives first......') if chianas.fill_empty_drives_first: log.debug('fill_empty_drives_first flag is set. Checking for empty drive space.....') if (get_all_available_system_space("free")[1]) > chianas.empty_drives_low_water_mark: log.debug('Found Empty Drive Space!') log.debug(f'Low Water Mark: {chianas.empty_drives_low_water_mark} and we have {get_all_available_system_space("free")[1]} available') drive = internal_plot_drive_to_use try: if chianas.current_internal_drive == drive: log.debug(f'Currently Configured Internal Plot Drive: {chianas.current_internal_drive}') log.debug(f'System Selected Internal Plot Drive: {drive}') log.debug('Configured and Selected Drives Match!') log.debug(f'No changes necessary to Internal Plotting Drive') log.debug(f'Plots left available on configured Internal plotting drive: {get_drive_info("space_free_plots_by_mountpoint", chianas.current_internal_drive)}') else: notify('Internal Plot Drive Updated', f'Internal Plot Drive Updated: Was: {chianas.current_internal_drive}, Now: {drive}') chianas.update_current_internal_drive(drive) log.info(f'Updated Internal Plot Drive, Was: {chianas.current_internal_drive}, Now: {drive}') except TypeError: log.debug('No Additional Drives found to be used as internal plot drives!') log.debug('We will now default to replacing old style plots!') log.debug('Checking to see if we have any old plots to replace.....') try: if chiaplots.plots_to_replace: log.debug(f'We found [{chiaplots.number_of_old_plots}] to replace. Continuing....') drive = chiaplots.local_plot_drive # Get the drive where we want to store local plots. This is reverse sorted from external plots coming in. 
if chianas.current_internal_drive == drive: log.debug(f'Currently Configured Internal Plot Drive: {chianas.current_internal_drive}') log.debug(f'System Selected Internal Plot Drive: {drive}') log.debug('Configured and Selected Drives Match!') log.debug(f'No changes necessary to Internal Plotting Drive') else: notify('Internal Plot Drive Updated', f'Internal Plot Drive Updated: Was: {chianas.current_internal_drive}, Now: {drive}') chianas.update_current_internal_drive(drive) log.info( f'Updated Internal Plot Drive, Was: {chianas.current_internal_drive}, Now: {drive}') else: log.debug('No old plots found, nothing left to do!') except TypeError: log.debug('No Additional Drives found that have old plots!') else: log.debug('fill_empty_drives_first flag is set, but we have no available free drive space....Defaulting to REPLACE PLOTS!') log.debug(f'Low Water Mark: {chianas.empty_drives_low_water_mark} and we have {get_all_available_system_space("free")[1]} available') log.debug('Checking to see if we have any old plots to replace.....') if chiaplots.plots_to_replace: log.debug(f'We found [{chiaplots.number_of_old_plots}] to replace. Continuing....') drive = chiaplots.local_plot_drive # Get the drive where we want to store local plots. This is reverse sorted from external plots coming in. try: if chianas.current_internal_drive == drive: log.debug(f'Currently Configured Internal Plot Drive: {chianas.current_internal_drive}') log.debug(f'System Selected Internal Plot Drive: {drive}') log.debug('Configured and Selected Drives Match!') log.debug(f'No changes necessary to Internal Plotting Drive') else: notify('Internal Plot Drive Updated', f'Internal Plot Drive Updated: Was: {chianas.current_internal_drive}, Now: {drive}') chianas.update_current_internal_drive(drive) log.info(f'Updated Internal Plot Drive, Was: {chianas.current_internal_drive}, Now: {drive}') except TypeError: log.debug('No Additional Drives found that have old plots!') else: log.debug('No old plots found, nothing left to do!') else: log.debug('fill_empty_drives_first flag NOT set, continuing....') log.debug('Checking to see if we have any old plots to replace.....') try: if chiaplots.plots_to_replace: log.debug(f'We found [{chiaplots.number_of_old_plots}] to replace. Continuing....') drive = chiaplots.local_plot_drive # Get the drive where we want to store local plots. This is reverse sorted from external plots coming in. if chianas.current_internal_drive == drive: log.debug(f'Currently Configured Internal Plot Drive: {chianas.current_internal_drive}') log.debug(f'System Selected Internal Plot Drive: {drive}') log.debug('Configured and Selected Drives Match!') log.debug(f'No changes necessary to Internal Plotting Drive') else: notify('Internal Plot Drive Updated', f'Internal Plot Drive Updated: Was: {chianas.current_internal_drive}, Now: {drive}') chianas.update_current_internal_drive(drive) log.info(f'Updated Internal Plot Drive, Was: {chianas.current_internal_drive}, Now: {drive}') else: log.debug('No old plots found, nothing left to do!') except TypeError: log.debug('No Additional Drives found that have old plots!') # If we have plots and we are NOT currently transferring another plot and # we are NOT testing the script, then process the next plot if there is # one to process. 
def process_plot(): if plot_dir == 'not_set': log.debug('You need to set the Directory where your local plots are located!') log.debug('Please set "plot_dir" to the correct mount point and try again!') log.debug(f'Edit {config_file} to update this setting.') exit() # Nothing left to do, we're outta here! else: log.debug('process_plot() Started') if not process_control('check_status', 0): plot_to_process = get_list_of_plots() if plot_to_process and not testing: plot_source = plot_dir + '/' + plot_to_process if chianas.pools: plot_destination = chianas.current_internal_drive + '/' + 'portable.' + plot_to_process else: plot_destination = chianas.current_internal_drive + '/' + plot_to_process process_control('set_status', 'start') log.info(f'Processing Plot: {plot_source}') log.debug(f'Current Internal Plotting Drive is: {chianas.current_internal_drive}') if check_space_available(chianas.current_internal_drive): log.debug(f'Starting Copy of {plot_source} to {plot_destination}') start_time = timer() try: shutil.copy2(plot_source, plot_destination) except: log.debug(f'ERROR: There was a problem copying: {plot_dir}!') chianas.set_local_move_error() if not chianas.local_move_error_alert_sent: notify('LOCAL MOVE ERROR', 'Local Move Error Encountered, You MUST manually reset error or no more local plots will get moved! Also reset Alert Sent to rearm this alert!') log.debug('Local Move Error Alert Sent') chianas.toggle_alert_sent('local_move_error_alert_sent') else: log.debug('Local Move Error Alert Already Sent - Not Resending') exit() end_time = timer() if verify_plot_move(plot_source, plot_destination): log.info('Plot Sizes Match, we have a good plot move!') log.info(f'Total Elapsed Time: {end_time - start_time:.2f} seconds or {(end_time - start_time)/60:.2f} Minutes') else: log.debug('FAILURE - Plot sizes DO NOT Match') process_control('set_status', 'stop') #Set to stop so it will attempt to run again in the event we want to retry.... main() # Try Again - no need to do anything with the file, shutil.copy2 will overwrite an existing file. process_control('set_status', 'stop') os.remove(plot_source) log.info(f'Removing: {plot_source}') else: if chianas.replace_non_pool_plots: # Double verify we want to remove non-pool plots before doing anything..... if chiaplots.plots_to_replace: log.debug('No available plot space left, we need to remove an old plot...') log.debug(f'Replace non-pool plots has been set....') log.debug(f'We have {chiaplots.number_of_old_plots} plots to replace.') log.debug(f'We will remove this plot first: {chiaplots.next_local_plot_to_replace}') log.debug(f'The next inbound plot will be saved here: {chiaplots.local_plot_drive}') log.debug(f'We currently have {chiaplots.number_of_portable_plots} portable plots on the system.') os.remove(chiaplots.next_local_plot_to_replace) if not os.path.isfile(chiaplots.next_local_plot_to_replace): print('Old Plot has been removed, making room for new Portable Plot! Continuing..... ') log.debug(f'Starting Copy of {plot_source} to {plot_destination}') start_time = timer() try: shutil.copy2(plot_source, plot_destination) except: log.debug(f'ERROR: There was a problem copying: {plot_dir}!') chianas.set_local_move_error() if not chianas.local_move_error_alert_sent: notify('LOCAL MOVE ERROR', 'Local Move Error Encountered, You MUST manually reset error or no more local plots will get moved! 
Also reset Alert Sent to rearm this alert!') log.debug('Local Move Error Alert Sent') chianas.toggle_alert_sent('local_move_error_alert_sent') else: log.debug('Local Move Error Alert Already Sent - Not Resending') exit() end_time = timer() if verify_plot_move(plot_source, plot_destination): log.info('Plot Sizes Match, we have a good plot move!') log.info( f'Total Elapsed Time: {end_time - start_time:.2f} seconds or {(end_time - start_time) / 60:.2f} Minutes') else: log.debug('FAILURE - Plot sizes DO NOT Match') process_control('set_status', 'stop') # Set to stop so it will attempt to run again in the event we want to retry.... main() # Try Again - no need to do anything with the file, shutil.copy2 will overwrite an existing file. process_control('set_status', 'stop') os.remove(plot_source) log.info(f'Removing: {plot_source}') else: log.debug('ERROR: Plot Still Exists!! EXITING') raise Exception else: log.debug('No more old plots to replace. Quitting!') exit() else: log.debug('We are out of space and replace_non_pool_plots is NOT set, I cannot do anything more.....') exit() elif testing: log.debug('Testing Only - Nothing will be Done!') else: return else: return def verify_plot_move(plot_source, plot_destination): log.debug('verify_plot_move() Started') log.debug (f'Verifing: {plot_source}') original_plot_size = os.path.getsize(plot_source) copied_plot_size = os.path.getsize(plot_destination) log.debug(f'Original Plot Size Reported as: {original_plot_size}') log.debug(f'Copied Plot Size Reported as: {copied_plot_size}') if original_plot_size == copied_plot_size: return True else: log.debug(f'Plot Size Mismatch!') return False def process_control(command, action): log.debug(f'process_control() called with [{command}] and [{action}]') if command == 'set_status': if action == "start": if os.path.isfile(status_file): log.debug(f'Status File: [{status_file}] already exists!') return else: os.open(status_file, os.O_CREAT) if action == "stop": if os.path.isfile(status_file): os.remove(status_file) else: log.debug(f'Status File: [{status_file}] does not exist!') return elif command == 'check_status': drive_io = check_drive_activity() if os.path.isfile(status_file) and drive_io: log.debug(f'Checkfile Exists and Disk I/O is present, We are currently Copying a Plot, Exiting') return True elif os.path.isfile(status_file) and not drive_io: log.debug('WARNING! - Checkfile exists but there is no Disk I/O! Forcing Reset') os.remove(status_file) return False else: log.debug(f'Checkfile Does Not Exist!') return False else: return def check_drive_activity(): """ Here we are checking drive activity on the drive we are moving plots to internally. If there is drive activity, then we are most likely moving a plot to that drive and do not want to 'double up' on moves. """ log.debug('check_drive_activity() called') try: subprocess.call([drive_check]) except subprocess.CalledProcessError as e: log.warning(e.output) with open(drive_check_output, 'rb') as f: f.seek(-2, os.SEEK_END) while f.read(1) != b'\n': f.seek(-2, os.SEEK_CUR) last_line = f.readline().decode() log.debug(last_line) if (str.split(last_line)[1]) == 'Time': log.debug('No Drive Activity detected') return False else: return True def main(): log.debug(f'Welcome to move_local_plots.py: Version {VERSION}') if chianas.local_move_error: log.debug('LOCAL MOVE ERROR Flag has been set, unable to continue!') log.debug('Determine nature of error and set local_move_error to false. Also') log.debug('reset Alert Sent notification to reenable this alert. 
These settings') log.debug(f'are located in your config file: {config_file}') if not chianas.local_move_error_alert_sent: #Verify that alert has been sent and send it if it has not notify('LOCAL MOVE ERROR', 'Local Move Error Encountered, You MUST manually reset error or no more local plots will get moved! Also reset Alert Sent to rearm this alert!') log.debug('Local Move Error Alert Sent') chianas.toggle_alert_sent('local_move_error_alert_sent') else: log.debug('Local Move Error Alert Already Sent - Not Resending') exit() # Nothing left to do, we're outta here! if chianas.local_plotter: are_we_configured() update_move_local_plot() process_plot() else: log.debug(f'Whoops! Local Plotting has not been configured in {config_file}. Quitting') exit() if __name__ == '__main__': main()
from flask import Blueprint bp = Blueprint("Users",__name__) from app.users import routes
import json import csv import secrets import string from subprocess import check_output, call fieldnames = ("Azure AD Group","First Name","Last Name","Extension","Voice DID","Fax DID","Caller ID","ID for MS Exchange","Home Phone","Cell Phone", "Fax Number", "E-mail","Alternate E-mail","User Name","Password","PIN","Pseudonym","User Profile","ID","Admin Profile","Paging Profile", "Recording Profile", "Home MX","Current MX", "Default Role","Assigned Device(s)","CallGroup","AA") password_length = 8 pin_length = 6 def generate_password (N): return ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(N)) def generate_pin (N): return ''.join(secrets.choice(string.digits) for _ in range(N)) def main(): # ID and extension format, increments by one for every new user id_number = 100 extension_number = 100 # Login call(['az', 'login']) # Get Azure user list user_list_json = json.loads(check_output(['az', 'ad', 'user', 'list']).decode('utf-8')) # CSV output with open("csv_output.csv","w") as csv_file: writer = csv.DictWriter(csv_file, fieldnames=fieldnames, dialect='excel') writer.writeheader() for user in user_list_json: extension_number += 1 id_number += 1 user_group_list = check_output(['az', 'ad', 'user', 'get-member-groups', '--upn-or-object-id', user['userPrincipalName'], '--query', '[].displayName']).decode('utf-8') # Formatting names user_name = user['givenName'] user_surname = user['surname'] if user_name is None and user_surname is None: if user['displayName'] is not None: user_name, *middle_name, user_surname = user['displayName'].split() # Write to CSV file writer.writerow({"Azure AD Group" : user_group_list, "First Name" : user_name, "Last Name" : user_surname, "Home Phone" : user['telephoneNumber'], "Cell Phone" : user['mobile'], "E-mail" : user['userPrincipalName'], "User Name" : user['userPrincipalName'].split("@")[0], "Password" : generate_password(password_length), "PIN" : generate_pin(pin_length), "Extension" : extension_number, "ID" : id_number }) if __name__ == '__main__': main()
class Window(object): def __init__(self, win_id, name, geom=None, d_num=None): self.win_id = win_id self.name = name self.geom = geom self.children = [] self.desktop_number = d_num return def __repr__(self): """ An inheritable string representation. Prints the window type and ID. """ id_r = f", id: {self.win_id}, " if self.name is None: name_r = "(has no name)" else: name_r = f'"{self.name}"' level_repr_indent_size = 2 indent = " " * level_repr_indent_size if "level" in self.__dict__: level_r = f", level: {self.level}" level_indent = indent * self.level else: level_r = "" level_indent = "" n_children = len(self.children) chdn_repr = f"\n{indent}".join([f"{ch}" for ch in self.children]) if n_children == 1: child_r = f"\n{level_indent} 1 child: {chdn_repr}" elif n_children > 1: child_r = f"\n{level_indent} {n_children} children: {chdn_repr}" else: child_r = "" d_num_r = "" if self.desktop_number is not None: d_num_r += f", desktop {self.desktop_number}" return f"{type(self).__name__}{id_r}{name_r}{level_r}{child_r}{d_num_r}" @staticmethod def check_id(win_id): assert win_id.startswith("0x"), "Window ID is not a hexadecimal" return def check_line_entry(self, line, prefix): bad_input_err_msg = f"Not a valid {type(self).__name__} line entry" assert line.lstrip().startswith(prefix), bad_input_err_msg return @property def win_id(self): return self._win_id @win_id.setter def win_id(self, win_id): self.check_id(win_id) self._win_id = win_id return @property def name(self): return self._name @name.setter def name(self, name): self._name = name return @property def children(self): return self._children @children.setter def children(self, vals): self._children = vals return def add_children(self, subnode_list): self.children.extend(subnode_list) return @property def geom(self): return self._geom @geom.setter def geom(self, geom): self._geom = geom return @property def desktop_number(self): return self._desktop_number @desktop_number.setter def desktop_number(self, d_num): self._desktop_number = d_num return class WindowGeom(object): """ TODO: structure this class to represent window geometry (for now it only applies to the SubnodeDesc class, which is given on the lines of xwininfo output which describe the children of another node, so no need to add to base Window class). """ def __init__(self, width, height, abs_x, abs_y, rel_x, rel_y): self.width = width self.height = height self.abs_x = abs_x self.abs_y = abs_y self.rel_x = rel_x self.rel_y = rel_y return @property def width(self): return self._width @width.setter def width(self, w): self._width = w return @property def height(self): return self._height @height.setter def height(self, h): self._height = h return @property def abs_x(self): return self._abs_x @abs_x.setter def abs_x(self, abs_x): self._abs_x = abs_x return @property def abs_y(self): return self._abs_y @abs_y.setter def abs_y(self, abs_y): self._abs_y = abs_y return @property def rel_x(self): return self._rel_x @rel_x.setter def rel_x(self, rel_x): self._rel_x = rel_x return @property def rel_y(self): return self._rel_y @rel_y.setter def rel_y(self, rel_y): self._rel_y = rel_y return class WindowDesc(Window): def __init__(self, line): super(WindowDesc, self).__init__(*self.parse_descline(line)) return @staticmethod def parse_descline(line): """ Window ID is given in the middle of a line, after a colon (window type format), followed by the window's name (if any). 
""" win_id = line.lstrip().split(":")[1].lstrip().split()[0] if line.endswith("(has no name)") or win_id == "0x0": name = None else: assert line.count('"') > 1, ValueError("Missing enclosing quotation marks") first_quote = line.find('"') assert line.endswith('"'), ValueError("Expected quotation mark at EOL") name = line[first_quote + 1 : -1] return win_id, name class SourceWindow(WindowDesc): def __init__(self, line): self.check_line_entry(line, "xwininfo: Window") # Remove xwininfo output prefix before processing line for ID and window name super(SourceWindow, self).__init__(line[line.find(":") + 1 :]) return @property def parent(self): return self._parent @parent.setter def parent(self, parent): self._parent = parent return def assign_parent(self, parent_line): self.parent = ParentWindow(parent_line) class RootWindow(WindowDesc): def __init__(self, line): self.check_line_entry(line, "Root window") super(RootWindow, self).__init__(line) return class ParentWindow(WindowDesc): def __init__(self, line): self.check_line_entry(line, "Parent window") super(ParentWindow, self).__init__(line) return class SubnodeDesc(Window): def __init__(self, line): super(SubnodeDesc, self).__init__(*self.parse_descline(line)) return @staticmethod def parse_descline(line): """ Window ID is given at the start of a line (child window format), then the window's name, then a colon, then a bracketed set of window tags, then window width and height, then absolute and relative window positions. """ win_id = line.lstrip().split()[0] tag_open_br = 0 - line[::-1].find("(") tag_close_br = -1 - line[::-1].find(")") tags = [x.strip('"') for x in line[tag_open_br:tag_close_br].split()] if line.split(":")[0].endswith("(has no name)"): name = None else: assert line.count('"') > 1, ValueError("Missing enclosing quotation marks") name_open_qm = line.find('"') name_close_qm = tag_open_br - line[:tag_open_br][::-1].find(":") - 1 name = line[name_open_qm:name_close_qm].strip('"') geom = WindowGeom(*SubnodeDesc.parse_geomline(line[tag_close_br + 1 :])) return win_id, name, geom @staticmethod def parse_geomline(line): w_h_abs, rel = line.lstrip().split() width = w_h_abs.split("x")[0] height, abs_x, abs_y = w_h_abs.split("x")[1].split("+") rel_x, rel_y = rel.split("+")[-2:] return width, height, abs_x, abs_y, rel_x, rel_y class ChildWindow(SubnodeDesc): def __init__(self, line, level): self.check_line_entry(line, "0x") self.level = level super(ChildWindow, self).__init__(line) return
import asyncio
import io
from enum import IntEnum

from haproxyspoa.payloads.agent_hello import AgentHelloPayload
from haproxyspoa.spoa_data_types import parse_varint, write_varint


class FrameType(IntEnum):
    FRAGMENT = 0
    HAPROXY_HELLO = 1
    HAPROXY_DISCONNECT = 2
    HAPROXY_NOTIFY = 3
    AGENT_HELLO = 101
    AGENT_DISCONNECT = 102
    ACK = 103


class FrameHeaders:

    def __init__(self, frame_type: int, flags: int, stream_id: int, frame_id: int):
        self.type = frame_type
        self.flags = flags
        self.stream_id = stream_id
        self.frame_id = frame_id

    def is_fragmented_or_unset(self):
        # Note: This implementation doesn't support fragmented frames, so
        # if the frame is fragmented, we're toast.
        return self.type == FrameType.FRAGMENT

    def is_haproxy_hello(self):
        return self.type == FrameType.HAPROXY_HELLO

    def is_haproxy_disconnect(self):
        return self.type == FrameType.HAPROXY_DISCONNECT

    def is_haproxy_notify(self):
        return self.type == FrameType.HAPROXY_NOTIFY

    def is_agent_hello(self):
        return self.type == FrameType.AGENT_HELLO

    def is_agent_disconnect(self):
        return self.type == FrameType.AGENT_DISCONNECT

    def is_ack(self):
        return self.type == FrameType.ACK


class Frame:

    def __init__(self, frame_type: int, flags: int, stream_id: int, frame_id: int, payload: io.BytesIO):
        self.headers = FrameHeaders(frame_type, flags, stream_id, frame_id)
        self.payload = payload

    @staticmethod
    async def read_frame(reader: asyncio.StreamReader):
        frame_length = int.from_bytes(await reader.readexactly(4), byteorder='big', signed=False)
        frame_bytes: bytes = await reader.readexactly(frame_length)
        frame_bytesio = io.BytesIO(frame_bytes)

        frame_type = int.from_bytes(frame_bytesio.read(1), byteorder='big', signed=False)
        flags = int.from_bytes(frame_bytesio.read(4), byteorder='big', signed=False)
        stream_id = parse_varint(frame_bytesio)
        frame_id = parse_varint(frame_bytesio)

        return Frame(frame_type, flags, stream_id, frame_id, frame_bytesio)

    async def write_frame(self, writer: asyncio.StreamWriter):
        header_buffer = io.BytesIO()
        header_buffer.write(self.headers.type.to_bytes(1, byteorder='big'))
        header_buffer.write(self.headers.flags.to_bytes(4, byteorder='big'))
        header_buffer.write(write_varint(self.headers.stream_id))
        header_buffer.write(write_varint(self.headers.frame_id))

        frame_header_bytes = header_buffer.getvalue()
        frame_payload_bytes = self.payload.getvalue()
        frame_length = len(frame_header_bytes) + len(frame_payload_bytes)

        writer.write(frame_length.to_bytes(4, byteorder='big'))
        writer.write(frame_header_bytes)
        writer.write(frame_payload_bytes)
        await writer.drain()


class AgentHelloFrame(Frame):

    def __init__(self, payload: AgentHelloPayload, flags: int = 1, stream_id: int = 0, frame_id: int = 0):
        super().__init__(
            FrameType.AGENT_HELLO,
            flags,
            stream_id,
            frame_id,
            io.BytesIO(payload.to_bytes()),
        )
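

# A minimal asyncio handler sketch, assuming the Frame/FrameType classes above
# are in scope. The SPOP handshake (replying to HAPROXY_HELLO with an
# AgentHelloFrame) and real ACK actions are omitted; the empty ACK payload and
# the host/port below are placeholders for illustration only.
async def handle_connection(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    while True:
        frame = await Frame.read_frame(reader)
        headers = frame.headers
        if headers.is_haproxy_disconnect():
            break
        if headers.is_haproxy_notify():
            # Acknowledge the NOTIFY on the same stream/frame ids.
            ack = Frame(FrameType.ACK, flags=1, stream_id=headers.stream_id,
                        frame_id=headers.frame_id, payload=io.BytesIO(b""))
            await ack.write_frame(writer)
    writer.close()


async def serve(host: str = "127.0.0.1", port: int = 9001):
    server = await asyncio.start_server(handle_connection, host, port)
    async with server:
        await server.serve_forever()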
"""Tests for main cli """ from subprocess import check_output from unittest import TestCase, main import re from project import __version__ class TestHelp(TestCase): def test_help(self): output = check_output(['project','-h']) self.assertTrue('Usage:' in str(output) ) output = check_output(['project','--help']) self.assertTrue('Usage:' in str(output) ) class TestVersion(TestCase): def test_version(self): output = check_output(['project', '--version']) match = re.search('(\d.\d.\d)', str(output.strip()) ) self.assertEqual(match.group(0), __version__) output = check_output(['project', '-v']) match = re.search('(\d.\d.\d)', str(output.strip()) ) self.assertEqual(match.group(0), __version__) if __name__ == '__main__': main()
import csv import pathlib import pickle import os from collections.abc import Iterable import numpy as np import pandas as pd import scipy.stats as stats def processed_expression_table(df): df.index.name = 'genes' return df.groupby('genes').mean() def expression_ranks(expression_table_df, ascending, rank_method='max'): return expression_table_df.rank(method=rank_method, ascending=ascending) # bg_genes: df of samples with background gene count def bg_genes(expression_ranks_df): return expression_ranks_df.count() def pathway_ranks(pathway_genes, expression_ranks_df, rank_method): return expression_ranks_df.reindex(pathway_genes).rank(method=rank_method).dropna(how='all') def effective_pathway(pathway_ranks_df): return pathway_ranks_df.max() def b(expression_ranks_df, pathway_ranks_df): return expression_ranks_df.subtract(pathway_ranks_df).dropna(how='all') def c(effective_pathway_series, pathway_ranks_df): return effective_pathway_series - pathway_ranks_df def d(bg_series, pathway_ranks_df, b_df, c_df): return bg_series - pathway_ranks_df - b_df - c_df def sample_2x2(pathway_ranks_dict, b_dict, c_dict, d_dict): final_dict = { sample: { gene: [ [val, b_dict[sample][gene]], [c_dict[sample][gene], d_dict[sample][gene]] ] for (gene, val) in genes.items() } for (sample, genes) in pathway_ranks_dict.items() } return pd.DataFrame(final_dict) def clean_fisher_exact(table): try: if np.isnan(table).any(): return np.nan else: return stats.fisher_exact(table, alternative='greater')[1] except ValueError: print(table) def p_values(sample_2x2_df): return sample_2x2_df.apply(np.vectorize(clean_fisher_exact)) def neg_log(table): return -np.log(table) def harmonic_average(iterable): if 0 in iterable: return 0 reciprocal_iterable = [1/el for el in iterable if ~np.isnan(el)] denom = sum(reciprocal_iterable) if denom == 0: return np.nan else: return len(reciprocal_iterable) / denom def geometric_average(iterable): try: clean_iterable = [el for el in iterable if ~np.isnan(el)] if not len(clean_iterable): return np.nan return np.exp(np.sum(np.log(clean_iterable)) / len(clean_iterable)) except ZeroDivisionError: return 0 def user_pw_metadata_f(pw_data, output_dir_path): output_loc = '{}/user_pathways.tsv'.format(output_dir_path) pd.DataFrame.from_dict(pw_data, orient='index').to_csv(output_loc, sep='\t') def pw_metadata_f(pw_db_choice, output_dir_path): output_loc = '{}/{}.tsv'.format(output_dir_path, pw_db_choice) pw_data = pickle.load(open('databases/metadata/{}.pkl'.format(pw_db_choice), 'rb')) pw_data.to_csv(output_loc, sep='\t') def output_dir(output_dir_path): pathlib.Path(output_dir_path).mkdir(parents=True, exist_ok=True) def user_pathways(f): pathway_db = {} pw_data = {} with open(f, 'r') as csv_in: reader = csv.reader(csv_in) for row in reader: pw = row[0] db = row[1] genes = set(row[2:]) pathway_db[pw] = genes pw_data[pw] = { 'db': db, 'count': len(genes) } return pathway_db, pw_data def validate_db_name(db_name): available_dbs = ['kegg', 'hallmark', 'reactome', 'hmdb_smpdb'] if db_name.lower() not in available_dbs: raise ValueError( "{} not recognized. 
Available dbs: {}".format(db_name, ",".join(available_dbs)) ) return True def db_pathways_dict(db_name): validate_db_name(db_name) db_parent = os.path.dirname(os.path.abspath(__file__)) with open('{}/databases/{}.pkl'.format(db_parent, db_name.lower()), 'rb') as f: pathways = pickle.load(f) return pathways def validate_pathways(pw_dict): if not isinstance(pw_dict, dict): raise TypeError("Pathways should be a dictionary of lists or sets") if any(not isinstance(gene_list, Iterable) for gene_list in pw_dict.values()): raise TypeError("Pathways should be a dictionary of lists or sets") return True def all( expression_table, pathways=None, db='kegg', geometric=True, min_p_val=True, ascending=True, rank_method='max' ): if not pathways: pathways = db_pathways_dict(db) else: validate_pathways(pathways) harmonic_averages = [None] * len(pathways) geometric_averages = [] min_p_vals = [] if geometric: geometric_averages = [None] * len(pathways) if min_p_val: min_p_vals = [None] * len(pathways) expression_table_df = processed_expression_table(expression_table) expression_ranks_df = expression_ranks(expression_table_df, ascending=ascending, rank_method=rank_method) bg_genes_df = bg_genes(expression_ranks_df) sample_order = expression_table_df.columns # perform analysis for each pathway for i, pathway in enumerate(pathways): print('starting: {}'.format(pathway)) pathway_ranks_df = pathway_ranks(pathways[pathway], expression_ranks_df, rank_method=rank_method) effective_pathway_df = effective_pathway(pathway_ranks_df) b_df = b(expression_ranks_df, pathway_ranks_df) c_df = c(effective_pathway_df, pathway_ranks_df) d_df = d(bg_genes_df, pathway_ranks_df, b_df, c_df) sample_2x2_df = sample_2x2( pathway_ranks_df.to_dict(), b_df.to_dict(), c_df.to_dict(), d_df.to_dict() ) p_values_df = p_values(sample_2x2_df) # Harmonic averaging is default harmonic_averages_series = neg_log(p_values_df.apply(harmonic_average).loc[sample_order]) harmonic_averages_series.name = pathway harmonic_averages[i] = harmonic_averages_series if geometric: geometric_averages_series = neg_log(p_values_df.apply(geometric_average).loc[sample_order]) geometric_averages_series.name = pathway geometric_averages[i] = geometric_averages_series if min_p_val: min_p_vals_series = neg_log(p_values_df.min().loc[sample_order]) min_p_vals_series.name = pathway min_p_vals[i] = min_p_vals_series print('finished: {}'.format(pathway)) harmonic_averages_df = pd.concat(harmonic_averages, axis=1).T if geometric: geometric_averages_df = pd.concat(geometric_averages, axis=1).T else: geometric_averages_df = None if min_p_val: min_p_vals_df = pd.concat(min_p_vals, axis=1).T else: min_p_vals_df = None return { 'harmonic': harmonic_averages_df, 'geometric': geometric_averages_df, 'min_p_val': min_p_vals_df } def pa_stats( expression_table, mode='harmonic', pathways=None, db='kegg', ascending=True, rank_method='max' ): if not pathways: pathways = db_pathways_dict(db) else: validate_pathways(pathways) averages = [None] * len(pathways) expression_table_df = processed_expression_table(expression_table) expression_ranks_df = expression_ranks(expression_table_df, ascending=ascending, rank_method=rank_method) bg_genes_df = bg_genes(expression_ranks_df) sample_order = expression_table_df.columns # perform analysis for each pathway for i, pathway in enumerate(pathways): pathway_ranks_df = pathway_ranks(pathways[pathway], expression_ranks_df, rank_method=rank_method) effective_pathway_df = effective_pathway(pathway_ranks_df) b_df = b(expression_ranks_df, pathway_ranks_df) 
c_df = c(effective_pathway_df, pathway_ranks_df) d_df = d(bg_genes_df, pathway_ranks_df, b_df, c_df) sample_2x2_df = sample_2x2( pathway_ranks_df.to_dict(), b_df.to_dict(), c_df.to_dict(), d_df.to_dict() ) p_values_df = p_values(sample_2x2_df) if mode == 'geometric': averages_series = neg_log(p_values_df.apply(geometric_average).loc[sample_order]) if mode == 'harmonic': averages_series = neg_log(p_values_df.apply(harmonic_average).loc[sample_order]) if mode == 'min': averages_series = neg_log(p_values_df.min().loc[sample_order]) averages_series.name = pathway averages[i] = averages_series averages_df = pd.concat(averages, axis=1).T return averages_df # Try doing this with a decorator def harmonic( expression_table, pathways=None, db='kegg', ascending=True, rank_method='max' ): return pa_stats(expression_table, 'harmonic', pathways, db, ascending, rank_method) def geometric( expression_table, pathways=None, db='kegg', ascending=True, rank_method='max' ): return pa_stats(expression_table, 'geometric', pathways, db, ascending, rank_method) def min_p_val( expression_table, pathways=None, db='kegg', ascending=True, rank_method='max' ): return pa_stats(expression_table, 'min', pathways, db, ascending, rank_method)
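

# A small usage sketch, assuming the functions above are in scope. The
# expression values, gene symbols, sample names and the toy pathway below are
# made up purely for illustration; real input is a genes-by-samples expression
# table plus pathway gene sets (built in or user supplied).
if __name__ == '__main__':
    expression = pd.DataFrame(
        {'sample_1': [5.0, 1.2, 3.3, 0.4], 'sample_2': [2.1, 4.4, 0.9, 6.0]},
        index=['TP53', 'BRCA1', 'EGFR', 'MYC'],
    )
    toy_pathways = {'toy_pathway': ['TP53', 'EGFR']}

    # Returns a pathways x samples table of -log(harmonic-mean p-values).
    scores = harmonic(expression, pathways=toy_pathways)
    print(scores)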
import random import numpy as np import os import sys import torch from torch.autograd import Variable import utils class DataLoader(object): """ Handles all aspects of the data. Stores the dataset_params, vocabulary and tags with their mappings to indices. """ def __init__(self, data_dir, params, path_glove=''): """ Loads dataset_params, vocabulary and tags. Ensure you have run `build_vocab.py` on data_dir before using this class. Args: data_dir: (string) directory containing the dataset params: (Params) hyperparameters of the training process. This function modifies params and appends dataset_params (such as vocab size, num_of_tags etc.) to params. """ self.data_dir = data_dir # loading dataset_params json_path = os.path.join(data_dir, 'dataset_params.json') assert os.path.isfile(json_path), "No json file found at {}, run build_vocab.py".format(json_path) self.dataset_params = utils.Params(json_path) # loading vocab (we require this to map words to their indices) vocab_path = os.path.join(data_dir, 'words.txt') self.vocab = {} self.vocab_lower = {} # only keep lower case words self.idx_vocab = {} self.idx_vocab_lower = {} with open(vocab_path) as f: c = 0 for i, l in enumerate(f.read().splitlines()): # l = self.unicodeToAscii(l) self.vocab[l] = i self.idx_vocab[i] = l w = l.lower() if w not in self.vocab_lower: self.vocab_lower[w] = c self.idx_vocab_lower[c] = w c += 1 # setting the indices for UNKnown words and PADding symbols self.unk_ind = self.vocab[self.dataset_params.unk_word] self.pad_ind = self.vocab[self.dataset_params.pad_word] # loading tags (we require this to map tags to their indices) tags_path = os.path.join(data_dir, 'tags.txt') self.tag_map = {} self.idx_tag = {} with open(tags_path) as f: for i, t in enumerate(f.read().splitlines()): self.tag_map[t] = i self.idx_tag[i] = t # Glove embeddings glove_dict = {} if params.use_glove and path_glove: # glove_vocab = set() with open(path_glove) as f: for line in f: s = line.strip().split(' ') word = s[0] # glove_vocab.add(word) if word in self.vocab_lower: emb = [float(j) for j in s[1:]] glove_dict[word] = emb self.embedding = np.zeros((len(self.vocab), params.embedding_dim)) # Embedding Initialization # If word not present in glove, then initialize randomly with open(vocab_path) as f: for i, word in enumerate(f.read().splitlines()): word = word.lower() if glove_dict.get(word): self.embedding[i] = glove_dict[word] else: self.embedding[i] = np.random.randn(params.embedding_dim) # adding dataset parameters to param (e.g. vocab size, ) params.update(json_path) assert params.vocab_size == len(self.vocab), 'Vocabulary sizes from build_vocab and in Data loader should match' def load_sentences_labels(self, sentences_file, labels_file, d): """ Loads sentences and labels from their corresponding files. Maps tokens and tags to their indices and stores them in the provided dict d. 
Args: sentences_file: (string) file with sentences with tokens space-separated labels_file: (string) file with NER tags for the sentences in labels_file d: (dict) a dictionary in which the loaded data is stored """ sentences = [] labels = [] with open(sentences_file, encoding='ISO-8859-1') as f: for sentence in f.readlines(): # replace each token by its index if it is in vocab # else use index of UNK_WORD sentence = sentence.strip() if sentence: # sent_uni = [self.unicodeToAscii(token) for token in sentence.split(' ')] s = [self.vocab[token] if token in self.vocab else self.unk_ind for token in sentence.split(' ')] sentences.append(s) with open(labels_file) as f: for sentence in f.read().splitlines(): # replace each label by its index l = [self.tag_map[label] for label in sentence.split(' ')] labels.append(l) print(len(sentences), len(labels)) # checks to ensure there is a tag for each token assert len(labels) == len(sentences) for i in range(len(labels)): assert len(labels[i]) == len(sentences[i]) # storing sentences and labels in dict d d['data'] = sentences d['labels'] = labels d['size'] = len(sentences) def load_data(self, types): """ Loads the data for each type in types from data_dir. Args: types: (list) has one or more of 'train', 'val', 'test' depending on which data is required data_dir: (string) directory containing the dataset Returns: data: (dict) contains the data with labels for each type in types """ data = {} for split in ['train', 'val', 'test']: if split in types: sentences_file = os.path.join(self.data_dir, split, "sentences.txt") labels_file = os.path.join(self.data_dir, split, "labels.txt") data[split] = {} self.load_sentences_labels(sentences_file, labels_file, data[split]) return data def data_iterator(self, data, params, shuffle=False): """ Returns a generator that yields batches data with labels. Batch size is params.batch_size. Expires after one pass over the data. Args: data: (dict) contains data which has keys 'data', 'labels' and 'size' params: (Params) hyperparameters of the training process. 
shuffle: (bool) whether the data should be shuffled Yields: batch_data: (Variable) dimension batch_size x seq_len with the sentence data batch_labels: (Variable) dimension batch_size x seq_len with the corresponding labels """ # make a list that decides the order in which we go over the data- this avoids explicit shuffling of data order = list(range(data['size'])) if shuffle: # random.seed(230) random.shuffle(order) # one pass over data for i in range((data['size']+1)//params.batch_size): # fetch sentences and tags batch_sentences = [data['data'][idx] for idx in order[i*params.batch_size:(i+1)*params.batch_size]] batch_tags = [data['labels'][idx] for idx in order[i*params.batch_size:(i+1)*params.batch_size]] # compute length of longest sentence in batch seql = [len(s) for s in batch_sentences] batch_max_len = max(seql) # prepare a numpy array with the data, initialising the data with pad_ind and all labels with -1 # initialising labels to -1 differentiates tokens with tags from PADding tokens batch_data = self.pad_ind*np.ones((len(batch_sentences), batch_max_len)) batch_labels = -1*np.ones((len(batch_sentences), batch_max_len)) # copy the data to the numpy array for j in range(len(batch_sentences)): cur_len = len(batch_sentences[j]) batch_data[j][:cur_len] = batch_sentences[j] batch_labels[j][:cur_len] = batch_tags[j] # since all data are indices, we convert them to torch LongTensors batch_data, batch_labels = torch.LongTensor(batch_data), torch.LongTensor(batch_labels) # shift tensors to GPU if available if params.cuda: batch_data, batch_labels = batch_data.cuda(), batch_labels.cuda() # convert them to Variables to record operations in the computational graph batch_data, batch_labels = Variable(batch_data), Variable(batch_labels) yield batch_data, batch_labels, seql
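

# A minimal usage sketch, assuming the DataLoader class above is in scope.
# The dataset directory and params.json path are hypothetical, and params.json
# is assumed to define use_glove, embedding_dim and batch_size (in addition to
# the dataset_params written by build_vocab.py).
if __name__ == '__main__':
    params = utils.Params('experiments/base_model/params.json')
    params.cuda = torch.cuda.is_available()

    data_loader = DataLoader('data/small', params)
    data = data_loader.load_data(['train'])

    train_iter = data_loader.data_iterator(data['train'], params, shuffle=True)
    batch_data, batch_labels, seq_lens = next(train_iter)
    print(batch_data.shape, batch_labels.shape, seq_lens[:5])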
from textwrap import dedent

from django.core import management
from mock import mock_open, patch
from six import StringIO

from graphene import ObjectType, Schema, String


@patch("graphene_django.management.commands.graphql_schema.Command.save_json_file")
def test_generate_json_file_on_call_graphql_schema(savefile_mock):
    out = StringIO()
    management.call_command("graphql_schema", schema="", stdout=out)
    assert "Successfully dumped GraphQL schema to schema.json" in out.getvalue()


@patch("json.dump")
def test_json_files_are_canonical(dump_mock):
    open_mock = mock_open()
    with patch("graphene_django.management.commands.graphql_schema.open", open_mock):
        management.call_command("graphql_schema", schema="")

    open_mock.assert_called_once()
    dump_mock.assert_called_once()
    assert dump_mock.call_args[1][
        "sort_keys"
    ], "json.dump() should be called with sort_keys=True"
    assert (
        dump_mock.call_args[1]["indent"] > 0
    ), "output should be pretty-printed by default"


def test_generate_graphql_file_on_call_graphql_schema():
    class Query(ObjectType):
        hi = String()

    mock_schema = Schema(query=Query)

    open_mock = mock_open()
    with patch("graphene_django.management.commands.graphql_schema.open", open_mock):
        management.call_command(
            "graphql_schema", schema=mock_schema, out="schema.graphql"
        )

    open_mock.assert_called_once()

    handle = open_mock()
    handle.write.assert_called_once()

    schema_output = handle.write.call_args[0][0]
    assert schema_output == dedent(
        """\
        schema {
          query: Query
        }

        type Query {
          hi: String
        }
        """
    )
#!/usr/bin/env python3.4 -i ##MIT License ## ##Copyright (c) 2018 Douglas E. Moore ## ##Permission is hereby granted, free of charge, to any person obtaining a copy ##of this software and associated documentation files (the "Software"), to deal ##in the Software without restriction, including without limitation the rights ##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ##copies of the Software, and to permit persons to whom the Software is ##furnished to do so, subject to the following conditions: ## ##The above copyright notice and this permission notice shall be included in all ##copies or substantial portions of the Software. ## ##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ##SOFTWARE. import re # needed for parsing optomux ASCII packets from time import perf_counter,sleep # needed for computing turnaround timeouts from itertools import chain # needed for combining ranges in arg verification import OmuxTTY as tty # communications via serial port import OmuxUTL as utl # logging and TBD import datetime # for timedelta from collections import namedtuple # init the utils such as logging utl._init() class OmuxNET: def __init__(self): self.tty = tty.OmuxTTY() self.turnaround_delay = {} self.timer_resolution = {} self.temperature_probes = {} self.read_as_analog_input = {} def compute_pkt_checksum(self,pkt): """ computes the checksum an optomux command packet for example, the command on.initiate_square_wave(0,15,1,1) generates the packet '>00Z000FL01019E\r' Note: this function assumes the packet does not yet have an appended checksum or \r and therefore is using len(pkt) as the final character to be summed. 
compute_pkt_checksum computes the checksum of '00Z000FL0101' returning the result as an integer """ cks = 0 for i in range(1,len(pkt)): cks = ((cks + ord(pkt[i])) & 255) return cks def compute_data_checksum(self,data): """ computes the checksum an optomux response packet for example, the command on.read_configuration(0) generated the packet '>00jCA\r' The optomux device responded with 'A000FD6\r' compute_data_checksum computes the checksum of '000F' which is the data portion of the response """ cks = 0 for i in range(len(data)): cks = ((cks + ord(data[i])) & 255) return cks """ errors returned by Optomux controller and our emulation of the Optoware driver """ errors = { # Optomux errors returned by the controller or brain 0:'Power-Up Clear Expected - Command Ignored', -1:'Power Up Clear Expected', -2:'Undefined Command', -3:'Checksum Error', -4:'Input Buffer Overrun', -5:'Non-printable ASCII Character Received', -6:'Data Field Error', -7:'Serial Watchdog Time-out', -8:'Invalid Limit Set', # Optoware errors returned when packet is verified -20:'Invalid Command Number', -21:'Invalid Module Position', -22:'Data Range Error', -23:'Invalid First Modifiers', -24:'Invalid Second Modifiers', -25:'Invalid Address', -27:'Not Enough Return Data', -28:'Invalid Return Data', -29:'Turnaround Time Out', -30:'Input Buffer Overrun', -31:'Checksum Error', -33:'Send Error', -34:'Incorrect Command Echo In Four-Pass' } uint16_valid_range = utl.RangeList([range(65536)]) uint15_valid_range = utl.RangeList([range(32768)]) uint8_valid_range = utl.RangeList([range(256)]) uint12_valid_range = utl.RangeList([range(4096)]) """ dcit of valid turn around delay values """ turnaround_delay = { 0:'No Delay', 1:'10 ms', 2:'100 ms', 3:'500 ms' } """ range of valid turn around values """ turnaround_delay_valid_range = \ utl.RangeList([range(len(turnaround_delay))]) """ dict digital watchdog values """ digital_watchdog_delay = { 0:'Disable', 1:'10s, All outputs off', 2:'1m, All outouts off', 3:'10m, All outputs off', 4:'Disable', 5:'10s, 0 output on, rest off', 6:'1m, 0 output on, rest off', 7:'10m, 0 output on, rest off' } """ set of valid digital watchdog delay values """ digital_watchdog_delay_valid_range = \ utl.RangeList([range(len(digital_watchdog_delay)), range(2,65536)]) """ dict analog watchdog times and actions """ analog_watchdog_delay = { 0:'Disable', 1:'10s, Zero Scale', 2:'1m, Zero Scale', 3:'10m, Zero Scale', 4:'Disable', 5:'10s, Full Scale', 6:'1m, Full Scale', 7:'10m, Full Scale' } """ set of valid digital watchdog delay values """ analog_watchdog_delay_valid_range = \ utl.RangeList([range(len(analog_watchdog_delay)), range(20,65536)]) """ dict of protocol pass values """ protocol_passes = { 0:'2 pass', 1:'4 pass' } """ range of valid protocol pass values """ protocol_passes_valid_range = \ utl.RangeList([range(len(protocol_passes))]) """ dict of optomux controller types """ optomux_type = { 0:'Digital', 1:'Analog' } """ range of optomux controller types """ optomux_type_valid_range = \ utl.RangeList([range(len(optomux_type))]) """ set of valid enhanced digital watchdog values """ enhanced_digital_watchdog_delay_valid_range = utl.RangeList(\ [range(0),range(200,65536)]) """ range of valid timer resolutions, note: 0 means the max time of 2.56 seconds """ timer_resolution_valid_range = \ utl.RangeList([range(256)]) """ dict of temperature probe types """ temperature_probe_types = { 0:'no temperature probe', 1:'ICTD probe', 2:'10 ohm RTD probe', 3:'100 ohm RTD probe', 4:'Type J thermocouple', 5:'Type K 
thermocouple', 6:'Type R thermocouple', 7:'Type S thermocouple', 8:'Type T thermocouple', 9:'Type E thermocouple', } """ 'Set Temperature Probe Type' valid range """ temperature_probe_type_valid_range = \ utl.RangeList([range(len(temperature_probe_types))]) """ 'Initiate Square Wave' widths """ square_wave_on_off_time_valid_range = \ uint8_valid_range """ 'Set Time Delay' valid range """ set_time_delay_valid_range = \ uint16_valid_range """ 'Generate N Pulses' pulse width valid range """ generate_pulses_on_time_valid_range = \ uint8_valid_range """ 'Generate N Pulses' valid count """ generate_pulses_n_valid_range = \ uint16_valid_range """ 'Set Number of Averages' valid range """ number_of_averages_valid_range = \ utl.RangeList([range(1,256)]) """ 0 NOP 1 cancels within 1 timer tick otherwise, multiplied by timer resolution """ start_on_off_pulse_valid_range = \ uint16_valid_range """ Analog ADC/DAV values fir in 12 bits """ analog_value_valid_range = \ uint12_valid_range """ time for 1/2 square wave or a sawtooth ramp """ output_waveform_time = { 0: 'Disable Waveform', 1: '2.18 minutes', 2: '3.28 minutes', 3: '4.37 minutes', 4: '5.46 minutes', 5: '6.56 minutes', 6: '7.65 minutes', 7: '8.74 minutes', 8: '1.09 minutes', 9: '32.8 seconds', 10:'21.89 seconds', 11:'16.4 seconds', 12:'13.1 seconds', 13:'10.9 seconds', 14:'9.4 seconds', 15:'8.2 seconds', } """ 'Set Output Waveform' valid periods """ output_waveform_time_valid_range = \ utl.RangeList([range(1,len(output_waveform_time))]) """ 'Set Output Waveform" valid shapes for the Optomux ASCII command """ output_waveform_type = { 0:'No waveform', 1:'Triangle up', 2:'Ramp to FS', 3:'Sawtooth up', 4:'Square wave', 5:'Triangle down', 6:'Ramp to ZS', 7:'Sawtooth down' } """ 'Set Output Waveform' valid types """ output_waveform_type_valid_range = \ utl.RangeList([range(1,len(output_waveform_type))]) """ 'Set Output Waveform' limits """ output_waveform_lo_hi_limit_valid_range = \ uint8_valid_range """ 'Enhanced Output Waveform' valid types for the Optomux ASCII command """ enhanced_output_waveform_type = { 0:'Turn waveform off', 1:'Triangle wave with positive inital slope', 2:'Ramp up—waveform terminates upon reaching the upper limit', 3:'Sawtooth, continuous ramp up', 4:'Square wave (50 % duty cycle)', 5:'Triangle wave with negative initial slope', 6:'Ramp down—waveform termihnates at lower limit', 7:'Sawtooth, continuous ramp down' } """ 'Enhanced Output Waveform' valid range """ enhanced_output_waveform_type_valid_range = \ utl.RangeList([range(1,len(enhanced_output_waveform_type))]) """ list of optomux command format and name strings keyed by optoware command number """ commands = { 0: ('A', 'Power Up Clear'), 1: ('B', 'Reset'), 2: ('C[data]', 'Set Turnaround Delay'), 3: ('D[data]', 'Set Digital Watchdog'), 4: ('E[data]', 'Set Protocol'), 5: ('F', 'Identify Optomux Type'), 6: ('G[positions]', 'Configure Positions'), 7: ('H[positions]', 'Configure As Inputs'), 8: ('I[positions]', 'Configure As Outputs'), 9: ('J[positions]', 'Write Digital Outputs'), 10: ('K[positions]', 'Activate Digital Outputs'), 11: ('L[positions]', 'Deactivate Digital Outputs'), 12: ('M', 'Read On Off Status'), 13: ('N[positions]', 'Set Latch Edges'), 14: ('O[positions]', 'Set Off To On Latches'), 15: ('P[positions]', 'Set On To Off Latches'), 16: ('Q', 'Read Latches'), 17: ('R[positions]', 'Read and Clear Latches'), 18: ('S[positions]', 'Clear Latches'), 19: ('T[positions]', 'Start and Stop Counters'), 20: ('U[positions]', 'Start Counters'), 21: ('V[positions]', 'Stop 
Counters'), 22: ('W[positions]', 'Read Counters'), 23: ('X[positions]', 'Read and Clear Counters'), 24: ('Y[positions]', 'Clear Counters'), 25: ('Z[positions][modifiers][data]', 'Set Time Delay'), 26: ('Z[positions][modifiers][data]', 'Initiate Square Wave'), 27: ('Z[positions][modifiers]', 'Turn Off Time Delay Square Wave'), 28: ('a[positions]', 'Set Pulse Trigger Polarity'), 29: ('b[positions]', 'Trigger On Positive Pulse'), 30: ('c[positions]', 'Trigger On Negative Pulse'), 31: ('d', 'Read Pulse Complete Bits'), 32: ('e[positions]', 'Read Pulse Duration Counters'), 33: ('f[positions]', 'Read and Clear Duration Counters'), 34: ('g[positions]', 'Clear Duration Counters'), 35: ('J[positions][data]', 'Write Analog Outputs'), 36: ('K[positions]', 'Read Analog Outputs'), 37: ('L[positions]', 'Read Analog Inputs'), 38: ('M[positions][data]', 'Average and Read Input'), 39: ('N[positions][data]', 'Set Input Range'), 40: ('O', 'Read Out Of Range Latches'), 41: ('P[positions]', 'Read and Clear Out Of Range Latches'), 42: ('Q[positions]', 'Clear Out Of Range Latches'), 43: ('R[positions][modifiers][data]', 'Set Output Waveform'), 44: ('R[positions][modifiers]', 'Turn Off Existing Waveforms'), 45: ('D[positions][data]', 'Set Analog Watchdog'), 46: ('S[positions][data]', 'Update Analog Outputs'), 47: ('T[positions][data]', 'Start Averaging Inputs'), 48: ('i', 'Read Average Complete Bits'), 49: ('U[positions]', 'Read Averaged Inputs'), 50: ('V[positions][modifiers][data]', 'Enhanced Output Waveform'), 51: ('V[positions][modifiers]', 'Cancel Enhanced Waveforms'), 52: ('g[positions]', 'Calculate Offsets'), 53: ('W[positions][data]', 'Set Offsets'), 54: ('h[positions]', 'Calculate and Set Offsets'), 55: ('X[positions]', 'Calculate Gain Coefficients'), 56: ('Y[positions][data]', 'Set Gain Coefficients'), 57: ('Z[positions]', 'Calculate and Set Gain Coefficients'), 58: ('a[positions]', 'Read Lowest Values'), 59: ('b[positions]', 'Clear Lowest Values'), 60: ('c[positions]', 'Read and Clear Lowest Values'), 61: ('d[positions]', 'Read Peak Values'), 62: ('e[positions]', 'Clear Peak Values'), 63: ('f[positions]', 'Read and Clear Peak Values'), 64: ('M', 'Read Binary On Off Status'), 65: ('J[positions]', 'Write Binary Outputs'), 66: ('Q', 'Read Binary Latches'), 67: ('R[positions]', 'Read and Clear Binary Latches'), 68: ('Z[positions][modifiers][data]', 'High Resolution Square Wave'), 69: ('h[positions]', 'Retrigger Time Delay'), 70: ('j', 'Read Configuration'), 71: ('m[positions][data]', 'Set Enhanced Digital Watchdog'), 72: ('i[positions][modifiers][data]', 'Generate N Pulses'), 73: ('k[positions][data]', 'Start On Pulse'), 74: ('l[positions][data]', 'Start Off Pulse'), 75: ('n[data]', 'Set Timer Resolution'), 76: ('k[positions][data]', 'Set Temperature Probe Type'), 77: ('l[positions]', 'Read Temperature Inputs'), 78: ('m[positions][data]', 'Set Analog Watchdog Timeout'), 79: ('o[positions]', 'Read Average Temperature Inputs'), 80: ('`', 'Date Of Firmware') } """ create a dictionary mapping optoware command description strings to command numbers. These are used in various functions to look up the command number as an index into the commands dictionary. 
""" command_name = {v[1]:k for (k,v) in commands.items()} @utl.logger def verify_address(self,address): """ valid addresses are 0 to 255 """ if isinstance(address,int) and address in range(0,256): return (0,'{:02X}'.format(address)) return ('E',-25) @utl.logger def verify_command(self,command): """ Optoware commands fit range(80) and each command value is a key in the commands dictionary """ # check the command number if isinstance(command,int) and command in self.commands.keys(): return (0,'{:s}'.format(self.commands[command][0][0])) return ('E',-20) @utl.logger def verify_positions(self,command,afmt,**kwargs): """ Positions may be a 16 bit mask or a tuple of range(len(16)) having a value in range(16). """ if 'positions' in afmt: if 'positions' in kwargs: if isinstance(kwargs['positions'],tuple): # at least 1 but not more than 16 points # max from 0 to 16 # min from 0 to 16 if len(kwargs['positions']) in range(1,17) \ and max(kwargs['positions']) in range(16) \ and min(kwargs['positions']) in range(16): mask = 0 for position in kwargs['positions']: # build a bit mask mask |= (1 << position) return (0,'{:04X}'.format(mask)) # allow passing bitmask or single position directly elif isinstance(kwargs['positions'],int): # 'Average and Read Input' takes a single position and # B3000 seems to return 'N01\r' which states in the docs: # "Undefined Command. The command character was not a # legal command character. This error may also be received # if the unit is an older model that cannot recognize a # newer command.* and it does this for the exact example # given in the Optomux guide for this command # >03M70A58\r if command == self.command_name['Average and Read Input']: if kwargs['positions'] in range(16): return (0,'{:X}'.format(kwargs['positions'])) # must fit in uint16 elif kwargs['positions'] in range(1,65536): return (0,'{:04X}'.format(kwargs['positions'])) # some commands assume all 16 points are written if # optomux message doesn't include positions elif kwargs['positions'] == None: return (0,'') return ('E',-21) return (0,'') @utl.logger def verify_single_modifier(self,command,modifier): """ commands which take a singleASCII char or an int can be passed in a list of len 1, or directly as a str or int """ if isinstance(modifier,str): ## 'HIJK' 25: ('Z[positions][modifiers][data]', 'Set Time Delay'), if command == self.command_name['Set Time Delay'] \ and modifier in 'HIJK': return (0,modifier) ## 'L' 26: ('Z[positions][modifiers][data]', 'Initiate Square Wave'), elif command == self.command_name['Initiate Square Wave'] \ and modifier == 'L': return (0,modifier) ## 'G' 27: ('Z[positions][modifiers]', 'Turn Off Time Delay Square Wave'), elif command == self.command_name['Turn Off Time Delay Square Wave'] \ and modifier == 'G': return (0,modifier) ## 'M' 68: ('Z[positions][modifiers][data]', 'High Resolution Square Wave'), elif command == self.command_name['Turn Off Time Delay Square Wave'] \ and modifier == 'M': return (0,modifier) # single int modifier elif isinstance(modifier,int): ## 50: ('V[positions][modifiers][data]', 'Enhanced Output Waveform'), if command == self.command_name['Enhanced Output Waveform'] \ and modifier in self.enhanced_output_waveform_type_valid_range: return (0,'{:X}'.format(modifier)) ## 51: ('V[positions][modifiers]', 'Cancel Enhanced Waveforms'), elif command == self.command_name['Cancel Enhanced Waveforms'] \ and modifier == 0: return (0,'0') ## 44: ('R[positions][modifiers]', 'Turn Off Existing Waveforms'), elif command == self.command_name['Turn Off Existing 
Waveforms'] \ and modifier == 0: return (0,'0') ## 72: ('i[positions][modifiers][data]', 'Generate N Pulses'), elif command == self.command_name['Generate N Pulses'] \ and modifier in self.uint8_valid_range: return (0,'{:02X}'.format(modifier)) return ('E',-21) @utl.logger def verify_double_modifier(self,command,modifiers): """ arguments which take two modifiers """ if command == self.command_name['Set Output Waveform'] \ and modifiers[0] in self.output_waveform_time_valid_range \ and modifiers[1] in self.output_waveform_type_valid_range: return (0,'{:X}{:X}'.format(modifiers[0],modifiers[1])) return ('E',-21) @utl.logger def verify_modifiers(self,command,afmt,**kwargs): """ not all commands require modifiers, so this function compares the optomux command format string and the kwargs to make sure modifiers is present in both. if present, the value or values must be checked for validity as the number and range differ from command to command """ # optomux command format contains 'modifiers' if 'modifiers' in afmt: if 'modifiers' in kwargs: if isinstance(kwargs['modifiers'],tuple): if len(kwargs['modifiers']) == 1: return self.verify_single_modifier(command,kwargs['modifiers'][0]) elif len(kwargs['modifiers']) == 2: return self.verify_double_modifier(command,kwargs['modifiers']) elif isinstance(kwargs['modifiers'],str) \ or isinstance(kwargs['modifiers'],int): return self.verify_single_modifier(command,kwargs['modifiers']) return ('E',-23) return (0,'') @utl.logger def verify_single_data_value(self,command,value): """ convert a single data value to a string appropriate for the command number """ ## 2: ('C[data]', 'Set Turnaround Delay'), if command == self.command_name['Set Turnaround Delay']\ and value in self.turnaround_delay_valid_range: return (0,'{:X}'.format(value)) ## 3: ('D[data]', 'Set Digital Watchdog'), elif command == self.command_name['Set Digital Watchdog']\ and value in self.digital_watchdog_delay_valid_range: return (0,'{:X}'.format(value)) ## 4: ('E[data]', 'Set Protocol'), elif command == self.command_name['Set Protocol']\ and value in self.protocol_passes_valid_range: return (0,'{:X}'.format(value)) ## 25: ('Z[positions][modifiers][data]', 'Set Time Delay'), elif command == self.command_name['Set Time Delay']\ and value in self.set_time_delay_valid_range: return (0,'{:X}'.format(value)) ## 35: ('J[positions][data]', 'Write Analog Outputs'), elif command == self.command_name['Write Analog Outputs']\ and value in self.analog_value_valid_range: return (0,'{:03X}'.format(value)) ## 38: ('M[positions][data]', 'Average and Read Input'), elif command == self.command_name['Average and Read Input']\ and value in self.number_of_averages_valid_range: return (0,'{:02X}'.format(value)) ## 45: ('D[positions][data]', 'Set Analog Watchdog'), elif command == self.command_name['Set Analog Watchdog']: if value in self.analog_watchdog_delay_valid_range: return [0,'{:X}'.format(value)] else: print('Set Analog Watchdog',value) ## 47: ('T[positions][data]', 'Start Averaging Inputs'), elif command == self.command_name['Start Averaging Inputs']\ and value in self.number_of_averages_valid_range: return (0,'{:02X}'.format(value)) ## 71: ('m[positions][data]', 'Set Enhanced Digital Watchdog'), elif command == self.command_name['Set Enhanced Digital Watchdog']\ and value in self.enhanced_digital_watchdog_valid_range: return (0,'{:X}'.format(value)) ## 72: ('i[positions][modifiers][data]', 'Generate N Pulses'), elif command == self.command_name['Generate N Pulses']\ and value in 
self.generate_pulses_n_valid_range: return (0,'{:X}'.format(value)) ## 73: ('k[positions][data]', 'Start On Pulse'), elif command == self.command_name['Start On Pulse']\ and value in self.start_on_off_pulse_valid_range: return (0,'{:X}'.format(value)) ## 74: ('l[positions][data]', 'Start Off Pulse'), elif command == self.command_name['Start Off Pulse']\ and value in self.start_on_off_pulse_valid_range: return (0,'{:X}'.format(value)) ## 75: ('n[data]', 'Set Timer Resolution'), elif command == self.command_name['Set Timer Resolution']\ and value in self.uint8_valid_range: return (0,'{:02X}'.format(value)) ## 76: ('k[positions][data]', 'Set Temperature Probe Type'), elif command == self.command_name['Set Temperature Probe Type']\ and value in self.temperature_probe_type_valid_range: return (0,'{:X}'.format(value)) return ('E',-22) @utl.logger def verify_list_of_data_values(self,command,values): """ commands which require multiple values in the info array but are shared by all points in the position mask """ ## 26: ('Z[positions]L[data]', 'Initiate Square Wave'), if command == self.command_name['Initiate Square Wave']\ and values[0] in range(256) \ and values[1] in range(256): return [0,'{:02X}{:02X}'.format(values[0],values[1])] ## 39: ('N[positions][data]', 'Set Input Range'), elif command == self.command_name['Set Input Range']\ and values[0] in self.analog_value_valid_range\ and values[1] in self.analog_value_valid_range: return [0,'{:04X}{:04X}'.format(values[0],values[1])] ## 43: ('R[positions][modifiers][data]', 'Set Output Waveform'), elif command == self.command_name['Set Output Waveform']\ and values[0] in self.analog_value_valid_range\ and values[1] in self.analog_value_valid_range: # allowing 12 bit value entry because it seems more # reasonable but command only uses the top 8 bits # thus the divide by 16 return [0,'{:02X}{:02X}'.format(values[0]>>4,values[1]>>4)] ## 50: ('V[positions][modifiers][data]', 'Enhanced Output Waveform'), elif command == self.command_name['Enhanced Output Waveform']\ and values[0] in self.analog_value_valid_range\ and values[1] in self.analog_value_valid_range\ and values[2] in range(1,32768): return [0,'{:03X}{:03X}{:04X}'.format(\ values[0],values[1],values[2])] ## 68: ('Z[positions]M[data]', 'High Resolution Square Wave'), elif command == self.command_name['High Resolution Square Wave']\ and values[0] in self.uint8_valid_range\ and values(1) in self.uint8_valid_range: return [0,'{:02x}{:02X}'.format(values[0],values[1])] return ('E',-22) @utl.logger def verify_list_of_data_values_per_point(self,command,positions,values): """ commands which output a value in the info array for each point in the positions mask are handled here """ # if a position mask if isinstance(positions,int): # make a tuple positions = self.positions_mask_to_tuple(positions) # if a single value if isinstance(values,int): # make a tuple values = (values,) # if there is a value for each position if len(positions) == len(values): v = '' ## 46: ('S[positions][data]', 'Update Analog Outputs'), if command == self.command_name['Update Analog Outputs']: for i in reversed(range(len(values))): v += '{:03X}'.format(values[i]) return [0,v] ## 53: ('W[positions][data]', 'Set Offsets'), elif command == self.command_name['Set Offsets']: # the user's guide indicates the highest position index # is the first value returned, but in the Optoware info # array it looks like the lowest index value is first for i in reversed(range(len(values))): v += '{:04X}'.format(values[i]) return [0,v] ## 56: 
('Y[positions][data]', 'Set Gain Coefficients'), elif command == self.command_name['Set Gain Coefficients']: # the user's guide indicates the highest position index # is the first value returned, but in the Optoware info # array it looks like the lowest index value is first for i in reversed(range(len(values))): v += '{:04X}'.format(values[i]) return [0,v] ## 78: ('m[positions][data]', 'Set Analog Watchdog Timeout'), elif command == self.command_name['Set Analog Watchdog Timeout']: # the user's guide indicates the highest position index # is the first value returned, but in the Optoware info # array it looks like the lowest index value is first for i in reversed(range(len(values))): v += '{:03X}'.format(values[i]) return [0,v] return ('E',-22) @utl.logger def verify_data(self,command,afmt,**kwargs): """ not all commands require data, so this function compares the optomux command format string and the kwargs to make sure data is present in both. if present, the value or values must be checked for validity as the number and range differ from command to command """ if 'data' in afmt: if 'data' in kwargs: # commands which take one data value if command in (2,3,4,25,35,38,45,47,71,72,73,74,75,76): if isinstance(kwargs['data'],tuple)\ and len(kwargs['data']) == 1\ and isinstance(kwargs['data'],int): return self.verify_single_data_value(command,kwargs['data'][0]) elif isinstance(kwargs['data'],int): return self.verify_single_data_value(command,kwargs['data']) # commands which take several data elements elif command in (26,39,43,50,68): if isinstance(kwargs['data'],tuple): return self.verify_list_of_data_values(command,kwargs['data']) # commands which take one data value per position elif command in (46,53,56,78): if isinstance(kwargs['data'],tuple): return self.verify_list_of_data_values_per_point(\ command,kwargs['positions'],kwargs['data']) return ('E',-22) return (0,'') def parse_format_string(self,sfmt): """ parse the optomux command format string example commands to be parsed are: 12: ('M', 'Read On Off Status'), 24: ('Y[positions]', 'Clear Counters'), 25: ('Z[positions][modifiers][data]', 'Set Time Delay'), 26: ('Z[positions]L[data]', 'Initiate Square Wave'), 27: ('Z[positions]G', 'Turn Off Time Delay Square Wave'), 73: ('k[positions][data]', 'Start On Pulse'), 75: ('n[data]', 'Set Timer Resolution'), typical would be m.groups(1) = command char m.groups(2) = positions or data m.groups(3) = modifiers or data m.groups(4) = data """ p = re.compile(\ r'^([A-Za-o`])'\ +'\[?(\w*)\]?'\ +'\[?([GHIJKLM]|\w*)\]?'\ +'\[?(\w*)\]?$') m = p.search(sfmt) # remove empty groups return list(filter(lambda x: x, m.groups())) @utl.logger def build_packet(self,address,command,**kwargs): """ Purpose: Verifies the arguments passed based on the command, and appends the required fields as required. 
Parameters: address - optomux device address command - optoware command number **kwargs - command dependent values positions - an int or tuple of points to be changed modifiers - command specific modifier data - command dependent int or tuple of values Note: A single element tuple must have a comma, ie: (0,) is evaluated as a tuple but (0) is evaluated as an int """ pkt = '>' if address not in self.timer_resolution: self.timer_resolution[address] = 256 try: # verify the address rtn = self.verify_address(address) if rtn[0] == 'E': utl.log_error_message('verify_address failed') raise ValueError(rtn[1]) pkt += rtn[1] # verify the command rtn = self.verify_command(command) if rtn[0] == 'E': utl.log_error_message('verify_command failed') raise ValueError(rtn[1]) pkt += rtn[1] # use the command value to get the optomax command format string sfmt = self.commands[command][0] afmt = self.parse_format_string(sfmt) # verify the positions rtn = self.verify_positions(command,afmt,**kwargs) if rtn[0] == 'E': utl.log_error_message('verify_positions failed') raise ValueError(rtn[1]) pkt += rtn[1] # verify the modifiers rtn = self.verify_modifiers(command,afmt,**kwargs) if rtn[0] == 'E': utl.log_error_message('verify_modifiers failed') raise ValueError(rtn[1]) pkt += rtn[1] # verify the data rtn = self.verify_data(command,afmt,**kwargs) if rtn[0] == 'E': utl.log_error_message('verify_data failed') raise ValueError(rtn[1]) pkt += rtn[1] pkt += '{:02X}\r'.format(self.compute_pkt_checksum(pkt)) # '??\r' return (0,pkt) except Exception as ex: return rtn def get_response_timeout(self,address): """ Compute a response timeout so read won't hang forever. It is based on the time at the current baudrate of sending the longest command and receiving the longest response, plus the 'Set Turnaround Delay' time plus a 10ms buffer starting at the current perf_counter reading. """ if address not in self.turnaround_delay: self.turnaround_delay[address] = 0 # likely the longest response packet # would be reading 16 analog values max_opto_msg = \ 'A' + \ '00000000000000000000000000000000000' + \ '00000000000000000000000000000000000' + \ '\r' ta_secs = [0,0.010,0.100,0.500] # time required to send two max sized messages # could scope this # 2 * secs/char * len(maxmsg) # + turnaround delay setting # + current perf_counter # + 10 ms fudge factor to = 2 * 10/self.tty.baud * len(max_opto_msg) \ + ta_secs[self.turnaround_delay[address]] \ + perf_counter() \ + 0.010 return to @utl.logger def send_receive(self,address,command,**kwargs): """ Build the Optomux ASCII packet from the args. Flush the input buffer because the only way to sync a response to a command is that it immediately follows. Then write the packet over the serial port, start a response timer,and collect input until we either timeout or receive a valid response packet. 
""" pkt = self.build_packet(address,command,**kwargs) if pkt[0] != 'E': rxPkt = '' # start with a clean slate self.tty.flush_input_buffer() # send the bytes tb = perf_counter() self.tty.write(pkt[1]) # while response timer running timeout = self.get_response_timeout(address) utl.log_info_message('Response timeout is {:f} secs'.format(timeout-perf_counter())) while perf_counter() < timeout: # if there are bytes waiting to be read n = self.tty.rx_bytes_available() if n > 0: # read all of them s = self.tty.read(n) rxPkt += s # if an optomux response is complete if rxPkt.startswith(('A','N')) and rxPkt.endswith('\r'): rsp = (rxPkt[0:1],rxPkt) utl.log_info_message('Response time took {:f} secs'.format(perf_counter()-tb)) return self.process_response(command,rsp[1]) else: # turnaround timeout err = -29 utl.log_error_message(self.errors[err]) return ('E',err) else: # parrot caller's error err = -pkt[1] utl.log_error_message(self.errors[err]) return pkt @utl.logger def process_response(self,command,rsp): """ Check the response against the command that supposidly initiated it and parse the returned data. """ if rsp.startswith('A'): if rsp == 'A\r': return ('A',0) else: data = rsp[1:-3] chks = int(rsp[-3:-1],16) if self.compute_data_checksum(data) == chks: if command in [5,64,66,67]: return ('A',int(rsp[1:-3],16)) elif command in [12,16,17,31,48,70]: return ('A',self.optomux_data_to_binary_tuple(rsp[1:-3])) # counters use full 16 bits elif command in [22,23,32,33]: return ('A',self.optomux_data_to_counter_tuple(rsp[1:-3])) elif command in [37,38,49,58,60,61,63]: return ('A',self.optomux_data_to_analog_input_tuple(rsp[1:-3])) # reading temperatures elif command in [77,79]: return ('A',self.optomux_data_to_temperature_tuple(rsp[1:-3])) # reading back what we sent to an analog output elif command in [36]: return ('A',self.optomux_data_to_analog_output_tuple(rsp[1:-3])) elif command in [40,41]: # opto returns HHHHLLLL nibbles rsp = (0,self.optomux_data_to_tuple(rsp[1:-3])) # rsp[1][0:16] contains hi limit violation flags # rsp[1][16:32] contains lo limit violation flags # the values are massaged to: # 0 - no limit violation # 1 - lo limit violation # 2 - hi limit violation # 3 - hi and lo limit violation hilo = tuple(rsp[i]<<1|rsp[i+16] for i in range(16)) # change the answer return ('A',hilo) elif command in [80]: # B1, B2, E1, E2'ish # A07/05/05*B9 if rsp[3] == '/': return ('A',rsp[1,-3]) # B3000'ish # A811609019911050100300000B7 else: return ('A',rsp[5:7]+'/'+rsp[7:9]+'/'+rsp[9:11]) else: err = -31 utl.log_error_message(self.errors[err]) return ('E',err) elif rsp.startswith('N'): err = -int(rsp[1:-1],16) utl.log_error_message(self.errors[err]) return ('N',err) else: return rsp ## 0: ('A', 'Power Up Clear'), @utl.logger def power_up_clear(self,address): """ Purpose: Provide a command to clear the Power-Up Clear Expected error. Parameters: address - optomux controller address in range(256) Description: On powerup of a device, a Power-Up Clear Expected error 'N00\r' will be returned in response to the first Optomux command sent. There is no harm in sending the 'Power Up Clear' command first if one knows that a power loss has occured. Once the device has sent the 'N00\r' error, it responds normally to further commands. Just resend the rejected command. This command has NO effect on the Optomux unit’s operation or setup—the Power-up Clear Expected error provides an indication to the host that there has been a power failure and that Optomux has been reset to power-up conditions (see page 44). 
""" cmd = self.command_name['Power Up Clear'] return self.send_receive(address,cmd) ## 1: ('B', 'Reset'), @utl.logger def reset(self,address): """ Purpose: Resets the Optomux unit to power-up conditions. Parameters: address - optomux controller address in range(256) Description: Digital device conditions on reset: All outputs turned off All points then configured as inputs Protocol as set by jumper B10 (not on B3000) Watchdog timer disabled Turnaround delay = 0 Counters/duration timers cancelled Latches cleared Timer resolution = 10 ms. 0 scale written to all output points Analog device conditions on reset: All points then configured as inputs Protocol as set by jumper B10 (not on B3000) Watchdog timer disabled Turnaround delay = 0 All offsets set to 0 All gain coefficients set to 1 All averaging cancelled All temperature probe types cancelled NOTE: After using a Reset command, the Optomux application should wait before trying to communicate the reset device. For a B1, E1, B2, or E2 brain board, wait about 100ms. For a B3000, wait about 800 ms. If you are using a B3000 brain, a Reset command affects all four virtual addresses within the brain; you cannot reset just one. After sending a Reset command to a B3000 brain, you must send a Power-Up Clear command. Otherwise, the brain responds with an error. """ cmd = self.command_name['Reset'] return self.send_receive(address,cmd) ## 2: ('C[data]', 'Set Turnaround Delay'), @utl.logger def set_turnaround_delay(self,address,data=0): """ Purpose: Allow the host to tell the Optomux unit to delay before responding to commands sent from host. This command is helpful in some half-duplex radio modem systems. Parameters: address - optomux controller address in range(256) data - one of 4 delays in range(len(turnaround_delay)) turnaround_delay = { 0:'No Delay', 1:'10 ms', 2:'100 ms', 3:'500 ms' } Description: If no delay is specified, delay = 0 is assumed. On power-up, delay = 0. """ kwargs = {'data':data} cmd = self.command_name['Set Turnaround Delay'] rsp = self.send_receive(address,cmd,**kwargs) # change local setting if command has been acked if rsp[0] == 'A': self.turnaround_delay[address] = data return rsp ## 3: ('D[data]', 'Set Digital Watchdog'), @utl.logger def set_digital_watchdog(self,address,positions,action=0): """ Purpose: Instructs a digital Optomux unit to monitor activity on the communications link and to take a predetermined action if there is no activity within a specified time. No activity means no activity of any kind on the serial link, or no communication with this brain board on the Ethernet link. Parameters: address - optomux device address in range(256) positions - tuple of up to 16 ints in range(16), or a unit16 mask in range(65536) action - int in range(8) corresponding to these preset actions 0 -- Watchdog disabled 1 10 seconds Turn all outputs OFF 2 1 minute Turn all outputs OFF 3 10 minutes Turn all outputs OFF 4 -- Watchdog disabled 5 10 seconds Turn output 0 on, all other outputs OFF 6 1 minute Turn output 0 on, all other outputs OFF 7 10 minutes Turn output 0 on, all other outputs OFF Notes: Watchdog is disabled on power-up. The Optomux unit will respond with 'N06\r' to the first command following a watchdog timeout as a warning to let the host know a watchdog timeout occurred, unless the command is a'Power Up Clear' which get's an 'A\r'. 
""" kwargs = { 'positions':positions, 'data':action } cmd = self.command_name['Set Digital Watchdog'] return self.send_receive(address,cmd,**kwargs) ## 4: ('E[data]', 'Set Protocol'), @utl.logger def set_protocol(self,address,protocol=0): """ Purpose: Some Optomux boards allow a diagnostic 4 pass communications mode. In the 4 pass mode, the brain replaces the leading '>' with a 'A' in the command and returns it. The host knows the command made it through and can then send an 'E' command for execute. Parameters: address - optomux device address in range(256) protocol - in range (2) 0 2-pass protocol 1 4-pass protocol Notes: 4-pass is used for diagnostics only. Does not apply to B3000 Optomux units. """ kwargs = { 'data':protocol } cmd = self.command_name['Set Protocol'] return self.send_receive(address,cmd,**kwargs) ## 5: ('F', 'Identify Optomux Type'), @utl.logger def identify_optomux_type(self,address): """ Purpose: Instructs the Optomux unit to identify itself as digital or analog. Parameters: address - device address in range(256) Returns: ('A',0) = digital ('A',1) = analog B3000 Example: >>> on.identify_type(0) ('A', 0) >>> on.identify_type(2) ('A', 1) """ cmd = self.command_name['Identify Optomux Type'] return self.send_receive(address,cmd) ## 6: ('G[positions]', 'Configure Positions'), @utl.logger def configure_positions(self,address,positions): """ Purpose: Points to function as outputs. Points not specified here are configured as inputs. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to be made outputs Remarks: If the configuration for any point is changed by this command, then any time delay, latch, etc. is cleared. On power up, all points are configured as inputs. """ kwargs = {'positions':positions} cmd = self.command_name['Configure Positions'] return self.send_receive(address,cmd,**kwargs) ## 7: ('H[positions]', 'Configure As Inputs'), @utl.logger def configure_as_inputs(self,address,positions): """ Purpose: Positions array contains points to function as inputs. Points not specified are left unchanged. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to be made inputs Remarks: If the configuration for any point is changed by this command, then any time delay, latch, etc. is cleared. On power up, all points are configured as inputs. """ kwargs = {'positions':positions} cmd = self.command_name['Configure As Inputs'] return self.send_receive(address,cmd,**kwargs) ## 8: ('I[positions]', 'Configure As Outputs'), @utl.logger def configure_as_outputs(self,address,positions): """ Purpose: Positions array contains points to function as outputs. Points not specified are left unchanged. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to be made outputs Remarks: If the configuration for any point is changed by this command, then any time delay, latch, etc. is cleared. On power up, all points are configured as inputs. """ kwargs = {'positions':positions} cmd = self.command_name['Configure As Outputs'] return self.send_receive(address,cmd,**kwargs) ## 9: ('J[positions]', 'Write Digital Outputs'), @utl.logger def write_digital_outputs(self,address,positions=None): """ Purpose: Points specified in positions array are turned on, those not specified are turned off. 
Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to be turned on Remarks: Time delays, if set, are implemented when this command is executed. Points that have been configured to function as inputs are not affected by this command. Note: Optomux manual says all outputs are turned on if positions are not specified. Here positions=None is used for that. """ kwargs = {'positions':positions} cmd = self.command_name['Write Digital Outputs'] return self.send_receive(address,cmd,**kwargs) ## 10: ('K[positions]', 'Activate Digital Outputs'), @utl.logger def activate_digital_outputs(self,address,positions=None): """ Purpose: Points specified in positions array are turned on, those not specified are unaffected. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to turned on Remarks: Time delays, if set, are implemented when this command is executed. Points that have been configured to function as inputs are not affected by this command. Note: Optomux manual says all outputs are turned on if positions are not specified. Here a positions=None is used for that. """ kwargs = {'positions':positions} cmd = self.command_name['Activate Digital Outputs'] return self.send_receive(address,cmd,**kwargs) ## 11: ('L[positions]', 'Deactivate Digital Outputs'), @utl.logger def deactivate_digital_outputs(self,address,positions=None): """ Purpose: Points specified in positions array are turned off, those not specified are unaffected. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to turned off Remarks: Time delays, if set, are implemented when this command is executed. Points that have been configured to function as inputs are not affected by this command. Note: Optomux manual says all outputs are turned off if positions are not specified. Here a positions=None is used for that. """ kwargs = {'positions':positions} cmd = self.command_name['Deactivate Digital Outputs'] return self.send_receive(address,cmd,**kwargs) ## 12: ('M', 'Read On Off Status'), @utl.logger def read_on_off_status(self,address): """ Purpose: Returns the current on/off status of all 16 points. Parameters: address: optomux device address in range(256) Returns: A tuple with a 1 (on) or 0 (off) in each position. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.read_on_off_status(0) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.activate_digital_outputs(0,15) ('A', 0) >>> on.read_on_off_status(0) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ cmd = self.command_name['Read On Off Status'] return self.send_receive(address,cmd) ## 13: ('N[positions]', 'Set Latch Edges'), @utl.logger def set_latch_edges(self,address,positions): """ Purpose: All points specified in positions will latch ON-to-OFF, others are OFF-to_ON. Parameters: address - optomux device address in range(256) Positions: Input points to latch on ON-to-OFF transitions. All other points remain unchanged. Description: The default on power up is OFF-to-ON. Points configured as outputs are not affected by this command. On power up, all points are set to latch on OFF-to-ON transitions. 
Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.read_on_off_status(0) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.set_latch_edges(4,12) ('A', 0) >>> on.activate_digital_outputs(0,15) ('A', 0) >>> on.read_on_off_status(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.deactivate_digital_outputs(0,15) ('A', 0) >>> on.read_on_off_status(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Set Latch Edges'] return self.send_receive(address,cmd,**kwargs) ## 14: ('O[positions]', 'Set Off To On Latches'), @utl.logger def set_off_to_on_latches(self,address,positions): """ Purpose: Sets input points to latch on OFF-to-ON transitions. Parameters: address - optomux device address in range(256) Positions: Input points to latch on OFF-to-ON transitions. All other points remain unchanged. Description: On power up, all points are set to latch OFF-to-ON transitions. Points configured as outputs are not affected by this command. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.clear_latches(4) ('A', 0) >>> on.set_on_to_off_latches(4,15) ('A', 0) >>> on.set_off_to_on_latches(4,12) ('A', 0) >>> on.read_latches(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.activate_digital_outputs(0,15) ('A', 0) >>> on.read_on_off_status(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.deactivate_digital_outputs(0,15) ('A', 0) >>> on.read_on_off_status(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Set Off To On Latches'] return self.send_receive(address,cmd,**kwargs) ## 15: ('P[positions]', 'Set On To Off Latches'), @utl.logger def set_on_to_off_latches(self,address,positions): """ Purpose: Sets input points to latch on ON-to-OFF transitions. Parameters: address - optomux device address in range(256) Positions: Input points to latch on ON-to-OFF transitions. All other points remain unchanged. Description: On power up, all points are set to latch OFF-to-ON transitions. Points configured as outputs are not affected by this command. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.set_on_to_off_latches(4,3) ('A', 0) >>> on.clear_latches(4) ('A', 0) >>> on.read_latches(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.activate_digital_outputs(0,15) ('A', 0) >>> on.read_on_off_status(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.deactivate_digital_outputs(0,15) ('A', 0) >>> on.read_on_off_status(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Set On To Off Latches'] return self.send_receive(address,cmd,**kwargs) ## 16: ('Q', 'Read Latches'), @utl.logger def read_latches(self,address): """ Purpose: Returns data indicating which of the inputs have latched. 
Parameters: address - optomux device address in range(256) positions - Input points to latch on ON-to-OFF transitions. All other points remain unchanged. Description: This command does not clear the latches. Subsequent Read Latches commands will return the same results. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.activate_digital_outputs(0,15) ('A', 0) >>> on.read_latches(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ cmd = self.command_name['Read Latches'] return self.send_receive(address,cmd) ## 17: ('R[positions]', 'Read and Clear Latches'), @utl.logger def read_and_clear_latches(self,address,positions=None): """ Purpose: Returns data indicating which inputs have latched and then resets specified latches. Parameters: address - optomux device address in range(256) positions - Input points to latch on ON-to-OFF transitions. All other points remain unchanged. Description: This command returns the latch status for all points. It clears latches only for the specified points. All other latches remain unchanged. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.activate_digital_outputs(0,15) ('A', 0) >>> on.read_latches(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_and_clear_latches(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.read_latches(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Read and Clear Latches'] return self.send_receive(address,cmd,**kwargs) ## 18: ('S[positions]', 'Clear Latches'), @utl.logger def clear_latches(self,address,positions=None): """ Purpose: Clear latches specified inpositions array. Parameters: address - optomux device address in range(256) positions - Input latches to clear. Description: This command clears the specified input latches. All other latches remain unchanged. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.read_latches(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.activate_digital_outputs(0,15) ('A', 0) >>> on.read_latches(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.clear_latches(4) ('A', 0) >>> on.read_latches(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Clear Latches'] return self.send_receive(address,cmd,**kwargs) ## 19: ('T[positions]', 'Start and Stop Counters'), @utl.logger def start_and_stop_counters(self,address,positions): """ Purpose: Positions specifies counters to be enabled. Others are disabled. Parameters: address - optomux device address in range(256) positions - Input counters to enable Description: This command has no effect on the stored count. Counting can start or resume at any time. The maximum count is 65,535; after that, the count resets to 0. Notes: Frequencies up to 400 Hz (50% duty cycle, minimum pulse width of 1.25 milliseconds) can be counted. Using the Generate N Pulses command 72 (i) will degrade the maximum counting frequency to about 350 Hz. 
Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.start_and_stop_counters(4,0x0005) ('A', 0) >>> on.generate_n_pulses(0,15,25,10) ('A', 0) >>> on.read_counters(4) ('A', (10, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.start_and_stop_counters(4,0x000A) ('A', 0) >>> on.generate_n_pulses(0,15,25,15) ('A', 0) >>> on.read_counters(4) ('A', (10, 15, 10, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Start and Stop Counters'] return self.send_receive(address,cmd,**kwargs) ## 20: ('U[positions]', 'Start Counters'), @utl.logger def start_counters(self,address,positions): """ Purpose: Positions specifies counters to be enabled. Others are unaffected. Parameters: address - optomux device address in range(256) positions - Input counters to enable. Description: This command has no effect on the stored count. Counting can start or resume at any time. The maximum count is 65,535; after that, the count resets to 0. Notes: Frequencies up to 400 Hz (50% duty cycle, minimum pulse width of 1.25 milliseconds) can be counted. Using the Generate N Pulses command 72 (i) will degrade the maximum counting frequency to about 350 Hz. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.start_counters(4,0x0005) ('A', 0) >>> on.read_counters(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.generate_n_pulses(0,15,25,10) ('A', 0) >>> on.read_counters(4) ('A', (10, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.start_counters(4,0x000A) ('A', 0) >>> on.generate_n_pulses(0,15,25,10) ('A', 0) >>> on.read_counters(4) ('A', (20, 10, 20, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Start Counters'] return self.send_receive(address,cmd,**kwargs) ## 21: ('V[positions]', 'Stop Counters'), @utl.logger def stop_counters(self,address,positions): """ Purpose: Positions specifies counters to be disabled. Others are unaffected. Parameters: address - optomux device address in range(256) positions - Input counters to disable. Description: This command has no effect on the stored count. Counting can start or resume at any time. The maximum count is 65,535; after that, the count resets to 0. Notes: Frequencies up to 400 Hz (50% duty cycle, minimum pulse width of 1.25 milliseconds) can be counted. Using the Generate N Pulses command 72 (i) will degrade the maximum counting frequency to about 350 Hz. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.read_counters(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.start_counters(4,15) ('A', 0) >>> on.generate_n_pulses(0,15,25,10) ('A', 0) >>> on.read_counters(4) ('A', (10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.stop_counters(4,0x000a) ('A', 0) >>> on.generate_n_pulses(0,15,25,10) ('A', 0) >>> on.read_counters(4) ('A', (20, 10, 20, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Stop Counters'] return self.send_receive(address,cmd,**kwargs) ## 22: ('W[positions]', 'Read Counters'), @utl.logger def read_counters(self,address,positions=None): """ Purpose: Positions specifies counters to be read. Counts are unaffected. Parameters: address - optomux device address in range(256) positions - Input counters to read. Description: This command has no effect on the stored count. 
Counting can start or resume at any time. The maximum count is 65,535; after that, the count resets to 0. Notes: Frequencies up to 400 Hz (50% duty cycle, minimum pulse width of 1.25 milliseconds) can be counted. Using the Generate N Pulses command 72 (i) will degrade the maximum counting frequency to about 350 Hz. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.read_counters(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.start_counters(4,15) ('A', 0) >>> on.generate_n_pulses(0,15,25,10) ('A', 0) >>> on.read_counters(4) ('A', (10, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Read Counters'] return self.send_receive(address,cmd,**kwargs) ## 23: ('X[positions]', 'Read and Clear Counters'), @utl.logger def read_and_clear_counters(self,address,positions=None): """ Purpose: Positions specifies counters to be read then cleared. Other counts are unaffected. Parameters: address - optomux device address in range(256) positions - Input counters to read/clear. Description: This command has no effect on the stored count. Counting can start or resume at any time. The maximum count is 65,535; after that, the count resets to 0. Notes: Frequencies up to 400 Hz (50% duty cycle, minimum pulse width of 1.25 milliseconds) can be counted. Using the Generate N Pulses command 72 (i) will degrade the maximum counting frequency to about 350 Hz. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.start_counters(4,(0,1,2,3)) ('A', 0) >>> on.generate_n_pulses(0,(0,1,2,3),25,10) ('A', 0) >>> on.read_and_clear_counters(4,(0,1,2,3)) ('A', (10, 10, 10, 10)) >>> on.read_counters(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Read and Clear Counters'] return self.send_receive(address,cmd,**kwargs) ## 24: ('Y[positions]', 'Clear Counters'), @utl.logger def clear_counters(self,address,positions=None): """ Purpose: Positions specifies counters to be cleared. Other counts are unaffected. Parameters: address - optomux device address in range(256) positions - Input counters to clear. Description: This command has no effect on the stored count. Counting can start or resume at any time. The maximum count is 65,535; after that, the count resets to 0. Notes: Frequencies up to 400 Hz (50% duty cycle, minimum pulse width of 1.25 milliseconds) can be counted. Using the Generate N Pulses command 72 (i) will degrade the maximum counting frequency to about 350 Hz. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.start_counters(4,(0,1,2,3)) ('A', 0) >>> on.generate_n_pulses(0,(0,1,2,3),25,10) ('A', 0) >>> on.read_counters(4,(0,1,2,3)) ('A', (10, 10, 10, 10)) >>> on.clear_counters(4,(0,1,2,3)) ('A', 0) >>> on.read_counters(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Clear Counters'] return self.send_receive(address,cmd,**kwargs) ## 25: ('Z[positions][modifiers][data]', 'Set Time Delay'), @utl.logger def pulse_on(self,address,positions,ticks): """ Purpose: Arm the outputs specified in positions to generate a single on pulse of duration ticks * 10ms * timer resolution when the output is next activated. 
Parameters: address - optomux device address in range(256) positions - Outputs to pulse on ticks - number of timer ticks Description: Modifier in optomux ASCII command is 'H'. Desired time = delay length × timer resolution × 10 ms Valid delay lengths are 0 through 65,535. A 0 value is equal to a delay length of 65,535 (FFFF). """ kwargs = { 'positions':positions, 'modifiers':'H', 'data':ticks } cmd = self.command_name['Set Time Delay'] return self.send_receive(address,cmd,**kwargs) @utl.logger def delay_on(self,address,positions,ticks): """ Purpose: Arm the outputs specified in positions to delay the next OFF-to-ON request by ticks * 10ms * timer resolution. Parameters: address - optomux device address in range(256) positions - Outputs to delay ticks - number of timer ticks Description: Modifier in optomux ASCII command is 'I'. Desired time = delay length × timer resolution × 10 ms Valid delay lengths are 0 through 65,535. A 0 value is equal to a delay length of 65,535 (FFFF). """ kwargs = { 'positions':positions, 'modifiers':'I', 'data':ticks } cmd = self.command_name['Set Time Delay'] return self.send_receive(address,cmd,**kwargs) @utl.logger def pulse_off(self,address,positions,ticks): """ Purpose: Arm the outputs specified in positions to generate a single off pulse of duration ticks * 10ms * timer resolution when the output is next activated. Parameters: address - optomux device address in range(256) positions - Outputs to pulse off ticks - number of timer ticks Description: Modifier in optomux ASCII command is 'J'. Desired time = delay length × timer resolution × 10 ms Valid delay lengths are 0 through 65,535. A 0 value is equal to a delay length of 65,535 (FFFF). """ kwargs = { 'positions':positions, 'modifiers':'J', 'data':ticks } cmd = self.command_name['Set Time Delay'] return self.send_receive(address,cmd,**kwargs) @utl.logger def delay_off(self,address,positions,ticks): """ Purpose: Arm the outputs specified in positions to delay the next ON-to-OFF request by ticks * 10ms * timer resolution. Parameters: address - optomux device address in range(256) positions - Outputs to delay ticks - number of timer ticks Description: Modifier in optomux ASCII command is 'K'. Desired time = delay length × timer resolution × 10 ms Valid delay lengths are 0 through 65,535. A 0 value is equal to a delay length of 65,535 (FFFF). """ kwargs = { 'positions':positions, 'modifiers':'K', 'data':ticks } cmd = self.command_name['Set Time Delay'] return self.send_receive(address,cmd,**kwargs) ## 26: ('Z[positions][modifiers][data]', 'Initiate Square Wave'), @utl.logger def initiate_square_wave(self,address,positions,on_ticks,off_ticks): """ Purpose: Starts a continuous square wave at specified output points. Parameters: address - optomux device address in range(256) positions - points to output a square wave on_ticks - number of timer ticks to stay on off_ticks - number of timer ticks to stay off Description: The square wave continues until it is turned off using command 27 (Z) or modified with Set Time Delay command 25 (Z) on page 81 or High Resolution Square Wave command 68 (Z) on page 85. Current timer resolution (Set Timer Resolution command 75) and values in the Info Array or [data] fields are used to calculate on and off times of the square wave, as described below. On time = timer resolution x 256 x first element value Off time = timer resolution x 256 x second element value. Values can be between 0 and 255 (0 = 256).
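            As a worked example (assuming the default 10 ms timer resolution): on_ticks=1 and off_ticks=1 give an on time and an off time of 10 ms x 256 x 1 = 2.56 seconds each, i.e. a period of about 5.12 seconds.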
Maximum for on and off times is 2.56 seconds x 256 x 256 (2796.20 minutes or 46.6 hours). Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.start_counters(4,15) ('A', 0) >>> on.initiate_square_wave(0,15,1,1) ('A', 0) >>> on.read_counters(4,15) ('A', (5, 5, 5, 5)) >>> on.stop_counters(4,(0,2)) ('A', 0) >>> on.read_counters(4) ('A', (8, 10, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.turn_off_time_delay_or_square_wave(0,(1,3)) ('A', 0) >>> on.read_counters(4,15) ('A', (8, 21, 8, 21)) >>> on.read_counters(4,15) ('A', (8, 21, 8, 21)) >>> on.start_counters(4,(0,2)) ('A', 0) >>> on.read_counters(4,15) ('A', (12, 21, 12, 21)) >>> on.turn_off_time_delay_or_square_wave(0,15) ('A', 0) >>> on.read_and_clear_counters(4,15) ('A', (16, 21, 16, 21)) >>> on.read_counters(4,15) ('A', (0, 0, 0, 0)) """ cmd = self.command_name['Initiate Square Wave'] kwargs = {'positions':positions, 'modifiers':'L', 'data':(on_ticks,off_ticks)} return self.send_receive(address,cmd,**kwargs) ## 27: ('Z[positions][modifiers]', 'Turn Off Time Delay Square Wave'), @utl.logger def turn_off_time_delay_or_square_wave(self,address,positions): """ Purpose: Turns off existing time delay on specified outputs. Parameters: address - optomux device address in range(256) positions - outputs which are to discontinue square wave Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.reset(4) ('A', 0) >>> on.start_counters(4,15) ('A', 0) >>> on.initiate_square_wave(0,15,1,1) ('A', 0) >>> on.read_counters(4,15) ('A', (5, 5, 5, 5)) >>> on.turn_off_time_delay_or_square_wave(0,(1,3)) ('A', 0) >>> on.read_counters(4,15) ('A', (9, 7, 9, 7)) >>> on.read_counters(4,15) ('A', (11, 7, 11, 7)) >>> on.turn_off_time_delay_or_square_wave(0,(0,2)) ('A', 0) >>> on.read_counters(4,15) ('A', (14, 7, 14, 7)) >>> on.read_counters(4,15) ('A', (14, 7, 14, 7)) """ kwargs = {'positions':positions, 'modifiers':'G'} cmd = self.command_name['Turn Off Time Delay Square Wave'] return self.send_receive(address,cmd,**kwargs) ## 28: ('a[positions]', 'Set Pulse Trigger Polarity'), @utl.logger def set_pulse_trigger_polarity(self,address,positions): """ Purpose: Configure the specified inputs to measure the duration of the next ON pulse. The others will measure off pulse duration. Parameters: address - optomux device address in range(256) positions - specified inputs measure ON time, others measure OFF time. Description: This command measures the duration of the first pulse of the appropriate level and stores the result to be recalled. As soon as a complete pulse is measured for a point, a pulse complete bit is set. Send a “Read Pulse Complete Bits” command to find out when the duration is done. The resolution for the duration counters is dependent upon the current timer resolution (see “Set Timer Resolution” on page 52). The default value of 10 ms allows you to measure a pulse of up to 10.92 minutes. At the lowest resolution (2.56 seconds as opposed to 0.01 seconds), you could measure a pulse of up to 2,796.16 minutes (46.6 hours). This command does not clear preexisting duration counter values or pulse complete bits. If a point’s pulse complete bit has been previously set, no measurements are made until that point’s pulse complete bit and duration counter are cleared by sending either “Clear Duration Counters” or “Read and Clear Duration Counters”. 
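        Example:
            A minimal sketch (illustrative, responses as normally acknowledged; 'on'
            is the driver instance used in the other examples and address 4 is the
            input-side brain from the wiring convention above). Points 0 and 1 will
            time their next ON pulse; all other inputs time their next OFF pulse:
            >>> on.set_pulse_trigger_polarity(4, 0x0003)
            ('A', 0)
            Then poll 'Read Pulse Complete Bits' and read the duration counters as
            shown in the later examples.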
""" kwargs = {'positions':positions} cmd = self.command_name['Set Pulse Trigger Polarity'] return self.send_receive(address,cmd,**kwargs) ## 29: ('b[positions]', 'Trigger On Positive Pulse'), @utl.logger def trigger_on_positive_pulse(self,address,positions): """ Purpose: Configure the specified inputs to measure the duration of the next ON pulse. The others are unaffected. Parameters: address - optomux device address in range(256) positions - specified inputs ON time is measured. Description: This command measures the duration of the first pulse of the appropriate level and stores the result to be recalled. As soon as a complete pulse is measured for a point, a pulse complete bit is set. Send a “Read Pulse Complete Bits” command to find out when the duration is done. The resolution for the duration counters is dependent upon the current timer resolution (see “Set Timer Resolution” on page 52). The default value of 10 ms allows you to measure a pulse of up to 10.92 minutes. At the lowest resolution (2.56 seconds as opposed to 0.01 seconds), you could measure a pulse of up to 2,796.16 minutes (46.6 hours). This command does not clear preexisting duration counter values or pulse complete bits. If a point’s pulse complete bit has been previously set, no measurements are made until that point’s pulse complete bit and duration counter are cleared by sending either “Clear Duration Counters” or “Read and Clear Duration Counters”. """ kwargs = {'positions':positions} cmd = self.command_name['Trigger On Positive Pulse'] return self.send_receive(address,cmd,**kwargs) ## 30: ('c[positions]', 'Trigger On Negative Pulse'), @utl.logger def trigger_on_negative_pulse(self,address,positions): """ Purpose: Configure the specified inputs to measure the duration of the next OFF pulse. The others are unaffected. Parameters: address - optomux device address in range(256) positions - specified inputs ON time is measured. Description: This command measures the duration of the first pulse of the appropriate level and stores the result to be recalled. As soon as a complete pulse is measured for a point, a pulse complete bit is set. Send a “Read Pulse Complete Bits” command to find out when the duration is done. The resolution for the duration counters is dependent upon the current timer resolution (see “Set Timer Resolution” on page 52). The default value of 10 ms allows you to measure a pulse of up to 10.92 minutes. At the lowest resolution (2.56 seconds as opposed to 0.01 seconds), you could measure a pulse of up to 2,796.16 minutes (46.6 hours). This command does not clear preexisting duration counter values or pulse complete bits. If a point’s pulse complete bit has been previously set, no measurements are made until that point’s pulse complete bit and duration counter are cleared by sending either “Clear Duration Counters” or “Read and Clear Duration Counters”. """ kwargs = {'positions':positions} cmd = self.command_name['Trigger On Negative Pulse'] return self.send_receive(address,cmd,**kwargs) ## 31: ('d', 'Read Pulse Complete Bits'), @utl.logger def read_pulse_complete_bits(self,address): """ Purpose: Allows the host computer to determine which points have finished measuring pulse duration. Parameters: address - optomux device address in range(256) Description: When a duration measurement completes, a Pulse Complete bit is set which causes the duration counter to hold it's value. This command does not clear preexisting duration counter values or pulse complete bits. 
If a point’s pulse complete bit has been previously set, no measurements are made until that point’s pulse complete bit and duration counter are cleared by sending either “Clear Duration Counters” or “Read and Clear Duration Counters”. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.set_timer_resolution(0,1) ('A', 0) >>> on.set_timer_resolution(4,1) ('A', 0) >>> on.trigger_on_positive_pulse(4,15) ('A', 0) >>> on.clear_duration_counters(4,15) ('A', 0) >>> on.read_pulse_duration_counters(4,15) ('A', (0, 0, 0, 0)) >>> on.start_on_pulse(0,15,100) ('A', 0) >>> on.read_pulse_complete_bits(4) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> on.clear_duration_counters(4,15) ('A', 0) >>> on.read_pulse_complete_bits(4) ('A', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) >>> """ cmd = self.command_name['Read Pulse Complete Bits'] return self.send_receive(address,cmd) ## 32: ('e[positions]', 'Read Pulse Duration Counters'), @utl.logger def read_pulse_duration_counters(self,address,positions): """ Purpose: Allows the host to read the current value of the pulse duration counters specified in positions. Parameters: address - optomux device address in range(256) positions - specified inputs whose duration counter is to be read. Description: Values are returned in the current timer resolution For example, a value of 2 equals a pulse length of: 2 x timer resolution ). Pulses up to 10.92 minutes can be timed with a resolution of 10 ms. If this command is used before the pulse is finished, the current duration is returned. If the final duration is desired, use the read_pulse_complete_bits command to poll the device. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.set_timer_resolution(0,1) ('A', 0) >>> on.set_timer_resolution(4,1) ('A', 0) >>> on.trigger_on_positive_pulse(4,15) ('A', 0) >>> on.read_and_clear_pulse_duration_counters(4,15) ('A', (0, 0, 0, 0)) >>> on.start_on_pulse(0,15,100) ('A', 0) >>> on.read_pulse_duration_counters(4,15) ('A', (99, 100, 99, 100)) >>> on.clear_duration_counters(4,15) ('A', 0) >>> on.read_pulse_duration_counters(4,15) ('A', (0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Read Pulse Duration Counters'] return self.send_receive(address,cmd,**kwargs) ## 33: ('f[positions]', 'Read and Clear Duration Counters'), @utl.logger def read_and_clear_pulse_duration_counters(self,address,positions): """ Purpose: Allows the host to read/clear the current value of the pulse duration counters specified in positions. Parameters: address - optomux device address in range(256) positions - specified inputs whose duration counter is to be read/cleared. Description: Values are returned in the current timer resolution For example, a value of 2 equals a pulse length of: 2 x timer resolution ). Pulses up to 10.92 minutes can be timed with a resolution of 10 ms. If this command is used before the pulse is finished, the current duration is returned. If the final duration is desired, use the read_pulse_complete_bits command to poll the device. 
Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.set_timer_resolution(0,1) ('A', 0) >>> on.set_timer_resolution(4,1) ('A', 0) >>> on.trigger_on_positive_pulse(4,15) ('A', 0) >>> on.read_and_clear_pulse_duration_counters(4,15) ('A', (0, 0, 0, 0)) >>> on.start_on_pulse(0,15,100) ('A', 0) >>> on.read_pulse_duration_counters(4,15) ('A', (99, 100, 99, 100)) >>> on.clear_duration_counters(4,15) ('A', 0) >>> on.read_pulse_duration_counters(4,15) ('A', (0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Read and Clear Duration Counters'] return self.send_receive(address,cmd,**kwargs) ## 34: ('g[positions]', 'Clear Duration Counters'), @utl.logger def clear_duration_counters(self,address,positions): """ Purpose: Allows the host to clear the current value of the pulse duration counters specified in positions. Parameters: address - optomux device address in range(256) positions - specified inputs whose duration counter is to be cleared. Description: Counters not specified are left unchanged. Example: Address 0 outputs (0,1,2,3) wired to Address 4 inputs (0,1,2,3) >>> on.set_timer_resolution(0,1) ('A', 0) >>> on.set_timer_resolution(4,1) ('A', 0) >>> on.trigger_on_positive_pulse(4,15) ('A', 0) >>> on.read_and_clear_pulse_duration_counters(4,15) ('A', (0, 0, 0, 0)) >>> on.start_on_pulse(0,15,100) ('A', 0) >>> on.read_pulse_duration_counters(4,15) ('A', (99, 100, 99, 100)) >>> on.clear_duration_counters(4,15) ('A', 0) >>> on.read_pulse_duration_counters(4,15) ('A', (0, 0, 0, 0)) """ kwargs = {'positions':positions} cmd = self.command_name['Clear Duration Counters'] return self.send_receive(address,cmd,**kwargs) ## 35: ('J[positions][data]', 'Write Analog Outputs'), @utl.logger def write_analog_outputs(self,address,positions,value): """ Purpose: Writes a specified value to one or more analog outputs. Description: Writes the same value to all specified in outputs. """ kwargs = { 'positions':positions, 'data':value } cmd = self.command_name['Write Analog Outputs'] return self.send_receive(address,cmd,**kwargs) ## 36: ('K[positions]', 'Read Analog Outputs'), @utl.logger def read_analog_outputs(self,address,positions): """ Purpose: Reads values from specified outputs. Description: Response tuple contains data for specified outputs. """ kwargs = {'positions':positions} cmd = self.command_name['Read Analog Outputs'] return self.send_receive(address,cmd,**kwargs) ## 37: ('L[positions]', 'Read Analog Inputs'), @utl.logger def read_analog_inputs(self,address,positions): """ Purpose: Reads values from specified inputs. Description: Response tuple contains data for specified inputs. """ kwargs = {'positions':positions} cmd = self.command_name['Read Analog Inputs'] return self.send_receive(address,cmd,**kwargs) ## 38: ('M[positions][data]', 'Average and Read Input'), @utl.logger def average_and_read_input(self,address,position,samples): """ Purpose: Averages the value of a single point over a specified number of samples and returns the result. Description: This command returns a response only when it is finished averaging. If the number of samples is very large and the system is communicating serially, it can tie up the bus while waiting for an acknowledgment message. You can use Start Averaging Inputs, Read Average Complete Bits, and Read Averaged Inputs instead. . Averaging is done using a continuous running average with a sample rate of 100 milliseconds. After the number of samples has been reached, the value is returned to the host. 
The following equation shows how the average is calculated: Average = ((N-1) (Old Average) + (New Reading))/N Even though the positions argument may be a tuple which is not sorted, the averages are returned in ascending positions order. """ if isinstance(position,(list,tuple)): position = position[0] kwargs = { 'positions':position, 'data':samples } cmd = self.command_name['Average and Read Input'] return self.send_receive(address,cmd,**kwargs) ## 39: ('N[positions][data]', 'Set Input Range'), @utl.logger def set_input_range(self,address,positions,hi_limit,lo_limit): """ Purpose: Defines the high and low limits for the specified input points. Parameters: address - optomux device address in range(256) positions - points whose limits are being set hi_limit - hi limit latch trigger value lo_limit - lo limit latch trigger value Description: This command defines a range for the specified inputs. If an input is out of the specified range, one of two latches is set: high or low limit exceeded. To read these latches, send the command Read Out-of-Range Latches. All specified inputs are set to the same limits. """ kwargs = { 'positions':positions, 'data':(hi_limit,lo_limit) } cmd = self.command_name['Set Input Range'] return self.send_receive(address,cmd,**kwargs) ## 40: ('O', 'Read Out Of Range Latches'), @utl.logger def read_out_of_range_latches(self,address): """ Purpose: Returns the high and low out-of-range latches for all points. Remarks: Before using this command, send Set Input Range to set high and low limits for the points. This command does not clear the latches. Parameters: address - optomux device address in range(256) Returns: A tuple of limit flags for all points: 0 = point has remained within limits or is an output. 1 = low-limit latch has been set 2 = high-limit latch has been set 3 = low-limit and high-limit latches have been set """ cmd = self.command_name['Read Out Of Range Latches'] return self.send_receive(address,cmd) ## 41: ('P[positions]', 'Read and Clear Out Of Range Latches'), @utl.logger def read_and_clear_out_of_range_latches(self,address,positions): """ Purpose: Reads/clears the high and low out-of-range latches for specified points. Parameters: address - optomux device address in range(256) positions - points to read/clear Description: Before using this command, send Set Input Range to set the value of high and low limits for the specified points. This command clears latches only for the specified points; all other latches remain unchanged. Returns: A tuple of limit flags for all points: 0 = point has remained within limits or is an output. 1 = low-limit latch has been set 2 = high-limit latch has been set 3 = both low- and high-limit latches have been set """ kwargs = {'positions':positions} cmd = self.command_name['Read and Clear Out Of Range Latches'] return self.send_receive(address,cmd,**kwargs) ## 42: ('Q[positions]', 'Clear Out Of Range Latches'), @utl.logger def clear_out_of_range_latches(self,address,positions=None): """ Purpose: Clears the high and low out-of-range latches for the specified points. Parameters: address - optomux device address in range(256) positions - points to clear Description: Before using this command, send Set Input Range to set the value of high and low limits for the specified points.
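        Example:
            A minimal sketch (illustrative, responses omitted; 'on' is the driver
            instance used in the other examples, address 2 is assumed to be an
            analog brain, and the limit values are arbitrary counts):
            >>> on.set_input_range(2, (0,1), 3000, 1000)
            >>> on.read_out_of_range_latches(2)
            >>> on.clear_out_of_range_latches(2, (0,1))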
""" kwargs = {'positions':positions} cmd = self.command_name['Clear Out Of Range Latches'] return self.send_receive(address,cmd,**kwargs) ## 43: ('R[positions][modifiers][data]', 'Set Output Waveform'), @utl.logger def set_output_waveform(self,address,positions,rate,shape,hi_limit,lo_limit): """ Purpose: Starts a constant waveform at specified output points. Parameters: address - optomux device address in range(256) positions - points to generate waveform rate - period of ramp or 1/2 period of square wave or sawtooth shape - waveform shape hi_limit - max value of waveform lo_limit - min value of waveform Description: The rate sets the period of a ramp one-half the period of a triangle or square wave and assume a full scale change. Valid waveform rates: 0 Disable waveform 1 2.18 minutes 2 3.28 minutes 3 4.37 minutes 4 5.46 minutes 5 6.56 minutes 6 7.65 minutes 7 8.74 minutes 8 1.09 minutes 9 32.8 seconds 10 21.8 seconds 11 16.4 seconds 12 13.1 seconds 13 10.9 seconds 14 9.4 seconds 15 8.2 seconds Valid waveform shapes: 0 No waveform 1 Triangle wave with positive initial slope 2 Ramp up—waveform terminates upon reaching the upper limit 3 Continuous ramp up 4 Square wave (50 % duty cycle) 5 Triangle wave with negative initial slope 6 Ramp down—waveform terminates at lower limit 7 Continuous ramp down """ kwargs = { 'positions':positions, 'modifiers':(rate,shape), 'data':(hi_limit,lo_limit) } cmd = self.command_name['Set Output Waveform'] return self.send_receive(address,cmd,**kwargs) ## 44: ('R[positions][modifiers]', 'Turn Off Existing Waveforms'), @utl.logger def turn_off_existing_waveform(self,address,positions): """ Purpose: Turns off existing waveforms that were started with Set Output Waveforms Parameters: address - optomux device address in range(256) positions - points having waveform canceled Description: Cancels waveforms that were started using 'Set Output Waveform'. """ kwargs = {'positions':positions} cmd = self.command_name['Turn Off Existing Waveforms'] return self.send_receive(address,cmd,**kwargs) ## 45: ('D[positions][data]', 'Set Analog Watchdog'), @utl.logger def set_analog_watchdog(self,address,positions,action): """ Purpose: Instructs an analog Optomux unit to monitor activity on the communications link and to take a predetermined action if there is no activity within a specified time. No activity means no activity of any kind on the serial link, or no communication with this brain board on the Ethernet link. Parameters: address - optomux device address in range(256) positions - points having waveform canceled action - timeout and output action to be taken Description: Valid times and actions are: 0 -- Watchdog disabled 1 10 seconds Write zero-scale 2 1 minute Write zero-scale 3 10 minutes Write zero-scale 4 -- Watchdog disabled 5 10 seconds Write full-scale 6 1 minute Write full-scale 7 10 minutes Write full-scale 20–65,535 Sets timeout value to action x 10 ms. Use 'Set Analog Watchdog Timeout' command to determine output value 8–19 -- invalid, returns limit error If no data is specified, 0 (watchdog disabled) is assumed. The Optomux unit will respond to the first command after a watchdog timeout with an error -7 for the driver or N06cr for the protocol, and the command will NOT be executed (unless it is a PUC). The error message is a warning to let the host know a watchdog timeout occured. 
""" kwargs = { 'positions':positions, 'data':action } cmd = self.command_name['Set Analog Watchdog'] return self.send_receive(address,cmd,**kwargs) @utl.logger def disable_analog_watchdog(self,address,positions): """ Purpose: When typing at the terminal, short watchdog delays make it virtually impossible to recover. It takes two commands to clear the error since the first is rejected with a N06\r. Parameters: address - optomux device address in range(256) positions - points having waveform canceled Description: Send a 'Power Up Clear' to get rid of the N06\r error, then follow immediately with a 'Set Analog Watchdog' type 0 to disable. """ self.power_up_clear(address) self.set_analog_watchdog(address,positions,0) ## 46: ('S[positions][data]', 'Update Analog Outputs'), @utl.logger def update_analog_outputs(self,address,positions,values): """ Purpose: Writes values to one or more analog outputs. Parameters: address - optomux device address in range(256) positions - mask or tuple of points to cancel wave values - tuple of values for specified positions Description: Use this command to write different values to multiple outputs. Use 'Write Analog Outputs' to write the same value to multiple outputs. Attempts to write values to points configured as inputs will be ignored. Driver """ kwargs = { 'positions':positions, 'data':values } cmd = self.command_name['Update Analog Outputs'] return self.send_receive(address,cmd,**kwargs) ## 47: ('T[positions][data]', 'Start Averaging Inputs'), @utl.logger def start_averaging_inputs(self,address,positions,samples): """ Purpose: Averages the value of a single point over a specified number of samples and returns the result. Parameters: address - optomux device address in range(256) positions - mask or tuple of points to cancel wave samples - number of samples to average Description: Averaging is done using a continuous running average with a sample rate of 100 milliseconds. The equation is: Average = ((N-1) (Old Average) + (New Reading))/N Poll with 'Read Average Complete Bits' in (samples * 100)ms to see if average is ready. """ kwargs = { 'positions':positions, 'data':samples } cmd = self.command_name['Start Averaging Inputs'] return self.send_receive(address,cmd,**kwargs) ## 48: ('i', 'Read Average Complete Bits'), @utl.logger def read_average_complete_bits(self,address): """ Purpose: Allows the host to determine which points have completed averaging. Parameters: address - optomux device address in range(256) Description: Use Start Averaging Inputs (page 104) before using this command. A 1 bit indicates that input averaging has been completed; a 0 bit indicates that it has not. Ignore response bits corresponding to points configured as outputs or where averaging has not been started. """ cmd = self.command_name['Read Average Complete Bits'] return self.send_receive(address,cmd) ## 49: ('U[positions]', 'Read Averaged Inputs'), @utl.logger def read_averaged_inputs(self,address,positions): """ Purpose: Read the averaged analog values of the specified input positions Parameters: address - optomux device address in range(256) positions - mask or tuple of points to read averages Description: Input averaging must have already been started using 'Start Averaging Inputs'. Check whether averaging is completed by using 'Read Average Complete Bits' If averaging has not been completed, 'Read Averaged Inputs' returns the current value of the average. 
""" kwargs = { 'positions':positions } cmd = self.command_name['Read Averaged Inputs'] return self.send_receive(address,cmd,**kwargs) ## 50: ('V[positions][modifiers][data]', 'Enhanced Output Waveform'), @utl.logger def enhanced_output_waveform(self,address,positions,shape,hi_limit,lo_limit,period): """ Purpose: Starts a constant waveform at specified output points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to generate waveform shape - waveform shape hi_limit - max value of waveform lo_limit - min value of waveform period - period of ramp or 1/2 period of square wave or sawtooth Description: This command offers greater flexibility in setting the period of the waveform than the older command 'Set Output Waveform'. Valid waveform types are: 0 Triangle wave with positive inital slope 1 Ramp up—waveform terminates upon reaching the upper limit 2 Sawtooth, continuous ramp up 3 Square wave (50 % duty cycle) 4 Triangle wave with negative initial slope 5 Ramp down—waveform termihnates at lower limit 6 Sawtooth, continuous ramp down """ kwargs = { 'positions':positions, 'modifiers':shape, 'data':(hi_limit,lo_limit,period) } cmd = self.command_name['Enhanced Output Waveform'] return self.send_receive(address,cmd,**kwargs) ## 51: ('V[positions][modifiers]', 'Cancel Enhanced Waveforms'), @utl.logger def cancel_enhanced_waveforms(self,address,positions): """ Purpose: Turns off existing waveforms that were started with 'Enhanced Output Waveform' Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to generate waveform Description: This command cancels waveforms that were started using command 'Enhanced Output Waveform'. If the 'Set Output Waveform' command was used, then the 'Turn Off Existing Waveforms' must be used. """ kwargs = { 'positions':positions, 'modifiers':0 } cmd = self.command_name['Cancel Enhanced Waveforms'] return self.send_receive(address,cmd,**kwargs) ## 52: ('g[positions]', 'Calculate Offsets'), @utl.logger def calculate_offsets(self,address,positions): """ Purpose: Calculates and returns offsets for specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to calculate offsets Description: Because offset values are calculated using the current values of the inputs, use this command when the specified points are receiving the value you wish to consider zero scale. This is usually done during system installation and calibration, when known inputs (zero scale) can be applied to the points. Use the offset values obtained from this command to set offset values during Optomux initialization. Always set offsets before calculating gain coefficients (see page 126). """ kwargs = { 'positions':positions, } cmd = self.command_name['Calculate Offsets'] return self.send_receive(address,cmd,**kwargs) ## 53: ('W[positions][data]', 'Set Offsets'), @utl.logger def set_offsets(self,address,positions,offsets): """ Purpose: Set analog offsets for specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to receive offsets offsets - tuple of offsets from a previous calibration Description: Use the offset values obtained from 'Calculate Offsets' in this command to set offset values during Optomux initialization. Always set offsets before calculating gain coefficients. 
""" kwargs = { 'positions':positions, 'data':offsets } cmd = self.command_name['Set Offsets'] return self.send_receive(address,cmd,**kwargs) ## 54: ('h[positions]', 'Calculate and Set Offsets'), @utl.logger def calculate_and_set_offsets(self,address,positions): """ Purpose: Calculates and sets offsets for specified input points, and then returns caluclated offsets to the host. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to calculate offsets Description: Because offset values are calculated using the current values of the inputs, use this command when the specified points are receiving the value you wish to consider zero scale. This is usually done during system installation and calibration, when known inputs (zero scale) can be applied to the points. Always set offsets before calculating gain coefficients. """ kwargs = { 'positions':positions, } cmd = self.command_name['Calculate and Set Offsets'] return self.send_receive(address,cmd,**kwargs) ## 55: ('X[positions]', 'Calculate Gain Coefficients'), @utl.logger def calculate_gain_coefficients(self,address,positions): """ Purpose: Calculates and returns gain coeffients for specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to calculate gains Description: Before using this command, first calculate and set the offsets. Because gain values are calculated using the current values of the inputs, use this command when the specified points are receiving the value you wish to consider full scale. This is usually done during system installation and calibration, when known inputs (full scale) can be applied to the points. Use the values obtained from this command to set gain coefficient values during Optomux initialization. Returns: Tuple of elements elements containing gain coefficient values for the corresponding points. Values returned are 10,000 times the actual gain coefficients. Note: A returned value of 14,000 represents a gain coefficient of 1.40. """ kwargs = { 'positions':positions, } cmd = self.command_name['Calculate Gain Coefficients'] return self.send_receive(address,cmd,**kwargs) ## 56: ('Y[positions][data]', 'Set Gain Coefficients'), @utl.logger def set_gain_coefficients(self,address,positions,gains): """ Purpose: Sets the gain coeffients for specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to set gains gains - tuple of gain values from earlier calibartion Description: It is assumed that the offsets and gains have been conputed using calibrated input values previously previously. Use this command when necessary to set the gains to a previously good calibration value. """ kwargs = { 'positions':positions, 'data':gains } cmd = self.command_name['Set Gain Coefficients'] return self.send_receive(address,cmd,**kwargs) ## 57: ('Z[positions]', 'Calculate and Set Gain Coefficients'), @utl.logger def calculate_and_set_gain_coefficients(self,address,positions): """ Purpose: Calculates and sets the gain coeffients for specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to set calculate/gains Description: It is assumed that the offsets and gains have been conputed using calibrated input values previously previously. Use this command to calibrate the gain when a cal source is applied to the input. 
""" kwargs = { 'positions':positions, } cmd = self.command_name['Calculate and Set Gain Coefficients'] return self.send_receive(address,cmd,**kwargs) ## 58: ('a[positions]', 'Read Lowest Values'), @utl.logger def read_lowest_values(self,address,positions): """ Purpose: Returns the lowest readings at specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to read lowest values Description: The reading will be the lowest the unit has encountered since last receiving a 'Read and Clear Lowest Values' or 'Clear Lowest Values'. Units set all low values to an extreme over range of 2000 hex (for the driver) or 3000 hex (for the protocol) upon power-up. Returns: A tuple of lowest values for the specified positions """ kwargs = { 'positions':positions } cmd = self.command_name['Read Lowest Values'] return self.send_receive(address,cmd,**kwargs) ## 59: ('b[positions]', 'Clear Lowest Values'), @utl.logger def clear_lowest_values(self,address,positions=None): """ Purpose: Clears the lowest reading for specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to clear Description: Units clear the lowest readings by setting the lowest value to an extreme over range of 2000 hex (for the driver) or 3000 hex (for the protocol). This allows the unit to store the lowest value encountered in subsequent readings. """ kwargs = { 'positions':positions, } cmd = self.command_name['Clear Lowest Values'] return self.send_receive(address,cmd,**kwargs) ## 60: ('c[positions]', 'Read and Clear Lowest Values'), @utl.logger def read_and_clear_lowest_values(self,address,positions): """ Purpose: Returns/clears the lowest readings at specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to read lowest values Description: The reading will be the lowest the unit has encountered since last receiving a 'Read and Clear Lowest Values' or 'Clear Lowest Values'. Units set all low values to an extreme over range of 2000 hex (for the driver) or 3000 hex (for the protocol) upon power-up. Returns: A tuple of lowest values for the specified positions """ kwargs = { 'positions':positions } cmd = self.command_name['Read and Clear Lowest Values'] return self.send_receive(address,cmd,**kwargs) ## 61: ('d[positions]', 'Read Peak Values'), @utl.logger def read_peak_values(self,address,positions): """ Purpose: Returns the highest readings at specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to read highest values Description: The reading will be the highest the unit has encountered since last receiving a 'Read and Clear Peak Values' or 'Clear Peak Values'. Units set all low values to an extreme over range of 0000 hex upon power-up. Returns: A tuple of highest values for the specified positions """ kwargs = { 'positions':positions } cmd = self.command_name['Read Peak Values'] return self.send_receive(address,cmd,**kwargs) ## 62: ('e[positions]', 'Clear Peak Values'), @utl.logger def clear_peak_values(self,address,positions=None): """ Purpose: Clears the peak reading for specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to clear Description: Units clear the peak readings by setting the them to 0. 
""" kwargs = { 'positions':positions, } cmd = self.command_name['Clear Peak Values'] return self.send_receive(address,cmd,**kwargs) ## 63: ('f[positions]', 'Read and Clear Peak Values'), @utl.logger def read_and_clear_peak_values(self,address,positions=None): """ Purpose: Returns/clears the highest readings at specified input points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to read/clear Description: The reading will be the highest the unit has encountered since last receiving a 'Read and Clear Peak Values' or 'Clear Peak Values'. Units set all peak values to 0000 hex upon power-up. Returns: A tuple of highest values for the specified positions """ kwargs = { 'positions':positions } cmd = self.command_name['Read and Clear Peak Values'] return self.send_receive(address,cmd,**kwargs) ## 64: ('M', 'Read Binary On Off Status'), @utl.logger def read_binary_on_off_status(self,address): """ Purpose: Returns the on/off status of all 16 points in the form of a 16-bit binary number. Parameters: address - optomux device address in range(256) Returns: A bitmask (0=off, 1=on) """ cmd = self.command_name['Read Binary On Off Status'] return self.send_receive(address,cmd) ## 65: ('J[positions]', 'Write Binary Outputs'), @utl.logger def write_binary_outputs(self,address,mask): """ Purpose: Writes all 16 points with the value form a bitmask Parameters: address - optomux device address in range(256) mask - uint16 with a bit for each output, 0=off, 1=on """ kwargs = {'positions':mask} rsp = self.write_digital_outputs(address,**kwargs) return rsp ## 66: ('Q', 'Read Binary Latches'), @utl.logger def read_binary_latches(self,address): """ Purpose: Returns data indicating which of the inputs have latched. Parameters: address - optomux device address in range(256) Description: This command does not clear the latches. Subsequent Read Latches commands will return the same results. """ cmd = self.command_name['Read Binary Latches'] return self.send_receive(address,cmd) ## 67: ('R[positions]', 'Read and Clear Binary Latches'), @utl.logger def read_and_clear_binary_latches(self,address,positions): """ Purpose: Returns data indicating which of the inputs have latched and clears the latches. Parameters: address - optomux device address in range(256) positions - specifies which latches to read/clear Description: This command first reads then clears the latches. """ kwargs = {'positions':positions} cmd = self.command_name['Read and Clear Binary Latches'] return self.send_receive(address,cmd,**kwargs) ## 68: ('Z[positions][modifiers][data]', 'High Resolution Square Wave'), @utl.logger def high_resolution_square_wave(self,address,positions,on_time,off_time): """ Purpose: Starts a continuous square wave at specified output points. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to output squarewave on_ticks - timer ticks to stay on off_ticks - timer ticks to stay off Description: Before using this command, use the Set Timer Resolution. On time = timer resolution x first element value Off time = timer resolution x second element value. Maximum for on and off times is 256 x 2.56 seconds (10.92 minutes). This command operates the same as the standard square wave command except that it uses the current timer resolution instead of a resolution of 2.56 seconds. 
The square wave stops by sending one of the commands: 'Turn Off Time Delay/Square Wave' is sent 'Set Time Delay' 'Initiate Square Wave' Write , Activate, and Deactivate Digital Outputs have no effect """ kwargs = { 'positions':positions, 'modifiers':'M', 'data':(on_time,off_time) } cmd = self.command_name['High Resolution Square Wave'] return self.send_receive(address,cmd,**kwargs) ## 69: ('h[positions]', 'Retrigger Time Delay'), @utl.logger def retrigger_time_delay(self,address,positions=None): """ Purpose: Restarts or triggers an existing time delay. Parameters: address - optomux device address in range(256) positions - position to retrigger Description: Use this command along with 'Set Time Delay' to dynamically change an active time delay. This command overrides an existing time delayed output by setting the time delay counter to the value established with 'Set Time Delay'. """ kwargs = {'positions':positions} cmd = self.command_name['Retrigger Time Delay'] return self.send_receive(address,cmd,**kwargs) ## 70: ('j', 'Read Configuration'), @utl.logger def read_configuration(self,address): """ Purpose: Returns the current input/output configuration for all 16 points. Parameters: address - optomux device address in range(256) Description: 0=Input 1=Output Returns: Tuple of bits Example: >>> on.read_configuration(0) ('A', (1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) """ cmd = self.command_name['Read Configuration'] return self.send_receive(address,cmd) """ on timeout, outputs set in positions are turned on, others off """ ## 71: ('m[positions][data]', 'Set Enhanced Digital Watchdog'), @utl.logger def set_enhanced_digital_watchdog(self,address,positions,ticks): """ Purpose: Instructs a digital Optomux unit to monitor activity on the communications link and to take a specified action if there is no activity within a specified time. Parameters: address - optomux device address in range(256) positions - mask or tuple of positions to output squarewave ticks - number of 10ms ticks before generating a watchdog Description: After this command is issued, if a character is not received within the time specified delay, the unit will turn on or off all outputs specified. All time delay outputs are cancelled. Inputs are not affected. The delay time is set delay * 10 milliseconds. Delays of less than 200 milliseconds (except 0) result in a limit error and the command is not executed. A delay time of zero disables the digital watchdog function. If no delay time is sent, 0 is assumed. The Optomux unit will respond to the first command after a watchdog timeout with N06cr (Optomux protocol) error, and the command will NOT be executed (unless it is a PUC). This error code is sent as a warning to let the host know a watchdog timeout occurred. """ kwargs = {'positions':positions, 'data':ticks} cmd = self.command_name['Set Enhanced Digital Watchdog'] return self.send_receive(address,cmd,**kwargs) ## 72: ('i[positions][modifiers][data]', 'Generate N Pulses'), @utl.logger def generate_n_pulses(self,address,positions,ticks,count): """ Purpose: Instructs Optomux unit to output a counted string of pulses of a specified duration. 
        Parameters:
            address - target optomux device address
            positions - outputs to pulse
            ticks - 50% time in ticks (see 'Set Timer Resolution')
            count - number of pulses
        Description:
            Generates count pulses on specified outputs with a period of
            2 * timer resolution * ticks
        """
        kwargs = {'positions':positions, 'modifiers':ticks, 'data':count}
        cmd = self.command_name['Generate N Pulses']
        return self.send_receive(address,cmd,**kwargs)

    ## 73: ('k[positions][data]', 'Start On Pulse'),
    @utl.logger
    def start_on_pulse(self,address,positions,ticks):
        """
        Purpose:
            Turns on specified outputs for the specified number of timer
            ticks, then turns them off.
        Parameters:
            address - target optomux device address
            positions - outputs to pulse
            ticks - pulse on time in ticks (see 'Set Timer Resolution')
        Description:
            Assumes the 'Set Timer Resolution' command has been used to set
            the tick rate.
            Because this command is retriggerable, it can be used as a
            watchdog circuit by continuously sending this command at a rate
            faster than the pulse length.
            To cancel this command, resend it with ticks = 1
        """
        kwargs = {
            'positions':positions,
            'data':ticks
            }
        cmd = self.command_name['Start On Pulse']
        return self.send_receive(address,cmd,**kwargs)

    ## 74: ('l[positions][data]', 'Start Off Pulse'),
    @utl.logger
    def start_off_pulse(self,address,positions,time):
        """
        Purpose:
            Turns off specified outputs for the specified number of timer
            ticks, then turns them on.
        Parameters:
            address - target optomux device address
            positions - outputs to pulse
            time - pulse off time in ticks (see 'Set Timer Resolution')
        Description:
            Assumes the 'Set Timer Resolution' command has been used to set
            the tick rate.
            Because this command is retriggerable, it can be used as a
            watchdog circuit by continuously sending this command at a rate
            faster than the pulse length.
            To cancel this command, resend it with ticks = 1
        """
        kwargs = {
            'positions':positions,
            'data':time
            }
        cmd = self.command_name['Start Off Pulse']
        return self.send_receive(address,cmd,**kwargs)

    ## 75: ('n[data]', 'Set Timer Resolution'),
    @utl.logger
    def set_timer_resolution(self,address,ticks):
        """
        Purpose:
            Sets a global timer value for all timing functions on the
            Optomux digital brain.
        Parameters:
            address - target optomux device address
            ticks - timer period in 10ms ticks
        Description:
            If the value is 0, the timer resolution is 2.56 seconds.
            This command is a global command and affects the timing
            resolution for the following commands:
                SET TIME DELAY
                INITIATE SQUARE WAVE
                HIGH RESOLUTION SQUARE WAVE
                RETRIGGER TIME DELAY
                GENERATE N PULSES
                START ON PULSE
                START OFF PULSE
                READ PULSE COMPLETE BITS
                READ PULSE DURATION COUNTERS
                READ AND CLEAR DURATION COUNTERS
        """
        self.timer_resolution[address] = ticks
        kwargs = {'data':ticks}
        cmd = self.command_name['Set Timer Resolution']
        return self.send_receive(address,cmd,**kwargs)

    ## 76: ('k[positions][data]', 'Set Temperature Probe Type'),
    @utl.logger
    def set_temperature_probe_type(self,address,positions,probe,useReadAnalogInputs=False):
        """
        Purpose:
            Sets the probe type for points using temperature input modules
            (thermocouples, ICTDs, and RTDs), so that the READ TEMPERATURE
            INPUTS command can be used to read the temperature directly.
Parameters: address - target optomux device address positions - outputs to pulse type - probe type Description: Valid probe types are: 0 no temperature probe 1 ICTD probe 2 10 ohm RTD probe 3 100 ohm RTD probe 4 Type J thermocouple 5 Type K thermocouple 6 Type R thermocouple 7 Type S thermocouple 8 Type T thermocouple 9 Type E thermocouple Notes: For some reason the B3000 changes the value returned by 'Read Analog Inputs' if a probe type was set using 'Set Temperature Probe Type'. Therefore, don't send the 'Set Temperature Probe Type' command if we want to compute our own temperature using the equations/tables found in the users manual. """ kwargs = { 'positions':positions, 'data':probe } if not useReadAnalogInputs: cmd = self.command_name['Set Temperature Probe Type'] rtn = self.send_receive(address,cmd,**kwargs) else: rtn = ('A',0) if rtn[0] == 'A': if isinstance(positions,int): positions = self.positions_mask_to_tuple(kwargs['positions']) for position in positions: key = address<<4+position self.temperature_probes[key] = probe self.read_as_analog_input[key] = useReadAnalogInputs return rtn @utl.logger def convert_to_icdt(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings ICTD Input Module—AD4, p155 """ v = ((0.08262 * value) - 188.4) return v @utl.logger def convert_to_10_ohm_rtd(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings """ return value @utl.logger def convert_to_100_ohm_rtd(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings 100 Ohm RTD Input Module—AD10T2, p158 """ rtd100row = namedtuple('rtd100row',['A0', 'A1', 'A2', 'A3', 'A4', 'A5']) rtd100tbl = ( rtd100row(range(-32768,2110), 0, 9.474182E-02, 50, -0.000156, 0.0156, -1.11), rtd100row(range(2111,4095), 2111, 0.1008065, 100, -0.000156, 0, 248.45), rtd100row(range(4095,6219), 4095, 0.1082863, 115, -0.00017, 0, 462.76), rtd100row(range(6219,32767), 6219, 0.118525, 135, -0.000188, 0, 711.56) ) for rtd100row in rtd100tbl: if value in rtd100tbl.range: tv0 = (value - rtd100row.A0) * rtd100row.A1 - rtd100row.A2 return tv0 - (rtd100row.A3 * tv0**2) - (rtd100row.A4 * tv0) + rtd100row.A5 return None @utl.logger def convert_to_type_j_thermocouple(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings Type J Thermocouple—AD5, AD5T, p156 """ # linearization table jrow = namedtuple('jrow',['range','A','B','C']) jtbl = ( jrow(range(-4096,2),104,0.1923076,20.15), jrow(range(1,162),0,0.1863354,0.10), jrow(range(162,355),161,0.1813472,30.19), jrow(range(355,552),354,0.1776649,65.10), jrow(range(552,868),551,0.1740506,100.15), jrow(range(868,1767),867,0.1724137,155.10), jrow(range(1767,2547),1766,0.1730769,310.00), jrow(range(2547,2896),2546,0.1719197,445.05), jrow(range(2896,3192),2895,0.1689189,505.10), jrow(range(3192,3465),3191,0.1654411,555.10), jrow(range(3465,3744),3464,0.1612903,600.20), jrow(range(3744,3901),3743,0.1572327,645.17), jrow(range(3901,4096),3901,0.1546391,670.10)) for jrow in jtbl: if value in jrow.range: # TEMP = (VALUE% - A) * B + C return (value - jrow.A) * jrow.B + jrow.C return None @utl.logger def convert_to_type_k_thermocouple(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings Type K Thermocouple—AD8, AD8T, p157 """ krow = namedtuple('krow',['range', 'A', 'B', 'C']) ktbl = ( krow(range(-32768,97), 0, 0.3167899, -99.6), krow(range(96,199), 95, 0.2892961, -69.6), 
krow(range(199,349), 198, 0.2675585, -39.8), krow(range(349,579), 348, 0.2518454, 0.3), krow(range(579,910), 478, 0.2478090, 57.9), krow(range(910,1366), 909, 0.2541073, 140.2), krow(range(1366,1871), 1365, 0.2456905, 256.2), krow(range(1871,3085), 1870, 0.2405867, 380.1), krow(range(3085,3622), 3084, 0.2456271, 671.8), krow(range(3622,4096), 3621, 0.2532714, 803.8), krow(range(4096,4525), 4095, 0.2611331, 924.0), krow(range(4525,4875), 4524, 0.2685714, 1035.9), krow(range(4875,5171), 4874, 0.2770270, 1130.0), krow(range(5171,5422), 5170, 0.2858277, 1211.9), krow(range(5422,32767), 5422, 0.2959973, 1283.9) ) for krow in ktbl: if value in krow.range: # TEMP = (VALUE% - A) * B + C return (value - krow.A) * krow.B + krow.C return None @utl.logger def convert_to_type_r_thermocouple(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings Type R Thermocouple—AD17T, p159 """ rrow = namedtuple('rrow',['range','A0', 'A1', 'A2', 'A3', 'A4']) rtbl = ( rrow(range(-32768,740), 0, 0.1625144, -2.045438E-05, 2.540494E-09, -1.7679E-13), rrow(range(740,32767), 46.67453, 0.1117991, -2.565926E-06, 5.347317E-11, 0)) for rrow in rtbl: if value in rrow.range: tv0 = 2.43663 * value # temp = A0 + (A1 * TV0) + (A2 * (TV0^2)) + (A3 * (TV0^3)) + (A4 * (TV0^4)) return (rrow.A0 \ + (rrow.A1 * tv0) \ + (rrow.A2 * tv0**2) \ + (rrow.A3 * tv0**3) \ + (rrow.A4 * tv0**4)) return None @utl.logger def convert_to_type_s_thermocouple(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings Type S Thermocouple—AD17T, p160 """ srow = namedtuple('srow',['range','A0', 'A1', 'A2', 'A3', 'A4']) stbl = ( srow(range(-32768,479), 0, 0.1641405, -2.024176E-05, 2.784973E-09, -1.41721E-13), srow(range(479,32767), 30.1319, 0.1215561, -2.752449E-06, 6.475822E-11, 0) ) for srow in stbl: if value in srow.range: tv0 = 2.43663 * value # temp = A0 + (A1 * TV0) + (A2 * (TV0^2)) + (A3 * (TV0^3)) + (A4 * (TV0^4)) return (srow.A0 \ + (srow.A1 * tv0) \ + (srow.A2 * tv0**2) \ + (srow.A3 * tv0**3) \ + (srow.A4 * tv0**4)) return None @utl.logger def convert_to_type_t_thermocouple(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings Type T Thermocouple—AD18T, p161 """ trow = namedtuple('trow',['range','A0', 'A1', 'A2', 'A3', 'A4']) ttbl = ( trow(range(-32768,1419), 0, 2.383709E-02, -2.987884E-06, -7.194581E-10, -1.004194E-13), trow(range(1419,32767), 0, 0.0256613, -6.195487E-07, 2.218164E-11, -3.55009E-16) ) for trow in ttbl: if value in trow.range: tv0 = 3.951286 * value + 5602.92 return (trow.A0 \ + (trow.A1 * tv0) \ + (trow.A2 * tv0**2) \ + (trow.A3 * tv0**3) \ + (trow.A4 * tv0**4)) return None @utl.logger def convert_to_type_e_thermocouple(self,value): """ Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings Type E Thermocouple—AD19T, p162 """ erow = namedtuple('erow',['range','A0', 'A1', 'A2', 'A3', 'A4']) etbl = ( erow(range(-32768,367), 0, 1.572665E-02, -1.210215E-06, -1.95778E-10, -1.66963E-14), erow(range(367,3784), 0, 1.702253E-02, -2.209724E-07, 5.480931E-12, -5.766989E-17), erow(range(3784,32767), 19.66945, 1.420774E-02, -5.184451E-08, 5.636137E-13, -1.564634E-18) ) for erow in etbl: if value in erow.range: tv0 = 3.951286 * value + 5602.92 return (trow.A0 \ + (erow.A1 * tv0) \ + (erow.A2 * tv0**2) \ + (erow.A3 * tv0**3) \ + (erow.A4 * tv0**4)) return None @utl.logger def convert_probe_temperature_readings(self,address,positions,values): """ Convert analog reading to temperature based 
on probe type """ if isinstance(positions,int): positions = sorted(self.positions_mask_to_tuple(positions)) if isinstance(values,int): values = list(values) # make mutable for position in positions: value = values[positions.index(position)] probe_type = -1 try: key = address<<4+position probe_type = self.temperature_probes[key] if probe_type in self.temperature_probe_type_valid_range: if isinstance(value,int): value /= 16 else: value = -4096 except: value = -4096 return values @utl.logger def convert_analog_temperature_readings(self,address,positions,values): """ Refer to: Optomux Protocol Guide, Form 1572-140618—June 2014 Converting Temperature Readings, p155 There seems to be an issue with the B3000 changing the value returned by 'Read Analog Inputs' when the temperature probe type changes. Thus it is necessary to avoid sending the 'Set Temperature Probe Type' command to the B3000 if the temperature module is to be read using the 'Read Analog Inputs' command. The set_temperature_probe_type function """ if isinstance(positions,int): positions = self.positions_mask_to_tuple(positions) if isinstance(values,int): values = tuple(values) values = list(values) # make mutable for position in positions: i = positions.index(position) value = values[i] probe_type = -1 try: key = address<<4+position probe_type = self.temperature_probes[key] if probe_type in self.temperature_probe_type_valid_range: if self.temperature_probe_types[probe_type] == 'ICTD probe': value = self.convert_to_icdt(value) elif self.temperature_probe_types[probe_type] == '10 ohm RTD probe': value = self.convert_to_10_ohm_rtd(value) elif self.temperature_probe_types[probe_type] == '100 ohm RTD probe': value = self.convert_to_100_ohm_rtd(value) elif self.temperature_probe_types[probe_type] == 'Type J thermocouple': value = self.convert_to_type_j_thermocouple(value) elif self.temperature_probe_types[probe_type] == 'Type K thermocouple': value = self.convert_to_type_k_thermocouple(value) elif self.temperature_probe_types[probe_type] == 'Type R thermocouple': value = self.convert_to_type_r_thermocouple(value) elif self.temperature_probe_types[probe_type] == 'Type S thermocouple': value = self.convert_to_type_s_thermocouple(value) elif self.temperature_probe_types[probe_type] == 'Type T thermocouple': value = self.convert_to_type_t_thermocouple(value) elif self.temperature_probe_types[probe_type] == 'Type E thermocouple': value = self.convert_to_type_e_thermocouple(value) else: value = -4096 except: value = -4096 utl.log_error_message('Invalid Temperature Probe Type {:d}'.format(probe_type)) values[i] = value return tuple(values) ## 77: ('l[positions]', 'Read Temperature Inputs'), @utl.logger def read_temperature_inputs(self,address,positions): """ Purpose: Returns the temperature at the specified input points. Parameters: address - target optomux device address positions - positions for which temps are to be read Remarks: Before using this command, set the temperature probe type For additional information on reading temperatures, see Appendix C, “Reading Negative Numbers and Temperature." Returns: A tuple with temperature readings in 1/16ths of degrees C. -4096 means no probe type set """ if isinstance(positions,int): positions = self.positions_mask_to_tuple(positions) # create empty lists of positions temperature_positions = [] analog_positions = [] # for each position specified for position in positions: # compute key key = address << 4 + position # using 'Read Analog Inputs'? 
if key in self.read_as_analog_input \ and self.read_as_analog_input[key]: # append to simulation list analog_positions.append(position) # append to real temps list else: temperature_positions.append(position) # back to tuples temperature_positions = tuple(sorted(temperature_positions)) analog_positions = tuple(sorted(analog_positions)) # if there are real temp modules to be read if len(temperature_positions): # send the 'Read Temperature Inputs' command kwargs = {'positions':temperature_positions} cmd = self.command_name['Read Temperature Inputs'] temperature_response = self.send_receive(address,cmd,**kwargs) if temperature_response[0] == 'A': temperature_response = ('A',self.convert_probe_temperature_readings(\ address,temperature_positions,temperature_response[1])) # if there are simulated temps to read if len(analog_positions): # send the 'Read Analog Inputs' command kwargs = {'positions':analog_positions} cmd = self.command_name['Read Analog Inputs'] analog_response = self.send_receive(address,cmd,**kwargs) if analog_response[0] == 'A': analog_response = ('A',self.convert_analog_temperature_readings(\ address,analog_positions,analog_response[1])) # combine simulated and read temps positions_and_values = sorted(\ list(zip(temperature_positions,temperature_response[1])) + list(zip(analog_positions,analog_response[1])),\ key=lambda pv: pv[0]) # extract values return ('A',tuple([pv[1] for pv in positions_and_values])) ## 78: ('m[positions][data]', 'Set Analog Watchdog Timeout'), @utl.logger def set_analog_watchdog_timeout(self,address,positions,values): kwargs = { 'positions':positions, 'data':values } cmd = self.command_name['Set Analog Watchdog Timeout'] return self.send_receive(address,cmd,**kwargs) ## 79: ('o[positions]', 'Read Average Temperature Inputs'), @utl.logger def read_average_temperature_inputs(self,address,positions): """ Purpose: Returns the temperature at the specified input points. Parameters: address - target optomux device address positions - positions for which temps are to be read Remarks: Before using this command, set the temperature probe type, set number of averages using 'Start Averaging Inputs'. Poll 'Check Average Complete Bits' to see if an average is ready. For additional information on reading temperatures, see Appendix C, “Reading Negative Numbers and Temperature." Returns: A tuple with temperature readings in 1/16ths of degrees C. -4096 means no probe type set """ if isinstance(positions,int): positions = self.positions_mask_to_tuple(positions) # create empty lists of positions temperature_positions = [] analog_positions = [] # for each position specified for position in positions: # compute key key = address << 4 + position # using 'Read Analog Inputs'? 
if key in self.read_as_analog_input \ and self.read_as_analog_input[key]: # append to simulation list analog_positions.append(position) # append to real temps list else: temperature_positions.append(position) # back to tuples temperature_positions = tuple(sorted(temperature_positions)) analog_positions = tuple(sorted(analog_positions)) # if there are real temp modules to be read if len(temperature_positions): # send the 'Read Temperature Inputs' command kwargs = {'positions':temperature_positions} cmd = self.command_name['Read Average Temperature Inputs'] temperature_response = self.send_receive(address,cmd,**kwargs) if temperature_response[0] == 'A': temperature_response = ('A',self.convert_probe_temperature_readings(\ address,temperature_positions,temperature_response[1])) # if there are simulated temps to read if len(analog_positions): # send the 'Read Analog Inputs' command kwargs = {'positions':analog_positions} cmd = self.command_name['Read Averaged Inputs'] analog_response = self.send_receive(address,cmd,**kwargs) if analog_response[0] == 'A': analog_response = ('A',self.convert_analog_temperature_readings(\ address,analog_positions,analog_response[1])) # combine simulated and read temps positions_and_values = sorted(\ list(zip(temperature_positions,temperature_response[1])) + list(zip(analog_positions,analog_response[1])),\ key=lambda pv: pv[0]) # extract values return ('A',tuple([pv[1] for pv in positions_and_values])) ## 80: ('`', 'Date Of Firmware') @utl.logger def date_of_firmware(self,address): """ Purpose: Identifies brain firmware revision by date of release. Parameters: address - target optomux device address Description: ‘ is the single quote under the tilde sign on the computer keyboard and is an ASCII 60 hex. Response from a B1, B2, E1, or E2: A07/05/05*B9 Response from a B3000: A811609019911050100300000B7 Very old brain boards may not understand this command and return an error. """ cmd = self.command_name['Date Of Firmware'] return self.send_receive(address,cmd) @utl.logger def get_timer_resolution(self,address): """ A way to get the timer resolution from an instance variable for use in computations of time related delays and waveform periods. Not part of Optomux command set but there is no corresponding Optomux command to read the info from the controller. """ return self.timer_resolution[address] @utl.logger def get_initiate_square_wave_on_off_time_limits(self,address): """ A way to get the limits for square wave pulse widths. This is meant to help determine if the current timer resolution set with 'Set Timer Resolution' will support the desired pulse width. 
""" square_wave_time_limits = [ 256 * 0.010 * self.timer_resolution[address], 256 * 256 * 0.010 * self.timer_resolution[address] ] return square_wave_time_limits @utl.logger def positions_tuple_to_mask(self,positions): """ convert a positions tuple into a bit mask """ if isinstance(positions,list): mask = 0 for position in positions: mask |= (1<<position) return mask elif isinstance(positions,int): return positions return 0 @utl.logger def positions_mask_to_tuple(self,positions): """ convert a positions mask into a tuple """ return tuple(i for i in range(16) if (positions & (1 << i)) != 0) @utl.logger def optomux_data_to_binary_tuple(self,data): """ create a tuple of bit values """ return self.optomux_data_to_tuple(data) @utl.logger def optomux_data_to_counter_tuple(self,data): """ create a tuple from 16 bit counter values """ return self.optomux_data_to_tuple(data,16) @utl.logger def optomux_data_to_analog_input_tuple(self,data): """ create a tuple from analog input values """ values = list(self.optomux_data_to_tuple(data,16)) for i in range(len(values)): if isinstance(values[i],int): values[i] -= 4096 return tuple(values) @utl.logger def optomux_data_to_temperature_tuple(self,data): """ Temperature Readings—Some Optomux commands are specifically designed for temperature measurement (“Read Temperature Inputs” on page 107 and “Read Average Temperature Inputs” on page 109). When you use a Read Temperature command, the brain board normally takes care of thermocouple linearization and returns temperature in degrees C. However, the commands are valid only when the temperature is within the nominal range for the module. If temperature is outside the module’s nominal range (or if your software does not support the Read Temperature commands), then you will need to read counts from the module, linearize the counts, and convert them to temperature. More information and equations showing how to linearize and convert readings from temperature modules are in “Converting Temperature Readings” on page 155. Points that read below the scale of the set probe type return a value of -273 C. Points that read above the scale for the set probe type return 2047 C. """ values = list(self.optomux_data_to_tuple(data,16)) for i in range(len(values)): if isinstance(values[i],int): # if sign bit is set if values[i] & 0x8000: # handle twos complement values[i] -= 0x10000 return tuple(values) @utl.logger def optomux_data_to_analog_output_tuple(self,data): return self.optomux_data_to_tuple(data,12) @utl.logger def optomux_data_to_tuple(self,data,bits=1): """ Return data in a tuple. Since optomux returns it as an array of hex nibbles and the command determines bits per field, this routine first converts from hex to binary, then extracts the bits per the width arg. Special '?' twiddling needs to be done because optomux returns '?'s in a field where data makes no sense. For example, reading analog averages of analog outputs, latched digital outputs, etc. """ b = '' # for each hex digit in string for i in range(len(data)): # get the next nibble c = data[i] # if a valid hex digit if c in '0123456789abcdefABCDEF': # conver to int n = int(data[i],16) # for each bit in the nibble starting at msb for j in reversed(range(4)): # append a binary digit b += chr(ord('0') + ((n >> j) & 1)) # tried to read an output counter elif c == '?': # 4 binary '?' s for i in range(4): b += '?' # create a tuple of ints using substrings of binary width bits # and expand optomux '????' 
        # as if the '?'s were binary digits so all fields will be the
        # same width
        lv = []
        for i in reversed(range(0,len(b),bits)):
            # read bits worth of binary digits
            v = b[i:i+bits]
            # try to convert to an int using base 2
            try:
                n = int(v,2)
            # poke a '?' placeholder so the caller knows not to use
            # the value
            except:
                n = '?'
            # append the value to the list
            finally:
                lv.append(n)
        return tuple(lv)

    def list_optomux_devices(self):
        """
        Build a list of optomux devices by sending a 'Power Up Clear'
        to all addresses and looking for an ACK. If we get an ACK, get
        the type of device for grins
        """
        devices = []
        for address in range(256):
            msg = 'checking address {:02X}'.format(address)
            print(msg,end='',flush=True)
            print(chr(8)*len(msg),end='',flush=True)
            rtn = self.power_up_clear(address)
            if rtn[0] == 'A':
                rtn = self.identify_optomux_type(address)
                if rtn[0] == 'A':
                    print('Found {:s} device at address {:02X}'\
                          .format(self.optomux_type[int(rtn[1])],address))
                devices.append(address)
        print('\nDone')
        return devices

if __name__ == "__main__":
    # create the OmuxNET object
    on = OmuxNET()
    # list the available ttys
    ttys = on.tty.list_ttys()
    # print a menu
    for tty in ttys:
        print(tty,ttys[tty])
    # ask user to select a port
    ttychoice = int(input('choose a tty by number from the above list: '),10)
    print('\n')
    if ttychoice in ttys:
        baudrates = on.tty.list_baudrates()
        for baudrate in baudrates:
            print(baudrate,baudrates[baudrate])
        baudratechoice = int(input('choose a baudrate (check brain jumpers): '),10)
        if baudratechoice in on.tty.baudrates:
            baudrate = int(on.tty.baudrates[baudratechoice])
            print('\n')
            # open the port
            if on.tty.open(ttys[ttychoice],int(on.tty.baudrates[baudratechoice])):
                # build a list of devices by seeing which
                # addresses ACK a 'Power Up Clear' command
                devices = on.list_optomux_devices()
                for device in devices:
                    # for grins, get the firmware date
                    rtn = on.date_of_firmware(device)
                    if rtn[0] == 'A':
                        print('Date Of Firmware: {}'.format(rtn[1]))
            else:
                print('{} open failed'.format(ttys[ttychoice]))
    else:
        print('{:d} : invalid port selection'.format(ttychoice))
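# --- Illustrative sketch (not part of the driver above) ----------------------
# Most commands in this module accept point positions either as a 16-bit mask
# or as a tuple of point numbers. The two standalone helpers below mirror the
# conversion performed by positions_tuple_to_mask()/positions_mask_to_tuple();
# the names and demo values are hypothetical and only serve as a worked example.

def _example_positions_to_mask(positions):
    """Convert an iterable of point numbers (0-15) into a 16-bit mask."""
    mask = 0
    for position in positions:
        mask |= 1 << position
    return mask

def _example_mask_to_positions(mask):
    """Convert a 16-bit mask back into a sorted tuple of point numbers."""
    return tuple(i for i in range(16) if mask & (1 << i))

if __name__ == "__main__":
    # points 0, 3 and 15 -> 0x8009 -> back to (0, 3, 15)
    assert _example_positions_to_mask((0, 3, 15)) == 0x8009
    assert _example_mask_to_positions(0x8009) == (0, 3, 15)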
# imos method (difference array + cumulative sum)
from sys import stdin
from itertools import accumulate

readline = stdin.readline

n = int(readline())
t = [0] * (1000000 + 2)
for _ in range(n):
    a, b = map(int, readline().split())
    t[a] += 1
    t[b + 1] -= 1
print(max(accumulate(t[:-1])))
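# A small self-contained check of the same difference-array ("imos") idea on
# fixed data, independent of the stdin-driven solution above. The helper name
# and the example intervals are hypothetical: intervals [1,3], [2,5], [4,6]
# overlap at most two deep.
def _max_overlap(intervals, limit=10):
    diff = [0] * (limit + 2)
    for a, b in intervals:
        diff[a] += 1        # entering the interval at a
        diff[b + 1] -= 1    # leaving it just after b
    best = cur = 0
    for d in diff[:-1]:
        cur += d            # running sum = number of covering intervals
        best = max(best, cur)
    return best

assert _max_overlap([(1, 3), (2, 5), (4, 6)]) == 2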
"""Module for implementations of the MetadataExtractor interface.""" from __future__ import absolute_import, unicode_literals from rdflib.term import Literal from rdflib.term import URIRef from gutenberg._domain_model.types import rdf_bind_to_string from gutenberg._domain_model.vocabulary import DCTERMS from gutenberg._domain_model.vocabulary import PGTERMS from gutenberg._domain_model.vocabulary import RDFTERMS from gutenberg._util.abc import abstractclassmethod from gutenberg.query.api import MetadataExtractor class _SimplePredicateRelationshipExtractor(MetadataExtractor): """Extracts any sort of meta-data that is directly connected to a text via a simple predicate relationship or a simple predicate-path relationship. """ @abstractclassmethod def predicate(cls): """Returns the predicate relationship that connects the text with the meta-data value to extract. This should be a RDF Term or Path object. """ raise NotImplementedError # pragma: no cover @abstractclassmethod def contains(cls, value): raise NotImplementedError # pragma: no cover @classmethod def get_metadata(cls, etextno): etext = cls._etext_to_uri(etextno) query = cls._metadata()[etext:cls.predicate():] return frozenset(result.toPython() for result in query) @classmethod def get_etexts(cls, requested_value): query = cls._metadata()[:cls.predicate():cls.contains(requested_value)] results = (cls._uri_to_etext(result) for result in query) return frozenset(result for result in results if result is not None) class AuthorExtractor(_SimplePredicateRelationshipExtractor): """Extracts book authors. """ @classmethod def feature_name(cls): return 'author' @classmethod def predicate(cls): return DCTERMS.creator / PGTERMS.name @classmethod def contains(cls, value): return Literal(value) class TitleExtractor(_SimplePredicateRelationshipExtractor): """Extracts book titles. """ @classmethod def feature_name(cls): return 'title' @classmethod def predicate(cls): return DCTERMS.title @classmethod def contains(cls, value): return Literal(value) class FormatURIExtractor(_SimplePredicateRelationshipExtractor): """Extracts book format URIs. """ @classmethod def feature_name(cls): return 'formaturi' @classmethod def predicate(cls): return DCTERMS.hasFormat @classmethod def contains(cls, value): return URIRef(value) class RightsExtractor(_SimplePredicateRelationshipExtractor): """Extracts the copyright information. """ @classmethod def feature_name(cls): return 'rights' @classmethod def predicate(cls): return DCTERMS.rights @classmethod def contains(cls, value): return Literal(value) class LanguageExtractor(_SimplePredicateRelationshipExtractor): """Extracts the language. """ _DATATYPE = URIRef('http://purl.org/dc/terms/RFC4646') rdf_bind_to_string(_DATATYPE) @classmethod def feature_name(cls): return 'language' @classmethod def predicate(cls): return DCTERMS.language / RDFTERMS.value @classmethod def contains(cls, value): return Literal(value, datatype=cls._DATATYPE) class SubjectExtractor(_SimplePredicateRelationshipExtractor): """Extracts the subject(s). """ @classmethod def feature_name(cls): return 'subject' @classmethod def predicate(cls): return DCTERMS.subject / RDFTERMS.value @classmethod def contains(cls, value): return Literal(value)
import random
import string
import pyperclip


class Credentials:
    '''
    class that defines the credentials blueprint
    '''
    credentials_list = []

    def __init__(self,appName,appPassword):
        self.appName = appName
        self.appPassword = appPassword

    def save_credential(self):
        '''
        method that saves a credential to the credentials list
        '''
        Credentials.credentials_list.append(self)

    def delete_credential(self):
        '''
        function that enables a user to delete a credential
        '''
        Credentials.credentials_list.remove(self)

    @classmethod
    def find_by_app_name(cls,appName):
        '''
        method that takes in an application name and returns its details
        '''
        for credentials in cls.credentials_list:
            if credentials.appName == appName:
                return credentials

    @classmethod
    def display_credentials(cls):
        '''
        method that returns a list of all credentials
        '''
        return cls.credentials_list

    @classmethod
    def generate_password(cls):
        '''
        method that generates a password
        '''
        password = string.ascii_lowercase
        return "".join(random.choice(password) for i in range(10))

    @classmethod
    def coppy(cls,appName):
        '''
        method that copies the name of a found credential to the clipboard
        '''
        found_app = Credentials.find_by_app_name(appName)
        pyperclip.copy(found_app.appName)
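# Minimal usage sketch (hypothetical application name) showing the intended
# flow of the class above: create a credential with a generated password,
# look it up by application name, then delete it.
if __name__ == "__main__":
    demo = Credentials("twitter", Credentials.generate_password())
    demo.save_credential()
    found = Credentials.find_by_app_name("twitter")
    print(found.appName, found.appPassword)
    demo.delete_credential()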
import os import torch import torchvision import tqdm import utils if __name__ == '__main__': # Create components builder builder = utils.builder.Builder() config = builder.config model_name = builder.model_name amp_enabled = config['train']['amp_enabled'] # Device device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 1. Dataset valset, valloader = builder.build_dataset('val') # 2. Model model = builder.build_model(valset.num_classes, pretrained=True).to(device) model.eval() print(f'Activated model: {model_name}') # Collect image names image_names = [os.path.basename(image_path) for image_path in valset.images] # Save segmentation results result_dir = os.path.join('demo', model_name.lower()) groundtruth_dir = os.path.join('demo', 'groundtruth') os.makedirs(result_dir, exist_ok=True) os.makedirs(groundtruth_dir, exist_ok=True) for i, (images, targets) in enumerate(tqdm.tqdm(valloader, desc='Demo')): images, targets = images.to(device), targets.to(device) with torch.cuda.amp.autocast(amp_enabled): with torch.no_grad(): outputs = model(images) outputs = torch.argmax(outputs, dim=1) mean = torch.tensor(valset.transforms.normalize.mean) std = torch.tensor(valset.transforms.normalize.std) images = utils.util.inverse_to_tensor_normalize(utils.util.inverse_normalize(images, mean, std)) outputs = utils.util.draw_segmentation_masks(images, outputs, valset.colors) targets = utils.util.draw_segmentation_masks(images, targets, valset.colors) # process per 1 batch assert len(outputs) == len(targets) for j, (output, target) in enumerate(zip(outputs, targets)): file_name = image_names[targets.shape[0] * i + j] torchvision.io.write_jpeg(output.cpu(), os.path.join(result_dir, file_name), quality=100) torchvision.io.write_jpeg(target.cpu(), os.path.join(groundtruth_dir, file_name), quality=100)
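# Standalone sketch of the de-normalization step used above. The project's
# utils.util.inverse_normalize is assumed to undo the channel-wise
# (x - mean) / std transform; this hypothetical helper only illustrates that
# arithmetic and is not part of the utils package.
def _example_inverse_normalize(images, mean, std):
    # images: NCHW float tensor; mean/std: per-channel sequences of length C
    mean = torch.as_tensor(mean, dtype=images.dtype, device=images.device).view(1, -1, 1, 1)
    std = torch.as_tensor(std, dtype=images.dtype, device=images.device).view(1, -1, 1, 1)
    return images * std + mean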
#!/usr/bin/env python #-*-*- encoding: utf-8 -*-*- # # Copyright (C) 2005 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Pablo Orduña <pablo@ordunya.com> # from __future__ import print_function, unicode_literals import hashlib import time as time_module from voodoo.sha0 import sha0 import weblab.experiment.util as ExperimentUtil import weblab.configuration_doc as configuration_doc def _get_time_in_str(): cur_time = time_module.time() s = time_module.strftime('%Y_%m_%d___%H_%M_%S_',time_module.gmtime(cur_time)) millis = int((cur_time - int(cur_time)) * 1000) return s + str(millis) class FileStorer(object): def __init__(self, cfg_manager, reservation_id, time_module = time_module): self._cfg_manager = cfg_manager self.time_module = time_module self._reservation_id = reservation_id def _utc_timestamp(self): return self.time_module.time() def store_file(self, file_content, file_info): import weblab.data.experiments as Experiments # TODO: this is a very dirty way to implement this. Anyway until the good approach is taken, this will store the students programs # TODO: there should be two global variables: first, if store_student_files is not activated, do nothing. # but, if store_student_files is activated, it should check that for a given experiment, they should be stored or not. # For instance, I may want to store GPIB experiments but not FPGA experiments. Indeed, this should be stored in the db # in the permission of the student/group with the particular experiment, with a default value to True. should_i_store = self._cfg_manager.get_doc_value(configuration_doc.CORE_STORE_STUDENTS_PROGRAMS) timestamp_before = self._utc_timestamp() if should_i_store: # TODO not tested if isinstance(file_content, unicode): file_content_encoded = file_content.encode('utf8') else: file_content_encoded = file_content deserialized_file_content = ExperimentUtil.deserialize(file_content_encoded) storage_path = self._cfg_manager.get_doc_value(configuration_doc.CORE_STORE_STUDENTS_PROGRAMS_PATH) relative_file_path = _get_time_in_str() + '_' + self._reservation_id file_hash = sha0(deserialized_file_content) where = storage_path + '/' + relative_file_path f = open(where,'w') f.write(deserialized_file_content) f.close() return Experiments.FileSent(relative_file_path, "{sha}%s" % file_hash, timestamp_before, file_info = file_info) else: return Experiments.FileSent("<file not stored>","<file not stored>", timestamp_before, file_info = file_info)
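# Illustration only: store_file() above writes the student program to
# <CORE_STORE_STUDENTS_PROGRAMS_PATH>/<timestamp>_<reservation_id>. This
# hypothetical helper just restates that naming scheme using the module's
# _get_time_in_str(); it is not used by the class itself.
def _example_storage_path(storage_path, reservation_id):
    relative_file_path = _get_time_in_str() + '_' + reservation_id
    return storage_path + '/' + relative_file_path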
#!/usr/bin/env python3 # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import yaml import json import os import sys from html.parser import HTMLParser class DependencyReportParser(HTMLParser): # This class parses the given html file to find all dependency reports under "Project dependencies" # and "Projection transparent dependencies" sections. # The parser works based on the state machine and its state is updated whenever it reads a new tag. # The state changes as below: # # none -> h2_start -> project_dependencies_start -> h3_start -> compile_start -> table_start -> row_start -> th_start / td_start -> th_end / td_end -> row_end -> table_end -> compile_end -> h3_end -> project_dependencies_end -> h2_end -> none attr_index = 0 group_id = None artifact_id = None version = None classifier = None dep_type = None license = None state = "none" dep_to_license = None compatible_license_names = None include_classifier = False druid_module_name = None def __init__(self, druid_module_name, compatible_license_names): HTMLParser.__init__(self) self.state = "none" self.druid_module_name = druid_module_name self.compatible_license_names = compatible_license_names def parse(self, f): self.dep_to_license = {} self.feed(f.read()) return self.dep_to_license def handle_starttag(self, tag, attrs): # print("current: {}, start tag: {}, attrs:{} ".format(self.state, tag, attrs)) if self.state == "none": if tag == "h2": self.state = "h2_start" if self.state == "h2_start": if tag == "a": for attr in attrs: if attr[0] == "name" and (attr[1] == "Project_Dependencies" or attr[1] == "Project_Transitive_Dependencies"): self.state = "project_dependencies_start" self.include_classifier = False if self.state == "h2_end": if tag == "h3": self.state = "h3_start" if self.state == "h3_start": if tag == "a": for attr in attrs: if attr[0] == "name" and attr[1] == "compile": self.state = "compile_start" if self.state == "h3_end": if tag == "table": self.state = "table_start" if self.state == "table_start": if tag == "tr": self.state = "row_start" self.clear_attr() if self.state == "row_end": if tag == "tr": self.state = "row_start" self.clear_attr() if self.state == "row_start": if tag == "td": self.state = "td_start" elif tag == "th": self.state = "th_start" if self.state == "th_end": if tag == "th": self.state = "th_start" if self.state == "td_end": if tag == "td": self.state = "td_start" def handle_endtag(self, tag): # print("current: {}, end tag: {}".format(self.state, tag)) if self.state == "project_dependencies_start": if tag == "a": self.state = "project_dependencies_end" if self.state == "h2_start": if tag == "h2": self.state = "h2_end" if self.state == "project_dependencies_end": if tag == "h2": self.state = "h2_end" if self.state == "compile_start": if tag == "a": self.state = "compile_end" if self.state 
== "compile_end": if tag == "h3": self.state = "h3_end" if self.state == "table_start": if tag == "table": self.state = "none" if self.state == "td_start": if tag == "td": self.state = "td_end" self.attr_index = self.attr_index + 1 if self.state == "th_start": if tag == "th": self.state = "th_end" if self.state == "row_start": if tag == "tr": self.state = "row_end" if self.state == "th_end": if tag == "tr": self.state = "row_end" if self.state == "td_end": if tag == "tr": self.state = "row_end" # print(json.dumps({"groupId": self.group_id, "artifactId": self.artifact_id, "version": self.version, "classifier": self.classifier, "type": self.dep_type, "license": self.license})) if self.group_id.find("org.apache.druid") < 0: self.dep_to_license[get_dep_key(self.group_id, self.artifact_id, self.version)] = (self.license, self.druid_module_name) if self.state == "row_end": if tag == "table": self.state = "none" def handle_data(self, data): if self.state == "td_start": self.set_attr(data) elif self.state == "th_start": if data.lower() == "classifier": self.include_classifier = True def clear_attr(self): self.group_id = None self.artifact_id = None self.version = None self.classifier = None self.dep_type = None self.license = None self.attr_index = 0 def set_attr(self, data): #print("set data: {}".format(data)) if self.attr_index == 0: self.group_id = data elif self.attr_index == 1: self.artifact_id = data elif self.attr_index == 2: self.version = get_version_string(data) elif self.attr_index == 3: if self.include_classifier: self.classifier = data else: self.dep_type = data elif self.attr_index == 4: if self.include_classifier: self.dep_type = data else: self.set_license(data) elif self.attr_index == 5: if self.include_classifier: self.set_license(data) else: raise Exception("Unknown attr_index [{}]".format(self.attr_index)) else: raise Exception("Unknown attr_index [{}]".format(self.attr_index)) def set_license(self, data): if data.upper().find("GPL") < 0: if self.license != 'Apache License version 2.0': self.license = self.compatible_license_names[data] outfile = None def get_dep_key(group_id, artifact_id, version): return (group_id, artifact_id, version) def build_compatible_license_names(): compatible_licenses = {} compatible_licenses['Apache License, Version 2.0'] = 'Apache License version 2.0' compatible_licenses['The Apache Software License, Version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache 2'] = 'Apache License version 2.0' compatible_licenses['Apache License 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache Software License - Version 2.0'] = 'Apache License version 2.0' compatible_licenses['The Apache License, Version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License Version 2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License Version 2'] = 'Apache License version 2.0' compatible_licenses['Apache License v2.0'] = 'Apache License version 2.0' compatible_licenses['Apache License, version 2.0'] = 'Apache License version 2.0' compatible_licenses['Public Domain'] = 'Public Domain' compatible_licenses['BSD-2-Clause License'] = 'BSD-2-Clause License' compatible_licenses['BSD-3-Clause License'] = 'BSD-3-Clause License' compatible_licenses['New BSD license'] = 'BSD-3-Clause License' compatible_licenses['BSD'] = 'BSD-3-Clause License' compatible_licenses['The BSD License'] = 
'BSD-3-Clause License' compatible_licenses['BSD licence'] = 'BSD-3-Clause License' compatible_licenses['BSD License'] = 'BSD-3-Clause License' compatible_licenses['BSD-like'] = 'BSD-3-Clause License' compatible_licenses['The BSD 3-Clause License'] = 'BSD-3-Clause License' compatible_licenses['Revised BSD'] = 'BSD-3-Clause License' compatible_licenses['New BSD License'] = 'BSD-3-Clause License' compatible_licenses['ICU License'] = 'ICU License' compatible_licenses['SIL Open Font License 1.1'] = 'SIL Open Font License 1.1' compatible_licenses['CDDL 1.1'] = 'CDDL 1.1' compatible_licenses['CDDL/GPLv2+CE'] = 'CDDL 1.1' compatible_licenses['CDDL + GPLv2 with classpath exception'] = 'CDDL 1.1' compatible_licenses['CDDL License'] = 'CDDL 1.1' compatible_licenses['Eclipse Public License 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['The Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['Eclipse Public License - Version 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0' compatible_licenses['Mozilla Public License Version 2.0'] = 'Mozilla Public License Version 2.0' compatible_licenses['Mozilla Public License, Version 2.0'] = 'Mozilla Public License Version 2.0' compatible_licenses['Creative Commons Attribution 2.5'] = 'Creative Commons Attribution 2.5' compatible_licenses['Creative Commons CC0'] = 'Creative Commons CC0' compatible_licenses['CC0'] = 'Creative Commons CC0' compatible_licenses['The MIT License'] = 'MIT License' compatible_licenses['MIT License'] = 'MIT License' compatible_licenses['-'] = '-' return compatible_licenses def module_to_upper(module): extensions_offset = module.lower().find("extensions") if extensions_offset < 0: return module.upper() elif extensions_offset == 0: return module[0:len("extensions")].upper() + module[len("extensions"):len(module)] else: raise Exception("Expected extensions at 0, but {}".format(extensions_offset)) def print_outfile(string): print(string, file=outfile) def print_error(string): print(string, file=sys.stderr) def get_version_string(version): if type(version) == str: return version else: return str(version) def print_license_phrase(license_phrase): remaining = license_phrase while len(remaining) > 0: # print("remaining: {}".format(remaining)) # print("len: {}".format(len(remaining))) if len(remaining) > 120: chars_of_200 = remaining[0:120] phrase_len = chars_of_200.rfind(" ") if phrase_len < 0: raise Exception("Can't find whitespace in {}".format(chars_of_200)) print_outfile(" {}".format(remaining[0:phrase_len])) remaining = remaining[phrase_len:] else: print_outfile(" {}".format(remaining)) remaining = "" def is_non_empty(dic, key): if key in dic and dic[key] is not None: if type(dic[key]) == str: return len(dic[key]) > 0 else: return True else: return False def print_license(license): license_phrase = "This product" if license['license_category'] == "source": license_phrase += " contains" elif license['license_category'] == "binary": license_phrase += " bundles" license_phrase += " {}".format(license['name']) if is_non_empty(license, 'version'): license_phrase += " version {}".format(license['version']) if is_non_empty(license, 'copyright'): license_phrase += ", copyright {}".format(license['copyright']) if is_non_empty(license, 'additional_copyright_statement'): license_phrase += ", {}".format(license['additional_copyright_statement']) if license['license_name'] != 'Apache License version 2.0': license_phrase += " which 
is available under {}".format(license['license_name']) if is_non_empty(license, 'additional_license_statement'): license_phrase += ", {}".format(license['additional_license_statement']) if is_non_empty(license, 'license_file_path'): license_file_list = [] if type(license['license_file_path']) == list: license_file_list.extend(license['license_file_path']) else: license_file_list.append(license['license_file_path']) if len(license_file_list) == 1: license_phrase += ". For details, see {}".format(license_file_list[0]) else: license_phrase += ". For details, " for each_file in license_file_list: if each_file == license_file_list[-1]: license_phrase += ", and {}".format(each_file) elif each_file == license_file_list[0]: license_phrase += "see {}".format(each_file) else: license_phrase += ", {}".format(each_file) license_phrase += "." print_license_phrase(license_phrase) if 'source_paths' in license: for source_path in license['source_paths']: if type(source_path) is dict: for class_name, path in source_path.items(): print_outfile(" {}:".format(class_name)) print_outfile(" * {}".format(path)) else: print_outfile(" * {}".format(source_path)) if 'libraries' in license: for library in license['libraries']: if type(library) is not dict: raise Exception("Expected dict but got {}[{}]".format(type(library), library)) if len(library) > 1: raise Exception("Expected 1 groupId and artifactId, but got [{}]".format(library)) for group_id, artifact_id in library.items(): print_outfile(" * {}:{}".format(group_id, artifact_id)) def find_druid_module_name(dirpath): ext_start = dirpath.find("/ext/") if ext_start > 0: # Found an extension subpath = dirpath[(len("/ext/") + ext_start):] ext_name_end = subpath.find("/") if ext_name_end < 0: raise Exception("Can't determine extension name from [{}]".format(dirpath)) else: return subpath[0:ext_name_end] else: # Druid core return "core" def check_licenses(license_yaml, dependency_reports_root): # Build a dictionary to facilitate comparing reported licenses and registered ones. # These dictionaries are the mapping of (group_id, artifact_id, version) to license_name. # Build reported license dictionary. reported_dep_to_licenses = {} compatible_license_names = build_compatible_license_names() for dirpath, dirnames, filenames in os.walk(dependency_reports_root): for filename in filenames: if filename == "dependencies.html": full_path = os.path.join(dirpath, filename) # Determine if it's druid core or an extension druid_module_name = find_druid_module_name(dirpath) print_error("Parsing {}".format(full_path)) with open(full_path) as report_file: parser = DependencyReportParser(druid_module_name, compatible_license_names) reported_dep_to_licenses.update(parser.parse(report_file)) if len(reported_dep_to_licenses) == 0: raise Exception("No dependency reports are found") print_error("Found {} reported licenses\n".format(len(reported_dep_to_licenses))) # Build registered license dictionary. 
registered_dep_to_licenses = {} skipping_licenses = {} with open(license_yaml) as registry_file: licenses_list = list(yaml.load_all(registry_file)) for license in licenses_list: if 'libraries' in license: for library in license['libraries']: if type(library) is not dict: raise Exception("Expected dict but got {}[{}]".format(type(library), library)) if len(library) > 1: raise Exception("Expected 1 groupId and artifactId, but got [{}]".format(library)) for group_id, artifact_id in library.items(): if 'version' not in license: raise Exception("version is missing in {}".format(license)) if 'license_name' not in license: raise Exception("name is missing in {}".format(license)) if 'skip_dependency_report_check' in license and license['skip_dependency_report_check']: if 'version' not in license: version = "-" else: version = get_version_string(license['version']) skipping_licenses[get_dep_key(group_id, artifact_id, version)] = license else: registered_dep_to_licenses[get_dep_key(group_id, artifact_id, get_version_string(license['version']))] = compatible_license_names[license['license_name']] if len(registered_dep_to_licenses) == 0: raise Exception("No registered licenses are found") # Compare licenses in registry and those in dependency reports. mismatched_licenses = [] missing_licenses = [] unchecked_licenses = [] # Iterate through registered licenses and check if its license is same with the reported one. for key, registered_license in registered_dep_to_licenses.items(): if key in reported_dep_to_licenses: # key is (group_id, artifact_id, version) reported_license_druid_module = reported_dep_to_licenses[key] reported_license = reported_license_druid_module[0] druid_module = reported_license_druid_module[1] if reported_license is not None and reported_license != "-" and reported_license != registered_license: group_id = key[0] artifact_id = key[1] version = key[2] mismatched_licenses.append((druid_module, group_id, artifact_id, version, reported_license, registered_license)) # If we find any mismatched license, stop immediately. if len(mismatched_licenses) > 0: print_error("Error: found {} mismatches between reported licenses and registered licenses".format(len(mismatched_licenses))) for mismatched_license in mismatched_licenses: print_error("druid_module: {}, groupId: {}, artifactId: {}, version: {}, reported_license: {}, registered_license: {}".format(mismatched_license[0], mismatched_license[1], mismatched_license[2], mismatched_license[3], mismatched_license[4], mismatched_license[5])) print_error("") # Let's find missing licenses, which are reported but missing in the registry. for key, reported_license_druid_module in reported_dep_to_licenses.items(): if reported_license_druid_module[0] != "-" and key not in registered_dep_to_licenses and key not in skipping_licenses: missing_licenses.append((reported_license_druid_module[1], key[0], key[1], key[2], reported_license_druid_module[0])) if len(missing_licenses) > 0: print_error("Error: found {} missing licenses. These licenses are reported, but missing in the registry".format(len(missing_licenses))) for missing_license in missing_licenses: print_error("druid_module: {}, groupId: {}, artifactId: {}, version: {}, license: {}".format(missing_license[0], missing_license[1], missing_license[2], missing_license[3], missing_license[4])) print_error("") # Let's find unchecked licenses, which are registered but missing in the report. # These licenses should be checked manually. 
for key, registered_license in registered_dep_to_licenses.items(): if key not in reported_dep_to_licenses: unchecked_licenses.append((key[0], key[1], key[2], registered_license)) elif reported_dep_to_licenses[key][0] == "-": unchecked_licenses.append((key[0], key[1], key[2], registered_license)) if len(unchecked_licenses) > 0: print_error("Warn: found {} unchecked licenses. These licenses are registered, but not found in dependency reports.".format(len(unchecked_licenses))) print_error("These licenses must be checked manually.") for unchecked_license in unchecked_licenses: print_error("groupId: {}, artifactId: {}, version: {}, reported_license: {}".format(unchecked_license[0], unchecked_license[1], unchecked_license[2], unchecked_license[3])) print_error("") if len(mismatched_licenses) > 0 or len(missing_licenses) > 0: sys.exit(1) def print_license_name_underbar(license_name): underbar = "" for _ in range(len(license_name)): underbar += "=" print_outfile("{}\n".format(underbar)) def generate_license(apache_license_v2, license_yaml): # Generate LICENSE.BINARY file print_error("=== Generating the contents of LICENSE.BINARY file ===\n") # Print Apache license first. print_outfile(apache_license_v2) with open(license_yaml) as registry_file: licenses_list = list(yaml.load_all(registry_file)) # Group licenses by license_name, license_category, and then module. licenses_map = {} for license in licenses_list: if license['license_name'] not in licenses_map: licenses_map[license['license_name']] = {} licenses_of_name = licenses_map[license['license_name']] if license['license_category'] not in licenses_of_name: licenses_of_name[license['license_category']] = {} licenses_of_category = licenses_of_name[license['license_category']] if license['module'] not in licenses_of_category: licenses_of_category[license['module']] = [] licenses_of_module = licenses_of_category[license['module']] licenses_of_module.append(license) for license_name, licenses_of_name in sorted(licenses_map.items()): print_outfile(license_name) print_license_name_underbar(license_name) for license_category, licenses_of_category in licenses_of_name.items(): for module, licenses in licenses_of_category.items(): print_outfile("{}/{}".format(license_category.upper(), module_to_upper(module))) for license in licenses: print_license(license) print_outfile("") print_outfile("") # TODO: add options: debug mode if len(sys.argv) != 5: sys.stderr.write("usage: {} <path to apache license file> <path to license.yaml> <root to maven dependency reports> <path to output file>".format(sys.argv[0])) sys.exit(1) with open(sys.argv[1]) as apache_license_file: apache_license_v2 = apache_license_file.read() license_yaml = sys.argv[2] dependency_reports_root = sys.argv[3] with open(sys.argv[4], "w") as outfile: check_licenses(license_yaml, dependency_reports_root) generate_license(apache_license_v2, license_yaml)
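# Small illustrative sketch (hypothetical Maven coordinates): both the registry
# and the dependency reports are keyed by (groupId, artifactId, version), and
# license names are normalized through build_compatible_license_names() before
# being compared.
def _example_normalized_entry(group_id, artifact_id, version, reported_name):
    compatible_names = build_compatible_license_names()
    return get_dep_key(group_id, artifact_id, version), compatible_names[reported_name]

# e.g. _example_normalized_entry("com.example", "demo", "1.0", "Apache 2.0")
# -> (("com.example", "demo", "1.0"), "Apache License version 2.0")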
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # OpenCV HSV ranges: hue 0-179, saturation and value 0-255.
    lower_red = np.array([160, 150, 20])
    upper_red = np.array([179, 255, 50])    # upper hue capped at 179 (was 190, above OpenCV's range)

    lower_blue = np.array([110, 150, 100])
    upper_blue = np.array([120, 255, 255])  # saturation bound fixed (was 2250, outside the 0-255 range)

    lower_green = np.array([30, 160, 80])
    upper_green = np.array([60, 220, 160])

    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    mask2 = cv2.inRange(hsv, lower_blue, upper_blue)
    mask3 = cv2.inRange(hsv, lower_green, upper_green)

    res1 = cv2.bitwise_and(frame, frame, mask=mask1)
    res2 = cv2.bitwise_and(frame, frame, mask=mask2)
    res3 = cv2.bitwise_and(frame, frame, mask=mask3)

    cv2.imshow('frame', frame)
    cv2.imshow('maskRED', mask1)
    cv2.imshow('red', res1)
    cv2.imshow('blue', res2)
    cv2.imshow('green', res3)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # Esc key quits.
        break

cv2.destroyAllWindows()
cap.release()
""" tests.test_component_demo ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tests demo component. """ import unittest import homeassistant.core as ha import homeassistant.components.automation as automation import homeassistant.components.automation.state as state from homeassistant.const import CONF_PLATFORM class TestAutomationState(unittest.TestCase): """ Test the event automation. """ def setUp(self): # pylint: disable=invalid-name self.hass = ha.HomeAssistant() self.hass.states.set('test.entity', 'hello') self.calls = [] def record_call(service): self.calls.append(service) self.hass.services.register('test', 'automation', record_call) def tearDown(self): # pylint: disable=invalid-name """ Stop down stuff we started. """ self.hass.stop() def test_setup_fails_if_no_entity_id(self): self.assertFalse(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', automation.CONF_SERVICE: 'test.automation' } })) def test_if_fires_on_entity_change(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_from_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_to_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_both_filters(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_not_fires_if_to_filter_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'moon') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_not_fires_if_from_filter_not_match(self): self.hass.states.set('test.entity', 'bye') self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_not_fires_if_entity_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.another_entity', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() 
self.assertEqual(0, len(self.calls))
# -*- coding: utf-8 -*-
import pynvim


@pynvim.plugin
class TestPlugin(object):

    def __init__(self, nvim):
        self.nvim = nvim

    @pynvim.function('TestFunction', sync=True)
    def testfunction(self, args):
        return 3

    @pynvim.command('TestCommand', nargs='*', range='')
    def testcommand(self, args, range):
        self.nvim.current.line = ('Command with args: {}, range: {}'
                                  .format(args, range))

    @pynvim.autocmd('BufEnter', pattern='*.py', eval='expand("<afile>")',
                    sync=True)
    def on_bufenter(self, filename):
        self.nvim.out_write('testplugin is in ' + filename + '\n')
# Prints 5 times: counter takes the values 1, 11, 21, 31, 41 before reaching 50.
counter = 1
while counter < 50:
    print("I like Python")
    counter = counter + 10

# Prints 9 times: counter takes the values 1 through 9 before reaching 10.
counter = 1
while counter < 10:
    print("I like Python")
    counter = counter + 1
""" This file aggregates nox commands for various development tasks. To learn more about nox, visit https://nox.thea.codes/en/stable/index.html """ import sys import nox sys.path.append("noxfiles") from ci_nox import * from dev_nox import * from docker_nox import * from docs_nox import * from utils_nox import * # Sets the default session to `--list` nox.options.sessions = []
import unittest

from utils.group_by import group_by


class GroupByTestCase(unittest.TestCase):
    def setUp(self):
        self.maxDiff = None

    def test_simple(self):
        items = [{"id": 1, "type": "string"}, {"id": 2, "type": "string"}]
        expected = {"string": [{"id": 1, "type": "string"}, {"id": 2, "type": "string"}]}

        actual = group_by(items=items, grouping_function=lambda item: item["type"])

        self.assertEqual(expected, actual)

    def test_expanded(self):
        class Person:
            def __init__(self, first_name, last_name, date_of_birth):
                self.first_name = first_name
                self.last_name = last_name
                self.date_of_birth = date_of_birth

        bob_fisher = Person("Bob", "Fisher", "1985-05-21")
        sara_miller = Person("Sara", "Miller", "1997-11-01")
        eve_miller = Person("Eve", "Miller", "1997-01-07")
        george_smith = Person("George", "Smith", "1956-09-25")
        items = [bob_fisher, sara_miller, eve_miller, george_smith]

        expected = {"Fisher": [bob_fisher], "Miller": [sara_miller, eve_miller], "Smith": [george_smith]}
        actual = group_by(items, lambda person: person.last_name)
        self.assertEqual(expected, actual)

        # Using the 'auto_grouping' option
        expected = {"Bob": bob_fisher, "Sara": sara_miller, "Eve": eve_miller, "George": george_smith}
        actual = group_by(items, lambda person: person.first_name, auto_grouping=True)
        self.assertEqual(expected, actual)
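# The tests above pin down the behaviour expected of utils.group_by, but the
# implementation itself is not shown here. The sketch below is only a minimal
# assumed implementation consistent with those tests, not the project's actual code.
from collections import defaultdict


def group_by(items, grouping_function, auto_grouping=False):
    """Group items by the key returned from grouping_function.

    With auto_grouping=True, each key is assumed to be unique and maps directly
    to its single item instead of to a one-element list.
    """
    if auto_grouping:
        return {grouping_function(item): item for item in items}

    grouped = defaultdict(list)
    for item in items:
        grouped[grouping_function(item)].append(item)
    return dict(grouped)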
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os

import docker
import emoji
import requests

from klio_cli.utils import multi_line_terminal_writer


def check_docker_connection(docker_client):
    try:
        docker_client.ping()
    except (docker.errors.APIError, requests.exceptions.ConnectionError):
        logging.error(emoji.emojize("Could not reach Docker! :whale:"))
        logging.error("Is it installed and running?")
        raise SystemExit(1)


def check_dockerfile_present(job_dir):
    dockerfile_path = job_dir + "/Dockerfile"
    if not os.path.exists(dockerfile_path):
        logging.error("Klio can't run a job without a Dockerfile.")
        logging.error("Please supply \033[4m{}\033[0m".format(dockerfile_path))
        raise SystemExit(1)


def docker_image_exists(name, client):
    try:
        client.images.get(name)
        exists = True
    except docker.errors.ImageNotFound:
        exists = False
    except docker.errors.APIError as e:
        msg = (
            "Docker ran into an error while checking if image {} "
            "has already been built:\n{}".format(name, e)
        )
        logging.error(msg)
        raise SystemExit(1)
    return exists


def build_docker_image(job_dir, image_name, image_tag, config_file=None):
    """Build the given Docker image.

    Note: This uses the Python Docker SDK's low-level API in order to
    capture and emit build logs as they are generated by Docker. Using
    the high-level API, you only get access to logs at the end of the
    build, which creates a bad user experience.

    Args:
        job_dir (str): Relative path to the directory containing the Dockerfile.
        image_name (str): Name to build the image with (forms a 'name:tag' pair).
        image_tag (str): Tag to build the image with (forms a 'name:tag' pair).
        config_file (str): Optional path to the job config file; defaults to
            "klio-job.yaml".

    Raises:
        SystemExit: If the Docker build errors out, the process terminates.
    """

    def clean_logs(log_generator):
        # Loop through lines containing log JSON objects.
        # Example line: {"stream":"Starting build..."}\r\n{"stream":"\\n"}\n
        for line in log_generator:
            if isinstance(line, bytes):
                line = line.decode("utf-8")
            # Some lines contain multiple whitespace-separated objects.
            # Split them so json.loads doesn't choke.
            for log_obj in line.split("\r\n"):
                # Some log objects only wrap newlines, and split sometimes
                # produces '' strings. Remove these artifacts.
                if log_obj != '{"stream":"\\n"}' and log_obj != "":
                    yield log_obj

    def print_log(log):
        if "stream" in log:
            logging.info(log["stream"].strip("\n"))
        if "error" in log:
            fail_color = "\033[91m"
            end_color = "\033[0m"
            logging.info(
                "{}{}{}".format(
                    fail_color, log["errorDetail"]["message"], end_color
                )
            )
            logging.error("\nDocker hit an error while building the job image.")
            logging.error(
                "Please fix your Dockerfile: {}/Dockerfile".format(job_dir)
            )
            raise SystemExit(1)

    build_flag = {
        "path": job_dir,
        "tag": "{}:{}".format(image_name, image_tag),
        "rm": True,  # Remove intermediate build containers.
        "buildargs": {
            "tag": image_tag,
            "KLIO_CONFIG": config_file or "klio-job.yaml",
        },
    }
    logs = docker.APIClient(base_url="unix://var/run/docker.sock").build(
        **build_flag
    )
    for log_obj in clean_logs(logs):
        log = json.loads(log_obj)
        print_log(log)


def _get_layer_id_and_message(clean_line):
    line_json = json.loads(clean_line)
    layer_id = line_json.get("id")
    # The very first log message doesn't have an id.
    msg_pfx = ""
    if layer_id:
        msg_pfx = "{}: ".format(layer_id)
    msg = "{prefix}{status}{progress}".format(
        prefix=msg_pfx,
        status=line_json.get("status", ""),
        progress=line_json.get("progress", ""),
    )
    return layer_id, msg


def push_image_to_gcr(image, tag, client):
    kwargs = {"repository": image, "tag": tag, "stream": True}
    writer = multi_line_terminal_writer.MultiLineTerminalWriter()
    for raw_line in client.images.push(**kwargs):
        clean_line = raw_line.decode("utf-8").strip("\r\n")
        clean_lines = clean_line.split("\r\n")
        for line in clean_lines:
            layer_id, msg = _get_layer_id_and_message(line)
            writer.emit_line(layer_id, msg.strip())


def get_docker_image_client(job_dir, image_tag, image_name, force_build):
    """Return the Docker image name and client for running klio commands.

    Args:
        job_dir (str): Relative path to the directory containing the Dockerfile.
        image_tag (str): Tag to build the image with (forms a 'name:tag' pair).
        image_name (str): Name to build the image with (forms a 'name:tag' pair).
        force_build (bool): Flag to force a new Docker image build.

    Returns:
        A valid Docker image name and client.
    """
    image = "{}:{}".format(image_name, image_tag)
    client = docker.from_env()

    check_docker_connection(client)
    check_dockerfile_present(job_dir)

    if not docker_image_exists(image, client) or force_build:
        logging.info("Building worker image: {}".format(image))
        build_docker_image(job_dir, image_name, image_tag)
    else:
        logging.info("Found worker image: {}".format(image))

    return image, client
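# For orientation, a minimal sketch of how a caller might drive these helpers.
# The job directory and image repository below are made-up placeholders, not
# values from the Klio codebase.
if __name__ == "__main__":
    job_dir = "jobs/my-klio-job"                   # hypothetical job directory
    image_name = "gcr.io/my-project/my-klio-job"   # hypothetical image repository
    image_tag = "latest"

    # Build the image if it doesn't exist yet, then push it layer by layer.
    image, client = get_docker_image_client(
        job_dir, image_tag, image_name, force_build=False
    )
    push_image_to_gcr(image_name, image_tag, client)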
# -*- coding: utf-8 -*-
from tccli.services.tsw.tsw_client import action_caller
"""rioxarray version""" __version__ = "0.4.1.dev0"
import json

from aws_xray_sdk.core import patch_all, xray_recorder
from okdata.aws.logging import log_add, logging_wrapper

patch_all()


@logging_wrapper
@xray_recorder.capture("say_hello")
def say_hello(event, context):
    log_add(relevant_information="Hello from Python blueprint!")

    return {
        "statusCode": 200,
        "headers": {},
        "body": json.dumps({"hello": "world!"}),
    }
import tensorflow as tf

# Note: tf.contrib was removed in TensorFlow 2.x, so this requires TensorFlow 1.x.
hparams = tf.contrib.training.HParams(
    num_mels=80,
    frame_length_ms=50,
    frame_shift_ms=12.5,
    hop_length=int(16000 * 0.0125),  # samples.
    win_length=int(16000 * 0.05),  # samples.
    max_db=100,
    ref_db=20,
    preemphasis=0.97,
    max_abs_value=4.0,
    symmetric_mel=True,
    sr=16000,
    n_fft=2048,
    n_iter=60,
    power=1.5,
    max_generation_frames=1100,
    max_eval_batches=20,
    max_eval_sample_length=1000,
    eval_sample_per_speaker=4,
    vocab_size=6000,
    embed_size=512,
    encoder_hidden=512,
    decoder_hidden=768,
    n_encoder_layer=6,
    n_decoder_layer=6,
    n_attention_head=8,
    transformer_dropout_rate=0.1,
    decoder_dropout_rate=0.5,
    prenet_hidden=256,
    postnet_hidden=512,
    n_postnet_layer=5,
    data_format="nlti",
    use_sos=True,
    bucket_size=512,
    shuffle_training_data=True,
    batch_frame_limit=8000,
    batch_frame_quad_limit=7000000,
    balanced_training=True,
    lg_prob_scale=0.2,
    adapt_start_step=30000,
    adapt_end_step=30000,
    final_adapt_rate=0.25,
    data_warmup_steps=30000,
    target_length_lower_bound=240,
    target_length_upper_bound=800,
    reg_weight=5e-9,
    multi_speaker=True,
    max_num_speaker=1000,
    speaker_embedding_size=128,
    multi_lingual=True,
    max_num_language=100,
    language_net_hidden=128,
    language_embedding_size=128,
    warmup_steps=50000,
    max_lr=1e-3,
    min_lr=1e-5,
    lr_decay_step=550000,
    lr_decay_rate=1e-2,
    adam_eps=5e-8,
    external_embed_dim=1024,
    use_external_embed=False,
)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-12-29 03:48
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('feewaiver', '0033_campground'),
    ]

    operations = [
        migrations.AddField(
            model_name='feewaivervisit',
            name='campgrounds',
            field=models.ManyToManyField(to='feewaiver.CampGround'),
        ),
    ]
#! -*- coding: utf-8 -*-
import csv


class DataProcessor:
    """Base class for data converters for sequence classification data sets."""

    def get_examples(self, data_dir):
        """Gets a collection of :class:`InputExample` for the data set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab-separated values file."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
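# Sketch of how the base class above is meant to be subclassed, assuming a
# hypothetical two-column TSV layout (label<TAB>text). InputExample, the file
# name, and the label set are illustrative assumptions, not part of the
# original module.
class InputExample:
    """Minimal stand-in for the example container referenced in the docstrings."""

    def __init__(self, guid, text, label=None):
        self.guid = guid
        self.text = text
        self.label = label


class BinarySentimentProcessor(DataProcessor):
    """Hypothetical processor for a data set stored as label<TAB>text rows."""

    def get_examples(self, data_dir):
        rows = self._read_tsv(data_dir + "/train.tsv")
        return [
            InputExample(guid=str(i), text=row[1], label=row[0])
            for i, row in enumerate(rows)
        ]

    def get_labels(self):
        return ["0", "1"]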
from typing import Type

import pgtrigger
from django.db.models import Model


class Notify(pgtrigger.Trigger):
    """A trigger which notifies a channel"""

    def get_func(self, model: Type[Model]):
        return f'''
            {self._build_payload(model)}
            {self._pre_notify()}
            perform pg_notify('{self.name}', payload);
            RETURN NEW;
        '''

    def get_declare(self, model: Type[Model]):
        return [('payload', 'TEXT')]

    def _pre_notify(self):
        return ''

    def _build_payload(self, model):
        return f'''
            payload := json_build_object(
                'app', '{model._meta.app_label}',
                'model', '{model.__name__}',
                'old', row_to_json(OLD),
                'new', row_to_json(NEW)
            );
        '''


class LockableNotify(Notify):
    def _pre_notify(self):
        return f'''
            INSERT INTO pgpubsub_notification (channel, payload)
            VALUES ('{self.name}', to_json(payload::text));
        '''
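# Illustration of how a trigger class like Notify is typically attached to a
# model. This registration snippet is an assumption for context, using
# django-pgtrigger's standard register/when/operation API; the Author model
# and channel name are hypothetical.
from django.db import models


@pgtrigger.register(
    Notify(
        name='author_change',
        when=pgtrigger.After,
        operation=pgtrigger.Insert | pgtrigger.Update,
    )
)
class Author(models.Model):
    name = models.TextField()

# A Postgres session that has run `LISTEN author_change;` would then receive
# the JSON payload built by _build_payload for every insert or update.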