Dataset columns (name: dtype, observed value/length range; ⌀ marks nullable fields):
- hexsha: string, length 40
- size: int64, 3 - 1.03M
- ext: string, 10 classes
- lang: string, 1 class (Python)
- max_stars_repo_path: string, length 3 - 972
- max_stars_repo_name: string, length 6 - 130
- max_stars_repo_head_hexsha: string, length 40 - 78
- max_stars_repo_licenses: list, length 1 - 10
- max_stars_count: int64, 1 - 191k, ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24, ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24, ⌀
- max_issues_repo_path: string, length 3 - 972
- max_issues_repo_name: string, length 6 - 130
- max_issues_repo_head_hexsha: string, length 40 - 78
- max_issues_repo_licenses: list, length 1 - 10
- max_issues_count: int64, 1 - 116k, ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24, ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24, ⌀
- max_forks_repo_path: string, length 3 - 972
- max_forks_repo_name: string, length 6 - 130
- max_forks_repo_head_hexsha: string, length 40 - 78
- max_forks_repo_licenses: list, length 1 - 10
- max_forks_count: int64, 1 - 105k, ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24, ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24, ⌀
- content: string, length 3 - 1.03M
- avg_line_length: float64, 1.13 - 941k
- max_line_length: int64, 2 - 941k
- alphanum_fraction: float64, 0 - 1
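As a hedged illustration only (not part of the dump itself): rows following this schema could be consumed from a JSON Lines export roughly as below. The file name "rows.jsonl" and the filter threshold are hypothetical, not something this dataset prescribes.

import json

def iter_python_rows(path="rows.jsonl", min_alphanum=0.25):
    # Yield (repo, path, source) for rows matching the schema listed above.
    # Both the input file name and the alphanum_fraction cut-off are assumptions.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            if row.get("lang") == "Python" and row.get("alphanum_fraction", 0) >= min_alphanum:
                yield row["max_stars_repo_name"], row["max_stars_repo_path"], row["content"]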
hexsha: 126ed0c4b5b9953ffae260a135fbd0a7f5870c29 | size: 3,556 | ext: py | lang: Python
max_stars: src/inmanta/db/versions/v4.py | inmanta/inmanta-core @ ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | ["Apache-2.0"] | count: 6 | 2021-03-09T10:24:02.000Z to 2022-01-16T03:52:11.000Z
max_issues: src/inmanta/db/versions/v4.py | inmanta/inmanta-core @ ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | ["Apache-2.0"] | count: 1,319 | 2020-12-18T08:52:29.000Z to 2022-03-31T18:17:32.000Z
max_forks: src/inmanta/db/versions/v4.py | inmanta/inmanta-core @ ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | ["Apache-2.0"] | count: 4 | 2021-03-03T15:36:50.000Z to 2022-03-11T11:41:51.000Z
content:
"""
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
from asyncpg import Connection
DISABLED = False
async def update(connection: Connection) -> None:
await connection.execute(
"""
--------------------------------
-- Remove forms functionality --
--------------------------------
DROP TABLE IF EXISTS public.formrecord;
DROP TABLE IF EXISTS public.form;
--------------------------------------------------------------------------------
-- Remove join table resourceversionid and put fields in resourceaction table --
--------------------------------------------------------------------------------
-- Add new columns to resourceaction table
ALTER TABLE public.resourceaction
ADD COLUMN environment uuid,
ADD COLUMN version integer,
ADD COLUMN resource_version_ids varchar[];
-- Populate the environment and resource_version_ids columns
UPDATE public.resourceaction AS r
SET environment=(SELECT DISTINCT rvi.environment
FROM public.resourceversionid rvi
WHERE rvi.action_id=r.action_id),
resource_version_ids=ARRAY(SELECT rvi.resource_version_id
FROM public.resourceversionid rvi
WHERE rvi.action_id=r.action_id);
-- Remove dangling resource actions. Due to a bug, the environment is
-- unknown when no resources are associated with a resource action.
DELETE FROM public.resourceaction WHERE environment IS NULL;
-- Populate the version column
UPDATE public.resourceaction AS ra
SET version=(SELECT model
FROM public.resource AS rs
WHERE rs.environment=ra.environment AND rs.resource_version_id=ra.resource_version_ids[1]);
-- Delete resource actions from the database for which the configuration model doesn't exist anymore.
-- This is caused by a cascading delete issue.
DELETE FROM public.resourceaction AS ra WHERE NOT EXISTS(SELECT 1
FROM public.resource AS r
WHERE r.environment=ra.environment AND r.model=ra.version);
-- Set constraints on the new columns in the resourceaction table
ALTER TABLE public.resourceaction
ALTER COLUMN environment SET NOT NULL,
ALTER COLUMN version SET NOT NULL,
ALTER COLUMN resource_version_ids SET NOT NULL,
ADD FOREIGN KEY (environment, version) REFERENCES configurationmodel (environment, version) ON DELETE CASCADE;
-- Drop resourceversionid table and its indexes
DROP INDEX IF EXISTS resourceversionid_environment_resource_version_id_index;
DROP INDEX IF EXISTS resourceversionid_action_id_index;
DROP TABLE IF EXISTS public.resourceversionid;
-- Setup/Remove indexes
CREATE INDEX resourceaction_resource_version_ids_index ON resourceaction USING gin(resource_version_ids);
DROP INDEX resourceaction_action_id_started_index;
CREATE INDEX resourceaction_environment_action_started_index ON resourceaction(environment,action,started DESC);
"""
)
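

# --- Illustrative sketch (not part of the original migration) ----------------
# The file above exposes a DISABLED flag and a single `update(connection)`
# coroutine. A minimal, hypothetical way to apply it by hand could look like
# this; the DSN is a placeholder and `asyncpg.connect` is assumed to be
# available since the module already depends on asyncpg.
async def _example_apply_v4(dsn: str = "postgresql://localhost/inmanta") -> None:
    import asyncpg

    connection = await asyncpg.connect(dsn)
    try:
        if not DISABLED:
            await update(connection)
    finally:
        await connection.close()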
avg_line_length: 41.348837 | max_line_length: 116 | alphanum_fraction: 0.691789
hexsha: 1421bb4be638e2a3c3966a6940ebbf88b122c194 | size: 19,414 | ext: py | lang: Python
max_stars: examples/pytorch/multiple-choice/run_swag.py | bugface/transformers @ ba286fe7d51db12ad663effac83bed8199dd7141 | ["Apache-2.0"] | count: 5 | 2020-09-01T09:15:48.000Z to 2020-09-15T03:25:05.000Z
max_issues: examples/pytorch/multiple-choice/run_swag.py | bugface/transformers @ ba286fe7d51db12ad663effac83bed8199dd7141 | ["Apache-2.0"] | count: 2 | 2022-03-08T04:58:59.000Z to 2022-03-19T03:45:14.000Z
max_forks: examples/pytorch/multiple-choice/run_swag.py | bugface/transformers @ ba286fe7d51db12ad663effac83bed8199dd7141 | ["Apache-2.0"] | count: 3 | 2020-08-20T04:46:25.000Z to 2020-10-14T08:39:13.000Z
content:
#!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script to your own multiple choice task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.20.0.dev0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
"""
    Data collator that will dynamically pad the inputs received for multiple choice.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature.pop(label_name) for feature in features]
batch_size = len(features)
num_choices = len(features[0]["input_ids"])
flattened_features = [
[{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
]
flattened_features = list(chain(*flattened_features))
batch = self.tokenizer.pad(
flattened_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
# Un-flatten
batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
# Add back labels
batch["labels"] = torch.tensor(labels, dtype=torch.int64)
return batch
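

# --- Illustrative sketch (not part of the original script) -------------------
# A hedged example of how DataCollatorForMultipleChoice could be exercised on
# its own: two pre-tokenized examples with four choices each are padded into
# tensors of shape (batch, num_choices, seq_len). The checkpoint name is only
# a common default, not something this script requires; the helper is never
# called by the training flow.
def _example_collator_usage():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
    features = [
        {
            "input_ids": [tokenizer.encode(f"context choice {i}") for i in range(4)],
            "attention_mask": [[1] * len(tokenizer.encode(f"context choice {i}")) for i in range(4)],
            "label": 0,
        }
        for _ in range(2)
    ]
    batch = collator(features)
    print(batch["input_ids"].shape)  # e.g. torch.Size([2, 4, seq_len])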
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"swag",
"regular",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = list(chain(*first_sentences))
second_sentences = list(chain(*second_sentences))
# Tokenize
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=max_seq_length,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
kwargs = dict(
finetuned_from=model_args.model_name_or_path,
tasks="multiple-choice",
dataset_tags="swag",
dataset_args="regular",
dataset="SWAG",
language="en",
)
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
avg_line_length: 40.70021 | max_line_length: 119 | alphanum_fraction: 0.667096
hexsha: 54cdb369b0b4e01c130aab22125b7a3645c210db | size: 657 | ext: py | lang: Python
max_stars: app/table_booker/migrations/0002_auto_20210614_0707.py | Jonwodi/Django-PostgreSQL-Docker @ 150c4b63446408978bda7c14b1e92992da0200d2 | ["MIT"] | count: null | dates: null
max_issues: app/table_booker/migrations/0002_auto_20210614_0707.py | Jonwodi/Django-PostgreSQL-Docker @ 150c4b63446408978bda7c14b1e92992da0200d2 | ["MIT"] | count: null | dates: null
max_forks: app/table_booker/migrations/0002_auto_20210614_0707.py | Jonwodi/Django-PostgreSQL-Docker @ 150c4b63446408978bda7c14b1e92992da0200d2 | ["MIT"] | count: null | dates: null
content:
# Generated by Django 3.0.8 on 2021-06-14 07:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('table_booker', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='booking',
name='total_guests',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='table',
name='restaurant',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tables', to='table_booker.Restaurant'),
),
]
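

# Illustrative note (not part of the generated migration): the two operations
# above correspond, roughly, to model changes along these lines in models.py.
# This is a hedged reconstruction inferred from the migration itself, not the
# project's actual models:
#
#   class Booking(models.Model):
#       total_guests = models.IntegerField(null=True)
#
#   class Table(models.Model):
#       restaurant = models.ForeignKey(
#           "table_booker.Restaurant",
#           on_delete=models.CASCADE,
#           related_name="tables",
#       )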
avg_line_length: 26.28 | max_line_length: 134 | alphanum_fraction: 0.619482
hexsha: 78bf9c998dfcee7b0d42b7e5731c028cc2d45719 | size: 2,768 | ext: py | lang: Python
max_stars: openpype/hosts/nuke/plugins/publish/extract_gizmo.py | dangerstudios/OpenPype @ 10ddcc4699137888616eec57cd7fac9648189714 | ["MIT"] | count: 87 | 2021-05-07T08:40:46.000Z to 2022-03-19T00:36:25.000Z
max_issues: openpype/hosts/nuke/plugins/publish/extract_gizmo.py | dangerstudios/OpenPype @ 10ddcc4699137888616eec57cd7fac9648189714 | ["MIT"] | count: 1,019 | 2021-04-26T06:22:56.000Z to 2022-03-31T16:30:43.000Z
max_forks: openpype/hosts/nuke/plugins/publish/extract_gizmo.py | dangerstudios/OpenPype @ 10ddcc4699137888616eec57cd7fac9648189714 | ["MIT"] | count: 33 | 2021-04-29T12:35:54.000Z to 2022-03-25T14:48:42.000Z
content:
import pyblish.api
from avalon.nuke import lib as anlib
from openpype.hosts.nuke.api import utils as pnutils
import nuke
import os
import openpype
class ExtractGizmo(openpype.api.Extractor):
"""Extracting Gizmo (Group) node
    Will create a Nuke script containing only the Gizmo node.
"""
order = pyblish.api.ExtractorOrder
label = "Extract Gizmo (Group)"
hosts = ["nuke"]
families = ["gizmo"]
def process(self, instance):
tmp_nodes = list()
orig_grpn = instance[0]
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}.nk".format(instance.name)
path = os.path.join(stagingdir, filename)
# maintain selection
with anlib.maintained_selection():
orig_grpn_name = orig_grpn.name()
tmp_grpn_name = orig_grpn_name + "_tmp"
# select original group node
anlib.select_nodes([orig_grpn])
# copy to clipboard
nuke.nodeCopy("%clipboard%")
# reset selection to none
anlib.reset_selection()
# paste clipboard
nuke.nodePaste("%clipboard%")
# assign pasted node
copy_grpn = nuke.selectedNode()
copy_grpn.setXYpos((orig_grpn.xpos() + 120), orig_grpn.ypos())
# convert gizmos to groups
pnutils.bake_gizmos_recursively(copy_grpn)
# remove avalonknobs
knobs = copy_grpn.knobs()
avalon_knobs = [k for k in knobs.keys()
for ak in ["avalon:", "ak:"]
if ak in k]
avalon_knobs.append("publish")
for ak in avalon_knobs:
copy_grpn.removeKnob(knobs[ak])
# add to temporary nodes
tmp_nodes.append(copy_grpn)
# swap names
orig_grpn.setName(tmp_grpn_name)
copy_grpn.setName(orig_grpn_name)
# create tmp nk file
# save file to the path
nuke.nodeCopy(path)
# Clean up
for tn in tmp_nodes:
nuke.delete(tn)
# rename back to original
orig_grpn.setName(orig_grpn_name)
if "representations" not in instance.data:
instance.data["representations"] = []
# create representation
representation = {
'name': 'gizmo',
'ext': 'nk',
'files': filename,
"stagingDir": stagingdir
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, path))
self.log.info("Data {}".format(
instance.data))
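

# Illustrative note (not part of the original plugin): as a pyblish extractor,
# this class is normally discovered and run by OpenPype's publishing pipeline.
# A rough, hypothetical manual registration for local testing might look like
# the lines below; treat the exact calls as an assumption rather than the
# documented OpenPype workflow:
#
#   import pyblish.api
#   import pyblish.util
#
#   pyblish.api.register_plugin(ExtractGizmo)
#   context = pyblish.util.publish()  # runs collect/validate/extract/integrate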
avg_line_length: 29.136842 | max_line_length: 74 | alphanum_fraction: 0.562139
hexsha: 9bf3b7e5b44e2b57c1922e1f1d05b59452d26ab1 | size: 18,050 | ext: py | lang: Python
max_stars: tutorials/video_classification_example/train.py | kevinmtian/pytorchvideo @ 168e16859a6029ef8ebeb476f9163bebb6c6b87d | ["Apache-2.0"] | count: null | dates: null
max_issues: tutorials/video_classification_example/train.py | kevinmtian/pytorchvideo @ 168e16859a6029ef8ebeb476f9163bebb6c6b87d | ["Apache-2.0"] | count: null | dates: null
max_forks: tutorials/video_classification_example/train.py | kevinmtian/pytorchvideo @ 168e16859a6029ef8ebeb476f9163bebb6c6b87d | ["Apache-2.0"] | count: null | dates: null
content:
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import itertools
import logging
import os
import pytorch_lightning
import pytorchvideo.data
import pytorchvideo.models.resnet
import torch
import torch.nn.functional as F
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorchvideo.transforms import (
ApplyTransformToKey,
Normalize,
RandomShortSideScale,
RemoveKey,
ShortSideScale,
UniformTemporalSubsample,
)
from slurm import copy_and_run_with_config
from torch.utils.data import DistributedSampler, RandomSampler
from torchaudio.transforms import MelSpectrogram, Resample
from torchvision.transforms import (
CenterCrop,
Compose,
Lambda,
RandomCrop,
RandomHorizontalFlip,
)
"""
This video classification example demonstrates how PyTorchVideo models, datasets and
transforms can be used with a PyTorch Lightning module. Specifically, it shows how a
simple pipeline to train a Resnet on the Kinetics video dataset can be built.
Don't worry if you don't have PyTorch Lightning experience. We'll provide an explanation
of how the PyTorch Lightning module works to accompany the example.
The code can be separated into three main components:
1. VideoClassificationLightningModule (pytorch_lightning.LightningModule), this defines:
- how the model is constructed,
- the inner train or validation loop (i.e. computing loss/metrics from a minibatch)
- optimizer configuration
2. KineticsDataModule (pytorch_lightning.LightningDataModule), this defines:
- how to fetch/prepare the dataset
- the train and val dataloaders for the associated dataset
3. pytorch_lightning.Trainer, this is a concrete PyTorch Lightning class that provides
the training pipeline configuration and a fit(<lightning_module>, <data_module>)
function to start the training/validation loop.
All three components are combined in the train() function. We'll explain the rest of the
details inline.
"""
class VideoClassificationLightningModule(pytorch_lightning.LightningModule):
def __init__(self, args):
"""
This LightningModule implementation constructs a PyTorchVideo ResNet,
defines the train and val loss to be trained with (cross_entropy), and
configures the optimizer.
"""
self.args = args
super().__init__()
self.train_accuracy = pytorch_lightning.metrics.Accuracy()
self.val_accuracy = pytorch_lightning.metrics.Accuracy()
#############
# PTV Model #
#############
# Here we construct the PyTorchVideo model. For this example we're using a
# ResNet that works with Kinetics (e.g. 400 num_classes). For your application,
# this could be changed to any other PyTorchVideo model (e.g. for SlowFast use
# create_slowfast).
if self.args.arch == "video_resnet":
self.model = pytorchvideo.models.resnet.create_resnet(
input_channel=3,
model_num_class=400,
)
self.batch_key = "video"
elif self.args.arch == "audio_resnet":
self.model = pytorchvideo.models.resnet.create_acoustic_resnet(
input_channel=1,
model_num_class=400,
)
self.batch_key = "audio"
else:
raise Exception("{self.args.arch} not supported")
def on_train_epoch_start(self):
"""
        For distributed training we need to set the dataset's video sampler epoch so
that shuffling is done correctly
"""
epoch = self.trainer.current_epoch
if self.trainer.use_ddp:
self.trainer.datamodule.train_dataset.dataset.video_sampler.set_epoch(epoch)
def forward(self, x):
"""
Forward defines the prediction/inference actions.
"""
return self.model(x)
def training_step(self, batch, batch_idx):
"""
This function is called in the inner loop of the training epoch. It must
return a loss that is used for loss.backwards() internally. The self.log(...)
function can be used to log any training metrics.
PyTorchVideo batches are dictionaries containing each modality or metadata of
the batch collated video clips. Kinetics contains the following notable keys:
{
'video': <video_tensor>,
'audio': <audio_tensor>,
'label': <action_label>,
}
- "video" is a Tensor of shape (batch, channels, time, height, Width)
- "audio" is a Tensor of shape (batch, channels, time, 1, frequency)
- "label" is a Tensor of shape (batch, 1)
The PyTorchVideo models and transforms expect the same input shapes and
dictionary structure making this function just a matter of unwrapping the dict and
feeding it through the model/loss.
"""
x = batch[self.batch_key]
y_hat = self.model(x)
loss = F.cross_entropy(y_hat, batch["label"])
acc = self.train_accuracy(F.softmax(y_hat, dim=-1), batch["label"])
self.log("train_loss", loss)
self.log(
"train_acc", acc, on_step=True, on_epoch=True, prog_bar=True, sync_dist=True
)
return loss
def validation_step(self, batch, batch_idx):
"""
This function is called in the inner loop of the evaluation cycle. For this
simple example it's mostly the same as the training loop but with a different
metric name.
"""
x = batch[self.batch_key]
y_hat = self.model(x)
loss = F.cross_entropy(y_hat, batch["label"])
acc = self.val_accuracy(F.softmax(y_hat, dim=-1), batch["label"])
self.log("val_loss", loss)
self.log(
"val_acc", acc, on_step=True, on_epoch=True, prog_bar=True, sync_dist=True
)
return loss
def configure_optimizers(self):
"""
We use the SGD optimizer with per step cosine annealing scheduler.
"""
optimizer = torch.optim.SGD(
self.parameters(),
lr=self.args.lr,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay,
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, self.args.max_epochs, last_epoch=-1
)
return [optimizer], [scheduler]
class KineticsDataModule(pytorch_lightning.LightningDataModule):
"""
This LightningDataModule implementation constructs a PyTorchVideo Kinetics dataset for both
the train and val partitions. It defines each partition's augmentation and
preprocessing transforms and configures the PyTorch DataLoaders.
"""
def __init__(self, args):
self.args = args
super().__init__()
def _make_transforms(self, mode: str):
"""
##################
# PTV Transforms #
##################
# Each PyTorchVideo dataset has a "transform" arg. This arg takes a
# Callable[[Dict], Any], and is used on the output Dict of the dataset to
# define any application specific processing or augmentation. Transforms can
# either be implemented by the user application or reused from any library
# that's domain specific to the modality. E.g. for video we recommend using
# TorchVision, for audio we recommend TorchAudio.
#
# To improve interoperation between domain transform libraries, PyTorchVideo
# provides a dictionary transform API that provides:
# - ApplyTransformToKey(key, transform) - applies a transform to specific modality
# - RemoveKey(key) - remove a specific modality from the clip
#
# In the case that the recommended libraries don't provide transforms that
# are common enough for PyTorchVideo use cases, PyTorchVideo will provide them in
# the same structure as the recommended library. E.g. TorchVision didn't
# have a RandomShortSideScale video transform so it's been added to PyTorchVideo.
"""
if self.args.data_type == "video":
transform = [
self._video_transform(mode),
RemoveKey("audio"),
]
elif self.args.data_type == "audio":
transform = [
self._audio_transform(),
RemoveKey("video"),
]
else:
raise Exception(f"{self.args.data_type} not supported")
return Compose(transform)
def _video_transform(self, mode: str):
"""
This function contains example transforms using both PyTorchVideo and TorchVision
in the same Callable. For 'train' mode, we use augmentations (prepended with
        'Random'), for 'val' mode we use the respective deterministic function.
"""
args = self.args
return ApplyTransformToKey(
key="video",
transform=Compose(
[
UniformTemporalSubsample(args.video_num_subsampled),
Normalize(args.video_means, args.video_stds),
]
+ (
[
RandomShortSideScale(
min_size=args.video_min_short_side_scale,
max_size=args.video_max_short_side_scale,
),
RandomCrop(args.video_crop_size),
RandomHorizontalFlip(p=args.video_horizontal_flip_p),
]
if mode == "train"
else [
ShortSideScale(args.video_min_short_side_scale),
CenterCrop(args.video_crop_size),
]
)
),
)
def _audio_transform(self):
"""
This function contains example transforms using both PyTorchVideo and TorchAudio
in the same Callable.
"""
args = self.args
n_fft = int(
float(args.audio_resampled_rate) / 1000 * args.audio_mel_window_size
)
hop_length = int(
float(args.audio_resampled_rate) / 1000 * args.audio_mel_step_size
)
eps = 1e-10
return ApplyTransformToKey(
key="audio",
transform=Compose(
[
Resample(
orig_freq=args.audio_raw_sample_rate,
new_freq=args.audio_resampled_rate,
),
MelSpectrogram(
sample_rate=args.audio_resampled_rate,
n_fft=n_fft,
hop_length=hop_length,
n_mels=args.audio_num_mels,
center=False,
),
Lambda(lambda x: x.clamp(min=eps)),
Lambda(torch.log),
UniformTemporalSubsample(args.audio_mel_num_subsample),
Lambda(lambda x: x.transpose(1, 0)), # (F, T) -> (T, F)
Lambda(
lambda x: x.view(1, x.size(0), 1, x.size(1))
), # (T, F) -> (1, T, 1, F)
Normalize((args.audio_logmel_mean,), (args.audio_logmel_std,)),
]
),
)
def train_dataloader(self):
"""
Defines the train DataLoader that the PyTorch Lightning Trainer trains/tests with.
"""
sampler = DistributedSampler if self.trainer.use_ddp else RandomSampler
train_transform = self._make_transforms(mode="train")
self.train_dataset = LimitDataset(
pytorchvideo.data.Kinetics(
data_path=os.path.join(self.args.data_path, "train.csv"),
clip_sampler=pytorchvideo.data.make_clip_sampler(
"random", self.args.clip_duration
),
video_path_prefix=self.args.video_path_prefix,
transform=train_transform,
video_sampler=sampler,
)
)
return torch.utils.data.DataLoader(
self.train_dataset,
batch_size=self.args.batch_size,
num_workers=self.args.workers,
)
def val_dataloader(self):
"""
        Defines the validation DataLoader that the PyTorch Lightning Trainer validates with.
"""
sampler = DistributedSampler if self.trainer.use_ddp else RandomSampler
val_transform = self._make_transforms(mode="val")
self.val_dataset = pytorchvideo.data.Kinetics(
data_path=os.path.join(self.args.data_path, "val.csv"),
clip_sampler=pytorchvideo.data.make_clip_sampler(
"uniform", self.args.clip_duration
),
video_path_prefix=self.args.video_path_prefix,
transform=val_transform,
video_sampler=sampler,
)
return torch.utils.data.DataLoader(
self.val_dataset,
batch_size=self.args.batch_size,
num_workers=self.args.workers,
)
class LimitDataset(torch.utils.data.Dataset):
"""
To ensure a constant number of samples are retrieved from the dataset we use this
LimitDataset wrapper. This is necessary because several of the underlying videos
may be corrupted while fetching or decoding, however, we always want the same
number of steps per epoch.
"""
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
self.dataset_iter = itertools.chain.from_iterable(
itertools.repeat(iter(dataset), 2)
)
def __getitem__(self, index):
return next(self.dataset_iter)
def __len__(self):
return self.dataset.num_videos
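

# --- Illustrative sketch (not part of the original tutorial) -----------------
# A hedged, minimal demonstration of LimitDataset above: the wrapped dataset
# only needs to be iterable and to expose `num_videos`. The fake dataset below
# exists purely for illustration and is never used by the training code.
def _example_limit_dataset():
    class _FakeClipDataset:
        num_videos = 3

        def __iter__(self):
            return iter([{"video": i} for i in range(self.num_videos)])

    limited = LimitDataset(_FakeClipDataset())
    print(len(limited))  # 3, taken from num_videos
    print([limited[i] for i in range(len(limited))])  # drawn from the shared iterator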
def main():
"""
To train the ResNet with the Kinetics dataset we construct the two modules above,
and pass them to the fit function of a pytorch_lightning.Trainer.
This example can be run either locally (with default parameters) or on a Slurm
cluster. To run on a Slurm cluster provide the --on_cluster argument.
"""
setup_logger()
pytorch_lightning.trainer.seed_everything()
parser = argparse.ArgumentParser()
# Cluster parameters.
parser.add_argument("--on_cluster", action="store_true")
parser.add_argument("--job_name", default="ptv_video_classification", type=str)
parser.add_argument("--working_directory", default=".", type=str)
parser.add_argument("--partition", default="dev", type=str)
# Model parameters.
parser.add_argument("--lr", "--learning-rate", default=0.1, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight_decay", default=1e-4, type=float)
parser.add_argument(
"--arch",
default="video_resnet",
choices=["video_resnet", "audio_resnet"],
type=str,
)
# Data parameters.
parser.add_argument("--data_path", default=None, type=str, required=True)
parser.add_argument("--video_path_prefix", default="", type=str)
parser.add_argument("--workers", default=8, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--clip_duration", default=2, type=float)
parser.add_argument(
"--data_type", default="video", choices=["video", "audio"], type=str
)
parser.add_argument("--video_num_subsampled", default=8, type=int)
parser.add_argument("--video_means", default=(0.45, 0.45, 0.45), type=tuple)
parser.add_argument("--video_stds", default=(0.225, 0.225, 0.225), type=tuple)
parser.add_argument("--video_crop_size", default=224, type=int)
parser.add_argument("--video_min_short_side_scale", default=256, type=int)
parser.add_argument("--video_max_short_side_scale", default=320, type=int)
parser.add_argument("--video_horizontal_flip_p", default=0.5, type=float)
parser.add_argument("--audio_raw_sample_rate", default=44100, type=int)
parser.add_argument("--audio_resampled_rate", default=16000, type=int)
parser.add_argument("--audio_mel_window_size", default=32, type=int)
parser.add_argument("--audio_mel_step_size", default=16, type=int)
parser.add_argument("--audio_num_mels", default=80, type=int)
parser.add_argument("--audio_mel_num_subsample", default=128, type=int)
parser.add_argument("--audio_logmel_mean", default=-7.03, type=float)
parser.add_argument("--audio_logmel_std", default=4.66, type=float)
# Trainer parameters.
parser = pytorch_lightning.Trainer.add_argparse_args(parser)
parser.set_defaults(
max_epochs=200,
callbacks=[LearningRateMonitor()],
replace_sampler_ddp=False,
reload_dataloaders_every_epoch=False,
)
# Build trainer, ResNet lightning-module and Kinetics data-module.
args = parser.parse_args()
if args.on_cluster:
copy_and_run_with_config(
train,
args,
args.working_directory,
job_name=args.job_name,
time="72:00:00",
partition=args.partition,
gpus_per_node=args.gpus,
ntasks_per_node=args.gpus,
cpus_per_task=10,
mem="470GB",
nodes=args.num_nodes,
constraint="volta32gb",
)
else: # local
train(args)
def train(args):
trainer = pytorch_lightning.Trainer.from_argparse_args(args)
classification_module = VideoClassificationLightningModule(args)
data_module = KineticsDataModule(args)
trainer.fit(classification_module, data_module)
def setup_logger():
ch = logging.StreamHandler()
formatter = logging.Formatter("\n%(asctime)s [%(levelname)s] %(name)s: %(message)s")
ch.setFormatter(formatter)
logger = logging.getLogger("pytorchvideo")
logger.setLevel(logging.DEBUG)
logger.addHandler(ch)
if __name__ == "__main__":
main()
avg_line_length: 38.650964 | max_line_length: 95 | alphanum_fraction: 0.631413
hexsha: 1ec508699970ddb779f3212536dcc8ccaf1f644e | size: 3,568 | ext: py | lang: Python
max_stars: Django Email sender/mysite/mysite/settings.py | Mihir008/Web-Ideas @ f8ccb6dfdec2c34c65618bd4e8b43426b7bdfe3a | ["MIT"] | count: 10 | 2021-10-08T15:09:14.000Z to 2021-12-27T10:38:07.000Z
max_issues: Django Email sender/mysite/mysite/settings.py | Mihir008/Web-Ideas @ f8ccb6dfdec2c34c65618bd4e8b43426b7bdfe3a | ["MIT"] | count: 35 | 2021-10-07T10:40:24.000Z to 2021-10-31T16:47:08.000Z
max_forks: Django Email sender/mysite/mysite/settings.py | Mihir008/Web-Ideas @ f8ccb6dfdec2c34c65618bd4e8b43426b7bdfe3a | ["MIT"] | count: 29 | 2021-10-08T09:44:09.000Z to 2021-12-12T05:54:03.000Z
content:
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os.path
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-jx-fuxz36u%_plkfa$vu#w616(l1yl2nemmkwhq9s(yg@d4k^#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mysite'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'my_pro_static')]
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'sketchkumar9565@gmail.com'
EMAIL_HOST_PASSWORD = 'xoidcpfminxvvpxs'
EMAIL_USE_SSL = False
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
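

# --- Illustrative sketch (not part of the generated settings) ----------------
# With the SMTP settings above, application code would normally send mail via
# django.core.mail. The recipient address below is a placeholder; this helper
# is never called from the settings module itself.
def _example_send_mail():
    from django.core.mail import send_mail

    send_mail(
        subject="Hello from mysite",
        message="This is a test message.",
        from_email=EMAIL_HOST_USER,
        recipient_list=["recipient@example.com"],
        fail_silently=False,
    )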
avg_line_length: 26.043796 | max_line_length: 91 | alphanum_fraction: 0.710202
hexsha: e860226915423b5b1197b56eac23499b742bfa49 | size: 12,433 | ext: py | lang: Python
max_stars: sdks/python/apache_beam/runners/interactive/pipeline_analyzer_test.py | RusOr10n/beam @ ede14d4aa7d239f74d5565a28a7c4433eaaa7d47 | ["Apache-2.0"] | count: 2 | 2019-12-10T11:32:48.000Z to 2020-03-18T04:50:52.000Z
max_issues: sdks/python/apache_beam/runners/interactive/pipeline_analyzer_test.py | RusOr10n/beam @ ede14d4aa7d239f74d5565a28a7c4433eaaa7d47 | ["Apache-2.0"] | count: 14 | 2020-02-12T22:20:41.000Z to 2021-11-09T19:41:23.000Z
max_forks: sdks/python/apache_beam/runners/interactive/pipeline_analyzer_test.py | violalyu/beam @ dd605e568d70b1a6ebea60c15b2aec3e240f3914 | ["Apache-2.0"] | count: null | dates: null
content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.runners.interactive.pipeline_ananlyzer.
This module is experimental. No backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import apache_beam as beam
from apache_beam.runners.direct import direct_runner
from apache_beam.runners.interactive import cache_manager as cache
from apache_beam.runners.interactive import pipeline_analyzer
def to_stable_runner_api(p):
"""The extra round trip ensures a stable pipeline proto.
"""
return (beam.pipeline.Pipeline.from_runner_api(
p.to_runner_api(use_fake_coders=True),
p.runner,
p._options).to_runner_api(use_fake_coders=True))
class PipelineAnalyzerTest(unittest.TestCase):
def setUp(self):
self.runner = direct_runner.DirectRunner()
self.cache_manager = cache.FileBasedCacheManager()
def tearDown(self):
self.cache_manager.cleanup()
def assertPipelineEqual(self, pipeline_proto1, pipeline_proto2):
"""A naive check for Pipeline proto equality.
"""
components1 = pipeline_proto1.components
components2 = pipeline_proto2.components
self.assertEqual(len(components1.transforms), len(components2.transforms))
self.assertEqual(len(components1.pcollections),
len(components2.pcollections))
    # GreaterEqual instead of Equal because the pipeline_proto_to_execute could
    # include more windowing_strategies and coders than necessary.
self.assertGreaterEqual(len(components1.windowing_strategies),
len(components2.windowing_strategies))
self.assertGreaterEqual(len(components1.coders), len(components2.coders))
self.assertTransformEqual(pipeline_proto1,
pipeline_proto1.root_transform_ids[0],
pipeline_proto2,
pipeline_proto2.root_transform_ids[0])
def assertTransformEqual(self, pipeline_proto1, transform_id1,
pipeline_proto2, transform_id2):
"""A naive check for Transform proto equality.
"""
transform_proto1 = pipeline_proto1.components.transforms[transform_id1]
transform_proto2 = pipeline_proto2.components.transforms[transform_id2]
self.assertEqual(transform_proto1.spec.urn, transform_proto2.spec.urn)
# Skipping payload checking because PTransforms of the same functionality
# could generate different payloads.
self.assertEqual(len(transform_proto1.subtransforms),
len(transform_proto2.subtransforms))
self.assertSetEqual(set(transform_proto1.inputs),
set(transform_proto2.inputs))
self.assertSetEqual(set(transform_proto1.outputs),
set(transform_proto2.outputs))
def test_basic(self):
p = beam.Pipeline(runner=self.runner)
# The cold run.
pcoll = (p
| 'Create' >> beam.Create([1, 2, 3])
| 'Double' >> beam.Map(lambda x: x*2)
| 'Square' >> beam.Map(lambda x: x**2))
analyzer = pipeline_analyzer.PipelineAnalyzer(self.cache_manager,
to_stable_runner_api(p),
self.runner)
pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
analyzer.pipeline_proto_to_execute(),
p.runner,
p._options
)
pipeline_to_execute.run().wait_until_finish()
self.assertEqual(
len(analyzer.tl_required_trans_ids()),
7 # Create, Double, Square, CacheSample * 3, CacheFull
)
self.assertEqual(len(analyzer.tl_referenced_pcoll_ids()), 3)
self.assertEqual(len(analyzer.read_cache_ids()), 0)
self.assertEqual(len(analyzer.write_cache_ids()), 4)
# The second run.
_ = (pcoll
| 'Triple' >> beam.Map(lambda x: x*3)
| 'Cube' >> beam.Map(lambda x: x**3))
analyzer = pipeline_analyzer.PipelineAnalyzer(self.cache_manager,
to_stable_runner_api(p),
self.runner)
pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
analyzer.pipeline_proto_to_execute(),
p.runner,
p._options
)
self.assertEqual(
len(analyzer.tl_required_trans_ids()),
6 # Read, Triple, Cube, CacheSample * 2, CacheFull
)
self.assertEqual(len(analyzer.tl_referenced_pcoll_ids()), 3)
self.assertEqual(len(analyzer.read_cache_ids()), 1)
self.assertEqual(len(analyzer.write_cache_ids()), 3)
# No need to actually execute the second run.
def test_word_count(self):
p = beam.Pipeline(runner=self.runner)
class WordExtractingDoFn(beam.DoFn):
def process(self, element):
text_line = element.strip()
words = text_line.split()
return words
# Count the occurrences of each word.
pcoll1 = p | beam.Create(['to be or not to be that is the question'])
pcoll2 = pcoll1 | 'Split' >> beam.ParDo(WordExtractingDoFn())
pcoll3 = pcoll2 | 'Pair with One' >> beam.Map(lambda x: (x, 1))
pcoll4 = pcoll3 | 'Group' >> beam.GroupByKey()
pcoll5 = pcoll4 | 'Count' >> beam.Map(lambda item: (item[0], sum(item[1])))
analyzer = pipeline_analyzer.PipelineAnalyzer(self.cache_manager,
to_stable_runner_api(p),
self.runner)
cache_label1 = 'PColl-1111111'
cache_label2 = 'PColl-2222222'
cache_label3 = 'PColl-3333333'
cache_label4 = 'PColl-4444444'
cache_label5 = 'PColl-5555555'
# pylint: disable=expression-not-assigned
pcoll1 | 'CacheSample%s' % cache_label1 >> cache.WriteCache(
self.cache_manager, cache_label1, sample=True, sample_size=10)
pcoll2 | 'CacheSample%s' % cache_label2 >> cache.WriteCache(
self.cache_manager, cache_label2, sample=True, sample_size=10)
pcoll3 | 'CacheSample%s' % cache_label3 >> cache.WriteCache(
self.cache_manager, cache_label3, sample=True, sample_size=10)
pcoll4 | 'CacheSample%s' % cache_label4 >> cache.WriteCache(
self.cache_manager, cache_label3, sample=True, sample_size=10)
pcoll5 | 'CacheSample%s' % cache_label5 >> cache.WriteCache(
self.cache_manager, cache_label3, sample=True, sample_size=10)
pcoll5 | 'CacheFull%s' % cache_label5 >> cache.WriteCache(
self.cache_manager, cache_label3)
expected_pipeline_proto = to_stable_runner_api(p)
self.assertPipelineEqual(analyzer.pipeline_proto_to_execute(),
expected_pipeline_proto)
pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
analyzer.pipeline_proto_to_execute(),
p.runner,
p._options
)
pipeline_to_execute.run().wait_until_finish()
def test_write_cache_expansion(self):
p = beam.Pipeline(runner=self.runner)
pcoll1 = p | 'Create' >> beam.Create([1, 2, 3])
pcoll2 = pcoll1 | 'Double' >> beam.Map(lambda x: x*2)
pcoll3 = pcoll2 | 'Square' >> beam.Map(lambda x: x**2)
analyzer = pipeline_analyzer.PipelineAnalyzer(self.cache_manager,
to_stable_runner_api(p),
self.runner)
cache_label1 = 'PColl-1234567'
cache_label2 = 'PColl-7654321'
cache_label3 = 'PColl-3141593'
# pylint: disable=expression-not-assigned
pcoll1 | 'CacheSample%s' % cache_label1 >> cache.WriteCache(
self.cache_manager, cache_label1, sample=True, sample_size=10)
pcoll2 | 'CacheSample%s' % cache_label2 >> cache.WriteCache(
self.cache_manager, cache_label2, sample=True, sample_size=10)
pcoll3 | 'CacheSample%s' % cache_label3 >> cache.WriteCache(
self.cache_manager, cache_label3, sample=True, sample_size=10)
pcoll3 | 'CacheFull%s' % cache_label3 >> cache.WriteCache(
self.cache_manager, cache_label3)
expected_pipeline_proto = to_stable_runner_api(p)
# Since WriteCache expansion leads to more than 50 PTransform protos in the
# pipeline, a simple check of proto map size is enough.
self.assertPipelineEqual(analyzer.pipeline_proto_to_execute(),
expected_pipeline_proto)
def test_read_cache_expansion(self):
p = beam.Pipeline(runner=self.runner)
# The cold run.
pcoll = (p
| 'Create' >> beam.Create([1, 2, 3])
| 'Double' >> beam.Map(lambda x: x*2)
| 'Square' >> beam.Map(lambda x: x**2))
pipeline_proto = to_stable_runner_api(p)
pipeline_info = pipeline_analyzer.PipelineInfo(pipeline_proto.components)
pcoll_id = 'ref_PCollection_PCollection_3' # Output PCollection of Square
cache_label1 = pipeline_info.cache_label(pcoll_id)
analyzer = pipeline_analyzer.PipelineAnalyzer(self.cache_manager,
pipeline_proto,
self.runner)
pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
analyzer.pipeline_proto_to_execute(),
p.runner,
p._options
)
pipeline_to_execute.run().wait_until_finish()
# The second run.
_ = (pcoll
| 'Triple' >> beam.Map(lambda x: x*3)
| 'Cube' >> beam.Map(lambda x: x**3))
analyzer = pipeline_analyzer.PipelineAnalyzer(self.cache_manager,
to_stable_runner_api(p),
self.runner)
expected_pipeline = beam.Pipeline(runner=self.runner)
pcoll1 = (expected_pipeline
| 'Load%s' % cache_label1 >> cache.ReadCache(
self.cache_manager, cache_label1))
pcoll2 = pcoll1 | 'Triple' >> beam.Map(lambda x: x*3)
pcoll3 = pcoll2 | 'Cube' >> beam.Map(lambda x: x**3)
cache_label2 = 'PColl-7654321'
cache_label3 = 'PColl-3141593'
# pylint: disable=expression-not-assigned
pcoll2 | 'CacheSample%s' % cache_label2 >> cache.WriteCache(
self.cache_manager, cache_label2, sample=True, sample_size=10)
pcoll3 | 'CacheSample%s' % cache_label3 >> cache.WriteCache(
self.cache_manager, cache_label3, sample=True, sample_size=10)
pcoll3 | 'CacheFull%s' % cache_label3 >> cache.WriteCache(
self.cache_manager, cache_label3)
# Since ReadCache & WriteCache expansion leads to more than 50 PTransform
# protos in the pipeline, a simple check of proto map size is enough.
self.assertPipelineEqual(analyzer.pipeline_proto_to_execute(),
to_stable_runner_api(expected_pipeline))
class PipelineInfoTest(unittest.TestCase):
def setUp(self):
self.runner = direct_runner.DirectRunner()
def test_passthrough(self):
"""
Test that PTransforms which pass through their input PCollection can be
used with PipelineInfo.
"""
class Passthrough(beam.PTransform):
def expand(self, pcoll):
return pcoll
p = beam.Pipeline(runner=self.runner)
p | beam.Impulse() | Passthrough() # pylint: disable=expression-not-assigned
proto = to_stable_runner_api(p).components
info = pipeline_analyzer.PipelineInfo(proto)
for pcoll_id in info.all_pcollections():
# FIXME: If PipelineInfo does not support passthrough PTransforms, this
# will only fail some of the time, depending on the ordering of
# transforms in the Pipeline proto.
# Should not throw exception
info.cache_label(pcoll_id)
if __name__ == '__main__':
unittest.main()
avg_line_length: 41.168874 | max_line_length: 81 | alphanum_fraction: 0.665487
hexsha: c6919a72ad6dfaa50b41b6c3d187c951dcb08b5f | size: 23,113 | ext: py | lang: Python
max_stars: nova/tests/unit/test_policy.py | gotostack/nova @ c0b85389acc7a7d666bfabd542c2695c87192544 | ["Apache-2.0"] | count: null | dates: null
max_issues: nova/tests/unit/test_policy.py | gotostack/nova @ c0b85389acc7a7d666bfabd542c2695c87192544 | ["Apache-2.0"] | count: null | dates: null
max_forks: nova/tests/unit/test_policy.py | gotostack/nova @ c0b85389acc7a7d666bfabd542c2695c87192544 | ["Apache-2.0"] | count: 1 | 2020-07-24T00:41:49.000Z to 2020-07-24T00:41:49.000Z
content:
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Nova."""
import os.path
import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import requests_mock
import nova.conf
from nova import context
from nova import exception
from nova import policy
from nova import test
from nova.tests.unit import fake_policy
from nova.tests.unit import policy_fixture
from nova import utils
CONF = nova.conf.CONF
class PolicyFileTestCase(test.NoDBTestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.target = {}
def test_modified_policy_reloads(self):
with utils.tempdir() as tmpdir:
tmpfilename = os.path.join(tmpdir, 'policy')
self.flags(policy_file=tmpfilename, group='oslo_policy')
# NOTE(uni): context construction invokes policy check to determine
# is_admin or not. As a side-effect, policy reset is needed here
# to flush existing policy cache.
policy.reset()
policy.init()
rule = oslo_policy.RuleDefault('example:test', "")
policy._ENFORCER.register_defaults([rule])
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write('{"example:test": ""}')
policy.authorize(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
policyfile.write('{"example:test": "!"}')
policy._ENFORCER.load_rules(True)
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
class PolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
rules = [
oslo_policy.RuleDefault("true", '@'),
oslo_policy.RuleDefault("example:allowed", '@'),
oslo_policy.RuleDefault("example:denied", "!"),
oslo_policy.RuleDefault("example:get_http",
"http://www.example.com"),
oslo_policy.RuleDefault("example:my_file",
"role:compute_admin or "
"project_id:%(project_id)s"),
oslo_policy.RuleDefault("example:early_and_fail", "! and @"),
oslo_policy.RuleDefault("example:early_or_success", "@ or !"),
oslo_policy.RuleDefault("example:lowercase_admin",
"role:admin or role:sysadmin"),
oslo_policy.RuleDefault("example:uppercase_admin",
"role:ADMIN or role:sysadmin"),
]
policy.reset()
policy.init()
# before a policy rule can be used, its default has to be registered.
policy._ENFORCER.register_defaults(rules)
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
def test_authorize_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(oslo_policy.PolicyNotRegistered, policy.authorize,
self.context, action, self.target)
def test_authorize_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
def test_authorize_bad_action_noraise(self):
action = "example:denied"
result = policy.authorize(self.context, action, self.target, False)
self.assertFalse(result)
def test_authorize_good_action(self):
action = "example:allowed"
result = policy.authorize(self.context, action, self.target)
self.assertTrue(result)
@requests_mock.mock()
def test_authorize_http_true(self, req_mock):
req_mock.post('http://www.example.com/',
text='True')
action = "example:get_http"
target = {}
result = policy.authorize(self.context, action, target)
self.assertTrue(result)
@requests_mock.mock()
def test_authorize_http_false(self, req_mock):
req_mock.post('http://www.example.com/',
text='False')
action = "example:get_http"
target = {}
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, target)
def test_templatized_authorization(self):
target_mine = {'project_id': 'fake'}
target_not_mine = {'project_id': 'another'}
action = "example:my_file"
policy.authorize(self.context, action, target_mine)
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, target_not_mine)
def test_early_AND_authorization(self):
action = "example:early_and_fail"
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
def test_early_OR_authorization(self):
action = "example:early_or_success"
policy.authorize(self.context, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.RequestContext('admin',
'fake',
roles=['AdMiN'])
policy.authorize(admin_context, lowercase_action, self.target)
policy.authorize(admin_context, uppercase_action, self.target)
@mock.patch.object(policy.LOG, 'warning')
def test_warning_when_deprecated_user_based_rule_used(self, mock_warning):
policy._warning_for_deprecated_user_based_rules(
[("os_compute_api:servers:index",
"project_id:%(project_id)s or user_id:%(user_id)s")])
mock_warning.assert_called_once_with(
u"The user_id attribute isn't supported in the rule "
"'%s'. All the user_id based policy enforcement will be removed "
"in the future.", "os_compute_api:servers:index")
@mock.patch.object(policy.LOG, 'warning')
def test_no_warning_for_user_based_resource(self, mock_warning):
policy._warning_for_deprecated_user_based_rules(
[("os_compute_api:os-keypairs:index",
"user_id:%(user_id)s")])
mock_warning.assert_not_called()
@mock.patch.object(policy.LOG, 'warning')
def test_no_warning_for_no_user_based_rule(self, mock_warning):
policy._warning_for_deprecated_user_based_rules(
[("os_compute_api:servers:index",
"project_id:%(project_id)s")])
mock_warning.assert_not_called()
class IsAdminCheckTestCase(test.NoDBTestCase):
def setUp(self):
super(IsAdminCheckTestCase, self).setUp()
policy.init()
def test_init_true(self):
check = policy.IsAdminCheck('is_admin', 'True')
self.assertEqual(check.kind, 'is_admin')
self.assertEqual(check.match, 'True')
self.assertTrue(check.expected)
def test_init_false(self):
check = policy.IsAdminCheck('is_admin', 'nottrue')
self.assertEqual(check.kind, 'is_admin')
self.assertEqual(check.match, 'False')
self.assertFalse(check.expected)
def test_call_true(self):
check = policy.IsAdminCheck('is_admin', 'True')
self.assertTrue(check('target', dict(is_admin=True),
policy._ENFORCER))
self.assertFalse(check('target', dict(is_admin=False),
policy._ENFORCER))
def test_call_false(self):
check = policy.IsAdminCheck('is_admin', 'False')
self.assertFalse(check('target', dict(is_admin=True),
policy._ENFORCER))
self.assertTrue(check('target', dict(is_admin=False),
policy._ENFORCER))
class AdminRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(AdminRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture())
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.actions = policy.get_rules().keys()
self.target = {}
def test_authorize_admin_actions_with_nonadmin_context_throws(self):
"""Check if non-admin context passed to admin actions throws
Policy not authorized exception
"""
for action in self.actions:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.non_admin_context = context.RequestContext('fake', 'fake',
roles=['member'])
self.admin_context = context.RequestContext('fake', 'fake', True,
roles=['member'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
self.admin_only_rules = (
"cells_scheduler_filter:DifferentCellFilter",
"cells_scheduler_filter:TargetCellFilter",
"network:attach_external_network",
"os_compute_api:servers:create:forced_host",
"os_compute_api:servers:detail:get_all_tenants",
"os_compute_api:servers:index:get_all_tenants",
"os_compute_api:servers:show:host_status",
"os_compute_api:servers:migrations:force_complete",
"os_compute_api:servers:migrations:delete",
"os_compute_api:os-admin-actions",
"os_compute_api:os-admin-actions:reset_network",
"os_compute_api:os-admin-actions:inject_network_info",
"os_compute_api:os-admin-actions:reset_state",
"os_compute_api:os-aggregates:index",
"os_compute_api:os-aggregates:create",
"os_compute_api:os-aggregates:show",
"os_compute_api:os-aggregates:update",
"os_compute_api:os-aggregates:delete",
"os_compute_api:os-aggregates:add_host",
"os_compute_api:os-aggregates:remove_host",
"os_compute_api:os-aggregates:set_metadata",
"os_compute_api:os-agents",
"os_compute_api:os-baremetal-nodes",
"os_compute_api:os-cells",
"os_compute_api:os-cells:create",
"os_compute_api:os-cells:delete",
"os_compute_api:os-cells:update",
"os_compute_api:os-cells:sync_instances",
"os_compute_api:os-cloudpipe",
"os_compute_api:os-evacuate",
"os_compute_api:os-extended-server-attributes",
"os_compute_api:os-fixed-ips",
"os_compute_api:os-flavor-access:remove_tenant_access",
"os_compute_api:os-flavor-access:add_tenant_access",
"os_compute_api:os-flavor-extra-specs:create",
"os_compute_api:os-flavor-extra-specs:update",
"os_compute_api:os-flavor-extra-specs:delete",
"os_compute_api:os-flavor-manage",
"os_compute_api:os-floating-ips-bulk",
"os_compute_api:os-floating-ip-dns:domain:delete",
"os_compute_api:os-floating-ip-dns:domain:update",
"os_compute_api:os-fping:all_tenants",
"os_compute_api:os-hosts",
"os_compute_api:os-hypervisors",
"os_compute_api:os-instance-actions:events",
"os_compute_api:os-instance-usage-audit-log",
"os_compute_api:os-lock-server:unlock:unlock_override",
"os_compute_api:os-migrate-server:migrate",
"os_compute_api:os-migrate-server:migrate_live",
"os_compute_api:os-networks",
"os_compute_api:os-networks-associate",
"os_compute_api:os-pci:index",
"os_compute_api:os-pci:detail",
"os_compute_api:os-pci:show",
"os_compute_api:os-quota-sets:update",
"os_compute_api:os-quota-sets:delete",
"os_compute_api:os-quota-sets:detail",
"os_compute_api:os-security-group-default-rules",
"os_compute_api:os-server-diagnostics",
"os_compute_api:os-services",
"os_compute_api:os-shelve:shelve_offload",
"os_compute_api:os-simple-tenant-usage:list",
"os_compute_api:os-availability-zone:detail",
"os_compute_api:os-used-limits",
"os_compute_api:os-migrations:index",
"os_compute_api:os-assisted-volume-snapshots:create",
"os_compute_api:os-assisted-volume-snapshots:delete",
"os_compute_api:os-console-auth-tokens",
"os_compute_api:os-quota-class-sets:update",
"os_compute_api:os-server-external-events:create",
"os_compute_api:os-volumes-attachments:update",
"os_compute_api:servers:migrations:index",
"os_compute_api:servers:migrations:show",
)
self.admin_or_owner_rules = (
"os_compute_api:servers:start",
"os_compute_api:servers:stop",
"os_compute_api:servers:trigger_crash_dump",
"os_compute_api:os-create-backup",
"os_compute_api:ips:index",
"os_compute_api:ips:show",
"os_compute_api:os-keypairs:create",
"os_compute_api:os-keypairs:delete",
"os_compute_api:os-keypairs:index",
"os_compute_api:os-keypairs:show",
"os_compute_api:os-lock-server:lock",
"os_compute_api:os-lock-server:unlock",
"os_compute_api:os-pause-server:pause",
"os_compute_api:os-pause-server:unpause",
"os_compute_api:os-quota-sets:show",
"os_compute_api:server-metadata:index",
"os_compute_api:server-metadata:show",
"os_compute_api:server-metadata:delete",
"os_compute_api:server-metadata:create",
"os_compute_api:server-metadata:update",
"os_compute_api:server-metadata:update_all",
"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-suspend-server:suspend",
"os_compute_api:os-suspend-server:resume",
"os_compute_api:os-tenant-networks",
"os_compute_api:extensions",
"os_compute_api:os-config-drive",
"os_compute_api:servers:confirm_resize",
"os_compute_api:servers:create",
"os_compute_api:servers:create:attach_network",
"os_compute_api:servers:create:attach_volume",
"os_compute_api:servers:create_image",
"os_compute_api:servers:delete",
"os_compute_api:servers:detail",
"os_compute_api:servers:index",
"os_compute_api:servers:reboot",
"os_compute_api:servers:rebuild",
"os_compute_api:servers:resize",
"os_compute_api:servers:revert_resize",
"os_compute_api:servers:show",
"os_compute_api:servers:update",
"os_compute_api:servers:create_image:allow_volume_backed",
"os_compute_api:os-admin-password",
"os_compute_api:os-attach-interfaces",
"os_compute_api:os-attach-interfaces:create",
"os_compute_api:os-attach-interfaces:delete",
"os_compute_api:os-certificates:create",
"os_compute_api:os-certificates:show",
"os_compute_api:os-consoles:create",
"os_compute_api:os-consoles:delete",
"os_compute_api:os-consoles:index",
"os_compute_api:os-consoles:show",
"os_compute_api:os-console-output",
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete",
"os_compute_api:os-extended-status",
"os_compute_api:os-extended-availability-zone",
"os_compute_api:os-extended-volumes",
"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-rxtx",
"os_compute_api:flavors",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ip-dns",
"os_compute_api:os-floating-ip-pools",
"os_compute_api:os-floating-ips",
"os_compute_api:os-fping",
"os_compute_api:image-size",
"os_compute_api:os-instance-actions",
"os_compute_api:os-keypairs",
"os_compute_api:limits",
"os_compute_api:os-multinic",
"os_compute_api:os-networks:view",
"os_compute_api:os-pci:pci_servers",
"os_compute_api:os-rescue",
"os_compute_api:os-security-groups",
"os_compute_api:os-server-password",
"os_compute_api:os-server-usage",
"os_compute_api:os-server-groups",
"os_compute_api:os-shelve:shelve",
"os_compute_api:os-shelve:unshelve",
"os_compute_api:os-virtual-interfaces",
"os_compute_api:os-volumes",
"os_compute_api:os-volumes-attachments:index",
"os_compute_api:os-volumes-attachments:show",
"os_compute_api:os-volumes-attachments:create",
"os_compute_api:os-volumes-attachments:delete",
"os_compute_api:os-availability-zone:list",
)
self.non_admin_only_rules = (
"os_compute_api:os-hide-server-addresses",)
self.allow_all_rules = (
"os_compute_api:os-quota-sets:defaults",
"os_compute_api:extensions:discoverable",
"os_compute_api:os-admin-actions:discoverable",
"os_compute_api:os-admin-password:discoverable",
"os_compute_api:os-aggregates:discoverable",
"os_compute_api:os-agents:discoverable",
"os_compute_api:os-attach-interfaces:discoverable",
"os_compute_api:os-baremetal-nodes:discoverable",
"os_compute_api:os-block-device-mapping:discoverable",
"os_compute_api:os-block-device-mapping-v1:discoverable",
"os_compute_api:os-cells:discoverable",
"os_compute_api:os-certificates:discoverable",
"os_compute_api:os-cloudpipe:discoverable",
"os_compute_api:os-config-drive:discoverable",
"os_compute_api:os-consoles:discoverable",
"os_compute_api:os-console-output:discoverable",
"os_compute_api:os-remote-consoles:discoverable",
"os_compute_api:os-create-backup:discoverable",
"os_compute_api:os-deferred-delete:discoverable",
"os_compute_api:os-evacuate:discoverable",
"os_compute_api:os-extended-server-attributes:discoverable",
"os_compute_api:os-extended-status:discoverable",
"os_compute_api:os-extended-availability-zone:discoverable",
"os_compute_api:extension_info:discoverable",
"os_compute_api:os-extended-volumes:discoverable",
"os_compute_api:os-fixed-ips:discoverable",
"os_compute_api:os-flavor-access:discoverable",
"os_compute_api:os-flavor-rxtx:discoverable",
"os_compute_api:flavors:discoverable",
"os_compute_api:os-flavor-extra-specs:discoverable",
"os_compute_api:os-flavor-manage:discoverable",
"os_compute_api:os-floating-ip-dns:discoverable",
"os_compute_api:os-floating-ip-pools:discoverable",
"os_compute_api:os-floating-ips:discoverable",
"os_compute_api:os-floating-ips-bulk:discoverable",
"os_compute_api:os-fping:discoverable",
"os_compute_api:os-hide-server-addresses:discoverable",
"os_compute_api:os-hosts:discoverable",
"os_compute_api:os-hypervisors:discoverable",
"os_compute_api:images:discoverable",
"os_compute_api:image-metadata:discoverable",
"os_compute_api:image-size:discoverable",
"os_compute_api:os-instance-actions:discoverable",
"os_compute_api:os-instance-usage-audit-log:discoverable",
"os_compute_api:ips:discoverable",
"os_compute_api:os-keypairs:discoverable",
"os_compute_api:limits:discoverable",
"os_compute_api:os-lock-server:discoverable",
"os_compute_api:os-migrate-server:discoverable",
"os_compute_api:os-multinic:discoverable",
"os_compute_api:os-multiple-create:discoverable",
"os_compute_api:os-networks:discoverable",
"os_compute_api:os-networks-associate:discoverable",
"os_compute_api:os-pause-server:discoverable",
"os_compute_api:os-pci:discoverable",
"os_compute_api:os-quota-sets:discoverable",
"os_compute_api:os-quota-class-sets:discoverable",
"os_compute_api:os-rescue:discoverable",
"os_compute_api:os-scheduler-hints:discoverable",
"os_compute_api:os-security-group-default-rules:discoverable",
"os_compute_api:os-security-groups:discoverable",
"os_compute_api:os-server-diagnostics:discoverable",
"os_compute_api:os-server-password:discoverable",
"os_compute_api:os-server-usage:discoverable",
"os_compute_api:os-server-groups:discoverable",
"os_compute_api:os-server-tags:delete",
"os_compute_api:os-server-tags:delete_all",
"os_compute_api:os-server-tags:discoverable",
"os_compute_api:os-server-tags:index",
"os_compute_api:os-server-tags:show",
"os_compute_api:os-server-tags:update",
"os_compute_api:os-server-tags:update_all",
"os_compute_api:os-services:discoverable",
"os_compute_api:server-metadata:discoverable",
"os_compute_api:server-migrations:discoverable",
"os_compute_api:servers:discoverable",
"os_compute_api:os-shelve:discoverable",
"os_compute_api:os-simple-tenant-usage:discoverable",
"os_compute_api:os-suspend-server:discoverable",
"os_compute_api:os-tenant-networks:discoverable",
"os_compute_api:os-user-data:discoverable",
"os_compute_api:os-virtual-interfaces:discoverable",
"os_compute_api:os-volumes:discoverable",
"os_compute_api:os-volumes-attachments:discoverable",
"os_compute_api:os-availability-zone:discoverable",
"os_compute_api:os-used-limits:discoverable",
"os_compute_api:os-migrations:discoverable",
"os_compute_api:os-assisted-volume-snapshots:discoverable",
"os_compute_api:os-console-auth-tokens:discoverable",
"os_compute_api:os-server-external-events:discoverable",
"os_compute_api:versions:discoverable",
)
def test_all_rules_in_sample_file(self):
special_rules = ["context_is_admin", "admin_or_owner", "default"]
for (name, rule) in self.fake_policy.items():
if name in special_rules:
continue
self.assertIn(name, policy.get_rules())
def test_admin_only_rules(self):
for rule in self.admin_only_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.non_admin_context, rule,
{'project_id': 'fake', 'user_id': 'fake'})
policy.authorize(self.admin_context, rule, self.target)
def test_non_admin_only_rules(self):
for rule in self.non_admin_only_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.admin_context, rule, self.target)
policy.authorize(self.non_admin_context, rule, self.target)
def test_admin_or_owner_rules(self):
for rule in self.admin_or_owner_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.non_admin_context, rule, self.target)
policy.authorize(self.non_admin_context, rule,
{'project_id': 'fake', 'user_id': 'fake'})
def test_allow_all_rules(self):
for rule in self.allow_all_rules:
policy.authorize(self.non_admin_context, rule, self.target)
def test_rule_missing(self):
rules = policy.get_rules()
# eliqiao os_compute_api:os-quota-class-sets:show requires
# admin=True or quota_class match, this rule won't belong to
# admin_only, non_admin, admin_or_user, empty_rule
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules + self.non_admin_only_rules +
self.allow_all_rules + special_rules)
self.assertEqual(set([]), result)
| 41.645045
| 79
| 0.721109
|
6d9813a45fcdfac4d11a910ffbcc88d7d3b07c4b
| 3,908
|
py
|
Python
|
yandexcloud/_operation_waiter.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 36
|
2018-12-23T13:51:50.000Z
|
2022-03-25T07:48:24.000Z
|
yandexcloud/_operation_waiter.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 15
|
2019-02-28T04:55:09.000Z
|
2022-03-06T23:17:24.000Z
|
yandexcloud/_operation_waiter.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 18
|
2019-02-23T07:10:57.000Z
|
2022-03-28T14:41:08.000Z
|
import time
from datetime import datetime
import logging
import grpc
from google.protobuf.empty_pb2 import Empty
from yandex.cloud.operation.operation_service_pb2_grpc import OperationServiceStub
from yandex.cloud.operation.operation_service_pb2 import GetOperationRequest
from yandexcloud._retry_interceptor import RetryInterceptor
from yandexcloud.operations import OperationResult, OperationError
def operation_waiter(sdk, operation_id, timeout):
retriable_codes = (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.RESOURCE_EXHAUSTED,
grpc.StatusCode.INTERNAL,
)
operation_service = sdk.client(
OperationServiceStub,
interceptor=RetryInterceptor(max_retry_count=5, retriable_codes=retriable_codes),
)
return OperationWaiter(operation_id, operation_service, timeout)
def wait_for_operation(sdk, operation_id, timeout):
waiter = operation_waiter(sdk, operation_id, timeout)
for _ in waiter:
time.sleep(1)
return waiter.operation
def get_operation_result(sdk, operation, response_type=None, meta_type=None, timeout=None, logger=None):
if not logger:
logger = logging.getLogger()
logger.addHandler(logging.NullHandler())
operation_result = OperationResult(operation)
created_at = datetime.fromtimestamp(operation.created_at.seconds)
message = 'Running Yandex.Cloud operation. ID: {id}. ' + \
'Description: {description}. Created at: {created_at}. ' + \
'Created by: {created_by}.'
message = message.format(
id=operation.id,
description=operation.description,
created_at=created_at,
created_by=operation.created_by,
)
if meta_type and meta_type is not Empty:
unpacked_meta = meta_type()
operation.metadata.Unpack(unpacked_meta)
operation_result.meta = unpacked_meta
message += ' Meta: {unpacked_meta}.'.format(unpacked_meta=unpacked_meta)
logger.info(message)
result = wait_for_operation(sdk, operation.id, timeout=timeout)
if result.error and result.error.code:
error_message = 'Error Yandex.Cloud operation. ID: {id}. ' + \
'Error code: {code}. Details: {details}. ' + \
'Message: {message}.'
error_message = error_message.format(
id=result.id,
code=result.error.code,
details=result.error.details,
message=result.error.message,
)
logger.error(error_message)
raise OperationError(message=error_message, operation_result=result)
else:
log_message = 'Done Yandex.Cloud operation. ID: {id}.'.format(id=operation.id)
if response_type and response_type is not Empty:
unpacked_response = response_type()
result.response.Unpack(unpacked_response)
operation_result.response = unpacked_response
log_message += ' Response: {unpacked_response}.'.format(unpacked_response=unpacked_response)
logger.info(log_message)
return operation_result
class OperationWaiter:
def __init__(self, operation_id, operation_service, timeout=None):
self.__operation = None
self.__operation_id = operation_id
self.__operation_service = operation_service
self.__deadline = time.time() + timeout if timeout else None
@property
def operation(self):
return self.__operation
@property
def done(self):
self.__operation = self.__operation_service.Get(GetOperationRequest(operation_id=self.__operation_id))
return self.__operation is not None and self.__operation.done
def __iter__(self):
return self
def __next__(self):
if self.done or self.__deadline is not None and time.time() >= self.__deadline:
raise StopIteration()
return None
next = __next__ # for Python 2
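# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how the helpers above fit together. It assumes `sdk`
# is an already-initialised yandexcloud.SDK and `op` is an Operation message
# returned by some service call; both names are placeholders.
#
#   waiter = operation_waiter(sdk, op.id, timeout=600)
#   for _ in waiter:        # each iteration re-polls OperationService.Get
#       time.sleep(1)
#   finished = waiter.operation
#
# wait_for_operation() wraps exactly this loop, and get_operation_result() adds
# logging plus unpacking of the metadata/response Any fields.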
| 37.219048
| 110
| 0.694729
|
efb2c6f9ef40ed24ad9b5f65eed87899c6f12a9a
| 250
|
py
|
Python
|
exercise034.py
|
AlissonRaphael/python_exercises
|
3f1185c4f2fff24c9fa2ffd6b60f90599044c985
|
[
"MIT"
] | null | null | null |
exercise034.py
|
AlissonRaphael/python_exercises
|
3f1185c4f2fff24c9fa2ffd6b60f90599044c985
|
[
"MIT"
] | null | null | null |
exercise034.py
|
AlissonRaphael/python_exercises
|
3f1185c4f2fff24c9fa2ffd6b60f90599044c985
|
[
"MIT"
] | null | null | null |
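# Exercise: read a salary and apply a 10% raise if it is at least R$ 1250, otherwise 15%.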
salario = float(input('Digite o salário: R$ '))
if salario >= 1250:
print('Com reajuste de 10%, o novo salário é R$ {:.2f}'.format(salario+salario*0.1))
else:
print('Com reajuste de 15%, o novo salário é R$ {:.2f}'.format(salario+salario*0.15))
| 35.714286
| 87
| 0.664
|
c303173ebda4877f183222c724141eb7cc9180cf
| 823
|
py
|
Python
|
tests/test_app.py
|
Ravillatypov/macrobase
|
17382e498a516bb6aa03aa98eea8f4e9932f5788
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
Ravillatypov/macrobase
|
17382e498a516bb6aa03aa98eea8f4e9932f5788
|
[
"MIT"
] | 4
|
2020-04-30T07:17:56.000Z
|
2020-07-31T08:42:20.000Z
|
tests/test_app.py
|
Ravillatypov/macrobase
|
17382e498a516bb6aa03aa98eea8f4e9932f5788
|
[
"MIT"
] | 2
|
2020-12-23T15:52:17.000Z
|
2021-06-09T10:09:10.000Z
|
import pytest
from macrobase.app import Application, HookNames
from tests.sample_app import SampleApplication
@pytest.mark.timeout(3)
def test_empty_application(application: Application):
application.run()
def test_hooks_before_start(mocker, application: Application, sample_app: SampleApplication):
hook_spy = mocker.spy(sample_app, 'hook_before_start')
application.add_hook(HookNames.before_start, sample_app.hook_before_start)
application.run()
hook_spy.assert_called_once_with(application)
def test_hooks_after_stop(mocker, application: Application, sample_app: SampleApplication):
hook_spy = mocker.spy(sample_app, 'hook_after_stop')
application.add_hook(HookNames.before_start, sample_app.hook_after_stop)
application.run()
hook_spy.assert_called_once_with(application)
| 29.392857
| 93
| 0.808019
|
be5504e00d86c406144bd1ebad8cbfc313829c7d
| 1,103
|
py
|
Python
|
code/kafka_producer.py
|
t1191578/moniteredit
|
48465d7cbee2452392720a26320d8753f8807bc9
|
[
"Apache-2.0"
] | null | null | null |
code/kafka_producer.py
|
t1191578/moniteredit
|
48465d7cbee2452392720a26320d8753f8807bc9
|
[
"Apache-2.0"
] | null | null | null |
code/kafka_producer.py
|
t1191578/moniteredit
|
48465d7cbee2452392720a26320d8753f8807bc9
|
[
"Apache-2.0"
] | null | null | null |
from kafka import KafkaProducer
from json import dumps
import json
import requests
import time
KAFKA_TOPIC_NAME = "20A1"
KAFKA_BOOTSTRAP_SERVERS_CONS = '192.168.29.228:9092'
def success(metadata):
print(metadata.topic)
def error(exception):
print(exception)
if __name__ == "__main__":
print("Kafka Producer Application Started ... ")
kafka_producer_obj = KafkaProducer(bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS_CONS,
value_serializer=lambda x: dumps(x).encode('utf-8'))
try:
while True:
r = requests.get('http://3.7.183.103:8080/metrics')
messagecontent = json.dumps(r.text)
val = messagecontent.split('\\n')
messagecontent = val[121:145:3]
kafka_producer_obj.send(KAFKA_TOPIC_NAME, messagecontent).add_callback(success).add_errback(error)
time.sleep(1)
#producer.flush()
print('Message published successfully.')
except Exception as ex:
print('Exception in publishing message')
print(str(ex))
| 35.580645
| 110
| 0.669084
|
d1202a74bff78dabe8244f1e89d65350e8a77afb
| 2,150
|
py
|
Python
|
src/arch/x86/isa/insts/x87/arithmetic/square_root.py
|
qianlong4526888/haha
|
01baf923693873c11ae072ce4dde3d8f1d7b6239
|
[
"BSD-3-Clause"
] | 135
|
2016-10-21T03:31:49.000Z
|
2022-03-25T01:22:20.000Z
|
src/arch/x86/isa/insts/x87/arithmetic/square_root.py
|
qianlong4526888/haha
|
01baf923693873c11ae072ce4dde3d8f1d7b6239
|
[
"BSD-3-Clause"
] | 35
|
2017-03-10T17:57:46.000Z
|
2022-02-18T17:34:16.000Z
|
src/arch/x86/isa/insts/x87/arithmetic/square_root.py
|
qianlong4526888/haha
|
01baf923693873c11ae072ce4dde3d8f1d7b6239
|
[
"BSD-3-Clause"
] | 48
|
2016-12-08T12:03:13.000Z
|
2022-02-16T09:16:13.000Z
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# FSQRT
'''
| 52.439024
| 72
| 0.793953
|
a8de8b4cdcaac1ce63615791fcc1a672212a4359
| 1,871
|
py
|
Python
|
doc/processing/pypro1.py
|
rasql/edunum
|
c9e3be3dbf86a6eb6c28d46b055f8a72bff00ee5
|
[
"MIT"
] | 3
|
2021-02-15T22:30:03.000Z
|
2021-06-16T13:28:25.000Z
|
doc/processing/pypro1.py
|
rasql/edunum
|
c9e3be3dbf86a6eb6c28d46b055f8a72bff00ee5
|
[
"MIT"
] | null | null | null |
doc/processing/pypro1.py
|
rasql/edunum
|
c9e3be3dbf86a6eb6c28d46b055f8a72bff00ee5
|
[
"MIT"
] | 3
|
2021-02-15T10:37:12.000Z
|
2021-08-10T13:25:42.000Z
|
import pygame
from pygame.locals import *
screen = None
width = 640
height = 480
mousePressed = False
mouseX = 0
mouseY = 0
stroke = (0, 0, 0)
RED = 255, 0, 0
def background(*rgb):
global bg
if len(rgb) == 1:
bg = (rgb) * 3
else:
bg = rgb
screen.fill(bg)
def keyPressed():
pass
def size(w, h):
    global width, height, screen
width = w
height = h
screen = pygame.display.set_mode((w, h))
print(w, h)
def set_up():
print('set up')
size(500, 500)
print(screen)
fill_color = RED
def fill(gray):
    # Processing-style fill(): set the grayscale colour used by shapes drawn afterwards
    global fill_color
    fill_color = (gray, gray, gray)
def line(x0, y0, x1, y1):
    pygame.draw.line(screen, RED, (x0, y0), (x1, y1))
def rect(x, y, w, h):
    pygame.draw.rect(screen, RED, (x-w/2, y-h/2, w, h), 1)
def ellipse(x, y, a, b):
    pygame.draw.ellipse(screen, fill_color, (x-a/2, y-b/2, a, b), 1)
def draw():
background(100, 0, 0)
# line(0, 0, width, height)
# line(0, height, width, 0)
# ellipse(width/2, height/2, 200, 100)
#
#
# rect(100, 100, 20, 100)
# ellipse(100, 70, 60, 60)
# ellipse(81, 70, 16, 32)
# ellipse(119, 70, 16, 32)
# line(90, 150, 80, 160)
# line(110, 150, 120, 160)
    if mousePressed:
        fill(0)
    else:
        fill(255)
ellipse(mouseX, mouseY, 80, 80)
pygame.init()
set_up()
# screen = pygame.display.set_mode((640, 480))
running = True
while running:
for event in pygame.event.get():
if event.type == QUIT:
print('quit')
running = False
elif event.type == MOUSEBUTTONDOWN:
mousePressed = True
elif event.type == MOUSEBUTTONUP:
mousePressed = False
elif event.type == MOUSEMOTION:
mouseX, mouseY = event.pos
draw()
pygame.display.update()
pygame.quit()
| 18.165049
| 61
| 0.520043
|
24d46508899868d1356954ee43bfe59ec2f17a2e
| 7,048
|
py
|
Python
|
gym-hls/gym_hls/envs/getcycle.py
|
XiaoSanchez/autophase
|
3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb
|
[
"BSD-3-Clause"
] | null | null | null |
gym-hls/gym_hls/envs/getcycle.py
|
XiaoSanchez/autophase
|
3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb
|
[
"BSD-3-Clause"
] | null | null | null |
gym-hls/gym_hls/envs/getcycle.py
|
XiaoSanchez/autophase
|
3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import subprocess
import os
# Available LLVM optimizatons
# tailduplicate, simplify-libcalls, -block-placement
#opt_passes_str="-inline -jump-threading -simplifycfg -gvn -loop-rotate -codegenprepare"
opt_passes_str = "-correlated-propagation -scalarrepl -lowerinvoke -strip -strip-nondebug -sccp -globalopt -gvn -jump-threading -globaldce -loop-unswitch -scalarrepl-ssa -loop-reduce -break-crit-edges -loop-deletion -reassociate -lcssa -codegenprepare -memcpyopt -functionattrs -loop-idiom -lowerswitch -constmerge -loop-rotate -partial-inliner -inline -early-cse -indvars -adce -loop-simplify -instcombine -simplifycfg -dse -loop-unroll -lower-expect -tailcallelim -licm -sink -mem2reg -prune-eh -functionattrs -ipsccp -deadargelim -sroa -loweratomic -terminate"
# This is not used before extra dependency in Makefile.config need to be set
compile_str = """
../../mark_labels.pl test_c_code.c > test_c_code_labeled.c
clang-3.5 test_c_code_labeled.c -emit-llvm -c -fno-builtin -I ../../lib/include/ -m32 -I /usr/include/i386-linux-gnu -O0 -fno-vectorize -fno-slp-vectorize -o test_c_code.prelto.1.bc
cp -f test_c_code.prelto.1.bc test_c_code.prelto.cv.bc
../../../llvm/Release+Asserts/bin/opt opt_passes < test_c_code.prelto.cv.bc > test_c_code.prelto.2.bc
cp test_c_code.prelto.2.bc test_c_code.prelto.linked.bc
../../../llvm/Release+Asserts/bin/opt -load=../../../llvm/Release+Asserts/lib/LLVMLegUp.so -legup-config=../../legup.tcl -std-link-opts < test_c_code.prelto.linked.bc -o test_c_code.prelto.linked.1.bc
../../../llvm/Release+Asserts/bin/opt -load=../../../llvm/Release+Asserts/lib/LLVMLegUp.so -legup-config=../../legup.tcl -legup-prelto < test_c_code.prelto.linked.1.bc > test_c_code.prelto.6.bc
../../../llvm/Release+Asserts/bin/opt -load=../../../llvm/Release+Asserts/lib/LLVMLegUp.so -legup-config=../../legup.tcl -std-link-opts < test_c_code.prelto.6.bc -o test_c_code.prelto.bc
../../../llvm/Release+Asserts/bin/llvm-link test_c_code.prelto.bc ../../lib/llvm/liblegup.bc ../../lib/llvm/libm.bc -o test_c_code.postlto.6.bc
../../../llvm/Release+Asserts/bin/opt -internalize-public-api-list=main -internalize -globaldce test_c_code.postlto.6.bc -o test_c_code.postlto.8.bc
../../../llvm/Release+Asserts/bin/opt -load=../../../llvm/Release+Asserts/lib/LLVMLegUp.so -legup-config=../../legup.tcl -instcombine -std-link-opts < test_c_code.postlto.8.bc -o test_c_code.postlto.bc
../../../llvm/Release+Asserts/bin/opt -load=../../../llvm/Release+Asserts/lib/LLVMLegUp.so -legup-config=../../legup.tcl -basicaa -loop-simplify -indvars2 -loop-pipeline test_c_code.postlto.bc -o test_c_code.1.bc
../../../llvm/Release+Asserts/bin/opt -load=../../../llvm/Release+Asserts/lib/LLVMLegUp.so -legup-config=../../legup.tcl -instcombine test_c_code.1.bc -o test_c_code.bc
../../../llvm/Release+Asserts/bin/llc -legup-config=../../legup.tcl -march=v test_c_code.bc -o test_c_code.v
../../../llvm/Release+Asserts/bin/opt -load=../../../llvm/Release+Asserts/lib/LLVMLegUp.so -legup-config=../../legup.tcl -legup-track-bb < test_c_code.bc > test_c_code.track_bb.bc
../../../llvm/Release+Asserts/bin/llvm-dis test_c_code.track_bb.bc
../../../llvm/Release+Asserts/bin/llc -march=x86-64 test_c_code.track_bb.bc
gcc test_c_code.track_bb.s -o test_c_code.track_bb
./test_c_code.track_bb | grep 'Track@' | sed 's/Track@//' > test_c_code.lli_bb.trace
rm test_c_code.track_bb
perl ../../../tiger/tool_source/profiling_tools/../partition_analysis/get_hw_cycle.pl test_c_code.lli_bb.trace test_c_code.acel_cycle.rpt
"""
# Generate makefile instead, need to modify Makefile.common 119
## ifdef CUSTOM_OPT
## $(LLVM_HOME)opt $(EXTRA_OPT_FLAGS) < $(NAME).prelto.cv.bc > $(NAME).prelto.2.bc
## else # CUSTOM_OPT
## ifdef UNROLL
## $(LLVM_HOME)opt -mem2reg -loops -loop-simplify -loop-unroll $(UNROLL) < $(NAME).prelto.cv.bc > $(NAME).prelto.2.bc
## else # UNROLL
## ifeq ($(DEBUG_KEEP_VARS_IN_MEM),1)
## $(LLVM_HOME)opt -loops -loop-simplify < $(NAME).prelto.cv.bc > $(NAME).prelto.2.bc
## else # DEBUG_KEEP_VARS_IN_MEM
## $(LLVM_HOME)opt -mem2reg -loops -loop-simplify < $(NAME).prelto.cv.bc > $(NAME).prelto.2.bc
## endif # DEBUG_KEEP_VARS_IN_MEM
## endif # UNROLL
## endif # CUSTOM_OPT
makefile_str= """
NAME= test_c_code
NO_OPT=1
CUSTOM_OPT=1
EXTRA_OPT_FLAGS = opt_passes\n""" + "LEVEL = "+ os.environ["LEGUP_PATH"] + "/examples"+"""
include $(LEVEL)/Makefile.common
"""
def qw(s):
return tuple(s.split())
def countPasses():
count=len(qw(opt_passes_str))
return count
# Get a tuple of optimizations
def getPasses(opt_indice):
return map((lambda x: opt_passes[x]), opt_indice)
opt_passes = qw(opt_passes_str)
def passes2indice(passes):
indices = []
passes = qw(passes)
for passs in passes:
for i in range(len(opt_passes)):
if passs == opt_passes[i]:
indices.append(i)
break
#print(indices)
return indices
def getHWCycles(c_code, opt_indice, path=".", sim=False):
#print(len(opt_passes))
ga_seq = getPasses(opt_indice)
ga_seq_str = " ".join(ga_seq)
#print("Passes: %s"%ga_seq_str)
makefile_new = makefile_str.replace("test_c_code", c_code)
makefile_new = makefile_new.replace("opt_passes", ga_seq_str)
#print(makefile_new)
# Update the Makefile
f = open( path+"/Makefile","w")
f.write(makefile_new)
f.close()
done = False
# Run HLS
#proc = subprocess.Popen(["make accelerationCycle -s"], stdout=subprocess.PIPE, shell=True)
if sim:
proc = subprocess.Popen(["make clean p v -s"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=path)
(out, err) = proc.communicate()
print(out)
#print(err)
p = re.compile(r".*Cycles:\s+(\d+)", re.DOTALL)
m = re.match(p, out.decode("utf-8") )
if m:
hw_cycle = m.group(1)
if int(hw_cycle) == 0:
hw_cycle = 10000000
else:
#print ("NM")
hw_cycle = 10000000 # problematic
else:
proc = subprocess.Popen(["make clean accelerationCycle -s"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=path)
(out, err) = proc.communicate()
#print(err)
#if (err):
# print (err)
# f = open(c_code+"err.trace", "w")
# f.write(err)
# f.close()
#print "program output:", out
#print "program error:", err
p = re.compile(r"^.*main \|\s+(\d+).*", re.DOTALL)
#p = re.compile(r'main')
m = re.match(p, out.decode("utf-8") )
# Parse Results
if m:
hw_cycle = m.group(1)
if int(hw_cycle) == 0:
hw_cycle = 10000000
done = True
else:
#print ("NM")
hw_cycle = 10000000 # problematic
done = True
#print("Cycles: %s"%hw_cycle)
return int(hw_cycle), done
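# --- Usage sketch (illustrative only) ---
# How the helpers above might be combined. getHWCycles shells out to `make`, so
# it only works from a LegUp example directory with LEGUP_PATH set; the
# benchmark name and directory below are placeholders.
#
#   indices = passes2indice("-mem2reg -loop-simplify -instcombine")
#   cycles, done = getHWCycles("adpcm", indices, path="./adpcm")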
def main():
indices=[23, 9, 31, 0, 25, 30]
passes=getPasses(indices)
passes_str =" ".join(str(x) for x in passes)
print(passes_str)
if __name__ == "__main__":
main()
| 46.368421
| 563
| 0.67168
|
5ef02b64c05b60ac04f89979b2eaa7477f943994
| 9,452
|
py
|
Python
|
aiohomekit/testing.py
|
jaredhobbs/aiohomekit
|
de4f708af4b2fb93a0c3d62e28b453b74a2dcecb
|
[
"Apache-2.0"
] | 21
|
2020-02-14T20:45:47.000Z
|
2022-03-29T01:36:14.000Z
|
aiohomekit/testing.py
|
jaredhobbs/aiohomekit
|
de4f708af4b2fb93a0c3d62e28b453b74a2dcecb
|
[
"Apache-2.0"
] | 78
|
2020-03-22T22:25:50.000Z
|
2022-03-10T23:08:43.000Z
|
aiohomekit/testing.py
|
jaredhobbs/aiohomekit
|
de4f708af4b2fb93a0c3d62e28b453b74a2dcecb
|
[
"Apache-2.0"
] | 18
|
2020-04-09T23:02:18.000Z
|
2022-03-01T23:20:19.000Z
|
#
# Copyright 2019 aiohomekit team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import logging
from typing import Dict
from aiohomekit import exceptions
from aiohomekit.controller import Controller
from aiohomekit.controller.ip.connection import HomeKitConnection
from aiohomekit.controller.pairing import AbstractPairing
from aiohomekit.exceptions import AccessoryNotFoundError
from aiohomekit.model import Accessories
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.protocol.statuscodes import HapStatusCode
_LOGGER = logging.getLogger(__name__)
FAKE_CAMERA_IMAGE = (
b"/9j/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRE"
b"NDg8QEBEQCgwSExIQEw8QEBD/yQALCAABAAEBAREA/8wABgAQEAX/2gAIAQEAAD8A0s8g/9k="
)
class FakeDiscovery:
def __init__(
self, controller: "FakeController", device_id: str, accessories: Accessories
):
self.controller = controller
self.device_id = device_id
self.accessories = accessories
self.pairing_code = "111-22-333"
@property
def info(self):
sf = 0
# Is accessory unpaired?
if self.device_id not in self.controller.pairings:
sf = sf | 0x01
return {
"name": "TestDevice",
"address": "127.0.0.1",
"port": 8080,
"md": "TestDevice",
"pv": "1.0",
"id": self.device_id,
"c#": 1,
"s#": 1,
"ff": 0,
"ci": 0,
"sf": sf,
}
async def perform_pairing(self, alias: str, pin):
finish_pairing = await self.start_pairing(alias)
return await finish_pairing(pin)
async def start_pairing(self, alias: str):
if self.device_id in self.controller.pairings:
raise exceptions.AlreadyPairedError(f"{self.device_id} already paired")
async def finish_pairing(pairing_code):
if pairing_code != self.pairing_code:
raise exceptions.AuthenticationError("M4")
pairing_data = {}
pairing_data["AccessoryIP"] = self.info["address"]
pairing_data["AccessoryPort"] = self.info["port"]
pairing_data["Connection"] = "IP"
obj = self.controller.pairings[alias] = FakePairing(
self.controller, pairing_data, self.accessories
)
return obj
return finish_pairing
async def identify(self):
return True
class PairingTester:
"""
A holding class for test-only helpers.
This is done to minimize the difference between a FakePairing and a real pairing.
"""
def __init__(self, pairing):
self.pairing = pairing
self.events_enabled = True
self.characteristics = {}
self.services = {}
name_uuid = CharacteristicsTypes.get_uuid(CharacteristicsTypes.NAME)
for accessory in self.pairing.accessories:
for service in accessory.services:
service_map = {}
for char in service.characteristics:
self.characteristics[(accessory.aid, char.iid)] = char
service_map[char.type] = char
if char.type == name_uuid:
self.services[char.get_value()] = service_map
def set_events_enabled(self, value):
self.events_enabled = value
def update_named_service(self, name: str, new_values):
"""
Finds a named service then sets characteristics by type.
pairing.test.update_named_service("kitchen lamp", {
CharacteristicTypes.ON: True
})
Triggers events if enabled.
"""
if name not in self.services:
raise RuntimeError(f"Fake error: service {name!r} not found")
service = self.services[name]
changed = []
for uuid, value in new_values.items():
uuid = CharacteristicsTypes.get_uuid(uuid)
if uuid not in service:
raise RuntimeError(
f"Unexpected characteristic {uuid!r} applied to service {name!r}"
)
char = service[uuid]
char.set_value(value)
changed.append((char.service.accessory.aid, char.iid))
self._send_events(changed)
def update_aid_iid(self, characteristics):
changed = []
for (aid, iid, value) in characteristics:
self.characteristics[(aid, iid)].set_value(value)
changed.append((aid, iid))
self._send_events(changed)
def _send_events(self, changed):
if not self.events_enabled:
return
event = {}
for (aid, iid) in changed:
if (aid, iid) not in self.pairing.subscriptions:
continue
event[(aid, iid)] = {"value": self.characteristics[(aid, iid)].get_value()}
if not event:
return
for listener in self.pairing.listeners:
try:
listener(event)
except Exception:
_LOGGER.exception("Unhandled error when processing event")
class FakePairing(AbstractPairing):
"""
A test fake that pretends to be a paired HomeKit accessory.
This only contains methods and values that exist on the upstream Pairing
class.
"""
def __init__(self, controller, pairing_data, accessories: Accessories):
"""Create a fake pairing from an accessory model."""
super().__init__(controller)
self.connection = HomeKitConnection(None, "fake_host", 1234)
self.connection.transport = "mock_transport"
self.connection.protocol = "mock_protocol"
self.accessories = accessories
self.pairing_data: Dict[str, AbstractPairing] = {}
self.available = True
self.testing = PairingTester(self)
async def close(self):
pass
async def identify(self):
pass
async def list_pairings(self):
return []
async def remove_pairing(self, pairing_id):
pass
async def list_accessories_and_characteristics(self):
"""Fake implementation of list_accessories_and_characteristics."""
return self.accessories.serialize()
async def get_characteristics(self, characteristics):
"""Fake implementation of get_characteristics."""
if not self.available:
raise AccessoryNotFoundError("Accessory not found")
results = {}
for aid, cid in characteristics:
accessory = self.accessories.aid(aid)
char = accessory.characteristics.iid(cid)
if char.status != HapStatusCode.SUCCESS:
results[(aid, cid)] = {"status": char.status.value}
continue
results[(aid, cid)] = {"value": char.get_value()}
return results
async def put_characteristics(self, characteristics):
"""Fake implementation of put_characteristics."""
filtered = []
results = {}
for (aid, cid, value) in characteristics:
accessory = self.accessories.aid(aid)
char = accessory.characteristics.iid(cid)
if char.status != HapStatusCode.SUCCESS:
results[(aid, cid)] = {"status": char.status.value}
continue
filtered.append((aid, cid, value))
self.testing.update_aid_iid(filtered)
return results
async def image(self, accessory, width, height):
return base64.b64decode(FAKE_CAMERA_IMAGE)
class FakeController(Controller):
"""
A test fake that pretends to be a paired HomeKit accessory.
This only contains methods and values that exist on the upstream Controller
class.
"""
def __init__(self):
"""Create a Fake controller with no pairings."""
self.pairings = {}
self.discoveries = {}
def add_device(self, accessories):
device_id = "00:00:00:00:00:00"
discovery = self.discoveries[device_id] = FakeDiscovery(
self,
device_id,
accessories=accessories,
)
return discovery
async def add_paired_device(self, accessories: Accessories, alias: str = None):
discovery = self.add_device(accessories)
finish_pairing = await discovery.start_pairing(alias or discovery.device_id)
return await finish_pairing(discovery.pairing_code)
async def discover_ip(self, max_seconds: int = 10):
return self.discoveries.values()
async def find_ip_by_device_id(self, device_id, max_seconds=10):
return self.discoveries[device_id]
async def remove_pairing(self, alias: str) -> None:
del self.pairings[alias]
def load_pairing(self, alias: str, pairing_data):
        # This assumes a test has already pre-seeded self.pairings with a fake
        # via add_paired_device
return self.pairings[alias]
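# --- Usage sketch (not part of the original module) ---
# A rough picture of how a test might drive these fakes, assuming `accessories`
# is an aiohomekit.model.Accessories instance built elsewhere and that the
# targeted service has a NAME characteristic of "kitchen lamp"; the alias and
# the (aid, iid) pair below are placeholders.
#
#   controller = FakeController()
#   pairing = await controller.add_paired_device(accessories, alias="kitchen")
#   pairing.testing.update_named_service(
#       "kitchen lamp", {CharacteristicsTypes.ON: True}
#   )
#   values = await pairing.get_characteristics([(1, 8)])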
| 32.259386
| 87
| 0.633834
|
25b8a36c1a353f0b998ee032011bc3380d307bd9
| 32,911
|
py
|
Python
|
CalcFOMForm.py
|
johnmgregoire/JCAPDataProcess
|
c8120e5b2f8fc840a6307b40293dccaf94bd8c2c
|
[
"BSD-3-Clause"
] | 5
|
2017-03-24T21:05:22.000Z
|
2021-09-15T18:18:05.000Z
|
CalcFOMForm.py
|
johnmgregoire/JCAPDataProcess
|
c8120e5b2f8fc840a6307b40293dccaf94bd8c2c
|
[
"BSD-3-Clause"
] | null | null | null |
CalcFOMForm.py
|
johnmgregoire/JCAPDataProcess
|
c8120e5b2f8fc840a6307b40293dccaf94bd8c2c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Google Drive\Documents\PythonCode\JCAP\JCAPDataProcess\QtDesign\CalcFOMForm.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CalcFOMDialog(object):
def setupUi(self, CalcFOMDialog):
CalcFOMDialog.setObjectName(_fromUtf8("CalcFOMDialog"))
CalcFOMDialog.resize(1142, 882)
self.BatchComboBox = QtGui.QComboBox(CalcFOMDialog)
self.BatchComboBox.setGeometry(QtCore.QRect(10, 80, 271, 22))
self.BatchComboBox.setObjectName(_fromUtf8("BatchComboBox"))
self.BatchPushButton = QtGui.QPushButton(CalcFOMDialog)
self.BatchPushButton.setGeometry(QtCore.QRect(10, 60, 131, 21))
self.BatchPushButton.setObjectName(_fromUtf8("BatchPushButton"))
self.gridLayoutWidget_3 = QtGui.QWidget(CalcFOMDialog)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(10, 360, 181, 163))
self.gridLayoutWidget_3.setObjectName(_fromUtf8("gridLayoutWidget_3"))
self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_3)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_17 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_17.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_2.addWidget(self.label_17, 1, 0, 1, 1)
self.label_18 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_18.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout_2.addWidget(self.label_18, 2, 0, 1, 1)
self.label_19 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_19.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout_2.addWidget(self.label_19, 3, 0, 1, 1)
self.UserNameLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.UserNameLineEdit.setObjectName(_fromUtf8("UserNameLineEdit"))
self.gridLayout_2.addWidget(self.UserNameLineEdit, 2, 1, 1, 1)
self.AnaTypeLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AnaTypeLineEdit.setObjectName(_fromUtf8("AnaTypeLineEdit"))
self.gridLayout_2.addWidget(self.AnaTypeLineEdit, 0, 1, 1, 1)
self.AnaNameLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AnaNameLineEdit.setEnabled(False)
self.AnaNameLineEdit.setObjectName(_fromUtf8("AnaNameLineEdit"))
self.gridLayout_2.addWidget(self.AnaNameLineEdit, 1, 1, 1, 1)
self.AccessLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AccessLineEdit.setObjectName(_fromUtf8("AccessLineEdit"))
self.gridLayout_2.addWidget(self.AccessLineEdit, 3, 1, 1, 1)
self.label_16 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_16.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout_2.addWidget(self.label_16, 0, 0, 1, 1)
self.AnaDescLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AnaDescLineEdit.setObjectName(_fromUtf8("AnaDescLineEdit"))
self.gridLayout_2.addWidget(self.AnaDescLineEdit, 5, 0, 1, 2)
self.label_11 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 4, 0, 1, 2)
self.ImportExpPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ImportExpPushButton.setGeometry(QtCore.QRect(0, 30, 71, 21))
self.ImportExpPushButton.setObjectName(_fromUtf8("ImportExpPushButton"))
self.ImportAnaPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ImportAnaPushButton.setGeometry(QtCore.QRect(0, 10, 71, 21))
self.ImportAnaPushButton.setObjectName(_fromUtf8("ImportAnaPushButton"))
self.textBrowser_plate = QtGui.QTextBrowser(CalcFOMDialog)
self.textBrowser_plate.setGeometry(QtCore.QRect(570, 530, 561, 341))
self.textBrowser_plate.setObjectName(_fromUtf8("textBrowser_plate"))
self.textBrowser_h = QtGui.QTextBrowser(CalcFOMDialog)
self.textBrowser_h.setGeometry(QtCore.QRect(760, 20, 371, 231))
self.textBrowser_h.setObjectName(_fromUtf8("textBrowser_h"))
self.textBrowser_comp = QtGui.QTextBrowser(CalcFOMDialog)
self.textBrowser_comp.setGeometry(QtCore.QRect(510, 250, 621, 281))
self.textBrowser_comp.setObjectName(_fromUtf8("textBrowser_comp"))
self.line = QtGui.QFrame(CalcFOMDialog)
self.line.setGeometry(QtCore.QRect(276, 10, 20, 351))
self.line.setLineWidth(2)
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.line_2 = QtGui.QFrame(CalcFOMDialog)
self.line_2.setGeometry(QtCore.QRect(500, 0, 20, 521))
self.line_2.setLineWidth(2)
self.line_2.setFrameShape(QtGui.QFrame.VLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.line_3 = QtGui.QFrame(CalcFOMDialog)
self.line_3.setGeometry(QtCore.QRect(0, 350, 281, 20))
self.line_3.setLineWidth(2)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.line_4 = QtGui.QFrame(CalcFOMDialog)
self.line_4.setGeometry(QtCore.QRect(0, 99, 281, 21))
self.line_4.setLineWidth(2)
self.line_4.setFrameShape(QtGui.QFrame.HLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName(_fromUtf8("line_4"))
self.AnalysisNamesComboBox = QtGui.QComboBox(CalcFOMDialog)
self.AnalysisNamesComboBox.setGeometry(QtCore.QRect(200, 410, 301, 22))
self.AnalysisNamesComboBox.setObjectName(_fromUtf8("AnalysisNamesComboBox"))
self.label_20 = QtGui.QLabel(CalcFOMDialog)
self.label_20.setGeometry(QtCore.QRect(208, 390, 301, 21))
self.label_20.setObjectName(_fromUtf8("label_20"))
self.AnaTreeWidget = QtGui.QTreeWidget(CalcFOMDialog)
self.AnaTreeWidget.setGeometry(QtCore.QRect(10, 530, 551, 341))
self.AnaTreeWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.AnaTreeWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.AnaTreeWidget.setHeaderHidden(True)
self.AnaTreeWidget.setExpandsOnDoubleClick(False)
self.AnaTreeWidget.setObjectName(_fromUtf8("AnaTreeWidget"))
self.AnaTreeWidget.headerItem().setText(0, _fromUtf8("1"))
self.AnaTreeWidget.header().setVisible(False)
self.AnaTreeWidget.header().setCascadingSectionResizes(False)
self.AnaTreeWidget.header().setStretchLastSection(True)
self.getplatemapCheckBox = QtGui.QCheckBox(CalcFOMDialog)
self.getplatemapCheckBox.setGeometry(QtCore.QRect(170, 10, 111, 21))
self.getplatemapCheckBox.setChecked(True)
self.getplatemapCheckBox.setObjectName(_fromUtf8("getplatemapCheckBox"))
self.CompPlotOrderComboBox = QtGui.QComboBox(CalcFOMDialog)
self.CompPlotOrderComboBox.setGeometry(QtCore.QRect(520, 220, 111, 22))
self.CompPlotOrderComboBox.setObjectName(_fromUtf8("CompPlotOrderComboBox"))
self.label_2 = QtGui.QLabel(CalcFOMDialog)
self.label_2.setGeometry(QtCore.QRect(520, 200, 111, 21))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label = QtGui.QLabel(CalcFOMDialog)
self.label.setGeometry(QtCore.QRect(520, 90, 111, 16))
self.label.setObjectName(_fromUtf8("label"))
self.CompPlotTypeComboBox = QtGui.QComboBox(CalcFOMDialog)
self.CompPlotTypeComboBox.setGeometry(QtCore.QRect(520, 110, 111, 31))
self.CompPlotTypeComboBox.setObjectName(_fromUtf8("CompPlotTypeComboBox"))
self.label_4 = QtGui.QLabel(CalcFOMDialog)
self.label_4.setGeometry(QtCore.QRect(520, 150, 111, 21))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.compplotsizeLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.compplotsizeLineEdit.setGeometry(QtCore.QRect(520, 170, 111, 22))
self.compplotsizeLineEdit.setObjectName(_fromUtf8("compplotsizeLineEdit"))
self.label_3 = QtGui.QLabel(CalcFOMDialog)
self.label_3.setGeometry(QtCore.QRect(520, 40, 119, 21))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.fomplotchoiceComboBox = QtGui.QComboBox(CalcFOMDialog)
self.fomplotchoiceComboBox.setGeometry(QtCore.QRect(520, 60, 111, 22))
self.fomplotchoiceComboBox.setObjectName(_fromUtf8("fomplotchoiceComboBox"))
self.usedaqtimeCheckBox = QtGui.QCheckBox(CalcFOMDialog)
self.usedaqtimeCheckBox.setGeometry(QtCore.QRect(640, 50, 119, 20))
self.usedaqtimeCheckBox.setObjectName(_fromUtf8("usedaqtimeCheckBox"))
self.label_9 = QtGui.QLabel(CalcFOMDialog)
self.label_9.setGeometry(QtCore.QRect(640, 80, 119, 16))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.aboverangecolLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.aboverangecolLineEdit.setGeometry(QtCore.QRect(640, 100, 119, 22))
self.aboverangecolLineEdit.setObjectName(_fromUtf8("aboverangecolLineEdit"))
self.label_6 = QtGui.QLabel(CalcFOMDialog)
self.label_6.setGeometry(QtCore.QRect(640, 120, 119, 20))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.belowrangecolLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.belowrangecolLineEdit.setGeometry(QtCore.QRect(640, 140, 119, 22))
self.belowrangecolLineEdit.setObjectName(_fromUtf8("belowrangecolLineEdit"))
self.label_8 = QtGui.QLabel(CalcFOMDialog)
self.label_8.setGeometry(QtCore.QRect(640, 160, 119, 21))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.vminmaxLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.vminmaxLineEdit.setGeometry(QtCore.QRect(640, 180, 119, 22))
self.vminmaxLineEdit.setObjectName(_fromUtf8("vminmaxLineEdit"))
self.stdcsvplotchoiceComboBox = QtGui.QComboBox(CalcFOMDialog)
self.stdcsvplotchoiceComboBox.setGeometry(QtCore.QRect(520, 20, 111, 22))
self.stdcsvplotchoiceComboBox.setObjectName(_fromUtf8("stdcsvplotchoiceComboBox"))
self.label_5 = QtGui.QLabel(CalcFOMDialog)
self.label_5.setGeometry(QtCore.QRect(520, 0, 119, 21))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.colormapLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.colormapLineEdit.setGeometry(QtCore.QRect(640, 220, 119, 22))
self.colormapLineEdit.setObjectName(_fromUtf8("colormapLineEdit"))
self.label_10 = QtGui.QLabel(CalcFOMDialog)
self.label_10.setGeometry(QtCore.QRect(640, 200, 119, 21))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.label_13 = QtGui.QLabel(CalcFOMDialog)
self.label_13.setGeometry(QtCore.QRect(290, 150, 219, 21))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.TechTypeRadioButton_0 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_0.setGeometry(QtCore.QRect(290, 170, 219, 16))
self.TechTypeRadioButton_0.setText(_fromUtf8(""))
self.TechTypeRadioButton_0.setObjectName(_fromUtf8("TechTypeRadioButton_0"))
self.TechTypeButtonGroup = QtGui.QButtonGroup(CalcFOMDialog)
self.TechTypeButtonGroup.setObjectName(_fromUtf8("TechTypeButtonGroup"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_0)
self.TechTypeRadioButton_1 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_1.setGeometry(QtCore.QRect(290, 190, 219, 16))
self.TechTypeRadioButton_1.setText(_fromUtf8(""))
self.TechTypeRadioButton_1.setObjectName(_fromUtf8("TechTypeRadioButton_1"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_1)
self.TechTypeRadioButton_2 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_2.setGeometry(QtCore.QRect(290, 210, 219, 16))
self.TechTypeRadioButton_2.setText(_fromUtf8(""))
self.TechTypeRadioButton_2.setObjectName(_fromUtf8("TechTypeRadioButton_2"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_2)
self.TechTypeRadioButton_3 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_3.setGeometry(QtCore.QRect(290, 230, 219, 16))
self.TechTypeRadioButton_3.setText(_fromUtf8(""))
self.TechTypeRadioButton_3.setObjectName(_fromUtf8("TechTypeRadioButton_3"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_3)
self.TechTypeRadioButton_4 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_4.setGeometry(QtCore.QRect(290, 250, 219, 16))
self.TechTypeRadioButton_4.setText(_fromUtf8(""))
self.TechTypeRadioButton_4.setObjectName(_fromUtf8("TechTypeRadioButton_4"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_4)
self.TechTypeRadioButton_5 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_5.setGeometry(QtCore.QRect(290, 270, 219, 16))
self.TechTypeRadioButton_5.setText(_fromUtf8(""))
self.TechTypeRadioButton_5.setObjectName(_fromUtf8("TechTypeRadioButton_5"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_5)
self.TechTypeRadioButton_6 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_6.setGeometry(QtCore.QRect(290, 290, 219, 16))
self.TechTypeRadioButton_6.setText(_fromUtf8(""))
self.TechTypeRadioButton_6.setObjectName(_fromUtf8("TechTypeRadioButton_6"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_6)
self.TechTypeRadioButton_7 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_7.setGeometry(QtCore.QRect(290, 310, 219, 16))
self.TechTypeRadioButton_7.setText(_fromUtf8(""))
self.TechTypeRadioButton_7.setObjectName(_fromUtf8("TechTypeRadioButton_7"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_7)
self.TechTypeRadioButton_8 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_8.setGeometry(QtCore.QRect(290, 330, 219, 16))
self.TechTypeRadioButton_8.setText(_fromUtf8(""))
self.TechTypeRadioButton_8.setObjectName(_fromUtf8("TechTypeRadioButton_8"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_8)
self.TechTypeRadioButton_9 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_9.setGeometry(QtCore.QRect(290, 350, 219, 16))
self.TechTypeRadioButton_9.setText(_fromUtf8(""))
self.TechTypeRadioButton_9.setObjectName(_fromUtf8("TechTypeRadioButton_9"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_9)
self.TechTypeRadioButton_10 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_10.setGeometry(QtCore.QRect(290, 370, 219, 16))
self.TechTypeRadioButton_10.setText(_fromUtf8(""))
self.TechTypeRadioButton_10.setObjectName(_fromUtf8("TechTypeRadioButton_10"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_10)
self.label_7 = QtGui.QLabel(CalcFOMDialog)
self.label_7.setGeometry(QtCore.QRect(10, 110, 267, 21))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.ExpRunUseComboBox = QtGui.QComboBox(CalcFOMDialog)
self.ExpRunUseComboBox.setGeometry(QtCore.QRect(0, 130, 267, 20))
self.ExpRunUseComboBox.setObjectName(_fromUtf8("ExpRunUseComboBox"))
self.label_14 = QtGui.QLabel(CalcFOMDialog)
self.label_14.setGeometry(QtCore.QRect(10, 150, 265, 21))
self.label_14.setObjectName(_fromUtf8("label_14"))
self.RunSelectTreeWidget = QtGui.QTreeWidget(CalcFOMDialog)
self.RunSelectTreeWidget.setGeometry(QtCore.QRect(10, 170, 271, 181))
self.RunSelectTreeWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.RunSelectTreeWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.RunSelectTreeWidget.setHeaderHidden(True)
self.RunSelectTreeWidget.setExpandsOnDoubleClick(False)
self.RunSelectTreeWidget.setObjectName(_fromUtf8("RunSelectTreeWidget"))
self.RunSelectTreeWidget.headerItem().setText(0, _fromUtf8("1"))
self.RunSelectTreeWidget.header().setVisible(False)
self.RunSelectTreeWidget.header().setCascadingSectionResizes(False)
self.RunSelectTreeWidget.header().setStretchLastSection(True)
self.UserFOMLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.UserFOMLineEdit.setGeometry(QtCore.QRect(200, 500, 301, 20))
self.UserFOMLineEdit.setObjectName(_fromUtf8("UserFOMLineEdit"))
self.line_5 = QtGui.QFrame(CalcFOMDialog)
self.line_5.setGeometry(QtCore.QRect(0, 50, 281, 21))
self.line_5.setLineWidth(2)
self.line_5.setFrameShape(QtGui.QFrame.HLine)
self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
self.line_5.setObjectName(_fromUtf8("line_5"))
self.label_21 = QtGui.QLabel(CalcFOMDialog)
self.label_21.setGeometry(QtCore.QRect(208, 480, 301, 20))
self.label_21.setObjectName(_fromUtf8("label_21"))
self.FOMProcessNamesComboBox = QtGui.QComboBox(CalcFOMDialog)
self.FOMProcessNamesComboBox.setGeometry(QtCore.QRect(200, 450, 301, 22))
self.FOMProcessNamesComboBox.setObjectName(_fromUtf8("FOMProcessNamesComboBox"))
self.label_22 = QtGui.QLabel(CalcFOMDialog)
self.label_22.setGeometry(QtCore.QRect(208, 430, 301, 21))
self.label_22.setObjectName(_fromUtf8("label_22"))
self.autoplotCheckBox = QtGui.QCheckBox(CalcFOMDialog)
self.autoplotCheckBox.setGeometry(QtCore.QRect(640, 20, 119, 20))
self.autoplotCheckBox.setChecked(True)
self.autoplotCheckBox.setObjectName(_fromUtf8("autoplotCheckBox"))
self.RaiseErrorPushButton = QtGui.QPushButton(CalcFOMDialog)
self.RaiseErrorPushButton.setGeometry(QtCore.QRect(1120, 0, 31, 21))
self.RaiseErrorPushButton.setObjectName(_fromUtf8("RaiseErrorPushButton"))
self.OpenInfoPushButton = QtGui.QPushButton(CalcFOMDialog)
self.OpenInfoPushButton.setGeometry(QtCore.QRect(70, 10, 91, 21))
self.OpenInfoPushButton.setObjectName(_fromUtf8("OpenInfoPushButton"))
self.expfilenameLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.expfilenameLineEdit.setGeometry(QtCore.QRect(70, 30, 211, 21))
self.expfilenameLineEdit.setText(_fromUtf8(""))
self.expfilenameLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.expfilenameLineEdit.setObjectName(_fromUtf8("expfilenameLineEdit"))
self.line_6 = QtGui.QFrame(CalcFOMDialog)
self.line_6.setGeometry(QtCore.QRect(189, 360, 16, 161))
self.line_6.setLineWidth(2)
self.line_6.setFrameShape(QtGui.QFrame.VLine)
self.line_6.setFrameShadow(QtGui.QFrame.Sunken)
self.line_6.setObjectName(_fromUtf8("line_6"))
self.EditAnalysisParamsPushButton = QtGui.QPushButton(CalcFOMDialog)
self.EditAnalysisParamsPushButton.setGeometry(QtCore.QRect(290, 70, 102, 21))
self.EditAnalysisParamsPushButton.setObjectName(_fromUtf8("EditAnalysisParamsPushButton"))
self.AnalyzeDataPushButton = QtGui.QPushButton(CalcFOMDialog)
self.AnalyzeDataPushButton.setGeometry(QtCore.QRect(290, 90, 102, 21))
self.AnalyzeDataPushButton.setObjectName(_fromUtf8("AnalyzeDataPushButton"))
self.ImportAnalysisParamsPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ImportAnalysisParamsPushButton.setGeometry(QtCore.QRect(400, 10, 102, 20))
self.ImportAnalysisParamsPushButton.setObjectName(_fromUtf8("ImportAnalysisParamsPushButton"))
self.SaveAnaPushButton = QtGui.QPushButton(CalcFOMDialog)
self.SaveAnaPushButton.setGeometry(QtCore.QRect(290, 111, 102, 20))
self.SaveAnaPushButton.setObjectName(_fromUtf8("SaveAnaPushButton"))
self.SaveViewPushButton = QtGui.QPushButton(CalcFOMDialog)
self.SaveViewPushButton.setGeometry(QtCore.QRect(290, 130, 102, 21))
self.SaveViewPushButton.setObjectName(_fromUtf8("SaveViewPushButton"))
self.EditDfltVisPushButton = QtGui.QPushButton(CalcFOMDialog)
self.EditDfltVisPushButton.setGeometry(QtCore.QRect(400, 30, 102, 23))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.EditDfltVisPushButton.sizePolicy().hasHeightForWidth())
self.EditDfltVisPushButton.setSizePolicy(sizePolicy)
self.EditDfltVisPushButton.setObjectName(_fromUtf8("EditDfltVisPushButton"))
self.ClearAnalysisPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ClearAnalysisPushButton.setGeometry(QtCore.QRect(290, 10, 102, 21))
self.ClearAnalysisPushButton.setObjectName(_fromUtf8("ClearAnalysisPushButton"))
self.ClearSingleAnalysisPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ClearSingleAnalysisPushButton.setGeometry(QtCore.QRect(290, 32, 102, 21))
self.ClearSingleAnalysisPushButton.setObjectName(_fromUtf8("ClearSingleAnalysisPushButton"))
self.ViewResultPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ViewResultPushButton.setGeometry(QtCore.QRect(400, 132, 102, 21))
self.ViewResultPushButton.setObjectName(_fromUtf8("ViewResultPushButton"))
self.UpdatePlotPushButton = QtGui.QPushButton(CalcFOMDialog)
self.UpdatePlotPushButton.setGeometry(QtCore.QRect(400, 111, 101, 20))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.UpdatePlotPushButton.sizePolicy().hasHeightForWidth())
self.UpdatePlotPushButton.setSizePolicy(sizePolicy)
self.UpdatePlotPushButton.setObjectName(_fromUtf8("UpdatePlotPushButton"))
self.OpenAuxExpAnaPushButton = QtGui.QPushButton(CalcFOMDialog)
self.OpenAuxExpAnaPushButton.setGeometry(QtCore.QRect(400, 70, 101, 21))
self.OpenAuxExpAnaPushButton.setObjectName(_fromUtf8("OpenAuxExpAnaPushButton"))
self.AttachMiscPushButton = QtGui.QPushButton(CalcFOMDialog)
self.AttachMiscPushButton.setGeometry(QtCore.QRect(400, 90, 101, 21))
self.AttachMiscPushButton.setObjectName(_fromUtf8("AttachMiscPushButton"))
self.retranslateUi(CalcFOMDialog)
QtCore.QMetaObject.connectSlotsByName(CalcFOMDialog)
def retranslateUi(self, CalcFOMDialog):
CalcFOMDialog.setWindowTitle(_translate("CalcFOMDialog", "Process Data, Calc FOM from EXP", None))
self.BatchComboBox.setToolTip(_translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None))
self.BatchPushButton.setToolTip(_translate("CalcFOMDialog", "Considering the files already in the EXP, keep the files that meet all criteria", None))
self.BatchPushButton.setText(_translate("CalcFOMDialog", "Run Batch Process:", None))
self.label_17.setText(_translate("CalcFOMDialog", "Analysis name:", None))
self.label_18.setText(_translate("CalcFOMDialog", "created by:", None))
self.label_19.setText(_translate("CalcFOMDialog", "access:", None))
self.UserNameLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.UserNameLineEdit.setText(_translate("CalcFOMDialog", "eche", None))
self.AnaTypeLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.AnaTypeLineEdit.setText(_translate("CalcFOMDialog", "eche", None))
self.AnaNameLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.AnaNameLineEdit.setText(_translate("CalcFOMDialog", "eche", None))
self.AccessLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.AccessLineEdit.setText(_translate("CalcFOMDialog", "hte", None))
self.label_16.setText(_translate("CalcFOMDialog", "Analysis type:", None))
self.AnaDescLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP.\n"
"If you modify the beginning with a\"<comment>;\" the \n"
"comment will remain as you change analysis options", None))
self.label_11.setText(_translate("CalcFOMDialog", "Analysis description:", None))
self.ImportExpPushButton.setToolTip(_translate("CalcFOMDialog", "Import a .exp file, which will provide options for the data type, RUNs and analysis type", None))
self.ImportExpPushButton.setText(_translate("CalcFOMDialog", "Import EXP", None))
self.ImportAnaPushButton.setToolTip(_translate("CalcFOMDialog", "Grab the EXP from the \"Create EXP\" window", None))
self.ImportAnaPushButton.setText(_translate("CalcFOMDialog", "Open ANA", None))
self.AnalysisNamesComboBox.setToolTip(_translate("CalcFOMDialog", "The name of the functions that will be applied to data\n"
"to generate Intermediate and FOM results", None))
self.label_20.setText(_translate("CalcFOMDialog", "Choose analysis function:", None))
self.getplatemapCheckBox.setText(_translate("CalcFOMDialog", "Get platemaps", None))
self.CompPlotOrderComboBox.setToolTip(_translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None))
self.label_2.setText(_translate("CalcFOMDialog", "Element plot order:", None))
self.label.setText(_translate("CalcFOMDialog", "Comp. plot type:", None))
self.CompPlotTypeComboBox.setToolTip(_translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None))
self.label_4.setText(_translate("CalcFOMDialog", "Comp. point size:", None))
self.compplotsizeLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.compplotsizeLineEdit.setText(_translate("CalcFOMDialog", "patch", None))
self.label_3.setText(_translate("CalcFOMDialog", "fom to plot", None))
self.fomplotchoiceComboBox.setToolTip(_translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None))
self.usedaqtimeCheckBox.setText(_translate("CalcFOMDialog", "Use DAQ time", None))
self.label_9.setText(_translate("CalcFOMDialog", "above color", None))
self.aboverangecolLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.label_6.setText(_translate("CalcFOMDialog", "below color", None))
self.belowrangecolLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.label_8.setText(_translate("CalcFOMDialog", "fom range min,max", None))
self.vminmaxLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.stdcsvplotchoiceComboBox.setToolTip(_translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None))
self.label_5.setText(_translate("CalcFOMDialog", "standard plot", None))
self.colormapLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.colormapLineEdit.setText(_translate("CalcFOMDialog", "jet", None))
self.label_10.setText(_translate("CalcFOMDialog", "colormap", None))
self.label_13.setText(_translate("CalcFOMDialog", "Choose analysis scope:", None))
self.label_7.setText(_translate("CalcFOMDialog", "Primary data type (run_use)", None))
self.ExpRunUseComboBox.setToolTip(_translate("CalcFOMDialog", "This \"use\" is specified in the EXP \n"
"and determines what types of analysis \n"
"can be performed", None))
self.label_14.setText(_translate("CalcFOMDialog", "Choose RUNs to include:", None))
self.UserFOMLineEdit.setToolTip(_translate("CalcFOMDialog", "enter comma-delimited list of string or\n"
"number FOMS that will become a constant column in the .csv generated by \"Analyze Data\".\n"
"After entry complete, you will be prompted for fom names", None))
self.label_21.setText(_translate("CalcFOMDialog", "User-defined FOMs", None))
self.FOMProcessNamesComboBox.setToolTip(_translate("CalcFOMDialog", "The name of the functions that will be applied to data\n"
"to generate Intermediate and FOM results", None))
self.label_22.setText(_translate("CalcFOMDialog", "Choose FOM post-process function:", None))
self.autoplotCheckBox.setText(_translate("CalcFOMDialog", "Auto plot ana__x", None))
self.RaiseErrorPushButton.setText(_translate("CalcFOMDialog", "err", None))
self.OpenInfoPushButton.setText(_translate("CalcFOMDialog", "Open via Search", None))
self.expfilenameLineEdit.setToolTip(_translate("CalcFOMDialog", "Comment string to be included in EXP", None))
self.EditAnalysisParamsPushButton.setToolTip(_translate("CalcFOMDialog", "Edit parameters involved in the analysis", None))
self.EditAnalysisParamsPushButton.setText(_translate("CalcFOMDialog", "Edit Params", None))
self.AnalyzeDataPushButton.setToolTip(_translate("CalcFOMDialog", "Perform the selected analysis", None))
self.AnalyzeDataPushButton.setText(_translate("CalcFOMDialog", "Analyze Data", None))
self.ImportAnalysisParamsPushButton.setToolTip(_translate("CalcFOMDialog", "Import a .par file", None))
self.ImportAnalysisParamsPushButton.setText(_translate("CalcFOMDialog", "Import Params", None))
self.SaveAnaPushButton.setToolTip(_translate("CalcFOMDialog", "Save .fom, FOR THE SELECTED ANALYSIS TYPE ONLY.\n"
" Intermediate data will also be saved", None))
self.SaveAnaPushButton.setText(_translate("CalcFOMDialog", "Save ANA", None))
self.SaveViewPushButton.setToolTip(_translate("CalcFOMDialog", "Send Raw, Intermediate and FOM data to the Visualize window", None))
self.SaveViewPushButton.setText(_translate("CalcFOMDialog", "Save+View", None))
self.EditDfltVisPushButton.setToolTip(_translate("CalcFOMDialog", "Edit the FOM visualization parameters in the .csv\n"
"ONLY WORKS ON MOST RECENT \"Analyze Data\"", None))
self.EditDfltVisPushButton.setText(_translate("CalcFOMDialog", "Update Dflt Vis", None))
self.ClearAnalysisPushButton.setToolTip(_translate("CalcFOMDialog", "Clear the analysis, removing intermediate data and FOMs", None))
self.ClearAnalysisPushButton.setText(_translate("CalcFOMDialog", "Clear Analysis", None))
self.ClearSingleAnalysisPushButton.setToolTip(_translate("CalcFOMDialog", "Clear the analysis, removing intermediate data and FOMs", None))
self.ClearSingleAnalysisPushButton.setText(_translate("CalcFOMDialog", "Del 1 ana__x", None))
self.ViewResultPushButton.setToolTip(_translate("CalcFOMDialog", "Send Raw, Intermediate and FOM data to the Visualize window", None))
self.ViewResultPushButton.setText(_translate("CalcFOMDialog", "View Result", None))
self.UpdatePlotPushButton.setToolTip(_translate("CalcFOMDialog", "Edit the FOM visualization parameters in the .csv\n"
"ONLY WORKS ON MOST RECENT \"Analyze Data\"", None))
self.UpdatePlotPushButton.setText(_translate("CalcFOMDialog", "Update Plots", None))
self.OpenAuxExpAnaPushButton.setToolTip(_translate("CalcFOMDialog", "Perform the selected analysis", None))
self.OpenAuxExpAnaPushButton.setText(_translate("CalcFOMDialog", "SelectAuxEXP/ANA", None))
self.AttachMiscPushButton.setToolTip(_translate("CalcFOMDialog", "Perform the selected analysis", None))
self.AttachMiscPushButton.setText(_translate("CalcFOMDialog", "Attach Files to ana", None))
| 72.173246
| 170
| 0.7427
|
b39588fee3c6bfbd3c0774f877692cd5008a8b61
| 7,920
|
py
|
Python
|
util.py
|
Harsha-Musunuri/stylegan2-pytorch
|
bd9b42f7031aa1d16245ac64bc562baf0fc0945f
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 7
|
2021-11-13T02:31:21.000Z
|
2022-03-30T01:30:15.000Z
|
util.py
|
Harsha-Musunuri/stylegan2-pytorch
|
bd9b42f7031aa1d16245ac64bc562baf0fc0945f
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
util.py
|
Harsha-Musunuri/stylegan2-pytorch
|
bd9b42f7031aa1d16245ac64bc562baf0fc0945f
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2021-03-03T03:46:18.000Z
|
2021-03-03T03:46:18.000Z
|
import os
import sys
import ast
import argparse
import math
import torch
import shutil
import random
import numpy as np
from torchvision.io import write_video
from torchvision import utils
from torch.nn import functional as F
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.vec2sca_avg = 0
self.vec2sca_val = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if torch.is_tensor(self.val) and torch.numel(self.val) != 1:
self.avg[self.count == 0] = 0
self.vec2sca_avg = self.avg.sum() / len(self.avg)
self.vec2sca_val = self.val.sum() / len(self.val)
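# Illustrative usage sketch (an assumption, not part of the original file):
# tracking a running scalar such as the training loss across minibatches.
#
#   meter = AverageMeter()
#   for loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
#       meter.update(loss, n=batch_size)
#   print(meter.avg)  # weighted running average over all samples seen so far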
def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def set_log_dir(args):
args.log_dir = os.path.join(args.log_root, args.name)
os.makedirs(args.log_dir, exist_ok=True)
os.makedirs(os.path.join(args.log_dir, 'sample'), exist_ok=True)
os.makedirs(os.path.join(args.log_dir, 'weight'), exist_ok=True)
return args
def print_args(parser, args):
message = f"Name: {getattr(args, 'name', 'NA')}\n"
message += '--------------- Arguments ---------------\n'
for k, v in sorted(vars(args).items()):
comment = ''
default = parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '------------------ End ------------------'
# print(message) # suppress messages to std out
# save to the disk
exp_dir = args.log_dir
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
file_name = os.path.join(exp_dir, 'args.txt')
with open(file_name, 'wt') as f:
f.write(message)
f.write('\n')
# save command to disk
file_name = os.path.join(exp_dir, 'cmd.txt')
with open(file_name, 'wt') as f:
if os.getenv('CUDA_VISIBLE_DEVICES'):
f.write('CUDA_VISIBLE_DEVICES=%s ' % os.getenv('CUDA_VISIBLE_DEVICES'))
f.write('python ')
f.write(' '.join(sys.argv))
f.write('\n')
# backup train code
shutil.copyfile(sys.argv[0], os.path.join(args.log_dir, f'{os.path.basename(sys.argv[0])}.txt'))
def print_models(models, args):
if not isinstance(models, (list, tuple)):
models = [models]
exp_dir = args.log_dir
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
file_name = os.path.join(exp_dir, 'models.txt')
with open(file_name, 'a+') as f:
f.write(f"Name: {getattr(args, 'name', 'NA')}\n{'-'*50}\n")
for model in models:
f.write(str(model))
f.write("\n\n")
def str2list(attr_bins):
assert (isinstance(attr_bins, str))
attr_bins = attr_bins.strip()
if attr_bins.endswith(('.npy', '.npz')):
attr_bins = np.load(attr_bins)
else:
assert (attr_bins.startswith('[') and attr_bins.endswith(']'))
# attr_bins = np.array(ast.literal_eval(attr_bins))
attr_bins = ast.literal_eval(attr_bins)
return attr_bins
def str2bool(v):
"""
borrowed from:
https://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python
:param v:
:return: bool(v)
"""
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
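# Illustrative usage sketch (assumed, not from the original file): str2bool is
# intended as an argparse `type` so that flags such as "--use_flip no" parse
# into real booleans instead of truthy strings.
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--use_flip', type=str2bool, default=True)
#   args = parser.parse_args(['--use_flip', 'no'])  # args.use_flip is False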
def linspace(idx_range, val_range, idx_max, val_default=None):
if len(idx_range) >= 2 and len(val_range) >= 2:
dtype = np.array(val_range).dtype
val_list = (
[val_range[0]] * max(0, int(idx_range[0])) +
list(np.linspace(val_range[0], val_range[1],
idx_range[1] - idx_range[0] + 1, dtype=dtype)) +
[val_range[1]] * max(0, int(idx_max - idx_range[1] + 2))
)
else:
val_list = [val_default] * (idx_max + 1)
return val_list
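# Illustrative sketch (assumed): linspace builds a per-index schedule that is
# flat at val_range[0] before idx_range[0], ramps linearly across
# [idx_range[0], idx_range[1]], and stays at val_range[1] afterwards.
#
#   sched = linspace(idx_range=[2, 5], val_range=[0.0, 1.0], idx_max=8)
#   # -> [0.0, 0.0, 0.0, 0.333..., 0.666..., 1.0, 1.0, ...]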
def save_image(ximg, path):
n_sample = ximg.shape[0]
utils.save_image(ximg, path, nrow=int(n_sample ** 0.5), normalize=True, value_range=(-1, 1))
def save_video(xseq, path):
video = xseq.data.cpu().clamp(-1, 1)
video = ((video+1.)/2.*255).type(torch.uint8).permute(0, 2, 3, 1)
write_video(path, video, fps=15)
def estimate_optical_flow(netNetwork, tenFirst, tenSecond):
# Copied from https://github.com/sniklaus/pytorch-pwc/blob/master/run.py
# Assume tensors are normalized to [-1, 1]
tenFirst = (tenFirst + 1.) / 2
tenSecond = (tenSecond + 1.) / 2
assert(tenFirst.shape[1] == tenSecond.shape[1])
assert(tenFirst.shape[2] == tenSecond.shape[2])
intWidth = tenFirst.shape[2]
intHeight = tenFirst.shape[1]
# assert(intWidth == 1024) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
# assert(intHeight == 436) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
tenPreprocessedFirst = tenFirst.cuda().view(1, 3, intHeight, intWidth)
tenPreprocessedSecond = tenSecond.cuda().view(1, 3, intHeight, intWidth)
intPreprocessedWidth = min(int(math.floor(math.ceil(intWidth / 64.0) * 64.0)), 128)
intPreprocessedHeight = min(int(math.floor(math.ceil(intHeight / 64.0) * 64.0)), 128)
tenPreprocessedFirst = torch.nn.functional.interpolate(input=tenPreprocessedFirst, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
tenPreprocessedSecond = torch.nn.functional.interpolate(input=tenPreprocessedSecond, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
# tenFlow = 20.0 * torch.nn.functional.interpolate(input=netNetwork(tenPreprocessedFirst, tenPreprocessedSecond), size=(intHeight, intWidth), mode='bilinear', align_corners=False)
tenFlow = 20.0 * netNetwork(tenPreprocessedFirst, tenPreprocessedSecond)
tenFlow[:, 0, :, :] *= float(intWidth) / float(intPreprocessedWidth)
tenFlow[:, 1, :, :] *= float(intHeight) / float(intPreprocessedHeight)
return tenFlow[0, :, :, :]
def randperm(n, ordered=False):
# ordered: include ordered permutation?
if ordered:
return torch.randperm(n)
else:
perm_ord = torch.tensor(range(n))
if n < 2:
# No non-identity permutation exists; return identity to avoid an infinite loop.
return perm_ord
while True:
perm = torch.randperm(n)
if (perm != perm_ord).any():
return perm
def permute_dim(tensor, i=0, j=1, ordered=False):
# Permute along dim i for each j.
# e.g.: Factor-VAE, i = 0, j = 1; Jigsaw, i = 2, j = 0
device = tensor.device
n = tensor.shape[i]
return torch.cat([torch.index_select(t, i, randperm(n, ordered).to(device)) for t in tensor.split(1, j)], j)
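# Illustrative sketch (assumed): Factor-VAE style shuffling permutes the batch
# dimension independently for every latent unit (i=0, j=1), breaking the joint
# correlations between dimensions while keeping each marginal unchanged.
#
#   z = torch.randn(8, 10)             # (batch, latent)
#   z_perm = permute_dim(z, i=0, j=1)  # per-column batch shuffle
#   assert z_perm.shape == z.shape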
"""
Negative Data Augmentations
"""
def negative_augment(img, nda_type='jigsaw_4'):
img_aug = None
if nda_type.startswith('jigsaw'):
n, c, h, w = img.shape
n_patch = int(nda_type.split('_')[1]) # number of patches
n_patch_sqrt = int(n_patch ** 0.5)
h_patch, w_patch = h//n_patch_sqrt, w//n_patch_sqrt
patches = F.unfold(img, kernel_size=(h_patch, w_patch), stride=(h_patch, w_patch))
patches_perm = permute_dim(patches, 2, 0)
img_aug = F.fold(patches_perm, (h, w), kernel_size=(h_patch, w_patch), stride=(h_patch, w_patch))
return img_aug, None
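# Illustrative usage sketch (assumed): jigsaw negative augmentation scrambles
# the n_patch tiles of each image, producing hard "negative" samples that keep
# low-level statistics but break global structure.
#
#   imgs = torch.randn(4, 3, 64, 64)
#   imgs_nda, _ = negative_augment(imgs, nda_type='jigsaw_4')
#   assert imgs_nda.shape == imgs.shape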
| 35.515695
| 183
| 0.632449
|
75267806d63bae7582741a125a7d6a94d637239d
| 1,290
|
py
|
Python
|
newsletter/migrations/0012_newslettereventlink.py
|
OpenCanada/website
|
6334ff412addc0562ac247080194e5d182e8e924
|
[
"MIT"
] | 10
|
2015-12-18T16:41:33.000Z
|
2018-11-11T08:36:46.000Z
|
newsletter/migrations/0012_newslettereventlink.py
|
OpenCanada/website
|
6334ff412addc0562ac247080194e5d182e8e924
|
[
"MIT"
] | 96
|
2015-07-14T22:45:56.000Z
|
2017-07-25T19:59:48.000Z
|
newsletter/migrations/0012_newslettereventlink.py
|
OpenCanada/website
|
6334ff412addc0562ac247080194e5d182e8e924
|
[
"MIT"
] | 9
|
2015-07-28T14:38:43.000Z
|
2019-01-04T17:38:42.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20150728_1915'),
('newsletter', '0011_newsletterlistpage'),
]
operations = [
migrations.CreateModel(
name='NewsletterEventLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('override_text', wagtail.core.fields.RichTextField(default='', help_text='Text to describe this event.', blank=True)),
('event', models.ForeignKey(related_name='newsletter_links', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='events.EventPage', help_text='Link to an event', null=True)),
('newsletter', modelcluster.fields.ParentalKey(related_name='event_links', to='newsletter.NewsletterPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| 39.090909
| 200
| 0.633333
|
d71457e936bc7bcfdd701e0ba03477310dd717f2
| 1,319
|
py
|
Python
|
ENV/lib/python3.5/site-packages/pyrogram/api/types/update_saved_gifs.py
|
block1o1/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 4
|
2021-10-14T21:22:25.000Z
|
2022-03-12T19:58:48.000Z
|
ENV/lib/python3.5/site-packages/pyrogram/api/types/update_saved_gifs.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | null | null | null |
ENV/lib/python3.5/site-packages/pyrogram/api/types/update_saved_gifs.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 1
|
2022-03-15T22:52:53.000Z
|
2022-03-15T22:52:53.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class UpdateSavedGifs(Object):
"""Attributes:
ID: ``0x9375341e``
No parameters required.
"""
ID = 0x9375341e
def __init__(self):
pass
@staticmethod
def read(b: BytesIO, *args) -> "UpdateSavedGifs":
# No flags
return UpdateSavedGifs()
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
# No flags
return b.getvalue()
| 26.918367
| 74
| 0.681577
|
08c2ff72344f8a1657965f5f5032443b325bfce1
| 33,990
|
py
|
Python
|
atom.symlink/packages/wakatime/lib/wakatime-master/wakatime/packages/pytz/__init__.py
|
sanata-/dotenv
|
1dda3581ba86ebd78ddfe410ca11d54f096e4427
|
[
"MIT"
] | 1
|
2015-05-25T15:31:24.000Z
|
2015-05-25T15:31:24.000Z
|
packages/wakatime/wakatime/packages/pytz/__init__.py
|
AppVentus/AvTime-client
|
164333c3e1d80d1baba00cd0e1d308f2c1023156
|
[
"BSD-3-Clause"
] | null | null | null |
packages/wakatime/wakatime/packages/pytz/__init__.py
|
AppVentus/AvTime-client
|
164333c3e1d80d1baba00cd0e1d308f2c1023156
|
[
"BSD-3-Clause"
] | null | null | null |
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The Olson database is updated several times a year.
OLSON_VERSION = '2013d'
VERSION = OLSON_VERSION
# Version format for a patch release - only one so far.
#VERSION = OLSON_VERSION + '.2'
__version__ = OLSON_VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
__all__ = [
'timezone', 'utc', 'country_timezones', 'country_names',
'AmbiguousTimeError', 'InvalidTimeError',
'NonExistentTimeError', 'UnknownTimeZoneError',
'all_timezones', 'all_timezones_set',
'common_timezones', 'common_timezones_set',
]
import sys, datetime, os.path, gettext
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet
from pytz.tzinfo import unpickler
from pytz.tzfile import build_tzinfo, _byte_string
try:
unicode
except NameError: # Python 3.x
# Python 3.x doesn't have unicode(), making writing code
# for Python 2.3 and Python 3.x a pain.
unicode = str
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
s.encode('US-ASCII') # Raise an exception if not ASCII
return s # But return the original string - not a byte string.
else: # Python 2.x
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii(u'Hello')
'Hello'
>>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
return s.encode('US-ASCII')
def open_resource(name):
"""Open a resource from the zoneinfo subdir for reading.
Uses the pkg_resources module if available and no standard file
found at the calculated location.
"""
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
filename = os.path.join(os.path.dirname(__file__),
'zoneinfo', *name_parts)
if not os.path.exists(filename) and resource_stream is not None:
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
return resource_stream(__name__, 'zoneinfo/' + name)
return open(filename, 'rb')
def resource_exists(name):
"""Return true if the given resource exists"""
try:
open_resource(name).close()
return True
except IOError:
return False
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
_tzinfo_cache = {}
def timezone(zone):
r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(unicode('US/Eastern')) is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> try:
... timezone('Asia/Shangri-La')
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
>>> try:
... timezone(unicode('\N{TRADE MARK SIGN}'))
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
'''
if zone.upper() == 'UTC':
return utc
try:
zone = ascii(zone)
except UnicodeEncodeError:
# All valid timezones are ASCII
raise UnknownTimeZoneError(zone)
zone = _unmunge_zone(zone)
if zone not in _tzinfo_cache:
if zone in all_timezones_set:
fp = open_resource(zone)
try:
_tzinfo_cache[zone] = build_tzinfo(zone, fp)
finally:
fp.close()
else:
raise UnknownTimeZoneError(zone)
return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = ZERO
_dst = ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def __reduce__(self):
return _UTC, ()
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC() # UTC is a singleton
def _UTC():
"""Factory function for utc unpickling.
Makes sure that unpickling a utc instance always returns the same
module global.
These examples belong in the UTC class above, but it is obscured; or in
the README.txt, but we are not depending on Python 2.4 so integrating
the README.txt examples with the unit tests is not trivial.
>>> import datetime, pickle
>>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
>>> naive = dt.replace(tzinfo=None)
>>> p = pickle.dumps(dt, 1)
>>> naive_p = pickle.dumps(naive, 1)
>>> len(p) - len(naive_p)
17
>>> new = pickle.loads(p)
>>> new == dt
True
>>> new is dt
False
>>> new.tzinfo is dt.tzinfo
True
>>> utc is UTC is timezone('UTC')
True
>>> utc is timezone('GMT')
False
"""
return utc
_UTC.__safe_for_unpickling__ = True
def _p(*args):
"""Factory function for unpickling pytz tzinfo instances.
Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
by shortening the path.
"""
return unpickler(*args)
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
"""Map ISO 3166 country code to a list of timezone names commonly used
in that country.
iso3166_code is the two letter code used to identify the country.
>>> def print_list(list_of_strings):
... 'We use a helper so doctests work under Python 2.3 -> 3.x'
... for s in list_of_strings:
... print(s)
>>> print_list(country_timezones['nz'])
Pacific/Auckland
Pacific/Chatham
>>> print_list(country_timezones['ch'])
Europe/Zurich
>>> print_list(country_timezones['CH'])
Europe/Zurich
>>> print_list(country_timezones[unicode('ch')])
Europe/Zurich
>>> print_list(country_timezones['XXX'])
Traceback (most recent call last):
...
KeyError: 'XXX'
Previously, this information was exposed as a function rather than a
dictionary. This is still supported::
>>> print_list(country_timezones('nz'))
Pacific/Auckland
Pacific/Chatham
"""
def __call__(self, iso3166_code):
"""Backwards compatibility."""
return self[iso3166_code]
def _fill(self):
data = {}
zone_tab = open_resource('zone.tab')
try:
for line in zone_tab:
line = line.decode('US-ASCII')
if line.startswith('#'):
continue
code, coordinates, zone = line.split(None, 4)[:3]
if zone not in all_timezones_set:
continue
try:
data[code].append(zone)
except KeyError:
data[code] = [zone]
self.data = data
finally:
zone_tab.close()
country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
'''Dictionary providing ISO3166 code -> English name.
>>> print(country_names['au'])
Australia
'''
def _fill(self):
data = {}
zone_tab = open_resource('iso3166.tab')
try:
for line in zone_tab.readlines():
line = line.decode('US-ASCII')
if line.startswith('#'):
continue
code, name = line.split(None, 1)
data[code] = name.strip()
self.data = data
finally:
zone_tab.close()
country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return ZERO
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.replace(tzinfo=self)
def FixedOffset(offset, _tzinfos = {}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> one.dst(datetime.datetime.now())
datetime.timedelta(0)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
>>> two.dst(datetime.datetime.now())
datetime.timedelta(0)
The datetime.timedelta must be between the range of -1 and 1 day,
non-inclusive.
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
# We haven't seen this one before. We need to save it.
# Use setdefault to avoid a race condition and make sure we have
# only one
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info
FixedOffset.__safe_for_unpickling__ = True
def _test():
import doctest, os, sys
sys.path.insert(0, os.pardir)
import pytz
return doctest.testmod(pytz)
if __name__ == '__main__':
_test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = LazyList(
tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
common_timezones = LazyList(
tz for tz in common_timezones if tz in all_timezones)
common_timezones_set = LazySet(common_timezones)
| 22.465301
| 81
| 0.66293
|
26ed295ad5e1dcfc1e0d44d47e9c63e21d362f1d
| 1,024
|
py
|
Python
|
example.py
|
f416720001/PythonHMM
|
f50a9a883122950cab7b574be8d8c3da1b21d12d
|
[
"BSD-3-Clause"
] | null | null | null |
example.py
|
f416720001/PythonHMM
|
f50a9a883122950cab7b574be8d8c3da1b21d12d
|
[
"BSD-3-Clause"
] | null | null | null |
example.py
|
f416720001/PythonHMM
|
f50a9a883122950cab7b574be8d8c3da1b21d12d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from hmm import Model
# From Wikipedia: https://zh.wikipedia.org/wiki/%E9%9A%90%E9%A9%AC%E5%B0%94%E5%8F%AF%E5%A4%AB%E6%A8%A1%E5%9E%8B (Hidden Markov model)
# Suppose you have a friend who lives far away and calls you every day to tell you what he did that day.
# Your friend is only interested in three activities: walking in the park, shopping, and cleaning the apartment.
# What he chooses to do depends only on the weather. You do not know the actual weather where he lives, but you know the general trends. Based on what he tells you he did each day, you want to guess the weather where he is.
# The weather states cannot be observed directly = they are hidden from you
states = ('rainy', 'sunny')
# What you can observe: your friend's activities
symbols = ('walk', 'shop', 'clean')
# Initial probabilities
start_prob = {
'rainy' : 0.5,
'sunny' : 0.5
}
# Weather transitions as a Markov chain. First row: if it rains today, there is only a 30% chance of sunshine tomorrow
trans_prob = {
'rainy': { 'rainy' : 0.7, 'sunny' : 0.3 },
'sunny': { 'rainy' : 0.4, 'sunny' : 0.6 }
}
# Probability of your friend doing each activity per day. First row: if it rains, there is a 50% chance he is cleaning
emit_prob = {
'rainy': { 'walk' : 0.1, 'shop' : 0.4, 'clean' : 0.5 },
'sunny': { 'walk' : 0.6, 'shop' : 0.3, 'clean' : 0.1 }
}
# Here we go: the daily activities your friend reported
sequence = ['walk', 'shop', 'clean', 'clean', 'walk', 'walk', 'walk', 'clean']
model = Model(states, symbols, start_prob, trans_prob, emit_prob)
print(model.evaluate(sequence))
print(model.decode(sequence))
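# A minimal way to label the output, assuming (as the prints above suggest)
# that Model.evaluate returns the probability of the observation sequence and
# Model.decode the most likely hidden state sequence (the Viterbi path):
probability = model.evaluate(sequence)
weather_path = model.decode(sequence)
print("P(observations) = {}".format(probability))
print("Most likely weather: {}".format(weather_path))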
| 26.25641
| 102
| 0.638672
|
a7d7ed530e7084ad556e92084e088c1592867303
| 2,052
|
py
|
Python
|
src/main/python/cosmopusher/n560reader.py
|
konz/cosmopusher
|
7701b3ecd5d2e7fa90e34e75cb53a0dd12c6124c
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/cosmopusher/n560reader.py
|
konz/cosmopusher
|
7701b3ecd5d2e7fa90e34e75cb53a0dd12c6124c
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/cosmopusher/n560reader.py
|
konz/cosmopusher
|
7701b3ecd5d2e7fa90e34e75cb53a0dd12c6124c
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
from datetime import datetime
DATA_REGEX = re.compile(r'^(\d{2}-\w{3}-\d{2} \d{2}:\d{2}:\d{2})\s+(\d+|---)\*?\s+(\d+|---)\*?\s+(\d+|---)(.*)$')
SETTINGS_REGEX = re.compile(r'^(\S+)\s+VERSION (\S+).*?SpO2 Limit: (\d+)-(\d+)%\s+PR Limit: (\d+)-(\d+)BPM$')
LOGGER = logging.getLogger('cosmopusher')
class N560Reader:
def __init__(self, text_stream, pusher):
self.stream = text_stream
self.running = True
self.pusher = pusher
def run(self):
for line in self.stream.readlines():
if not (self.running and line):
break
self.process_line(line)
LOGGER.info("no more data - exiting")
def process_line(self, line):
data_match = DATA_REGEX.match(line)
settings_match = SETTINGS_REGEX.match(line)
time = datetime.utcnow().replace(microsecond=0).isoformat()
if data_match:
payload = {'time': time}
payload = self.add_int_value(payload, 'spO2', data_match, 2)
payload = self.add_int_value(payload, 'pulse', data_match, 3)
self.pusher.push("data", payload)
elif settings_match:
sp_o2_lower_limit = int(settings_match.group(3))
sp_o2_upper_limit = int(settings_match.group(4))
pulse_lower_limit = int(settings_match.group(5))
pulse_upper_limit = int(settings_match.group(6))
self.pusher.push("settings", {
'time': time,
'spO2LowerLimit': sp_o2_lower_limit,
'spO2UpperLimit': sp_o2_upper_limit,
'pulseLowerLimit': pulse_lower_limit,
'pulseUpperLimit': pulse_upper_limit
})
else:
LOGGER.info("unparsable line: {}".format(line))
@staticmethod
def add_int_value(payload, key, matcher, group):
try:
payload[key] = int(matcher.group(group))
except ValueError:
pass
return payload
def stop(self):
self.running = False
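# Minimal usage sketch, assuming a text stream to read from and a pusher
# object exposing push(topic, payload); PrintPusher is a hypothetical
# stand-in and not part of the original module.
if __name__ == '__main__':
    import sys

    class PrintPusher:
        def push(self, topic, payload):
            print(topic, payload)

    logging.basicConfig(level=logging.INFO)
    N560Reader(sys.stdin, PrintPusher()).run()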
| 31.569231
| 112
| 0.575536
|
12647855880d55ecf3c852631919d9142408df9e
| 7,151
|
py
|
Python
|
gesture_digit_prediction_app/hello/views.py
|
qiulongquan/DS_Research
|
864181b458e5af6f22b471d1598b7d96b3efc757
|
[
"Apache-2.0"
] | 1
|
2020-11-08T07:26:22.000Z
|
2020-11-08T07:26:22.000Z
|
gesture_digit_prediction_app/hello/views.py
|
qiulongquan/DS_Research
|
864181b458e5af6f22b471d1598b7d96b3efc757
|
[
"Apache-2.0"
] | null | null | null |
gesture_digit_prediction_app/hello/views.py
|
qiulongquan/DS_Research
|
864181b458e5af6f22b471d1598b7d96b3efc757
|
[
"Apache-2.0"
] | null | null | null |
from .models import Img
from django.shortcuts import render
from tensorflow.keras.preprocessing import image
from django.utils import timezone
from django.utils.timezone import localtime
import matplotlib.pyplot as plt
import numpy as np
import os
# shutil provides file and directory manipulation helpers
import shutil
import tensorflow as tf
import keras
import pickle
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image_dataset_from_directory
from keras import models
from keras.models import load_model
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
# https://docs.djangoproject.com/ja/3.0/topics/http/file-uploads/
dirs='/workspace/python/temp_pic'
dirs_0='/workspace/python/temp_pic/patterndir'
BATCH_SIZE=32
IMG_SIZE = (224, 224)
def index(request):
now = localtime(timezone.now())
print("qiulongquan_now={}".format(now))
context = {'now': now, 'qiu': "customization"}
return render(request, 'hello/index.html', context)
def create_temp_pic_dir(img_path):
if not os.path.exists(dirs):
os.mkdir(dirs)
os.mkdir(dirs_0)
else:
        # Forcibly remove everything inside dirs, including the dirs directory itself
shutil.rmtree(dirs)
os.mkdir(dirs)
os.mkdir(dirs_0)
if img_path is not None and os.path.splitext(img_path.split('/')[-1])[-1] == ".jpg":
src=img_path
dst=os.path.join(dirs_0,img_path.split('/')[-1])
shutil.copyfile(src,dst)
print("copy done={}".format(dst))
else:
print("no file copy")
def preprocessing(dir_path):
print("qiu_dir_path",dir_path)
picture_data = image_dataset_from_directory(dir_path,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
AUTOTUNE = tf.data.experimental.AUTOTUNE
picture_data = picture_data.prefetch(buffer_size=AUTOTUNE)
image, _ = next(iter(picture_data))
return image
def prediction_process(img_paths):
print(tf.__version__)
classes = ['zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine']
model_path = os.path.join("model", 'efficientnet_unfreeze_model.h5')
# model_path = os.path.join("model", 'mobilenet_v2_model.h5')
# model_path = os.path.join("model", 'sign_language_vgg16.h5')
model = load_model(model_path)
print(model.summary())
prediction_results = []
result_set = []
for img_path in img_paths:
if img_path is not None:
create_temp_pic_dir(img_path)
prepro_image=preprocessing(dirs)
pred = model.predict(prepro_image)
Predict_value = pred[0].argmax()
print("qiulongquan_predict={}".format(Predict_value))
prediction_results.append(Predict_value)
top_indices = pred[0].argsort()[-5:][::-1]
result_five = [(classes[i], pred[0][i]) for i in top_indices]
print("result_five type={}".format(type(result_five)))
result_set.append(result_five)
for x in result_five:
print(x)
elif img_path is None:
prediction_results.append("")
result_set.append("")
return prediction_results, result_set
def prediction(request):
print("prediction image")
if request.method == 'POST':
checkbox_values = request.POST.getlist('checks[]')
print("qiulongquan_POST_checkbox[]={}".format(checkbox_values))
imgs = Img.objects.all()
img_paths = []
for i, img in enumerate(imgs):
if str(i) in checkbox_values:
print("img_path={}".format(os.path.join("media", img.img.name)))
img_paths.append(os.path.join("media", img.img.name))
else:
img_paths.append(None)
prediction_results, result_set = prediction_process(img_paths)
content = {
'imgs': imgs,
'port': request.META['SERVER_PORT'],
'host': request.get_host(),
'img_paths': img_paths,
'prediction_results': prediction_results,
'result_set': result_set,
}
return render(request, 'hello/showing.html', content)
def handle_uploaded_file(f, img_path):
with open(img_path, 'wb') as destination:
for chunk in f.chunks():
destination.write(chunk)
destination.close()
def uploadImg(request):
"""
:param request:
:return:
"""
print("upload files and then show files")
if request.method == 'POST':
img_files = request.FILES.getlist("img")
for img_file in img_files:
name = img_file.name
img_path = os.path.join("media/file", name)
if os.path.exists(img_path.encode('utf-8')):
print(str(img_path.encode('utf-8')) + " exists.")
continue
else:
print(str(img_path) + " no exists.")
handle_uploaded_file(img_file, img_path)
Img(img=os.path.join("file", name), name=name).save()
print("{} upload done.".format(name))
return render(request, 'hello/uploading.html')
# return HttpResponsePermanentRedirect("/s/" + code + "/")
# from django.shortcuts import render
# from .models import Img
#
#
# def uploadImg(request):
# """
# :param request:
# :return:
# """
# if request.method == 'POST':
# new_img = Img(
# img=request.FILES.get('img'),
# name=request.FILES.get('img').name
# )
# new_img.save()
# return render(request, 'hello/uploading.html')
def showImg(request):
"""
:param request:
:return:
"""
print("qiulongquan_showImg_start")
imgs = Img.objects.all()
content = {
'imgs': imgs,
'port': request.META['SERVER_PORT'],
'host': request.get_host(),
}
print("qiulongquan_content={}".format(content))
for i in imgs:
print("qiulongquan_url={}".format(i.img.url))
return render(request, 'hello/showing.html', content)
def deleteImg(request):
delete_list = request.POST.getlist('checks[]')
print("qiulongquan_delete_list={}".format(delete_list))
imgs = Img.objects.all()
for img in imgs:
if str(img.id) in delete_list:
file_path=str(img.img.url)
if file_path[0:1]=='/':
file_path=file_path[1:]
if os.path.exists(os.path.join('media', file_path)):
os.remove(os.path.join('media', file_path))
print("%s delete completed" %
os.path.join('media', file_path))
Img.objects.get(id=img.id).delete()
else:
print('no such file:%s' %
os.path.join('media', file_path))
print("delete files done.")
imgs = Img.objects.all()
content = {
'imgs': imgs,
'port': request.META['SERVER_PORT'],
'host': request.get_host(),
}
return render(request, 'hello/showing.html', content)
| 33.260465
| 88
| 0.602154
|
6625f7cc49d40c6595aefdda25775656f289b47f
| 1,014
|
py
|
Python
|
shopping_list.py
|
ecampostrini/shopping-buddy
|
9748c972f5df3b102c41d245c92dfc87bcf0700f
|
[
"Apache-2.0"
] | null | null | null |
shopping_list.py
|
ecampostrini/shopping-buddy
|
9748c972f5df3b102c41d245c92dfc87bcf0700f
|
[
"Apache-2.0"
] | null | null | null |
shopping_list.py
|
ecampostrini/shopping-buddy
|
9748c972f5df3b102c41d245c92dfc87bcf0700f
|
[
"Apache-2.0"
] | null | null | null |
import yaml
from collections import namedtuple
from units import BaseQuantity
class ShoppingList:
Item = namedtuple("ShoppingListItem", "name quantity")
@classmethod
def fromFile(cls, filename):
with open(filename, "r") as shopping_list:
return cls(yaml.safe_load(shopping_list))
def __init__(self, sl_yaml):
assert sl_yaml["kind"] == "shopping_list"
self.name = sl_yaml["name"]
self.store = sl_yaml["store"]
self.items = {}
for item in sl_yaml["items"]:
name = item["name"]
# TODO add repeated items based on their store instead of complaining
if name in self.items:
self.items[name] = self.items[name] + BaseQuantity.fromText(item["quantity"])
else:
self.items[name] = BaseQuantity.fromText(item["quantity"])
def __repr__(self):
return "{}:\n{}\n".format(self.name, repr(self.items))
def __iter__(self):
self.iterator = iter(self.items.items())
return self
def __next__(self):
return next(self.iterator)
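# Minimal usage sketch; the file name is hypothetical and the YAML layout
# mirrors what __init__ expects (kind, name, store and a list of items with
# name/quantity entries):
#
#   shopping = ShoppingList.fromFile("weekly_groceries.yaml")
#   for name, quantity in shopping:
#       print(name, quantity)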
| 27.405405
| 85
| 0.674556
|
462c0a59dae2a39d94d267894599be4ab29cf159
| 629
|
py
|
Python
|
setup.py
|
zapatacomputing/z-quantum-qcbm
|
ac0899b1b3b556e25d475459a37cb5889cfd62c6
|
[
"Apache-2.0"
] | 5
|
2020-06-02T16:52:01.000Z
|
2021-11-17T11:37:55.000Z
|
setup.py
|
zapatacomputing/z-quantum-qcbm
|
ac0899b1b3b556e25d475459a37cb5889cfd62c6
|
[
"Apache-2.0"
] | 40
|
2020-05-15T19:45:01.000Z
|
2022-03-30T19:16:38.000Z
|
setup.py
|
zapatacomputing/z-quantum-qcbm
|
ac0899b1b3b556e25d475459a37cb5889cfd62c6
|
[
"Apache-2.0"
] | 5
|
2020-08-17T08:31:30.000Z
|
2021-09-08T12:32:22.000Z
|
import setuptools
setuptools.setup(
name="z-quantum-qcbm",
use_scm_version=True,
author="Zapata Computing, Inc.",
author_email="info@zapatacomputing.com",
description="QCBM package for Orquestra.",
url="https://github.com/zapatacomputing/z-quantum-qcbm ",
packages=setuptools.find_namespace_packages(
include=["zquantum.*"], where="src/python"
),
package_dir={"": "src/python"},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
setup_requires=["setuptools_scm~=6.0"],
install_requires=["z-quantum-core"],
)
| 29.952381
| 61
| 0.659777
|
368c76167a3daff86d8478a9de1dd552123661c5
| 3,182
|
py
|
Python
|
model-optimizer/mo/ops/memoryoffset.py
|
evgenytalanin-intel/openvino
|
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
|
[
"Apache-2.0"
] | 1
|
2021-07-30T17:03:50.000Z
|
2021-07-30T17:03:50.000Z
|
model-optimizer/mo/ops/memoryoffset.py
|
evgenytalanin-intel/openvino
|
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
|
[
"Apache-2.0"
] | 4
|
2021-04-01T08:29:48.000Z
|
2021-08-30T16:12:52.000Z
|
model-optimizer/mo/ops/memoryoffset.py
|
evgenytalanin-intel/openvino
|
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
|
[
"Apache-2.0"
] | 3
|
2021-03-09T08:27:29.000Z
|
2021-04-07T04:58:54.000Z
|
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.graph.graph import Graph, Node
from mo.ops.op import Op
from mo.utils.error import Error
class MemoryOffset(Op):
op = 'MemoryOffset'
enabled = True
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'op': 'MemoryOffset',
'pair_name': None,
'has_default': False,
'infer': __class__.infer,
'in_ports_count': 1,
'out_ports_count': 1,
}, attrs)
def supported_attrs(self):
return ['t']
@staticmethod
def infer(node: Node):
        # MemoryOffset is split into 2 parts to avoid a cycle in the graph.
        # Calculate the shape from the shape of the previous layer where possible;
        # otherwise the shape information from the initial Kaldi model is used.
if not node.in_port(0).disconnected():
copy_shape_infer(node)
pair_node = Node(node.graph, node.pair_name)
pair_node.out_port(0).data.set_shape(node.out_port(0).data.get_shape())
else:
pair_node = Node(node.graph, node.pair_name)
if pair_node.in_port(0).data.get_shape() is not None:
node.out_port(0).data.set_shape(pair_node.in_port(0).data.get_shape())
copy_shape_infer(pair_node)
elif pair_node.has_valid('element_size'):
# TODO Add here real batch
node.out_port(0).data.set_shape(np.array([1, pair_node['element_size']]))
            elif pair_node.in_port(0).get_source().node.has_valid('out-size'):
out_size = pair_node.in_port(0).get_source().node['out-size']
node.out_port(0).data.set_shape(np.array([1, out_size]))
elif pair_node.in_port(0).get_source().node.op == "Add" and \
pair_node.in_port(0).get_source().node.in_port(0).get_source().node.has_valid('out-size'):
out_size = pair_node.in_port(0).get_source().node.in_port(0).get_source().node['out-size']
node.out_port(0).data.set_shape(np.array([1, out_size]))
elif pair_node.in_port(0).get_source().node.has_valid('in_dim'):
out_size = pair_node.in_port(0).get_source().node['in_dim']
node.out_port(0).data.set_shape(np.array([1, out_size]))
else:
raise Error("Can't calculate MemoryOffset shape for node {}. ".format(node.id) +
"Possibly you need to add shape for it through --input_shape")
| 44.816901
| 110
| 0.638906
|
29321bef7979fde779819a2596fdee24534152ed
| 970
|
py
|
Python
|
examples/multi_goal/__init__.py
|
limash/softlearning
|
aaa9d8c5cc3ba4bf459eb5a1a7469b3ae3b4f94f
|
[
"MIT"
] | 920
|
2018-12-11T16:22:23.000Z
|
2022-03-28T08:17:07.000Z
|
examples/multi_goal/__init__.py
|
limash/softlearning
|
aaa9d8c5cc3ba4bf459eb5a1a7469b3ae3b4f94f
|
[
"MIT"
] | 133
|
2018-12-09T08:08:33.000Z
|
2022-02-28T12:43:14.000Z
|
examples/multi_goal/__init__.py
|
limash/softlearning
|
aaa9d8c5cc3ba4bf459eb5a1a7469b3ae3b4f94f
|
[
"MIT"
] | 241
|
2018-12-03T21:35:24.000Z
|
2022-03-20T06:24:45.000Z
|
"""Provides functions that are utilized by the command line interface.
In particular, the examples are exposed to the command line interface
(defined in `softlearning.scripts.console_scripts`) through the
`get_trainable_class`, `get_variant_spec`, and `get_parser` functions.
"""
def get_trainable_class(*args, **kwargs):
from .main import run_experiment
return run_experiment
def get_variant_spec(command_line_args, *args, **kwargs):
from .variants import get_variant_spec
variant_spec = get_variant_spec(command_line_args, *args, **kwargs)
return variant_spec
def get_parser():
from examples.utils import get_parser
parser = get_parser()
for dest, value in (('universe', 'gym'),
('task', 'MultiGoal'),
('domain', 'Default-v0')):
option = next(x for x in parser._actions if x.dest == dest)
option.default = value
option.choices = {value}
return parser
| 30.3125
| 71
| 0.683505
|
2f63f14926c480ac108ca49a407157dd1dcaa670
| 5,463
|
py
|
Python
|
utils/provider.py
|
MatrixSuper/3DObjectKnowledge
|
15617aacc086eeede6860142ddf1c7c7e5c5a0bc
|
[
"MIT"
] | 1
|
2018-02-13T04:08:09.000Z
|
2018-02-13T04:08:09.000Z
|
utils/provider.py
|
MatrixSuper/3DObjectKnowledge
|
15617aacc086eeede6860142ddf1c7c7e5c5a0bc
|
[
"MIT"
] | null | null | null |
utils/provider.py
|
MatrixSuper/3DObjectKnowledge
|
15617aacc086eeede6860142ddf1c7c7e5c5a0bc
|
[
"MIT"
] | null | null | null |
import os
import sys
import numpy as np
import h5py
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Download dataset for point cloud classification
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def shuffle_data(data, labels):
""" Shuffle data and labels.
Input:
data: B,N,... numpy array
label: B,... numpy array
Return:
shuffled data, label and shuffle indices
"""
idx = np.arange(len(labels))
np.random.shuffle(idx)
return data[idx, ...], labels[idx], idx
def rotate_point_cloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
""" Rotate the point cloud along up direction with certain angle.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
#rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_clip=0.18):
""" Randomly perturb the point clouds by small rotations
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
angles = np.clip(angle_sigma*np.random.randn(3), -angle_clip, angle_clip)
Rx = np.array([[1,0,0],
[0,np.cos(angles[0]),-np.sin(angles[0])],
[0,np.sin(angles[0]),np.cos(angles[0])]])
Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])],
[0,1,0],
[-np.sin(angles[1]),0,np.cos(angles[1])]])
Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0],
[np.sin(angles[2]),np.cos(angles[2]),0],
[0,0,1]])
R = np.dot(Rz, np.dot(Ry,Rx))
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), R)
return rotated_data
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
B, N, C = batch_data.shape
assert(clip > 0)
jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip)
jittered_data += batch_data
return jittered_data
def shift_point_cloud(batch_data, shift_range=0.1):
""" Randomly shift point cloud. Shift is per point cloud.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, shifted batch of point clouds
"""
B, N, C = batch_data.shape
shifts = np.random.uniform(-shift_range, shift_range, (B,3))
for batch_index in range(B):
batch_data[batch_index,:,:] += shifts[batch_index,:]
return batch_data
def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
""" Randomly scale the point cloud. Scale is per point cloud.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, scaled batch of point clouds
"""
B, N, C = batch_data.shape
scales = np.random.uniform(scale_low, scale_high, B)
for batch_index in range(B):
batch_data[batch_index,:,:] *= scales[batch_index]
return batch_data
def getDataFiles(list_filename):
return [line.rstrip() for line in open(list_filename)]
def load_h5(h5_filename):
    f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
return (data, label)
def loadDataFile(filename):
return load_h5(filename)
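# Minimal augmentation sketch chaining the helpers above on a random batch;
# shapes follow the BxNx3 convention used throughout the docstrings.
if __name__ == '__main__':
    batch = np.random.rand(4, 1024, 3).astype(np.float32)
    batch = rotate_point_cloud(batch)
    batch = rotate_perturbation_point_cloud(batch)
    batch = random_scale_point_cloud(batch)
    batch = shift_point_cloud(batch)
    batch = jitter_point_cloud(batch)
    print(batch.shape)  # still (4, 1024, 3)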
| 36.42
| 83
| 0.607542
|
ef51361ce78e93bcbddc55e3940944924644d508
| 3,724
|
py
|
Python
|
src/pynwb/ndx_simulation_output/simulation_output.py
|
ben-dichter-consulting/ndx-simulation-output
|
aca738b2463868255efbbd1eac8354471197f8c6
|
[
"BSD-3-Clause"
] | 2
|
2020-05-16T19:51:18.000Z
|
2020-06-09T19:26:43.000Z
|
src/pynwb/ndx_simulation_output/simulation_output.py
|
catalystneuro/ndx-simulation-output
|
aca738b2463868255efbbd1eac8354471197f8c6
|
[
"BSD-3-Clause"
] | 1
|
2019-11-04T17:05:23.000Z
|
2019-11-04T17:05:23.000Z
|
src/pynwb/ndx_simulation_output/simulation_output.py
|
catalystneuro/ndx-simulation-output
|
aca738b2463868255efbbd1eac8354471197f8c6
|
[
"BSD-3-Clause"
] | 2
|
2019-10-16T06:23:19.000Z
|
2019-10-16T06:24:50.000Z
|
import numpy as np
from pynwb import register_class, docval, get_class
from hdmf.common.table import VectorIndex, VectorData, DynamicTable, ElementIdentifiers
from hdmf.utils import call_docval_func
namespace = 'ndx-simulation-output'
def create_ragged_array(name, values):
"""
:param values: list of lists
:return:
"""
vector_data = VectorData(
name, 'indicates which compartments the data refers to',
[item for sublist in values for item in sublist])
vector_index = VectorIndex(
name + '_index', np.cumsum([len(x) for x in values]), target=vector_data)
return vector_data, vector_index
@register_class('Compartments', namespace)
class Compartments(DynamicTable):
__columns__ = (
{'name': 'number', 'index': True,
         'description': 'cell compartment ids corresponding to each column in the data'},
{'name': 'position', 'index': True,
'description': 'the observation intervals for each unit'},
{'name': 'label', 'description': 'the electrodes that each spike unit came from',
'index': True, 'table': True}
)
@docval({'name': 'name', 'type': str, 'doc': 'Name of this Compartments object',
'default': 'compartments'},
{'name': 'id', 'type': ('array_data', ElementIdentifiers),
'doc': 'the identifiers for the units stored in this interface', 'default': None},
{'name': 'columns', 'type': (tuple, list), 'doc': 'the columns in this table', 'default': None},
{'name': 'colnames', 'type': 'array_data', 'doc': 'the names of the columns in this table',
'default': None},
{'name': 'description', 'type': str, 'doc': 'a description of what is in this table',
'default': "Table that holds information about what places are being recorded."},
)
def __init__(self, **kwargs):
call_docval_func(super(Compartments, self).__init__, kwargs)
@staticmethod
def _compartment_finder(cell_compartments, cond, dtype, start_ind):
cell_compartments = np.array(cell_compartments)
if isinstance(cond, dtype):
return start_ind + np.where(cell_compartments == cond)[0]
else:
return np.array([start_ind + np.where(cell_compartments == x)[0] for x in cond]).ravel()
def find_compartments(self, cell, compartment_numbers=None, compartment_labels=None):
"""
Parameters
----------
cell: int
find indices of compartments of this cell
compartment_numbers: int | Iterable(int) (optional)
where these are (this is) the compartment(s)
compartment_labels: str | Iterable(str) (optional)
or where these are (this is) the label(s)
Returns
-------
np.array(dtype=int)
"""
if compartment_numbers is not None and compartment_labels is not None:
raise ValueError('you cannot specify both compartments and compartment_labels')
if cell == 0:
start_ind = 0
else:
start_ind = self.compartments['number_index'].data[cell-1]
cell_compartments = self.compartments['number'][cell]
if compartment_numbers is not None:
return self._compartment_finder(cell_compartments, compartment_numbers, int, start_ind)
elif compartment_labels is not None:
return self._compartment_finder(cell_compartments, compartment_labels, str, start_ind)
else:
return np.arange(start_ind, start_ind + len(cell_compartments), dtype=int)
CompartmentSeries = get_class('CompartmentSeries', namespace)
CompartmentSeries._compartment_finder = _compartment_finder
CompartmentSeries.find_compartments = find_compartments
SimulationMetaData = get_class('SimulationMetaData', namespace)
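# Minimal usage sketch of create_ragged_array (values are illustrative only):
# build a ragged VectorData/VectorIndex pair for two cells with two and three
# compartments respectively.
#
#   number_data, number_index = create_ragged_array('number', [[0, 1], [0, 1, 2]])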
| 39.617021
| 108
| 0.675886
|
976a88bc5347aa16532ffe28d9b07de90099da28
| 2,599
|
py
|
Python
|
qiskit_nature/circuit/library/ansatzes/uccsd.py
|
Ryand1234/qiskit-nature
|
e39594b8ede4533af7d22b2b121f29d624e569ed
|
[
"Apache-2.0"
] | 132
|
2021-01-28T14:51:11.000Z
|
2022-03-25T21:10:47.000Z
|
qiskit_nature/circuit/library/ansatzes/uccsd.py
|
Ryand1234/qiskit-nature
|
e39594b8ede4533af7d22b2b121f29d624e569ed
|
[
"Apache-2.0"
] | 449
|
2021-01-28T19:57:43.000Z
|
2022-03-31T17:01:50.000Z
|
qiskit_nature/circuit/library/ansatzes/uccsd.py
|
Ryand1234/qiskit-nature
|
e39594b8ede4533af7d22b2b121f29d624e569ed
|
[
"Apache-2.0"
] | 109
|
2021-01-28T13:17:46.000Z
|
2022-03-30T23:53:39.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The UCCSD Ansatz.
"""
from typing import Optional, Tuple
from qiskit.circuit import QuantumCircuit
from qiskit_nature.converters.second_quantization import QubitConverter
from .ucc import UCC
class UCCSD(UCC):
"""The UCCSD Ansatz.
This is a convenience subclass of the UCC Ansatz. For more information refer to :class:`UCC`.
"""
def __init__(
self,
qubit_converter: Optional[QubitConverter] = None,
num_particles: Optional[Tuple[int, int]] = None,
num_spin_orbitals: Optional[int] = None,
reps: int = 1,
initial_state: Optional[QuantumCircuit] = None,
generalized: bool = False,
preserve_spin: bool = True,
):
"""
Args:
qubit_converter: the QubitConverter instance which takes care of mapping a
:class:`~.SecondQuantizedOp` to a :class:`PauliSumOp` as well as performing all
configured symmetry reductions on it.
num_particles: the tuple of the number of alpha- and beta-spin particles.
num_spin_orbitals: the number of spin orbitals.
reps: The number of times to repeat the evolved operators.
initial_state: A `QuantumCircuit` object to prepend to the circuit.
generalized: boolean flag whether or not to use generalized excitations, which ignore
the occupation of the spin orbitals. As such, the set of generalized excitations is
only determined from the number of spin orbitals and independent from the number of
particles.
preserve_spin: boolean flag whether or not to preserve the particle spins.
"""
super().__init__(
qubit_converter=qubit_converter,
num_particles=num_particles,
num_spin_orbitals=num_spin_orbitals,
excitations="sd",
alpha_spin=True,
beta_spin=True,
max_spin_excitation=None,
generalized=generalized,
preserve_spin=preserve_spin,
reps=reps,
initial_state=initial_state,
)
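# Minimal construction sketch; the Jordan-Wigner mapper is an illustrative
# choice and its import path follows the 0.x-era qiskit-nature API this
# module targets.
#
#   from qiskit_nature.mappers.second_quantization import JordanWignerMapper
#
#   converter = QubitConverter(JordanWignerMapper())
#   ansatz = UCCSD(qubit_converter=converter,
#                  num_particles=(1, 1),
#                  num_spin_orbitals=4)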
| 38.791045
| 99
| 0.664486
|
fa478c20788796b0608d8b48c0789df71adbce75
| 15,079
|
py
|
Python
|
var/spack/repos/builtin/packages/openjdk/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/openjdk/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/openjdk/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import platform
import re
from spack.package import *
from spack.util.prefix import Prefix
# If you need to add a new version, please be aware that:
# - versions in the following dict are automatically added to the package
# - version tuple must be in the form (checksum, url)
# - checksum must be sha256
# - package key must be in the form '{os}-{arch}' where 'os' is in the
# format returned by platform.system() and 'arch' by platform.machine()
_versions = {
'17.0.3_7': {
'Linux-x86_64': ('81f5bed21077f9fbb04909b50391620c78b9a3c376593c0992934719c0de6b73', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.3%2B7/OpenJDK17U-jdk_x64_linux_hotspot_17.0.3_7.tar.gz'),
'Linux-aarch64': ('2e3c19c1707205c6b90cc04b416e8d83078ed98417d5a69dce3cf7dc0d7cfbca', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.3%2B7/OpenJDK17U-jdk_aarch64_linux_hotspot_17.0.3_7.tar.gz'),
'Linux-ppc64le': ('a04587018c9719dca21073f19d56b335c4985f41afe7d99b24852c1a94b917e5', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.3%2B7/OpenJDK17U-jdk_ppc64le_linux_hotspot_17.0.3_7.tar.gz'),
'Darwin-x86_64': ('a5db5927760d2864316354d98ff18d18bec2e72bfac59cd25a416ed67fa84594', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.3%2B7/OpenJDK17U-jdk_x64_mac_hotspot_17.0.3_7.tar.gz'),
'Darwin-arm64': ('ff42be4d7a348d0d7aee07749e4daec9f427dcc7eb46b343f8131e8f3906c05b', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.3%2B7/OpenJDK17U-jdk_aarch64_mac_hotspot_17.0.3_7.tar.gz'),
},
'17.0.2_8': {
'Linux-x86_64': ('288f34e3ba8a4838605636485d0365ce23e57d5f2f68997ac4c2e4c01967cd48', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_x64_linux_hotspot_17.0.2_8.tar.gz'),
'Linux-aarch64': ('302caf29f73481b2b914ba2b89705036010c65eb9bc8d7712b27d6e9bedf6200', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_aarch64_linux_hotspot_17.0.2_8.tar.gz'),
'Linux-ppc64le': ('532d831d6a977e821b7331ecf9ed995e5bbfe76f18a1b00ffa8dbb3a4e2887de', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_ppc64le_linux_hotspot_17.0.2_8.tar.gz'),
'Darwin-x86_64': ('3630e21a571b7180876bf08f85d0aac0bdbb3267b2ae9bd242f4933b21f9be32', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_x64_mac_hotspot_17.0.2_8.tar.gz'),
'Darwin-arm64': ('157518e999d712b541b883c6c167f8faabbef1d590da9fe7233541b4adb21ea4', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_aarch64_mac_hotspot_17.0.2_8.tar.gz')
},
'17.0.0_35': {
'Linux-x86_64': ('6f1335d9a7855159f982dac557420397be9aa85f3f7bc84e111d25871c02c0c7', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17%2B35/OpenJDK17-jdk_x64_linux_hotspot_17_35.tar.gz'),
'Linux-aarch64': ('e08e6d8c84da28a2c49ccd511f8835c329fbdd8e4faff662c58fa24cca74021d', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17%2B35/OpenJDK17-jdk_aarch64_linux_hotspot_17_35.tar.gz'),
'Linux-ppc64le': ('2e58f76fd332b73f323e47c73d0a81b76739debab067e7a32ed6abd73fd64c57', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17%2B35/OpenJDK17-jdk_ppc64le_linux_hotspot_17_35.tar.gz'),
'Darwin-x86_64': ('e9de8b1b62780fe99270a5b30f0645d7a91eded60438bcf836a05fa7b93c182f', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17%2B35/OpenJDK17-jdk_x64_mac_hotspot_17_35.tar.gz'),
'Darwin-arm64': ('910bb88543211c63298e5b49f7144ac4463f1d903926e94a89bfbf10163bbba1', 'https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17%2B35/OpenJDK17-jdk_aarch64_mac_hotspot_17_35.tar.gz')
},
'16.0.2': {
'Linux-x86_64': ('6c714ded7d881ca54970ec949e283f43d673a142fda1de79b646ddd619da9c0c', 'https://download.java.net/java/GA/jdk16.0.2/d4a915d82b4c4fbb9bde534da945d746/7/GPL/openjdk-16.0.2_linux-x64_bin.tar.gz'),
'Linux-aarch64': ('1ffb9c7748334945d9056b3324de3f797d906fce4dad86beea955153aa1e28fe', 'https://download.java.net/java/GA/jdk16.0.2/d4a915d82b4c4fbb9bde534da945d746/7/GPL/openjdk-16.0.2_linux-aarch64_bin.tar.gz'),
},
'11.0.15_10': {
'Linux-x86_64': ('5fdb4d5a1662f0cca73fec30f99e67662350b1fa61460fa72e91eb9f66b54d0b', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15%2B10/OpenJDK11U-jdk_x64_linux_hotspot_11.0.15_10.tar.gz'),
'Linux-aarch64': ('999fbd90b070f9896142f0eb28354abbeb367cbe49fd86885c626e2999189e0a', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15%2B10/OpenJDK11U-jdk_aarch64_linux_hotspot_11.0.15_10.tar.gz'),
'Linux-ppc64le': ('a8fba686f6eb8ae1d1a9566821dbd5a85a1108b96ad857fdbac5c1e4649fc56f', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15%2B10/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.15_10.tar.gz'),
'Darwin-x86_64': ('ebd8b9553a7b4514599bc0566e108915ce7dc95d29d49a9b10b8afe4ab7cc9db', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15%2B10/OpenJDK11U-jdk_x64_mac_hotspot_11.0.15_10.tar.gz'),
'Darwin-arm64': ('e84143a6c633a26aeefcb1fd5ad8dfb9e952cfec2a1af5c9d9b69f2390990dac', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15%2B10/OpenJDK11U-jdk_aarch64_mac_hotspot_11.0.15_10.tar.gz'),
},
'11.0.14.1_1': {
'Linux-x86_64': ('43fb84f8063ad9bf6b6d694a67b8f64c8827552b920ec5ce794dfe5602edffe7', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14.1%2B1/OpenJDK11U-jdk_x64_linux_hotspot_11.0.14.1_1.tar.gz'),
'Linux-aarch64': ('79572f5172c6a040591d34632f98a20ed148702bbce2f57649e8ac01c0d2e3db', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14.1%2B1/OpenJDK11U-jdk_aarch64_linux_hotspot_11.0.14.1_1.tar.gz'),
'Linux-ppc64le': ('9750e11721282a9afd18a07743f19c699b2b71ce20d02f3f0a906088b9ae6d9a', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14.1%2B1/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.14.1_1.tar.gz'),
'Darwin-x86_64': ('8c69808f5d9d209b195575e979de0e43cdf5d0f1acec1853a569601fe2c1f743', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.14.1%2B1/OpenJDK11U-jdk_x64_mac_hotspot_11.0.14.1_1.tar.gz')
},
'11.0.12_7': {
'Linux-x86_64': ('8770f600fc3b89bf331213c7aa21f8eedd9ca5d96036d1cd48cb2748a3dbefd2', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.12%2B7/OpenJDK11U-jdk_x64_linux_hotspot_11.0.12_7.tar.gz'),
'Linux-aarch64': ('105bdc12fcd54c551e8e8ac96bc82412467244c32063689c41cee29ceb7452a2', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.12%2B7/OpenJDK11U-jdk_aarch64_linux_hotspot_11.0.12_7.tar.gz'),
'Linux-ppc64le': ('234a9bafe029ea6cab5d46f9617b5d016a29faa187a42081d0e066f23647b7e5', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.12%2B7/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.12_7.tar.gz'),
'Darwin-x86_64': ('13d056ee9a57bf2d5b3af4504c8f8cf7a246c4dff78f96b70dd05dad98075855', 'https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.12%2B7/OpenJDK11U-jdk_x64_mac_hotspot_11.0.12_7.tar.gz')
},
'11.0.9.1_1': {
'Linux-ppc64le': ('d94b6b46a14ab0974b1c1b89661741126d8cf8a0068b471b8f5fa286a71636b1', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.9.1%2B1/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.9.1_1.tar.gz')},
'11.0.8_10': {
'Linux-x86_64': ('6e4cead158037cb7747ca47416474d4f408c9126be5b96f9befd532e0a762b47', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.8%2B10/OpenJDK11U-jdk_x64_linux_hotspot_11.0.8_10.tar.gz')},
'11.0.0-2020-01-01': {
'Linux-aarch64': ('05c7d9c90edacd853850fbb0f52f8aa482809d0452c599cb9fe0b28b3b4bf329', 'https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk11u-2020-01-01-06-13/OpenJDK11U-jdk_aarch64_linux_hotspot_2020-01-01-06-13.tar.gz')},
'11.0.2': {
'Linux-x86_64': ('99be79935354f5c0df1ad293620ea36d13f48ec3ea870c838f20c504c9668b57', 'https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz'),
'Darwin-x86_64': ('f365750d4be6111be8a62feda24e265d97536712bc51783162982b8ad96a70ee', 'https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_osx-x64_bin.tar.gz')
},
'11.0.1': {
'Linux-x86_64': ('7a6bb980b9c91c478421f865087ad2d69086a0583aeeb9e69204785e8e97dcfd', 'https://download.java.net/java/GA/jdk11/13/GPL/openjdk-11.0.1_linux-x64_bin.tar.gz'),
'Darwin-x86_64': ('fa07eee08fa0f3de541ee1770de0cdca2ae3876f3bd78c329f27e85c287cd070', 'https://download.java.net/java/GA/jdk11/13/GPL/openjdk-11.0.1_osx-x64_bin.tar.gz')
},
'1.8.0_265-b01': {
'Linux-x86_64': ('1285da6278f2d38a790a21148d7e683f20de0799c44b937043830ef6b57f58c4', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u265-b01/OpenJDK8U-jdk_x64_linux_hotspot_8u265b01.tar.gz')},
'1.8.0_191-b12': {
'Linux-aarch64': ('8eee0aede947b804f9a5f49c8a38b52aace8a30a9ebd9383b7d06042fb5a237c', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u191-b12/OpenJDK8U-jdk_aarch64_linux_hotspot_8u191b12.tar.gz')},
'1.8.0_222-b10': {
'Linux-x86_64': ('20cff719c6de43f8bb58c7f59e251da7c1fa2207897c9a4768c8c669716dc819', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u222-b10_openj9-0.15.1/OpenJDK8U-jdk_x64_linux_openj9_8u222b10_openj9-0.15.1.tar.gz')},
'1.8.0_202-b08': {
'Linux-x86_64': ('533dcd8d9ca15df231a1eb392fa713a66bca85a8e76d9b4ee30975f3823636b7', 'https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u202-b08/OpenJDK8U-jdk_x64_linux_openj9_8u202b08_openj9-0.12.0.tar.gz')},
'1.8.0_40-b25': {
'Linux-x86_64': ('79e96dce03a14271040023231a7d0ae374b755d48adf68bbdaec30294e4e2b88', 'https://download.java.net/openjdk/jdk8u40/ri/jdk_ri-8u40-b25-linux-x64-10_feb_2015.tar.gz')},
}
class Openjdk(Package):
"""The free and opensource java implementation"""
homepage = "https://jdk.java.net"
preferred_prefix = "11."
preferred_defined = False
for ver, packages in _versions.items():
key = "{0}-{1}".format(platform.system(), platform.machine())
pkg = packages.get(key)
if pkg:
is_preferred = not preferred_defined and ver.startswith(preferred_prefix)
if is_preferred:
preferred_defined = True
version(ver, sha256=pkg[0], url=pkg[1], preferred=is_preferred)
provides('java@17', when='@17.0:17')
provides('java@16', when='@16.0:16')
provides('java@11', when='@11.0:11')
provides('java@10', when='@10.0:10')
provides('java@9', when='@9.0:9')
provides('java@8', when='@1.8.0:1.8')
conflicts('target=ppc64:', msg='openjdk is not available for ppc64 (big endian)')
# FIXME:
# 1. `extends('java')` doesn't work, you need to use `extends('openjdk')`
# 2. Packages cannot extend multiple packages, see #987
# 3. Update `YamlFilesystemView.merge` to allow a Package to completely
# override how it is symlinked into a view prefix. Then, spack activate
# can symlink all *.jar files to `prefix.lib.ext`
extendable = True
executables = ['^java$']
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('-version', output=str, error=str)
# Make sure this is actually OpenJDK, not Oracle JDK
if 'openjdk' not in output:
return None
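        # Worked example: a build line such as "(build 17.0.3+7)" makes the
        # regex below return '17.0.3+7', which is reported as '17.0.3_7' to
        # match the version keys in _versions above.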
match = re.search(r'\(build (\S+)\)', output)
return match.group(1).replace('+', '_') if match else None
@property
def home(self):
"""Most of the time, ``JAVA_HOME`` is simply ``spec['java'].prefix``.
However, if the user is using an externally installed JDK, it may be
symlinked. For example, on macOS, the ``java`` executable can be found
in ``/usr/bin``, but ``JAVA_HOME`` is actually
``/Library/Java/JavaVirtualMachines/jdk-10.0.1.jdk/Contents/Home``.
Users may not know the actual installation directory and add ``/usr``
to their ``packages.yaml`` unknowingly. Run ``java_home`` if it exists
to determine exactly where it is installed. Specify which version we
are expecting in case multiple Java versions are installed.
See ``man java_home`` for more details."""
prefix = self.prefix
java_home = prefix.libexec.java_home
if os.path.exists(java_home):
java_home = Executable(java_home)
version = str(self.version.up_to(2))
prefix = java_home('--version', version, output=str).strip()
prefix = Prefix(prefix)
return prefix
@property
def libs(self):
"""Depending on the version number and whether the full JDK or just
the JRE was installed, Java libraries can be in several locations:
* ``lib/libjvm.so``
* ``jre/lib/libjvm.dylib``
Search recursively to find the correct library location."""
return find_libraries(['libjvm'], root=self.home, recursive=True)
def install(self, spec, prefix):
top_dir = 'Contents/Home/' if platform.system() == "Darwin" else '.'
install_tree(top_dir, prefix)
def setup_run_environment(self, env):
"""Set JAVA_HOME."""
env.set('JAVA_HOME', self.home)
def setup_dependent_build_environment(self, env, dependent_spec):
"""Set JAVA_HOME and CLASSPATH.
CLASSPATH contains the installation prefix for the extension and any
other Java extensions it depends on."""
env.set('JAVA_HOME', self.home)
class_paths = []
for d in dependent_spec.traverse(deptype=('build', 'run', 'test')):
if d.package.extends(self.spec):
class_paths.extend(find(d.prefix, '*.jar'))
classpath = os.pathsep.join(class_paths)
env.set('CLASSPATH', classpath)
def setup_dependent_run_environment(self, env, dependent_spec):
"""Set CLASSPATH.
CLASSPATH contains the installation prefix for the extension and any
other Java extensions it depends on."""
# For runtime environment set only the path for
# dependent_spec and prepend it to CLASSPATH
if dependent_spec.package.extends(self.spec):
class_paths = find(dependent_spec.prefix, '*.jar')
classpath = os.pathsep.join(class_paths)
env.prepend_path('CLASSPATH', classpath)
| 71.804762
| 253
| 0.7405
|
fec96f2d2ccebb6fb0a46b592b42dec506999a4e
| 228
|
py
|
Python
|
project/main/serializers/students.py
|
SYSU-MATHZH/drf-starter
|
68b0845840657ced0a22e97183c1f40f211d1652
|
[
"MIT"
] | 1
|
2021-02-16T09:42:40.000Z
|
2021-02-16T09:42:40.000Z
|
project/main/serializers/students.py
|
Demonhero0/2020TCM
|
028a7a1d22591c496923a011316353debc3290af
|
[
"MIT"
] | null | null | null |
project/main/serializers/students.py
|
Demonhero0/2020TCM
|
028a7a1d22591c496923a011316353debc3290af
|
[
"MIT"
] | 2
|
2019-06-14T07:38:08.000Z
|
2020-02-06T00:41:33.000Z
|
from rest_framework import serializers
from project.main.models import Student
class StudentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Student
fields = ('url', 'name', 'number')
| 28.5
| 64
| 0.736842
|
3b89d28910b7d4947763e17720b160901866e706
| 3,282
|
py
|
Python
|
third_party/catapult/dashboard/dashboard/find_change_points_exp.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | 1
|
2020-09-15T08:43:34.000Z
|
2020-09-15T08:43:34.000Z
|
third_party/catapult/dashboard/dashboard/find_change_points_exp.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/catapult/dashboard/dashboard/find_change_points_exp.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains experimental alerting functions."""
from dashboard import find_change_points
from dashboard.models import anomaly_config
def RunFindChangePoints(
test, series, find_change_points_func=find_change_points.FindChangePoints,
**kwargs):
"""Runs an change-point-finding function on part of a data series.
This function will be repeatedly called by SimulateAlertProcessingPipeline
in the bench_find_change_points module with the same Test entity but with more
and more points added to the end.
This is meant to imitate the current behavior of FindChangePoints on the perf
dashboard.
Args:
test: A graph_data.Test entity.
series: A list of ordered (x, y) pairs.
find_change_points_func: A function that has the same interface as
find_change_points.FindChangePoints.
**kwargs: Extra parameters to add to the anomaly config dict.
Returns:
A list of objects with the property x_value.
"""
# The anomaly threshold config dictionary determines how many points are
# analyzed and how far apart alerts should be, as well as other thresholds.
config = anomaly_config.GetAnomalyConfigDict(test)
config.update(kwargs)
# |series| contains all data so far in the Test, but typically when
# a test is processed (in find_anomalies.ProcessTest) only the last "window"
# of points is looked at. This window size depends on the test. To get the
# same behavior as the current default, we take only the last window.
series = _GetLastWindow(series, config.get('max_window_size'))
if len(series) < 2:
return []
# Find anomalies for the requested test.
change_points = find_change_points_func(series, **config)
return _RemoveKnownAnomalies(test, change_points)
def _GetLastWindow(series, window_size):
"""Returns the last "window" of points in the data series."""
if not window_size:
return series
return series[-window_size:]
def _RemoveKnownAnomalies(test, change_points):
"""Removes some anomalies and updates the given Test entity.
Args:
test: A Test entity, which has a property last_alerted_revision.
This property will be updated when this function is called.
change_points: A list of find_change_points.ChangePoint objects. It is
assumed that this list is sorted by the x_value property.
Returns:
A list of objects with the property x_value.
"""
# Avoid duplicates.
if test.last_alerted_revision:
change_points = [c for c in change_points
if c.x_value > test.last_alerted_revision]
if change_points:
# No need to call put(). The given Test entity will be re-used and we don't
# want to modify Test entity in the datastore.
test.last_alerted_revision = change_points[-1].x_value
return change_points
def FindChangePointsWithAbsoluteChangeThreshold(test, series):
"""Runs FindChangePoints, always setting an absolute change threshold."""
return RunFindChangePoints(
test, series,
max_window_size=50,
multiple_of_std_dev=3.5,
min_relative_change=0.1,
min_absolute_change=1.0,
min_segment_size=6)
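# Minimal calling sketch (illustrative only; in production the Test entity and
# the (revision, value) series both come from the dashboard datastore):
#
#   test = some_existing_test_entity  # hypothetical graph_data.Test
#   series = [(1, 10.0), (2, 10.1), (3, 10.0), (4, 20.0), (5, 20.2)]
#   change_points = FindChangePointsWithAbsoluteChangeThreshold(test, series)
#   alerted_revisions = [c.x_value for c in change_points]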
| 35.290323
| 80
| 0.746496
|
7b8199377279be1b4c242fbc6e5dc408e0bd8097
| 10,337
|
py
|
Python
|
hypha/apply/api/v1/review/views.py
|
slifty/hypha
|
93313933c26589858beb9a861e33431658cd3b24
|
[
"BSD-3-Clause"
] | 20
|
2021-04-08T16:38:49.000Z
|
2022-02-09T20:05:57.000Z
|
hypha/apply/api/v1/review/views.py
|
OpenTechFund/WebApp
|
d6e2bb21a39d1fa7566cb60fe19f372dabfa5f0f
|
[
"BSD-3-Clause"
] | 1,098
|
2017-12-15T11:23:03.000Z
|
2020-01-24T07:58:07.000Z
|
hypha/apply/api/v1/review/views.py
|
OpenTechFund/WebApp
|
d6e2bb21a39d1fa7566cb60fe19f372dabfa5f0f
|
[
"BSD-3-Clause"
] | 10
|
2021-02-23T12:00:26.000Z
|
2022-03-24T13:03:37.000Z
|
from django.shortcuts import get_object_or_404
from rest_framework import permissions, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from wagtail.core.blocks.field_block import RichTextBlock
from hypha.apply.activity.messaging import MESSAGES, messenger
from hypha.apply.funds.models import AssignedReviewers
from hypha.apply.review.models import Review, ReviewOpinion
from hypha.apply.stream_forms.models import BaseStreamForm
from ..mixin import SubmissionNestedMixin
from ..permissions import IsApplyStaffUser
from ..stream_serializers import WagtailSerializer
from .permissions import (
HasReviewCreatePermission,
HasReviewDeletePermission,
HasReviewDetailPermission,
HasReviewDraftPermission,
HasReviewEditPermission,
HasReviewOpinionPermission,
)
from .serializers import (
FieldSerializer,
ReviewOpinionWriteSerializer,
SubmissionReviewSerializer,
)
from .utils import get_review_form_fields_for_stage, review_workflow_actions
class SubmissionReviewViewSet(
BaseStreamForm,
WagtailSerializer,
SubmissionNestedMixin,
viewsets.GenericViewSet
):
permission_classes = (
permissions.IsAuthenticated, IsApplyStaffUser,
)
permission_classes_by_action = {
'create': [permissions.IsAuthenticated, HasReviewCreatePermission, IsApplyStaffUser, ],
'retrieve': [permissions.IsAuthenticated, HasReviewDetailPermission, IsApplyStaffUser, ],
'update': [permissions.IsAuthenticated, HasReviewEditPermission, IsApplyStaffUser, ],
'delete': [permissions.IsAuthenticated, HasReviewDeletePermission, IsApplyStaffUser, ],
'opinions': [permissions.IsAuthenticated, HasReviewOpinionPermission, IsApplyStaffUser, ],
'fields': [permissions.IsAuthenticated, HasReviewCreatePermission, IsApplyStaffUser, ],
'draft': [permissions.IsAuthenticated, HasReviewDraftPermission, IsApplyStaffUser, ],
}
serializer_class = SubmissionReviewSerializer
def get_permissions(self):
try:
# return permission_classes depending on `action`
return [permission() for permission in self.permission_classes_by_action[self.action]]
except KeyError:
            # action is not set; return default permission_classes
return [permission() for permission in self.permission_classes]
def get_defined_fields(self):
"""
Get form fields created for reviewing this submission.
These form fields will be used to get respective serializer fields.
"""
if self.action in ['retrieve', 'update', 'opinions']:
            # For the detail and edit APIs, use the form fields that were
            # used when the review was submitted.
review = self.get_object()
return review.form_fields
if self.action == 'draft':
review = self.get_review_by_reviewer()
return review.form_fields
submission = self.get_submission_object()
return get_review_form_fields_for_stage(submission)
def get_serializer_class(self):
"""
        Override get_serializer_class to pass the draft parameter
        when the request saves the review as a draft or the review
        being fetched was saved as a draft.
"""
if self.action == 'retrieve':
review = self.get_object()
draft = review.is_draft
elif self.action == 'draft':
draft = True
else:
draft = self.request.data.get('is_draft', False)
return super().get_serializer_class(draft)
def get_queryset(self):
submission = self.get_submission_object()
return Review.objects.filter(submission=submission, is_draft=False)
def get_object(self):
"""
        Get the review object by id. If not found, raise 404.
"""
queryset = self.get_queryset()
obj = get_object_or_404(queryset, id=self.kwargs['pk'])
self.check_object_permissions(self.request, obj)
return obj
def get_reviewer(self):
"""
Get the AssignedReviewers for the current user on a submission.
"""
submission = self.get_submission_object()
ar, _ = AssignedReviewers.objects.get_or_create_for_user(
submission=submission,
reviewer=self.request.user,
)
return ar
def create(self, request, *args, **kwargs):
"""
Create a review on a submission.
        Accepts POST data in the form `{field_id: value}`.
        `field_id` is the same id returned by the `/fields` API.
        `value` should be submitted with HTML tags so that the response can
        be displayed with the correct formatting; e.g. for a rich text field,
        the data is shown with the same formatting the user submitted.
        Accepts an optional `is_draft` parameter when the review is to be saved as a draft.
        Raises ValidationError if the user has already submitted a review.
"""
submission = self.get_submission_object()
ser = self.get_serializer(data=request.data)
ser.is_valid(raise_exception=True)
        instance, created = ser.Meta.model.objects.get_or_create(
            submission=submission, author=self.get_reviewer()
        )
        if not created and not instance.is_draft:
raise ValidationError({'detail': 'You have already posted a review for this submission'})
instance.form_fields = self.get_defined_fields()
instance.save()
ser.update(instance, ser.validated_data)
if not instance.is_draft:
messenger(
MESSAGES.NEW_REVIEW,
request=self.request,
user=self.request.user,
source=submission,
related=instance,
)
# Automatic workflow actions.
review_workflow_actions(self.request, submission)
ser = self.get_serializer(
self.get_review_data(instance)
)
return Response(ser.data, status=status.HTTP_201_CREATED)
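    # Example request body for `create` (illustrative; the field id placeholder below
    # is hypothetical and would come from the `/fields` API):
    #     POST .../reviews/  {"<field_id>": "<p>Looks good</p>", "is_draft": false}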
def get_review_data(self, review):
"""
Get review data which will be used for review detail api.
"""
review_data = review.form_data
review_data['id'] = review.id
review_data['score'] = review.score
review_data['opinions'] = review.opinions
review_data['is_draft'] = review.is_draft
for field_block in review.form_fields:
if isinstance(field_block.block, RichTextBlock):
review_data[field_block.id] = field_block.value.source
return review_data
def retrieve(self, request, *args, **kwargs):
"""
Get details of a review on a submission
"""
review = self.get_object()
ser = self.get_serializer(
self.get_review_data(review)
)
return Response(ser.data)
def update(self, request, *args, **kwargs):
"""
Update a review submitted on a submission.
"""
review = self.get_object()
ser = self.get_serializer(data=request.data)
ser.is_valid(raise_exception=True)
ser.update(review, ser.validated_data)
messenger(
MESSAGES.EDIT_REVIEW,
user=self.request.user,
request=self.request,
source=review.submission,
related=review,
)
# Automatic workflow actions.
review_workflow_actions(self.request, review.submission)
ser = self.get_serializer(
self.get_review_data(review)
)
return Response(ser.data)
def destroy(self, request, *args, **kwargs):
"""Delete a review on a submission"""
review = self.get_object()
messenger(
MESSAGES.DELETE_REVIEW,
user=request.user,
request=request,
source=review.submission,
related=review,
)
review.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def get_review_by_reviewer(self):
submission = self.get_submission_object()
review = Review.objects.get(
submission=submission, author__reviewer=self.request.user
)
return review
@action(detail=False, methods=['get'])
def draft(self, request, *args, **kwargs):
"""
        Returns the draft review submitted on a submission by the current user.
"""
try:
review = self.get_review_by_reviewer()
except Review.DoesNotExist:
return Response({})
if not review.is_draft:
return Response({})
ser = self.get_serializer(
self.get_review_data(review)
)
return Response(ser.data)
@action(detail=False, methods=['get'])
def fields(self, request, *args, **kwargs):
"""
        Lists details of all the form fields created by the admin for adding reviews.
        These field details are used by the frontend to render the review form.
"""
fields = self.get_form_fields()
fields = FieldSerializer(fields.items(), many=True)
return Response(fields.data)
@action(detail=True, methods=['post'])
def opinions(self, request, *args, **kwargs):
"""
        Adds an opinion on a review.
        Options are 0 (DISAGREE) and 1 (AGREE).
        The response is similar to the review detail API.
"""
review = self.get_object()
ser = ReviewOpinionWriteSerializer(data=request.data)
ser.is_valid(raise_exception=True)
opinion = ser.validated_data['opinion']
try:
review_opinion = ReviewOpinion.objects.get(
review=review,
author=self.get_reviewer()
)
except ReviewOpinion.DoesNotExist:
ReviewOpinion.objects.create(
review=review,
author=self.get_reviewer(),
opinion=opinion
)
else:
review_opinion.opinion = opinion
review_opinion.save()
ser = self.get_serializer(
self.get_review_data(review)
)
return Response(ser.data, status=status.HTTP_201_CREATED)
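    # Example request body for `opinions` (illustrative):
    #     POST .../reviews/<pk>/opinions/  {"opinion": 1}   # 1 = AGREE, 0 = DISAGREE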
| 36.786477
| 101
| 0.645448
|
b600bb45e029e9a8901872617254764a816cc2da
| 876
|
py
|
Python
|
Django Planner/users/forms.py
|
karsteneugene/Django-Planner
|
0dadda440266e282676b4a3b2f18e60a1ac3d813
|
[
"MIT"
] | null | null | null |
Django Planner/users/forms.py
|
karsteneugene/Django-Planner
|
0dadda440266e282676b4a3b2f18e60a1ac3d813
|
[
"MIT"
] | null | null | null |
Django Planner/users/forms.py
|
karsteneugene/Django-Planner
|
0dadda440266e282676b4a3b2f18e60a1ac3d813
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
# Uses the User model so the new user will be added to the table
class UserRegisterForm(UserCreationForm):
# Added an email field for registration
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
# Uses the User model so the logged in users will be able to update their information
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email']
# Uses the Profile model so the logged in users will be able to edit their profile picture
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['image']
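# Minimal usage sketch (assumes a profile view exists elsewhere in the app; names are illustrative):
#     u_form = UserUpdateForm(request.POST, instance=request.user)
#     p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
#     if u_form.is_valid() and p_form.is_valid():
#         u_form.save()
#         p_form.save()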
| 28.258065
| 90
| 0.707763
|
bd99f2d1991753c8d900a65edb8d29e66bcfc11a
| 1,416
|
py
|
Python
|
tests/test_ilp.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_ilp.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_ilp.py
|
FilipKlaesson/cops
|
67d2e5dd4534b3f3eec95b6cfda9d4c9c1746ef0
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import scipy.sparse as sp
from cops.optimization_wrappers import Constraint, solve_ilp
def import_gurobi():
try:
import gurobipy
return True
    except ModuleNotFoundError:
return False
def import_mosek():
try:
import mosek
return True
    except ModuleNotFoundError:
return False
def test_int_bin():
A_iq = sp.coo_matrix(np.array([[1, 0], [0, 1], [-1, 0], [0, -1]]))
b_iq = np.array([2.5, 2.5, 2.5, 2.5])
constr = Constraint(A_iq=A_iq, b_iq=b_iq)
c = np.array([-1, -1])
if import_gurobi():
sol_int = solve_ilp(c, constr, [0, 1], [], solver="gurobi")
np.testing.assert_equal(sol_int["x"], np.array([2, 2]))
sol_bin = solve_ilp(c, constr, [], [0, 1], solver="gurobi")
np.testing.assert_equal(sol_bin["x"], np.array([1, 1]))
sol_mix = solve_ilp(c, constr, [0], [1], solver="gurobi")
np.testing.assert_equal(sol_mix["x"], np.array([2, 1]))
if import_mosek():
sol_int = solve_ilp(c, constr, [0, 1], [], solver="mosek")
np.testing.assert_equal(sol_int["x"], np.array([2, 2]))
sol_bin = solve_ilp(c, constr, [], [0, 1], solver="mosek")
np.testing.assert_equal(sol_bin["x"], np.array([1, 1]))
sol_mix = solve_ilp(c, constr, [0], [1], solver="mosek")
np.testing.assert_equal(sol_mix["x"], np.array([2, 1]))
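# Commentary on the asserted solutions (added for clarity): the four inequality rows
# encode -2.5 <= x_i <= 2.5 for both variables, and c = [-1, -1] rewards making
# x_0 + x_1 as large as possible, so the pure-integer optimum is (2, 2), the
# pure-binary optimum is (1, 1), and the mixed case (x_0 integer, x_1 binary) is (2, 1).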
| 30.12766
| 70
| 0.588277
|
fff7e5e1e6556e9aad6f5f3a6d9af14019b43c81
| 59,008
|
py
|
Python
|
uproot/model.py
|
wiso/uproot4
|
b1cf6e6b48e75dcd2e1f0c30668fb8583fe0382d
|
[
"BSD-3-Clause"
] | null | null | null |
uproot/model.py
|
wiso/uproot4
|
b1cf6e6b48e75dcd2e1f0c30668fb8583fe0382d
|
[
"BSD-3-Clause"
] | null | null | null |
uproot/model.py
|
wiso/uproot4
|
b1cf6e6b48e75dcd2e1f0c30668fb8583fe0382d
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines utilities for modeling C++ objects as Python objects and the
:doc:`uproot.model.Model` class, which is the superclass of all objects that
are read from ROOT files.
The :doc:`uproot.model.VersionedModel` class is the superclass of all models
whose deserialization routines are specialized by ROOT class version.
A :doc:`uproot.model.DispatchByVersion` subclass selects a versioned model
after reading its version bytes.
The :doc:`uproot.model.UnknownClass` and
:doc:`uproot.model.UnknownClassVersion` are placeholders for data that could
not be modeled, either because the class has no streamer or no streamer for its
version.
"""
from __future__ import absolute_import
import re
import sys
import weakref
import numpy
import uproot
bootstrap_classnames = [
"TStreamerInfo",
"TStreamerElement",
"TStreamerArtificial",
"TStreamerBase",
"TStreamerBasicPointer",
"TStreamerBasicType",
"TStreamerLoop",
"TStreamerObject",
"TStreamerObjectAny",
"TStreamerObjectAnyPointer",
"TStreamerObjectPointer",
"TStreamerSTL",
"TStreamerSTLstring",
"TStreamerString",
"TList",
"TObjArray",
"TObjString",
]
def bootstrap_classes():
"""
Returns the basic classes that are needed to load other classes (streamers,
TList, TObjArray, TObjString).
"""
import uproot.streamers
import uproot.models.TList
import uproot.models.TObjArray
import uproot.models.TObjString
custom_classes = {}
for classname in bootstrap_classnames:
custom_classes[classname] = uproot.classes[classname]
return custom_classes
def reset_classes():
"""
Removes all classes from ``uproot.classes`` and ``uproot.unknown_classes``
and refills ``uproot.classes`` with original versions of these classes.
"""
if uproot._util.py2:
reload = __builtins__["reload"]
else:
from importlib import reload
uproot.classes = {}
uproot.unknown_classes = {}
reload(uproot.streamers)
reload(uproot.models.TObject)
reload(uproot.models.TString)
reload(uproot.models.TArray)
reload(uproot.models.TNamed)
reload(uproot.models.TList)
reload(uproot.models.THashList)
reload(uproot.models.TObjArray)
reload(uproot.models.TObjString)
reload(uproot.models.TAtt)
reload(uproot.models.TRef)
reload(uproot.models.TTree)
reload(uproot.models.TBranch)
reload(uproot.models.TLeaf)
reload(uproot.models.TBasket)
reload(uproot.models.RNTuple)
_classname_regularize = re.compile(r"\s*(<|>|::)\s*")
_classname_encode_pattern = re.compile(br"[^a-zA-Z0-9]+")
_classname_decode_antiversion = re.compile(br".*_([0-9a-f][0-9a-f])+_v([0-9]+)$")
_classname_decode_version = re.compile(br".*_v([0-9]+)$")
_classname_decode_pattern = re.compile(br"_(([0-9a-f][0-9a-f])+)_")
if uproot._util.py2:
def _classname_decode_convert(hex_characters):
g = hex_characters.group(1)
return b"".join(
chr(int(g[i : i + 2], 16)) for i in uproot._util.range(0, len(g), 2)
)
def _classname_encode_convert(bad_characters):
g = bad_characters.group(0)
return b"_" + b"".join("{0:02x}".format(ord(x)).encode() for x in g) + b"_"
else:
def _classname_decode_convert(hex_characters):
g = hex_characters.group(1)
return bytes(int(g[i : i + 2], 16) for i in uproot._util.range(0, len(g), 2))
def _classname_encode_convert(bad_characters):
g = bad_characters.group(0)
return b"_" + b"".join("{0:02x}".format(x).encode() for x in g) + b"_"
def classname_regularize(classname):
"""
Removes spaces around ``<``, ``>``, and ``::`` characters in a classname
so that they can be matched by string name.
If ``classname`` is None, this function returns None. Otherwise, it must be
a string and it returns a string.
"""
if classname is None:
return classname
else:
return re.sub(_classname_regularize, r"\1", classname)
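# Illustrative example: classname_regularize("std::vector< int >") returns
# "std::vector<int>"; passing None returns None.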
def classname_decode(encoded_classname):
"""
Converts a Python (encoded) classname, such as ``Model_Some_3a3a_Thing``
into a C++ (decoded) classname, such as ``Some::Thing``.
C++ classnames can include namespace delimiters (``::``) and template
arguments (``<`` and ``>``), which have to be translated into
``[A-Za-z_][A-Za-z0-9_]*`` for Python. Non-conforming characters and also
underscores are translated to their hexadecimal equivalents and surrounded
by underscores. Additionally, Python models of C++ classes are prepended
with ``Model_`` (or ``Unknown_`` if a streamer isn't found).
"""
if encoded_classname.startswith("Unknown_"):
raw = encoded_classname[8:].encode()
elif encoded_classname.startswith("Model_"):
raw = encoded_classname[6:].encode()
else:
raise ValueError("not an encoded classname: {0}".format(encoded_classname))
if _classname_decode_antiversion.match(raw) is not None:
version = None
else:
m = _classname_decode_version.match(raw)
if m is None:
version = None
else:
version = int(m.group(1))
raw = raw[: -len(m.group(1)) - 2]
out = _classname_decode_pattern.sub(_classname_decode_convert, raw)
return out.decode(), version
def classname_encode(classname, version=None, unknown=False):
"""
Converts a C++ (decoded) classname, such as ``Some::Thing`` into a Python
classname (encoded), such as ``Model_Some_3a3a_Thing``.
If ``version`` is a number such as ``2``, the Python name is suffixed by
version, such as ``Model_Some_3a3a_Thing_v2``.
If ``unknown`` is True, the ``Model_`` prefix becomes ``Unknown_``.
C++ classnames can include namespace delimiters (``::``) and template
arguments (``<`` and ``>``), which have to be translated into
``[A-Za-z_][A-Za-z0-9_]*`` for Python. Non-conforming characters and also
underscores are translated to their hexadecimal equivalents and surrounded
by underscores. Additionally, Python models of C++ classes are prepended
with ``Model_`` (or ``Unknown_`` if a streamer isn't found).
"""
if unknown:
prefix = "Unknown_"
else:
prefix = "Model_"
if classname.startswith(prefix):
raise ValueError("classname is already encoded: {0}".format(classname))
if version is None:
v = ""
else:
v = "_v" + str(version)
raw = classname.encode()
out = _classname_encode_pattern.sub(_classname_encode_convert, raw)
return prefix + out.decode() + v
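# Round-trip example based on the docstrings above (illustrative):
#     classname_encode("Some::Thing", 2)           == "Model_Some_3a3a_Thing_v2"
#     classname_decode("Model_Some_3a3a_Thing_v2") == ("Some::Thing", 2)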
def classname_version(encoded_classname):
"""
Extracts a version number from a Python (encoded) classname, if it has one.
For example, ``Model_Some_3a3a_Thing_v2`` returns ``2``.
A name without a version number, such as ``Model_Some_3a3a_Thing``, returns
None.
"""
raw = encoded_classname.encode()
if _classname_decode_antiversion.match(raw) is not None:
return None
else:
m = _classname_decode_version.match(raw)
if m is None:
return None
else:
return int(m.group(1))
def class_named(classname, version=None, custom_classes=None):
"""
Returns a class with a given C++ (decoded) classname.
If ``version`` is None, no attempt is made to find a specific version.
* If the class is a :doc:`uproot.model.DispatchByVersion`, then this is
object returned.
* If the class is a versionless model, then this is the object returned.
If ``version`` is an integer, an attempt is made to find the specific
version.
* If the class is a :doc:`uproot.model.DispatchByVersion`, then it is
queried for a versioned model.
* If the class is a versionless model, then this is the object returned.
If ``custom_classes`` are provided, then these are searched (exclusively)
for the class. If ``custom_classes`` is None, then ``uproot.classes`` is
used.
No classes are created if a class is not found (an error is raised).
"""
    if custom_classes is None:
        classes = uproot.classes
        where = "uproot.classes"
    else:
        classes = custom_classes
        where = "the 'custom_classes' dict"
cls = classes.get(classname)
if cls is None:
raise ValueError("no class named {0} in {1}".format(classname, where))
if version is not None and isinstance(cls, DispatchByVersion):
versioned_cls = cls.class_of_version(version)
if versioned_cls is not None:
return versioned_cls
else:
raise ValueError(
"no class named {0} with version {1} in {2}".format(
classname, version, where
)
)
else:
return cls
def has_class_named(classname, version=None, custom_classes=None):
"""
Returns True if :doc:`uproot.model.class_named` would find a class,
False if it would raise an exception.
"""
cls = maybe_custom_classes(custom_classes).get(classname)
if cls is None:
return False
if version is not None and isinstance(cls, DispatchByVersion):
return cls.has_version(version)
else:
return True
def maybe_custom_classes(custom_classes):
"""
Passes through ``custom_classes`` if it is not None; returns
``uproot.classes`` otherwise.
"""
if custom_classes is None:
return uproot.classes
else:
return custom_classes
class Model(object):
"""
Abstract class for all objects extracted from ROOT files (except for
:doc:`uproot.reading.ReadOnlyFile`, :doc:`uproot.reading.ReadOnlyDirectory`,
and :doc:`uproot.reading.ReadOnlyKey`).
A model is instantiated from a file using the :ref:`uproot.model.Model.read`
classmethod or synthetically using the :ref:`uproot.model.Model.empty`
classmethod, not through a normal constructor.
Models point back to the file from which they were created, though only a
few classes (named in ``uproot.reading.must_be_attached``) have an open,
readable file attached; the rest have a :doc:`uproot.reading.DetachedFile`
with information about the file, while not holding the file open.
Uproot recognizes *some* of ROOT's thousands of classes, by way of methods
and properties defined in :doc:`uproot.behaviors`. Examples include
* :doc:`uproot.behaviors.TTree.TTree`
* :doc:`uproot.behaviors.TH1.TH1`
These classes are the most convenient to work with and have specialized
documentation.
Classes that don't have any predefined behaviors are still usable through
their member data.
* :ref:`uproot.model.Model.members`: a dict of C++ member names and values
directly in this class.
* :ref:`uproot.model.Model.all_members`: a dict of C++ member names and
values in this class or any superclasses.
* :ref:`uproot.model.Model.member`: method that takes a C++ member name
and returns its value (from this or any superclass).
* :ref:`uproot.model.Model.has_member`: method that takes a C++ member
name and returns True if it exists (in this or any superclass), False
otherwise.
Accessing a data structure through its C++ members may be a prelude to
adding custom behaviors for it. Before we know what conveniences to add, we
need to know how they'll be used: this information comes from the user
community.
Pythonic models don't follow the same class inheritance tree as their C++
counterparts: most of them are direct subclasses of
:doc:`uproot.model.Model`, :doc:`uproot.model.DispatchByVersion`, or
:doc:`uproot.model.VersionedModel`. To separate an object's members
from its superclass members, a model instance is created for each and
the superclass parts are included in a list called
:ref:`uproot.model.Model.bases`.
"""
class_streamer = None
behaviors = ()
def __repr__(self):
if self.class_version is None:
version = ""
else:
version = " (version {0})".format(self.class_version)
return "<{0}{1} at 0x{2:012x}>".format(self.classname, version, id(self))
def __enter__(self):
if isinstance(self._file, uproot.reading.ReadOnlyFile):
self._file.source.__enter__()
return self
def __exit__(self, exception_type, exception_value, traceback):
if isinstance(self._file, uproot.reading.ReadOnlyFile):
self._file.source.__exit__(exception_type, exception_value, traceback)
@property
def classname(self):
"""
The C++ (decoded) classname of the modeled class.
See :doc:`uproot.model.classname_decode`,
:doc:`uproot.model.classname_encode`, and
:doc:`uproot.model.classname_version`.
"""
return classname_decode(self.encoded_classname)[0]
@property
def encoded_classname(self):
"""
The Python (encoded) classname of the modeled class. May or may not
include version.
See :doc:`uproot.model.classname_decode`,
:doc:`uproot.model.classname_encode`, and
:doc:`uproot.model.classname_version`.
"""
return type(self).__name__
@property
def class_version(self):
"""
The version number of the modeled class (int) if any; None otherwise.
See :doc:`uproot.model.classname_decode`,
:doc:`uproot.model.classname_encode`, and
:doc:`uproot.model.classname_version`.
"""
return classname_decode(self.encoded_classname)[1]
@property
def cursor(self):
"""
A cursor pointing to the start of this instance in the byte stream
(before :ref:`uproot.model.Model.read_numbytes_version`).
"""
return self._cursor
@property
def file(self):
"""
A :doc:`uproot.reading.ReadOnlyFile`, which may be open and readable,
or a :doc:`uproot.reading.DetachedFile`, which only contains
information about the original file (not an open file handle).
"""
return self._file
def close(self):
"""
Closes the file from which this object is derived, if such a file is
still attached (i.e. not :doc:`uproot.reading.DetachedFile`).
"""
if isinstance(self._file, uproot.reading.ReadOnlyFile):
self._file.close()
@property
def closed(self):
"""
True if the associated file is known to be closed; False if it is known
to be open. If the associated file is detached
(:doc:`uproot.reading.DetachedFile`), then the value is None.
"""
if isinstance(self._file, uproot.reading.ReadOnlyFile):
return self._file.closed
else:
return None
@property
def parent(self):
"""
The object that was deserialized before this one in recursive descent,
usually the containing object (or the container's container).
"""
return self._parent
@property
def concrete(self):
"""
The Python instance corresponding to the concrete (instantiated) class
in C++, which is ``self`` if this is the concrete class or another
object if this is actually a holder of superclass members for that other
object (i.e. if this object is in the other's
:ref:`uproot.model.Model.bases`).
"""
if self._concrete is None:
return self
return self._concrete
@property
def members(self):
"""
A dict of C++ member data directly associated with this class (i.e. not
its superclasses). For all members, see
:ref:`uproot.model.Model.all_members`.
"""
return self._members
@property
def all_members(self):
"""
A dict of C++ member data for this class and its superclasses. For only
direct members, see :ref:`uproot.model.Model.members`.
"""
out = {}
for base in self._bases:
out.update(base.all_members)
out.update(self._members)
return out
def has_member(self, name, all=True):
"""
Returns True if calling :ref:`uproot.model.Model.member` with the same
arguments would return a value; False if the member is missing.
"""
if name in self._members:
return True
if all:
for base in reversed(self._bases):
if base.has_member(name, all=True):
return True
return False
def member(self, name, all=True, none_if_missing=False):
"""
Args:
name (str): The name of the member datum to retrieve.
all (bool): If True, recursively search all superclasses in
:ref:`uproot.model.Model.bases`. Otherwise, search the
direct class only.
none_if_missing (bool): If a member datum doesn't exist in the
search path, ``none_if_missing=True`` has this function return
None, but ``none_if_missing=False`` would have it raise an
exception. Note that None is a possible value for some member
data.
Returns a C++ member datum by name.
"""
if name in self._members:
return self._members[name]
if all:
for base in reversed(self._bases):
if base.has_member(name, all=True):
return base.member(name, all=True)
if none_if_missing:
return None
else:
raise uproot.KeyInFileError(
name,
because="""{0}.{1} has only the following members:
{2}
""".format(
type(self).__module__,
type(self).__name__,
", ".join(repr(x) for x in self.all_members),
),
file_path=getattr(self._file, "file_path"),
)
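    # Usage sketch (the member name "fTitle" is illustrative; real names depend
    # on the class streamer):
    #     obj.member("fTitle")                        # searches superclasses too
    #     obj.member("fTitle", all=False)             # this class only
    #     obj.member("fTitle", none_if_missing=True)  # None instead of an error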
@property
def bases(self):
"""
List of :doc:`uproot.model.Model` objects representing superclass data
for this object in the order given in C++ (opposite method resolution
order).
* If this object has no superclasses, ``bases`` is empty.
* If it has one superclass, which itself might have superclasses,
``bases`` has length 1.
* Only if this object *multiply inherits* from more than one superclass
at the same level does ``bases`` have length greater than 1.
Since multiple inheritance is usually avoided, ``bases`` rarely has
length greater than 1. A linear chain of superclasses deriving from
super-superclasses is represented by ``bases`` containing an object
whose ``bases`` contains objects.
"""
return self._bases
def base(self, *cls):
"""
Extracts instances from :ref:`uproot.model.Model.bases` by Python class
type.
The ``cls`` arguments may be Python classes or C++ classname strings to match.
"""
cpp_names = [classname_regularize(x) for x in cls if uproot._util.isstr(x)]
py_types = tuple(x for x in cls if not uproot._util.isstr(x))
out = []
for x in getattr(self, "_bases", []):
if isinstance(x, py_types) or any(
getattr(x, "classname", None) == n for n in cpp_names
):
out.append(x)
if isinstance(x, Model):
out.extend(x.base(*cls))
return out
def is_instance(self, *cls):
"""
Returns True if this object matches a given type in the C++ class hierarchy.
The ``cls`` arguments may be Python classes or C++ classname strings to match.
"""
cpp_names = [classname_regularize(x) for x in cls if uproot._util.isstr(x)]
py_types = tuple(x for x in cls if not uproot._util.isstr(x))
if isinstance(self, py_types) or any(self.classname == n for n in cpp_names):
return True
else:
return len(self.base(*cls)) != 0
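    # Usage sketch (the classname string "TNamed" is illustrative):
    #     obj.is_instance("TNamed")   # True if this object or any superclass part matches
    #     obj.base("TNamed")          # the matching superclass Model instances, if any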
@property
def num_bytes(self):
"""
Number of bytes expected in the (uncompressed) serialization of this
instance.
This value may be None (unknown before reading) or an integer.
If the value is an integer and the object exists (no exceptions in
:ref:`uproot.model.Model.read`), then the expected number of bytes
        agreed with the actual number of bytes, and this number is reliable.
If this object is re-serialized, it won't necessarily occupy the same
number of bytes.
"""
return self._num_bytes
@property
def instance_version(self):
"""
Version of this instance as read from the byte stream.
If this model is versioned (:doc:`uproot.model.VersionedModel`), the
``instance_version`` ought to be equal to the
:ref:`uproot.model.Model.class_version`.
If this model is versionless, the ``instance_version`` contains new
information about the actual version deserialized.
"""
return self._instance_version
@property
def is_memberwise(self):
"""
True if the object was serialized in ROOT's memberwise format; False
otherwise.
"""
return self._is_memberwise
@classmethod
def awkward_form(
cls, file, index_format="i64", header=False, tobject_header=True, breadcrumbs=()
):
"""
Args:
cls (subclass of :doc:`uproot.model.Model`): This class.
file (:doc:`uproot.reading.ReadOnlyFile`): File to use to generate
:doc:`uproot.model.Model` classes from its
:ref:`uproot.reading.ReadOnlyFile.streamers` and ``file_path``
for error messages.
index_format (str): Format to use for indexes of the
``awkward.forms.Form``; may be ``"i32"``, ``"u32"``, or
``"i64"``.
header (bool): If True, include headers in the Form's ``"uproot"``
parameters.
tobject_header (bool): If True, include headers for ``TObject``
classes in the Form's ``"uproot"`` parameters.
breadcrumbs (tuple of class objects): Used to check for recursion.
Types that contain themselves cannot be Awkward Arrays because the
depth of instances is unknown.
        Returns the ``awkward.forms.Form`` to use to put objects of this type
        in an Awkward Array.
"""
raise uproot.interpretation.objects.CannotBeAwkward(
classname_decode(cls.__name__)[0]
)
@classmethod
def strided_interpretation(
cls, file, header=False, tobject_header=True, breadcrumbs=(), original=None
):
"""
Args:
cls (subclass of :doc:`uproot.model.Model`): This class.
file (:doc:`uproot.reading.ReadOnlyFile`): File to use to generate
:doc:`uproot.model.Model` classes from its
:ref:`uproot.reading.ReadOnlyFile.streamers` and ``file_path``
for error messages.
header (bool): If True, assume the outermost object has a header.
tobject_header (bool): If True, assume that ``TObjects`` have headers.
original (None, :doc:`uproot.model.Model`, or :doc:`uproot.containers.Container`): The
original, non-strided model or container.
breadcrumbs (tuple of class objects): Used to check for recursion.
Types that contain themselves cannot be strided because the
depth of instances is unknown.
Returns a list of (str, ``numpy.dtype``) pairs to build a
:doc:`uproot.interpretation.objects.AsStridedObjects` interpretation.
"""
raise uproot.interpretation.objects.CannotBeStrided(
classname_decode(cls.__name__)[0]
)
def tojson(self):
"""
Serializes this object in its ROOT JSON form (as Python lists and dicts,
which can be passed to ``json.dump`` or ``json.dumps``).
"""
out = {}
for base in self._bases:
tmp = base.tojson()
if isinstance(tmp, dict):
out.update(tmp)
for k, v in self.members.items():
if isinstance(v, Model):
out[k] = v.tojson()
elif isinstance(v, (numpy.number, numpy.ndarray)):
out[k] = v.tolist()
else:
out[k] = v
out["_typename"] = self.classname
return out
@classmethod
def empty(cls):
"""
Creates a model instance (of subclass ``cls``) with no data; all
required attributes are None or empty.
"""
self = cls.__new__(cls)
self._cursor = None
self._file = None
self._parent = None
self._members = {}
self._bases = []
self._num_bytes = None
self._instance_version = None
self._is_memberwise = False
return self
@classmethod
def read(cls, chunk, cursor, context, file, selffile, parent, concrete=None):
"""
Args:
cls (subclass of :doc:`uproot.model.Model`): Class to instantiate.
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
cursor (:doc:`uproot.source.cursor.Cursor`): Current position in
that ``chunk``.
context (dict): Auxiliary data used in deserialization.
file (:doc:`uproot.reading.ReadOnlyFile`): An open file object,
capable of generating new :doc:`uproot.model.Model` classes
from its :ref:`uproot.reading.ReadOnlyFile.streamers`.
selffile (:doc:`uproot.reading.CommonFileMethods`): A possibly
:doc:`uproot.reading.DetachedFile` associated with this object.
parent (None or calling object): The previous ``read`` in the
recursive descent.
concrete (None or :doc:`uproot.model.Model` instance): If None,
this model corresponds to the concrete (instantiated) class in
C++. Otherwise, this model represents a superclass part of the
object, and ``concrete`` points to the concrete instance.
Creates a model instance by reading data from a file.
"""
self = cls.__new__(cls)
self._cursor = cursor.copy()
self._file = selffile
self._parent = parent
self._concrete = concrete
self._members = {}
self._bases = []
self._num_bytes = None
self._instance_version = None
self._is_memberwise = False
old_breadcrumbs = context.get("breadcrumbs", ())
context["breadcrumbs"] = old_breadcrumbs + (self,)
self.hook_before_read(chunk=chunk, cursor=cursor, context=context, file=file)
if context.get("reading", True):
self.read_numbytes_version(chunk, cursor, context)
if (
issubclass(cls, VersionedModel)
and self._instance_version != classname_version(cls.__name__)
and self._instance_version is not None
):
correct_cls = file.class_named(self.classname, self._instance_version)
if classname_version(correct_cls.__name__) != classname_version(
cls.__name__
):
cursor.move_to(self._cursor.index)
context["breadcrumbs"] = old_breadcrumbs
return correct_cls.read(
chunk,
cursor,
context,
file,
selffile,
parent,
concrete=concrete,
)
if context.get("in_TBranch", False):
if self._num_bytes is None and self._instance_version != self.class_version:
self._instance_version = None
cursor = self._cursor
elif self._instance_version == 0:
cursor.skip(4)
if context.get("reading", True):
self.hook_before_read_members(
chunk=chunk, cursor=cursor, context=context, file=file
)
self.read_members(chunk, cursor, context, file)
self.hook_after_read_members(
chunk=chunk, cursor=cursor, context=context, file=file
)
self.check_numbytes(chunk, cursor, context)
self.hook_before_postprocess(
chunk=chunk, cursor=cursor, context=context, file=file
)
out = self.postprocess(chunk, cursor, context, file)
context["breadcrumbs"] = old_breadcrumbs
return out
def read_numbytes_version(self, chunk, cursor, context):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
cursor (:doc:`uproot.source.cursor.Cursor`): Current position in
that ``chunk``.
context (dict): Auxiliary data used in deserialization.
Reads the number of bytes and instance version from the byte stream,
which is usually 6 bytes (4 + 2). Bits with special meanings are
appropriately masked out.
Some types don't have a 6-byte header or handle it differently; in
those cases, this method should be overridden.
"""
import uproot.deserialization
(
self._num_bytes,
self._instance_version,
self._is_memberwise,
) = uproot.deserialization.numbytes_version(chunk, cursor, context)
def read_members(self, chunk, cursor, context, file):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
cursor (:doc:`uproot.source.cursor.Cursor`): Current position in
that ``chunk``.
context (dict): Auxiliary data used in deserialization.
file (:doc:`uproot.reading.ReadOnlyFile`): An open file object,
capable of generating new :doc:`uproot.model.Model` classes
from its :ref:`uproot.reading.ReadOnlyFile.streamers`.
Reads the member data for this class. The abstract class
:doc:`uproot.model.Model` has an empty ``read_members`` method; this
*must* be overridden by subclasses.
"""
pass
def check_numbytes(self, chunk, cursor, context):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
cursor (:doc:`uproot.source.cursor.Cursor`): Current position in
that ``chunk``.
context (dict): Auxiliary data used in deserialization.
Reads nothing; checks the expected number of bytes against the actual
movement of the ``cursor`` at the end of the object, possibly raising
a :doc:`uproot.deserialization.DeserializationError` exception.
If :ref:`uproot.model.Model.num_bytes` is None, this method does
nothing.
It is *possible* that a subclass would override this method, but not
likely.
"""
import uproot.deserialization
uproot.deserialization.numbytes_check(
chunk,
self._cursor,
cursor,
self._num_bytes,
self.classname,
context,
getattr(self._file, "file_path"),
)
def postprocess(self, chunk, cursor, context, file):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
cursor (:doc:`uproot.source.cursor.Cursor`): Current position in
that ``chunk``.
context (dict): Auxiliary data used in deserialization.
file (:doc:`uproot.reading.ReadOnlyFile`): An open file object,
capable of generating new :doc:`uproot.model.Model` classes
from its :ref:`uproot.reading.ReadOnlyFile.streamers`.
Called for any additional processing after the object has been fully
read.
The return value from this method is the object that actually represents
the ROOT data, which might be a different instance or even a different
type from this class. The default in :doc:`uproot.model.Model` is to
return ``self``.
Note that for versioned models,
:ref:`uproot.model.VersionedModel.postprocess` is called first, then
:ref:`uproot.model.DispatchByVersion.postprocess` is called on its
output, allowing a :doc:`uproot.model.DispatchByVersion` to refine all
data of its type, regardless of version.
"""
return self
def hook_before_read(self, **kwargs):
"""
Called in :ref:`uproot.model.Model.read`, before any data have been
read.
"""
pass
def hook_before_read_members(self, **kwargs):
"""
Called in :ref:`uproot.model.Model.read`, after
:ref:`uproot.model.Model.read_numbytes_version` and before
:ref:`uproot.model.Model.read_members`.
"""
pass
def hook_after_read_members(self, **kwargs):
"""
Called in :ref:`uproot.model.Model.read`, after
:ref:`uproot.model.Model.read_members` and before
:ref:`uproot.model.Model.check_numbytes`.
"""
pass
def hook_before_postprocess(self, **kwargs):
"""
Called in :ref:`uproot.model.Model.read`, after
:ref:`uproot.model.Model.check_numbytes` and before
:ref:`uproot.model.Model.postprocess`.
"""
pass
class VersionedModel(Model):
"""
A Python class that models a specific version of a ROOT C++ class.
Classes that inherit directly from :doc:`uproot.model.Model` are versionless,
classes that inherit from :doc:`uproot.model.VersionedModel` depend on
version.
Note that automatically generated :doc:`uproot.model.VersionedModel` classes
are placed in the ``uproot.dynamic`` namespace. This namespace can generate
:doc:`uproot.model.DynamicModel` classes on demand in Python 3.7 and above,
which automatically generated :doc:`uproot.model.VersionedModel` classes
rely upon to be pickleable. Therefore, ROOT object types without predefined
:doc:`uproot.model.Model` classes cannot be pickled in Python versions
before 3.7.
"""
def __getstate__(self):
return (
{
"base_names_versions": self.base_names_versions,
"member_names": self.member_names,
"class_flags": self.class_flags,
"class_code": self.class_code,
"class_streamer": self.class_streamer,
"behaviors": self.behaviors,
},
dict(self.__dict__),
)
def __setstate__(self, state):
class_data, instance_data = state
self.__dict__.update(instance_data)
class DispatchByVersion(object):
"""
A Python class that models all versions of a ROOT C++ class by maintaining
a dict of :doc:`uproot.model.VersionedModel` classes.
The :ref:`uproot.model.DispatchByVersion.read` classmethod reads the
instance version number from the byte stream, backs up the
:doc:`uproot.source.cursor.Cursor` to the starting position, and invokes
the appropriate :doc:`uproot.model.VersionedModel`'s ``read`` classmethod.
If a :doc:`uproot.model.VersionedModel` does not exist for the specified
version, the ``file``'s ``TStreamerInfo`` is queried to attempt to create
one, and failing that, an :doc:`uproot.model.UnknownClassVersion` is
created instead.
Note that :doc:`uproot.model.DispatchByVersion` is not a subclass of
:doc:`uproot.model.Model`. Instances of this class are not usable as
stand-ins for ROOT data.
"""
@classmethod
def awkward_form(
cls, file, index_format="i64", header=False, tobject_header=True, breadcrumbs=()
):
"""
Args:
cls (subclass of :doc:`uproot.model.DispatchByVersion`): This class.
file (:doc:`uproot.reading.ReadOnlyFile`): File to use to generate
:doc:`uproot.model.Model` classes from its
:ref:`uproot.reading.ReadOnlyFile.streamers` and ``file_path``
for error messages.
index_format (str): Format to use for indexes of the
``awkward.forms.Form``; may be ``"i32"``, ``"u32"``, or
``"i64"``.
header (bool): If True, include headers in the Form's ``"uproot"``
parameters.
tobject_header (bool): If True, include headers for ``TObject``
classes in the Form's ``"uproot"`` parameters.
breadcrumbs (tuple of class objects): Used to check for recursion.
Types that contain themselves cannot be Awkward Arrays because the
depth of instances is unknown.
        Returns the ``awkward.forms.Form`` to use to put objects of this type
        in an Awkward Array.
"""
versioned_cls = file.class_named(classname_decode(cls.__name__)[0], "max")
return versioned_cls.awkward_form(
file, index_format, header, tobject_header, breadcrumbs
)
@classmethod
def strided_interpretation(
cls, file, header=False, tobject_header=True, breadcrumbs=(), original=None
):
"""
Args:
cls (subclass of :doc:`uproot.model.DispatchByVersion`): This class.
file (:doc:`uproot.reading.ReadOnlyFile`): File to use to generate
:doc:`uproot.model.Model` classes from its
:ref:`uproot.reading.ReadOnlyFile.streamers` and ``file_path``
for error messages.
header (bool): If True, assume the outermost object has a header.
tobject_header (bool): If True, assume that ``TObjects`` have headers.
original (None, :doc:`uproot.model.Model`, or :doc:`uproot.containers.Container`): The
original, non-strided model or container.
breadcrumbs (tuple of class objects): Used to check for recursion.
Types that contain themselves cannot be strided because the
depth of instances is unknown.
Returns a list of (str, ``numpy.dtype``) pairs to build a
:doc:`uproot.interpretation.objects.AsStridedObjects` interpretation.
"""
versioned_cls = file.class_named(classname_decode(cls.__name__)[0], "max")
return versioned_cls.strided_interpretation(
file, header=header, tobject_header=tobject_header, breadcrumbs=breadcrumbs
)
@classmethod
def class_of_version(cls, version):
"""
Returns the class corresponding to a specified ``version`` if it exists.
If not, this classmethod returns None. No attempt is made to create a
missing class.
"""
return cls.known_versions.get(version)
@classmethod
def has_version(cls, version):
"""
Returns True if a class corresponding to a specified ``version``
currently exists; False otherwise.
"""
return version in cls.known_versions
@classmethod
def new_class(cls, file, version):
"""
Uses ``file`` to create a new class for a specified ``version``.
As a side-effect, this new class is added to ``cls.known_versions``
(for :ref:`uproot.model.DispatchByVersion.class_of_version` and
:ref:`uproot.model.DispatchByVersion.has_version`).
If the ``file`` lacks a ``TStreamerInfo`` for the class, this function
returns a :doc:`uproot.model.UnknownClassVersion` (adding it to
        ``uproot.unknown_classes`` if it's not already there).
"""
classname, _ = classname_decode(cls.__name__)
classname = classname_regularize(classname)
streamer = file.streamer_named(classname, version)
if streamer is None:
streamer = file.streamer_named(classname, "max")
if streamer is not None:
versioned_cls = streamer.new_class(file)
versioned_cls.class_streamer = streamer
cls.known_versions[streamer.class_version] = versioned_cls
return versioned_cls
else:
unknown_cls = uproot.unknown_classes.get(classname)
if unknown_cls is None:
unknown_cls = uproot._util.new_class(
classname_encode(classname, version, unknown=True),
(UnknownClassVersion,),
{},
)
uproot.unknown_classes[classname] = unknown_cls
return unknown_cls
@classmethod
def read(cls, chunk, cursor, context, file, selffile, parent, concrete=None):
"""
Args:
cls (subclass of :doc:`uproot.model.DispatchByVersion`): This class.
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
cursor (:doc:`uproot.source.cursor.Cursor`): Current position in
that ``chunk``.
context (dict): Auxiliary data used in deserialization.
file (:doc:`uproot.reading.ReadOnlyFile`): An open file object,
capable of generating new :doc:`uproot.model.Model` classes
from its :ref:`uproot.reading.ReadOnlyFile.streamers`.
selffile (:doc:`uproot.reading.CommonFileMethods`): A possibly
:doc:`uproot.reading.DetachedFile` associated with this object.
parent (None or calling object): The previous ``read`` in the
recursive descent.
concrete (None or :doc:`uproot.model.Model` instance): If None,
this model corresponds to the concrete (instantiated) class in
C++. Otherwise, this model represents a superclass part of the
object, and ``concrete`` points to the concrete instance.
Reads the instance version number from the byte stream, backs up the
:doc:`uproot.source.cursor.Cursor` to the starting position, and
invokes the appropriate :doc:`uproot.model.VersionedModel`'s ``read``
classmethod.
If a :doc:`uproot.model.VersionedModel` does not exist for the
specified version, the ``file``'s ``TStreamerInfo`` is queried to
attempt to create one, and failing that, an
:doc:`uproot.model.UnknownClassVersion` is created instead.
"""
import uproot.deserialization
# Ignores context["reading"], because otherwise, there would be nothing to do.
(
num_bytes,
version,
is_memberwise,
) = uproot.deserialization.numbytes_version(chunk, cursor, context, move=False)
versioned_cls = cls.known_versions.get(version)
if versioned_cls is not None:
pass
elif version is not None:
versioned_cls = cls.new_class(file, version)
elif context.get("in_TBranch", False):
versioned_cls = cls.new_class(file, "max")
else:
raise ValueError(
"""Unknown version {0} for class {1} that cannot be skipped """
"""because its number of bytes is unknown.
""".format(
version,
classname_decode(cls.__name__)[0],
)
)
# versioned_cls.read starts with numbytes_version again because move=False (above)
return cls.postprocess(
versioned_cls.read(
chunk, cursor, context, file, selffile, parent, concrete=concrete
),
chunk,
cursor,
context,
file,
)
@classmethod
def postprocess(cls, self, chunk, cursor, context, file):
"""
Args:
cls (subclass of :doc:`uproot.model.DispatchByVersion`): This class.
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
cursor (:doc:`uproot.source.cursor.Cursor`): Current position in
that ``chunk``.
context (dict): Auxiliary data used in deserialization.
file (:doc:`uproot.reading.ReadOnlyFile`): An open file object,
capable of generating new :doc:`uproot.model.Model` classes
from its :ref:`uproot.reading.ReadOnlyFile.streamers`.
Called for any additional processing after the object has been fully
read.
The return value from this method is the object that actually represents
the ROOT data, which might be a different instance or even a different
type from this class. The default in :doc:`uproot.model.Model` is to
return ``self``.
Note that for versioned models,
:ref:`uproot.model.VersionedModel.postprocess` is called first, then
:ref:`uproot.model.DispatchByVersion.postprocess` is called on its
output, allowing a :doc:`uproot.model.DispatchByVersion` to refine all
data of its type, regardless of version.
"""
return self
class UnknownClass(Model):
"""
Placeholder for a C++ class instance that has no
:doc:`uproot.model.DispatchByVersion` and no ``TStreamerInfo`` in the
current :doc:`uproot.reading.ReadOnlyFile` to produce one.
"""
@property
def chunk(self):
"""
The ``chunk`` of data associated with the unknown class, referred to by
a weak reference (to avoid memory leaks in
:doc:`uproot.model.UnknownClass` objects). If the original ``chunk``
has been garbage-collected, this raises ``RuntimeError``.
Primarily useful in the :ref:`uproot.model.UnknownClass.debug` method.
"""
chunk = self._chunk()
if chunk is None:
raise RuntimeError(
"the 'chunk' associated with this unknown class has been deleted"
)
else:
return chunk
@property
def context(self):
"""
The auxiliary data used in deserialization.
Primarily useful in the :ref:`uproot.model.UnknownClass.debug` method.
"""
return self._context
def __repr__(self):
return "<Unknown {0} at 0x{1:012x}>".format(self.classname, id(self))
def debug(
self, skip_bytes=0, limit_bytes=None, dtype=None, offset=0, stream=sys.stdout
):
"""
Args:
skip_bytes (int): Number of bytes to skip before presenting the
remainder of the :doc:`uproot.source.chunk.Chunk`. May be
negative, to examine the byte stream leading up to the attempted
instantiation. The default, ``0``, starts where the number
of bytes and version number would be (just before
:ref:`uproot.model.Model.read_numbytes_version`).
limit_bytes (None or int): Number of bytes to limit the output to.
A line of debugging output (without any ``offset``) is 20 bytes,
so multiples of 20 show full lines. If None, everything is
shown to the end of the :doc:`uproot.source.chunk.Chunk`,
which might be large.
dtype (None, ``numpy.dtype``, or its constructor argument): If None,
present only the bytes as decimal values (0-255). Otherwise,
also interpret them as an array of a given NumPy type.
offset (int): Number of bytes to skip before interpreting a ``dtype``;
can be helpful if the numerical values are out of phase with
the first byte shown. Not to be confused with ``skip_bytes``,
which determines which bytes are shown at all. Any ``offset``
values that are equivalent modulo ``dtype.itemsize`` show
equivalent interpretations.
stream (object with a ``write(str)`` method): Stream to write the
debugging output to.
Presents the byte stream at the point where this instance would have been
deserialized.
Example output with ``dtype=">f4"`` and ``offset=3``.
.. code-block::
--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
123 123 123 63 140 204 205 64 12 204 205 64 83 51 51 64 140 204 205 64
{ { { ? --- --- --- @ --- --- --- @ S 3 3 @ --- --- --- @
1.1 2.2 3.3 4.4
--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
176 0 0 64 211 51 51 64 246 102 102 65 12 204 205 65 30 102 102 66
--- --- --- @ --- 3 3 @ --- f f A --- --- --- A --- f f B
5.5 6.6 7.7 8.8 9.9
--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
202 0 0 67 74 0 0 67 151 128 0 123 123
--- --- --- C J --- --- C --- --- --- { {
101.0 202.0 303.0
"""
cursor = self._cursor.copy()
cursor.skip(skip_bytes)
cursor.debug(
self.chunk,
context=self._context,
limit_bytes=limit_bytes,
dtype=dtype,
offset=offset,
stream=stream,
)
def debug_array(self, skip_bytes=0, dtype=numpy.dtype("u1")):
"""
Args:
skip_bytes (int): Number of bytes to skip before presenting the
remainder of the :doc:`uproot.source.chunk.Chunk`. May be
negative, to examine the byte stream leading up to the attempted
instantiation. The default, ``0``, starts where the number
of bytes and version number would be (just before
:ref:`uproot.model.Model.read_numbytes_version`).
dtype (``numpy.dtype`` or its constructor argument): Data type in
which to interpret the data. (The size of the array returned is
truncated to this ``dtype.itemsize``.)
Like :ref:`uproot.model.UnknownClass.debug`, but returns a NumPy array
for further inspection.
"""
dtype = numpy.dtype(dtype)
cursor = self._cursor.copy()
cursor.skip(skip_bytes)
out = self.chunk.remainder(cursor.index, cursor, self._context)
return out[: (len(out) // dtype.itemsize) * dtype.itemsize].view(dtype)
def read_members(self, chunk, cursor, context, file):
self._chunk = weakref.ref(chunk)
self._context = context
if self._num_bytes is not None:
cursor.skip(self._num_bytes - cursor.displacement(self._cursor))
else:
raise ValueError(
"""unknown class {0} that cannot be skipped because its """
"""number of bytes is unknown
in file {1}""".format(
self.classname, file.file_path
)
)
class UnknownClassVersion(VersionedModel):
"""
Placeholder for a C++ class instance that has no ``TStreamerInfo`` in the
current :doc:`uproot.reading.ReadOnlyFile` to produce one.
"""
@property
def chunk(self):
"""
The ``chunk`` of data associated with the class of unknown version,
referred to by a weak reference (to avoid memory leaks in
:doc:`uproot.model.UnknownClassVersion` objects). If the original
``chunk`` has been garbage-collected, this raises ``RuntimeError``.
Primarily useful in the :ref:`uproot.model.UnknownClassVersion.debug`
method.
"""
chunk = self._chunk()
if chunk is None:
raise RuntimeError(
"the 'chunk' associated with this class of unknown version has "
"been deleted"
)
else:
return chunk
@property
def context(self):
"""
The auxiliary data used in deserialization.
Primarily useful in the :ref:`uproot.model.UnknownClass.debug` method.
"""
return self._context
def debug(
self, skip_bytes=0, limit_bytes=None, dtype=None, offset=0, stream=sys.stdout
):
"""
Args:
skip_bytes (int): Number of bytes to skip before presenting the
remainder of the :doc:`uproot.source.chunk.Chunk`. May be
negative, to examine the byte stream leading up to the attempted
instantiation. The default, ``0``, starts where the number
of bytes and version number would be (just before
:ref:`uproot.model.Model.read_numbytes_version`).
limit_bytes (None or int): Number of bytes to limit the output to.
A line of debugging output (without any ``offset``) is 20 bytes,
so multiples of 20 show full lines. If None, everything is
shown to the end of the :doc:`uproot.source.chunk.Chunk`,
which might be large.
dtype (None, ``numpy.dtype``, or its constructor argument): If None,
present only the bytes as decimal values (0-255). Otherwise,
also interpret them as an array of a given NumPy type.
offset (int): Number of bytes to skip before interpreting a ``dtype``;
can be helpful if the numerical values are out of phase with
the first byte shown. Not to be confused with ``skip_bytes``,
which determines which bytes are shown at all. Any ``offset``
values that are equivalent modulo ``dtype.itemsize`` show
equivalent interpretations.
stream (object with a ``write(str)`` method): Stream to write the
debugging output to.
Presents the byte stream at the point where this instance would have been
deserialized.
Example output with ``dtype=">f4"`` and ``offset=3``.
.. code-block::
--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
123 123 123 63 140 204 205 64 12 204 205 64 83 51 51 64 140 204 205 64
{ { { ? --- --- --- @ --- --- --- @ S 3 3 @ --- --- --- @
1.1 2.2 3.3 4.4
--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
176 0 0 64 211 51 51 64 246 102 102 65 12 204 205 65 30 102 102 66
--- --- --- @ --- 3 3 @ --- f f A --- --- --- A --- f f B
5.5 6.6 7.7 8.8 9.9
--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
202 0 0 67 74 0 0 67 151 128 0 123 123
--- --- --- C J --- --- C --- --- --- { {
101.0 202.0 303.0
"""
cursor = self._cursor.copy()
cursor.skip(skip_bytes)
cursor.debug(
self.chunk,
context=self._context,
limit_bytes=limit_bytes,
dtype=dtype,
offset=offset,
stream=stream,
)
def debug_array(self, skip_bytes=0, dtype=numpy.dtype("u1")):
"""
Args:
skip_bytes (int): Number of bytes to skip before presenting the
remainder of the :doc:`uproot.source.chunk.Chunk`. May be
negative, to examine the byte stream leading up to the attempted
instantiation. The default, ``0``, starts where the number
of bytes and version number would be (just before
:ref:`uproot.model.Model.read_numbytes_version`).
dtype (``numpy.dtype`` or its constructor argument): Data type in
which to interpret the data. (The size of the array returned is
truncated to this ``dtype.itemsize``.)
Like :ref:`uproot.model.UnknownClassVersion.debug`, but returns a
NumPy array for further inspection.
"""
dtype = numpy.dtype(dtype)
cursor = self._cursor.copy()
cursor.skip(skip_bytes)
out = self.chunk.remainder(cursor.index, cursor, self._context)
return out[: (len(out) // dtype.itemsize) * dtype.itemsize].view(dtype)
def read_members(self, chunk, cursor, context, file):
self._chunk = weakref.ref(chunk)
self._context = context
if self._num_bytes is not None:
cursor.skip(self._num_bytes - cursor.displacement(self._cursor))
else:
raise ValueError(
"""class {0} with unknown version {1} cannot be skipped """
"""because its number of bytes is unknown
in file {2}""".format(
self.classname, self._instance_version, file.file_path
)
)
def __repr__(self):
return "<{0} with unknown version {1} at 0x{2:012x}>".format(
self.classname, self._instance_version, id(self)
)
class DynamicModel(VersionedModel):
"""
A :doc:`uproot.model.VersionedModel` subclass generated by any attempt to
extract it from the ``uproot.dynamic`` namespace in Python 3.7 and later.
This dynamically generated model allows ROOT object types without predefined
:doc:`uproot.model.Model` classes to be pickled in Python 3.7 and later.
"""
def __setstate__(self, state):
cls = type(self)
class_data, instance_data = state
for k, v in class_data.items():
if not hasattr(cls, k):
setattr(cls, k, v)
cls.__bases__ = (
tuple(x for x in class_data["behaviors"] if x not in cls.__bases__)
+ cls.__bases__
)
self.__dict__.update(instance_data)
| 38.897825
| 98
| 0.600613
|
00b7cb59260ed681f3321c8e1234d48c7299443d
| 1,190
|
py
|
Python
|
setup.py
|
GuyShane/virtualfish
|
2d463a02b2fcdf7e65438b44ef5b56d897900413
|
[
"MIT"
] | null | null | null |
setup.py
|
GuyShane/virtualfish
|
2d463a02b2fcdf7e65438b44ef5b56d897900413
|
[
"MIT"
] | null | null | null |
setup.py
|
GuyShane/virtualfish
|
2d463a02b2fcdf7e65438b44ef5b56d897900413
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='virtualfish',
description='A virtualenv wrapper for the Fish shell',
author='Adam Brenecki',
author_email='adam@brenecki.id.au',
url='https://github.com/adambrenecki/virtualfish',
packages=find_packages(),
include_package_data=True,
setup_requires=[
'setuptools_scm>=1.11.1',
],
use_scm_version=True,
install_requires=[
'pkgconfig>=1.2.2,<2',
'psutil>=5.2.2,<6',
'virtualenv',
'xdg>=1.0.5,<2',
],
extras_require={
'dev': [
'pytest>=3.1.3,<3.2',
'pytest-xdist>=1.22.2,<1.23',
],
},
entry_points={
'console_scripts': ['vf = virtualfish.loader.cli:main'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: System :: Shells',
]
)
| 28.333333
| 64
| 0.567227
|
17627583e519728b8782b8e5428539094f971ae6
| 25
|
py
|
Python
|
code.py
|
flavio-fernandes/kitchen_clock
|
920ba7d201b7895f414775aaa4685aa66b040802
|
[
"MIT"
] | null | null | null |
code.py
|
flavio-fernandes/kitchen_clock
|
920ba7d201b7895f414775aaa4685aa66b040802
|
[
"MIT"
] | null | null | null |
code.py
|
flavio-fernandes/kitchen_clock
|
920ba7d201b7895f414775aaa4685aa66b040802
|
[
"MIT"
] | null | null | null |
import kitchen_clock  # run the main application module
| 8.333333
| 23
| 0.84
|
758a81d82e83cf7f8e82b3955436ac1dc1162d32
| 1,014
|
py
|
Python
|
Python/q1.py
|
uday256071/DataStructures-and-Algorithms
|
d5740a27a8e4141616307ec3771bc7ad95ff9f72
|
[
"MIT"
] | null | null | null |
Python/q1.py
|
uday256071/DataStructures-and-Algorithms
|
d5740a27a8e4141616307ec3771bc7ad95ff9f72
|
[
"MIT"
] | null | null | null |
Python/q1.py
|
uday256071/DataStructures-and-Algorithms
|
d5740a27a8e4141616307ec3771bc7ad95ff9f72
|
[
"MIT"
] | null | null | null |
'''This problem was recently asked by Google.
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17'''
# Approach 1 - HASHMAP: O(n) set lookups; the k/2 edge case needs the value
# to appear at least twice in the input.
def sumOfTwo(arr, k):
    arrSet = set(arr)
    for x in arr:
        if x == k / 2:
            if arr.count(x) >= 2:
                return "pair found - " + str(x) + " " + str(x)
        else:
            if k - x in arrSet:
                return "pair found - " + str(x) + " " + str(k - x)
    return "no pair found"
# Approach 2 - TWO POINTER: sort first, then walk inward from both ends.
def sumOfTwo1(arr, k):
    arr.sort()
    i, n = 0, max(len(arr) - 1, 0)
    while i != n:
        if arr[i] + arr[n] > k:
            n = n - 1
        elif arr[i] + arr[n] < k:
            i = i + 1
        else:
            return "pair found - " + str(arr[i]) + " " + str(arr[n])
    return "no pair found"
if __name__ == "__main__":
    arr = [10, 15, 3, 3, 7]
    # arr = []
    print(sumOfTwo(arr, 22))
    print(sumOfTwo1(arr, 22))
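# Quick check against the example in the problem statement above
# ([10, 15, 3, 7] with k = 17): both approaches report the 10 + 7 pair.
#     sumOfTwo([10, 15, 3, 7], 17)   # -> "pair found - 10 7"
#     sumOfTwo1([10, 15, 3, 7], 17)  # -> "pair found - 7 10"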
| 27.405405
| 97
| 0.534517
|
29ccb9ef339b14d760442644b36a9a22fa24b220
| 14,872
|
py
|
Python
|
slack_bolt/oauth/async_oauth_flow.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | 1
|
2021-08-14T15:14:02.000Z
|
2021-08-14T15:14:02.000Z
|
slack_bolt/oauth/async_oauth_flow.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | null | null | null |
slack_bolt/oauth/async_oauth_flow.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | null | null | null |
import logging
import os
from logging import Logger
from typing import Optional, Dict, Callable, Awaitable, Sequence
from slack_bolt.error import BoltError
from slack_bolt.logger.messages import error_oauth_settings_invalid_type_async
from slack_bolt.oauth.async_callback_options import (
AsyncCallbackOptions,
DefaultAsyncCallbackOptions,
AsyncSuccessArgs,
AsyncFailureArgs,
)
from slack_bolt.oauth.async_oauth_settings import AsyncOAuthSettings
from slack_bolt.oauth.internals import _build_default_install_page_html
from slack_bolt.request.async_request import AsyncBoltRequest
from slack_bolt.response import BoltResponse
from slack_sdk.errors import SlackApiError
from slack_sdk.oauth import OAuthStateUtils
from slack_sdk.oauth.installation_store import Installation
from slack_sdk.oauth.installation_store.sqlite3 import SQLite3InstallationStore
from slack_sdk.oauth.state_store.sqlite3 import SQLite3OAuthStateStore
from slack_sdk.web.async_client import AsyncWebClient
from slack_sdk.web.async_slack_response import AsyncSlackResponse
from slack_bolt.util.async_utils import create_async_web_client
class AsyncOAuthFlow:
settings: AsyncOAuthSettings
client_id: str
redirect_uri: Optional[str]
install_path: str
redirect_uri_path: str
success_handler: Callable[[AsyncSuccessArgs], Awaitable[BoltResponse]]
failure_handler: Callable[[AsyncFailureArgs], Awaitable[BoltResponse]]
@property
def client(self) -> AsyncWebClient:
if self._async_client is None:
self._async_client = create_async_web_client(logger=self.logger)
return self._async_client
@property
def logger(self) -> Logger:
if self._logger is None:
self._logger = logging.getLogger(__name__)
return self._logger
def __init__(
self,
*,
client: Optional[AsyncWebClient] = None,
logger: Optional[Logger] = None,
settings: AsyncOAuthSettings,
):
"""The module to run the Slack app installation flow (OAuth flow).
Args:
client: The `slack_sdk.web.async_client.AsyncWebClient` instance.
logger: The logger.
settings: OAuth settings to configure this module.
"""
self._async_client = client
self._logger = logger
if not isinstance(settings, AsyncOAuthSettings):
raise BoltError(error_oauth_settings_invalid_type_async())
self.settings = settings
self.settings.logger = self._logger
self.client_id = self.settings.client_id
self.redirect_uri = self.settings.redirect_uri
self.install_path = self.settings.install_path
self.redirect_uri_path = self.settings.redirect_uri_path
self.default_callback_options = DefaultAsyncCallbackOptions(
logger=logger,
state_utils=self.settings.state_utils,
redirect_uri_page_renderer=self.settings.redirect_uri_page_renderer,
)
if settings.callback_options is None:
settings.callback_options = self.default_callback_options
self.success_handler = settings.callback_options.success
self.failure_handler = settings.callback_options.failure
# -----------------------------
# Factory Methods
# -----------------------------
@classmethod
def sqlite3(
cls,
database: str,
# OAuth flow parameters/credentials
authorization_url: Optional[str] = None,
client_id: Optional[str] = None, # required
client_secret: Optional[str] = None, # required
scopes: Optional[Sequence[str]] = None,
user_scopes: Optional[Sequence[str]] = None,
redirect_uri: Optional[str] = None,
# Handler configuration
install_path: Optional[str] = None,
redirect_uri_path: Optional[str] = None,
callback_options: Optional[AsyncCallbackOptions] = None,
success_url: Optional[str] = None,
failure_url: Optional[str] = None,
# Installation Management
# state parameter related configurations
state_cookie_name: str = OAuthStateUtils.default_cookie_name,
state_expiration_seconds: int = OAuthStateUtils.default_expiration_seconds,
installation_store_bot_only: bool = False,
client: Optional[AsyncWebClient] = None,
logger: Optional[Logger] = None,
) -> "AsyncOAuthFlow":
client_id = client_id or os.environ["SLACK_CLIENT_ID"] # required
client_secret = client_secret or os.environ["SLACK_CLIENT_SECRET"] # required
scopes = scopes or os.environ.get("SLACK_SCOPES", "").split(",")
user_scopes = user_scopes or os.environ.get("SLACK_USER_SCOPES", "").split(",")
redirect_uri = redirect_uri or os.environ.get("SLACK_REDIRECT_URI")
return AsyncOAuthFlow(
client=client or AsyncWebClient(),
logger=logger,
settings=AsyncOAuthSettings(
# OAuth flow parameters/credentials
authorization_url=authorization_url,
client_id=client_id,
client_secret=client_secret,
scopes=scopes,
user_scopes=user_scopes,
redirect_uri=redirect_uri,
# Handler configuration
install_path=install_path,
redirect_uri_path=redirect_uri_path,
callback_options=callback_options,
success_url=success_url,
failure_url=failure_url,
# Installation Management
installation_store=SQLite3InstallationStore(
database=database,
client_id=client_id,
logger=logger,
),
installation_store_bot_only=installation_store_bot_only,
# state parameter related configurations
state_store=SQLite3OAuthStateStore(
database=database,
expiration_seconds=state_expiration_seconds,
logger=logger,
),
state_cookie_name=state_cookie_name,
state_expiration_seconds=state_expiration_seconds,
),
)
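    # Illustrative configuration sketch (the literal values are placeholders,
    # not part of this module): the sqlite3 factory keeps the installation
    # store and the OAuth state store in a single local SQLite file.
    #     oauth_flow = AsyncOAuthFlow.sqlite3(
    #         database="./slackapp.db",
    #         client_id="111.222",         # falls back to SLACK_CLIENT_ID
    #         client_secret="secret",      # falls back to SLACK_CLIENT_SECRET
    #         scopes=["chat:write", "commands"],
    #     )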
# -----------------------------
# Installation
# -----------------------------
async def handle_installation(self, request: AsyncBoltRequest) -> BoltResponse:
state = await self.issue_new_state(request)
url = await self.build_authorize_url(state, request)
set_cookie_value = self.settings.state_utils.build_set_cookie_for_new_state(
state
)
if self.settings.install_page_rendering_enabled:
html = await self.build_install_page_html(url, request)
return BoltResponse(
status=200,
body=html,
headers={
"Content-Type": "text/html; charset=utf-8",
"Set-Cookie": [set_cookie_value],
},
)
else:
return BoltResponse(
status=302,
body="",
headers={
"Content-Type": "text/html; charset=utf-8",
"Location": url,
"Set-Cookie": [set_cookie_value],
},
)
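    # The two branches above are the two supported entry points to the flow:
    # render an install page that carries the state cookie, or answer with a
    # 302 redirect straight to the generated authorize URL.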
# ----------------------
# Internal methods for Installation
async def issue_new_state(self, request: AsyncBoltRequest) -> str:
return await self.settings.state_store.async_issue()
async def build_authorize_url(self, state: str, request: AsyncBoltRequest) -> str:
return self.settings.authorize_url_generator.generate(state)
async def build_install_page_html(self, url: str, request: AsyncBoltRequest) -> str:
return _build_default_install_page_html(url)
# -----------------------------
# Callback
# -----------------------------
async def handle_callback(self, request: AsyncBoltRequest) -> BoltResponse:
# failure due to end-user's cancellation or invalid redirection to slack.com
error = request.query.get("error", [None])[0]
if error is not None:
return await self.failure_handler(
AsyncFailureArgs(
request=request,
reason=error, # type: ignore
suggested_status_code=200,
settings=self.settings,
default=self.default_callback_options,
)
)
# state parameter verification
state: Optional[str] = request.query.get("state", [None])[0]
if not self.settings.state_utils.is_valid_browser(state, request.headers):
return await self.failure_handler(
AsyncFailureArgs(
request=request,
reason="invalid_browser",
suggested_status_code=400,
settings=self.settings,
default=self.default_callback_options,
)
)
valid_state_consumed = await self.settings.state_store.async_consume(state)
if not valid_state_consumed:
return await self.failure_handler(
AsyncFailureArgs(
request=request,
reason="invalid_state",
suggested_status_code=401,
settings=self.settings,
default=self.default_callback_options,
)
)
# run installation
code = request.query.get("code", [None])[0]
if code is None:
return await self.failure_handler(
AsyncFailureArgs(
request=request,
reason="missing_code",
suggested_status_code=401,
settings=self.settings,
default=self.default_callback_options,
)
)
installation = await self.run_installation(code)
if installation is None:
# failed to run installation with the code
return await self.failure_handler(
AsyncFailureArgs(
request=request,
reason="invalid_code",
suggested_status_code=401,
settings=self.settings,
default=self.default_callback_options,
)
)
# persist the installation
try:
await self.store_installation(request, installation)
except BoltError as err:
return await self.failure_handler(
AsyncFailureArgs(
request=request,
reason="storage_error",
error=err,
suggested_status_code=500,
settings=self.settings,
default=self.default_callback_options,
)
)
# display a successful completion page to the end-user
return await self.success_handler(
AsyncSuccessArgs(
request=request,
installation=installation,
settings=self.settings,
default=self.default_callback_options,
)
)
# ----------------------
# Internal methods for Callback
async def run_installation(self, code: str) -> Optional[Installation]:
try:
oauth_response: AsyncSlackResponse = await self.client.oauth_v2_access(
code=code,
client_id=self.settings.client_id,
client_secret=self.settings.client_secret,
redirect_uri=self.settings.redirect_uri, # can be None
)
installed_enterprise: Dict[str, str] = (
oauth_response.get("enterprise") or {}
)
is_enterprise_install: bool = (
oauth_response.get("is_enterprise_install") or False
)
installed_team: Dict[str, str] = oauth_response.get("team") or {}
installer: Dict[str, str] = oauth_response.get("authed_user") or {}
incoming_webhook: Dict[str, str] = (
oauth_response.get("incoming_webhook") or {}
)
bot_token: Optional[str] = oauth_response.get("access_token")
# NOTE: oauth.v2.access doesn't include bot_id in response
bot_id: Optional[str] = None
enterprise_url: Optional[str] = None
if bot_token is not None:
auth_test = await self.client.auth_test(token=bot_token)
bot_id = auth_test["bot_id"]
if is_enterprise_install is True:
enterprise_url = auth_test.get("url")
return Installation(
app_id=oauth_response.get("app_id"),
enterprise_id=installed_enterprise.get("id"),
enterprise_name=installed_enterprise.get("name"),
enterprise_url=enterprise_url,
team_id=installed_team.get("id"),
team_name=installed_team.get("name"),
bot_token=bot_token,
bot_id=bot_id,
bot_user_id=oauth_response.get("bot_user_id"),
bot_scopes=oauth_response.get("scope"), # comma-separated string
bot_refresh_token=oauth_response.get("refresh_token"), # since v1.7
bot_token_expires_in=oauth_response.get("expires_in"), # since v1.7
user_id=installer.get("id"),
user_token=installer.get("access_token"),
user_scopes=installer.get("scope"), # comma-separated string
user_refresh_token=installer.get("refresh_token"), # since v1.7
user_token_expires_in=installer.get("expires_in"), # since v1.7
incoming_webhook_url=incoming_webhook.get("url"),
incoming_webhook_channel=incoming_webhook.get("channel"),
incoming_webhook_channel_id=incoming_webhook.get("channel_id"),
incoming_webhook_configuration_url=incoming_webhook.get(
"configuration_url"
),
is_enterprise_install=is_enterprise_install,
token_type=oauth_response.get("token_type"),
)
except SlackApiError as e:
message = (
f"Failed to fetch oauth.v2.access result with code: {code} - error: {e}"
)
self.logger.warning(message)
return None
async def store_installation(
self, request: AsyncBoltRequest, installation: Installation
):
# may raise BoltError
await self.settings.installation_store.async_save(installation)
| 40.194595
| 88
| 0.600524
|
ac4226358c67d36faad3cd47d4cd22003343c0e2
| 8,831
|
py
|
Python
|
netbox/secrets/views.py
|
vietbm-hcm/netbox
|
a16218b3111c2c8dd952c1fa35c13e5d5fd81ea9
|
[
"Apache-2.0"
] | null | null | null |
netbox/secrets/views.py
|
vietbm-hcm/netbox
|
a16218b3111c2c8dd952c1fa35c13e5d5fd81ea9
|
[
"Apache-2.0"
] | null | null | null |
netbox/secrets/views.py
|
vietbm-hcm/netbox
|
a16218b3111c2c8dd952c1fa35c13e5d5fd81ea9
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import base64
from django.contrib import messages
from django.contrib.auth.decorators import permission_required, login_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db import transaction, IntegrityError
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import View
from dcim.models import Device
from utilities.views import (
BulkDeleteView, BulkEditView, BulkImportView, ObjectDeleteView, ObjectEditView, ObjectListView,
)
from . import filters, forms, tables
from .decorators import userkey_required
from .models import SecretRole, Secret, SessionKey
def get_session_key(request):
"""
Extract and decode the session key sent with a request. Returns None if no session key was provided.
"""
session_key = request.COOKIES.get('session_key', None)
if session_key is not None:
return base64.b64decode(session_key)
return session_key
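# Note: the cookie value arrives base64-encoded, so the decoded bytes returned
# above can be passed directly to SessionKey.get_master_key() in the views below.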
#
# Secret roles
#
class SecretRoleListView(ObjectListView):
queryset = SecretRole.objects.annotate(secret_count=Count('secrets'))
table = tables.SecretRoleTable
template_name = 'secrets/secretrole_list.html'
class SecretRoleCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'secrets.add_secretrole'
model = SecretRole
form_class = forms.SecretRoleForm
def get_return_url(self, request, obj):
return reverse('secrets:secretrole_list')
class SecretRoleEditView(SecretRoleCreateView):
permission_required = 'secrets.change_secretrole'
class SecretRoleBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'secrets.delete_secretrole'
cls = SecretRole
default_return_url = 'secrets:secretrole_list'
#
# Secrets
#
@method_decorator(login_required, name='dispatch')
class SecretListView(ObjectListView):
queryset = Secret.objects.select_related('role', 'device')
filter = filters.SecretFilter
filter_form = forms.SecretFilterForm
table = tables.SecretTable
template_name = 'secrets/secret_list.html'
@method_decorator(login_required, name='dispatch')
class SecretView(View):
def get(self, request, pk):
secret = get_object_or_404(Secret, pk=pk)
return render(request, 'secrets/secret.html', {
'secret': secret,
})
@permission_required('secrets.add_secret')
@userkey_required()
def secret_add(request, pk):
# Retrieve device
device = get_object_or_404(Device, pk=pk)
secret = Secret(device=device)
session_key = get_session_key(request)
if request.method == 'POST':
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# We need a valid session key in order to create a Secret
if session_key is None:
form.add_error(None, "No session key was provided with the request. Unable to encrypt secret data.")
# Create and encrypt the new Secret
else:
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data['plaintext'])
secret.encrypt(master_key)
secret.save()
messages.success(request, "Added new secret: {}.".format(secret))
if '_addanother' in request.POST:
return redirect('dcim:device_addsecret', pk=device.pk)
else:
return redirect('secrets:secret', pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(request, 'secrets/secret_edit.html', {
'secret': secret,
'form': form,
'return_url': device.get_absolute_url(),
})
@permission_required('secrets.change_secret')
@userkey_required()
def secret_edit(request, pk):
secret = get_object_or_404(Secret, pk=pk)
session_key = get_session_key(request)
if request.method == 'POST':
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# Re-encrypt the Secret if a plaintext and session key have been provided.
if form.cleaned_data['plaintext'] and session_key is not None:
# Retrieve the master key using the provided session key
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
# Create and encrypt the new Secret
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data['plaintext'])
secret.encrypt(master_key)
secret.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect('secrets:secret', pk=secret.pk)
else:
form.add_error(None, "Invalid session key. Unable to encrypt secret data.")
# We can't save the plaintext without a session key.
elif form.cleaned_data['plaintext']:
form.add_error(None, "No session key was provided with the request. Unable to encrypt secret data.")
# If no new plaintext was specified, a session key is not needed.
else:
secret = form.save()
messages.success(request, "Modified secret {}.".format(secret))
return redirect('secrets:secret', pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(request, 'secrets/secret_edit.html', {
'secret': secret,
'form': form,
'return_url': reverse('secrets:secret', kwargs={'pk': secret.pk}),
})
class SecretDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'secrets.delete_secret'
model = Secret
default_return_url = 'secrets:secret_list'
class SecretBulkImportView(BulkImportView):
    permission_required = 'secrets.add_secret'
model_form = forms.SecretCSVForm
table = tables.SecretTable
default_return_url = 'secrets:secret_list'
master_key = None
def _save_obj(self, obj_form):
"""
Encrypt each object before saving it to the database.
"""
obj = obj_form.save(commit=False)
obj.encrypt(self.master_key)
obj.save()
return obj
def post(self, request):
# Grab the session key from cookies.
session_key = request.COOKIES.get('session_key')
if session_key:
# Attempt to derive the master key using the provided session key.
try:
sk = SessionKey.objects.get(userkey__user=request.user)
self.master_key = sk.get_master_key(base64.b64decode(session_key))
except SessionKey.DoesNotExist:
messages.error(request, "No session key found for this user.")
if self.master_key is not None:
return super(SecretBulkImportView, self).post(request)
else:
messages.error(request, "Invalid private key! Unable to encrypt secret data.")
else:
messages.error(request, "No session key was provided with the request. Unable to encrypt secret data.")
return render(request, self.template_name, {
'form': self._import_form(request.POST),
'fields': self.model_form().fields,
'obj_type': self.model_form._meta.model._meta.verbose_name,
'return_url': self.default_return_url,
})
class SecretBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'secrets.change_secret'
cls = Secret
filter = filters.SecretFilter
form = forms.SecretBulkEditForm
template_name = 'secrets/secret_bulk_edit.html'
default_return_url = 'secrets:secret_list'
class SecretBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'secrets.delete_secret'
cls = Secret
filter = filters.SecretFilter
default_return_url = 'secrets:secret_list'
| 34.767717
| 116
| 0.659042
|
22a885e96f918da0288f47ae29b93fc7bd9b9900
| 4,499
|
py
|
Python
|
backend/common/json_data.py
|
maura-dev/zc_plugin_company_sales_prospect
|
08a35a796169d137b9a111390cfe239900c3940a
|
[
"BSD-3-Clause"
] | 9
|
2021-08-28T10:44:25.000Z
|
2021-09-09T19:16:12.000Z
|
backend/common/json_data.py
|
maura-dev/zc_plugin_company_sales_prospect
|
08a35a796169d137b9a111390cfe239900c3940a
|
[
"BSD-3-Clause"
] | 117
|
2021-08-28T08:02:48.000Z
|
2021-10-04T15:14:18.000Z
|
backend/common/json_data.py
|
maura-dev/zc_plugin_company_sales_prospect
|
08a35a796169d137b9a111390cfe239900c3940a
|
[
"BSD-3-Clause"
] | 182
|
2021-08-28T08:43:30.000Z
|
2021-12-21T01:58:14.000Z
|
import json
import requests
from django.conf import settings
# from rest_framework.response import Response
PLUGIN_ID = settings.PLUGIN_ID
ORGANISATION_ID = settings.ORGANISATION_ID
ROOM_COLLECTION_NAME = settings.ROOM_COLLECTION_NAME
PROSPECTS_COLLECTION_NAME = settings.PROSPECTS_COLLECTION_NAME
PROSPECTS_ROOM_ID = settings.PROSPECTS_ROOM_ID
DEALS_ROOM_ID = settings.DEALS_ROOM_ID
ADDED_ROOM_COLLECTION_NAME = settings.ADDED_ROOM_COLLECTION_NAME
PLUGIN_NAME = settings.PLUGIN_NAME
DESCRIPTION = settings.DESCRIPTION
# Fetch all the test room - public rooms
# Fetch all the actual room - joined rooms
# def getsidebbarinfo(org,user):
# public_url = f"https://api.zuri.chat/data/read/{PLUGIN_ID}/{ROOM_COLLECTION_NAME}/{ORGANISATION_ID}"
# private_url = f"https://api.zuri.chat/data/read/{PLUGIN_ID}/{ADDED_ROOM_COLLECTION_NAME}/{ORGANISATION_ID}"
# public_r = requests.get(public_url)
# private_r = requests.get(private_url)
# public_response = json.loads(public_r.text)
# private_response = json.loads(private_r.text)
# if private_response['status']!=200:
# return Response({"name": PLUGIN_NAME,
# "description": DESCRIPTION,
# "plugin_id": PLUGIN_ID,
# "organisation_id": org,
# "user_id": user,
# "group_name": "SALES",
# "show_group": False,
# "Public rooms":public_response['data'],
# "Joined rooms":[]})
# else:
# return Response({"name": PLUGIN_NAME,
# "description": DESCRIPTION,
# "plugin_id": PLUGIN_ID,
# "organisation_id": org,
# "user_id": user,
# "group_name": "SALES",
# "show_group": False,
# "Public rooms":private_response['data'],
# "Joined rooms":private_response['data']})
def sidebardealsrooms():
"""[summary]
Returns:
[type]: [description]
"""
url = f"https://api.zuri.chat/data/read/{PLUGIN_ID}/{ROOM_COLLECTION_NAME}/{ORGANISATION_ID}?id={DEALS_ROOM_ID}"
res = requests.get(url)
response = json.loads(res.text)
if response["status"] == 200:
data = {"_id": response["data"]["_id"], "deals": response["data"]["deals"]}
return data
else:
data = {"error": "rooms not available"}
return data
def sidebarprospectsrooms():
"""[summary]
Returns:
[type]: [description]
"""
url = f"https://api.zuri.chat/data/read/{PLUGIN_ID}/{ROOM_COLLECTION_NAME}/{ORGANISATION_ID}?id={PROSPECTS_ROOM_ID}"
res = requests.get(url)
response = json.loads(res.text)
if response["status"] == 200:
data = {
"_id": response["data"]["_id"],
"prospects": response["data"]["prospects"],
}
return data
else:
data = {"error": "rooms not available"}
return data
def success_query():
"""[summary]
Returns:
[type]: [description]
"""
data = {
"name": settings.PLUGIN_NAME,
"plugin_id": settings.PLUGIN_ID,
"description": settings.DESCRIPTION,
"organisation_id": settings.ORGANISATION_ID,
"group_name": "Plugin",
"show_group": False,
"joined_rooms": [
{
"title": "prospects",
"id": "6139391dd941c451490f3f2f",
"url": "https://sales.zuri.chat/api/v1/prospects/",
"unread": 0,
"badge_type": "info",
"members": 15,
"icon": "spear.png",
"action": "open",
},
{
"title": "deals",
"id": "6139393ed941c451490f3f30",
"url": "https://sales.zuri.chat/api/v1/deals/",
"unread": 0,
"badge_type": "info",
"members": 2,
"icon": "spear.png",
"action": "open",
},
],
"public_rooms": [
{
"title": "prospects",
"url": "https://sales.zuri.chat/api/v1/prospects/",
"icon": "cdn.cloudflare.com/445345453345/hello.jpeg",
"action": "open",
"auto-join": True,
},
{
"title": "deals",
"url": "https://sales.zuri.chat/api/v1/deals/",
"icon": "cdn.cloudflare.com/445345453345/hello.jpeg",
"action": "open",
"auto-join": True,
},
],
}
return data
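# Illustrative use of the helpers above (variable names are placeholders, not
# part of this module): a sidebar view can try the live room data first and
# fall back to the static payload.
#     rooms = {"deals": sidebardealsrooms(), "prospects": sidebarprospectsrooms()}
#     fallback = success_query()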
| 30.605442
| 120
| 0.560569
|
bfeea00f39be3f5ae6eed5de581d6cd09ee440e0
| 3,503
|
py
|
Python
|
tests/test_utils.py
|
puetzk/packaging
|
64f82b53576497102de6b53bdeb79fc1c66c280a
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
puetzk/packaging
|
64f82b53576497102de6b53bdeb79fc1c66c280a
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
puetzk/packaging
|
64f82b53576497102de6b53bdeb79fc1c66c280a
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import pytest
from packaging.tags import Tag
from packaging.utils import (
InvalidSdistFilename,
InvalidWheelFilename,
canonicalize_name,
canonicalize_version,
parse_sdist_filename,
parse_wheel_filename,
)
from packaging.version import Version
@pytest.mark.parametrize(
("name", "expected"),
[
("foo", "foo"),
("Foo", "foo"),
("fOo", "foo"),
("foo.bar", "foo-bar"),
("Foo.Bar", "foo-bar"),
("Foo.....Bar", "foo-bar"),
("foo_bar", "foo-bar"),
("foo___bar", "foo-bar"),
("foo-bar", "foo-bar"),
("foo----bar", "foo-bar"),
],
)
def test_canonicalize_name(name, expected):
assert canonicalize_name(name) == expected
@pytest.mark.parametrize(
("version", "expected"),
[
(Version("1.4.0"), "1.4"),
("1.4.0", "1.4"),
("1.40.0", "1.40"),
("1.4.0.0.00.000.0000", "1.4"),
("1.0", "1"),
("1.0+abc", "1+abc"),
("1.0.dev0", "1.dev0"),
("1.0.post0", "1.post0"),
("1.0a0", "1a0"),
("1.0rc0", "1rc0"),
("100!0.0", "100!0"),
("1.0.1-test7", "1.0.1-test7"), # LegacyVersion is unchanged
],
)
def test_canonicalize_version(version, expected):
assert canonicalize_version(version) == expected
@pytest.mark.parametrize(
("filename", "name", "version", "build", "tags"),
[
(
"foo-1.0-py3-none-any.whl",
"foo",
Version("1.0"),
(),
{Tag("py3", "none", "any")},
),
(
"some_PACKAGE-1.0-py3-none-any.whl",
"some-package",
Version("1.0"),
(),
{Tag("py3", "none", "any")},
),
(
"foo-1.0-1000-py3-none-any.whl",
"foo",
Version("1.0"),
(1000, ""),
{Tag("py3", "none", "any")},
),
(
"foo-1.0-1000abc-py3-none-any.whl",
"foo",
Version("1.0"),
(1000, "abc"),
{Tag("py3", "none", "any")},
),
],
)
def test_parse_wheel_filename(filename, name, version, build, tags):
assert parse_wheel_filename(filename) == (name, version, build, tags)
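# For reference, the filename layout these cases exercise is
# {name}-{version}[-{build}]-{python tag}-{abi tag}-{platform tag}.whl,
# where the optional build segment must start with a digit (see the invalid
# cases below).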
@pytest.mark.parametrize(
("filename"),
[
("foo-1.0.whl"), # Missing tags
("foo-1.0-py3-none-any.wheel"), # Incorrect file extension (`.wheel`)
("foo__bar-1.0-py3-none-any.whl"), # Invalid name (`__`)
("foo#bar-1.0-py3-none-any.whl"), # Invalid name (`#`)
# Build number doesn't start with a digit (`abc`)
("foo-1.0-abc-py3-none-any.whl"),
("foo-1.0-200-py3-none-any-junk.whl"), # Too many dashes (`-junk`)
],
)
def test_parse_wheel_invalid_filename(filename):
with pytest.raises(InvalidWheelFilename):
parse_wheel_filename(filename)
@pytest.mark.parametrize(
("filename", "name", "version"), [("foo-1.0.tar.gz", "foo", Version("1.0"))]
)
def test_parse_sdist_filename(filename, name, version):
assert parse_sdist_filename(filename) == (name, version)
@pytest.mark.parametrize(("filename"), [("foo-1.0.zip"), ("foo1.0.tar.gz")])
def test_parse_sdist_invalid_filename(filename):
with pytest.raises(InvalidSdistFilename):
parse_sdist_filename(filename)
| 28.25
| 80
| 0.533543
|
5a60d1415994e60667ed89b0131810579d59be1d
| 2,843
|
py
|
Python
|
tslearn/datasets/cached.py
|
andriyor/tslearn
|
6c93071b385a89112b82799ae5870daeca1ab88b
|
[
"BSD-2-Clause"
] | 1,151
|
2020-04-30T08:24:14.000Z
|
2022-03-29T22:12:01.000Z
|
tslearn/datasets/cached.py
|
andriyor/tslearn
|
6c93071b385a89112b82799ae5870daeca1ab88b
|
[
"BSD-2-Clause"
] | 216
|
2017-05-10T23:33:10.000Z
|
2020-04-30T07:28:26.000Z
|
tslearn/datasets/cached.py
|
andriyor/tslearn
|
6c93071b385a89112b82799ae5870daeca1ab88b
|
[
"BSD-2-Clause"
] | 172
|
2017-06-02T11:53:50.000Z
|
2020-04-22T15:29:13.000Z
|
import numpy
import os
class CachedDatasets:
"""A convenience class to access cached time series datasets.
Note, that these *cached datasets* are statically included into *tslearn*
and are distinct from the ones in :class:`UCR_UEA_datasets`.
When using the Trace dataset, please cite [1]_.
See Also
--------
UCR_UEA_datasets : Provides more datasets and supports caching.
References
----------
.. [1] A. Bagnall, J. Lines, W. Vickers and E. Keogh, The UEA & UCR Time
Series Classification Repository, www.timeseriesclassification.com
"""
def __init__(self):
self.path = os.path.join(os.path.dirname(__file__),
"..",
".cached_datasets")
def list_datasets(self):
"""List cached datasets.
Examples
--------
        >>> from tslearn.datasets import CachedDatasets
        >>> "Trace" in CachedDatasets().list_datasets()
        True
Returns
-------
list of str:
            A list of names of all cached (univariate and multivariate)
            datasets.
"""
return [fname[:fname.rfind(".")]
for fname in os.listdir(self.path)
if fname.endswith(".npz")]
def load_dataset(self, dataset_name):
"""Load a cached dataset from its name.
Parameters
----------
dataset_name : str
Name of the dataset. Should be in the list returned by
:meth:`~list_datasets`.
Returns
-------
numpy.ndarray of shape (n_ts_train, sz, d) or None
Training time series. None if unsuccessful.
numpy.ndarray of integers with shape (n_ts_train, ) or None
Training labels. None if unsuccessful.
numpy.ndarray of shape (n_ts_test, sz, d) or None
Test time series. None if unsuccessful.
numpy.ndarray of integers with shape (n_ts_test, ) or None
Test labels. None if unsuccessful.
Examples
--------
>>> data_loader = CachedDatasets()
>>> X_train, y_train, X_test, y_test = data_loader.load_dataset(
... "Trace")
>>> print(X_train.shape)
(100, 275, 1)
>>> print(y_train.shape)
(100,)
Raises
------
IOError
If the dataset does not exist or cannot be read.
"""
npzfile = numpy.load(os.path.join(self.path, dataset_name + ".npz"))
X_train = npzfile["X_train"]
X_test = npzfile["X_test"]
y_train = npzfile["y_train"]
y_test = npzfile["y_test"]
return X_train, y_train, X_test, y_test
| 32.306818
| 79
| 0.558565
|
db01ba7869aba42f1979058ece66997bfc3f709c
| 35,526
|
py
|
Python
|
mkdocs/tests/config/config_options_tests.py
|
wmc1992/huggingface-accelerate-docs-zh
|
308851c27408a21ea80b4c23d3ab2bda548c4e8c
|
[
"BSD-2-Clause"
] | 1
|
2022-01-04T12:26:45.000Z
|
2022-01-04T12:26:45.000Z
|
mkdocs/tests/config/config_options_tests.py
|
wmc1992/huggingface-accelerate-docs-zh
|
308851c27408a21ea80b4c23d3ab2bda548c4e8c
|
[
"BSD-2-Clause"
] | null | null | null |
mkdocs/tests/config/config_options_tests.py
|
wmc1992/huggingface-accelerate-docs-zh
|
308851c27408a21ea80b4c23d3ab2bda548c4e8c
|
[
"BSD-2-Clause"
] | 1
|
2022-02-13T09:41:59.000Z
|
2022-02-13T09:41:59.000Z
|
import os
import sys
import textwrap
import unittest
from unittest.mock import patch
import mkdocs
from mkdocs.config import config_options
from mkdocs.config.base import Config
from mkdocs.tests.base import tempdir
from mkdocs.utils import yaml_load
class OptionallyRequiredTest(unittest.TestCase):
def test_empty(self):
option = config_options.OptionallyRequired()
value = option.validate(None)
self.assertEqual(value, None)
self.assertEqual(option.is_required(), False)
def test_required(self):
option = config_options.OptionallyRequired(required=True)
with self.assertRaises(config_options.ValidationError):
option.validate(None)
self.assertEqual(option.is_required(), True)
def test_required_no_default(self):
option = config_options.OptionallyRequired(required=True)
value = option.validate(2)
self.assertEqual(2, value)
def test_default(self):
option = config_options.OptionallyRequired(default=1)
value = option.validate(None)
self.assertEqual(1, value)
def test_replace_default(self):
option = config_options.OptionallyRequired(default=1)
value = option.validate(2)
self.assertEqual(2, value)
class TypeTest(unittest.TestCase):
def test_single_type(self):
option = config_options.Type(str)
value = option.validate("Testing")
self.assertEqual(value, "Testing")
def test_multiple_types(self):
option = config_options.Type((list, tuple))
value = option.validate([1, 2, 3])
self.assertEqual(value, [1, 2, 3])
value = option.validate((1, 2, 3))
self.assertEqual(value, (1, 2, 3))
with self.assertRaises(config_options.ValidationError):
option.validate({'a': 1})
def test_length(self):
option = config_options.Type(str, length=7)
value = option.validate("Testing")
self.assertEqual(value, "Testing")
with self.assertRaises(config_options.ValidationError):
option.validate("Testing Long")
class ChoiceTest(unittest.TestCase):
def test_valid_choice(self):
option = config_options.Choice(('python', 'node'))
value = option.validate('python')
self.assertEqual(value, 'python')
def test_invalid_choice(self):
option = config_options.Choice(('python', 'node'))
with self.assertRaises(config_options.ValidationError):
option.validate('go')
def test_invalid_choices(self):
self.assertRaises(ValueError, config_options.Choice, '')
self.assertRaises(ValueError, config_options.Choice, [])
self.assertRaises(ValueError, config_options.Choice, 5)
class DeprecatedTest(unittest.TestCase):
def test_deprecated_option_simple(self):
option = config_options.Deprecated()
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
option.validate('value')
def test_deprecated_option_message(self):
msg = 'custom message for {} key'
option = config_options.Deprecated(message=msg)
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(option.warnings[0], msg.format('d'))
def test_deprecated_option_with_type(self):
option = config_options.Deprecated(option_type=config_options.Type(str))
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
option.validate('value')
def test_deprecated_option_with_invalid_type(self):
option = config_options.Deprecated(option_type=config_options.Type(list))
config = {'d': 'string'}
option.pre_validation({'d': 'value'}, 'd')
self.assertEqual(len(option.warnings), 1)
with self.assertRaises(config_options.ValidationError):
option.validate(config['d'])
def test_removed_option(self):
option = config_options.Deprecated(removed=True, moved_to='foo')
with self.assertRaises(config_options.ValidationError):
option.pre_validation({'d': 'value'}, 'd')
def test_deprecated_option_with_type_undefined(self):
option = config_options.Deprecated(option_type=config_options.Type(str))
option.validate(None)
def test_deprecated_option_move(self):
option = config_options.Deprecated(moved_to='new')
config = {'old': 'value'}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'new': 'value'})
def test_deprecated_option_move_complex(self):
option = config_options.Deprecated(moved_to='foo.bar')
config = {'old': 'value'}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'foo': {'bar': 'value'}})
def test_deprecated_option_move_existing(self):
option = config_options.Deprecated(moved_to='foo.bar')
config = {'old': 'value', 'foo': {'existing': 'existing'}}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'foo': {'existing': 'existing', 'bar': 'value'}})
def test_deprecated_option_move_invalid(self):
option = config_options.Deprecated(moved_to='foo.bar')
config = {'old': 'value', 'foo': 'wrong type'}
option.pre_validation(config, 'old')
self.assertEqual(len(option.warnings), 1)
self.assertEqual(config, {'old': 'value', 'foo': 'wrong type'})
class IpAddressTest(unittest.TestCase):
def test_valid_address(self):
addr = '127.0.0.1:8000'
option = config_options.IpAddress()
value = option.validate(addr)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, '127.0.0.1')
self.assertEqual(value.port, 8000)
def test_valid_IPv6_address(self):
addr = '::1:8000'
option = config_options.IpAddress()
value = option.validate(addr)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, '::1')
self.assertEqual(value.port, 8000)
def test_named_address(self):
addr = 'localhost:8000'
option = config_options.IpAddress()
value = option.validate(addr)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, 'localhost')
self.assertEqual(value.port, 8000)
def test_default_address(self):
addr = '127.0.0.1:8000'
option = config_options.IpAddress(default=addr)
value = option.validate(None)
self.assertEqual(str(value), addr)
self.assertEqual(value.host, '127.0.0.1')
self.assertEqual(value.port, 8000)
@unittest.skipIf(
sys.version_info < (3, 9, 5),
"Leading zeros allowed in IP addresses before Python3.9.5",
)
def test_invalid_leading_zeros(self):
addr = '127.000.000.001:8000'
option = config_options.IpAddress(default=addr)
with self.assertRaises(config_options.ValidationError):
option.validate(addr)
def test_invalid_address_range(self):
option = config_options.IpAddress()
with self.assertRaises(config_options.ValidationError):
option.validate('277.0.0.1:8000')
def test_invalid_address_format(self):
option = config_options.IpAddress()
with self.assertRaises(config_options.ValidationError):
option.validate('127.0.0.18000')
def test_invalid_address_type(self):
option = config_options.IpAddress()
with self.assertRaises(config_options.ValidationError):
option.validate(123)
def test_invalid_address_port(self):
option = config_options.IpAddress()
with self.assertRaises(config_options.ValidationError):
option.validate('127.0.0.1:foo')
def test_invalid_address_missing_port(self):
option = config_options.IpAddress()
with self.assertRaises(config_options.ValidationError):
option.validate('127.0.0.1')
def test_unsupported_address(self):
option = config_options.IpAddress()
value = option.validate('0.0.0.0:8000')
option.post_validation({'dev_addr': value}, 'dev_addr')
self.assertEqual(len(option.warnings), 1)
def test_unsupported_IPv6_address(self):
option = config_options.IpAddress()
value = option.validate(':::8000')
option.post_validation({'dev_addr': value}, 'dev_addr')
self.assertEqual(len(option.warnings), 1)
def test_invalid_IPv6_address(self):
# The server will error out with this so we treat it as invalid.
option = config_options.IpAddress()
with self.assertRaises(config_options.ValidationError):
option.validate('[::1]:8000')
class URLTest(unittest.TestCase):
def test_valid_url(self):
option = config_options.URL()
self.assertEqual(option.validate("https://mkdocs.org"), "https://mkdocs.org")
self.assertEqual(option.validate(""), "")
def test_valid_url_is_dir(self):
option = config_options.URL(is_dir=True)
self.assertEqual(option.validate("http://mkdocs.org/"), "http://mkdocs.org/")
self.assertEqual(option.validate("https://mkdocs.org"), "https://mkdocs.org/")
def test_invalid_url(self):
option = config_options.URL()
with self.assertRaises(config_options.ValidationError):
option.validate("www.mkdocs.org")
with self.assertRaises(config_options.ValidationError):
option.validate("//mkdocs.org/test")
with self.assertRaises(config_options.ValidationError):
option.validate("http:/mkdocs.org/")
with self.assertRaises(config_options.ValidationError):
option.validate("/hello/")
def test_invalid_type(self):
option = config_options.URL()
with self.assertRaises(config_options.ValidationError):
option.validate(1)
class RepoURLTest(unittest.TestCase):
def test_repo_name_github(self):
option = config_options.RepoURL()
config = {'repo_url': "https://github.com/mkdocs/mkdocs"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "GitHub")
def test_repo_name_bitbucket(self):
option = config_options.RepoURL()
config = {'repo_url': "https://bitbucket.org/gutworth/six/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "Bitbucket")
def test_repo_name_gitlab(self):
option = config_options.RepoURL()
config = {'repo_url': "https://gitlab.com/gitlab-org/gitlab-ce/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "GitLab")
def test_repo_name_custom(self):
option = config_options.RepoURL()
config = {'repo_url': "https://launchpad.net/python-tuskarclient"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['repo_name'], "Launchpad")
def test_edit_uri_github(self):
option = config_options.RepoURL()
config = {'repo_url': "https://github.com/mkdocs/mkdocs"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'edit/master/docs/')
def test_edit_uri_bitbucket(self):
option = config_options.RepoURL()
config = {'repo_url': "https://bitbucket.org/gutworth/six/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'src/default/docs/')
def test_edit_uri_gitlab(self):
option = config_options.RepoURL()
config = {'repo_url': "https://gitlab.com/gitlab-org/gitlab-ce/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'edit/master/docs/')
def test_edit_uri_custom(self):
option = config_options.RepoURL()
config = {'repo_url': "https://launchpad.net/python-tuskarclient"}
option.post_validation(config, 'repo_url')
self.assertEqual(config.get('edit_uri'), '')
def test_repo_name_custom_and_empty_edit_uri(self):
option = config_options.RepoURL()
config = {'repo_url': "https://github.com/mkdocs/mkdocs",
'repo_name': 'mkdocs'}
option.post_validation(config, 'repo_url')
self.assertEqual(config.get('edit_uri'), 'edit/master/docs/')
class DirTest(unittest.TestCase):
def test_valid_dir(self):
d = os.path.dirname(__file__)
option = config_options.Dir(exists=True)
value = option.validate(d)
self.assertEqual(d, value)
def test_missing_dir(self):
d = os.path.join("not", "a", "real", "path", "I", "hope")
option = config_options.Dir()
value = option.validate(d)
self.assertEqual(os.path.abspath(d), value)
def test_missing_dir_but_required(self):
d = os.path.join("not", "a", "real", "path", "I", "hope")
option = config_options.Dir(exists=True)
with self.assertRaises(config_options.ValidationError):
option.validate(d)
def test_file(self):
d = __file__
option = config_options.Dir(exists=True)
with self.assertRaises(config_options.ValidationError):
option.validate(d)
def test_incorrect_type_attribute_error(self):
option = config_options.Dir()
with self.assertRaises(config_options.ValidationError):
option.validate(1)
def test_incorrect_type_type_error(self):
option = config_options.Dir()
with self.assertRaises(config_options.ValidationError):
option.validate([])
def test_dir_unicode(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': 'юникод'
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 0)
self.assertEqual(len(warns), 0)
self.assertIsInstance(cfg['dir'], str)
def test_dir_filesystemencoding(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': 'Übersicht'.encode(encoding=sys.getfilesystemencoding())
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
# str does not include byte strings so validation fails
self.assertEqual(len(fails), 1)
self.assertEqual(len(warns), 0)
def test_dir_bad_encoding_fails(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': 'юникод'.encode(encoding='ISO 8859-5')
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 1)
self.assertEqual(len(warns), 0)
def test_config_dir_prepended(self):
base_path = os.path.abspath('.')
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(base_path, 'mkdocs.yml'),
)
test_config = {
'dir': 'foo'
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 0)
self.assertEqual(len(warns), 0)
self.assertIsInstance(cfg['dir'], str)
self.assertEqual(cfg['dir'], os.path.join(base_path, 'foo'))
def test_dir_is_config_dir_fails(self):
cfg = Config(
[('dir', config_options.Dir())],
config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
)
test_config = {
'dir': '.'
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 1)
self.assertEqual(len(warns), 0)
class ListOfPathsTest(unittest.TestCase):
def test_valid_path(self):
paths = [os.path.dirname(__file__)]
option = config_options.ListOfPaths()
option.validate(paths)
def test_missing_path(self):
paths = [os.path.join("does", "not", "exist", "i", "hope")]
option = config_options.ListOfPaths()
with self.assertRaises(config_options.ValidationError):
option.validate(paths)
def test_empty_list(self):
paths = []
option = config_options.ListOfPaths()
option.validate(paths)
def test_non_list(self):
paths = os.path.dirname(__file__)
option = config_options.ListOfPaths()
with self.assertRaises(config_options.ValidationError):
option.validate(paths)
def test_file(self):
paths = [__file__]
option = config_options.ListOfPaths()
option.validate(paths)
def test_paths_localized_to_config(self):
base_path = os.path.abspath('.')
cfg = Config(
[('watch', config_options.ListOfPaths())],
config_file_path=os.path.join(base_path, 'mkdocs.yml'),
)
test_config = {
'watch': ['foo']
}
cfg.load_dict(test_config)
fails, warns = cfg.validate()
self.assertEqual(len(fails), 0)
self.assertEqual(len(warns), 0)
self.assertIsInstance(cfg['watch'], list)
self.assertEqual(cfg['watch'], [os.path.join(base_path, 'foo')])
class SiteDirTest(unittest.TestCase):
def validate_config(self, config):
""" Given a config with values for site_dir and doc_dir, run site_dir post_validation. """
site_dir = config_options.SiteDir()
docs_dir = config_options.Dir()
fname = os.path.join(os.path.abspath('..'), 'mkdocs.yml')
config['docs_dir'] = docs_dir.validate(config['docs_dir'])
config['site_dir'] = site_dir.validate(config['site_dir'])
schema = [
('site_dir', site_dir),
('docs_dir', docs_dir),
]
cfg = Config(schema, fname)
cfg.load_dict(config)
failed, warned = cfg.validate()
if failed:
raise config_options.ValidationError(failed)
return True
def test_doc_dir_in_site_dir(self):
j = os.path.join
# The parent dir is not the same on every system, so use the actual dir name
parent_dir = mkdocs.__file__.split(os.sep)[-3]
test_configs = (
{'docs_dir': j('site', 'docs'), 'site_dir': 'site'},
{'docs_dir': 'docs', 'site_dir': '.'},
{'docs_dir': '.', 'site_dir': '.'},
{'docs_dir': 'docs', 'site_dir': ''},
{'docs_dir': '', 'site_dir': ''},
{'docs_dir': j('..', parent_dir, 'docs'), 'site_dir': 'docs'},
{'docs_dir': 'docs', 'site_dir': '/'}
)
for test_config in test_configs:
with self.assertRaises(config_options.ValidationError):
self.validate_config(test_config)
def test_site_dir_in_docs_dir(self):
j = os.path.join
test_configs = (
{'docs_dir': 'docs', 'site_dir': j('docs', 'site')},
{'docs_dir': '.', 'site_dir': 'site'},
{'docs_dir': '', 'site_dir': 'site'},
{'docs_dir': '/', 'site_dir': 'site'},
)
for test_config in test_configs:
with self.assertRaises(config_options.ValidationError):
self.validate_config(test_config)
def test_common_prefix(self):
""" Legitimate settings with common prefixes should not fail validation. """
test_configs = (
{'docs_dir': 'docs', 'site_dir': 'docs-site'},
{'docs_dir': 'site-docs', 'site_dir': 'site'},
)
for test_config in test_configs:
assert self.validate_config(test_config)
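    # Taken together, these cases pin down the rule being enforced: docs_dir
    # and site_dir may share a name prefix, but neither directory may live
    # inside the other.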
class ThemeTest(unittest.TestCase):
def test_theme_as_string(self):
option = config_options.Theme()
value = option.validate("mkdocs")
self.assertEqual({'name': 'mkdocs'}, value)
def test_uninstalled_theme_as_string(self):
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.validate("mkdocs2")
def test_theme_default(self):
option = config_options.Theme(default='mkdocs')
value = option.validate(None)
self.assertEqual({'name': 'mkdocs'}, value)
def test_theme_as_simple_config(self):
config = {
'name': 'mkdocs'
}
option = config_options.Theme()
value = option.validate(config)
self.assertEqual(config, value)
def test_theme_as_complex_config(self):
config = {
'name': 'mkdocs',
'custom_dir': 'custom',
'static_templates': ['sitemap.html'],
'show_sidebar': False
}
option = config_options.Theme()
value = option.validate(config)
self.assertEqual(config, value)
def test_theme_name_is_none(self):
config = {
'name': None
}
option = config_options.Theme()
value = option.validate(config)
self.assertEqual(config, value)
def test_theme_config_missing_name(self):
config = {
'custom_dir': 'custom',
}
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.validate(config)
def test_uninstalled_theme_as_config(self):
config = {
'name': 'mkdocs2'
}
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.validate(config)
def test_theme_invalid_type(self):
config = ['mkdocs2']
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.validate(config)
def test_post_validation_none_theme_name_and_missing_custom_dir(self):
config = {
'theme': {
'name': None
}
}
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.post_validation(config, 'theme')
@tempdir()
def test_post_validation_inexisting_custom_dir(self, abs_base_path):
config = {
'theme': {
'name': None,
'custom_dir': abs_base_path + '/inexisting_custom_dir',
}
}
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.post_validation(config, 'theme')
def test_post_validation_locale_none(self):
config = {
'theme': {
'name': 'mkdocs',
'locale': None
}
}
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.post_validation(config, 'theme')
def test_post_validation_locale_invalid_type(self):
config = {
'theme': {
'name': 'mkdocs',
'locale': 0
}
}
option = config_options.Theme()
with self.assertRaises(config_options.ValidationError):
option.post_validation(config, 'theme')
def test_post_validation_locale(self):
config = {
'theme': {
'name': 'mkdocs',
'locale': 'fr'
}
}
option = config_options.Theme()
option.post_validation(config, 'theme')
self.assertEqual('fr', config['theme']['locale'].language)
class NavTest(unittest.TestCase):
def test_old_format(self):
option = config_options.Nav()
with self.assertRaises(config_options.ValidationError) as cm:
option.validate([['index.md']])
self.assertEqual(str(cm.exception), "Expected nav item to be a string or dict, got a list: ['index.md']")
def test_provided_dict(self):
option = config_options.Nav()
value = option.validate([
'index.md',
{"Page": "page.md"}
])
self.assertEqual(['index.md', {'Page': 'page.md'}], value)
option.post_validation({'extra_stuff': []}, 'extra_stuff')
self.assertEqual(option.warnings, [])
def test_provided_empty(self):
option = config_options.Nav()
value = option.validate([])
self.assertEqual(None, value)
option.post_validation({'extra_stuff': []}, 'extra_stuff')
self.assertEqual(option.warnings, [])
def test_normal_nav(self):
nav = yaml_load(textwrap.dedent('''\
- Home: index.md
- getting-started.md
- User Guide:
- Overview: user-guide/index.md
- Installation: user-guide/installation.md
''').encode())
option = config_options.Nav()
self.assertEqual(option.validate(nav), nav)
self.assertEqual(option.warnings, [])
def test_invalid_type_dict(self):
option = config_options.Nav()
with self.assertRaises(config_options.ValidationError) as cm:
option.validate({})
self.assertEqual(str(cm.exception), "Expected nav to be a list, got a dict: {}")
def test_invalid_type_int(self):
option = config_options.Nav()
with self.assertRaises(config_options.ValidationError) as cm:
option.validate(5)
self.assertEqual(str(cm.exception), "Expected nav to be a list, got a int: 5")
def test_invalid_item_int(self):
option = config_options.Nav()
with self.assertRaises(config_options.ValidationError) as cm:
option.validate([1])
self.assertEqual(str(cm.exception), "Expected nav item to be a string or dict, got a int: 1")
def test_invalid_item_none(self):
option = config_options.Nav()
with self.assertRaises(config_options.ValidationError) as cm:
option.validate([None])
self.assertEqual(str(cm.exception), "Expected nav item to be a string or dict, got None")
def test_invalid_children_config_int(self):
option = config_options.Nav()
with self.assertRaises(config_options.ValidationError) as cm:
option.validate([{"foo.md": [{"bar.md": 1}]}])
self.assertEqual(str(cm.exception), "Expected nav to be a list, got a int: 1")
def test_invalid_children_config_none(self):
option = config_options.Nav()
with self.assertRaises(config_options.ValidationError) as cm:
option.validate([{"foo.md": None}])
self.assertEqual(str(cm.exception), "Expected nav to be a list, got None")
def test_invalid_children_empty_dict(self):
option = config_options.Nav()
nav = ['foo', {}]
with self.assertRaises(config_options.ValidationError) as cm:
option.validate(nav)
self.assertEqual(str(cm.exception), "Expected nav item to be a dict of size 1, got a dict: {}")
def test_invalid_nested_list(self):
option = config_options.Nav()
nav = [{'aaa': [[{"bbb": "user-guide/index.md"}]]}]
with self.assertRaises(config_options.ValidationError) as cm:
option.validate(nav)
msg = "Expected nav item to be a string or dict, got a list: [{'bbb': 'user-guide/index.md'}]"
self.assertEqual(str(cm.exception), msg)
def test_invalid_children_oversized_dict(self):
option = config_options.Nav()
nav = [{"aaa": [{"bbb": "user-guide/index.md", "ccc": "user-guide/installation.md"}]}]
with self.assertRaises(config_options.ValidationError) as cm:
option.validate(nav)
msg = "Expected nav item to be a dict of size 1, got dict with keys ('bbb', 'ccc')"
self.assertEqual(str(cm.exception), msg)
def test_warns_for_dict(self):
option = config_options.Nav()
option.validate([{"a": {"b": "c.md", "d": "e.md"}}])
self.assertEqual(option.warnings, ["Expected nav to be a list, got dict with keys ('b', 'd')"])
class PrivateTest(unittest.TestCase):
def test_defined(self):
option = config_options.Private()
with self.assertRaises(config_options.ValidationError):
option.validate('somevalue')
class MarkdownExtensionsTest(unittest.TestCase):
@patch('markdown.Markdown')
def test_simple_list(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': ['foo', 'bar']
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar'],
'mdx_configs': {}
}, config)
@patch('markdown.Markdown')
def test_list_dicts(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
{'foo': {'foo_option': 'foo value'}},
{'bar': {'bar_option': 'bar value'}},
{'baz': None}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar', 'baz'],
'mdx_configs': {
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'}
}
}, config)
@patch('markdown.Markdown')
def test_mixed_list(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
'foo',
{'bar': {'bar_option': 'bar value'}}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar'],
'mdx_configs': {
'bar': {'bar_option': 'bar value'}
}
}, config)
@patch('markdown.Markdown')
def test_dict_of_dicts(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': {
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'},
'baz': {}
}
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo', 'bar', 'baz'],
'mdx_configs': {
'foo': {'foo_option': 'foo value'},
'bar': {'bar_option': 'bar value'}
}
}, config)
@patch('markdown.Markdown')
def test_builtins(self, mockMd):
option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
config = {
'markdown_extensions': ['foo', 'bar']
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['meta', 'toc', 'foo', 'bar'],
'mdx_configs': {}
}, config)
def test_duplicates(self):
option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
config = {
'markdown_extensions': ['meta', 'toc']
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['meta', 'toc'],
'mdx_configs': {}
}, config)
def test_builtins_config(self):
option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
config = {
'markdown_extensions': [
{'toc': {'permalink': True}}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['meta', 'toc'],
'mdx_configs': {'toc': {'permalink': True}}
}, config)
@patch('markdown.Markdown')
def test_configkey(self, mockMd):
option = config_options.MarkdownExtensions(configkey='bar')
config = {
'markdown_extensions': [
{'foo': {'foo_option': 'foo value'}}
]
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': ['foo'],
'bar': {
'foo': {'foo_option': 'foo value'}
}
}, config)
def test_none(self):
option = config_options.MarkdownExtensions(default=[])
config = {
'markdown_extensions': None
}
config['markdown_extensions'] = option.validate(config['markdown_extensions'])
option.post_validation(config, 'markdown_extensions')
self.assertEqual({
'markdown_extensions': [],
'mdx_configs': {}
}, config)
@patch('markdown.Markdown')
def test_not_list(self, mockMd):
option = config_options.MarkdownExtensions()
with self.assertRaises(config_options.ValidationError):
option.validate('not a list')
@patch('markdown.Markdown')
def test_invalid_config_option(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
{'foo': 'not a dict'}
]
}
with self.assertRaises(config_options.ValidationError):
option.validate(config['markdown_extensions'])
@patch('markdown.Markdown')
def test_invalid_config_item(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
['not a dict']
]
}
with self.assertRaises(config_options.ValidationError):
option.validate(config['markdown_extensions'])
@patch('markdown.Markdown')
def test_invalid_dict_item(self, mockMd):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': [
{'key1': 'value', 'key2': 'too many keys'}
]
}
with self.assertRaises(config_options.ValidationError):
option.validate(config['markdown_extensions'])
def test_unknown_extension(self):
option = config_options.MarkdownExtensions()
config = {
'markdown_extensions': ['unknown']
}
with self.assertRaises(config_options.ValidationError):
option.validate(config['markdown_extensions'])
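The tests above show how MarkdownExtensions validation flattens a mixed extensions list into plain extension names plus an mdx_configs mapping. As a minimal sketch of how those two validated values are typically consumed downstream (the variable names are placeholders; markdown.Markdown with extensions/extension_configs is the standard python-markdown call, not something defined in this test module):

# Illustrative sketch, not part of the test module: feed the validated
# extension names and their per-extension configs into python-markdown.
import markdown

validated_names = ['meta', 'toc']                 # e.g. config['markdown_extensions']
extension_configs = {'toc': {'permalink': True}}  # e.g. config['mdx_configs']
md = markdown.Markdown(extensions=validated_names, extension_configs=extension_configs)
html = md.convert('# A heading')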
| 34.126801
| 113
| 0.613016
|
7b140c06fc71663447eaff17a05271e0256189a1
| 209
|
py
|
Python
|
py2app_tests/basic_app_with_plugin/setup.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 81
|
2015-11-29T12:17:39.000Z
|
2021-08-02T07:06:51.000Z
|
py2app_tests/basic_app_with_plugin/setup.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 11
|
2016-10-23T16:34:10.000Z
|
2022-01-30T05:45:54.000Z
|
py2app_tests/basic_app_with_plugin/setup.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 21
|
2016-01-25T18:46:31.000Z
|
2021-01-08T17:38:03.000Z
|
from setuptools import setup
setup(
name='BasicApp',
app=['main.py'],
options=dict(
py2app=dict(
include_plugins=['dummy1.qlgenerator', 'dummy2.mdimporter'],
)
)
)
| 17.416667
| 72
| 0.574163
|
f692995960b6a5e6ccf264d3c194143612da3765
| 615
|
py
|
Python
|
proyectos_de_ley/stats/migrations/0001_initial.py
|
napsta32/proyectos_de_ley
|
63b7737e194a0958f9e95ca92773887000867bc7
|
[
"MIT"
] | 12
|
2016-07-27T06:23:52.000Z
|
2021-09-08T16:09:52.000Z
|
proyectos_de_ley/stats/migrations/0001_initial.py
|
napsta32/proyectos_de_ley
|
63b7737e194a0958f9e95ca92773887000867bc7
|
[
"MIT"
] | 58
|
2015-01-18T14:53:45.000Z
|
2021-02-19T06:27:19.000Z
|
proyectos_de_ley/stats/migrations/0001_initial.py
|
napsta32/proyectos_de_ley
|
63b7737e194a0958f9e95ca92773887000867bc7
|
[
"MIT"
] | 10
|
2015-01-28T02:20:38.000Z
|
2020-11-22T06:23:26.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ComisionCount',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('count', models.IntegerField()),
('comision', models.CharField(max_length=250)),
],
options={
},
bases=(models.Model,),
),
]
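For orientation, the CreateModel operation above corresponds to a plain Django model; a hypothetical sketch of the matching stats/models.py entry (field names are taken from the migration, but the class body itself is an assumption and may differ from the real project):

# Hypothetical model mirroring the ComisionCount migration above.
from django.db import models

class ComisionCount(models.Model):
    count = models.IntegerField()
    comision = models.CharField(max_length=250)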
| 24.6
| 114
| 0.547967
|
77fef25acf03e45ecec9b6d5e666515b3dbedfe2
| 7,674
|
py
|
Python
|
docs/src/conf.py
|
Xilinx/graphanalytics
|
7b8923b6494bc11708619edeac8d96eccfc76413
|
[
"BSD-3-Clause"
] | 11
|
2021-01-31T17:50:55.000Z
|
2022-03-23T12:40:39.000Z
|
docs/src/conf.py
|
Xilinx/graphanalytics
|
7b8923b6494bc11708619edeac8d96eccfc76413
|
[
"BSD-3-Clause"
] | 1
|
2021-03-11T22:02:19.000Z
|
2021-03-25T17:09:37.000Z
|
docs/src/conf.py
|
Xilinx/graphanalytics
|
7b8923b6494bc11708619edeac8d96eccfc76413
|
[
"BSD-3-Clause"
] | 2
|
2021-03-02T18:41:47.000Z
|
2021-11-14T06:54:08.000Z
|
#
# Copyright 2021 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import recommonmark
from recommonmark.transform import AutoStructify
import shutil
theme_dir = '/proj/gdba/ywu/ghe/sphinx_xilinx_theme'
tools_dir = os.path.abspath(os.path.join(os.path.dirname(shutil.which('doxyrest')), '..'))
sys.path.insert(1, os.path.join(tools_dir, 'share/doxyrest/sphinx'))
# -- Project information -----------------------------------------------------
project = 'Xilinx Alveo Graph Analytics Products'
copyright = '2021, Xilinx'
author = 'Xilinx'
# The short X.Y version
version = '1.4'
# The full version, including alpha/beta/rc tags
release = 'v1.4'
# For bottom-left nav
try:
html_context
except NameError:
html_context = dict()
#html_context['display_lower_left'] = True
html_context['current_version'] = version
html_context['versions'] = ['1.4']
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'recommonmark',
# Auto-generate section labels.
'sphinx.ext.autosectionlabel',
'sphinx.ext.imgmath',
'doxyrest',
#'cpplexer',
]
# Prefix document path to section labels, otherwise autogenerated labels would look like 'heading'
# rather than 'path/to/file:heading'
autosectionlabel_prefix_document = True
# Configure 'Edit on GitHub' extension
edit_on_github_project = 'Xilinx/<ENTER GITHUB REPO PATH HERE>'
edit_on_github_branch = 'master/<ENTER DOC FOLDER HIERARCHY HERE>'
html_copy_source = False
html_show_sourcelink = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md',
'rst/namespace_xf*.rst', 'rst/struct_xf*.rst', 'rst/enum_xf*.rst',
'rst/namespace_std.rst',
'rst/index.rst', 'rst/global.rst', 'rst/gqe_join.rst', 'cosinesim/page_index.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'xilinx'
html_theme_path = [os.path.join(theme_dir, '_themes')]
html_last_updated_fmt = '%B %d, %Y'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'xf_database'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'xf_database.tex', 'Vitis Database Library',
'Xilinx', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'xf_database', 'Vitis Database Library',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'xf_database', 'Vitis Database Library',
author, 'Xilinx', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# At the bottom of conf.py
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
| 31.068826
| 102
| 0.667579
|
6c7877a121f0754b3b0be3aa438721eb3e302e44
| 9,182
|
py
|
Python
|
getInstance.py
|
rdustinb/fpga_code_ops
|
a4176067b007b13fb83c236eac7d73399769d8f7
|
[
"MIT"
] | 1
|
2018-04-12T15:26:16.000Z
|
2018-04-12T15:26:16.000Z
|
getInstance.py
|
rdustinb/GAPy
|
a4176067b007b13fb83c236eac7d73399769d8f7
|
[
"MIT"
] | 3
|
2015-10-23T18:17:59.000Z
|
2016-03-28T18:06:29.000Z
|
getInstance.py
|
rdustinb/GAPy
|
a4176067b007b13fb83c236eac7d73399769d8f7
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3.4
import getopt
import sys
from os import getcwd
from os import getenv
try:
import pyperclip
except ImportError:
print("pyperclip (>= v1.3) is required for this tool. Please run:\n")
print("\tpip install pyperclip\n")
print("Also please note that on Gnome, xclip is required and on KDE klipper\n")
print("is required for pyperclip to work correctly.\n")
sys.exit()
"""
The purpose of this script is to provide a quick pasteable instance
code block from a target HDL file. The designer will launch this
script with a file as an argument and this script will parse the
file and push the instance code into the clipboard of the OS for easy
pasting into another code file.
"""
###################################################
# SystemVerilog Stripping Function
def stripSv(line,portFlag,bits):
"""
This function removes specific keywords from different lines of an
SV file.
"""
portDict = {
1 : "in",
2 : "out",
3 : "inout"
}
if("//" in line):
line,*blah = line.split("//")
if("module" in line):
line = line.replace("module", "")
if("parameter" in line):
line = line.replace("parameter", "")
if("input" in line):
line = line.replace("input", "")
portFlag = 1
if("output" in line):
line = line.replace("output", "")
portFlag = 2
if("inout" in line):
line = line.replace("inout", "")
portFlag = 3
if("reg" in line):
line = line.replace("reg", "")
if("wire" in line):
line = line.replace("wire", "")
if("logic" in line):
line = line.replace("logic", "")
if(" " in line):
line = line.replace(" ", "")
if("=" in line):
line = line.replace("=", ",%")
line,*blah = line.split("%")
if("[" in line):
line = line.replace("[", "%")
line = line.replace("]", "%")
line = line.split("%")
newLine = ""
newannotate = ("// %s "%(portDict[portFlag]))
for part in line:
if(not(":" in part)):
if("," in part):
part = part.replace(",","")
newLine = newLine+part
else:
newannotate += ("[%s]"%(part))
line = newLine+newannotate+","
elif(portFlag != 0):
line = line.replace(",", "// %s [1],"%(portDict[portFlag]))
if(";" in line):
line = line.replace(");", "// %s [1]);"%(portDict[portFlag]))
return line,portFlag,bits
def structureSvInstance(stackedLine, tabSpace, alignCol, alignCom):
"""
This function restructures an input "stacked line" module declaration
from a .sv file. Expecting a module declaration on one line in the form
of:
blockName#(param1,param2,param3,...)(port1,port2,port3,...)
or:
blockName(port1,port2,port3,...)
It will return a string of the form:
blockName blockName_0 #(
.param1 (param1),
.param2 (param2),
.param3 (param3),
...
.paramN (paramN)
)(
.port1 (port1), // in 1 bit
.port2 (port2), // out 3 bits
.port3 (port3), // in Multidimensional Bus
...
.portN (portN) // inout 3 bits
);
or:
blockName blockName_0 (
.port1 (port1),
.port2 (port2),
.port3 (port3),
...
.portN (portN)
);
"""
newStackedPorts = ""
# There are parameters in this module
if("#" in stackedLine):
modName,remainder = stackedLine.split("#(")
paramList,remainder = remainder.split(")(")
paramList = paramList.split(",")
newParams = ""
for param in paramList:
if(newParams == ""):
newParams = (" "*tabSpace)
newParams = newParams+"."+param
newParams = newParams+(" "*(alignCol-len(param)))
newParams = newParams+"("+param+")"
else:
newParams = newParams+",\n"
newParams = newParams+(" "*tabSpace)
newParams = newParams+"."+param
newParams = newParams+(" "*(alignCol-len(param)))
newParams = newParams+"("+param+")"
paramList = newParams
portList,remainder = remainder.split(")")
portList = portList.split(",")
newPorts = ""
nextAnnotate = ""
afterPortLen = 0
for ports in portList:
# Rip Out the annotation
ports,annotate = ports.split("//")
annotate = "//"+annotate
if(newPorts == ""):
newPorts = (" "*tabSpace)
newPorts = newPorts+"."+ports
newPorts = newPorts+(" "*(alignCol-len(ports)))
newPorts = newPorts+"("+ports+")"
afterPortLen = len(ports)+2
else:
newPorts = newPorts+(",")
newPorts = newPorts+(" "*(alignCom-afterPortLen))
newPorts = newPorts+("%s\n"%nextAnnotate)
newPorts = newPorts+(" "*tabSpace)
newPorts = newPorts+"."+ports
newPorts = newPorts+(" "*(alignCol-len(ports)))
newPorts = newPorts+"("+ports+")"
afterPortLen = len(ports)+2
nextAnnotate = annotate
portList = newPorts+(" "*(alignCom-afterPortLen+1))
portList = portList+("%s"%nextAnnotate)
newStackedPorts = modName+" #(\n"+paramList+"\n) "+modName+"_0 (\n"+portList+"\n);"
stackedLine = newStackedPorts
else:
modName,remainder = stackedLine.split("(")
modName = modName+" "+modName+"_0 ("
portList,remainder = remainder.split(")")
portList = portList.split(",")
newPorts = ""
for ports in portList:
if(newPorts == ""):
newPorts = (" "*tabSpace)
newPorts = newPorts+"."+ports
newPorts = newPorts+(" "*(alignCol-len(ports)))
newPorts = newPorts+"("+ports+")"
else:
newPorts = newPorts+",\n"
newPorts = newPorts+(" "*tabSpace)
newPorts = newPorts+"."+ports
newPorts = newPorts+(" "*(alignCol-len(ports)))
newPorts = newPorts+"("+ports+")"
portList = newPorts
newStackedPorts = modName+"\n"+portList+"\n);"
return newStackedPorts
###################################################
# User Parse Function
def userParse(fileName, tabSpace, alignCol, alignCom):
"""
Core of the script. Parses the user-specified HDL file and creates an
instance block to be pasted into another HDL file.
pyperclip.copy("No options entered")
"""
instanceBeginning = 0
stackedLine = ""
portFlag = 0
bits = 0
with open(fileName, "r") as fh:
for line in fh:
if("module" in line):
instanceBeginning = 1
stackedLine,portFlag,bits = stripSv(line.strip(),portFlag,bits)
if((")" in line) and not("#" in line)):
instanceBeginning = 0
break
elif(instanceBeginning == 1):
if(");" in line):
instanceBeginning = 0
new_sl,portFlag,bits = stripSv(line.strip(),portFlag,bits)
stackedLine = stackedLine+new_sl
break
else:
new_sl,portFlag,bits = stripSv(line.strip(),portFlag,bits)
stackedLine = stackedLine+new_sl
# Final String Tweaks
if(",)" in stackedLine):
stackedLine = stackedLine.replace(",)", ")")
stackedLine = structureSvInstance(stackedLine,tabSpace,alignCol,alignCom)
pyperclip.copy(stackedLine)
#print(stackedLine)
###################################################
# Test Parse Function
def testParse():
"""
Test Function of the script. Verifies the script works as expected.
"""
svFileList = [
"tests/SVFile1.sv",
"tests/SVFile2.sv",
"tests/SVFile3.sv",
"tests/SVFile4.sv",
"tests/SVFile5.sv",
"tests/SVFile6.sv",
"tests/SVFile7.sv",
"tests/SVFile8.sv",
"tests/SVFile9.sv",
"tests/SVFile10.sv"
]
for fileName in svFileList:
print("\n\nTesting variation: %s"%fileName)
userParse(fileName, 2, 32, 10)
###################################################
# Get the input from the terminal
try:
args, opts = getopt.getopt(sys.argv[1:], "", ["test","path"])
if(args == [] and opts == [] or len(opts) != 4):
print("Invalid number of options entered. Please execute using the following")
print("format:\n")
print(" ./getInstance.py path/to/file.sv <tabSpace> <column align> <comment align>")
else:
#print("The following arguments were used: %s"%(args))
#print("The following options were used: %s"%(opts))
if(any("--test" in element for element in args)):
testParse()
elif(any("--path" in element for element in args)):
thisScriptPath = getcwd()
print("Current working directory is: %s"%thisScriptPath)
shellPath = getenv("SHELL")
print("Current shell is: %s"%shellPath)
aliasFilePath = getenv("HOME")+"/.alias"
with open(aliasFilePath, "a") as aliasFile:
# Write to bash alias
if(shellPath == "/bin/bash"):
aliasFile.write("alias getInstance='python3 %s/getInstance.py'"%(thisScriptPath))
# Write to csh alias
elif(shellPath == "/bin/csh"):
aliasFile.write("alias getInstance 'python3 %s/getInstance.py'"%(thisScriptPath))
else:
userParse(opts[0], int(opts[1]), int(opts[2]), int(opts[3]))
except getopt.error:
print("That option is not supported.")
| 33.148014
| 91
| 0.574167
|
b130b44fc851bc99be1ce4e43c34e20b4305012e
| 3,927
|
py
|
Python
|
Python/getVerificationStudy.py
|
KienTTran/PSU-CIDD-MaSim-Support
|
db4e3c514b1bb85bc3f20e75703d6be4967e98a9
|
[
"BSD-3-Clause"
] | null | null | null |
Python/getVerificationStudy.py
|
KienTTran/PSU-CIDD-MaSim-Support
|
db4e3c514b1bb85bc3f20e75703d6be4967e98a9
|
[
"BSD-3-Clause"
] | null | null | null |
Python/getVerificationStudy.py
|
KienTTran/PSU-CIDD-MaSim-Support
|
db4e3c514b1bb85bc3f20e75703d6be4967e98a9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
# getVerificationStudy.py
#
# Query for verification studies and prompt the user to supply the replicate
# id that they wish to download, the CSV file saved is formatted so it can be
# used with the Matlab plotting function.
#
# Note that results for the model burn-in period are included and may contain
# invalid data.
import os
import sys
# Import our libraries
sys.path.append(os.path.join(os.path.dirname(__file__), "include"))
from include.calibrationLib import load_configuration
from include.database import select, DatabaseError
SELECT_REPLICATES = """
SELECT r.id, c.filename,
to_char(r.starttime, 'YYYY-MON-DD HH24:MI:SS'),
to_char(r.endtime, 'YYYY-MON-DD HH24:MI:SS')
FROM sim.study s
INNER JOIN sim.configuration c ON c.studyid = s.id
INNER JOIN sim.replicate r ON r.configurationid = c.id
WHERE s.id = %(studyId)s
ORDER BY r.id ASC"""
# Note that we are left joining on the monthly site data since a beta of zero
# is valid and will result in no site data being stored during model execution.
# This also means that the EIR may be set to the sentinel -9999 during the
# model burn-in period so plotting scripts will need to ensure that the
# rendering period is valid.
SELECT_DATASET = """
SELECT dayselapsed, district,
sum(population) AS population,
sum(clinicalepisodes) as clinicalepisodes,
sum(treatments) as treatments,
avg(msd.eir) AS eir,
sum(population * pfprunder5) / sum(population) AS pfprunder5,
sum(population * pfpr2to10) / sum(population) AS pfpr2to10,
sum(population * pfprall) / sum(population) AS pfprall
FROM sim.replicate r
INNER JOIN sim.monthlydata md ON md.replicateid = r.id
INNER JOIN sim.monthlysitedata msd ON msd.monthlydataid = md.id
INNER JOIN sim.location l ON l.id = msd.locationid
WHERE r.id = %(replicateId)s
GROUP BY dayselapsed, district
ORDER BY dayselapsed, district"""
def main(configuration, studyId):
try:
# Load the configuration, query for the list of replicates
cfg = load_configuration(configuration)
replicates = select(cfg["connection_string"], SELECT_REPLICATES, {'studyId':studyId})
# Display list, prompt user
if len(replicates) == 0:
print("No studies returned!")
exit(0)
print("Studies returned: ")
for replicate in replicates:
print("{}\t{}\t{}\t{}".format(replicate[0], replicate[1], replicate[2], replicate[3]))
# Prompt for replicate id
        replicateId = int(input("Replicate to retrieve: "))
# Load the data set, exit if nothing is returned
rows, columns = select(cfg["connection_string"], SELECT_DATASET, {'replicateId':replicateId}, True)
if len(rows) == 0:
print("No data returned!")
exit(0)
# Save the replicate to disk
filename = "{}-verification-data.csv".format(replicateId)
print("Saving data set to: {}".format(filename))
with open(filename, "w") as csvfile:
csvfile.write("")
line = ','.join(str(columns[ndx]) for ndx in range(0, len(columns)))
csvfile.write("{}\n".format(line))
for row in rows:
line = ','.join(str(row[ndx]) for ndx in range(0, len(row)))
csvfile.write("{}\n".format(line))
except DatabaseError:
sys.stderr.write("An unrecoverable database error occurred, exiting.\n")
sys.exit(1)
if __name__ == "__main__":
# Check the command line
if len(sys.argv) != 3:
print("Usage: ./getVerificationStudy [configuration] [studyid]")
print("configuration - the configuration file to be loaded")
print("studyid - the database id of the verification studies")
exit(0)
# Parse the parameters
configuration = str(sys.argv[1])
studyId = int(sys.argv[2])
# Run the main function
main(configuration, studyId)
| 36.700935
| 107
| 0.670996
|
831e48ef47fff340ff1f555a2b1e53169d18e280
| 4,989
|
py
|
Python
|
napari/layers/surface/_tests/test_surface.py
|
Czaki/napari
|
9f423a19f7bb187f9d2e12fe9c7e063a38ccd07e
|
[
"BSD-3-Clause"
] | 1
|
2022-03-01T19:38:06.000Z
|
2022-03-01T19:38:06.000Z
|
napari/layers/surface/_tests/test_surface.py
|
Czaki/napari
|
9f423a19f7bb187f9d2e12fe9c7e063a38ccd07e
|
[
"BSD-3-Clause"
] | 17
|
2020-06-11T21:02:03.000Z
|
2021-02-02T19:10:19.000Z
|
napari/layers/surface/_tests/test_surface.py
|
pwinston/napari
|
5ebb3cc914038519591d7186a8a2d42b6fdc42b9
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
from napari._tests.utils import check_layer_world_data_extent
from napari.layers import Surface
def test_random_surface():
"""Test instantiating Surface layer with random 2D data."""
np.random.seed(0)
vertices = np.random.random((10, 2))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 2
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert np.all(layer.vertices == vertices)
assert np.all(layer.faces == faces)
assert np.all(layer.vertex_values == values)
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
def test_random_3D_surface():
"""Test instantiating Surface layer with random 3D data."""
np.random.seed(0)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 3
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
layer._slice_dims(ndisplay=3)
assert layer._data_view.shape[1] == 3
assert layer._view_vertex_values.ndim == 1
def test_random_4D_surface():
"""Test instantiating Surface layer with random 4D data."""
np.random.seed(0)
vertices = np.random.random((10, 4))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 4
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
layer._slice_dims(ndisplay=3)
assert layer._data_view.shape[1] == 3
assert layer._view_vertex_values.ndim == 1
def test_random_3D_timeseries_surface():
"""Test instantiating Surface layer with random 3D timeseries data."""
np.random.seed(0)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random((22, 10))
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 4
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
assert layer.extent.data[1][0] == 22
layer._slice_dims(ndisplay=3)
assert layer._data_view.shape[1] == 3
assert layer._view_vertex_values.ndim == 1
# If a values axis is made to be a displayed axis then no data should be
# shown
with pytest.warns(UserWarning):
layer._slice_dims(ndisplay=3, order=[3, 0, 1, 2])
assert len(layer._data_view) == 0
def test_random_3D_multitimeseries_surface():
"""Test instantiating Surface layer with random 3D multitimeseries data."""
np.random.seed(0)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random((16, 22, 10))
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 5
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
assert layer.extent.data[1][0] == 16
assert layer.extent.data[1][1] == 22
layer._slice_dims(ndisplay=3)
assert layer._data_view.shape[1] == 3
assert layer._view_vertex_values.ndim == 1
def test_visibility():
"""Test setting layer visibility."""
np.random.seed(0)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.visible is True
layer.visible = False
assert layer.visible is False
layer = Surface(data, visible=False)
assert layer.visible is False
layer.visible = True
assert layer.visible is True
def test_surface_gamma():
"""Test setting gamma."""
np.random.seed(0)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.gamma == 1
# Change gamma property
gamma = 0.7
layer.gamma = gamma
assert layer.gamma == gamma
# Set gamma as keyword argument
layer = Surface(data, gamma=gamma)
assert layer.gamma == gamma
def test_world_data_extent():
"""Test extent after applying transforms."""
data = [(-5, 0), (0, 15), (30, 12)]
min_val = (-5, 0)
max_val = (30, 15)
layer = Surface((np.array(data), np.array((0, 1, 2)), np.array((0, 0, 0))))
extent = np.array((min_val, max_val))
check_layer_world_data_extent(layer, extent, (3, 1), (20, 5))
| 32.396104
| 79
| 0.656845
|
431cd6fe42548124dfaff9e2aeb78562ddfd43e6
| 2,349
|
py
|
Python
|
benchmark/tabix_bench.py
|
Munchic/pysam
|
38fd53b521b99373e223a2ad42ec596124e98050
|
[
"MIT"
] | 1
|
2018-06-29T01:47:33.000Z
|
2018-06-29T01:47:33.000Z
|
benchmark/tabix_bench.py
|
Munchic/pysam
|
38fd53b521b99373e223a2ad42ec596124e98050
|
[
"MIT"
] | null | null | null |
benchmark/tabix_bench.py
|
Munchic/pysam
|
38fd53b521b99373e223a2ad42ec596124e98050
|
[
"MIT"
] | null | null | null |
import gzip
import pysam
import timeit
iterations = 5
repeats = 100
print ("repeats=", repeats, "iterations=", iterations)
fn_compressed = '/tmp/windows_small.bed.gz'
fn_uncompressed = '/tmp/windows_small.bed'
def test_python_compressed():
'''iterate through with python.'''
f = gzip.open( fn_compressed)
l = len( [x.encode().split("\t") for x in f])
def test_python_uncompressed():
'''iterate through with python.'''
    f = open(fn_uncompressed)
l = len( [x.split("\t") for x in f])
def test_fetch_plain():
"""Stupid test function"""
f = pysam.Tabixfile(fn_compressed)
l = len( list(f.fetch()) )
def test_fetch_parsed():
"""Stupid test function"""
f = pysam.Tabixfile(fn_compressed)
l = len( list(f.fetch( parser = pysam.asBed())) )
def test_iterator_generic_compressed():
f = gzip.open(fn_compressed)
l = len( list( pysam.tabix_generic_iterator( f, parser = pysam.asBed() )))
def test_iterator_generic_uncompressed():
    f = open(fn_uncompressed)
l = len( list( pysam.tabix_generic_iterator( f, parser = pysam.asBed() )))
def test_iterator_parsed_compressed():
f = gzip.open(fn_compressed)
l = len( list( pysam.tabix_iterator( f, parser = pysam.asBed() )))
def test_iterator_parsed_uncompressed():
    f = open(fn_uncompressed)
l = len( list( pysam.tabix_iterator( f, parser = pysam.asBed() )))
def test_iterator_file_compressed():
    f = gzip.open(fn_compressed)
l = len( list( pysam.tabix_file_iterator( f, parser = pysam.asBed() )))
def test_iterator_file_uncompressed():
    f = open(fn_uncompressed)
l = len( list( pysam.tabix_file_iterator( f, parser = pysam.asBed() )))
tests = ( test_python_compressed,
test_python_uncompressed,
test_fetch_plain,
test_fetch_parsed,
test_iterator_generic_compressed,
test_iterator_generic_uncompressed,
test_iterator_parsed_compressed,
test_iterator_parsed_uncompressed,
test_iterator_file_compressed,
test_iterator_file_uncompressed )
for repeat in range( repeats ):
print ("# repeat=", repeat)
for test in tests:
try:
t = timeit.timeit( test, number = iterations )
except AttributeError:
continue
print ("%5.2f\t%s" % (t,str(test)))
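The benchmark assumes that /tmp/windows_small.bed.gz already exists as a bgzip-compressed, tabix-indexed copy of the BED file; a minimal preparation sketch using pysam (note that tabix_index compresses the plain file in place, so a separate uncompressed copy has to be kept for the *_uncompressed tests):

# Illustrative one-time setup for the benchmark input (assumption: the plain
# BED file already exists); tabix_index bgzip-compresses it to
# windows_small.bed.gz and writes a matching .tbi index alongside it.
import pysam

pysam.tabix_index('/tmp/windows_small.bed', preset='bed', force=True)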
| 30.506494
| 78
| 0.664538
|
fb1af2b6ba7c64773e3eb0f188fe914ea2ee6f01
| 1,002
|
py
|
Python
|
src/server/api/API_ingest/dropbox_handler.py
|
carlos-dominguez/paws-data-pipeline
|
5c224e1f259c079631df7d3514a873875c633221
|
[
"MIT"
] | 27
|
2019-11-20T20:20:30.000Z
|
2022-01-31T17:24:55.000Z
|
src/server/api/API_ingest/dropbox_handler.py
|
mrcrnkovich/paws-data-pipeline
|
7c0bd4c5f23276f541611cb564f2f5abbb6b9887
|
[
"MIT"
] | 348
|
2019-11-26T20:34:02.000Z
|
2022-02-27T20:28:20.000Z
|
src/server/api/API_ingest/dropbox_handler.py
|
mrcrnkovich/paws-data-pipeline
|
7c0bd4c5f23276f541611cb564f2f5abbb6b9887
|
[
"MIT"
] | 20
|
2019-12-03T23:50:33.000Z
|
2022-02-09T18:38:25.000Z
|
import dropbox
try:
from secrets_dict import DROPBOX_APP
except ImportError:
# Not running locally
print("Couldn't get DROPBOX_APP from file, trying environment **********")
from os import environ
try:
DROPBOX_APP = environ['DROPBOX_APP']
except KeyError:
# Not in environment
# You're SOL for now
print("Couldn't get DROPBOX_APP from file or environment")
class TransferData:
def __init__(self, access_token):
self.access_token = access_token
def upload_file(self, file_from, file_to):
dbx = dropbox.Dropbox(self.access_token)
with open(file_from, 'rb') as f:
dbx.files_upload(f.read(), file_to)
def upload_file_to_dropbox(file_path, upload_path):
access_token = DROPBOX_APP
transfer_data = TransferData(access_token)
file_from = file_path
file_to = upload_path # The full path to upload the file to, including the file name
transfer_data.upload_file(file_from, file_to)
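A minimal usage sketch of the helper above (both paths are placeholders; the Dropbox destination must be an absolute path such as '/report.csv', and DROPBOX_APP must have resolved to a valid access token):

# Illustrative call: upload a local file into the app's Dropbox folder.
upload_file_to_dropbox('/tmp/report.csv', '/report.csv')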
| 27.833333
| 89
| 0.691617
|
84ab9dee85067655467e62f7492dac2084708fe8
| 724
|
py
|
Python
|
lib/commands/getpid.py
|
Tru5tNo1/shad0w
|
d26b790226d7a307c99d4e1f2a8309a9f09e088d
|
[
"MIT"
] | 1,732
|
2020-04-28T20:13:43.000Z
|
2022-03-31T19:11:27.000Z
|
lib/commands/getpid.py
|
windows1988/shad0w
|
3b7e9b826108e03796c24a2bd80c2e2a87b2cfbd
|
[
"MIT"
] | 70
|
2020-06-05T15:21:05.000Z
|
2022-01-07T13:07:19.000Z
|
lib/commands/getpid.py
|
windows1988/shad0w
|
3b7e9b826108e03796c24a2bd80c2e2a87b2cfbd
|
[
"MIT"
] | 292
|
2020-04-29T02:17:24.000Z
|
2022-03-18T05:02:29.000Z
|
#
# Get the current pid info
#
import json
__description__ = "Show current process info"
__author__ = "@_batsec_"
__type__ = "process"
EXEC_ID = 0x4000
OPCODE_PID = 0x7000
def pid_callback(shad0w, data):
shad0w.debug.log(data, log=True, pre=False)
return ""
def main(shad0w, args):
# check we actually have a beacon
if shad0w.current_beacon is None:
shad0w.debug.error("ERROR: No active beacon.")
return
# make the json
data = {"op": OPCODE_PID, "args": "null"}
data = json.dumps(data)
# set a task for the current beacon to do
shad0w.beacons[shad0w.current_beacon]["callback"] = pid_callback
shad0w.beacons[shad0w.current_beacon]["task"] = (EXEC_ID, data)
| 20.685714
| 68
| 0.675414
|
5a3d56c830a56ebbc85e4d310f09c4f4be267ad2
| 713
|
py
|
Python
|
scripts/fix_umlaut_annotated_data.py
|
fraunhofer-iais/hybrid_summarization_eval
|
399505ca2bc3485b7e88840a83a803b03f7779c8
|
[
"Apache-2.0"
] | 1
|
2021-09-13T22:59:46.000Z
|
2021-09-13T22:59:46.000Z
|
scripts/fix_umlaut_annotated_data.py
|
fraunhofer-iais/hybrid_summarization_eval
|
399505ca2bc3485b7e88840a83a803b03f7779c8
|
[
"Apache-2.0"
] | null | null | null |
scripts/fix_umlaut_annotated_data.py
|
fraunhofer-iais/hybrid_summarization_eval
|
399505ca2bc3485b7e88840a83a803b03f7779c8
|
[
"Apache-2.0"
] | null | null | null |
import unicodedata
from konvens2020_summarization.data_classes import Corpus
def fix_unicode(text):
text = unicodedata.normalize('NFC', text)
return text
corpus = Corpus.from_excel('../data/raw_annotated/competition_data_annotated_full.xlsx')
for doc in corpus:
doc.text = fix_unicode(doc.text)
doc.ref_summary = fix_unicode(doc.ref_summary)
for summary in doc.gen_summaries:
summary.text = fix_unicode(summary.text)
# for char in corpus[0].text[14:20]:
# print(char)
# print()
# for char in corpus[1].ref_summary[20:26]:
# print(char)
# print()
# for char in corpus[1].gen_summaries[2].text[78:85]:
# print(char)
corpus.to_json('../data/raw_annotated/corpus.json')
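The NFC normalization above matters because spreadsheet exports often store umlauts in decomposed (NFD) form, a base letter followed by a combining diaeresis, which renders the same but breaks exact string comparison; a small self-contained illustration:

# Why fix_unicode normalizes to NFC: the decomposed and precomposed spellings
# of 'für' look identical but compare unequal until normalized.
import unicodedata

decomposed = 'fu\u0308r'                  # 'f' + 'u' + COMBINING DIAERESIS + 'r'
composed = unicodedata.normalize('NFC', decomposed)
assert composed == 'f\u00fcr'             # precomposed 'ü'
assert decomposed != composed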
| 25.464286
| 88
| 0.718093
|
9f0e77b2264f1079b759bbd01b92b5538946687d
| 522
|
py
|
Python
|
recognition/audio_settings.py
|
ReanGD/smart-home
|
0d3ebe3213ad275f64490218ca3dbc0128c12339
|
[
"Apache-2.0"
] | 1
|
2018-07-31T21:17:37.000Z
|
2018-07-31T21:17:37.000Z
|
recognition/audio_settings.py
|
ReanGD/smart-home
|
0d3ebe3213ad275f64490218ca3dbc0128c12339
|
[
"Apache-2.0"
] | null | null | null |
recognition/audio_settings.py
|
ReanGD/smart-home
|
0d3ebe3213ad275f64490218ca3dbc0128c12339
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
from audio import AudioSettings
def get_common_settings(settings: List[AudioSettings]) -> AudioSettings:
channels = settings[0].channels
sample_format = settings[0].sample_format
sample_rate = settings[0].sample_rate
for setting in settings:
if (channels != setting.channels
or sample_format != setting.sample_format
or sample_rate != setting.sample_rate):
raise Exception("Settings are not consistent")
return settings[0]
| 32.625
| 72
| 0.695402
|
5998e5f2fa71d33e9ec15d52dc2e6ecffb5a51d0
| 9,249
|
py
|
Python
|
dryxPython/tests/test_mysql.py
|
thespacedoctor/dryxPython
|
8f34f997192eebef9403bd40e4b7c1b1d216f53c
|
[
"BSD-3-Clause"
] | 2
|
2015-08-01T16:00:44.000Z
|
2017-02-24T21:06:50.000Z
|
dryxPython/tests/test_mysql.py
|
thespacedoctor/dryxPython
|
8f34f997192eebef9403bd40e4b7c1b1d216f53c
|
[
"BSD-3-Clause"
] | null | null | null |
dryxPython/tests/test_mysql.py
|
thespacedoctor/dryxPython
|
8f34f997192eebef9403bd40e4b7c1b1d216f53c
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import unittest
import nose
from .. import mysql
# SETUP AND TEARDOWN FIXTURE FUNCTIONS FOR THE ENTIRE MODULE
def setUpModule():
import logging
import logging.config
import yaml
"set up test fixtures"
moduleDirectory = os.path.dirname(__file__) + "/../tests"
    # SETUP PATHS TO COMMON DIRECTORIES FOR TEST DATA
global pathToInputDataDir, pathToOutputDir, pathToOutputDataDir, pathToInputDir
pathToInputDir = moduleDirectory + "/input/"
pathToInputDataDir = pathToInputDir + "data/"
pathToOutputDir = moduleDirectory + "/output/"
pathToOutputDataDir = pathToOutputDir + "data/"
# SETUP THE TEST LOG FILE
global testlog
testlog = open(pathToOutputDir + "tests.log", 'w')
# SETUP LOGGING
loggerConfig = """
version: 1
formatters:
file_style:
format: '* %(asctime)s - %(name)s - %(levelname)s (%(pathname)s > %(funcName)s > %(lineno)d) - %(message)s '
datefmt: '%Y/%m/%d %H:%M:%S'
console_style:
format: '* %(asctime)s - %(levelname)s: %(pathname)s:%(funcName)s:%(lineno)d > %(message)s'
datefmt: '%H:%M:%S'
html_style:
format: '<div id="row" class="%(levelname)s"><span class="date">%(asctime)s</span> <span class="label">file:</span><span class="filename">%(filename)s</span> <span class="label">method:</span><span class="funcName">%(funcName)s</span> <span class="label">line#:</span><span class="lineno">%(lineno)d</span> <span class="pathname">%(pathname)s</span> <div class="right"><span class="message">%(message)s</span><span class="levelname">%(levelname)s</span></div></div>'
datefmt: '%Y-%m-%d <span class= "time">%H:%M <span class= "seconds">%Ss</span></span>'
handlers:
console:
class: logging.StreamHandler
level: DEBUG
formatter: console_style
stream: ext://sys.stdout
root:
level: DEBUG
handlers: [console]"""
logging.config.dictConfig(yaml.load(loggerConfig))
global log
log = logging.getLogger(__name__)
# SETUP DB CONNECTION
import pymysql as ms
dbConfig = """
version: 1
db: pessto_marshall_sandbox
host: localhost
user: root
password: root
"""
connDict = yaml.load(dbConfig)
global dbConn
dbConn = ms.connect(
host=connDict['host'],
user=connDict['user'],
passwd=connDict['password'],
db=connDict['db'],
use_unicode=True,
charset='utf8'
)
dbConn.autocommit(True)
# CREATE A TABLE TO TEST WITH
global testTableName
testTableName = "setupmodule_table_for_unit_testing"
cursor = dbConn.cursor(ms.cursors.DictCursor)
try:
createTestTable = """CREATE TABLE %s(
id int NOT NULL AUTO_INCREMENT,
ra DOUBLE,
decl DOUBLE,
PRIMARY KEY(id)
)""" % (testTableName,)
        print(createTestTable)
cursor.execute(createTestTable)
except:
pass
raList = [
23.2434234234234,
145.123123123123,
46.1231231231231,
86.1312312321312,
203.432342342343,
309.124123131231,
09.334132412412,
245.242342343244,
103.434535354234,
0.23424242423423
]
decList = [
-89.3242342342324,
-82.3324342342342,
-64.1231312312312,
-45.1231231231232,
-30.2342342342342,
-0.03232323232323,
12.23232445225352,
25.23423423424244,
56.23234234234334,
79.12314252435345
]
for r, d in zip(raList, decList):
insertMe = """\
INSERT INTO %s( \
ra, \
decl \
)VALUES( \
%s, \
%s \
) \
""" % (testTableName, r, d)
        print(insertMe)
cursor.execute(insertMe)
cursor.close()
return None
def tearDownModule():
"tear down test fixtures"
# CLOSE THE TEST LOG FILE
testlog.close()
return None
class emptyLogger:
info = None
error = None
debug = None
critical = None
warning = None
class test_convert_dictionary_to_mysql_table(unittest.TestCase):
def test_raise_error_if_dbConn_is_not_a_working_db_connection(self):
kwargs = {}
kwargs["dbConn"] = "garbage"
kwargs["log"] = log
kwargs["dictionary"] = {"someGoodKey": "nice"}
kwargs["dbTableName"] = "python_unit_testing_dict_to_mysql"
kwargs["uniqueKeyList"] = ["someGoodKey", ]
nose.tools.assert_raises(
TypeError, mysql.convert_dictionary_to_mysql_table, **kwargs)
def test_raise_error_if_dictionary_argu_not_a_dictionary(self):
kwargs = {}
kwargs["dbConn"] = dbConn
kwargs["log"] = log
kwargs["dictionary"] = "not a dictionary"
kwargs["dbTableName"] = "python_unit_testing_dict_to_mysql"
kwargs["uniqueKeyList"] = ["not a dictionary", ]
nose.tools.assert_raises(
TypeError, mysql.convert_dictionary_to_mysql_table, **kwargs)
def test_raise_error_if_dictionary_has_not_simple_values(self):
kwargs = {}
kwargs["dbConn"] = dbConn
kwargs["log"] = log
kwargs["dictionary"] = {
"someGoodKey": "nice", "someOtherBadKey": ["ev!l", "list", 42]}
kwargs["dbTableName"] = "python_unit_testing_dict_to_mysql"
kwargs["uniqueKeyList"] = ["someGoodKey", "someOtherBadKey", ]
nose.tools.assert_raises(
ValueError, mysql.convert_dictionary_to_mysql_table, **kwargs)
def test_raise_error_if_uniqueKeyList_is_not_list(self):
kwargs = {}
kwargs["dbConn"] = dbConn
kwargs["log"] = log
kwargs["dictionary"] = {"someGoodKey": "nice"}
kwargs["dbTableName"] = "python_unit_testing_dict_to_mysql"
kwargs["uniqueKeyList"] = ">>>>>>>>>>> not a list <<<<<<<<<<<"
nose.tools.assert_raises(
TypeError, mysql.convert_dictionary_to_mysql_table, **kwargs)
pass
def test_raise_error_if_uniqueKeyList_values_not_in_dictionary(self):
kwargs = {}
kwargs["dbConn"] = dbConn
kwargs["log"] = log
kwargs["dictionary"] = {
"someGoodKey": "nice", "another good key": "andother value"}
kwargs["dbTableName"] = "python_unit_testing_dict_to_mysql"
kwargs["uniqueKeyList"] = [
"someGoodKey", ">>>>>>>>>>> not a good key <<<<<<<<<<<"]
nose.tools.assert_raises(
ValueError, mysql.convert_dictionary_to_mysql_table, **kwargs)
def test_raise_error_if_createHelperTables_is_not_boolean(self):
kwargs = {}
kwargs["dbConn"] = dbConn
kwargs["log"] = log
kwargs["dictionary"] = {
"someGoodKey": "nice", "another good key": "andother value"}
kwargs["dbTableName"] = "python_unit_testing_dict_to_mysql"
kwargs["uniqueKeyList"] = ["someGoodKey", ]
kwargs["createHelperTables"] = ">>>>>>>>>>> not a boolean <<<<<<<<<<<"
nose.tools.assert_raises(
TypeError, mysql.convert_dictionary_to_mysql_table, **kwargs)
def test_to_create_a_table_to_see_if_code_completes(self):
kwargs = {}
kwargs["dbConn"] = dbConn
kwargs["log"] = log
kwargs["dictionary"] = {
"someGoodKey": ["nice", "nice"], "and other": ["nice", "nice"]}
kwargs["dbTableName"] = "python_unit_testing_dict_to_mysql"
kwargs["uniqueKeyList"] = ["someGoodKey", "and other"]
mysql.convert_dictionary_to_mysql_table(**kwargs)
class test_add_HTMIds_to_mysql_tables(unittest.TestCase):
def test_table_exits(self):
kwargs = {}
kwargs["primaryIdColumnName"] = "id"
kwargs["raColName"] = "ra"
kwargs["declColName"] = "decl"
kwargs["tableName"] = ">>>>>>>>>>>not_a_valid_name<<<<<<<<<<<"
kwargs["dbConn"] = dbConn
kwargs["log"] = log
nose.tools.assert_raises(
IOError, mysql.add_HTMIds_to_mysql_tables, **kwargs)
def test_ra_column_exits(self):
kwargs = {}
kwargs["primaryIdColumnName"] = "id"
kwargs["raColName"] = ">>>>>>>>>>> not an RA name <<<<<<<<<<<"
kwargs["declColName"] = "decl"
kwargs["tableName"] = testTableName
kwargs["dbConn"] = dbConn
kwargs["log"] = log
nose.tools.assert_raises(
IOError, mysql.add_HTMIds_to_mysql_tables, **kwargs)
def test_dec_column_exits(self):
kwargs = {}
kwargs["primaryIdColumnName"] = "id"
kwargs["raColName"] = "ra"
kwargs["declColName"] = ">>>>>>>>>>> not a DEC name <<<<<<<<<<<"
kwargs["tableName"] = testTableName
kwargs["dbConn"] = dbConn
kwargs["log"] = log
nose.tools.assert_raises(
IOError, mysql.add_HTMIds_to_mysql_tables, **kwargs)
def test_htmIds_are_generated_after_function_has_run(self):
kwargs = {}
kwargs["primaryIdColumnName"] = "id"
kwargs["raColName"] = "ra"
kwargs["declColName"] = "decl"
kwargs["tableName"] = testTableName
kwargs["dbConn"] = dbConn
kwargs["log"] = log
| 33.632727
| 485
| 0.597686
|
00a820f1b85b78f8abbf72068690d1c65c3cf863
| 398
|
py
|
Python
|
src/store/wsgi.py
|
BrayanPotosi/Python-DB-Manipulation
|
85921d3342d2bd2873cc32c593f748a3315c5b0b
|
[
"MIT"
] | null | null | null |
src/store/wsgi.py
|
BrayanPotosi/Python-DB-Manipulation
|
85921d3342d2bd2873cc32c593f748a3315c5b0b
|
[
"MIT"
] | null | null | null |
src/store/wsgi.py
|
BrayanPotosi/Python-DB-Manipulation
|
85921d3342d2bd2873cc32c593f748a3315c5b0b
|
[
"MIT"
] | null | null | null |
"""
WSGI config for store project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'store.settings.production')
application = get_wsgi_application()
| 23.411765
| 78
| 0.786432
|
1e8368f854ea5feec16224e84f79f99e084a0ade
| 4,520
|
py
|
Python
|
spec/predict.py
|
deep-spin/spec-blackboxnlp
|
23db7a559e09ff7f63ede06b04cad226432b90db
|
[
"MIT"
] | 2
|
2020-11-26T07:46:48.000Z
|
2021-07-28T08:06:58.000Z
|
spec/predict.py
|
deep-spin/spec-blackboxnlp
|
23db7a559e09ff7f63ede06b04cad226432b90db
|
[
"MIT"
] | null | null | null |
spec/predict.py
|
deep-spin/spec-blackboxnlp
|
23db7a559e09ff7f63ede06b04cad226432b90db
|
[
"MIT"
] | null | null | null |
import logging
from pathlib import Path
from spec import constants
from spec import iterator
from spec import models
from spec.dataset import dataset, fields
from spec.dataset.corpora import available_corpora
from spec.predicter import Predicter
logger = logging.getLogger(__name__)
def run(options):
fields_tuples = available_corpora[options.corpus].create_fields_tuples()
# fields_tuples += features.load(options.load)
    if options.test_path is None and options.text is None:
        raise Exception('You should provide a path to test data or a text.')
    if options.test_path is not None and options.text is not None:
        raise Exception('You cannot provide both a path to test data and a text.')
dataset_iter = None
if options.test_path is not None and options.text is None:
logger.info('Building test dataset: {}'.format(options.test_path))
test_tuples = list(filter(lambda x: x[0] != 'target', fields_tuples))
test_dataset = dataset.build(options.test_path, test_tuples, options)
logger.info('Building test iterator...')
dataset_iter = iterator.build(test_dataset,
options.gpu_id,
options.dev_batch_size,
is_train=False,
lazy=options.lazy_loading)
if options.text is not None and options.test_path is None:
logger.info('Preparing text...')
test_tuples = list(filter(lambda x: x[0] != 'target', fields_tuples))
test_dataset = dataset.build_texts(options.text, test_tuples, options)
logger.info('Building iterator...')
dataset_iter = iterator.build(test_dataset,
options.gpu_id,
options.dev_batch_size,
is_train=False,
lazy=options.lazy_loading)
logger.info('Loading vocabularies...')
fields.load_vocabs(options.load, fields_tuples)
logger.info('Loading model...')
model = models.load(options.load, fields_tuples, options.gpu_id)
logger.info('Predicting...')
predicter = Predicter(dataset_iter, model)
predictions = predicter.predict(options.prediction_type)
logger.info('Preparing to save...')
if options.prediction_type == 'classes':
target_field = dict(fields_tuples)['target']
prediction_target = transform_classes_to_target(target_field,
predictions)
predictions_str = transform_predictions_to_text(prediction_target)
else:
predictions_str = transform_predictions_to_text(predictions)
if options.test_path is not None:
save_predictions(
options.output_dir,
predictions_str,
)
else:
logger.info(options.text)
logger.info(predictions_str)
return predictions
def save_predictions(directory, predictions_str):
directory = Path(directory)
directory.mkdir(parents=True, exist_ok=True)
output_path = Path(directory, constants.PREDICTIONS)
save_predictions_in_a_file(output_path, predictions_str)
logger.info('Predictions saved in {}'.format(output_path))
def save_predictions_in_a_file(output_file_path, predictions_str):
output_file_path.write_text(predictions_str + '\n')
def save_predictions_in_a_dir(output_dir_path, file_names, predictions_str):
    assert output_dir_path.is_dir()
    predictions_for_each_file = predictions_str.split('\n')
    for f_name, pred_str in zip(file_names, predictions_for_each_file):
        output_path = Path(output_dir_path, f_name)
output_path.write_text(pred_str + '\n')
def transform_classes_to_target(target_field, predictions):
    tagged_predictions = []
    for preds in predictions:
        target_preds = [target_field.vocab.itos[c] for c in preds]
        tagged_predictions.append(target_preds)
    return tagged_predictions
def transform_predictions_to_text(predictions):
text = []
is_prob = isinstance(predictions[0][0], list)
for pred in predictions:
sentence = []
for p in pred:
if is_prob:
sentence.append(', '.join(['%.8f' % c for c in p]))
else:
sentence.append(p)
if is_prob:
text.append(' | '.join(sentence))
else:
text.append(' '.join(sentence))
return '\n'.join(text)
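For clarity, a small worked example of the layout produced by transform_predictions_to_text above (values are invented): class predictions become space-separated tokens with one sentence per line, while probability predictions become comma-separated distributions joined by ' | '.

# Illustrative behaviour of transform_predictions_to_text (made-up values):
assert transform_predictions_to_text([['B-PER', 'O'], ['O']]) == 'B-PER O\nO'
assert transform_predictions_to_text(
    [[[0.25, 0.75], [0.9, 0.1]]]
) == '0.25000000, 0.75000000 | 0.90000000, 0.10000000'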
| 36.451613
| 79
| 0.650664
|
1d2e6200beb9e1d46bb15a3c52933113762c9e30
| 586
|
py
|
Python
|
argparser/parser.py
|
mhsiddiqui/djrest-argparser
|
8c29701a5723cf0e732220e08a2e0ef41d0149eb
|
[
"MIT"
] | null | null | null |
argparser/parser.py
|
mhsiddiqui/djrest-argparser
|
8c29701a5723cf0e732220e08a2e0ef41d0149eb
|
[
"MIT"
] | null | null | null |
argparser/parser.py
|
mhsiddiqui/djrest-argparser
|
8c29701a5723cf0e732220e08a2e0ef41d0149eb
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers as parser
from rest_framework.utils.serializer_helpers import ReturnDict
class ArgParser(parser.Serializer):
def parse(self, raise_exception=True):
if self.is_valid(raise_exception=raise_exception):
pass
def update(self, instance, validated_data):
pass
def create(self, validated_data):
pass
def save(self, **kwargs):
pass
def to_representation(self, instance):
pass
@property
def data(self):
return ReturnDict(self.validated_data, serializer=self)
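A hedged usage sketch for ArgParser above: subclass it with DRF fields to validate request arguments, call parse(), then read the cleaned values from .data (the field names and the commented view snippet are assumptions for illustration, not part of the package):

# Hypothetical argument schema built on ArgParser.
class SearchArgs(ArgParser):
    q = parser.CharField(required=True)
    page = parser.IntegerField(required=False, default=1)

# Inside a DRF view one might write:
#   args = SearchArgs(data=request.query_params)
#   args.parse()                      # raises ValidationError on bad input
#   q, page = args.data['q'], args.data['page']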
| 20.928571
| 63
| 0.687713
|
df606ee1a473de14daed31d42dc823f7c20def90
| 1,212
|
py
|
Python
|
setup.py
|
matekrk/swa_gaussian
|
ab56ee2126c8d3d24221fbbeaf5d1ecfd40fd697
|
[
"BSD-2-Clause"
] | 337
|
2019-02-08T01:58:34.000Z
|
2022-03-26T08:47:56.000Z
|
setup.py
|
matekrk/swa_gaussian
|
ab56ee2126c8d3d24221fbbeaf5d1ecfd40fd697
|
[
"BSD-2-Clause"
] | 16
|
2019-02-15T18:40:56.000Z
|
2021-07-07T07:16:56.000Z
|
setup.py
|
matekrk/swa_gaussian
|
ab56ee2126c8d3d24221fbbeaf5d1ecfd40fd697
|
[
"BSD-2-Clause"
] | 66
|
2019-02-10T17:59:22.000Z
|
2022-03-20T03:37:34.000Z
|
from setuptools import setup
import os
import sys
_here = os.path.abspath(os.path.dirname(__file__))
if sys.version_info[0] < 3:
with open(os.path.join(_here, "README.rst")) as f:
long_description = f.read()
else:
with open(os.path.join(_here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="swag",
version="0.0",
description=("SWA-Gaussian repo"),
long_description=long_description,
author="Wesley Maddox, Timur Garipov, Pavel Izmailov, Dmitry Vetrov, Andrew Gordon Wilson",
author_email="wm326@cornell.edu",
url="https://github.com/wjmaddox/swa_gaussian",
license="MPL-2.0",
packages=["swag"],
install_requires=[
"tqdm==4.26.0",
"numpy>=1.14.3",
"torchvision>=0.2.1",
"gpytorch>=0.1.0rc4",
"tabulate>=0.8.2",
"scipy>=1.1.0",
"setuptools>=39.1.0",
"matplotlib>=2.2.2",
"torch>=1.0.0",
"Pillow>=5.4.1",
"scikit_learn>=0.20.2",
],
include_package_data=True,
classifiers=[
"Development Status :: 0",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3.6",
],
)
| 27.545455
| 95
| 0.594059
|
b9984fdaeed6c63d5f18bc843e9aa098bd41950a
| 70
|
py
|
Python
|
apps/ImageSearch/algs/NLassoNN/__init__.py
|
AmFamMLTeam/NEXT
|
2b604230395be1b98e84115c20b5f509d5f24411
|
[
"Apache-2.0"
] | 2
|
2020-11-16T17:01:36.000Z
|
2022-03-04T17:07:59.000Z
|
apps/ImageSearch/algs/NLassoNN/__init__.py
|
AmFamMLTeam/NEXT
|
2b604230395be1b98e84115c20b5f509d5f24411
|
[
"Apache-2.0"
] | null | null | null |
apps/ImageSearch/algs/NLassoNN/__init__.py
|
AmFamMLTeam/NEXT
|
2b604230395be1b98e84115c20b5f509d5f24411
|
[
"Apache-2.0"
] | null | null | null |
from apps.ImageSearch.algs.NLassoNN.NLassoNN import NLassoNN as MyAlg
| 35
| 69
| 0.857143
|
36aaacfe38b82b67bbe4d928e9636be8d2a1509f
| 4,534
|
py
|
Python
|
src/oci/_vendor/urllib3/util/request.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/_vendor/urllib3/util/request.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/_vendor/urllib3/util/request.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Modified Work: Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2008-2016 Andrey Petrov and contributors
from __future__ import absolute_import
from base64 import b64encode
from ..exceptions import UnrewindableBodyError
from ..packages.six import b, integer_types
# Pass as a value within ``headers`` to skip
# emitting some HTTP headers that are added automatically.
# The only headers that are supported are ``Accept-Encoding``,
# ``Host``, and ``User-Agent``.
SKIP_HEADER = "@@@SKIP_HEADER@@@"
SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
ACCEPT_ENCODING = "gzip,deflate"
try:
import brotli as _unused_module_brotli # noqa: F401
except ImportError:
pass
else:
ACCEPT_ENCODING += ",br"
_FAILEDTELL = object()
def make_headers(
keep_alive=None,
accept_encoding=None,
user_agent=None,
basic_auth=None,
proxy_basic_auth=None,
disable_cache=None,
):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ",".join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers["accept-encoding"] = accept_encoding
if user_agent:
headers["user-agent"] = user_agent
if keep_alive:
headers["connection"] = "keep-alive"
if basic_auth:
headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
if proxy_basic_auth:
headers["proxy-authorization"] = "Basic " + b64encode(
b(proxy_basic_auth)
).decode("utf-8")
if disable_cache:
headers["cache-control"] = "no-cache"
return headers
def set_file_position(body, pos):
"""
If a position is provided, move file to that point.
Otherwise, we'll attempt to record a position for future use.
"""
if pos is not None:
rewind_body(body, pos)
elif getattr(body, "tell", None) is not None:
try:
pos = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos
def rewind_body(body, body_pos):
"""
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.
:param body:
File-like object that supports seek.
:param int pos:
Position to seek to in file.
"""
body_seek = getattr(body, "seek", None)
if body_seek is not None and isinstance(body_pos, integer_types):
try:
body_seek(body_pos)
except (IOError, OSError):
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect/retry."
)
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError(
"Unable to record file position for rewinding "
"request body during a redirect/retry."
)
else:
raise ValueError(
"body_pos must be of type integer, instead it was %s." % type(body_pos)
)
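An illustrative use of the helpers above, assuming the functions in this module are in scope (they are the standard urllib3.util.request utilities, vendored here by the SDK):
import io

headers = make_headers(keep_alive=True, basic_auth="user:secret", accept_encoding=True)
# -> {'connection': 'keep-alive', 'authorization': 'Basic ...',
#     'accept-encoding': 'gzip,deflate'}  (",br" is appended when brotli is installed)

body = io.BytesIO(b"payload")
pos = set_file_position(body, None)   # records the starting offset (0)
body.read()                           # body consumed by a first request attempt
rewind_body(body, pos)                # seek back before a redirect/retry
assert body.read() == b"payload"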
| 30.42953
| 245
| 0.64689
|
d0e3fe5982ab89872f45567d2a1049ff782689fd
| 8,349
|
py
|
Python
|
wagtail/contrib/search_promotions/views.py
|
sonnybaker/wagtail
|
5522992c2923276fca40417401e8fb2c536b4b4f
|
[
"BSD-3-Clause"
] | 8,851
|
2016-12-09T19:01:45.000Z
|
2022-03-31T04:45:06.000Z
|
wagtail/contrib/search_promotions/views.py
|
sonnybaker/wagtail
|
5522992c2923276fca40417401e8fb2c536b4b4f
|
[
"BSD-3-Clause"
] | 5,197
|
2016-12-09T19:24:37.000Z
|
2022-03-31T22:17:55.000Z
|
wagtail/contrib/search_promotions/views.py
|
sonnybaker/wagtail
|
5522992c2923276fca40417401e8fb2c536b4b4f
|
[
"BSD-3-Clause"
] | 2,548
|
2016-12-09T18:16:55.000Z
|
2022-03-31T21:34:38.000Z
|
from django.core.paginator import Paginator
from django.db import transaction
from django.db.models import Sum, functions
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.admin import messages
from wagtail.admin.auth import any_permission_required, permission_required
from wagtail.admin.forms.search import SearchForm
from wagtail.contrib.search_promotions import forms
from wagtail.core.log_actions import log
from wagtail.search import forms as search_forms
from wagtail.search.models import Query
@any_permission_required(
'wagtailsearchpromotions.add_searchpromotion',
'wagtailsearchpromotions.change_searchpromotion',
'wagtailsearchpromotions.delete_searchpromotion'
)
@vary_on_headers('X-Requested-With')
def index(request):
# Ordering
valid_ordering = ['query_string', '-query_string', 'views', '-views']
ordering = valid_ordering[0]
if 'ordering' in request.GET and request.GET['ordering'] in valid_ordering:
ordering = request.GET['ordering']
# Query
queries = Query.objects.filter(editors_picks__isnull=False).distinct()
if 'views' in ordering:
queries = queries.annotate(views=functions.Coalesce(Sum('daily_hits__hits'), 0))
queries = queries.order_by(ordering)
# Search
is_searching = False
query_string = request.GET.get('q', '')
if query_string:
queries = queries.filter(query_string__icontains=query_string)
is_searching = True
# Paginate
paginator = Paginator(queries, per_page=20)
queries = paginator.get_page(request.GET.get('p'))
if request.headers.get('x-requested-with') == 'XMLHttpRequest':
return TemplateResponse(request, "wagtailsearchpromotions/results.html", {
'is_searching': is_searching,
'ordering': ordering,
'queries': queries,
'query_string': query_string,
})
else:
return TemplateResponse(request, 'wagtailsearchpromotions/index.html', {
'is_searching': is_searching,
'ordering': ordering,
'queries': queries,
'query_string': query_string,
'search_form': SearchForm(
data=dict(q=query_string) if query_string else None, placeholder=_("Search promoted results")
),
})
def save_searchpicks(query, new_query, searchpicks_formset):
# Save
if searchpicks_formset.is_valid():
# Set sort_order
for i, form in enumerate(searchpicks_formset.ordered_forms):
form.instance.sort_order = i
# Make sure the form is marked as changed so it gets saved with the new order
form.has_changed = lambda: True
# log deleted items before saving, otherwise we lose their IDs
items_for_deletion = [
form.instance for form in searchpicks_formset.deleted_forms
if form.instance.pk
]
with transaction.atomic():
for search_pick in items_for_deletion:
log(search_pick, 'wagtail.delete')
searchpicks_formset.save()
for search_pick in searchpicks_formset.new_objects:
log(search_pick, 'wagtail.create')
# If query was changed, move all search picks to the new query
if query != new_query:
searchpicks_formset.get_queryset().update(query=new_query)
# log all items in the formset as having changed
for search_pick, changed_fields in searchpicks_formset.changed_objects:
log(search_pick, 'wagtail.edit')
else:
# only log objects with actual changes
for search_pick, changed_fields in searchpicks_formset.changed_objects:
if changed_fields:
log(search_pick, 'wagtail.edit')
return True
else:
return False
@permission_required('wagtailsearchpromotions.add_searchpromotion')
def add(request):
if request.method == 'POST':
# Get query
query_form = search_forms.QueryForm(request.POST)
if query_form.is_valid():
query = Query.get(query_form['query_string'].value())
# Save search picks
searchpicks_formset = forms.SearchPromotionsFormSet(request.POST, instance=query)
if save_searchpicks(query, query, searchpicks_formset):
for search_pick in searchpicks_formset.new_objects:
log(search_pick, 'wagtail.create')
messages.success(request, _("Editor's picks for '{0}' created.").format(query), buttons=[
messages.button(reverse('wagtailsearchpromotions:edit', args=(query.id,)), _('Edit'))
])
return redirect('wagtailsearchpromotions:index')
else:
if len(searchpicks_formset.non_form_errors()):
# formset level error (e.g. no forms submitted)
messages.error(request, " ".join(error for error in searchpicks_formset.non_form_errors()))
else:
# specific errors will be displayed within form fields
messages.error(request, _("Recommendations have not been created due to errors"))
else:
searchpicks_formset = forms.SearchPromotionsFormSet()
else:
query_form = search_forms.QueryForm()
searchpicks_formset = forms.SearchPromotionsFormSet()
return TemplateResponse(request, 'wagtailsearchpromotions/add.html', {
'query_form': query_form,
'searchpicks_formset': searchpicks_formset,
'form_media': query_form.media + searchpicks_formset.media,
})
@permission_required('wagtailsearchpromotions.change_searchpromotion')
def edit(request, query_id):
query = get_object_or_404(Query, id=query_id)
if request.method == 'POST':
# Get query
query_form = search_forms.QueryForm(request.POST)
# and the recommendations
searchpicks_formset = forms.SearchPromotionsFormSet(request.POST, instance=query)
if query_form.is_valid():
new_query = Query.get(query_form['query_string'].value())
# Save search picks
if save_searchpicks(query, new_query, searchpicks_formset):
messages.success(request, _("Editor's picks for '{0}' updated.").format(new_query), buttons=[
messages.button(reverse('wagtailsearchpromotions:edit', args=(query.id,)), _('Edit'))
])
return redirect('wagtailsearchpromotions:index')
else:
if len(searchpicks_formset.non_form_errors()):
messages.error(request, " ".join(error for error in searchpicks_formset.non_form_errors()))
# formset level error (e.g. no forms submitted)
else:
messages.error(request, _("Recommendations have not been saved due to errors"))
# specific errors will be displayed within form fields
else:
query_form = search_forms.QueryForm(initial=dict(query_string=query.query_string))
searchpicks_formset = forms.SearchPromotionsFormSet(instance=query)
return TemplateResponse(request, 'wagtailsearchpromotions/edit.html', {
'query_form': query_form,
'searchpicks_formset': searchpicks_formset,
'query': query,
'form_media': query_form.media + searchpicks_formset.media,
})
@permission_required('wagtailsearchpromotions.delete_searchpromotion')
def delete(request, query_id):
query = get_object_or_404(Query, id=query_id)
if request.method == 'POST':
editors_picks = query.editors_picks.all()
with transaction.atomic():
for search_pick in editors_picks:
log(search_pick, 'wagtail.delete')
editors_picks.delete()
messages.success(request, _("Editor's picks deleted."))
return redirect('wagtailsearchpromotions:index')
return TemplateResponse(request, 'wagtailsearchpromotions/confirm_delete.html', {
'query': query,
})
| 40.726829
| 111
| 0.661397
|
17be173ec27b20ba9772796eead6c31e122edb76
| 2,058
|
py
|
Python
|
setup.py
|
matthewrmshin/demo
|
a2c959c893242d2214a85d60eab356997665f2cd
|
[
"MIT"
] | null | null | null |
setup.py
|
matthewrmshin/demo
|
a2c959c893242d2214a85d60eab356997665f2cd
|
[
"MIT"
] | null | null | null |
setup.py
|
matthewrmshin/demo
|
a2c959c893242d2214a85d60eab356997665f2cd
|
[
"MIT"
] | null | null | null |
"""Demo project with bells and whistles."""
from ast import literal_eval
import os
from setuptools import setup, find_packages
PKGNAME = 'demo'
URL = 'https://github.com/matthewrmshin/demo'
# Get the long description from the README file
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'README.md'), encoding='utf-8',) as handle:
LONG_DESCRIPTION = handle.read()
with open(
os.path.join(HERE, PKGNAME, '__init__.py'),
encoding='utf-8',
) as handle:
for line in handle:
items = line.split('=', 1)
if items[0].strip() == '__version__':
VERSION = literal_eval(items[1].strip())
break
else:
raise RuntimeError('Cannot determine package version.')
setup(
name=PKGNAME,
version=VERSION,
description='Demo project',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
# 'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Software Development',
],
keywords='demo',
project_urls={
        'Bug Reports': f'{URL}/issues',
'Source': URL,
},
packages=find_packages(),
entry_points={
'console_scripts': [
f'demo_hello = {PKGNAME}.hello:main',
],
},
include_package_data=True,
python_requires='>=3.6, <4',
install_requires=[
],
setup_requires=['pytest-runner'],
tests_require=['pytest', 'pytest-cov'],
extras_require={
'dev': ['check-manifest', 'flake8'],
'test': ['pytest', 'pytest-cov'],
},
zip_safe=True,
)
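For clarity, a small sketch (hypothetical file contents, not part of the repository) of what the version-parsing loop above expects to find in demo/__init__.py, and how literal_eval recovers the version string:
# demo/__init__.py would contain a line such as:
#     __version__ = '0.1.0'
from ast import literal_eval

line = "__version__ = '0.1.0'"
items = line.split('=', 1)
assert items[0].strip() == '__version__'
print(literal_eval(items[1].strip()))  # 0.1.0 -- the for/else above raises if no such line exists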
| 27.44
| 72
| 0.610301
|
e7efc51c851a4c564bdfada0474070d7584d6183
| 21,731
|
py
|
Python
|
giveaways/giveaways.py
|
Kuro-Rui/flare-cogs
|
f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d
|
[
"MIT"
] | null | null | null |
giveaways/giveaways.py
|
Kuro-Rui/flare-cogs
|
f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d
|
[
"MIT"
] | null | null | null |
giveaways/giveaways.py
|
Kuro-Rui/flare-cogs
|
f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d
|
[
"MIT"
] | null | null | null |
import asyncio
import contextlib
import logging
from copy import deepcopy
from datetime import datetime, timezone
from typing import Optional
import aiohttp
import discord
from redbot.core import Config, commands
from redbot.core.commands.converter import TimedeltaConverter
from redbot.core.utils.chat_formatting import pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from .converter import Args
from .objects import Giveaway, GiveawayEnterError, GiveawayExecError
log = logging.getLogger("red.flare.giveaways")
GIVEAWAY_KEY = "giveaways"
# TODO: Add a way to delete giveaways that have ended from the config
class Giveaways(commands.Cog):
"""Giveaway Commands"""
__version__ = "0.11.5"
__author__ = "flare"
def format_help_for_context(self, ctx):
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}\nAuthor: {self.__author__}"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=95932766180343808)
self.config.init_custom(GIVEAWAY_KEY, 2)
self.giveaways = {}
self.giveaway_bgloop = asyncio.create_task(self.init())
self.session = aiohttp.ClientSession()
with contextlib.suppress(Exception):
self.bot.add_dev_env_value("giveaways", lambda x: self)
async def init(self) -> None:
await self.bot.wait_until_ready()
data = await self.config.custom(GIVEAWAY_KEY).all()
for _, giveaways in data.items():
for msgid, giveaway in giveaways.items():
if giveaway.get("ended", False):
continue
if datetime.now(timezone.utc) > datetime.fromtimestamp(
giveaway["endtime"]
).replace(tzinfo=timezone.utc):
continue
self.giveaways[int(msgid)] = Giveaway(
guildid=giveaway["guildid"],
channelid=giveaway["channelid"],
messageid=msgid,
endtime=datetime.fromtimestamp(giveaway["endtime"]).replace(
tzinfo=timezone.utc
),
prize=giveaway["prize"],
emoji=giveaway.get("emoji", "🎉"),
entrants=giveaway["entrants"],
**giveaway["kwargs"],
)
while True:
try:
await self.check_giveaways()
except Exception as exc:
log.error("Exception in giveaway loop: ", exc_info=exc)
await asyncio.sleep(60)
def cog_unload(self) -> None:
with contextlib.suppress(Exception):
self.bot.remove_dev_env_value("giveaways")
self.giveaway_bgloop.cancel()
asyncio.create_task(self.session.close())
async def check_giveaways(self) -> None:
to_clear = []
for msgid, giveaway in self.giveaways.items():
if giveaway.endtime < datetime.now(timezone.utc):
await self.draw_winner(giveaway)
to_clear.append(msgid)
gw = await self.config.custom(GIVEAWAY_KEY, giveaway.guildid, str(msgid)).all()
gw["ended"] = True
await self.config.custom(GIVEAWAY_KEY, giveaway.guildid, str(msgid)).set(gw)
for msgid in to_clear:
del self.giveaways[msgid]
async def draw_winner(self, giveaway: Giveaway):
guild = self.bot.get_guild(giveaway.guildid)
if guild is None:
return
channel_obj = guild.get_channel(giveaway.channelid)
if channel_obj is None:
return
winners = giveaway.draw_winner()
winner_objs = None
if winners is None:
txt = "Not enough entries to roll the giveaway."
else:
winner_objs = []
txt = ""
for winner in winners:
winner_obj = guild.get_member(winner)
if winner_obj is None:
txt += f"{winner} (Not Found)\n"
else:
txt += f"{winner_obj.mention}\n"
winner_objs.append(winner_obj)
msg = channel_obj.get_partial_message(giveaway.messageid)
winners = giveaway.kwargs.get("winners", 1) or 1
embed = discord.Embed(
title=f"{f'{winners}x ' if winners > 1 else ''}{giveaway.prize}",
description=f"Winner(s):\n{txt}",
color=await self.bot.get_embed_color(channel_obj),
timestamp=datetime.now(timezone.utc),
)
embed.set_footer(
text=f"Reroll: {(await self.bot.get_prefix(msg))[-1]}gw reroll {giveaway.messageid} | Ended at"
)
try:
await msg.edit(
content="🎉 Giveaway Ended 🎉",
embed=embed,
)
except discord.NotFound:
return
if giveaway.kwargs.get("announce"):
announce_embed = discord.Embed(
title="Giveaway Ended",
description=f"Congratulations to the {str(winners) + ' ' if winners > 1 else ''}winner{'s' if winners > 1 else ''} of [{giveaway.prize}]({msg.jump_url}).\n{txt}",
color=await self.bot.get_embed_color(channel_obj),
)
announce_embed.set_footer(
text=f"Reroll: {(await self.bot.get_prefix(msg))[-1]}gw reroll {giveaway.messageid}"
)
await channel_obj.send(
content="Congratulations " + ",".join([x.mention for x in winner_objs])
if winner_objs is not None
else "",
embed=announce_embed,
)
if channel_obj.permissions_for(guild.me).manage_messages:
await msg.clear_reactions()
if winner_objs is not None:
if giveaway.kwargs.get("congratulate", False):
for winner in winner_objs:
try:
await winner.send(
f"Congratulations! You won {giveaway.prize} in the giveaway on {guild}!"
)
except discord.Forbidden:
pass
async with self.config.custom(
GIVEAWAY_KEY, giveaway.guildid, int(giveaway.messageid)
).entrants() as entrants:
entrants = [x for x in entrants if x != winner]
return
@commands.group(aliases=["gw"])
@commands.bot_has_permissions(add_reactions=True)
@commands.has_permissions(manage_guild=True)
async def giveaway(self, ctx: commands.Context):
"""
Manage the giveaway system
"""
@giveaway.command()
async def start(
self,
ctx: commands.Context,
channel: Optional[discord.TextChannel],
time: TimedeltaConverter(default_unit="minutes"),
*,
prize: str,
):
"""
Start a giveaway.
This by default will DM the winner and also DM a user if they cannot enter the giveaway.
"""
channel = channel or ctx.channel
end = datetime.now(timezone.utc) + time
embed = discord.Embed(
title=f"{prize}",
description=f"\nReact with 🎉 to enter\nEnds: <t:{int(end.timestamp())}:R>",
color=await ctx.embed_color(),
)
msg = await channel.send(embed=embed)
giveaway_obj = Giveaway(
ctx.guild.id,
channel.id,
msg.id,
end,
prize,
"🎉",
**{"congratulate": True, "notify": True},
)
self.giveaways[msg.id] = giveaway_obj
await msg.add_reaction("🎉")
giveaway_dict = deepcopy(giveaway_obj.__dict__)
giveaway_dict["endtime"] = giveaway_dict["endtime"].timestamp()
await self.config.custom(GIVEAWAY_KEY, str(ctx.guild.id), str(msg.id)).set(giveaway_dict)
@giveaway.command()
async def reroll(self, ctx: commands.Context, msgid: int):
"""Reroll a giveaway."""
data = await self.config.custom(GIVEAWAY_KEY, ctx.guild.id).all()
if str(msgid) not in data:
return await ctx.send("Giveaway not found.")
if msgid in self.giveaways:
return await ctx.send(
f"Giveaway already running. Please wait for it to end or end it via `{ctx.clean_prefix}gw end {msgid}`."
)
giveaway_dict = data[str(msgid)]
giveaway_dict["endtime"] = datetime.fromtimestamp(giveaway_dict["endtime"]).replace(
tzinfo=timezone.utc
)
giveaway = Giveaway(**giveaway_dict)
try:
await self.draw_winner(giveaway)
except GiveawayExecError as e:
await ctx.send(e.message)
else:
await ctx.tick()
@giveaway.command()
async def end(self, ctx: commands.Context, msgid: int):
"""End a giveaway."""
if msgid in self.giveaways:
if self.giveaways[msgid].guildid != ctx.guild.id:
return await ctx.send("Giveaway not found.")
await self.draw_winner(self.giveaways[msgid])
del self.giveaways[msgid]
gw = await self.config.custom(GIVEAWAY_KEY, ctx.guild.id, str(msgid)).all()
gw["ended"] = True
await self.config.custom(GIVEAWAY_KEY, ctx.guild.id, str(msgid)).set(gw)
await ctx.tick()
else:
await ctx.send("Giveaway not found.")
@giveaway.command(aliases=["adv"])
async def advanced(self, ctx: commands.Context, *, arguments: Args):
"""Advanced creation of Giveaways.
`[p]gw explain` for a further full listing of the arguments.
"""
prize = arguments["prize"]
duration = arguments["duration"]
channel = arguments["channel"] or ctx.channel
winners = arguments.get("winners", 1) or 1
end = datetime.now(timezone.utc) + duration
description = arguments["description"] or ""
if arguments["show_requirements"]:
description += "\n\n**Requirements**:"
for kwarg in set(arguments) - {
"show_requirements",
"prize",
"duration",
"channel",
"winners",
"description",
"congratulate",
"notify",
"announce",
"emoji",
}:
if arguments[kwarg]:
description += f"\n**{kwarg.title()}:** {arguments[kwarg]}"
emoji = arguments["emoji"] or "🎉"
if isinstance(emoji, int):
emoji = self.bot.get_emoji(emoji)
embed = discord.Embed(
title=f"{f'{winners}x ' if winners > 1 else ''}{prize}",
description=f"{description}\n\nReact with {emoji} to enter\n\nEnds: <t:{int(end.timestamp())}:R>",
color=await ctx.embed_color(),
)
txt = "\n"
if arguments["ateveryone"]:
txt += "@everyone "
if arguments["athere"]:
txt += "@here "
if arguments["mentions"]:
for mention in arguments["mentions"]:
role = ctx.guild.get_role(mention)
if role is not None:
txt += f"{role.mention} "
msg = await channel.send(
content="🎉 Giveaway 🎉" + txt,
embed=embed,
allowed_mentions=discord.AllowedMentions(
roles=bool(arguments["mentions"]),
everyone=bool(arguments["ateveryone"]),
),
)
giveaway_obj = Giveaway(
ctx.guild.id,
channel.id,
msg.id,
end,
prize,
str(emoji),
**{
k: v
for k, v in arguments.items()
if k not in ["prize", "duration", "channel", "emoji"]
},
)
self.giveaways[msg.id] = giveaway_obj
await msg.add_reaction(emoji)
giveaway_dict = deepcopy(giveaway_obj.__dict__)
giveaway_dict["endtime"] = giveaway_dict["endtime"].timestamp()
await self.config.custom(GIVEAWAY_KEY, str(ctx.guild.id), str(msg.id)).set(giveaway_dict)
@giveaway.command()
async def entrants(self, ctx: commands.Context, msgid: int):
"""List all entrants for a giveaway."""
if msgid not in self.giveaways:
return await ctx.send("Giveaway not found.")
giveaway = self.giveaways[msgid]
if not giveaway.entrants:
return await ctx.send("No entrants.")
count = {}
for entrant in giveaway.entrants:
if entrant not in count:
count[entrant] = 1
else:
count[entrant] += 1
msg = ""
for userid, count_int in count.items():
user = ctx.guild.get_member(userid)
msg += f"{user.mention} ({count_int})\n" if user else f"<{userid}> ({count_int})\n"
embeds = []
for page in pagify(msg, delims=["\n"], page_length=800):
embed = discord.Embed(
title="Entrants", description=page, color=await ctx.embed_color()
)
embed.set_footer(text="Total entrants: {}".format(len(count)))
embeds.append(embed)
if len(embeds) == 1:
return await ctx.send(embed=embeds[0])
return await menu(ctx, embeds, DEFAULT_CONTROLS)
@giveaway.command()
async def info(self, ctx: commands.Context, msgid: int):
"""Information about a giveaway."""
if msgid not in self.giveaways:
return await ctx.send("Giveaway not found.")
giveaway = self.giveaways[msgid]
winners = giveaway.kwargs.get("winners", 1) or 1
msg = f"**Entrants:**: {len(giveaway.entrants)}\n**End**: <t:{int(giveaway.endtime.timestamp())}:R>\n"
for kwarg in giveaway.kwargs:
if giveaway.kwargs[kwarg]:
msg += f"**{kwarg.title()}:** {giveaway.kwargs[kwarg]}\n"
embed = discord.Embed(
title=f"{f'{winners}x ' if winners > 1 else ''}{giveaway.prize}",
color=await ctx.embed_color(),
description=msg,
)
embed.set_footer(text=f"Giveaway ID #{msgid}")
await ctx.send(embed=embed)
@giveaway.command(name="list")
async def _list(self, ctx: commands.Context):
"""List all giveaways in the server."""
if not self.giveaways:
return await ctx.send("No giveaways are running.")
giveaways = {
x: self.giveaways[x]
for x in self.giveaways
if self.giveaways[x].guildid == ctx.guild.id
}
if not giveaways:
return await ctx.send("No giveaways are running.")
msg = "".join(
f"{msgid}: [{giveaways[msgid].prize}](https://discord.com/channels/{value.guildid}/{giveaways[msgid].channelid}/{msgid})\n"
for msgid, value in giveaways.items()
)
embeds = []
for page in pagify(msg, delims=["\n"]):
embed = discord.Embed(
title=f"Giveaways in {ctx.guild}", description=page, color=await ctx.embed_color()
)
embeds.append(embed)
if len(embeds) == 1:
return await ctx.send(embed=embeds[0])
return await menu(ctx, embeds, DEFAULT_CONTROLS)
@giveaway.command()
async def explain(self, ctx: commands.Context):
"""Explanation of giveaway advanced and the arguements it supports."""
msg = """
Giveaway advanced creation.
Giveaway advanced contains many different flags that can be used to customize the giveaway.
The flags are as follows:
Required arguments:
`--prize`: The prize to be won.
            Required Mutually Exclusive Arguments:
            You must pass exactly ONE of these, but not both:
`--duration`: The duration of the giveaway. Must be in format such as `2d3h30m`.
`--end`: The end time of the giveaway. Must be in format such as `2021-12-23T30:00:00.000Z`, `tomorrow at 3am`, `in 4 hours`. Defaults to UTC if no timezone is provided.
Optional arguments:
`--channel`: The channel to post the giveaway in. Will default to this channel if not specified.
`--emoji`: The emoji to use for the giveaway.
`--roles`: Roles that the giveaway will be restricted to. If the role contains a space, use their ID.
`--multiplier`: Multiplier for those in specified roles. Must be a positive number.
`--multi-roles`: Roles that will receive the multiplier. If the role contains a space, use their ID.
`--cost`: Cost of credits to enter the giveaway. Must be a positive number.
            `--joined`: How long the user must have been a member of the server to enter the giveaway. Must be a positive number of days.
            `--created`: How long the user's Discord account must have existed to enter the giveaway. Must be a positive number of days.
`--blacklist`: Blacklisted roles that cannot enter the giveaway. If the role contains a space, use their ID.
`--winners`: How many winners to draw. Must be a positive number.
`--mentions`: Roles to mention in the giveaway notice.
`--description`: Description of the giveaway.
Setting Arguments:
`--congratulate`: Whether or not to congratulate the winner. Not passing will default to off.
`--notify`: Whether or not to notify a user if they failed to enter the giveaway. Not passing will default to off.
`--multientry`: Whether or not to allow multiple entries. Not passing will default to off.
            `--announce`: Whether to post a separate message when the giveaway ends. Not passing will default to off.
`--ateveryone`: Whether to tag @everyone in the giveaway notice.
`--show-requirements`: Whether to show the requirements of the giveaway.
3rd party integrations:
See `[p]gw integrations` for more information.
Examples:
`{prefix}gw advanced --prize A new sword --duration 1h30m --restrict Role ID --multiplier 2 --multi-roles RoleID RoleID2`
            `{prefix}gw advanced --prize A better sword --duration 2d3h30m --channel channel-name --cost 250 --joined 50 --congratulate --notify --multientry --level-req 100`""".format(
prefix=ctx.clean_prefix
)
embed = discord.Embed(
title="Giveaway Advanced Explanation", description=msg, color=await ctx.embed_color()
)
await ctx.send(embed=embed)
@giveaway.command()
async def integrations(self, ctx: commands.Context):
"""Various 3rd party integrations for giveaways."""
msg = """
3rd party integrations for giveaways.
You can use these integrations to integrate giveaways with other 3rd party services.
        `--level-req`: Integrate with the Red levelling system. Must be Fixator's leveler.
        `--rep-req`: Integrate with the Red levelling rep system. Must be Fixator's leveler.
        `--tatsu-level`: Integrate with Tatsumaki's levelling system; must have a valid Tatsumaki API key set.
        `--tatsu-rep`: Integrate with Tatsumaki's rep system; must have a valid Tatsumaki API key set.
        `--mee6-level`: Integrate with the MEE6 levelling system.
        `--amari-level`: Integrate with Amari's levelling system.
        `--amari-weekly-xp`: Integrate with Amari's weekly XP system.""".format(
prefix=ctx.clean_prefix
)
if await self.bot.is_owner(ctx.author):
msg += """
**API Keys**
Tatsu's API key can be set with the following command (You must find where this key is yourself): `{prefix}set api tatsumaki authorization <key>`
Amari's API key can be set with the following command (Apply [here](https://docs.google.com/forms/d/e/1FAIpQLScQDCsIqaTb1QR9BfzbeohlUJYA3Etwr-iSb0CRKbgjA-fq7Q/viewform)): `{prefix}set api amari authorization <key>`
For any integration suggestions, suggest them via the [#support-flare-cogs](https://discord.gg/GET4DVk) channel on the support server or [flare-cogs](https://github.com/flaree/flare-cogs/issues/new/choose) github.""".format(
prefix=ctx.clean_prefix
)
embed = discord.Embed(
title="3rd Party Integrations", description=msg, color=await ctx.embed_color()
)
await ctx.send(embed=embed)
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):
if payload.user_id == self.bot.user.id:
return
if payload.message_id in self.giveaways:
giveaway = self.giveaways[payload.message_id]
if payload.emoji.is_custom_emoji() and str(payload.emoji) != giveaway.emoji:
return
elif payload.emoji.is_unicode_emoji() and str(payload.emoji) != giveaway.emoji:
return
try:
await giveaway.add_entrant(payload.member, bot=self.bot, session=self.session)
except GiveawayEnterError as e:
if giveaway.kwargs.get("notify", False):
await payload.member.send(e.message)
return
except GiveawayExecError as e:
log.exception("Error while adding user to giveaway", exc_info=e)
return
await self.config.custom(
GIVEAWAY_KEY, payload.guild_id, payload.message_id
).entrants.set(self.giveaways[payload.message_id].entrants)
| 42.526419
| 240
| 0.587456
|
71c84b6d0eed09d2f40a7040e788202179c9327a
| 2,014
|
py
|
Python
|
src/attestation/azext_attestation/__init__.py
|
YingXue/azure-cli-extensions
|
30086b7fe22ed591daaae9019920db6c16aef9de
|
[
"MIT"
] | 1
|
2020-09-16T03:47:44.000Z
|
2020-09-16T03:47:44.000Z
|
src/attestation/azext_attestation/__init__.py
|
YingXue/azure-cli-extensions
|
30086b7fe22ed591daaae9019920db6c16aef9de
|
[
"MIT"
] | null | null | null |
src/attestation/azext_attestation/__init__.py
|
YingXue/azure-cli-extensions
|
30086b7fe22ed591daaae9019920db6c16aef9de
|
[
"MIT"
] | 1
|
2019-05-02T00:55:30.000Z
|
2019-05-02T00:55:30.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azext_attestation.generated._help import helps # pylint: disable=unused-import
class AttestationManagementClientCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
from azext_attestation.generated._client_factory import cf_attestation_mgmt
attestation_custom = CliCommandType(
operations_tmpl='azext_attestation.custom#{}',
client_factory=cf_attestation_mgmt)
super(AttestationManagementClientCommandsLoader, self).__init__(cli_ctx=cli_ctx,
custom_command_type=attestation_custom)
def load_command_table(self, args):
from azext_attestation.generated.commands import load_command_table
load_command_table(self, args)
try:
from azext_attestation.manual.commands import load_command_table as load_command_table_manual
load_command_table_manual(self, args)
except ImportError:
pass
return self.command_table
def load_arguments(self, command):
from azext_attestation.generated._params import load_arguments
load_arguments(self, command)
try:
from azext_attestation.manual._params import load_arguments as load_arguments_manual
load_arguments_manual(self, command)
except ImportError:
pass
COMMAND_LOADER_CLS = AttestationManagementClientCommandsLoader
| 42.851064
| 111
| 0.667329
|
6203ddd086f9f1c20fe16d0e85228cf2f3063f16
| 2,159
|
py
|
Python
|
chiadoge/consensus/deficit.py
|
Jsewill/chiadogecoin
|
55511228301a0b4d00c8f4da270be8b434777470
|
[
"Apache-2.0"
] | 2
|
2021-07-05T14:34:35.000Z
|
2022-01-01T21:27:52.000Z
|
chiadoge/consensus/deficit.py
|
Jsewill/chiadogecoin
|
55511228301a0b4d00c8f4da270be8b434777470
|
[
"Apache-2.0"
] | null | null | null |
chiadoge/consensus/deficit.py
|
Jsewill/chiadogecoin
|
55511228301a0b4d00c8f4da270be8b434777470
|
[
"Apache-2.0"
] | 1
|
2021-07-07T11:08:36.000Z
|
2021-07-07T11:08:36.000Z
|
from typing import Optional
from chiadoge.consensus.block_record import BlockRecord
from chiadoge.consensus.constants import ConsensusConstants
from chiadoge.util.ints import uint8, uint32
def calculate_deficit(
constants: ConsensusConstants,
height: uint32,
prev_b: Optional[BlockRecord],
overflow: bool,
num_finished_sub_slots: int,
) -> uint8:
"""
Returns the deficit of the block to be created at height.
Args:
constants: consensus constants being used for this chain
height: block height of the block that we care about
prev_b: previous block
overflow: whether or not this is an overflow block
num_finished_sub_slots: the number of finished slots between infusion points of prev and current
"""
if height == 0:
return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)
else:
assert prev_b is not None
prev_deficit: uint8 = prev_b.deficit
if prev_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# Prev sb must be an overflow sb. However maybe it's in a different sub-slot
if overflow:
if num_finished_sub_slots > 0:
# We are an overflow block, but in a new sub-slot, so we can decrease the deficit
return uint8(prev_deficit - 1)
# Still overflowed, so we cannot decrease the deficit
return uint8(prev_deficit)
else:
# We are no longer overflow, can decrease
return uint8(prev_deficit - 1)
elif prev_deficit == 0:
if num_finished_sub_slots == 0:
return uint8(0)
elif num_finished_sub_slots == 1:
if overflow:
return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK)
else:
return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)
else:
# More than one finished sub slot, we can decrease deficit
return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)
else:
return uint8(prev_deficit - 1)
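A rough sketch (not from the chiadoge test suite) of how calculate_deficit behaves, using SimpleNamespace stand-ins for ConsensusConstants and the previous BlockRecord; the value 16 for MIN_BLOCKS_PER_CHALLENGE_BLOCK is only illustrative.
from types import SimpleNamespace

constants = SimpleNamespace(MIN_BLOCKS_PER_CHALLENGE_BLOCK=16)

# Genesis block: the deficit starts one below the maximum.
assert calculate_deficit(constants, 0, None, overflow=False, num_finished_sub_slots=0) == 15

# Common case: a non-overflow block simply decreases the previous deficit by one.
prev = SimpleNamespace(deficit=5)
assert calculate_deficit(constants, 100, prev, overflow=False, num_finished_sub_slots=0) == 4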
| 39.981481
| 104
| 0.635016
|
473f698937a8e95144e4a9da6e3272bed37c5874
| 741
|
py
|
Python
|
backend/api/serializers/ActividadSerializer.py
|
kukiamarilla/polijira
|
510dbc1473db973ac71fc68fa5a9b758b90a780b
|
[
"MIT"
] | 1
|
2022-03-02T02:28:49.000Z
|
2022-03-02T02:28:49.000Z
|
backend/api/serializers/ActividadSerializer.py
|
kukiamarilla/polijira
|
510dbc1473db973ac71fc68fa5a9b758b90a780b
|
[
"MIT"
] | 22
|
2021-09-01T17:44:25.000Z
|
2021-10-07T19:39:09.000Z
|
backend/api/serializers/ActividadSerializer.py
|
kukiamarilla/polijira
|
510dbc1473db973ac71fc68fa5a9b758b90a780b
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from backend.api.models import Actividad
from backend.api.serializers.UsuarioSerializer import UsuarioSerializer
class ActividadSerializer(serializers.ModelSerializer):
"""
ActividadSerializer Serializer del modelo Actividad
Args:
serializers (ModelSerializer): Serializer del módulo Rest Framework
"""
desarrollador = UsuarioSerializer(many=False, read_only=True)
class Meta:
"""
Metadatos de Actividad
"""
model = Actividad
fields = (
"id",
"titulo",
"descripcion",
"horas",
"fecha_creacion",
"sprint_backlog",
"desarrollador",
)
| 24.7
| 75
| 0.620783
|
3ca8edea2c3a8bbdb5df1d9824232d04445c8101
| 198
|
py
|
Python
|
life.py
|
mmoreirasouza/pythonzombiegame
|
fa36c72ad0734ba8d9db8753a6be5823d68571af
|
[
"Apache-2.0"
] | 2
|
2018-03-03T21:42:55.000Z
|
2021-06-02T02:27:03.000Z
|
life.py
|
mmoreirasouza/pythonzombiegame
|
fa36c72ad0734ba8d9db8753a6be5823d68571af
|
[
"Apache-2.0"
] | null | null | null |
life.py
|
mmoreirasouza/pythonzombiegame
|
fa36c72ad0734ba8d9db8753a6be5823d68571af
|
[
"Apache-2.0"
] | 2
|
2019-04-28T16:59:35.000Z
|
2021-06-02T02:27:08.000Z
|
from rect import Rect
class Life(Rect):
Lifes = []
def __init__(self, x, y):
self.health = 200
self.total_frames = 0
super(Life, self).__init__(x, y, 32, 32)
Life.Lifes.append(self)
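A quick illustration of the class-level registry (assuming the imported Rect takes x, y, width, height and a fresh interpreter): every Life instance appends itself to Life.Lifes on construction.
Life(10, 20)
Life(42, 64)
print(len(Life.Lifes))  # 2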
| 13.2
| 42
| 0.651515
|
f45edee2008083ac214eed672e7e2a07fea59725
| 13,269
|
py
|
Python
|
st2common/tests/unit/test_action_db_utils.py
|
FairwindsOps/st2
|
2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa
|
[
"Apache-2.0"
] | 1
|
2021-04-08T03:21:49.000Z
|
2021-04-08T03:21:49.000Z
|
st2common/tests/unit/test_action_db_utils.py
|
FairwindsOps/st2
|
2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa
|
[
"Apache-2.0"
] | null | null | null |
st2common/tests/unit/test_action_db_utils.py
|
FairwindsOps/st2
|
2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import uuid
import mock
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.transport.publishers import PoolPublisher
from st2common.models.api.action import RunnerTypeAPI
from st2common.models.db.action import ActionDB
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.action import Action
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.runner import RunnerType
from st2common.transport.liveaction import LiveActionPublisher
from st2common.util.date import get_datetime_utc_now
import st2common.util.action_db as action_db_utils
from st2tests.base import DbTestCase
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class ActionDBUtilsTestCase(DbTestCase):
runnertype_db = None
action_db = None
liveaction_db = None
@classmethod
def setUpClass(cls):
super(ActionDBUtilsTestCase, cls).setUpClass()
ActionDBUtilsTestCase._setup_test_models()
def test_get_runnertype_nonexisting(self):
# By id.
self.assertRaises(StackStormDBObjectNotFoundError, action_db_utils.get_runnertype_by_id,
'somedummyrunnerid')
# By name.
self.assertRaises(StackStormDBObjectNotFoundError, action_db_utils.get_runnertype_by_name,
'somedummyrunnername')
def test_get_runnertype_existing(self):
# Lookup by id and verify name equals.
runner = action_db_utils.get_runnertype_by_id(ActionDBUtilsTestCase.runnertype_db.id)
self.assertEqual(runner.name, ActionDBUtilsTestCase.runnertype_db.name)
# Lookup by name and verify id equals.
runner = action_db_utils.get_runnertype_by_name(ActionDBUtilsTestCase.runnertype_db.name)
self.assertEqual(runner.id, ActionDBUtilsTestCase.runnertype_db.id)
def test_get_action_nonexisting(self):
# By id.
self.assertRaises(StackStormDBObjectNotFoundError, action_db_utils.get_action_by_id,
'somedummyactionid')
# By ref.
action = action_db_utils.get_action_by_ref('packaintexist.somedummyactionname')
self.assertTrue(action is None)
def test_get_action_existing(self):
# Lookup by id and verify name equals
action = action_db_utils.get_action_by_id(ActionDBUtilsTestCase.action_db.id)
self.assertEqual(action.name, ActionDBUtilsTestCase.action_db.name)
# Lookup by reference as string.
action_ref = ResourceReference.to_string_reference(
pack=ActionDBUtilsTestCase.action_db.pack,
name=ActionDBUtilsTestCase.action_db.name)
action = action_db_utils.get_action_by_ref(action_ref)
self.assertEqual(action.id, ActionDBUtilsTestCase.action_db.id)
def test_get_actionexec_nonexisting(self):
# By id.
self.assertRaises(StackStormDBObjectNotFoundError, action_db_utils.get_liveaction_by_id,
'somedummyactionexecid')
def test_get_actionexec_existing(self):
liveaction = action_db_utils.get_liveaction_by_id(ActionDBUtilsTestCase.liveaction_db.id)
self.assertEqual(liveaction, ActionDBUtilsTestCase.liveaction_db)
@mock.patch.object(LiveActionPublisher, 'publish_state', mock.MagicMock())
def test_update_liveaction_status(self):
liveaction_db = LiveActionDB()
liveaction_db.status = 'initializing'
liveaction_db.start_timestamp = get_datetime_utc_now()
liveaction_db.action = ResourceReference(
name=ActionDBUtilsTestCase.action_db.name,
pack=ActionDBUtilsTestCase.action_db.pack).ref
params = {
'actionstr': 'foo',
'some_key_that_aint_exist_in_action_or_runner': 'bar',
'runnerint': 555
}
liveaction_db.parameters = params
liveaction_db = LiveAction.add_or_update(liveaction_db)
origliveaction_db = copy.copy(liveaction_db)
# Update by id.
newliveaction_db = action_db_utils.update_liveaction_status(
status='running', liveaction_id=liveaction_db.id)
# Verify id didn't change.
self.assertEqual(origliveaction_db.id, newliveaction_db.id)
self.assertEqual(newliveaction_db.status, 'running')
# Verify that state is published.
self.assertTrue(LiveActionPublisher.publish_state.called)
LiveActionPublisher.publish_state.assert_called_once_with(newliveaction_db, 'running')
# Update status, result, context, and end timestamp.
now = get_datetime_utc_now()
status = 'succeeded'
result = 'Work is done.'
context = {'third_party_id': uuid.uuid4().hex}
newliveaction_db = action_db_utils.update_liveaction_status(
status=status, result=result, context=context, end_timestamp=now,
liveaction_id=liveaction_db.id)
self.assertEqual(origliveaction_db.id, newliveaction_db.id)
self.assertEqual(newliveaction_db.status, status)
self.assertEqual(newliveaction_db.result, result)
self.assertDictEqual(newliveaction_db.context, context)
self.assertEqual(newliveaction_db.end_timestamp, now)
@mock.patch.object(LiveActionPublisher, 'publish_state', mock.MagicMock())
def test_update_liveaction_result_with_dotted_key(self):
liveaction_db = LiveActionDB()
liveaction_db.status = 'initializing'
liveaction_db.start_timestamp = get_datetime_utc_now()
liveaction_db.action = ResourceReference(
name=ActionDBUtilsTestCase.action_db.name,
pack=ActionDBUtilsTestCase.action_db.pack).ref
params = {
'actionstr': 'foo',
'some_key_that_aint_exist_in_action_or_runner': 'bar',
'runnerint': 555
}
liveaction_db.parameters = params
liveaction_db = LiveAction.add_or_update(liveaction_db)
origliveaction_db = copy.copy(liveaction_db)
# Update by id.
newliveaction_db = action_db_utils.update_liveaction_status(
status='running', liveaction_id=liveaction_db.id)
# Verify id didn't change.
self.assertEqual(origliveaction_db.id, newliveaction_db.id)
self.assertEqual(newliveaction_db.status, 'running')
# Verify that state is published.
self.assertTrue(LiveActionPublisher.publish_state.called)
LiveActionPublisher.publish_state.assert_called_once_with(newliveaction_db, 'running')
now = get_datetime_utc_now()
status = 'succeeded'
result = {'a': 1, 'b': True, 'a.b.c': 'abc'}
context = {'third_party_id': uuid.uuid4().hex}
newliveaction_db = action_db_utils.update_liveaction_status(
status=status, result=result, context=context, end_timestamp=now,
liveaction_id=liveaction_db.id)
self.assertEqual(origliveaction_db.id, newliveaction_db.id)
self.assertEqual(newliveaction_db.status, status)
self.assertIn('a.b.c', result.keys())
self.assertDictEqual(newliveaction_db.result, result)
self.assertDictEqual(newliveaction_db.context, context)
self.assertEqual(newliveaction_db.end_timestamp, now)
@mock.patch.object(LiveActionPublisher, 'publish_state', mock.MagicMock())
def test_update_LiveAction_status_invalid(self):
liveaction_db = LiveActionDB()
liveaction_db.status = 'initializing'
liveaction_db.start_timestamp = get_datetime_utc_now()
liveaction_db.action = ResourceReference(
name=ActionDBUtilsTestCase.action_db.name,
pack=ActionDBUtilsTestCase.action_db.pack).ref
params = {
'actionstr': 'foo',
'some_key_that_aint_exist_in_action_or_runner': 'bar',
'runnerint': 555
}
liveaction_db.parameters = params
liveaction_db = LiveAction.add_or_update(liveaction_db)
# Update by id.
self.assertRaises(ValueError, action_db_utils.update_liveaction_status,
status='mea culpa', liveaction_id=liveaction_db.id)
# Verify that state is not published.
self.assertFalse(LiveActionPublisher.publish_state.called)
@mock.patch.object(LiveActionPublisher, 'publish_state', mock.MagicMock())
def test_update_same_liveaction_status(self):
liveaction_db = LiveActionDB()
liveaction_db.status = 'requested'
liveaction_db.start_timestamp = get_datetime_utc_now()
liveaction_db.action = ResourceReference(
name=ActionDBUtilsTestCase.action_db.name,
pack=ActionDBUtilsTestCase.action_db.pack).ref
params = {
'actionstr': 'foo',
'some_key_that_aint_exist_in_action_or_runner': 'bar',
'runnerint': 555
}
liveaction_db.parameters = params
liveaction_db = LiveAction.add_or_update(liveaction_db)
origliveaction_db = copy.copy(liveaction_db)
# Update by id.
newliveaction_db = action_db_utils.update_liveaction_status(
status='requested', liveaction_id=liveaction_db.id)
# Verify id didn't change.
self.assertEqual(origliveaction_db.id, newliveaction_db.id)
self.assertEqual(newliveaction_db.status, 'requested')
# Verify that state is not published.
self.assertFalse(LiveActionPublisher.publish_state.called)
def test_get_args(self):
params = {
'actionstr': 'foo',
'actionint': 20,
'runnerint': 555
}
pos_args, named_args = action_db_utils.get_args(params, ActionDBUtilsTestCase.action_db)
self.assertListEqual(pos_args, ['20', 'foo'], 'Positional args not parsed correctly.')
self.assertTrue('actionint' not in named_args)
self.assertTrue('actionstr' not in named_args)
self.assertEqual(named_args.get('runnerint'), 555)
@classmethod
def _setup_test_models(cls):
ActionDBUtilsTestCase.setup_runner()
ActionDBUtilsTestCase.setup_action_models()
@classmethod
def setup_runner(cls):
test_runner = {
'name': 'test-runner',
'description': 'A test runner.',
'enabled': True,
'runner_parameters': {
'runnerstr': {
'description': 'Foo str param.',
'type': 'string',
'default': 'defaultfoo'
},
'runnerint': {
'description': 'Foo int param.',
'type': 'number'
},
'runnerdummy': {
'description': 'Dummy param.',
'type': 'string',
'default': 'runnerdummy'
}
},
'runner_module': 'tests.test_runner'
}
runnertype_api = RunnerTypeAPI(**test_runner)
ActionDBUtilsTestCase.runnertype_db = RunnerType.add_or_update(
RunnerTypeAPI.to_model(runnertype_api))
@classmethod
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
def setup_action_models(cls):
pack = 'wolfpack'
name = 'action-1'
parameters = {
'actionstr': {'type': 'string', 'position': 1, 'required': True},
'actionint': {'type': 'number', 'default': 10, 'position': 0},
'runnerdummy': {'type': 'string', 'default': 'actiondummy'}
}
action_db = ActionDB(pack=pack, name=name, description='awesomeness',
enabled=True,
ref=ResourceReference(name=name, pack=pack).ref,
entry_point='', runner_type={'name': 'test-runner'},
parameters=parameters)
ActionDBUtilsTestCase.action_db = Action.add_or_update(action_db)
liveaction_db = LiveActionDB()
liveaction_db.status = 'initializing'
liveaction_db.start_timestamp = get_datetime_utc_now()
liveaction_db.action = ActionDBUtilsTestCase.action_db.ref
params = {
'actionstr': 'foo',
'some_key_that_aint_exist_in_action_or_runner': 'bar',
'runnerint': 555
}
liveaction_db.parameters = params
ActionDBUtilsTestCase.liveaction_db = LiveAction.add_or_update(liveaction_db)
| 43.792079
| 98
| 0.6788
|
db80b632e3c2017221a11f325b27d29615a898ad
| 1,770
|
py
|
Python
|
get_team_roles/team_roles.py
|
erikpaasonen/public-support-scripts
|
3e9750ed2c6e6c162776e41d33330666f649fe39
|
[
"Apache-2.0"
] | 59
|
2015-01-11T01:59:36.000Z
|
2022-03-07T07:51:08.000Z
|
get_team_roles/team_roles.py
|
erikpaasonen/public-support-scripts
|
3e9750ed2c6e6c162776e41d33330666f649fe39
|
[
"Apache-2.0"
] | 12
|
2019-03-22T23:14:48.000Z
|
2022-03-25T20:13:17.000Z
|
get_team_roles/team_roles.py
|
erikpaasonen/public-support-scripts
|
3e9750ed2c6e6c162776e41d33330666f649fe39
|
[
"Apache-2.0"
] | 38
|
2015-01-26T18:53:25.000Z
|
2021-12-16T17:39:06.000Z
|
import argparse
import sys
import pdpyras
def get_teams(session, comma_separated):
if comma_separated:
sys.stdout.write("Team ID, Team Name, User ID, User name, Team role\n")
try:
for team in session.iter_all('teams'):
get_team_members(team['id'], team['name'], session, comma_separated)
except pdpyras.PDClientError as e:
raise e
def get_team_members(team_id, team_name, session, comma_separated):
try:
for member in session.iter_all('teams/{}/members'.format(team_id)):
if comma_separated:
sys.stdout.write("{}, {}, {}, {}, {}\n".format(team_id, team_name, member['user']['id'], member['user']['summary'], member['role']))
else:
sys.stdout.write("Team ID: {}\n".format(team_id))
sys.stdout.write("Team Name: {}\n".format(team_name))
sys.stdout.write("User ID: {}\n".format(member['user']['id']))
sys.stdout.write("User name: {}\n".format(member['user']['summary']))
sys.stdout.write("Team role: {}\n".format(member['role']))
sys.stdout.write("-----\n")
except pdpyras.PDClientError as e:
print("Could not get team members for team {} {}".format(team_name, team_id))
raise e
if __name__ == '__main__':
    ap = argparse.ArgumentParser(description="Retrieves team roles for "
                                             "users in a PagerDuty account")
ap.add_argument('-k', '--api-key', required=True, help="REST API key")
ap.add_argument('-c', '--comma-separated', required=False, default=False, action='store_true', help="Format output separated by commas")
args = ap.parse_args()
session = pdpyras.APISession(args.api_key)
get_teams(session, args.comma_separated)
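For completeness, a sketch of programmatic use that bypasses the argparse wrapper (the API key below is a placeholder; a real PagerDuty REST API key is required):
session = pdpyras.APISession("EXAMPLE_API_KEY_ONLY")   # placeholder, not a real key
get_teams(session, comma_separated=True)               # prints one CSV row per team member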
| 45.384615
| 148
| 0.622034
|
77b3fcdb8b3fba5cf9916fd0ff94bab72a1e63e9
| 11,064
|
py
|
Python
|
lib/cogs/duels.py
|
davidgao93/KenCoin
|
61654a66b6ec6cd49cd9e6f04d956164655d145e
|
[
"Unlicense"
] | null | null | null |
lib/cogs/duels.py
|
davidgao93/KenCoin
|
61654a66b6ec6cd49cd9e6f04d956164655d145e
|
[
"Unlicense"
] | 1
|
2021-07-20T02:00:20.000Z
|
2021-07-20T12:08:59.000Z
|
lib/cogs/duels.py
|
davidgao93/KenCoin
|
61654a66b6ec6cd49cd9e6f04d956164655d145e
|
[
"Unlicense"
] | null | null | null |
import re
import time
from datetime import datetime
from random import randint
from discord import Member, Embed
from asyncio import sleep
from discord.ext.commands import Cog
from discord.ext.commands import command
from discord.ext.commands import BadArgument
from ..db import db
class Duels(Cog):
def __init__(self, bot):
self.bot = bot
self.cid = bot.CID
self.gid = bot.GID
self.coin = bot.COIN
self.cs = bot.CS
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up("duels")
@Cog.listener()
async def on_reaction_add(self, reaction, user):
print(f"checking {reaction.message.author.id} to see if valid duel")
if (reaction.message.content.startswith("It's a duel!") and reaction.message.author.id == 863215534069776405):
coins, duel = db.record(f"SELECT {self.cs}, Duel FROM ledger WHERE UserID = ?", user.id)
reg = re.findall('\d+', reaction.message.content)
duel_amt = int(reg[0])
sponsor = int(reg[1])
sponsor_name = self.bot.get_user(sponsor).display_name
if (user.id == sponsor):
await reaction.message.delete()
if (duel == 1):
await self.bot.get_channel(self.cid).send(f"Duel cancelled! You are refunded {duel_amt}{self.cs}.", delete_after=5)
db.execute(f"UPDATE ledger SET {self.cs} = {self.cs} + ?, Duel = 0 WHERE UserID = ?", duel_amt, sponsor)
db.commit()
return
else:
await self.bot.get_channel(self.cid).send(f"Duel not found.", delete_after=5)
return
if (coins < duel_amt) :
await self.bot.get_channel(self.cid).send(f"You don't have enough to match the duel amount. Try again when you do.")
else:
await reaction.message.delete()
db.execute(f"UPDATE ledger SET {self.cs} = {self.cs} - ? WHERE UserID = ?", duel_amt, user.id)
db.commit()
await self.bot.get_channel(self.cid).send(f"{user} has accepted the duel!")
await self.bot.get_channel(self.cid).send(f"You both pull out your abyssal whips...", delete_after=5)
await sleep(1.5)
print(f"dueling {user.id} and {sponsor}")
sponsor_hp = 99
user_hp = 99
sponsor_roll = randint(0,100)
user_roll = randint(0,100)
if (sponsor_roll > user_roll):
PID = 1 # sponsor has PID
await self.bot.get_channel(self.cid).send(f"{sponsor_name} has the PID advantage! They strike first!")
else:
PID = 0 # challenger has PID
await self.bot.get_channel(self.cid).send(f"{user} has the PID advantage! They strike first!")
while (not (sponsor_hp <= 0 or user_hp <= 0)):
await sleep(3)
if PID == 1:
embed = Embed(title="⚔️ Duels ⚔️",
colour=0x783729)
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/699690514051629089/866413194285547530/Abyssal_whip.png")
sponsor_hit = randint(8,20) if randint(0, 100) > 20 else 0
user_hit = randint(8,20) if randint(0, 100) > 20 else 0
if (sponsor_hit > 0):
user_hp = user_hp - sponsor_hit
user_hp = user_hp if user_hp > 0 else 0
embed.add_field(name=f"💥 {sponsor_name} hits for **{sponsor_hit}** damage. 💥",
value=f"{user} has **{user_hp}**HP left!" if user_hp> 0 else f"You cleanly slash **{user}**'s head off with one last attack!",
inline=False)
else:
embed.add_field(name=f"💨 {sponsor_name} misses! 💨",
value=f"{user} has **{user_hp}**HP left!",
inline=False)
if (user_hp > 0):
if (user_hit > 0):
sponsor_hp = sponsor_hp - user_hit
sponsor_hp = sponsor_hp if sponsor_hp > 0 else 0
embed.add_field(name=f"💥 {user} hits for **{user_hit}** damage. 💥",
value=f"{sponsor_name} has **{sponsor_hp}**HP left!" if sponsor_hp > 0 else f"You cleanly slash **{sponsor_name}**'s head off with one last attack!",
inline=False)
else:
embed.add_field(name=f"💨 {user} misses! 💨",
value=f"{sponsor_name} has **{sponsor_hp}**HP left!",
inline=False)
else:
embed.add_field(name=f"💀 {user} is dead! 💀",
value=f"{sponsor_name} has won the duel with **{sponsor_hp}**HP left!",
inline=False)
await self.bot.get_channel(self.cid).send(embed=embed, delete_after=5)
else:
embed = Embed(title="⚔️ Duels ⚔️",
colour=0x783729)
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/699690514051629089/866413194285547530/Abyssal_whip.png")
sponsor_hit = randint(8,20) if randint(0, 100) > 20 else 0
user_hit = randint(8,20) if randint(0, 100) > 20 else 0
if (user_hit > 0):
sponsor_hp = sponsor_hp - user_hit
sponsor_hp = sponsor_hp if sponsor_hp > 0 else 0
embed.add_field(name=f"💥 {user} hits for **{user_hit}** damage. 💥",
value=f"{sponsor_name} has **{sponsor_hp}**HP left!" if sponsor_hp > 0 else f"You brutally slash **{sponsor_name}**'s head off with one last attack!",
inline=False)
else:
embed.add_field(name=f"💨 {user} misses! 💨",
value=f"{sponsor_name} has **{sponsor_hp}**HP left!",
inline=False)
if (sponsor_hp > 0):
if (sponsor_hit > 0):
user_hp = user_hp - sponsor_hit
user_hp = user_hp if user_hp > 0 else 0
embed.add_field(name=f"💥 {sponsor_name} hits for **{sponsor_hit}** damage. 💥",
value=f"{user} has **{user_hp}**HP left!" if user_hp> 0 else f"You brutally slash **{user}**'s head off with one last attack!",
inline=False)
else:
embed.add_field(name=f"💨 {sponsor_name} misses! 💨",
value=f"{user} has **{user_hp}**HP left!",
inline=False)
else:
embed.add_field(name=f"💀 {sponsor_name} is dead! 💀",
value=f"{user} has won the duel with **{user_hp}**HP left!",
inline=False)
await self.bot.get_channel(self.cid).send(embed=embed, delete_after=3.5)
embed_result = Embed(title="⚔️ Duel result ⚔️",
colour=0x783729, timestamp=datetime.utcnow())
embed_result.set_footer(text=f"We look forward to your next duel")
embed_result.set_thumbnail(url="https://cdn.discordapp.com/attachments/699690514051629089/866413194285547530/Abyssal_whip.png")
if (sponsor_hp == 0): # Sponsor loses, no need to remove money twice
embed_result.add_field(name=f"🎉 {user} wins {duel_amt * 2}{self.cs}. 🎉",
value=f"{sponsor_name} hangs their head in shame.",
inline=False)
db.execute(f"UPDATE ledger SET {self.cs} = {self.cs} + ?, Duel = 0 WHERE UserID = ?", duel_amt * 2, user.id)
db.execute(f"UPDATE ledger SET Duel = 0 WHERE UserID = ?", sponsor)
else: # Sponsor wins, get duel_amt * 2, user loses
embed_result.add_field(name=f"🎉 {sponsor_name} wins {duel_amt * 2}{self.cs}. 🎉",
value=f"{user} hangs their head in shame.",
inline=False)
db.execute(f"UPDATE ledger SET {self.cs} = {self.cs} + ?, Duel = 0 WHERE UserID = ?", duel_amt * 2, sponsor)
db.execute(f"UPDATE ledger SET Duel = 0 WHERE UserID = ?", user.id)
await self.bot.get_channel(self.cid).send(embed=embed_result)
db.commit()
@command(name="duel", aliases=["d", "D"], brief=f"Start a duel with <amt> as the ante.")
async def set_duel(self, ctx, amt: int):
        coins, duel_active = db.record(f"SELECT {self.cs}, Duel FROM ledger WHERE UserID = ?", ctx.author.id)  # fetch the author's balance and active-duel flag
if (duel_active == 1):
await ctx.send(f"You already have an active duel, you cannot start another until the previous has completed.")
return
if (amt > coins):
await ctx.send(f"You don't have enough {self.cs} to start this duel.")
return
elif (amt <= 0):
await ctx.send("Killing each other is fun and all, but there needs to be a stake.")
return
embed = Embed(title="⚔️ Duels ⚔️",
colour=0x783729, timestamp=datetime.utcnow())
embed.set_footer(text=f"Please duel responsibly")
embed.add_field(name=f"You put down **{amt}**{self.cs}.",
value=f"Your new balance is **{coins - amt}**{self.cs}",
inline=False)
await ctx.send(embed=embed)
await ctx.send(f"It's a duel! {ctx.author.name} has sponsored a duel for **{amt}**{self.cs}! React to **this message** to fight to the death!" +
f"*You may cancel the duel as the sponsor by reacting to get your {self.cs} back.* Duel ID: {ctx.author.id}")
db.execute(f"UPDATE ledger SET {self.cs} = {self.cs} - ?, Duel = 1 WHERE UserID = ?", amt, ctx.author.id)
db.commit()
@set_duel.error
async def set_duel_error(self, ctx, exc):
if isinstance(exc, BadArgument):
await ctx.send("Bad parameters.")
@Cog.listener()
async def on_reaction_remove(self, reaction, user):
pass
def setup(bot):
bot.add_cog(Duels(bot))
| 56.44898
| 182
| 0.510936
|
ebec900eaa0a138c4c5ad71639d2fe3e8d894063
| 1,946
|
py
|
Python
|
imagenet/create_test_set.py
|
cenkbircanoglu/openface
|
2d54ca065b6ff6ba816299d25f296bc1d13ad761
|
[
"Apache-2.0"
] | 1
|
2019-03-28T07:28:35.000Z
|
2019-03-28T07:28:35.000Z
|
imagenet/create_test_set.py
|
cenkbircanoglu/openface
|
2d54ca065b6ff6ba816299d25f296bc1d13ad761
|
[
"Apache-2.0"
] | null | null | null |
imagenet/create_test_set.py
|
cenkbircanoglu/openface
|
2d54ca065b6ff6ba816299d25f296bc1d13ad761
|
[
"Apache-2.0"
] | 1
|
2019-12-11T05:30:33.000Z
|
2019-12-11T05:30:33.000Z
|
import os
from PIL import Image
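# Build a per-class test set: copy each test image whose class also appears in the
# training mapping into <output>/<class name>/, resized to dim x dim.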
def get_test_set(mapping_path, test_mapping, test_folder, output, dim):
    with open(mapping_path, mode='r') as f:
train_data = f.readlines()
train_classes = {}
test_classes = {}
for d in train_data:
path, cls = d.replace('\n', '').split(" ")
cls = int(cls)
splitted = path.split('/')
clsname = splitted[2]
train_classes[cls] = clsname
    with open(test_mapping, mode='r') as f:
test_data = f.readlines()
for d in test_data:
path, cls = d.replace('\n', '').split(" ")
cls = int(cls)
if cls in train_classes.keys():
            if cls not in test_classes:
test_classes[cls] = [path]
else:
test_classes[cls].append(path)
    for i, im_names in test_classes.items():
directory = os.path.join(output, train_classes[i])
if not os.path.exists(directory):
os.makedirs(directory)
for im_name in im_names:
            print(directory)
out_filepath = os.path.join(directory, im_name)
in_filepath = os.path.join(test_folder, im_name)
im = Image.open(in_filepath)
im = im.resize((dim, dim))
im.save(out_filepath)
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description='Create a resized, per-class test set.')
    parser.add_argument('--trainMapping', type=str, help='path to the training set mapping file')
    parser.add_argument('--testMapping', type=str, help='path to the test set mapping file')
    parser.add_argument('--testFolder', type=str, help='folder containing the test images')
    parser.add_argument('--outputDir', type=str, help='directory where the per-class test set is written')
parser.add_argument('--dim', type=int, default=64)
args = parser.parse_args()
get_test_set(args.trainMapping, args.testMapping, args.testFolder, args.outputDir, args.dim)
| 37.423077
| 96
| 0.626927
|
5e99459e92404a877324d474db67774039c64723
| 8,869
|
py
|
Python
|
src/ssh/azext_ssh/ssh_info.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | null | null | null |
src/ssh/azext_ssh/ssh_info.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 9
|
2022-03-25T19:35:49.000Z
|
2022-03-31T06:09:47.000Z
|
src/ssh/azext_ssh/ssh_info.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-03-10T22:13:02.000Z
|
2022-03-10T22:13:02.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import datetime
import oschmod
import colorama
from colorama import Fore
from colorama import Style
from azure.cli.core import azclierror
from knack import log
from . import file_utils
from . import connectivity_utils
logger = log.get_logger(__name__)
class SSHSession():
# pylint: disable=too-many-instance-attributes
def __init__(self, resource_group_name, vm_name, ssh_ip, public_key_file, private_key_file,
use_private_ip, local_user, cert_file, port, ssh_client_folder, ssh_args,
delete_credentials, resource_type, ssh_proxy_folder, credentials_folder):
self.resource_group_name = resource_group_name
self.vm_name = vm_name
self.ip = ssh_ip
self.use_private_ip = use_private_ip
self.local_user = local_user
self.port = port
self.ssh_args = ssh_args
self.delete_credentials = delete_credentials
self.resource_type = resource_type
self.proxy_path = None
self.relay_info = None
self.public_key_file = os.path.abspath(public_key_file) if public_key_file else None
self.private_key_file = os.path.abspath(private_key_file) if private_key_file else None
self.cert_file = os.path.abspath(cert_file) if cert_file else None
self.ssh_client_folder = os.path.abspath(ssh_client_folder) if ssh_client_folder else None
self.ssh_proxy_folder = os.path.abspath(ssh_proxy_folder) if ssh_proxy_folder else None
self.credentials_folder = os.path.abspath(credentials_folder) if credentials_folder else None
def is_arc(self):
if self.resource_type == "Microsoft.HybridCompute":
return True
return False
def get_host(self):
if not self.is_arc():
if self.local_user and self.ip:
return self.local_user + "@" + self.ip
else:
if self.local_user and self.vm_name:
return self.local_user + "@" + self.vm_name
raise azclierror.BadRequestError("Unable to determine host.")
# build args behaves different depending on the resource type
def build_args(self):
private_key = []
port_arg = []
certificate = []
proxy_command = []
if self.private_key_file:
private_key = ["-i", self.private_key_file]
if self.cert_file:
certificate = ["-o", "CertificateFile=\"" + self.cert_file + "\""]
if self.is_arc():
if self.port:
proxy_command = ["-o", f"ProxyCommand=\"{self.proxy_path}\" -p {self.port}"]
else:
proxy_command = ["-o", f"ProxyCommand=\"{self.proxy_path}\""]
else:
if self.port:
port_arg = ["-p", self.port]
return proxy_command + private_key + certificate + port_arg
class ConfigSession():
# pylint: disable=too-many-instance-attributes
def __init__(self, config_path, resource_group_name, vm_name, ssh_ip, public_key_file,
private_key_file, overwrite, use_private_ip, local_user, cert_file, port,
resource_type, credentials_folder, ssh_proxy_folder, ssh_client_folder):
self.config_path = os.path.abspath(config_path)
self.resource_group_name = resource_group_name
self.vm_name = vm_name
self.ip = ssh_ip
self.overwrite = overwrite
self.use_private_ip = use_private_ip
self.local_user = local_user
self.port = port
self.resource_type = resource_type
self.proxy_path = None
self.relay_info = None
self.relay_info_path = None
self.public_key_file = os.path.abspath(public_key_file) if public_key_file else None
self.private_key_file = os.path.abspath(private_key_file) if private_key_file else None
self.cert_file = os.path.abspath(cert_file) if cert_file else None
self.ssh_client_folder = os.path.abspath(ssh_client_folder) if ssh_client_folder else None
self.ssh_proxy_folder = os.path.abspath(ssh_proxy_folder) if ssh_proxy_folder else None
self.credentials_folder = os.path.abspath(credentials_folder) if credentials_folder else None
def is_arc(self):
if self.resource_type == "Microsoft.HybridCompute":
return True
return False
def get_config_text(self, is_aad):
lines = [""]
if self.is_arc():
self.relay_info_path = self._create_relay_info_file()
lines = lines + self._get_arc_entry(is_aad)
else:
if self.resource_group_name and self.vm_name and self.ip:
lines = lines + self._get_rg_and_vm_entry(is_aad)
# default to all hosts for config
if not self.ip:
self.ip = "*"
lines = lines + self._get_ip_entry(is_aad)
return lines
def _get_arc_entry(self, is_aad):
lines = []
if is_aad:
lines.append("Host " + self.resource_group_name + "-" + self.vm_name)
else:
lines.append("Host " + self.resource_group_name + "-" + self.vm_name + "-" + self.local_user)
lines.append("\tHostName " + self.vm_name)
lines.append("\tUser " + self.local_user)
if self.cert_file:
lines.append("\tCertificateFile \"" + self.cert_file + "\"")
if self.private_key_file:
lines.append("\tIdentityFile \"" + self.private_key_file + "\"")
if self.port:
lines.append("\tProxyCommand \"" + self.proxy_path + "\" " + "-r \"" + self.relay_info_path + "\" "
+ "-p " + self.port)
else:
lines.append("\tProxyCommand \"" + self.proxy_path + "\" " + "-r \"" + self.relay_info_path + "\"")
return lines
def _get_rg_and_vm_entry(self, is_aad):
lines = []
if is_aad:
lines.append("Host " + self.resource_group_name + "-" + self.vm_name)
else:
lines.append("Host " + self.resource_group_name + "-" + self.vm_name + "-" + self.local_user)
lines.append("\tUser " + self.local_user)
lines.append("\tHostName " + self.ip)
if self.cert_file:
lines.append("\tCertificateFile \"" + self.cert_file + "\"")
if self.private_key_file:
lines.append("\tIdentityFile \"" + self.private_key_file + "\"")
if self.port:
lines.append("\tPort " + self.port)
return lines
def _get_ip_entry(self, is_aad):
lines = []
if is_aad:
lines.append("Host " + self.ip)
else:
lines.append("Host " + self.ip + "-" + self.local_user)
lines.append("\tHostName " + self.ip)
lines.append("\tUser " + self.local_user)
if self.cert_file:
lines.append("\tCertificateFile \"" + self.cert_file + "\"")
if self.private_key_file:
lines.append("\tIdentityFile \"" + self.private_key_file + "\"")
if self.port:
lines.append("\tPort " + self.port)
return lines
def _create_relay_info_file(self):
relay_info_dir = self.credentials_folder
relay_info_filename = None
if not os.path.isdir(relay_info_dir):
os.makedirs(relay_info_dir)
if self.vm_name and self.resource_group_name:
relay_info_filename = self.resource_group_name + "-" + self.vm_name + "-relay_info"
relay_info_path = os.path.join(relay_info_dir, relay_info_filename)
# Overwrite relay_info if it already exists in that folder.
file_utils.delete_file(relay_info_path, f"{relay_info_path} already exists, and couldn't be overwritten.")
file_utils.write_to_file(relay_info_path, 'w', connectivity_utils.format_relay_info_string(self.relay_info),
f"Couldn't write relay information to file {relay_info_path}.", 'utf-8')
oschmod.set_mode(relay_info_path, 0o644)
# pylint: disable=broad-except
try:
expiration = datetime.datetime.fromtimestamp(self.relay_info.expires_on)
expiration = expiration.strftime("%Y-%m-%d %I:%M:%S %p")
colorama.init()
print(Fore.GREEN + f"Generated relay information {relay_info_path} is valid until {expiration} "
"in local time." + Style.RESET_ALL)
except Exception as e:
logger.warning("Couldn't determine relay information expiration. Error: %s", str(e))
return relay_info_path
| 44.567839
| 116
| 0.62104
|
46dfa4f1ece16f58ac21ec79f1fdcef5e7eb8c01
| 1,620
|
py
|
Python
|
scripts/west_commands/completion.py
|
chen-png/zephyr
|
11e91a83c1f6923a0078834404e238e5b7df3693
|
[
"Apache-2.0"
] | 35
|
2018-09-09T13:32:51.000Z
|
2022-02-13T03:38:55.000Z
|
scripts/west_commands/completion.py
|
chen-png/zephyr
|
11e91a83c1f6923a0078834404e238e5b7df3693
|
[
"Apache-2.0"
] | 12
|
2019-08-15T10:37:21.000Z
|
2019-09-11T18:23:58.000Z
|
scripts/west_commands/completion.py
|
chen-png/zephyr
|
11e91a83c1f6923a0078834404e238e5b7df3693
|
[
"Apache-2.0"
] | 13
|
2018-11-16T16:40:53.000Z
|
2021-12-28T09:18:58.000Z
|
# Copyright (c) 2019 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
from west import log
from west.commands import WestCommand
# Relative to the folder where this script lives
COMPLETION_REL_PATH = 'completion/west-completion'
class Completion(WestCommand):
def __init__(self):
super().__init__(
'completion',
# Keep this in sync with the string in west-commands.yml.
'display shell completion scripts',
'Display shell completion scripts.',
accepts_unknown_args=False)
def do_add_parser(self, parser_adder):
parser = parser_adder.add_parser(
self.name,
help=self.help,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.description)
# Remember to update completion/west-completion.bash if you add or
# remove flags
parser.add_argument('shell', nargs=1, choices=['bash'],
                            help='''Select the shell for which the completion
script is intended for.
Currently only bash is supported.''')
return parser
def do_run(self, args, unknown_args):
cf = os.path.join(os.path.dirname(os.path.realpath(__file__)),
*COMPLETION_REL_PATH.split('/'))
cf += '.' + args.shell[0]
try:
with open(cf, 'r') as f:
print(f.read())
except FileNotFoundError as e:
log.die('Unable to find completion file: {}'.format(e))
| 31.764706
| 78
| 0.601235
|
34f3fe9f8394562889cdcfb8f2a9a4429fd86222
| 2,907
|
py
|
Python
|
release/stubs.min/System/__init___parts/AsyncCallback.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/__init___parts/AsyncCallback.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/__init___parts/AsyncCallback.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class AsyncCallback(MulticastDelegate, ICloneable, ISerializable):
"""
References a method to be called when a corresponding asynchronous operation completes.
AsyncCallback(object: object,method: IntPtr)
"""
def BeginInvoke(self, ar, callback, object):
""" BeginInvoke(self: AsyncCallback,ar: IAsyncResult,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self, *args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self, *args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self, result):
""" EndInvoke(self: AsyncCallback,result: IAsyncResult) """
pass
def GetMethodImpl(self, *args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self, ar):
""" Invoke(self: AsyncCallback,ar: IAsyncResult) """
pass
def RemoveImpl(self, *args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, object, method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self, *args):
pass
| 27.685714
| 221
| 0.664259
|
87a12c1b0fdb3f88f53570df035bbe91dcd034a4
| 1,393
|
py
|
Python
|
lab6.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
lab6.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
lab6.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
"""
Week 2, day 6, lab 6
"""
#3.1
for each_num in range(0, 6): #numbers 0 to 5
    if each_num != 3:  # don't count 3
print(each_num) #exclude 3 and 6
#3.2
i = 1 #set another variable
for num in range(1, 6): #from 1 to 5
i = i*num #multiplying the numbers
print(i)
#3.3
i = 0
for num in range(1,6): #range from 1 to 5
i = i+num #getting the sum
print(i)
#3.4
i = 1
for num in range(3, 9): #range from 3 to 8
i = i*num #multiply
print(i)
#3.5
i = 1 #set the dividend
for num in range(1, 9):
i = i*num
k = 1 #set the divisor
for num in range(1, 4):
k = k*num
print(i)
print(k)
print(i/k) #get the quotient
#3.6
my_string = 'this is my 6th string' #def string
k = 1 #adding one to number of spaces
for i in my_string:
    if i == ' ':  # count the number of spaces
k = k + 1
print(k)
'''
Alternative:
i = 0
for word in 'this is my 6th string'.split():
i = i + 1
print(i)
'''
# #3.7
my_tweet = {
"favorite_count":1138,
"lang": "en",
"coordinates": (-75, 40),
"entities": {"hashtags": [" Preds ", "Pens", " SingIntoSpring "]} }
i = 0
for hashtags in my_tweet['entities']['hashtags']:
i = i + 1
print(i)
| 21.765625
| 71
| 0.496052
|
9f8487724cc0df75e6514be2775b216b3a1c1992
| 925
|
py
|
Python
|
jcatalog/transform/scielo_orcid_update.py
|
ednilson/jcatalog
|
e36188e39fbf37c74e37445060e1bafa18a3e20b
|
[
"BSD-2-Clause"
] | null | null | null |
jcatalog/transform/scielo_orcid_update.py
|
ednilson/jcatalog
|
e36188e39fbf37c74e37445060e1bafa18a3e20b
|
[
"BSD-2-Clause"
] | null | null | null |
jcatalog/transform/scielo_orcid_update.py
|
ednilson/jcatalog
|
e36188e39fbf37c74e37445060e1bafa18a3e20b
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
'''
This script reads data from various sources to process and store in MongoDB.
'''
import pyexcel
import logging
import models
from accent_remover import *
logging.basicConfig(filename='logs/times.info.txt', level=logging.INFO)
logger = logging.getLogger(__name__)
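# For each row of the 'import' sheet, find the Scielobk1 record matching the ISSN and set its orcid flag to 1.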
def scielocitations(filename):
sheet = pyexcel.get_sheet(
file_name=filename,
sheet_name='import',
name_columns_by_row=0)
sheet_json = sheet.to_records()
for rec in sheet_json:
query = models.Scielobk1.objects.filter(issn_list=rec['issn'])
if len(query) == 1:
doc = query[0]
data = {}
data['orcid'] = 1
if data:
doc.update(**data)
else:
            print('---Not found: ' + rec['title'])
def main():
scielocitations('data/scielo/SciELO-ScholarOne-ORCID.xlsx')
if __name__ == "__main__":
main()
| 22.02381
| 76
| 0.623784
|
8b25454e0acfd33b4d7f7ccf518c01eefe0d45bf
| 6,767
|
py
|
Python
|
gen_video.py
|
joshua-paperspace/BlendGAN
|
ab512453d6fcd6906cc60aa98a443d0339b00b66
|
[
"MIT"
] | 403
|
2021-11-01T06:26:27.000Z
|
2022-03-30T12:18:34.000Z
|
gen_video.py
|
joshua-paperspace/BlendGAN
|
ab512453d6fcd6906cc60aa98a443d0339b00b66
|
[
"MIT"
] | 13
|
2021-11-20T08:09:23.000Z
|
2022-01-03T10:21:39.000Z
|
gen_video.py
|
joshua-paperspace/BlendGAN
|
ab512453d6fcd6906cc60aa98a443d0339b00b66
|
[
"MIT"
] | 48
|
2021-11-01T06:48:43.000Z
|
2022-03-28T02:30:33.000Z
|
import argparse
import os
import cv2
import numpy as np
import torch
from model import Generator
from psp_encoder.psp_encoders import PSPEncoder
from utils import ten2cv, cv2ten
import glob
from tqdm import tqdm
import random
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def sigmoid(x, w=1):
return 1. / (1 + np.exp(-w * x))
def get_alphas(start=-5, end=5, step=0.5, len_tail=10):
return [0] + [sigmoid(alpha) for alpha in np.arange(start, end, step)] + [1] * len_tail
def slide(entries, margin=32):
"""Returns a sliding reference window.
Args:
entries: a list containing two reference images, x_prev and x_next,
both of which has a shape (1, 3, H, W)
Returns:
canvas: output slide of shape (num_frames, 3, H*2, W+margin)
"""
_, C, H, W = entries[0].shape
alphas = get_alphas()
T = len(alphas) # number of frames
canvas = - torch.ones((T, C, H*2, W + margin))
merged = torch.cat(entries, dim=2) # (1, 3, H*2, W)
for t, alpha in enumerate(alphas):
top = int(H * (1 - alpha)) # top, bottom for canvas
bottom = H * 2
m_top = 0 # top, bottom for merged
m_bottom = 2 * H - top
canvas[t, :, top:bottom, :W] = merged[:, :, m_top:m_bottom, :]
return canvas
def slide_one_window(entries, margin=32):
"""Returns a sliding reference window.
Args:
entries: a list containing two reference images, x_prev and x_next,
both of which has a shape (1, 3, H, W)
Returns:
canvas: output slide of shape (num_frames, 3, H, W+margin)
"""
_, C, H, W = entries[0].shape
device = entries[0].device
alphas = get_alphas()
T = len(alphas) # number of frames
canvas = - torch.ones((T, C, H, W + margin)).to(device)
merged = torch.cat(entries, dim=2) # (1, 3, H*2, W)
for t, alpha in enumerate(alphas):
m_top = int(H * alpha) # top, bottom for merged
m_bottom = m_top + H
canvas[t, :, :, :W] = merged[:, :, m_top:m_bottom, :]
return canvas
def tensor2ndarray255(images):
images = torch.clamp(images * 0.5 + 0.5, 0, 1)
return (images.cpu().numpy().transpose(0, 2, 3, 1) * 255).astype(np.uint8)
@torch.no_grad()
def interpolate(args, g, sample_in, sample_style_prev, sample_style_next):
''' returns T x C x H x W '''
frames_ten = []
alphas = get_alphas()
for alpha in alphas:
sample_style = torch.lerp(sample_style_prev, sample_style_next, alpha)
frame_ten, _ = g([sample_in], z_embed=sample_style, add_weight_index=args.add_weight_index,
input_is_latent=True, return_latents=False, randomize_noise=False)
frames_ten.append(frame_ten)
frames_ten = torch.cat(frames_ten)
return frames_ten
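# Encode the input face once with the pSp encoder, then walk through the style images,
# writing interpolated frames (input | sliding style reference | stylized output) to the video.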
@torch.no_grad()
def video_ref(args, g, psp_encoder, img_in_ten, img_style_tens, videoWriter):
sample_in = psp_encoder(img_in_ten)
img_style_ten_prev, sample_style_prev = None, None
for idx in tqdm(range(len(img_style_tens))):
img_style_ten_next = img_style_tens[idx]
        sample_style_next = g.get_z_embed(img_style_ten_next)
if img_style_ten_prev is None:
img_style_ten_prev, sample_style_prev = img_style_ten_next, sample_style_next
continue
interpolated = interpolate(args, g, sample_in, sample_style_prev, sample_style_next)
entries = [img_style_ten_prev, img_style_ten_next]
slided = slide_one_window(entries, margin=0) # [T, C, H, W)
frames = torch.cat([img_in_ten.expand_as(interpolated), slided, interpolated], dim=3).cpu() # [T, C, H, W*3)
frames = tensor2ndarray255(frames) # [T, H, W*3, C)
for frame_idx in range(frames.shape[0]):
frame = frames[frame_idx]
videoWriter.write(frame[:, :, ::-1])
img_style_ten_prev, sample_style_prev = img_style_ten_next, sample_style_next
    # append the last frame 10 more times
for _ in range(10):
videoWriter.write(frame[:, :, ::-1])
if __name__ == '__main__':
device = 'cuda'
parser = argparse.ArgumentParser()
parser.add_argument('--size', type=int, default=1024)
parser.add_argument('--ckpt', type=str, default='', help='path to BlendGAN checkpoint')
parser.add_argument('--psp_encoder_ckpt', type=str, default='', help='path to psp_encoder checkpoint')
parser.add_argument('--style_img_path', type=str, default=None, help='path to style image')
parser.add_argument('--input_img_path', type=str, default=None, help='path to input image')
parser.add_argument('--add_weight_index', type=int, default=7)
parser.add_argument('--channel_multiplier', type=int, default=2)
parser.add_argument('--outdir', type=str, default="")
args = parser.parse_args()
outdir = args.outdir
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
args.latent = 512
args.n_mlp = 8
checkpoint = torch.load(args.ckpt)
model_dict = checkpoint['g_ema']
print('ckpt: ', args.ckpt)
g_ema = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier, load_pretrained_vgg=False
).to(device)
g_ema.load_state_dict(model_dict)
g_ema.eval()
del checkpoint, model_dict
psp_encoder = PSPEncoder(args.psp_encoder_ckpt, output_size=args.size).to(device)
psp_encoder.eval()
input_img_paths = sorted(glob.glob(os.path.join(args.input_img_path, '*.*')))
style_img_paths = sorted(glob.glob(os.path.join(args.style_img_path, '*.*')))[:]
for input_img_path in input_img_paths:
print('process: %s' % input_img_path)
name_in = os.path.splitext(os.path.basename(input_img_path))[0]
img_in = cv2.imread(input_img_path, 1)
img_in = cv2.resize(img_in, (args.size, args.size))
img_in_ten = cv2ten(img_in, device)
img_style_tens = []
style_img_path_rand = random.choices(style_img_paths, k=8)
for style_img_path in style_img_path_rand:
name_style = os.path.splitext(os.path.basename(style_img_path))[0]
img_style = cv2.imread(style_img_path, 1)
img_style = cv2.resize(img_style, (args.size, args.size))
img_style_ten = cv2ten(img_style, device)
img_style_tens.append(img_style_ten)
fname = f'{args.outdir}/{name_in}.mp4'
fourcc = cv2.VideoWriter_fourcc(*'XVID')
videoWriter = cv2.VideoWriter(fname, fourcc, 30, (args.size * 3, args.size))
video_ref(args, g_ema, psp_encoder, img_in_ten, img_style_tens, videoWriter)
videoWriter.release()
print('save video to: %s' % fname)
print('Done!')
| 34.176768
| 118
| 0.653465
|
caf572483ad8b6d50d8906cc2f4e8fe89a981e38
| 342
|
py
|
Python
|
contacts/admin.py
|
fion21/Alicante-Properties_Django
|
3e48afa42fdbba8d5a8355c06e969ca39bfbb2f1
|
[
"MIT"
] | null | null | null |
contacts/admin.py
|
fion21/Alicante-Properties_Django
|
3e48afa42fdbba8d5a8355c06e969ca39bfbb2f1
|
[
"MIT"
] | 3
|
2021-06-10T22:35:13.000Z
|
2022-01-13T02:13:50.000Z
|
contacts/admin.py
|
fion21/Alicante-Properties_Django
|
3e48afa42fdbba8d5a8355c06e969ca39bfbb2f1
|
[
"MIT"
] | 1
|
2020-02-18T23:22:54.000Z
|
2020-02-18T23:22:54.000Z
|
from django.contrib import admin
from .models import Contact
class ContactAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'listing', 'email', 'contact_date')
list_display_links = ('id', 'name')
search_fields = ('name', 'email', 'listing')
list_per_page = 25
admin.site.register(Contact, ContactAdmin)
| 24.428571
| 70
| 0.669591
|
ebd52cd499e88f2ba8ba0ae1dd0b21f344679083
| 6,455
|
py
|
Python
|
torchmeta/tests/modules/test_activation.py
|
hanseungwook/pytorch-meta
|
c385b921d14821cdf12b01bd7adc7010fc4930d9
|
[
"MIT"
] | 1,704
|
2019-09-16T15:08:18.000Z
|
2022-03-31T22:36:43.000Z
|
torchmeta/tests/modules/test_activation.py
|
hanseungwook/pytorch-meta
|
c385b921d14821cdf12b01bd7adc7010fc4930d9
|
[
"MIT"
] | 135
|
2019-09-20T15:34:03.000Z
|
2022-03-13T23:31:17.000Z
|
torchmeta/tests/modules/test_activation.py
|
hanseungwook/pytorch-meta
|
c385b921d14821cdf12b01bd7adc7010fc4930d9
|
[
"MIT"
] | 221
|
2019-09-17T09:01:21.000Z
|
2022-03-30T03:23:35.000Z
|
import pytest
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
from torchmeta.modules import MetaModule
from torchmeta.modules.activation import MetaMultiheadAttention
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('add_bias_kv', [True, False])
@pytest.mark.parametrize('kdim,vdim', [(None, None), (7, 11)])
def test_metamultiheadattention(bias, add_bias_kv, kdim, vdim):
meta_model = MetaMultiheadAttention(3 * 5, 3,
bias=bias,
add_bias_kv=add_bias_kv,
kdim=kdim, vdim=vdim)
model = nn.MultiheadAttention(3 * 5, 3,
bias=bias,
add_bias_kv=add_bias_kv,
kdim=kdim, vdim=vdim)
assert isinstance(meta_model, MetaModule)
assert isinstance(meta_model, nn.MultiheadAttention)
# Set same weights for both models
if not meta_model._qkv_same_embed_dim:
q_proj_weight = torch.randn(3 * 5, 3 * 5)
meta_model.q_proj_weight.data.copy_(q_proj_weight)
model.q_proj_weight.data.copy_(q_proj_weight)
k_proj_weight = torch.randn(3 * 5, meta_model.kdim)
meta_model.k_proj_weight.data.copy_(k_proj_weight)
model.k_proj_weight.data.copy_(k_proj_weight)
v_proj_weight = torch.randn(3 * 5, meta_model.vdim)
meta_model.v_proj_weight.data.copy_(v_proj_weight)
model.v_proj_weight.data.copy_(v_proj_weight)
else:
in_proj_weight = torch.randn(3 * 3 * 5, 3 * 5)
meta_model.in_proj_weight.data.copy_(in_proj_weight)
model.in_proj_weight.data.copy_(in_proj_weight)
if bias:
in_proj_bias = torch.randn(3 * 3 * 5)
meta_model.in_proj_bias.data.copy_(in_proj_bias)
model.in_proj_bias.data.copy_(in_proj_bias)
out_proj_bias = torch.randn(3 * 5)
meta_model.out_proj.bias.data.copy_(out_proj_bias)
model.out_proj.bias.data.copy_(out_proj_bias)
if add_bias_kv:
bias_k = torch.randn(1, 1, 3 * 5)
meta_model.bias_k.data.copy_(bias_k)
model.bias_k.data.copy_(bias_k)
bias_v = torch.randn(1, 1, 3 * 5)
meta_model.bias_v.data.copy_(bias_v)
model.bias_v.data.copy_(bias_v)
out_proj_weight = torch.randn(3 * 5, 3 * 5)
meta_model.out_proj.weight.data.copy_(out_proj_weight)
model.out_proj.weight.data.copy_(out_proj_weight)
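    # Inputs follow nn.MultiheadAttention's (seq_len, batch, embed_dim) layout;
    # key/value fall back to embed_dim when no custom kdim/vdim is given.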
query = torch.randn(13, 17, 3 * 5)
key = torch.randn(19, 17, 3 * 5 if (kdim is None) else kdim)
value = torch.randn(19, 17, 3 * 5 if (vdim is None) else vdim)
outputs_torchmeta, weights_torchmeta = meta_model(query, key, value, params=None)
outputs_nn, weights_nn = model(query, key, value)
np.testing.assert_equal(outputs_torchmeta.detach().numpy(),
outputs_nn.detach().numpy())
np.testing.assert_equal(weights_torchmeta.detach().numpy(),
weights_nn.detach().numpy())
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('add_bias_kv', [True, False])
@pytest.mark.parametrize('kdim,vdim', [(None, None), (7, 11)])
def test_metamultiheadattention_params(bias, add_bias_kv, kdim, vdim):
meta_model = MetaMultiheadAttention(3 * 5, 3,
bias=bias,
add_bias_kv=add_bias_kv,
kdim=kdim, vdim=vdim)
model = nn.MultiheadAttention(3 * 5, 3,
bias=bias,
add_bias_kv=add_bias_kv,
kdim=kdim, vdim=vdim)
params = OrderedDict()
if not meta_model._qkv_same_embed_dim:
params['q_proj_weight'] = torch.randn(3 * 5, 3 * 5)
model.q_proj_weight.data.copy_(params['q_proj_weight'])
params['k_proj_weight'] = torch.randn(3 * 5, meta_model.kdim)
model.k_proj_weight.data.copy_(params['k_proj_weight'])
params['v_proj_weight'] = torch.randn(3 * 5, meta_model.vdim)
model.v_proj_weight.data.copy_(params['v_proj_weight'])
else:
params['in_proj_weight'] = torch.randn(3 * 3 * 5, 3 * 5)
model.in_proj_weight.data.copy_(params['in_proj_weight'])
if bias:
params['in_proj_bias'] = torch.randn(3 * 3 * 5)
model.in_proj_bias.data.copy_(params['in_proj_bias'])
params['out_proj.bias'] = torch.randn(3 * 5)
model.out_proj.bias.data.copy_(params['out_proj.bias'])
if add_bias_kv:
params['bias_k'] = torch.randn(1, 1, 3 * 5)
model.bias_k.data.copy_(params['bias_k'])
params['bias_v'] = torch.randn(1, 1, 3 * 5)
model.bias_v.data.copy_(params['bias_v'])
params['out_proj.weight'] = torch.randn(3 * 5, 3 * 5)
model.out_proj.weight.data.copy_(params['out_proj.weight'])
query = torch.randn(13, 17, 3 * 5)
key = torch.randn(19, 17, 3 * 5 if (kdim is None) else kdim)
value = torch.randn(19, 17, 3 * 5 if (vdim is None) else vdim)
outputs_torchmeta, weights_torchmeta = meta_model(query, key, value, params=params)
outputs_nn, weights_nn = model(query, key, value)
np.testing.assert_equal(outputs_torchmeta.detach().numpy(),
outputs_nn.detach().numpy())
np.testing.assert_equal(weights_torchmeta.detach().numpy(),
weights_nn.detach().numpy())
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('add_bias_kv', [True, False])
@pytest.mark.parametrize('kdim,vdim', [(None, None), (7, 11)])
def test_metamultiheadattention_meta_named_parameters(bias, add_bias_kv, kdim, vdim):
meta_model = MetaMultiheadAttention(3 * 5, 3,
bias=bias,
add_bias_kv=add_bias_kv,
kdim=kdim, vdim=vdim)
params = OrderedDict(meta_model.meta_named_parameters())
param_names = set(params)
if not meta_model._qkv_same_embed_dim:
assert {'q_proj_weight', 'k_proj_weight', 'v_proj_weight'} <= param_names
else:
assert 'in_proj_weight' in param_names
if bias:
        assert {'in_proj_bias', 'out_proj.bias'} <= param_names
if add_bias_kv:
assert {'bias_k', 'bias_v'} <= param_names
assert 'out_proj.weight' in param_names
| 39.601227
| 87
| 0.620759
|
5aa08fa5e5b7bfce5e48c4c71c3bc217748f6b19
| 7,910
|
py
|
Python
|
sdk/python/pulumi_gcp/organizations/get_folder.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/organizations/get_folder.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/organizations/get_folder.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetFolderResult',
'AwaitableGetFolderResult',
'get_folder',
'get_folder_output',
]
@pulumi.output_type
class GetFolderResult:
"""
A collection of values returned by getFolder.
"""
def __init__(__self__, create_time=None, display_name=None, folder=None, folder_id=None, id=None, lifecycle_state=None, lookup_organization=None, name=None, organization=None, parent=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if folder and not isinstance(folder, str):
raise TypeError("Expected argument 'folder' to be a str")
pulumi.set(__self__, "folder", folder)
if folder_id and not isinstance(folder_id, str):
raise TypeError("Expected argument 'folder_id' to be a str")
pulumi.set(__self__, "folder_id", folder_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if lifecycle_state and not isinstance(lifecycle_state, str):
raise TypeError("Expected argument 'lifecycle_state' to be a str")
pulumi.set(__self__, "lifecycle_state", lifecycle_state)
if lookup_organization and not isinstance(lookup_organization, bool):
raise TypeError("Expected argument 'lookup_organization' to be a bool")
pulumi.set(__self__, "lookup_organization", lookup_organization)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if organization and not isinstance(organization, str):
raise TypeError("Expected argument 'organization' to be a str")
pulumi.set(__self__, "organization", organization)
if parent and not isinstance(parent, str):
raise TypeError("Expected argument 'parent' to be a str")
pulumi.set(__self__, "parent", parent)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
Timestamp when the Organization was created. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
The folder's display name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def folder(self) -> str:
return pulumi.get(self, "folder")
@property
@pulumi.getter(name="folderId")
def folder_id(self) -> str:
return pulumi.get(self, "folder_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lifecycleState")
def lifecycle_state(self) -> str:
"""
The Folder's current lifecycle state.
"""
return pulumi.get(self, "lifecycle_state")
@property
@pulumi.getter(name="lookupOrganization")
def lookup_organization(self) -> Optional[bool]:
return pulumi.get(self, "lookup_organization")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name of the Folder in the form `folders/{folder_id}`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def organization(self) -> str:
"""
        If `lookup_organization` is enabled, the resource name of the Organization that the folder belongs to.
"""
return pulumi.get(self, "organization")
@property
@pulumi.getter
def parent(self) -> str:
"""
The resource name of the parent Folder or Organization.
"""
return pulumi.get(self, "parent")
class AwaitableGetFolderResult(GetFolderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFolderResult(
create_time=self.create_time,
display_name=self.display_name,
folder=self.folder,
folder_id=self.folder_id,
id=self.id,
lifecycle_state=self.lifecycle_state,
lookup_organization=self.lookup_organization,
name=self.name,
organization=self.organization,
parent=self.parent)
def get_folder(folder: Optional[str] = None,
lookup_organization: Optional[bool] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFolderResult:
"""
Use this data source to get information about a Google Cloud Folder.
```python
import pulumi
import pulumi_gcp as gcp
my_folder1 = gcp.organizations.get_folder(folder="folders/12345",
lookup_organization=True)
my_folder2 = gcp.organizations.get_folder(folder="folders/23456")
pulumi.export("myFolder1Organization", my_folder1.organization)
pulumi.export("myFolder2Parent", my_folder2.parent)
```
:param str folder: The name of the Folder in the form `{folder_id}` or `folders/{folder_id}`.
    :param bool lookup_organization: `true` to find the organization that the folder belongs to, `false` to avoid the lookup. It searches up the tree. (defaults to `false`)
"""
__args__ = dict()
__args__['folder'] = folder
__args__['lookupOrganization'] = lookup_organization
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:organizations/getFolder:getFolder', __args__, opts=opts, typ=GetFolderResult).value
return AwaitableGetFolderResult(
create_time=__ret__.create_time,
display_name=__ret__.display_name,
folder=__ret__.folder,
folder_id=__ret__.folder_id,
id=__ret__.id,
lifecycle_state=__ret__.lifecycle_state,
lookup_organization=__ret__.lookup_organization,
name=__ret__.name,
organization=__ret__.organization,
parent=__ret__.parent)
@_utilities.lift_output_func(get_folder)
def get_folder_output(folder: Optional[pulumi.Input[str]] = None,
lookup_organization: Optional[pulumi.Input[Optional[bool]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFolderResult]:
"""
Use this data source to get information about a Google Cloud Folder.
```python
import pulumi
import pulumi_gcp as gcp
my_folder1 = gcp.organizations.get_folder(folder="folders/12345",
lookup_organization=True)
my_folder2 = gcp.organizations.get_folder(folder="folders/23456")
pulumi.export("myFolder1Organization", my_folder1.organization)
pulumi.export("myFolder2Parent", my_folder2.parent)
```
:param str folder: The name of the Folder in the form `{folder_id}` or `folders/{folder_id}`.
    :param bool lookup_organization: `true` to find the organization that the folder belongs to, `false` to avoid the lookup. It searches up the tree. (defaults to `false`)
"""
...
| 37.488152
| 193
| 0.663843
|
e40a7a8b930ea76f7a78fb4d1cc6dd5c7d186afa
| 8,223
|
py
|
Python
|
mayan/apps/sources/tests/mocks.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 4
|
2021-09-02T00:16:30.000Z
|
2021-09-09T22:25:15.000Z
|
mayan/apps/sources/tests/mocks.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 86
|
2021-09-01T23:53:02.000Z
|
2021-09-20T02:25:10.000Z
|
mayan/apps/sources/tests/mocks.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 70
|
2021-09-01T12:54:51.000Z
|
2022-02-16T00:53:18.000Z
|
from django.utils.encoding import force_bytes, force_text
from .literals import TEST_EMAIL_BASE64_FILENAME, TEST_STAGING_PREVIEW_WIDTH
class MockIMAPMessage:
def __init__(self, uid):
self.flags = []
self.mailbox = None
self.uid = uid
    def flags_add(self, flags_string):
        for flag in flags_string.split():
            if flag not in self.flags:
                self.flags.append(flag)
    def flags_remove(self, flags_string):
        for flag in flags_string.split():
            if flag in self.flags:
                self.flags.remove(flag)
def flags_set(self, flags_string):
self.flags = flags_string.split()
def delete(self):
self.mailbox.messages.pop(self.uid)
def get_flags(self):
return ' '.join(self.flags)
def get_number(self):
return list(self.mailbox.messages.values()).index(self)
class MockIMAPMailbox:
messages = {}
def __init__(self, name='INBOX'):
self.name = name
def get_message_by_number(self, message_number):
return list(self.messages.values())[message_number - 1]
def get_message_by_uid(self, uid):
return self.messages[uid]
def get_message_count(self):
return len(self.messages)
def get_messages(self):
return list(self.messages.values())
def messages_add(self, uid):
self.messages[uid] = MockIMAPMessage(uid=uid)
self.messages[uid].mailbox = self
class MockIMAPServer:
def __init__(self):
self.mailboxes = {
'INBOX': MockIMAPMailbox(name='INBOX')
}
self.mailboxes['INBOX'].messages_add(uid='999')
self.mailbox_selected = None
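    # Simulate an IMAP FETCH: mark each message \Seen and return its RFC822 body plus FLAGS responses.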
def _fetch(self, messages):
flag = '\\Seen'
flag_modified = []
message_numbers = []
results = []
uids = []
for message in messages:
if flag not in message.flags:
message.flags_add(flag)
flag_modified.append(message)
message_number = message.get_number()
message_numbers.append(force_text(s=message_number))
uid = message.uid
uids.append(uid)
body = TEST_EMAIL_BASE64_FILENAME
results.append(
(
'{} (UID {} RFC822 {{{}}}'.format(message_number, uid, len(body)),
body,
)
)
results.append(
' FLAGS ({}))'.format(flag),
)
results.append(
'{} (UID {} FLAGS ({}))'.format(
' '.join(message_numbers), ' '.join(uids), flag
)
)
return results
def close(self):
return ('OK', ['Returned to authenticated state. (Success)'])
def expunge(self):
result = []
for message in self.mailbox_selected.get_messages():
if '\\Deleted' in message.flags:
result.append(
force_text(s=message.get_number())
)
message.delete()
return ('OK', ' '.join(result))
def fetch(self, message_set, message_parts):
messages = []
for message_number in message_set.split():
messages.append(
self.mailbox_selected.get_message_by_number(
message_number=int(message_number)
)
)
return ('OK', self._fetch(messages=messages))
def login(self, user, password):
return ('OK', ['{} authenticated (Success)'.format(user)])
def logout(self):
return ('BYE', ['LOGOUT Requested'])
def search(self, charset, *criteria):
"""
7.2.5. SEARCH Response
Contents: zero or more numbers
The SEARCH response occurs as a result of a SEARCH or UID SEARCH
command. The number(s) refer to those messages that match the
search criteria. For SEARCH, these are message sequence numbers;
for UID SEARCH, these are unique identifiers. Each number is
delimited by a space.
Example: S: * SEARCH 2 3 6
"""
        results = [
            self.mailbox_selected.get_message_by_number(message_number=1)
        ]
message_sequences = []
for message in results:
message_sequences.append(force_text(s=message.get_number()))
return ('OK', ' '.join(message_sequences))
def select(self, mailbox='INBOX', readonly=False):
self.mailbox_selected = self.mailboxes[mailbox]
return (
'OK', [
self.mailbox_selected.get_message_count()
]
)
def store(self, message_set, command, flags):
results = []
for message_number in message_set.split():
            message = self.mailbox_selected.get_message_by_number(message_number=int(message_number))
if command == 'FLAGS':
message.flags_set(flags_string=flags)
elif command == '+FLAGS':
message.flags_add(flags_string=flags)
elif command == '-FLAGS':
message.flags_remove(flags_string=flags)
results.append(
'{} (FLAGS ({}))'.format(message_number, message.get_flags())
)
return ('OK', results)
def uid(self, command, *args):
if command == 'FETCH':
uid = args[0]
messages = [self.mailbox_selected.get_message_by_uid(uid=uid)]
return ('OK', self._fetch(messages=messages))
elif command == 'STORE':
results = []
uid = args[0]
subcommand = args[1]
flags = args[2]
message = self.mailbox_selected.get_message_by_uid(uid=uid)
if subcommand == 'FLAGS':
message.flags_set(flags_string=flags)
elif subcommand == '+FLAGS':
message.flags_add(flags_string=flags)
elif subcommand == '-FLAGS':
message.flags_remove(flags_string=flags)
results.append(
'{} (FLAGS ({}))'.format(uid, message.get_flags())
)
return ('OK', results)
elif command == 'SEARCH':
message_sequences = [
self.mailbox_selected.get_message_by_number(
message_number=1
).uid
]
return ('OK', [' '.join(message_sequences)])
class MockPOP3Mailbox:
"""RFC 1725"""
messages = {
1: [TEST_EMAIL_BASE64_FILENAME]
}
def dele(self, which):
return
def getwelcome(self):
return force_bytes(
s='+OK server ready for requests from 127.0.0.0 xxxxxxxxxxxxxxxxx'
)
def list(self, which=None):
# (b'+OK 7 messages (304882 bytes)',
# [b'1 4800',
# b'2 16995',
# b'3 12580',
# b'4 196497',
# b'5 48900',
# b'6 12555',
# b'7 12555'],
# 63)
message_list = []
message_number = 1
messages_total_size = 0
for key, value in self.messages.items():
message_size = 0
for line in value:
message_size = message_size + len(line)
messages_total_size = messages_total_size + message_size
message_list.append(
force_bytes(s='{} {}'.format(message_number, message_size))
)
message_number = message_number + 1
# Sum the line sizes in bytes plus 2 (CR+LF)
result_size = sum(
[len(message_entry) + 2 for message_entry in message_list]
)
return (
force_bytes(
s='+OK {} messages ({} bytes)'.format(
len(self.messages), messages_total_size
)
), message_list, result_size
)
def user(self, user):
return force_bytes(s='+OK send PASS')
def pass_(self, pswd):
return force_bytes(s='+OK Welcome.')
def quit(self):
return
def retr(self, which):
return (None, self.messages[which], None)
class MockStagingFolder:
"""Mock of a StagingFolder model"""
pk = 1
preview_height = None
preview_width = TEST_STAGING_PREVIEW_WIDTH
| 28.453287
| 86
| 0.550043
|
eac05d6a7720904c4f08d964db59269b4c75c687
| 163
|
py
|
Python
|
Kurs Work/Tree.py
|
Delivery-Klad/Kurs_work
|
522641ff2537961bf66603961a9666b00a4fef76
|
[
"Apache-2.0"
] | 2
|
2020-01-02T01:39:59.000Z
|
2020-07-11T21:49:31.000Z
|
Kurs Work/Tree.py
|
Delivery-Klad/Kurs_work
|
522641ff2537961bf66603961a9666b00a4fef76
|
[
"Apache-2.0"
] | null | null | null |
Kurs Work/Tree.py
|
Delivery-Klad/Kurs_work
|
522641ff2537961bf66603961a9666b00a4fef76
|
[
"Apache-2.0"
] | 1
|
2019-11-21T06:53:38.000Z
|
2019-11-21T06:53:38.000Z
|
class TreeNode:  # tree class
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
| 23.285714
| 52
| 0.601227
|
b12889f450d6a8cff39a76b1ad4004fc8430252d
| 20,536
|
py
|
Python
|
oauth_provider/tests/issues.py
|
ovidioreyna/django-oauth-plus
|
b9b64a3ac24fd11f471763c88462bbf3c53e46e6
|
[
"BSD-3-Clause"
] | null | null | null |
oauth_provider/tests/issues.py
|
ovidioreyna/django-oauth-plus
|
b9b64a3ac24fd11f471763c88462bbf3c53e46e6
|
[
"BSD-3-Clause"
] | 4
|
2018-01-11T20:59:12.000Z
|
2020-05-12T12:48:53.000Z
|
oauth_provider/tests/issues.py
|
ovidioreyna/django-oauth-plus
|
b9b64a3ac24fd11f471763c88462bbf3c53e46e6
|
[
"BSD-3-Clause"
] | 3
|
2020-08-25T21:07:33.000Z
|
2021-04-08T15:58:28.000Z
|
from __future__ import absolute_import, print_function
import datetime
import json
import time
import oauth2 as oauth
import six.moves.urllib.error
import six.moves.urllib.parse
import six.moves.urllib.request
from django.conf import settings
from django.test.client import RequestFactory
from six.moves.urllib.parse import parse_qs, urlparse
from oauth_provider import responses, utils
from oauth_provider.models import Scope, Token
from oauth_provider.store import store as oauth_provider_store
from oauth_provider.tests.auth import (METHOD_AUTHORIZATION_HEADER,
BaseOAuthTestCase)
class OAuthTestsBug10(BaseOAuthTestCase):
"""
See https://code.welldev.org/django-oauth-plus/issue/10/malformed-callback-url-when-user-denies
"""
def test_Request_token_request_succeeds_with_valid_request_token_parameters(self):
response = self._request_token()
token = self.request_token
self.assertEqual(token.callback,
self.callback_token)
self.assertEqual(
token.callback_confirmed,
self.callback_confirmed)
def test_Requesting_user_authorization_fails_when_user_denies_authorization(self):
self._request_token()
self.c.login(username=self.username, password=self.password)
parameters = authorization_parameters = {'oauth_token': self.request_token.key}
response = self.c.get("/oauth/authorize/", parameters)
self.assertEqual(
response.status_code,
200)
# fake access not granted by the user (set session parameter again)
authorization_parameters['authorize_access'] = False
response = self.c.post("/oauth/authorize/", authorization_parameters)
self.assertEqual(
response.status_code,
302)
self.assertEqual('http://printer.example.com/request_token_ready?error=Access+not+granted+by+user.', response['Location'])
self.c.logout()
class OAuthOutOfBoundTests(BaseOAuthTestCase):
def test_Requesting_user_authorization_succeeds_when_oob(self):
self._request_token(oauth_callback="oob")
self.c.login(username=self.username, password=self.password)
parameters = self.authorization_parameters = {'oauth_token': self.request_token.key}
response = self.c.get("/oauth/authorize/", parameters)
self.assertEqual(
response.status_code,
200)
class OauthTestIssue24(BaseOAuthTestCase):
"""
See https://bitbucket.org/david/django-oauth-plus/issue/24/utilspy-initialize_server_request-should
"""
def setUp(self):
super(OauthTestIssue24, self).setUp()
#setting the access key/secret to made-up strings
self.access_token = Token(
key="key",
secret="secret",
consumer=self.consumer,
user=self.jane,
token_type=2,
scope=self.scope
)
self.access_token.save()
def test_that_initialize_server_request_when_custom_content_type(self):
"""Chceck if post data is not included in params when constent type
is not application/x-www-form-urlencoded. It would cause problems only when signature method is
HMAC-SHA1
"""
data = json.dumps({"data": {"foo": "bar"}})
content_type = "application/json"
querystring = self._make_querystring_with_HMAC_SHA1("POST", "/path/to/post", data, content_type)
#we're just using the request, don't bother faking sending it
rf = RequestFactory()
request = rf.post(querystring, data, content_type)
#this is basically a "remake" of the relevant parts of OAuthAuthentication in django-rest-framework
oauth_request = utils.get_oauth_request(request)
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)
token_param = oauth_request.get_parameter('oauth_token')
token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)
oauth_server, oauth_request = utils.initialize_server_request(request)
#check that this does not throw an oauth.Error
oauth_server.verify_request(oauth_request, consumer, token)
def test_post_using_in_authorization_header_and_PLAINTEXT(self):
self._request_token()
self._authorize_and_access_token_using_form()
parameters = {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_signature_method': "PLAINTEXT",
'oauth_version': "1.0",
'oauth_token': self.ACCESS_TOKEN_KEY,
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': str(int(time.time()))+"nonce",
'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, self.ACCESS_TOKEN_SECRET),
}
header = self._get_http_authorization_header(parameters)
response = self.c.post("/oauth/photo/", HTTP_AUTHORIZATION=header)
self.assertEqual(response.status_code, 200)
def test_post_using_auth_in_post_body_and_PLAINTEXT(self):
"""Check if auth works when authorization data is in post body when
content type is pplication/x-www-form-urlencoded
"""
self._request_token()
self._authorize_and_access_token_using_form()
parameters = {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_signature_method': "PLAINTEXT",
'oauth_version': "1.0",
'oauth_token': self.ACCESS_TOKEN_KEY,
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': str(int(time.time()))+"nonce",
'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, self.ACCESS_TOKEN_SECRET),
"additional_data": "whoop" # additional data
}
response = self.c.post("/oauth/photo/", six.moves.urllib.parse.urlencode(parameters, True),
content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 200)
def test_post_using_auth_in_header_with_content_type_json_and_PLAINTEXT(self):
self._request_token()
self._authorize_and_access_token_using_form()
parameters = {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_signature_method': "PLAINTEXT",
'oauth_version': "1.0",
'oauth_token': self.ACCESS_TOKEN_KEY,
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': str(int(time.time()))+"nonce",
'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, self.ACCESS_TOKEN_SECRET),
}
header = self._get_http_authorization_header(parameters)
response = self.c.post("/oauth/photo/", HTTP_AUTHORIZATION=header, CONTENT_TYPE="application/json")
self.assertEqual(response.status_code, 200)
def test_post_using_auth_in_body_content_type_and_application_x_www_form_urlencoded(self):
"""Opposite of test_that_initialize_server_request_when_custom_content_type,
If content type is application/x-www-form-urlencoded, post data should be added to params,
and it affects signature
"""
self._request_token()
self._authorize_and_access_token_using_form()
data = {"foo": "bar"}
content_type = "application/x-www-form-urlencoded"
querystring = self._make_querystring_with_HMAC_SHA1("POST", "/path/to/post", data, content_type)
#we're just using the request, don't bother faking sending it
rf = RequestFactory()
request = rf.post(querystring, six.moves.urllib.parse.urlencode(data), content_type)
# this is basically a "remake" of the relevant parts of
# OAuthAuthentication in django-rest-framework
oauth_request = utils.get_oauth_request(request)
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)
token_param = oauth_request.get_parameter('oauth_token')
token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)
oauth_server, oauth_request = utils.initialize_server_request(request)
#check that this does not throw an oauth.Error
oauth_server.verify_request(oauth_request, consumer, token)
class OAuthTestsBug2UrlParseNonHttpScheme(BaseOAuthTestCase):
def test_non_http_url_callback_scheme(self):
# @vmihailenco callback example
self._request_token(oauth_callback='ftp://fnaffgdfmcfbjiifjkhbfbnjljaabiaj.com/chrome_ex_oauth.html?q=1')
self.c.login(username=self.username, password=self.password)
parameters = self.authorization_parameters = {'oauth_token': self.request_token.key}
response = self.c.get("/oauth/authorize/", parameters)
self.assertEqual(response.status_code, 200)
# fill form (authorize us)
parameters['authorize_access'] = 1
response = self.c.post("/oauth/authorize/", parameters)
self.assertEqual(response.status_code, 302)
# assert query part of url is not malformed
assert "?q=1&" in response["Location"]
class OAuthTestIssue41XForwardedProto(BaseOAuthTestCase):
def setUp(self):
super(OAuthTestIssue41XForwardedProto, self).setUp()
self._request_token(METHOD_AUTHORIZATION_HEADER)
self._authorize_and_access_token_using_form(METHOD_AUTHORIZATION_HEADER)
print()
def _make_GET_auth_header(self, url):
token = oauth.Token(self.ACCESS_TOKEN_KEY, self.ACCESS_TOKEN_SECRET)
consumer = oauth.Consumer(self.CONSUMER_KEY, self.CONSUMER_SECRET)
request = oauth.Request.from_consumer_and_token(
consumer=consumer,
token=token,
http_method="GET",
http_url=url,
)
# Sign the request.
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
return request.to_header()["Authorization"]
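    # Note (added for clarity): the HMAC-SHA1 signature built above is computed over a
    # base string that includes the request scheme and host, so a header signed for an
    # "https://..." URL only verifies if the server reconstructs an https URL as well
    # (directly or via X-Forwarded-Proto). That is what the tests below exercise.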
def test_when_same_protocol(self):
"""Test that signature vierifies when protocol used for signing is same as used in request
"""
url = "http://testserver/oauth/none/"
kwargs = {
"HTTP_AUTHORIZATION": self._make_GET_auth_header(url),
}
response = self.c.get(url.replace('http', 'https'), **kwargs)
self.assertEqual(response.status_code, 200)
url = "https://testserver:80/oauth/none/"
kwargs = {
            # this tells the django test client to pretend it was an https request
'wsgi.url_scheme': "https",
"HTTP_AUTHORIZATION": self._make_GET_auth_header(url),
}
response = self.c.get(url, **kwargs)
self.assertEqual(response.status_code, 200)
def test_when_protocol_mismatch(self):
"""Test that signature does not vierifies when protocol is diffrent from that which was used for signing request
"""
url = "https://testserver:80/oauth/none/"
kwargs = {
'wsgi.url_scheme': "http",
"HTTP_AUTHORIZATION": self._make_GET_auth_header(url),
}
response = self.c.get(url.replace('https', 'http'), **kwargs)
assert response == responses.COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
self.assertEqual(response.status_code, 401)
url = "http://testserver:80/oauth/none/"
kwargs = {
            # this tells the django test client to pretend it was an https request
'wsgi.url_scheme': "https",
"HTTP_AUTHORIZATION": self._make_GET_auth_header(url),
}
response = self.c.get(url.replace('http', 'https'), **kwargs)
assert response == responses.COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
self.assertEqual(response.status_code, 401)
def test_when_x_forwarded_proto_header_has_valid_protocol(self):
"""Test that signature verifies when X-Forwarded-Proto HTTP header has same protocol as one that was used for signing request
"""
url = "https://testserver/oauth/none/"
kwargs = {
'wsgi.url_scheme': "http",
'HTTP_AUTHORIZATION': self._make_GET_auth_header(url),
'HTTP_X_FORWARDED_PROTO': 'https',
}
response = self.c.get(url.replace('https', 'http'), **kwargs)
self.assertEqual(response.status_code, 200)
url = "http://testserver/oauth/none/"
kwargs = {
'wsgi.url_scheme': "https",
"HTTP_AUTHORIZATION": self._make_GET_auth_header(url),
"HTTP_X_FORWARDED_PROTO": "http",
}
response = self.c.get(url.replace('http', 'https'), **kwargs)
self.assertEqual(response.status_code, 200)
class OAuthTestIssue16NoncesCheckedAgainstTimestamp(BaseOAuthTestCase):
def test_timestamp_ok(self):
self._request_token()
self._authorize_and_access_token_using_form()
parameters = {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_signature_method': "PLAINTEXT",
'oauth_version': "1.0",
'oauth_token': self.ACCESS_TOKEN_KEY,
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': str(int(time.time()))+"nonce1",
'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, self.ACCESS_TOKEN_SECRET),
}
response = self.c.get("/oauth/photo/", parameters)
self.assertEqual(response.status_code, 200)
def test_timestamp_repeated_nonce(self):
self._request_token()
self._authorize_and_access_token_using_form()
timestamp = str(int(time.time()))
nonce = timestamp + "nonce"
parameters = {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_signature_method': "PLAINTEXT",
'oauth_version': "1.0",
'oauth_token': self.ACCESS_TOKEN_KEY,
'oauth_timestamp': timestamp,
'oauth_nonce': nonce,
'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, self.ACCESS_TOKEN_SECRET),
}
response = self.c.get("/oauth/photo/", parameters)
self.assertEqual(response.status_code, 200)
response = self.c.get("/oauth/photo/", parameters)
self.assertEqual(response.status_code, 401)
def test_timestamp_old_nonce(self):
self._request_token()
self._authorize_and_access_token_using_form()
#make this nonce older
timestamp = str(int(datetime.datetime.now().strftime("%s")) - (settings.OAUTH_NONCE_VALID_PERIOD + 1))
nonce = timestamp + "nonce"
parameters = {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_signature_method': "PLAINTEXT",
'oauth_version': "1.0",
'oauth_token': self.ACCESS_TOKEN_KEY,
'oauth_timestamp': timestamp,
'oauth_nonce': nonce,
'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, self.ACCESS_TOKEN_SECRET),
}
response = self.c.get("/oauth/photo/", parameters)
self.assertEqual(response.status_code, 401)
class OAuthTestIssue39(BaseOAuthTestCase):
"""
See https://bitbucket.org/david/django-oauth-plus/issue/39/request-token-scope-unused.
"""
def setUp(self):
super(OAuthTestIssue39, self).setUp()
Scope.objects.create(name='scope1')
Scope.objects.create(name='scope2')
def test_different_token_scopes(self):
self._request_token(scope='scope1')
# Authorization code below copied from BaseOAuthTestCase
self.c.login(username=self.username, password=self.password)
parameters = self.authorization_parameters = {'oauth_token': self.request_token.key}
response = self.c.get("/oauth/authorize/", parameters)
self.assertEqual(response.status_code, 200)
# fill form (authorize us)
parameters['authorize_access'] = 1
response = self.c.post("/oauth/authorize/", parameters)
self.assertEqual(response.status_code, 302)
# finally access authorized access_token
oauth_verifier = parse_qs(urlparse(response['Location']).query)['oauth_verifier'][0]
# logout to ensure that will not authorize with session
self.c.logout()
# Changed line - change the scope of access token
# access token's scope should be same as request token
self._access_token(oauth_verifier=oauth_verifier, oauth_token=self.request_token.key, scope='scope2')
access_token = Token.objects.get(key=self.ACCESS_TOKEN_KEY)
self.assertEqual(access_token.scope.name, 'scope1')
class OAuthTestIssue44PostRequestBodyInSignature(BaseOAuthTestCase):
def test_POST_with_x_www_form_urlencoded_body_params_and_auth_header(self):
"""Test issue when user's request has authorization header and uses
application/x-www-form-urlencoded content type with some
request body parameters.
note: In this case both POST and GET parameters should be included in
signature base string, so we test GET and POST together
note: behaviour defined in http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
"""
# get valid access token
self._request_token()
self._authorize_and_access_token_using_form()
# init request params and headers
get_params = {"foo": "bar"}
body_params = {"some": "param", "other": "param"}
content_type = "application/x-www-form-urlencoded"
header = self._make_auth_header_with_HMAC_SHA1('post', "/oauth/photo/", get_params, body_params, True)
body = six.moves.urllib.parse.urlencode(body_params)
response = self.c.post(
            # this is a workaround to have both POST & GET params in this request
"/oauth/photo/?%s" % six.moves.urllib.parse.urlencode(get_params),
data=body,
HTTP_AUTHORIZATION=header["Authorization"],
content_type=content_type
)
self.assertEqual(response.status_code, 200)
def test_POST_with_x_www_form_urlencoded_body_params_and_auth_header_unauthorized(self):
"""Test issue when user's request has authorization header and uses
application/x-www-form-urlencoded content type with some
request body parameters, but signature was generated without body
params.
"""
# get valid access token
self._request_token()
self._authorize_and_access_token_using_form()
# init request params and headers
get_params = {"foo": "bar"}
body_params = {"some": "param", "other": "param"}
content_type = "application/x-www-form-urlencoded"
header = self._make_auth_header_with_HMAC_SHA1('post', "/oauth/photo/", get_params, {}, True)
body = six.moves.urllib.parse.urlencode(body_params)
response = self.c.post(
            # this is a workaround to have both POST & GET params in this request
"/oauth/photo/?%s" % six.moves.urllib.parse.urlencode(get_params),
data=body,
HTTP_AUTHORIZATION=header["Authorization"],
content_type=content_type
)
self.assertEqual(response.status_code, 401)
def _make_auth_header_with_HMAC_SHA1(self, http_method, path, get_params, body_params, is_form_encoded):
"""make auth header, take in consideration both get and post body_params
"""
consumer = oauth.Consumer(key=self.CONSUMER_KEY, secret=self.CONSUMER_SECRET)
token = oauth.Token(key=self.ACCESS_TOKEN_KEY, secret=self.ACCESS_TOKEN_SECRET)
url = "http://testserver:80" + path
body = six.moves.urllib.parse.urlencode(body_params)
params = {}
params.update(get_params)
params.update(body_params)
request = oauth.Request.from_consumer_and_token(
consumer=consumer, token=token,
http_method=http_method, http_url=url,
is_form_encoded=is_form_encoded,
body=body,
            # it seems that the body parameter alone isn't enough to get the body
            # params into the signature base string
parameters=params
)
# Sign the request.
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
return request.to_header()
| 41.403226
| 133
| 0.668631
|
7151fee6151e8c36df2d1e05e3f52c888c6cb370
| 3,246
|
py
|
Python
|
tools/xenserver/populate_other_config.py
|
bopopescu/OpenStack-DNRM-Nova
|
7354f378398850113ac93b511547ed05218dc770
|
[
"Apache-2.0"
] | 1
|
2021-04-08T10:13:03.000Z
|
2021-04-08T10:13:03.000Z
|
tools/xenserver/populate_other_config.py
|
bopopescu/OpenStack-DNRM-Nova
|
7354f378398850113ac93b511547ed05218dc770
|
[
"Apache-2.0"
] | 1
|
2018-01-19T07:50:49.000Z
|
2018-01-19T07:50:49.000Z
|
tools/xenserver/populate_other_config.py
|
bopopescu/OpenStack-DNRM-Nova
|
7354f378398850113ac93b511547ed05218dc770
|
[
"Apache-2.0"
] | 1
|
2020-07-24T08:49:47.000Z
|
2020-07-24T08:49:47.000Z
|
#!/usr/bin/env python
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
One-time script to populate VDI.other_config.
We use metadata stored in VDI.other_config to associate a VDI with a given
instance so that we may safely clean up orphaned VDIs.
We had a bug in the code that meant that the vast majority of VDIs created
would not have the other_config populated.
After deploying the fixed code, this script is intended to be run against all
compute-workers in a cluster so that existing VDIs can have their other_configs
populated.
Run on compute-worker (not Dom0):
python ./tools/xenserver/populate_other_config.py [--dry-run|--verbose]
"""
import gettext
gettext.install('nova', unicode=1)
import os
import sys
possible_topdir = os.getcwd()
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
from nova import config
from nova.openstack.common import uuidutils
from nova.virt import virtapi
from nova.virt.xenapi import driver as xenapi_driver
from nova.virt.xenapi import vm_utils
from oslo.config import cfg
cli_opts = [
cfg.BoolOpt('dry-run',
default=False,
help='Whether to actually update other_config.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
def main():
config.parse_args(sys.argv)
xenapi = xenapi_driver.XenAPIDriver(virtapi.VirtAPI())
session = xenapi._session
vdi_refs = session.call_xenapi('VDI.get_all')
for vdi_ref in vdi_refs:
vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
other_config = vdi_rec['other_config']
# Already set...
if 'nova_instance_uuid' in other_config:
continue
name_label = vdi_rec['name_label']
# We only want name-labels of form instance-<UUID>-[optional-suffix]
if not name_label.startswith('instance-'):
continue
# Parse out UUID
instance_uuid = name_label.replace('instance-', '')[:36]
if not uuidutils.is_uuid_like(instance_uuid):
print "error: name label '%s' wasn't UUID-like" % name_label
continue
vdi_type = vdi_rec['name_description']
# We don't need a full instance record, just the UUID
instance = {'uuid': instance_uuid}
if not CONF.dry_run:
vm_utils._set_vdi_info(session, vdi_ref, vdi_type, name_label,
vdi_type, instance)
if CONF.verbose:
print "Setting other_config for instance_uuid=%s vdi_uuid=%s" % (
instance_uuid, vdi_rec['uuid'])
if CONF.dry_run:
print "Dry run completed"
if __name__ == "__main__":
main()
| 30.336449
| 79
| 0.69162
|
8811b2902d59659676bb07d4673a0fc253902cae
| 297
|
py
|
Python
|
PIC/test/TestCfg.py
|
LawrenceK/webbrick
|
cf81416653f091bacfbf29eb6e4507db33ac0ca6
|
[
"BSD-3-Clause"
] | 1
|
2019-01-21T13:10:49.000Z
|
2019-01-21T13:10:49.000Z
|
PIC/test/TestCfg.py
|
LawrenceK/webbrick
|
cf81416653f091bacfbf29eb6e4507db33ac0ca6
|
[
"BSD-3-Clause"
] | null | null | null |
PIC/test/TestCfg.py
|
LawrenceK/webbrick
|
cf81416653f091bacfbf29eb6e4507db33ac0ca6
|
[
"BSD-3-Clause"
] | null | null | null |
# $Id:$
#
class TestWbConfig:
_wbAddress = "10.100.100.100"
# these should match what the webBrick has.
_dwellCount = 4
_spCount = 8
_diCount = 12
_aiCount = 4
_doCount = 8
_aoCount = 4
_tempCount = 5
_schedCount = 16
_sceneCount = 12
| 17.470588
| 48
| 0.569024
|
3e2190e48d1dcf00656dec0f37e5c457741ad884
| 1,150
|
py
|
Python
|
ricerca_app/migrations/0027_auto_20211117_0938.py
|
UniversitaDellaCalabria/Ricerca
|
b46157d3182d1c59cff4d36cc63b9e89f2f320c9
|
[
"Apache-2.0"
] | null | null | null |
ricerca_app/migrations/0027_auto_20211117_0938.py
|
UniversitaDellaCalabria/Ricerca
|
b46157d3182d1c59cff4d36cc63b9e89f2f320c9
|
[
"Apache-2.0"
] | 1
|
2020-08-03T15:12:46.000Z
|
2020-09-03T22:12:16.000Z
|
ricerca_app/migrations/0027_auto_20211117_0938.py
|
UniversitaDellaCalabria/Ricerca
|
b46157d3182d1c59cff4d36cc63b9e89f2f320c9
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-11-17 09:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ricerca_app', '0026_auto_20211117_0859'),
]
operations = [
migrations.CreateModel(
name='LaboratorioTipologiaAttivita',
fields=[
('id', models.AutoField(db_column='ID', primary_key=True, serialize=False)),
('descrizione', models.CharField(db_column='DESCRIZIONE', max_length=1000)),
],
options={
'db_table': 'LABORATORIO_TIPOLOGIA_ATTIVITA',
'managed': True,
},
),
migrations.RemoveField(
model_name='laboratorioattivita',
name='tipologia_attivita',
),
migrations.AddField(
model_name='laboratorioattivita',
name='id_tipologia_attivita',
field=models.ForeignKey(db_column='ID_TIPOLOGIA_ATTIVITA', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='ricerca_app.laboratoriotipologiaattivita'),
),
]
| 32.857143
| 177
| 0.610435
|
8461f68b626b787a8b9488cb3ea373ebb61e0614
| 1,370
|
py
|
Python
|
provided-basic-tests/tests/proj3.py
|
wangsanfeng1998/xv6-3
|
15bf3cfb6275f70d682ea5780e654a387d1df001
|
[
"Xnet",
"X11"
] | null | null | null |
provided-basic-tests/tests/proj3.py
|
wangsanfeng1998/xv6-3
|
15bf3cfb6275f70d682ea5780e654a387d1df001
|
[
"Xnet",
"X11"
] | null | null | null |
provided-basic-tests/tests/proj3.py
|
wangsanfeng1998/xv6-3
|
15bf3cfb6275f70d682ea5780e654a387d1df001
|
[
"Xnet",
"X11"
] | null | null | null |
import sys, os, inspect
import toolspath
from testing import Xv6Test, Xv6Build
curdir = os.path.realpath(os.path.dirname(inspect.getfile(inspect.currentframe())))
def get_description(name):
cfile = os.path.join(curdir, 'ctests/proj3', name+'.c')
with open(cfile, 'r') as f:
desc = f.readline()
desc = desc.strip()
desc = desc[2:]
desc = "Description:" + desc
if desc[-2:] == '*/':
desc = desc[:-2]
return desc.strip()
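# Illustrative sketch (the real ctests/proj3/*.c files are not shown here): for a test
# whose first line is "/* clone: basic thread creation test */", get_description('clone')
# would return "Description: clone: basic thread creation test".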
test_values = [
{'name': 'clone'},
{'name': 'badclone'},
{'name': 'join'},
]
all_tests = []
build_test = Xv6Build
for test in test_values:
testname = test['name']
if 'point_value' in test.keys():
point_value = test['point_value']
else:
point_value = 0
members = {
'name': testname,
'tester': 'ctests/proj3/' + testname + '.c',
'description': get_description(testname),
'timeout': 10,
'point_value': point_value
}
newclass = type(testname, (Xv6Test,), members)
all_tests.append(newclass)
setattr(sys.modules[__name__], testname, newclass)
class usertests(Xv6Test):
name = 'usertests'
tester = 'ctests/proj3/usertests.c'
description = get_description('usertests')
timeout = 240
# To include full regression test by running usertests, uncomment the line below.
#all_tests.append(usertests)
from testing.runtests import main
main(build_test, all_tests)
| 24.464286
| 83
| 0.668613
|
0dcf168cec9559877cf933092cabde33eb891c19
| 5,777
|
py
|
Python
|
extra_foam/gui/windows/tests/test_plot_widgets.py
|
European-XFEL/EXtra-foam
|
f8d225db6b8923d0cce9db2b8c8a80613600b64c
|
[
"BSD-3-Clause"
] | 7
|
2019-11-27T09:31:37.000Z
|
2022-02-12T21:28:49.000Z
|
extra_foam/gui/windows/tests/test_plot_widgets.py
|
European-XFEL/EXtra-foam
|
f8d225db6b8923d0cce9db2b8c8a80613600b64c
|
[
"BSD-3-Clause"
] | 172
|
2019-12-03T07:56:02.000Z
|
2022-03-25T15:46:45.000Z
|
extra_foam/gui/windows/tests/test_plot_widgets.py
|
European-XFEL/EXtra-foam
|
f8d225db6b8923d0cce9db2b8c8a80613600b64c
|
[
"BSD-3-Clause"
] | 9
|
2019-11-27T09:32:38.000Z
|
2022-01-05T09:56:10.000Z
|
import unittest
from unittest.mock import patch
from extra_foam.logger import logger
from extra_foam.gui import mkQApp
from extra_foam.gui.plot_widgets.plot_items import ScatterPlotItem
from extra_foam.pipeline.data_model import ProcessedData
from extra_foam.pipeline.tests import _TestDataMixin
app = mkQApp()
logger.setLevel('CRITICAL')
class testPumpProbeWidgets(_TestDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._data = cls.empty_data()
def testPumpProbeVFomPlot(self):
from extra_foam.gui.windows.pump_probe_w import PumpProbeVFomPlot
widget = PumpProbeVFomPlot()
widget.updateF(self._data)
def testPumpProbeFomPlot(self):
from extra_foam.gui.windows.pump_probe_w import PumpProbeFomPlot
widget = PumpProbeFomPlot()
widget._data = self._data
widget.refresh()
class testPulseOfInterestWidgets(_TestDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._empty_data = cls.empty_data()
cls._data, _ = cls.data_with_assembled(1001, (4, 2, 2), histogram=True)
def testPoiImageView(self):
from extra_foam.gui.windows.pulse_of_interest_w import PoiImageView
widget = PoiImageView(0)
data = self._empty_data
widget.updateF(data)
def testPoiFomHist(self):
from extra_foam.gui.windows.pulse_of_interest_w import PoiFomHist
widget = PoiFomHist(0)
# empty data
widget._data = self._empty_data
widget.refresh()
# non-empty data
widget._data = self._data
widget.refresh()
def testPoiRoiHist(self):
from extra_foam.gui.windows.pulse_of_interest_w import PoiRoiHist
widget = PoiRoiHist(0)
# empty data
data = self._empty_data
widget.updateF(data)
# non-empty data
data = self._data
widget.updateF(data)
class testBinningWidgets(_TestDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._empty_data = cls.empty_data()
def testBin1dHist(self):
from extra_foam.gui.windows.binning_w import Bin1dHist
widget = Bin1dHist()
widget._data = self._empty_data
widget.refresh()
def testHeatmap1D(self):
from extra_foam.gui.windows.binning_w import Bin1dHeatmap
widget = Bin1dHeatmap()
widget._data = self._empty_data
# test "Auto level" reset
widget._auto_level = True
widget.refresh()
self.assertFalse(widget._auto_level)
def testHeatmap2D(self):
from extra_foam.gui.windows.binning_w import Bin2dHeatmap
for is_count in [False, True]:
widget = Bin2dHeatmap(count=is_count)
widget._data = self._empty_data
# test "Auto level" reset
widget._auto_level = True
widget.refresh()
self.assertFalse(widget._auto_level)
class testCorrrelationWidgets(_TestDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._empty_data = cls.empty_data()
cls._data, _ = cls.data_with_assembled(1001, (4, 2, 2), correlation=True)
def testGeneral(self):
from extra_foam.gui.windows.correlation_w import CorrelationPlot
for i in range(2):
widget = CorrelationPlot(0)
widget._data = self._empty_data
widget.refresh()
def testResolutionSwitch(self):
from extra_foam.gui.windows.correlation_w import CorrelationPlot
from extra_foam.gui.plot_widgets.plot_items import StatisticsBarItem, pg
# resolution1 = 0.0 and resolution2 > 0.0
widget = CorrelationPlot(0)
widget._data = self._data
widget.refresh()
plot_item, plot_item_slave = widget._plot, widget._plot_slave
self.assertIsInstance(plot_item, ScatterPlotItem)
self.assertIsInstance(plot_item_slave, ScatterPlotItem)
widget._idx = 1 # a trick
widget.refresh()
self.assertNotIn(plot_item, widget._plot_area._items) # being deleted
self.assertNotIn(plot_item_slave, widget._plot_area._items) # being deleted
plot_item, plot_item_slave = widget._plot, widget._plot_slave
self.assertIsInstance(plot_item, StatisticsBarItem)
self.assertIsInstance(plot_item_slave, StatisticsBarItem)
self.assertEqual(2, plot_item._beam)
self.assertEqual(2, plot_item_slave._beam)
# beam size changes with resolution
widget._data["processed"].corr[1].resolution = 4
widget.refresh()
self.assertEqual(4, plot_item._beam)
self.assertEqual(4, plot_item_slave._beam)
widget._idx = 0 # a trick
widget.refresh()
self.assertNotIn(plot_item, widget._plot_area._items) # being deleted
self.assertNotIn(plot_item_slave, widget._plot_area._items) # being deleted
self.assertIsInstance(widget._plot, ScatterPlotItem)
self.assertIsInstance(widget._plot_slave, ScatterPlotItem)
class testHistogramWidgets(_TestDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._empty_data = cls.empty_data()
cls._data, _ = cls.data_with_assembled(1001, (4, 2, 2), histogram=True)
def testFomHist(self):
from extra_foam.gui.windows.histogram_w import FomHist
widget = FomHist()
# empty data
widget._data = self._empty_data
widget.refresh()
# non-empty data
widget._data = self._data
widget.refresh()
def testInTrainFomPlot(self):
from extra_foam.gui.windows.histogram_w import InTrainFomPlot
widget = InTrainFomPlot()
data = self._empty_data
widget.updateF(data)
| 31.227027
| 84
| 0.680284
|
d717cebc76262f34fa0233e27599a1fa474de1fd
| 3,178
|
py
|
Python
|
setup.py
|
jakevdp/MyST-NB
|
156d50e6b59553c06423e7541be3ed8e5802c863
|
[
"BSD-3-Clause"
] | 1
|
2021-03-18T07:57:54.000Z
|
2021-03-18T07:57:54.000Z
|
setup.py
|
jakevdp/MyST-NB
|
156d50e6b59553c06423e7541be3ed8e5802c863
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
jakevdp/MyST-NB
|
156d50e6b59553c06423e7541be3ed8e5802c863
|
[
"BSD-3-Clause"
] | null | null | null |
"""MyST-NB package setup."""
from setuptools import find_packages, setup
# Manually finding the version so we don't need to import our module
text = open("./myst_nb/__init__.py").read()
for line in text.split("\n"):
if "__version__" in line:
break
version = line.split("= ")[-1].strip('"')
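# For example (illustrative value only): a line such as __version__ = "0.12.0" leaves
# version == "0.12.0" after the split and strip above.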
setup(
name="myst-nb",
version=version,
description=(
"A Jupyter Notebook Sphinx reader built on top of the MyST markdown parser."
),
long_description=open("README.md", encoding="utf8").read(),
long_description_content_type="text/markdown",
url="https://github.com/executablebooks/myst-nb",
author="ExecutableBookProject",
author_email="choldgraf@berkeley.edu",
license="BSD-3",
packages=find_packages(),
entry_points={
"myst_nb.mime_render": [
"default = myst_nb.render_outputs:CellOutputRenderer",
"inline = myst_nb.render_outputs:CellOutputRendererInline",
],
# 'pygments.lexers': [
# 'myst_ansi = myst_nb.ansi_lexer:AnsiColorLexer',
# ],
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
"Framework :: Sphinx :: Extension",
],
keywords="markdown lexer parser development docutils sphinx",
python_requires=">=3.6",
package_data={"myst_nb": ["_static/*"]},
install_requires=[
"myst-parser~=0.13.1",
"docutils>=0.15",
"sphinx>=2,<4",
# TODO 0.3.2 requires some changes to the pytests
"jupyter_sphinx==0.3.1",
"jupyter-cache~=0.4.1",
"ipython",
"nbformat~=5.0",
"nbconvert~=5.6",
"ipywidgets>=7.0.0,<8",
"pyyaml",
"sphinx-togglebutton~=0.2.2",
"importlib_metadata",
],
extras_require={
"code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
"testing": [
"pytest~=5.4",
"pytest-cov~=2.8",
"coverage<5.0",
"pytest-regressions",
"matplotlib",
"numpy",
"sympy",
"pandas",
"jupytext~=1.8.0",
],
"rtd": [
"jupytext~=1.8.0",
"coconut~=1.4.3",
"sphinxcontrib-bibtex",
"ipywidgets",
"pandas",
"numpy",
"sympy",
"altair",
"alabaster",
"bokeh",
"plotly",
"matplotlib",
"sphinx-copybutton",
"sphinx-book-theme",
"sphinx-panels~=0.4.1",
],
},
zip_safe=True,
)
| 32.10101
| 84
| 0.539333
|
ab3f54bcb668fea52cb3fd659528f73b14600d6e
| 1,222
|
py
|
Python
|
test/test_image_log.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
test/test_image_log.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
test/test_image_log.py
|
LayeredInsight/layint_api_python
|
a5c9a5b24098bd823c5102b7ab9e4745432f19b4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Layered Insight Assessment, Compliance, Witness & Control
LI Assessment & Compliance performs static vulnerability analysis, license and package compliance. LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.10
Contact: help@layeredinsight.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import layint_api
from layint_api.rest import ApiException
from layint_api.models.image_log import ImageLog
class TestImageLog(unittest.TestCase):
""" ImageLog unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testImageLog(self):
"""
Test ImageLog
"""
# FIXME: construct object with mandatory attributes with example values
#model = layint_api.models.image_log.ImageLog()
pass
if __name__ == '__main__':
unittest.main()
| 27.155556
| 383
| 0.728314
|
b7886a78dcf52542cd225548213e6e637b43c166
| 14,533
|
py
|
Python
|
catalyst/finance/controls.py
|
red-eagle-eye/catalyst
|
4a72e8158a49548095f1e7fbbff9e7a52d8dca4b
|
[
"Apache-2.0"
] | 6
|
2019-05-23T17:52:22.000Z
|
2022-01-30T08:13:19.000Z
|
catalyst/finance/controls.py
|
red-eagle-eye/catalyst
|
4a72e8158a49548095f1e7fbbff9e7a52d8dca4b
|
[
"Apache-2.0"
] | null | null | null |
catalyst/finance/controls.py
|
red-eagle-eye/catalyst
|
4a72e8158a49548095f1e7fbbff9e7a52d8dca4b
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logbook
import pandas as pd
from six import with_metaclass
from catalyst.errors import (
AccountControlViolation,
TradingControlViolation,
)
from catalyst.constants import LOG_LEVEL
log = logbook.Logger('TradingControl', level=LOG_LEVEL)
class TradingControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, on_error, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.on_error = on_error
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
asset,
amount,
limit_price,
portfolio,
algo_datetime,
algo_current_data):
"""
Before any order is executed by TradingAlgorithm, this method should be
called *exactly once* on each registered TradingControl object.
If the specified asset and amount do not violate this TradingControl's
restraint given the information in `portfolio`, this method should
return None and have no externally-visible side-effects.
        If the desired order violates this TradingControl's constraint, this
        method should call self.handle_violation(asset, amount, datetime).
"""
raise NotImplementedError
def _constraint_msg(self, metadata):
constraint = repr(self)
if metadata:
constraint = "{constraint} (Metadata: {metadata})".format(
constraint=constraint,
metadata=metadata
)
return constraint
def handle_violation(self, asset, amount, datetime, metadata=None):
"""
        Handle a TradingControlViolation, either by raising or by logging an
        error with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
"""
constraint = self._constraint_msg(metadata)
if self.on_error == 'fail':
raise TradingControlViolation(
asset=asset,
amount=amount,
datetime=datetime,
constraint=constraint)
elif self.on_error == 'log':
log.error("Order for {amount} shares of {asset} at {dt} "
"violates trading constraint {constraint}",
amount=amount, asset=asset, dt=datetime,
constraint=constraint)
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
class MaxOrderCount(TradingControl):
"""
TradingControl representing a limit on the number of orders that can be
placed in a given trading day.
"""
def __init__(self, on_error, max_count):
super(MaxOrderCount, self).__init__(on_error, max_count=max_count)
self.orders_placed = 0
self.max_count = max_count
self.current_date = None
def validate(self,
asset,
amount,
limit_price,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
"""
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.handle_violation(asset, amount, algo_datetime)
self.orders_placed += 1
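# Note (added for clarity): controls like MaxOrderCount are not usually instantiated
# directly. In zipline-derived APIs such as this one they are typically registered from
# the algorithm via helpers along the lines of set_max_order_count(...), which append an
# instance to the algorithm's list of trading controls; the exact helper names are not
# shown in this file.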
class RestrictedListOrder(TradingControl):
"""TradingControl representing a restricted list of assets that
cannot be ordered by the algorithm.
Parameters
----------
restrictions : catalyst.finance.asset_restrictions.Restrictions
Object representing restrictions of a group of assets.
"""
def __init__(self, on_error, restrictions):
super(RestrictedListOrder, self).__init__(on_error)
self.restrictions = restrictions
def validate(self,
asset,
amount,
limit_price,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the asset is in the restricted_list.
"""
if self.restrictions.is_restricted(asset, algo_datetime):
self.handle_violation(asset, amount, algo_datetime)
class MaxOrderSize(TradingControl):
"""
TradingControl representing a limit on the magnitude of any single order
placed with the given asset. Can be specified by share or by dollar
value.
"""
def __init__(self, on_error, asset=None, max_shares=None,
max_notional=None):
super(MaxOrderSize, self).__init__(on_error,
asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
limit_price,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and amount > self.max_shares:
self.handle_violation(asset, amount, algo_datetime)
if limit_price is not None:
asset_price = limit_price
else:
asset_price = algo_current_data.current(asset, "price")
order_value = amount * asset_price
too_much_value = (self.max_notional is not None and
order_value > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime)
class MaxPositionSize(TradingControl):
"""
TradingControl representing a limit on the maximum position size that can
be held by an algo for a given asset.
"""
def __init__(self, on_error, asset=None, max_shares=None,
max_notional=None):
super(MaxPositionSize, self).__init__(on_error,
asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
limit_price,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
order_price = limit_price if limit_price is not None else current_price
current_shares_value = current_share_count * current_price
order_value = amount * order_price
value_post_order = current_shares_value + order_value
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime)
class LongOnly(TradingControl):
"""
TradingControl representing a prohibition against holding short positions.
"""
def __init__(self, on_error):
super(LongOnly, self).__init__(on_error)
def validate(self,
asset,
amount,
limit_price,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.handle_violation(asset, amount, algo_datetime)
class AssetDateBounds(TradingControl):
"""
TradingControl representing a prohibition against ordering an asset before
its start_date, or after its end_date.
"""
def __init__(self, on_error):
super(AssetDateBounds, self).__init__(on_error)
def validate(self,
asset,
amount,
limit_price,
portfolio,
algo_datetime,
algo_current_data):
"""
        Fail if the algo is past this Asset's end_date, or before the
        Asset's start_date.
"""
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {
'asset_start_date': normalized_start
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {
'asset_end_date': normalized_end
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata)
class AccountControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
        On each call to handle_data by TradingAlgorithm, this method should be
called *exactly once* on each registered AccountControl object.
If the check does not violate this AccountControl's restraint given
the information in `portfolio` and `account`, this method should
return None and have no externally-visible side-effects.
        If the desired order violates this AccountControl's constraint, this
method should call self.fail().
"""
raise NotImplementedError
def fail(self):
"""
Raise an AccountControlViolation with information about the failure.
"""
raise AccountControlViolation(constraint=repr(self))
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
class MaxLeverage(AccountControl):
"""
AccountControl representing a limit on the maximum leverage allowed
by the algorithm.
"""
def __init__(self, max_leverage):
"""
        max_leverage is the gross leverage in decimal form. For example,
        2 limits an algorithm to trading at most double the account value.
"""
super(MaxLeverage, self).__init__(max_leverage=max_leverage)
self.max_leverage = max_leverage
if max_leverage is None:
raise ValueError(
"Must supply max_leverage"
)
if max_leverage < 0:
raise ValueError(
"max_leverage must be positive"
)
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
"""
if _account.leverage > self.max_leverage:
self.fail()
| 32.805869
| 79
| 0.594165
|
2b81d3e3ebbbe9699111be22c330ad0fde3f8f0c
| 1,080
|
py
|
Python
|
pystripe/api/utils.py
|
gbozee/pystripe
|
42ffef976bfec5ee0021c53dcd29676abdf78204
|
[
"MIT"
] | null | null | null |
pystripe/api/utils.py
|
gbozee/pystripe
|
42ffef976bfec5ee0021c53dcd29676abdf78204
|
[
"MIT"
] | null | null | null |
pystripe/api/utils.py
|
gbozee/pystripe
|
42ffef976bfec5ee0021c53dcd29676abdf78204
|
[
"MIT"
] | 1
|
2019-11-05T18:51:00.000Z
|
2019-11-05T18:51:00.000Z
|
def charge_data(raw_data, full_auth=False):
data = raw_data["object"]
if full_auth:
return data
status = data.get("status")
amount_refunded = data.get("amount_refunded")
if amount_refunded:
amount_refunded = amount_refunded / 100
amount = data["amount"]
session = data["id"]
payment_details = data["payment_method_details"]["card"]
customer = data["customer"]
currency = data["currency"]
failure_code = data.get("failure_code")
failure_message = data.get("failure_message")
_id = data["id"]
outcome = data.get("outcome")
failure = {}
if failure_code:
failure = {"code": failure_code, "message": failure_message}
return {
"id": session,
"amount": amount / 100,
"currency": currency,
"customer": customer,
"payment_details": payment_details,
"status": status,
"amount_refunded": amount_refunded,
"failure": failure,
"outcome": outcome,
"amount_refunded": amount_refunded,
}
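# Illustrative input shape (field values are made up): charge_data expects a Stripe
# charge event payload whose "object" is the charge, e.g.
#   {"object": {"id": "ch_123", "amount": 5000, "currency": "usd", "customer": "cus_123",
#               "status": "succeeded", "payment_method_details": {"card": {...}},
#               "amount_refunded": 0}}
# and returns the amounts converted from cents to whole currency units.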
| 32.727273
| 69
| 0.602778
|
7b223786d4b2fca11af07d990740743edf7345e9
| 622
|
py
|
Python
|
codewars/duck_duck_goose.py
|
davidlukac/codekata-python
|
e4a9297fa658d2d36de43b3547353be85c08e990
|
[
"MIT"
] | null | null | null |
codewars/duck_duck_goose.py
|
davidlukac/codekata-python
|
e4a9297fa658d2d36de43b3547353be85c08e990
|
[
"MIT"
] | null | null | null |
codewars/duck_duck_goose.py
|
davidlukac/codekata-python
|
e4a9297fa658d2d36de43b3547353be85c08e990
|
[
"MIT"
] | null | null | null |
# Duck Duck Goose
# http://www.codewars.com/kata/duck-duck-goose
import unittest
from typing import List
class Player:
def __init__(self, name: str):
self.name = name
def duck_duck_goose(players: List[Player], goose: int) -> str:
return players[goose % len(players) - 1].name
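# Worked example: with players ['a', 'b', 'c'] and goose=4, 4 % 3 == 1 and 1 - 1 == 0,
# so the goose is players[0] ('a'); the 1-based goose count wraps around the circle and
# is mapped back to a 0-based list index.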
class TestDuckDuckGoose(unittest.TestCase):
l = [Player('a'), Player('b'), Player('c')]
def first_round_test(self):
self.assertEqual(duck_duck_goose(self.l, 1), 'a')
def second_round_test(self):
self.assertEqual(duck_duck_goose(self.l, 4), 'a')
if __name__ == '__main__':
unittest.main()
| 22.214286
| 62
| 0.670418
|
8d770de7b40ac78a6362fe1840f0447f8b96d4fd
| 14,992
|
py
|
Python
|
web_requester/requester.py
|
JoseVL92/http-requests
|
fa846e0d0e59943847ee4c871bc701726ffec909
|
[
"MIT"
] | 1
|
2020-01-09T07:37:50.000Z
|
2020-01-09T07:37:50.000Z
|
web_requester/requester.py
|
JoseVL92/http-requests
|
fa846e0d0e59943847ee4c871bc701726ffec909
|
[
"MIT"
] | null | null | null |
web_requester/requester.py
|
JoseVL92/http-requests
|
fa846e0d0e59943847ee4c871bc701726ffec909
|
[
"MIT"
] | null | null | null |
import aiohttp
import asyncio
import logging
import requests
import sys
from aiohttp.client_exceptions import ClientConnectionError, ClientResponseError
from concurrent.futures import TimeoutError
from urllib3.util import parse_url
"""
From experience, I know that the aiohttp library causes problems behind an HTTP proxy when accessing an HTTPS site.
It may even run into problems without a proxy, but that is something I cannot say for certain.
I use aiohttp for performance reasons, but always check for common failures and fall back to the requests library API
for any async request that aiohttp cannot handle.
"""
# ------------------------------------------- DEFAULTS AND AUXILIAR VARIABLES ------------------------------------------
default_logger = logging.getLogger(__name__)
default_logger.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s | web-requester/%(levelname)s | %(module)s: %(lineno)d] %(message)s',
'%Y-%m-%d %H:%M:%S')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(formatter)
# file_handler = logging.FileHandler('logs.log')
# file_handler.setLevel(logging.DEBUG)
# file_handler.setFormatter(formatter)
# logger.addHandler(file_handler)
default_logger.addHandler(stdout_handler)
default_encoding = 'UTF-8'
# Some sites block bots based on the User-Agent header. To avoid that, we send a Chrome v101 User-Agent.
default_headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.64 Safari/537.36"
}
default_tcp_connections_limit = 100
default_tcp_connections_limit_per_host = 30
options_keys = ['method', 'data', 'params', 'json', 'proxy_cfg', 'headers',
'allow_aio', 'aio_client', 'timeout', 'logger', 'callback']
allowed_http_methods = ['get', 'post', 'put', 'delete', 'patch', 'head', 'options']
def create_aioclient(timeout=None, connections_limit=None, connections_limit_per_host=None):
session_timeout = aiohttp.ClientTimeout(total=timeout)
conn = aiohttp.TCPConnector(
limit=connections_limit or default_tcp_connections_limit,
limit_per_host=connections_limit_per_host or default_tcp_connections_limit_per_host
)
    # Setting trust_env=True gives the default behavior: if the proxy parameter is not specified per request,
    # look at the environment variables and use them if they exist, or do not use a proxy at all
return aiohttp.ClientSession(timeout=session_timeout, connector=conn, trust_env=True)
# default_client = create_aioclient()
# ------------------------------------------- EXCEPTIONS ---------------------------------------------------------------
class XTimeOutError(Exception):
""" The operation exceeded the given deadline """
pass
class XHTTPError(Exception):
""" Server response has a status code X where 400 < X < 599 """
pass
class XConnectionError(Exception):
""" Client cannot establish connection """
pass
class XResponseError(Exception):
""" Connection error during reading response """
pass
# ------------------------------------------- MAIN ENTRY POINT ---------------------------------------------------------
def request_all(urloptions_list: list, common_options: dict = None):
"""
    Make several requests with options common to all of them, or with specific options for each one.
    The aiohttp library is used only for the default behavior (text retrieval, no callback) and only if it is not
    explicitly disabled via {'allow_aio': False} in common_options.
    The options dictionary has the following shape (for example):
{
"method": "get",
"data": b"Hola Mundo!!!", # Only used with non-get requests
"params": {"q": "frase a buscar"}, # Only used with get requests
"json": [{ "si": "no" }],
"proxy_cfg": {"http": "http://miproxy"},
"headers": {"Content-Type": "application/pdf"},
"allow_aio": True, # if False, it ensures the use of 'requests' always and excludes aiohttp. Default: True
"aio_client": <aiohttp.ClientSession>, # this is the only option that must be global, not specific for a single url
"timeout": 15,
"logger": <logging.logger>,
"callback": <function that receives a requests.Response as argument>.
If callback exists, the URL will be fetched using 'requests' only for standardization
}
Args:
        urloptions_list: list of URLs or (url, opts) tuples, where 'url' is the URL of the request and 'opts' is a dictionary of request-specific options
        common_options: dictionary of options applied to every request that does not specify its own options
    Returns: A list of responses as produced by each request's callback (or the general callback), or each response text by default
>>> urls = ['https://google.com', ('https://actualidad.rt.com', {'json': {'num': 2}}), 'http://www.cubadebate.cu',
('https://www.filehorse.com/es', {'timeout': 12})]
>>> response_list = request_all(urls, {'method': 'get', 'timeout': '10'})
"""
if not common_options:
common_options = {}
for k in list(common_options.keys()):
if k not in options_keys:
del common_options[k]
loop = asyncio.get_event_loop()
response_list = loop.run_until_complete(request_all_async(urloptions_list, common_options))
return response_list
# ------------------------------------------- TASK LINKERS -------------------------------------------------------------
async def chain_callback(future_fn, future_fn_args, future_fn_kwargs, callback, callback_args, callback_kwargs):
if not callable(future_fn) or not callable(callback):
raise AttributeError("future_fn and callback must be callable")
if not isinstance(future_fn_args, (tuple, list)) or not isinstance(callback_args, (tuple, list)):
raise AttributeError("future_fn_args and callable_args must be tuples or list")
if not all([isinstance(x, dict) for x in (future_fn_kwargs, callback_kwargs)]):
raise AttributeError("future_fn_kwargs and callback_kwargs must be dictionaries")
resp = await future_fn(*future_fn_args, **future_fn_kwargs)
return await callback(resp, *callback_args, **callback_kwargs)
async def request_all_async(urloptions_list: list, common_options: dict = None):
"""
    Same as request_all, but intended to be awaited directly from async code
"""
if not common_options:
common_options = {}
for k in list(common_options.keys()):
if k not in options_keys:
del common_options[k]
async def aux(aux_aio_client=None):
for url in urloptions_list:
# If url is not a string, must be an iterable with shape ("http://url.example.com", {**options})
comm_opts = dict(common_options)
if isinstance(url, (list, tuple)):
if len(url) != 2 or not isinstance(url[1], dict):
raise AttributeError("At least one URL attribute has an incorrect format")
url, opts = url
comm_opts = {**comm_opts, **opts}
comm_opts.pop('aio_client', None)
comm_opts.pop('close_aio_at_end', None)
tasks.append(async_request(url, aio_client=aux_aio_client, close_aio_at_end=False, **comm_opts))
return await asyncio.gather(*tasks, return_exceptions=True)
tasks = []
if common_options.get('allow_aio', True) and not callable(common_options.get('callback')):
aio_client = common_options.pop('aio_client', create_aioclient())
async with aio_client as client:
return await aux(client)
logger = common_options.get('logger', default_logger)
logger.debug("Using 'requests' library for every request")
return await aux()
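# Minimal usage sketch (URLs and option values are placeholders), for callers already
# inside a coroutine:
#     texts = await request_all_async(
#         ['https://example.com', ('https://example.org', {'timeout': 5})],
#         {'method': 'get', 'timeout': 10})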
# ------------------------------------------- SYNC/ASYNC DEFINITORS ----------------------------------------------------
async def async_request(url, method='get', data=None, params=None, json=None, proxy_cfg=None, *, headers=None,
timeout=None, callback=None, allow_aio=True, aio_client=None, close_aio_at_end=True, **kwargs):
if 'logger' in kwargs:
logger = kwargs.pop('logger')
had_logger = True
else:
logger = default_logger
had_logger = False
if not isinstance(method, str) or not method.lower() in allowed_http_methods:
logger.error(f"HTTP method '{method}' not allowed")
return
method = method.lower()
try:
timeout = int(timeout)
except Exception:
logger.warning(f"'{timeout}' is not a valid timeout value.")
timeout = None
parsed = parse_url(url)
if parsed.scheme not in ('http', 'https'):
raise ValueError(f"URL '{url}' has not a valid schema (must be http or https)")
if isinstance(proxy_cfg, dict):
single_proxy = proxy_cfg.get('http')
else:
single_proxy = proxy_cfg
# If there is a proxy_cfg and it is https, or if the site to visit is https, do not use aiohttp
if (proxy_cfg is not None and not single_proxy) or parsed.scheme == 'https' or parse_url(
single_proxy).scheme == 'https':
allow_aio = False
# only use callbacks with 'requests' responses
if callback:
allow_aio = False
if allow_aio:
aio_client = aio_client or create_aioclient()
try:
return await aiohttp_pure_request(url, method, data, params, json, single_proxy,
headers=headers, client=aio_client, timeout=timeout,
close_client_at_end=close_aio_at_end, **kwargs)
except ClientConnectionError:
logger.info("Retrying with 'requests' instead")
# Pushing logger back to kwargs if it was present before
if had_logger:
kwargs['logger'] = logger
return await sync_to_async_request(url, method, data, params, json, proxy_cfg, headers=headers,
timeout=timeout, callback=callback, **kwargs)
async def sync_to_async_request(url, method, data=None, params=None, json=None, proxy_cfg=None, *,
headers=None, timeout=None, callback=None, **kwargs):
if proxy_cfg is not None and isinstance(proxy_cfg, str):
proxy_cfg = {
"http": proxy_cfg,
"https": proxy_cfg
}
elif not isinstance(proxy_cfg, dict):
proxy_cfg = None
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, sync_request, url, method, data, params, json, proxy_cfg, headers,
timeout, callback, kwargs)
# ------------------------------------------- DEFAULT RESPONSE CALLBACKS -----------------------------------------------
def get_response_text(response):
try:
text = response.text
except UnicodeError:
response.encoding = default_encoding
text = response.text
return text
# ------------------------------------------- REQUESTERS ---------------------------------------------------------------
async def aiohttp_pure_request(url, method, data=None, params=None, json=None, proxy_cfg=None, *, headers=None,
client=None, timeout=None, close_client_at_end=True, **kwargs):
client = client or create_aioclient()
proxy_auth = {'proxy': proxy_cfg} # if proxy_cfg else {'trust_env': True}
raise_for_status = True
headers = headers or default_headers
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = default_logger
    # Inspired by requests.models.PreparedRequest.prepare_body()
# if isinstance(data, dict):
# basestring = (bytes, str)
# result = []
# for k, vs in data.items():
# if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
# vs = [vs]
# for v in vs:
# if v is not None:
# result.append(
# (k.encode(default_encoding) if isinstance(k, str) else k,
# v.encode(default_encoding) if isinstance(v, str) else v))
# data = urlencode(result, doseq=True)
# content_type = 'application/x-www-form-urlencoded'
#
# # Add content-type if it wasn't explicitly provided.
# if 'content-type' not in headers:
# headers['Content-Type'] = content_type
client_kwargs = {
'data': data,
'params': params,
'json': json,
'headers': {**default_headers, **headers},
'timeout': timeout,
'raise_for_status': raise_for_status
}
kwargs.update(client_kwargs)
kwargs.update(proxy_auth)
request_func = getattr(client, method, client.get)
try:
async with request_func(url, **kwargs) as resp:
try:
text = await resp.text()
except UnicodeError:
text = await resp.text(encoding=default_encoding, errors='replace')
if close_client_at_end:
await client.close()
return text
except TimeoutError as err:
logger.warning(f"aiohttp => TimeOut => {url}: {str(err)}")
return
except ClientResponseError as err:
logger.warning(f"aiohttp => HTTPError status {err.status} => {url}: {str(err)}")
return
except ClientConnectionError as err:
logger.warning(f"aiohttp => ConnectionError => {url}: {str(err)}")
raise err
def sync_request(url, method, data=None, params=None, json=None, proxy_cfg=None, headers=None,
timeout=None, callback=None, kwargs=None):
# kwargs is a dictionary with additional options for requests.request function, and an optional logger object
kwargs = kwargs or dict()
callback = callback or get_response_text
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = default_logger
if isinstance(proxy_cfg, str):
proxy_cfg = {
"http": proxy_cfg,
"https": proxy_cfg
}
headers = headers or {}
client_kwargs = {
'data': data,
'params': params,
'json': json,
'headers': {**default_headers, **headers},
'timeout': timeout,
'proxies': proxy_cfg
}
client_kwargs.update(kwargs)
resp = requests.request(method, url, **client_kwargs)
try:
resp.raise_for_status()
return callback(resp)
except requests.exceptions.Timeout as err:
logger.warning(f"requests => TimeOut => '{url}': {str(err)}")
return
except requests.exceptions.HTTPError as err:
logger.warning(f"requests => HTTPError => '{url}': {str(err)}")
return
except requests.exceptions.RequestException as err:
logger.warning(f"requests => RequestError => '{url}': {str(err)}")
return
| 39.766578
| 129
| 0.623599
|
06c4d88d2fb5216e178e4de47efbcc471cd1a57e
| 1,519
|
gyp
|
Python
|
vendor/mapbox-gl-native/vendor/mapbox-base/mapbox/pixelmatch-cpp/pixelmatch.gyp
|
aic-develop/vietmaps-gl-native-ios-source
|
c73c28c4f1ea60ecd4c83c8b49f6947ee06f24f2
|
[
"BSL-1.0",
"Apache-2.0"
] | 33
|
2015-11-20T23:33:50.000Z
|
2022-02-25T21:28:41.000Z
|
vendor/mapbox-gl-native/vendor/mapbox-base/mapbox/pixelmatch-cpp/pixelmatch.gyp
|
aic-develop/vietmaps-gl-native-ios-source
|
c73c28c4f1ea60ecd4c83c8b49f6947ee06f24f2
|
[
"BSL-1.0",
"Apache-2.0"
] | 5
|
2016-09-28T11:37:41.000Z
|
2022-02-05T11:08:44.000Z
|
vendor/mapbox-gl-native/vendor/mapbox-base/mapbox/pixelmatch-cpp/pixelmatch.gyp
|
aic-develop/vietmaps-gl-native-ios-source
|
c73c28c4f1ea60ecd4c83c8b49f6947ee06f24f2
|
[
"BSL-1.0",
"Apache-2.0"
] | 5
|
2016-12-30T16:44:22.000Z
|
2021-02-21T12:55:46.000Z
|
{
'target_defaults': {
'default_configuration': 'Release',
'cflags_cc': [ '-std=c++11', '-Wall', '-Wextra', '-Wshadow', '-fno-rtti', '-fexceptions' ],
'xcode_settings': {
'CLANG_CXX_LANGUAGE_STANDARD':'c++11',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
'CLANG_CXX_LIBRARY': 'libc++',
'OTHER_CPLUSPLUSFLAGS': [ '-Wall', '-Wextra', '-Wshadow', '-fno-rtti', '-fexceptions' ],
'OTHER_CFLAGS': [ '-Wall', '-Wextra', '-Wshadow' ],
},
'configurations': {
'Debug': {
'cflags_cc': [ '-g', '-O0', '-fno-omit-frame-pointer','-fwrapv', '-fstack-protector-all', '-fno-common' ],
'defines': [ 'DEBUG' ],
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '0',
'GCC_GENERATE_DEBUGGING_SYMBOLS': 'YES',
'DEAD_CODE_STRIPPING': 'NO',
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'NO',
'OTHER_CPLUSPLUSFLAGS': [ '-fno-omit-frame-pointer','-fwrapv', '-fstack-protector-all', '-fno-common']
}
},
'Release': {
'cflags_cc': [ '-g', '-O3' ],
'defines': [ 'NDEBUG' ],
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '3',
'GCC_GENERATE_DEBUGGING_SYMBOLS': 'YES',
'DEAD_CODE_STRIPPING': 'YES',
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'NO'
}
},
},
},
'targets': [
{ 'target_name': 'test',
'type': 'executable',
'include_dirs': [
'include',
],
'sources': [
'test/test.cpp'
],
},
],
}
| 31
| 114
| 0.518762
|