Dataset columns:

| column | dtype | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
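The last three columns are statistics derived from `content`. The sketch below is an assumption about how they are computed (not the dataset's published preprocessing code), but it reproduces the values shown in the sample rows that follow; `derived_stats` is a hypothetical helper name.

```python
# A minimal sketch, assuming the derived columns are simple functions of the
# raw `content` string; `derived_stats` is a hypothetical helper, not dataset code.
def derived_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "avg_line_length": len(content) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }

# For the first sample row below (src/aioice/about.py: 258 bytes over 8 lines),
# this yields avg_line_length = 258 / 8 = 32.25 and max_line_length = 86.
```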
Row 1: src/aioice/about.py (SimahoJr/aioice)
hexsha: 3d80d4abe9c83b86ecaed06d16585efb8b583b01 | size: 258 | ext: py | lang: Python
repo_path (stars / issues / forks): src/aioice/about.py
repo_name (stars / issues / forks): SimahoJr/aioice
repo_head_hexsha (stars / issues / forks): 2f503e9f81d17a8afd10e643be8a5a618c797ed1
repo_licenses (stars / issues / forks): ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
stars / issues / forks event datetimes: null
content:
```python
__author__ = "Jeremy Lainé"
__email__ = "jeremy.laine@m4x.org"
__license__ = "BSD"
__summary__ = "An implementation of Interactive Connectivity Establishment (RFC 5245)"
__title__ = "aioice"
__uri__ = "https://github.com/aiortc/aioice"
__version__ = "0.7.7"
```
avg_line_length: 32.25 | max_line_length: 86 | alphanum_fraction: 0.755814
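The file above only defines package metadata. A minimal consumption sketch, assuming an aioice 0.7.x install that still ships `aioice/about.py` exactly as shown:

```python
# A minimal sketch, assuming aioice 0.7.x is installed so that aioice.about
# exposes the dunder attributes defined in the row above.
from aioice.about import __title__, __version__, __summary__

print(f"{__title__} {__version__}: {__summary__}")
```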
Row 2: keras/layers/__init__.py (ikingye/keras)
hexsha: 8867c520caf72fedb2f24c6c6835682459849e7b | size: 368 | ext: py | lang: Python
repo_path (stars / issues / forks): keras/layers/__init__.py
repo_name (stars / issues / forks): ikingye/keras
repo_head_hexsha (stars / issues / forks): 1a3ee8441933fc007be6b2beb47af67998d50737
repo_licenses (stars / issues / forks): ["MIT"]
max_stars_count: 5 (events 2020-11-30T22:26:03.000Z to 2020-12-01T22:34:25.000Z)
max_issues_count: 10 (events 2020-12-01T22:55:29.000Z to 2020-12-11T18:31:46.000Z)
max_forks_count: 15 (events 2020-11-30T22:12:22.000Z to 2020-12-09T01:32:48.000Z)
content:
```python
from tensorflow.keras.layers import *
from . import advanced_activations
from . import convolutional
from . import convolutional_recurrent
from . import core
from . import embeddings
from . import experimental
from . import local
from . import merge
from . import noise
from . import normalization
from . import pooling
from . import recurrent
from . import wrappers
```
avg_line_length: 23 | max_line_length: 37 | alphanum_fraction: 0.80163
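This `keras/layers/__init__.py` is a thin shim: the star import re-exports everything from `tf.keras.layers`, and the remaining imports keep the legacy submodule paths alive. A minimal sketch of what that implies, assuming TensorFlow 2.x and this 2020-era shim-style keras package:

```python
# A minimal sketch, assuming TensorFlow 2.x plus the shim-style keras package
# shown above (keras ~2.4, late 2020); not valid for the later standalone Keras 3.
import tensorflow as tf
from keras import layers  # executes the __init__.py shown in this row

# The star import binds the very same class objects into keras.layers.
print(layers.Dense is tf.keras.layers.Dense)  # expected: True under this shim
```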
Row 3: Python-Refresher/list.py (Ruhul12/Django)
hexsha: 0c5fcdbdb84b17cb2f4d3c398cf630b31ecdfbf2 | size: 358 | ext: py | lang: Python
repo_path (stars / issues / forks): Python-Refresher/list.py
repo_name: Ruhul12/Django (stars); ruhulaminparvez/Django (issues, forks)
repo_head_hexsha (stars / issues / forks): e6c6c2f125386212c0a7c7b2aed4beee649b39bd
repo_licenses (stars / issues / forks): ["MIT"]
max_stars_count: 2 (events 2021-09-10T07:43:41.000Z to 2021-09-11T19:40:40.000Z)
max_issues_count: null | max_forks_count: null (event datetimes null)
content:
```python
# List
dog_names = ["tom", "sean", "sally", "mark"]
print(type(dog_names))
print(dog_names)
# Adding Item On Last Position
dog_names.append("sam")
print(dog_names)
# Adding Item On First Position
dog_names.insert(0, "bruz")
print(dog_names)
# Delete Items
del(dog_names[0])
print(dog_names)
# Length Of List
print('Length Of List: ',len(dog_names))
```
avg_line_length: 14.916667 | max_line_length: 44 | alphanum_fraction: 0.712291
Row 4: opencv/semantic_segmetation_opencv.py (vanduc103/coral_examples)
hexsha: e0f657126ee0d7de64396282a935abf3ea9b54a2 | size: 5,340 | ext: py | lang: Python
repo_path (stars / issues / forks): opencv/semantic_segmetation_opencv.py
repo_name (stars / issues / forks): vanduc103/coral_examples
repo_head_hexsha (stars / issues / forks): a514d003a3948cb0888d2dabc0bdd93939f8ddd0
repo_licenses (stars / issues / forks): ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
stars / issues / forks event datetimes: null
content:
```python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""An example using `BasicEngine` to perform semantic segmentation.
The following command runs this script and saves a new image showing the
segmented pixels at the location specified by `output`:
python3 examples/semantic_segmentation.py \
--model models/deeplabv3_mnv2_pascal_quant_edgetpu.tflite \
--input models/bird.bmp \
--keep_aspect_ratio \
--output ${HOME}/segmentation_result.jpg
"""
import argparse
import platform
import subprocess
from segment_engine import SegmentationEngine
from edgetpu.utils import dataset_utils, image_processing
from PIL import Image
from PIL import ImageDraw
import numpy as np
import cv2
import os
def create_pascal_label_colormap():
"""Creates a label colormap used in PASCAL VOC segmentation benchmark.
Returns:
A Colormap for visualizing segmentation results.
"""
colormap = np.zeros((256, 3), dtype=int)
indices = np.arange(256, dtype=int)
for shift in reversed(range(8)):
for channel in range(3):
colormap[:, channel] |= ((indices >> channel) & 1) << shift
indices >>= 3
return colormap
def label_to_color_image(label):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
Returns:
result: A 2D array with floating type. The element of the array
is the color indexed by the corresponding element in the input label
to the PASCAL color map.
Raises:
ValueError: If label is not of rank 2 or its value is larger than color
map maximum entry.
"""
if label.ndim != 2:
raise ValueError('Expect 2-D input label')
colormap = create_pascal_label_colormap()
if np.max(label) >= len(colormap):
raise ValueError('label value too large.')
return colormap[label]
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
help='Path of the segmentation model.',
required=True)
parser.add_argument(
'--camera_idx', type=str, help='Camera index.', default=0)
parser.add_argument(
'--keep_aspect_ratio',
dest='keep_aspect_ratio',
action='store_true',
help=(
'keep the image aspect ratio when down-sampling the image by adding '
'black pixel padding (zeros) on bottom or right. '
'By default the image is resized and reshaped without cropping. This '
'option should be the same as what is applied on input images during '
'model training. Otherwise the accuracy may be affected and the '
'bounding box of detection result may be stretched.'))
parser.add_argument(
'--concat', type=bool, help='Concat original image and segmentation image?', default=False)
parser.set_defaults(keep_aspect_ratio=False)
args = parser.parse_args()
# Initialize engine.
engine = SegmentationEngine(args.model)
_, height, width, _ = engine.get_input_tensor_shape()
print("Load all models done!")
# Read frame from camera (or video)
cap = cv2.VideoCapture(args.camera_idx)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
img = Image.fromarray(cv2_im_rgb)
# Open image.
if args.keep_aspect_ratio:
resized_img, ratio = image_processing.resampling_with_original_ratio(
img, (width, height), Image.NEAREST)
else:
resized_img = img.resize((width, height))
ratio = (1., 1.)
input_tensor = np.asarray(resized_img).flatten()
_, raw_result = engine.run_inference(input_tensor)
result = np.reshape(raw_result, (height, width))
new_width, new_height = int(width * ratio[0]), int(height * ratio[1])
# If keep_aspect_ratio, we need to remove the padding area.
result = result[:new_height, :new_width]
vis_result = label_to_color_image(result.astype(int)).astype(np.uint8)
if args.concat:
vis_result = Image.fromarray(vis_result)
vis_img = resized_img.crop((0, 0, new_width, new_height))
# Concat resized input image and processed segmentation results.
concated_image = Image.new('RGB', (new_width*2, new_height))
concated_image.paste(vis_img, (0, 0))
concated_image.paste(vis_result, (width, 0))
concated_image = np.array(concated_image)
concated_image = concated_image[:, :, ::-1].copy()
cv2.imshow('frame', concated_image)
else:
cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
cv2.imshow('frame', vis_result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
```
avg_line_length: 33.375 | max_line_length: 97 | alphanum_fraction: 0.702247
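The `create_pascal_label_colormap` helper in this file builds the standard PASCAL VOC palette by interleaving bits of the class index across the three color channels. A short standalone sketch of the same trick, applied to a tiny made-up label map:

```python
# A self-contained sketch (NumPy only) of the bit-interleaving used above: on each
# of 8 passes the low three bits of the running index go to R, G and B at bit
# position `shift`, then the index is shifted right by 3.
import numpy as np

def pascal_colormap() -> np.ndarray:
    colormap = np.zeros((256, 3), dtype=int)
    indices = np.arange(256, dtype=int)
    for shift in reversed(range(8)):
        for channel in range(3):
            colormap[:, channel] |= ((indices >> channel) & 1) << shift
        indices >>= 3
    return colormap

label = np.array([[0, 1], [2, 15]])  # tiny made-up segmentation labels
print(pascal_colormap()[label])      # label 1 -> [128 0 0], label 15 -> [192 128 128]
```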
Row 5: src/transformers/models/auto/tokenization_auto.py (techthiyanes/transformers)
hexsha: 4a3ab5523bcaee8b874371e09eb17e2fb5d524e9 | size: 30,892 | ext: py | lang: Python
repo_path (stars / issues / forks): src/transformers/models/auto/tokenization_auto.py
repo_name (stars / issues / forks): techthiyanes/transformers
repo_head_hexsha (stars / issues / forks): 705d65368fb28246534ef636fe62c008f4fb2682
repo_licenses (stars / issues / forks): ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
stars / issues / forks event datetimes: null
content:
````python
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Tokenizer class."""
import importlib
import json
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import get_file_from_repo, is_sentencepiece_available, is_tokenizers_available, logging
from ..encoder_decoder import EncoderDecoderConfig
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
config_class_to_model_type,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
if TYPE_CHECKING:
# This significantly improves completion suggestion performance when
# the transformers package is used with Microsoft's Pylance language server.
TOKENIZER_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict()
else:
TOKENIZER_MAPPING_NAMES = OrderedDict(
[
("plbart", ("PLBartTokenizer" if is_sentencepiece_available() else None, None)),
("realm", ("RealmTokenizer", "RealmTokenizerFast" if is_tokenizers_available() else None)),
("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)),
("retribert", ("RetriBertTokenizer", "RetriBertTokenizerFast" if is_tokenizers_available() else None)),
("roformer", ("RoFormerTokenizer", "RoFormerTokenizerFast" if is_tokenizers_available() else None)),
(
"t5",
(
"T5Tokenizer" if is_sentencepiece_available() else None,
"T5TokenizerFast" if is_tokenizers_available() else None,
),
),
(
"mt5",
(
"MT5Tokenizer" if is_sentencepiece_available() else None,
"MT5TokenizerFast" if is_tokenizers_available() else None,
),
),
("mobilebert", ("MobileBertTokenizer", "MobileBertTokenizerFast" if is_tokenizers_available() else None)),
("distilbert", ("DistilBertTokenizer", "DistilBertTokenizerFast" if is_tokenizers_available() else None)),
(
"albert",
(
"AlbertTokenizer" if is_sentencepiece_available() else None,
"AlbertTokenizerFast" if is_tokenizers_available() else None,
),
),
(
"camembert",
(
"CamembertTokenizer" if is_sentencepiece_available() else None,
"CamembertTokenizerFast" if is_tokenizers_available() else None,
),
),
(
"pegasus",
(
"PegasusTokenizer" if is_sentencepiece_available() else None,
"PegasusTokenizerFast" if is_tokenizers_available() else None,
),
),
(
"mbart",
(
"MBartTokenizer" if is_sentencepiece_available() else None,
"MBartTokenizerFast" if is_tokenizers_available() else None,
),
),
(
"xlm-roberta",
(
"XLMRobertaTokenizer" if is_sentencepiece_available() else None,
"XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
),
),
("marian", ("MarianTokenizer" if is_sentencepiece_available() else None, None)),
("blenderbot-small", ("BlenderbotSmallTokenizer", None)),
("blenderbot", ("BlenderbotTokenizer", "BlenderbotTokenizerFast")),
("tapex", ("TapexTokenizer", None)),
("bart", ("BartTokenizer", "BartTokenizerFast")),
("longformer", ("LongformerTokenizer", "LongformerTokenizerFast" if is_tokenizers_available() else None)),
("roberta", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
(
"reformer",
(
"ReformerTokenizer" if is_sentencepiece_available() else None,
"ReformerTokenizerFast" if is_tokenizers_available() else None,
),
),
("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)),
("funnel", ("FunnelTokenizer", "FunnelTokenizerFast" if is_tokenizers_available() else None)),
("lxmert", ("LxmertTokenizer", "LxmertTokenizerFast" if is_tokenizers_available() else None)),
("layoutlm", ("LayoutLMTokenizer", "LayoutLMTokenizerFast" if is_tokenizers_available() else None)),
("layoutlmv2", ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" if is_tokenizers_available() else None)),
("layoutxlm", ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast" if is_tokenizers_available() else None)),
(
"dpr",
(
"DPRQuestionEncoderTokenizer",
"DPRQuestionEncoderTokenizerFast" if is_tokenizers_available() else None,
),
),
(
"squeezebert",
("SqueezeBertTokenizer", "SqueezeBertTokenizerFast" if is_tokenizers_available() else None),
),
("bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
("openai-gpt", ("OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" if is_tokenizers_available() else None)),
("gpt2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
("transfo-xl", ("TransfoXLTokenizer", None)),
(
"xlnet",
(
"XLNetTokenizer" if is_sentencepiece_available() else None,
"XLNetTokenizerFast" if is_tokenizers_available() else None,
),
),
("flaubert", ("FlaubertTokenizer", None)),
("xlm", ("XLMTokenizer", None)),
("ctrl", ("CTRLTokenizer", None)),
("fsmt", ("FSMTTokenizer", None)),
("bert-generation", ("BertGenerationTokenizer" if is_sentencepiece_available() else None, None)),
("deberta", ("DebertaTokenizer", "DebertaTokenizerFast" if is_tokenizers_available() else None)),
(
"deberta-v2",
(
"DebertaV2Tokenizer" if is_sentencepiece_available() else None,
"DebertaV2TokenizerFast" if is_tokenizers_available() else None,
),
),
("rag", ("RagTokenizer", None)),
("xlm-prophetnet", ("XLMProphetNetTokenizer" if is_sentencepiece_available() else None, None)),
("speech_to_text", ("Speech2TextTokenizer" if is_sentencepiece_available() else None, None)),
("speech_to_text_2", ("Speech2Text2Tokenizer", None)),
("m2m_100", ("M2M100Tokenizer" if is_sentencepiece_available() else None, None)),
("prophetnet", ("ProphetNetTokenizer", None)),
("mpnet", ("MPNetTokenizer", "MPNetTokenizerFast" if is_tokenizers_available() else None)),
("tapas", ("TapasTokenizer", None)),
("led", ("LEDTokenizer", "LEDTokenizerFast" if is_tokenizers_available() else None)),
("convbert", ("ConvBertTokenizer", "ConvBertTokenizerFast" if is_tokenizers_available() else None)),
(
"big_bird",
(
"BigBirdTokenizer" if is_sentencepiece_available() else None,
"BigBirdTokenizerFast" if is_tokenizers_available() else None,
),
),
("ibert", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
("qdqbert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
("wav2vec2", ("Wav2Vec2CTCTokenizer", None)),
("hubert", ("Wav2Vec2CTCTokenizer", None)),
("gpt_neo", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
("luke", ("LukeTokenizer", None)),
("mluke", ("MLukeTokenizer" if is_sentencepiece_available() else None, None)),
("bigbird_pegasus", ("PegasusTokenizer", "PegasusTokenizerFast" if is_tokenizers_available() else None)),
("canine", ("CanineTokenizer", None)),
("bertweet", ("BertweetTokenizer", None)),
("bert-japanese", ("BertJapaneseTokenizer", None)),
("splinter", ("SplinterTokenizer", "SplinterTokenizerFast")),
("byt5", ("ByT5Tokenizer", None)),
(
"cpm",
(
"CpmTokenizer" if is_sentencepiece_available() else None,
"CpmTokenizerFast" if is_tokenizers_available() else None,
),
),
("herbert", ("HerbertTokenizer", "HerbertTokenizerFast" if is_tokenizers_available() else None)),
("phobert", ("PhobertTokenizer", None)),
("bartpho", ("BartphoTokenizer", None)),
(
"barthez",
(
"BarthezTokenizer" if is_sentencepiece_available() else None,
"BarthezTokenizerFast" if is_tokenizers_available() else None,
),
),
(
"mbart50",
(
"MBart50Tokenizer" if is_sentencepiece_available() else None,
"MBart50TokenizerFast" if is_tokenizers_available() else None,
),
),
(
"rembert",
(
"RemBertTokenizer" if is_sentencepiece_available() else None,
"RemBertTokenizerFast" if is_tokenizers_available() else None,
),
),
(
"clip",
(
"CLIPTokenizer",
"CLIPTokenizerFast" if is_tokenizers_available() else None,
),
),
("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)),
(
"perceiver",
(
"PerceiverTokenizer",
None,
),
),
(
"xglm",
(
"XGLMTokenizer" if is_sentencepiece_available() else None,
"XGLMTokenizerFast" if is_tokenizers_available() else None,
),
),
]
)
TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES)
CONFIG_TO_TYPE = {v: k for k, v in CONFIG_MAPPING_NAMES.items()}
def tokenizer_class_from_name(class_name: str):
if class_name == "PreTrainedTokenizerFast":
return PreTrainedTokenizerFast
for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items():
if class_name in tokenizers:
module_name = model_type_to_module_name(module_name)
module = importlib.import_module(f".{module_name}", "transformers.models")
return getattr(module, class_name)
for config, tokenizers in TOKENIZER_MAPPING._extra_content.items():
for tokenizer in tokenizers:
if getattr(tokenizer, "__name__", None) == class_name:
return tokenizer
return None
def get_tokenizer_config(
pretrained_model_name_or_path: Union[str, os.PathLike],
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: bool = False,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
**kwargs,
):
"""
Loads the tokenizer configuration from a pretrained model tokenizer configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a configuration file saved using the
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
`Dict`: The configuration of the tokenizer.
Examples:
```python
# Download configuration from huggingface.co and cache.
tokenizer_config = get_tokenizer_config("bert-base-uncased")
# This model does not have a tokenizer config so the result will be an empty dict.
tokenizer_config = get_tokenizer_config("xlm-roberta-base")
# Save a pretrained tokenizer locally and you can reload its config
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
tokenizer.save_pretrained("tokenizer-test")
tokenizer_config = get_tokenizer_config("tokenizer-test")
```"""
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path,
TOKENIZER_CONFIG_FILE,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
use_auth_token=use_auth_token,
revision=revision,
local_files_only=local_files_only,
)
if resolved_config_file is None:
logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.")
return {}
with open(resolved_config_file, encoding="utf-8") as reader:
return json.load(reader)
class AutoTokenizer:
r"""
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
created with the [`AutoTokenizer.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not
applicable to all derived classes)
inputs (additional positional arguments, *optional*):
Will be passed along to the Tokenizer `__init__()` method.
config ([`PretrainedConfig`], *optional*)
The configuration object used to dertermine the tokenizer class to instantiate.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download the model weights and configuration files and override the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
use_fast (`bool`, *optional*, defaults to `True`):
Whether or not to try to load the fast version of the tokenizer.
tokenizer_type (`str`, *optional*):
Tokenizer type to be loaded.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like
`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
`additional_special_tokens`. See parameters in the `__init__()` for more details.
Examples:
```python
>>> from transformers import AutoTokenizer
>>> # Download vocabulary from huggingface.co and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
>>> tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/")
```"""
config = kwargs.pop("config", None)
kwargs["_from_auto"] = True
use_fast = kwargs.pop("use_fast", True)
tokenizer_type = kwargs.pop("tokenizer_type", None)
trust_remote_code = kwargs.pop("trust_remote_code", False)
# First, let's see whether the tokenizer_type is passed so that we can leverage it
if tokenizer_type is not None:
tokenizer_class = None
tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None)
if tokenizer_class_tuple is None:
raise ValueError(
f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of "
f"{', '.join(c for c in TOKENIZER_MAPPING_NAMES.keys())}."
)
tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple
if use_fast and tokenizer_fast_class_name is not None:
tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name)
if tokenizer_class is None:
tokenizer_class = tokenizer_class_from_name(tokenizer_class_name)
if tokenizer_class is None:
raise ValueError(f"Tokenizer class {tokenizer_class_name} is not currently imported.")
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
# Next, let's try to use the tokenizer_config file to get the tokenizer class.
tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
config_tokenizer_class = tokenizer_config.get("tokenizer_class")
tokenizer_auto_map = None
if "auto_map" in tokenizer_config:
if isinstance(tokenizer_config["auto_map"], (tuple, list)):
# Legacy format for dynamic tokenizers
tokenizer_auto_map = tokenizer_config["auto_map"]
else:
tokenizer_auto_map = tokenizer_config["auto_map"].get("AutoTokenizer", None)
# If that did not work, let's try to use the config.
if config_tokenizer_class is None:
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
config_tokenizer_class = config.tokenizer_class
if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map:
tokenizer_auto_map = config.auto_map["AutoTokenizer"]
# If we have the tokenizer class from the tokenizer config or the model config we're good!
if config_tokenizer_class is not None:
tokenizer_class = None
if tokenizer_auto_map is not None:
if not trust_remote_code:
raise ValueError(
f"Loading {pretrained_model_name_or_path} requires you to execute the tokenizer file in that repo "
"on your local machine. Make sure you have read the code there to avoid malicious use, then set "
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
logger.warning(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)
if use_fast and tokenizer_auto_map[1] is not None:
class_ref = tokenizer_auto_map[1]
else:
class_ref = tokenizer_auto_map[0]
module_file, class_name = class_ref.split(".")
tokenizer_class = get_class_from_dynamic_module(
pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs
)
elif use_fast and not config_tokenizer_class.endswith("Fast"):
tokenizer_class_candidate = f"{config_tokenizer_class}Fast"
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
tokenizer_class_candidate = config_tokenizer_class
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
raise ValueError(
f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
)
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
# Otherwise we have to be creative.
# if model is an encoder decoder, the encoder tokenizer class is used by default
if isinstance(config, EncoderDecoderConfig):
if type(config.decoder) is not type(config.encoder): # noqa: E721
logger.warning(
f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
f"config class: {config.decoder.__class__}. It is not recommended to use the "
"`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
"specific tokenizer classes."
)
config = config.encoder
model_type = config_class_to_model_type(type(config).__name__)
if model_type is not None:
tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
if tokenizer_class_py is not None:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError(
"This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed "
"in order to use this tokenizer."
)
raise ValueError(
f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n"
f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}."
)
def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None):
"""
Register a new tokenizer in this mapping.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
slow_tokenizer_class ([`PretrainedTokenizer`], *optional*):
The slow tokenizer to register.
slow_tokenizer_class ([`PretrainedTokenizerFast`], *optional*):
The fast tokenizer to register.
"""
if slow_tokenizer_class is None and fast_tokenizer_class is None:
raise ValueError("You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class")
if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):
raise ValueError("You passed a fast tokenizer in the `slow_tokenizer_class`.")
if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):
raise ValueError("You passed a slow tokenizer in the `fast_tokenizer_class`.")
if (
slow_tokenizer_class is not None
and fast_tokenizer_class is not None
and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast)
and fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class
):
raise ValueError(
"The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not "
"consistent with the slow tokenizer class you passed (fast tokenizer has "
f"{fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those "
"so they match!"
)
# Avoid resetting a set slow/fast tokenizer if we are passing just the other ones.
if config_class in TOKENIZER_MAPPING._extra_content:
existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]
if slow_tokenizer_class is None:
slow_tokenizer_class = existing_slow
if fast_tokenizer_class is None:
fast_tokenizer_class = existing_fast
TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class))
````
avg_line_length: 50.642623 | max_line_length: 126 | alphanum_fraction: 0.62207
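The `from_pretrained` docstring above already lists the main arguments; the sketch below only illustrates the fast/slow resolution driven by `use_fast` (it needs the `bert-base-uncased` checkpoint on the Hub or in the local cache):

```python
# A minimal usage sketch based on the docstring above; requires network access
# or a cached copy of "bert-base-uncased".
from transformers import AutoTokenizer

fast = AutoTokenizer.from_pretrained("bert-base-uncased")                  # use_fast=True is the default
slow = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
print(type(fast).__name__, type(slow).__name__)  # e.g. BertTokenizerFast BertTokenizer
print(fast("Hello world")["input_ids"])
```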
Row 6: gspan_mining/benchmarkPlots/plot_chemical_graphs.py (NaazS03/gSpan)
hexsha: 3356cf54493b802d6b5b3e2d0b35b1bcef283860 | size: 756 | ext: py | lang: Python
repo_path (stars / issues / forks): gspan_mining/benchmarkPlots/plot_chemical_graphs.py
repo_name (stars / issues / forks): NaazS03/gSpan
repo_head_hexsha (stars / issues / forks): 45a500c3ff50385ef227956166a765e372574141
repo_licenses (stars / issues / forks): ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
stars / issues / forks event datetimes: null
content:
```python
import matplotlib.pyplot as plt
lines = []
line_styles = ['-', '--']
line_colors = ['red', 'blue']
x = [3,4,5,6,7,8,9,10]
y_num_total_graphs = [18121,5935,3608,2121,1770,1224,977,844]
y_num_closed_graphs = [2704,1642,1177,813,689,552,481,401]
line, = plt.plot(x, y_num_total_graphs, line_styles[0], color=line_colors[0])
lines.append(line)
line, = plt.plot(x, y_num_closed_graphs, line_styles[1], color=line_colors[1])
lines.append(line)
legend1 = plt.legend(lines, ['Total graphs produced', 'Closed Graphs Produced'], loc='upper right')
plt.gca().add_artist(legend1)
plt.xlabel("Percent of Database As Minimum Support")
plt.ylabel("Number of Graphs Produced")
plt.title("Graphs Created vs Minimum Support of Chemical Benchmark Dataset")
plt.show()
```
avg_line_length: 31.5 | max_line_length: 99 | alphanum_fraction: 0.732804
Row 7: src/cfnlint/config.py (pmahony893/cfn-lint)
hexsha: d5cc187383cdcf7033bd73f05413a0c6c2a7c1e7 | size: 25,196 | ext: py | lang: Python
repo_path (stars / issues / forks): src/cfnlint/config.py
repo_name (stars / issues / forks): pmahony893/cfn-lint
repo_head_hexsha (stars / issues / forks): e85ea913e2012e3ee2d7465a57eeda690be8c437
repo_licenses (stars / issues / forks): ["MIT-0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
stars / issues / forks event datetimes: null
content:
```python
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import sys
import argparse
import logging
import glob
import json
import os
import copy
from pathlib import Path
import six
import jsonschema
import cfnlint.decode.cfn_yaml
from cfnlint.version import __version__
from cfnlint.helpers import REGIONS
# pylint: disable=too-many-public-methods
LOGGER = logging.getLogger('cfnlint')
def configure_logging(debug_logging, info_logging):
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
if debug_logging:
LOGGER.setLevel(logging.DEBUG)
elif info_logging:
LOGGER.setLevel(logging.INFO)
else:
LOGGER.setLevel(logging.NOTSET)
log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(log_formatter)
# make sure all other log handlers are removed before adding it back
for handler in LOGGER.handlers:
LOGGER.removeHandler(handler)
LOGGER.addHandler(ch)
class ConfigFileArgs(object):
"""
Config File arguments.
Parses .cfnlintrc OR .cfnlintrc.yaml OR .cfnlintrc.yml in the Home and Project folder.
"""
file_args = {}
__user_config_file = None
__project_config_file = None
__custom_config_file = None
def __init__(self, schema=None, config_file=None):
# self.file_args = self.get_config_file_defaults()
self.file_args = {}
self.default_schema_file = Path(__file__).parent.joinpath(
'data/CfnLintCli/config/schema.json')
with self.default_schema_file.open() as f:
self.default_schema = json.load(f)
self.schema = self.default_schema if not schema else schema
if config_file:
self.__custom_config_file = config_file
else:
LOGGER.debug('Looking for CFLINTRC before attempting to load')
self.__user_config_file, self.__project_config_file = self._find_config()
self.load()
def _find_config(self):
"""Looks up for user and project level config
Returns
-------
Tuple
(Path, Path)
Tuple with both configs and whether they were found
Example
-------
> user_config, project_config = self._find_config()
"""
config_file_name = '.cfnlintrc'
if six.PY34:
self.__user_config_file = Path(os.path.expanduser('~')).joinpath(config_file_name)
else:
self.__user_config_file = Path.home().joinpath(config_file_name)
self.__project_config_file = Path.cwd().joinpath(config_file_name)
if self._has_file(config_file_name + '.yaml'):
self.__project_config_file = Path.cwd().joinpath(config_file_name + '.yaml')
elif self._has_file(config_file_name + '.yml'):
self.__project_config_file = Path.cwd().joinpath(config_file_name + '.yml')
user_config_path = ''
project_config_path = ''
if self._has_file(self.__user_config_file):
LOGGER.debug('Found User CFNLINTRC')
user_config_path = self.__user_config_file
if self._has_file(self.__project_config_file):
LOGGER.debug('Found Project level CFNLINTRC')
project_config_path = self.__project_config_file
return user_config_path, project_config_path
def _has_file(self, filename):
"""Confirm whether file exists
Parameters
----------
filename : str
Path to a file
Returns
-------
Boolean
"""
return Path(filename).is_file()
def load(self):
"""Load configuration file and expose as a dictionary
Returns
-------
Dict
CFLINTRC configuration
"""
if self.__custom_config_file:
custom_config = self._read_config(self.__custom_config_file)
LOGGER.debug('Validating Custom CFNLINTRC')
self.validate_config(custom_config, self.schema)
LOGGER.debug('Custom configuration loaded as')
LOGGER.debug('%s', custom_config)
self.file_args = custom_config
else:
user_config = self._read_config(self.__user_config_file)
LOGGER.debug('Validating User CFNLINTRC')
self.validate_config(user_config, self.schema)
project_config = self._read_config(self.__project_config_file)
LOGGER.debug('Validating Project CFNLINTRC')
self.validate_config(project_config, self.schema)
LOGGER.debug('User configuration loaded as')
LOGGER.debug('%s', user_config)
LOGGER.debug('Project configuration loaded as')
LOGGER.debug('%s', project_config)
LOGGER.debug('Merging configurations...')
self.file_args = self.merge_config(user_config, project_config)
def validate_config(self, config, schema):
"""Validate configuration against schema
Parameters
----------
config : dict
CFNLINTRC configuration
schema : dict
JSONSchema to validate against
Raises
-------
jsonschema.exceptions.ValidationError
Returned when cfnlintrc doesn't match schema provided
"""
LOGGER.debug('Validating CFNLINTRC config with given JSONSchema')
LOGGER.debug('Schema used: %s', schema)
LOGGER.debug('Config used: %s', config)
jsonschema.validate(config, schema)
LOGGER.debug('CFNLINTRC looks valid!')
def merge_config(self, user_config, project_config):
"""Merge project and user configuration into a single dictionary
Creates a new configuration with both configuration merged
it favours project level over user configuration if keys are duplicated
NOTE
----
It takes any number of nested dicts
It overrides lists found in user_config with project_config
Parameters
----------
user_config : Dict
User configuration (~/.cfnlintrc) found at user's home directory
project_config : Dict
Project configuration (.cfnlintrc) found at current directory
Returns
-------
Dict
Merged configuration
"""
# Recursively override User config with Project config
for key in user_config:
if key in project_config:
# If both keys are the same, let's check whether they have nested keys
if isinstance(user_config[key], dict) and isinstance(project_config[key], dict):
self.merge_config(user_config[key], project_config[key])
else:
user_config[key] = project_config[key]
LOGGER.debug(
'Overriding User\'s key %s with Project\'s specific value %s.', key, project_config[key])
# Project may have unique config we need to copy over too
# so that we can have user+project config available as one
for key in project_config:
if key not in user_config:
user_config[key] = project_config[key]
return user_config
def _read_config(self, config):
"""Parse given YAML configuration
Returns
-------
Dict
Parsed YAML configuration as dictionary
"""
config = Path(config)
config_template = None
if self._has_file(config):
LOGGER.debug('Parsing CFNLINTRC')
config_template = cfnlint.decode.cfn_yaml.load(str(config))
if not config_template:
config_template = {}
return config_template
def comma_separated_arg(string):
""" Split a comma separated string """
return string.split(',')
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
class RuleConfigurationAction(argparse.Action):
""" Override the default Action """
def __init__(self, option_strings, dest, nargs=None, const=None, default=None,
type=None, choices=None, required=False, help=None, metavar=None): # pylint: disable=W0622
super(RuleConfigurationAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def _parse_rule_configuration(self, string):
""" Parse the config rule structure """
configs = comma_separated_arg(string)
results = {}
for config in configs:
rule_id = config.split(':')[0]
config_name = config.split(':')[1].split('=')[0]
config_value = config.split(':')[1].split('=')[1]
if rule_id not in results:
results[rule_id] = {}
results[rule_id][config_name] = config_value
return results
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(_ensure_value(namespace, self.dest, {}))
try:
for value in values:
new_value = self._parse_rule_configuration(value)
for v_k, v_vs in new_value.items():
if v_k in items:
for s_k, s_v in v_vs.items():
items[v_k][s_k] = s_v
else:
items[v_k] = v_vs
setattr(namespace, self.dest, items)
except Exception: # pylint: disable=W0703
parser.print_help()
parser.exit()
class CliArgs(object):
""" Base Args class"""
cli_args = {}
def __init__(self, cli_args):
self.parser = self.create_parser()
self.cli_args, _ = self.parser.parse_known_args(cli_args)
def create_parser(self):
"""Do first round of parsing parameters to set options"""
class ArgumentParser(argparse.ArgumentParser):
""" Override Argument Parser so we can control the exit code"""
def error(self, message):
self.print_help(sys.stderr)
self.exit(32, '%s: error: %s\n' % (self.prog, message))
class ExtendAction(argparse.Action):
"""Support argument types that are lists and can be specified multiple times."""
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest)
items = [] if items is None else items
for value in values:
if isinstance(value, list):
items.extend(value)
else:
items.append(value)
setattr(namespace, self.dest, items)
usage = (
'\nBasic: cfn-lint test.yaml\n'
'Ignore a rule: cfn-lint -i E3012 -- test.yaml\n'
'Configure a rule: cfn-lint -x E3012:strict=false -t test.yaml\n'
'Lint all yaml files in a folder: cfn-lint dir/**/*.yaml'
)
parser = ArgumentParser(
description='CloudFormation Linter',
usage=usage)
parser.register('action', 'extend', ExtendAction)
standard = parser.add_argument_group('Standard')
advanced = parser.add_argument_group('Advanced / Debugging')
# Allow the template to be passes as an optional or a positional argument
standard.add_argument(
'templates', metavar='TEMPLATE', nargs='*', help='The CloudFormation template to be linted')
standard.add_argument(
'-t', '--template', metavar='TEMPLATE', dest='template_alt',
help='The CloudFormation template to be linted', nargs='+', default=[], action='extend')
standard.add_argument(
'-b', '--ignore-bad-template', help='Ignore failures with Bad template',
action='store_true'
)
standard.add_argument(
'--ignore-templates', dest='ignore_templates',
help='Ignore templates', nargs='+', default=[], action='extend'
)
advanced.add_argument(
'-D', '--debug', help='Enable debug logging', action='store_true'
)
advanced.add_argument(
'-I', '--info', help='Enable information logging', action='store_true'
)
standard.add_argument(
'-f', '--format', help='Output Format', choices=['quiet', 'parseable', 'json', 'junit', 'pretty', 'sarif']
)
standard.add_argument(
'-l', '--list-rules', dest='listrules', default=False,
action='store_true', help='list all the rules'
)
standard.add_argument(
'-r', '--regions', dest='regions', nargs='+', default=[],
type=comma_separated_arg, action='extend',
help='list the regions to validate against.'
)
advanced.add_argument(
'-a', '--append-rules', dest='append_rules', nargs='+', default=[],
type=comma_separated_arg, action='extend',
help='specify one or more rules directories using '
'one or more --append-rules arguments. '
)
standard.add_argument(
'-i', '--ignore-checks', dest='ignore_checks', nargs='+', default=[],
type=comma_separated_arg, action='extend',
help='only check rules whose id do not match these values'
)
standard.add_argument(
'-c', '--include-checks', dest='include_checks', nargs='+', default=[],
type=comma_separated_arg, action='extend',
help='include rules whose id match these values'
)
standard.add_argument(
'-m', '--mandatory-checks', dest='mandatory_checks', nargs='+', default=[],
type=comma_separated_arg, action='extend',
help='always check rules whose id match these values, regardless of template exclusions'
)
standard.add_argument(
'-e', '--include-experimental', help='Include experimental rules', action='store_true'
)
standard.add_argument(
'-x', '--configure-rule', dest='configure_rules', nargs='+', default={},
action=RuleConfigurationAction,
help='Provide configuration for a rule. Format RuleId:key=value. Example: E3012:strict=false'
)
standard.add_argument('--config-file', dest='config_file',
help='Specify the cfnlintrc file to use')
standard.add_argument(
'-z', '--custom-rules', dest='custom_rules',
help='Allows specification of a custom rule file.'
)
advanced.add_argument(
'-o', '--override-spec', dest='override_spec',
help='A CloudFormation Spec override file that allows customization'
)
advanced.add_argument(
'-g', '--build-graph', help='Creates a file in the same directory as the template that models the template\'s resources in DOT format', action='store_true'
)
advanced.add_argument(
'-s', '--registry-schemas', help='one or more directories of CloudFormation Registry Schemas', action='extend', type=comma_separated_arg, nargs='+'
)
standard.add_argument(
'-v', '--version', help='Version of cfn-lint', action='version',
version='%(prog)s {version}'.format(version=__version__)
)
advanced.add_argument(
'-u', '--update-specs', help='Update the CloudFormation Specs',
action='store_true'
)
advanced.add_argument(
'--update-documentation', help=argparse.SUPPRESS,
action='store_true'
)
advanced.add_argument(
'--update-iam-policies', help=argparse.SUPPRESS,
action='store_true'
)
standard.add_argument(
'--output-file', type=str, default=None,
help='Writes the output to the specified file, ideal for producing reports'
)
standard.add_argument(
'--merge-configs', default=False, action='store_true',
help='Merges lists between configuration layers'
)
return parser
class TemplateArgs(object):
""" Per Template Args """
def __init__(self, template_args):
self.set_template_args(template_args)
def get_template_args(self):
return self._template_args
def set_template_args(self, template):
defaults = {}
if isinstance(template, dict):
configs = template.get('Metadata', {}).get('cfn-lint', {}).get('config', {})
if isinstance(configs, dict):
for config_name, config_value in configs.items():
if config_name == 'ignore_checks':
if isinstance(config_value, list):
defaults['ignore_checks'] = config_value
if config_name == 'regions':
if isinstance(config_value, list):
defaults['regions'] = config_value
if config_name == 'append_rules':
if isinstance(config_value, list):
defaults['append_rules'] = config_value
if config_name == 'override_spec':
if isinstance(config_value, (six.string_types)):
defaults['override_spec'] = config_value
if config_name == 'custom_rules':
if isinstance(config_value, (six.string_types)):
defaults['custom_rules'] = config_value
if config_name == 'ignore_bad_template':
if isinstance(config_value, bool):
defaults['ignore_bad_template'] = config_value
if config_name == 'include_checks':
if isinstance(config_value, list):
defaults['include_checks'] = config_value
if config_name == 'configure_rules':
if isinstance(config_value, dict):
defaults['configure_rules'] = config_value
self._template_args = defaults
template_args = property(get_template_args, set_template_args)
# pylint: disable=too-many-public-methods
class ConfigMixIn(TemplateArgs, CliArgs, ConfigFileArgs, object):
""" Mixin for the Configs """
def __init__(self, cli_args):
CliArgs.__init__(self, cli_args)
# configure debug as soon as we can
configure_logging(self.cli_args.debug, self.cli_args.info)
TemplateArgs.__init__(self, {})
ConfigFileArgs.__init__(
self, config_file=self._get_argument_value('config_file', False, False))
def _get_argument_value(self, arg_name, is_template, is_config_file):
cli_value = getattr(self.cli_args, arg_name)
template_value = self.template_args.get(arg_name)
file_value = self.file_args.get(arg_name)
# merge list configurations
# make sure we don't do an infinite loop so skip this check for merge_configs
if arg_name != 'merge_configs':
if self.merge_configs:
# the CLI will always have an empty list when the item is a list
# we will use that to evaluate if we need to merge the lists
if isinstance(cli_value, list):
result = cli_value
if isinstance(template_value, list):
result.extend(template_value)
if isinstance(file_value, list):
result.extend(file_value)
return result
# return individual items
if cli_value:
return cli_value
if template_value and is_template:
return template_value
if file_value and is_config_file:
return file_value
return cli_value
@property
def ignore_checks(self):
return self._get_argument_value('ignore_checks', True, True)
@property
def include_checks(self):
results = self._get_argument_value('include_checks', True, True)
return ['W', 'E'] + results
@property
def mandatory_checks(self):
return self._get_argument_value('mandatory_checks', False, True)
@property
def include_experimental(self):
return self._get_argument_value('include_experimental', True, True)
@property
def regions(self):
results = self._get_argument_value('regions', True, True)
if not results:
return ['us-east-1']
if 'ALL_REGIONS' in results:
return REGIONS
return results
@property
def ignore_bad_template(self):
return self._get_argument_value('ignore_bad_template', True, True)
@property
def debug(self):
return self._get_argument_value('debug', False, False)
@property
def format(self):
return self._get_argument_value('format', False, True)
@property
def templates(self):
templates_args = self._get_argument_value('templates', False, True)
template_alt_args = self._get_argument_value('template_alt', False, False)
if template_alt_args:
filenames = template_alt_args
elif templates_args:
filenames = templates_args
else:
return None
# if only one is specified convert it to array
if isinstance(filenames, six.string_types):
filenames = [filenames]
# handle different shells and Config files
# some shells don't expand * and configparser won't expand wildcards
all_filenames = []
ignore_templates = self._ignore_templates()
for filename in filenames:
add_filenames = glob.glob(filename, recursive=True)
# only way to know of the glob failed is to test it
# then add the filename as requested
if not add_filenames:
if filename not in ignore_templates:
all_filenames.append(filename)
else:
for add_filename in add_filenames:
if add_filename not in ignore_templates:
all_filenames.append(add_filename)
return sorted(all_filenames)
def _ignore_templates(self):
ignore_template_args = self._get_argument_value('ignore_templates', False, True)
if ignore_template_args:
filenames = ignore_template_args
else:
return []
# if only one is specified convert it to array
if isinstance(filenames, six.string_types):
filenames = [filenames]
# handle different shells and Config files
# some shells don't expand * and configparser won't expand wildcards
all_filenames = []
for filename in filenames:
add_filenames = glob.glob(filename, recursive=True)
# only way to know of the glob failed is to test it
# then add the filename as requested
if not add_filenames:
all_filenames.append(filename)
else:
all_filenames.extend(add_filenames)
return all_filenames
@property
def append_rules(self):
return self._get_argument_value('append_rules', False, True)
@property
def override_spec(self):
return self._get_argument_value('override_spec', False, True)
@property
def custom_rules(self):
""" custom_rules_spec """
return self._get_argument_value('custom_rules', False, True)
@property
def update_specs(self):
return self._get_argument_value('update_specs', False, False)
@property
def update_documentation(self):
return self._get_argument_value('update_documentation', False, False)
@property
def update_iam_policies(self):
return self._get_argument_value('update_iam_policies', False, False)
@property
def listrules(self):
return self._get_argument_value('listrules', False, False)
@property
def configure_rules(self):
return self._get_argument_value('configure_rules', True, True)
@property
def config_file(self):
return self._get_argument_value('config_file', False, False)
@property
def build_graph(self):
return self._get_argument_value('build_graph', False, False)
@property
def output_file(self):
return self._get_argument_value('output_file', False, True)
@property
def registry_schemas(self):
return self._get_argument_value('registry_schemas', False, True)
@property
def merge_configs(self):
return self._get_argument_value('merge_configs', True, True)
```
avg_line_length: 37.438336 | max_line_length: 167 | alphanum_fraction: 0.605334
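To make the layering in `_get_argument_value` concrete (CLI arguments win over template `Metadata` settings, which win over `.cfnlintrc` values), here is a minimal sketch; `template.yaml` is a hypothetical filename and cfn-lint must be installed:

```python
# A minimal sketch of the ConfigMixIn defined above; 'template.yaml' is a
# hypothetical path used only for illustration.
from cfnlint.config import ConfigMixIn

config = ConfigMixIn(['--regions', 'us-west-2,eu-west-1', '--format', 'json', 'template.yaml'])
print(config.regions)    # CLI value wins: ['us-west-2', 'eu-west-1']
print(config.format)     # 'json'
print(config.templates)  # glob-expanded and sorted: ['template.yaml']
```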
Row 8: nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbwlm_lbvserver_binding.py (culbertm/NSttyPython)
hexsha: 84051f01d5b4a309ff74500544d1263fd0c9d60e | size: 6,407 | ext: py | lang: Python
repo_path (stars / issues / forks): nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbwlm_lbvserver_binding.py
repo_name (stars / issues / forks): culbertm/NSttyPython
repo_head_hexsha (stars / issues / forks): ff9f6aedae3fb8495342cd0fc4247c819cf47397
repo_licenses (stars / issues / forks): ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
stars / issues / forks event datetimes: null
content:
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbwlm_lbvserver_binding(base_resource) :
""" Binding class showing the lbvserver that can be bound to lbwlm.
"""
def __init__(self) :
self._vservername = None
self._wlmname = None
self.___count = None
@property
def wlmname(self) :
r"""The name of the Work Load Manager.<br/>Minimum length = 1.
"""
try :
return self._wlmname
except Exception as e:
raise e
@wlmname.setter
def wlmname(self, wlmname) :
r"""The name of the Work Load Manager.<br/>Minimum length = 1
"""
try :
self._wlmname = wlmname
except Exception as e:
raise e
@property
def vservername(self) :
r"""Name of the virtual server which is to be bound to the WLM.
"""
try :
return self._vservername
except Exception as e:
raise e
@vservername.setter
def vservername(self, vservername) :
r"""Name of the virtual server which is to be bound to the WLM.
"""
try :
self._vservername = vservername
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbwlm_lbvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbwlm_lbvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.wlmname is not None :
return str(self.wlmname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = lbwlm_lbvserver_binding()
updateresource.wlmname = resource.wlmname
updateresource.vservername = resource.vservername
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbwlm_lbvserver_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].wlmname = resource[i].wlmname
updateresources[i].vservername = resource[i].vservername
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = lbwlm_lbvserver_binding()
deleteresource.wlmname = resource.wlmname
deleteresource.vservername = resource.vservername
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbwlm_lbvserver_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].wlmname = resource[i].wlmname
deleteresources[i].vservername = resource[i].vservername
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, wlmname="", option_="") :
r""" Use this API to fetch lbwlm_lbvserver_binding resources.
"""
try :
if not wlmname :
obj = lbwlm_lbvserver_binding()
response = obj.get_resources(service, option_)
else :
obj = lbwlm_lbvserver_binding()
obj.wlmname = wlmname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, wlmname, filter_) :
r""" Use this API to fetch filtered set of lbwlm_lbvserver_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = lbwlm_lbvserver_binding()
obj.wlmname = wlmname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, wlmname) :
r""" Use this API to count lbwlm_lbvserver_binding resources configued on NetScaler.
"""
try :
obj = lbwlm_lbvserver_binding()
obj.wlmname = wlmname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, wlmname, filter_) :
r""" Use this API to count the filtered set of lbwlm_lbvserver_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = lbwlm_lbvserver_binding()
obj.wlmname = wlmname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class lbwlm_lbvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.lbwlm_lbvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbwlm_lbvserver_binding = [lbwlm_lbvserver_binding() for _ in range(length)]
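# A minimal usage sketch (assumes an authenticated nitro_service session object named
# `client`; the work load manager name below is hypothetical):
#
#     bindings = lbwlm_lbvserver_binding.get(client, wlmname="wlm1")
#     total = lbwlm_lbvserver_binding.count(client, wlmname="wlm1")
#     for binding in bindings:
#         print(binding.vservername)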
| 30.951691
| 125
| 0.722179
|
42b542906887a1af25505c11574b495b50d89648
| 814
|
py
|
Python
|
Collect/ETmonitor/Ei_monthly.py
|
ali1100/wa
|
700e5014533c45f38a245c3abdeacc537cb307bc
|
[
"Apache-2.0"
] | 16
|
2017-04-27T21:22:37.000Z
|
2020-10-21T12:57:03.000Z
|
Collect/ETmonitor/Ei_monthly.py
|
ali1100/wa
|
700e5014533c45f38a245c3abdeacc537cb307bc
|
[
"Apache-2.0"
] | 1
|
2017-06-17T08:07:53.000Z
|
2017-08-22T12:28:37.000Z
|
Collect/ETmonitor/Ei_monthly.py
|
wateraccounting/wa
|
29ed8e7eac732135678a5d171cd5e53a54c95313
|
[
"Apache-2.0"
] | 19
|
2016-10-24T13:24:34.000Z
|
2020-02-03T17:42:22.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 03 09:35:22 2018
@author: tih
"""
import os
import sys
from DataAccess import DownloadData
def main(Dir, Startdate='', Enddate='', latlim=[-60, 70], lonlim=[-180, 180], Waitbar = 1):
"""
This function downloads monthly ETmonitor data
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -60 and 70)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
"""
print '\nDownload monthly ETmonitor Interception data for the period %s till %s' %(Startdate, Enddate)
Type = "ei"
# Download data
DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Type, Waitbar)
if __name__ == '__main__':
main(sys.argv)
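# A minimal usage sketch of the downloader above (the output directory and date range
# are hypothetical examples):
#
#     main(r'C:/ETmonitor_data/', Startdate='2010-01-01', Enddate='2010-12-31',
#          latlim=[-10, 10], lonlim=[20, 40], Waitbar=1)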
| 25.4375
| 106
| 0.635135
|
8dfd466d3619045ce9c9fef81ef4d6bef9ce7675
| 1,466
|
py
|
Python
|
BookClub/tests/views/recommender_views/test_recommender_abstract_view.py
|
amir-rahim/BookClubSocialNetwork
|
b69a07cd33592f700214252a64c7c1c53845625d
|
[
"MIT"
] | 4
|
2022-02-04T02:11:48.000Z
|
2022-03-12T21:38:01.000Z
|
BookClub/tests/views/recommender_views/test_recommender_abstract_view.py
|
amir-rahim/BookClubSocialNetwork
|
b69a07cd33592f700214252a64c7c1c53845625d
|
[
"MIT"
] | 51
|
2022-02-01T18:56:23.000Z
|
2022-03-31T15:35:37.000Z
|
BookClub/tests/views/recommender_views/test_recommender_abstract_view.py
|
amir-rahim/BookClubSocialNetwork
|
b69a07cd33592f700214252a64c7c1c53845625d
|
[
"MIT"
] | null | null | null |
"""Unit testing for the Recommender Base view"""
from django.urls import reverse
from django.test import TestCase, tag
from BookClub.models import User, Club
@tag('views', 'recommendations', 'base')
class BaseUserRecommenderViewTestCase(TestCase):
"""Testing for the Recommender Base view"""
fixtures = [
"BookClub/tests/fixtures/default_users.json",
"BookClub/tests/fixtures/default_clubs.json",
]
def setUp(self):
self.user1 = User.objects.get(pk=1)
self.club = Club.objects.get(pk=1)
self.user_url = reverse('user_recommendations')
self.club_url = reverse('club_recommendations', kwargs={'club_url_name': self.club.club_url_name})
def test_user_url(self):
self.assertEqual(self.user_url, '/library/recommendations/')
def test_club_url(self):
self.assertEqual(self.club_url, '/club/' + self.club.club_url_name + '/recommendations/')
def test_correct_template_user_view(self):
self.client.login(username=self.user1.username, password="Password123")
response = self.client.get(self.user_url)
self.assertTemplateUsed(response, 'recommendations/recommendation_base_user.html')
def test_correct_template_club_view(self):
self.client.login(username=self.user1.username, password="Password123")
response = self.client.get(self.club_url)
self.assertTemplateUsed(response, 'recommendations/recommendation_base_club.html')
| 40.722222
| 106
| 0.720327
|
1e79c21463c173377375ac3775bdd5836b86037a
| 3,520
|
py
|
Python
|
app_dir/app_notes_and_tips.py
|
virginia4/app_run_local
|
5bcb8280dda777f972b09f49b420040db6bfeb77
|
[
"MIT"
] | null | null | null |
app_dir/app_notes_and_tips.py
|
virginia4/app_run_local
|
5bcb8280dda777f972b09f49b420040db6bfeb77
|
[
"MIT"
] | null | null | null |
app_dir/app_notes_and_tips.py
|
virginia4/app_run_local
|
5bcb8280dda777f972b09f49b420040db6bfeb77
|
[
"MIT"
] | null | null | null |
import dash_html_components as html
from . import app
about = """
On this page you can find useful tips on how to use the tool and get informed about updates,
issues and changes. The app depends heavily on your contributions, whether that is
a new idea for making the tool more useful or a report of an issue. Feel free to contact
me at enquiries@moleculardimensions.com for any issues regarding this tool.
"""
notes = """The tool is under development and unfortunately there some
MDL screens that are not yet available for optimisation. These screens are
the following:
"""
tips = """ Some tips when using the tool: """
about_html = [html.P(i) for i in about.split("\n\n")]
notes_html = [html.P(i) for i in notes.split("\n\n")]
tips_html = [html.P(i) for i in tips.split("\n\n")]
layout = [
html.Div(
[
html.Div(html.H1(app.secondary_title), id="secondary_itle"),
html.H2("About"),
# html.Img(src="assets/images/logos_combine.png", className="ramp_logo",
# height= 200, width = 380, style={'textAlign': 'justify'}),
# html.Img(src="assets/images/eu_logo.png", className="eu_logo",
# height= 50, width = 80, style={'textAlign': 'justify'}),
# html.Img(src="assets/images/surrey_logo.png", className="sur_logo",
# height= 70, width = 150, style={'textAlign': 'justify'}),
html.Div(about_html, className="basic_info_container"),
html.H2("Tips"),
html.Div(tips_html + [
html.Ol([
html.Li(html.P('Be careful when typing the code name of the screen and the hitwell. Spaces or misuse of capital letters might affect the performance.')),
html.Li(html.P('An error message will appear when something is not working right, but we might have missed a case. If nothing appears on your screen, it means that you came across a bug. That is great: it means that we can now receive your input and improve the tool.')),
html.Li([html.P('We look forward to improving the tool with your feedback. For all enquiries about how to use the tool, suggestions and reports of errors, please contact:'),
html.A('enquiries@moleculardimensions.com', href=' ',target="_blank")])
])], className="notes"),
html.H2("Notes"),
html.Div(notes_html + [
html.Ol([
html.Li(html.P('CryoSol')),
html.Li(html.P('MD1-47')),
html.Li(html.P('MD1-48')),
# html.Li(html.P('MD1-68')),
# html.Li(html.P('MD1-91-92')),
html.Li(html.P('MD1-93')),
html.Li(html.P('MD1-100')),
# html.Li(html.P('MD1-116_117')),
html.Li(html.P('MD1-118')),
html.Li(html.P('MD1-123')),
])], className="notes")], id="main_container",
**{'data-iframe-height': ''}, style={ 'width': '60%',
'padding': '20px',
'margin': '20px',
'justify-content': 'center','align-items': 'center',
# 'width': '60%',
# 'margin': 'auto', 'padding': '10px'
}
)
]
| 51.014493
| 299
| 0.536364
|
eead97ceb93d8ee5cc84a0a9cbab3dc4a894a739
| 1,621
|
py
|
Python
|
modelvshuman/datasets/info_mappings.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 158
|
2021-06-04T15:19:58.000Z
|
2022-03-30T00:31:28.000Z
|
modelvshuman/datasets/info_mappings.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 7
|
2021-07-20T03:57:34.000Z
|
2022-02-01T11:00:47.000Z
|
modelvshuman/datasets/info_mappings.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 14
|
2021-06-16T13:33:11.000Z
|
2022-03-29T15:04:09.000Z
|
from abc import ABC
class ImagePathToInformationMapping(ABC):
def __init__(self):
pass
def __call__(self, full_path):
pass
class ImageNetInfoMapping(ImagePathToInformationMapping):
"""
For ImageNet-like directory structures without sessions/conditions:
.../{category}/{img_name}
"""
def __call__(self, full_path):
session_name = "session-1"
img_name = full_path.split("/")[-1]
condition = "NaN"
category = full_path.split("/")[-2]
return session_name, img_name, condition, category
class ImageNetCInfoMapping(ImagePathToInformationMapping):
"""
For the ImageNet-C Dataset with path structure:
...{corruption function}/{corruption severity}/{category}/{img_name}
"""
def __call__(self, full_path):
session_name = "session-1"
parts = full_path.split("/")
img_name = parts[-1]
category = parts[-2]
severity = parts[-3]
corruption = parts[-4]
condition = "{}-{}".format(corruption, severity)
return session_name, img_name, condition, category
class InfoMappingWithSessions(ImagePathToInformationMapping):
"""
Directory/filename structure:
.../{session_name}/{something}_{something}_{something}_{condition}_{category}_{img_name}
"""
def __call__(self, full_path):
session_name = full_path.split("/")[-2]
img_name = full_path.split("/")[-1]
condition = img_name.split("_")[3]
category = img_name.split("_")[4]
return session_name, img_name, condition, category
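# A quick sanity check of the path parsing described in the docstrings above
# (the ImageNet-C style path is a made-up example):
#
#     mapping = ImageNetCInfoMapping()
#     session, img, condition, category = mapping(
#         "/data/imagenet-c/gaussian_noise/3/n01440764/img_0001.JPEG")
#     # -> ("session-1", "img_0001.JPEG", "gaussian_noise-3", "n01440764")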
| 28.438596
| 96
| 0.636027
|
e3b374f523813478fc7a31d204e877898d929145
| 1,421
|
py
|
Python
|
windows/capture_window_480.py
|
Anonymousey/bongbot
|
3498d379ef28206f3325691e340347baa14c2c97
|
[
"MIT"
] | null | null | null |
windows/capture_window_480.py
|
Anonymousey/bongbot
|
3498d379ef28206f3325691e340347baa14c2c97
|
[
"MIT"
] | null | null | null |
windows/capture_window_480.py
|
Anonymousey/bongbot
|
3498d379ef28206f3325691e340347baa14c2c97
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
capture_window_480.py
"""
import cygwinreg
import argparse
def set_registry(top, left, width, height):
'''Set the screen-capture-recorder registry settings
to the desired position and dimensions.
'''
cap = cygwinreg.OpenKey(
cygwinreg.HKEY_CURRENT_USER,
"Software\\screen-capture-recorder", 0, cygwinreg.KEY_ALL_ACCESS
)
values = ['capture_height', 'capture_width', 'start_x', 'start_y',]
#for value in values:
# v, t = cygwinreg.QueryValueEx(cap, value)
# print "{v} is {value}".format(v=v, value=value)
set_value(cap, 'start_x', left)
set_value(cap, 'start_y', top)
set_value(cap, 'capture_width', width)
set_value(cap, 'capture_height', height)
#never capture the mouse pointer.
set_value(cap, 'capture_mouse_default_1', 0)
def set_value(key, subkey, value):
#first echo the current value
v, t = cygwinreg.QueryValueEx(key, subkey)
print "{subkey} initial value {v} and type {t}".format(subkey=subkey, v=v, t=str(t))
cygwinreg.SetValueEx(key, subkey, 0, t, value)
v, t = cygwinreg.QueryValueEx(key, subkey)
print "{subkey} final value {v}".format(subkey=subkey, v=v)
def main():
#parser = argparse.ArgumentParser(description='Exercise bing translation api.')
#parser.add_argument('text', help='Input text to translate.', type=str)
#args = parser.parse_args()
set_registry(100, 400, 848, 480)
if __name__ == "__main__":
main()
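# A minimal sketch of capturing a different region (arguments are top, left, width,
# height in pixels; the 720p region below is a hypothetical example):
#
#     set_registry(0, 0, 1280, 720)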
| 30.234043
| 86
| 0.699507
|
275bf4c77ca02fa01e2141366668da9930d4be91
| 5,859
|
py
|
Python
|
kCharge-firmware/temperature.py
|
koalacreations/kCharge-firmware
|
6abbf5c3ba9eb4dba6877860f0d518b193ffa9bb
|
[
"MIT"
] | null | null | null |
kCharge-firmware/temperature.py
|
koalacreations/kCharge-firmware
|
6abbf5c3ba9eb4dba6877860f0d518b193ffa9bb
|
[
"MIT"
] | null | null | null |
kCharge-firmware/temperature.py
|
koalacreations/kCharge-firmware
|
6abbf5c3ba9eb4dba6877860f0d518b193ffa9bb
|
[
"MIT"
] | null | null | null |
import time
import os
import json
import machine
import onewire
import ds18x20
import ubinascii
from leds import YELLOW, OFF, RED, GREEN
def convert_sensor_str(sensor):
return ubinascii.hexlify(sensor).decode("utf8")
def convert_sensor_byte(sensor):
return ubinascii.unhexlify(sensor)
class TemperatureSensors:
_CALIBRATION_FILE_NAME = "temperature_calibration.json"
TEMP_RESOLUTION = 9
extra_sensors = 2 # how many extra (non channel) temp sensors we have
sensor_calibration = {}
temperature_data = {}
def __init__(self, status_leds, force_calibrate=False):
self.status_leds = status_leds
# the data bus is on GPIO 27
bus_pin = machine.Pin(27)
# create the onewire object
self.data_bus = onewire.OneWire(bus_pin)
self.temp_bus = ds18x20.DS18X20(self.data_bus)
# scan for devices on the bus
self.sensors = self.data_bus.scan()
print("Found {} temp sensors on the data bus.".format(len(self.sensors)))
for sensor in self.sensors:
# set each sensor to 9 bit resolution for fast reading
self.temp_bus.write_scratch(sensor, b"\x00\x00\x1f")
# check if the calibration exists and load it if it does
if self._CALIBRATION_FILE_NAME in os.listdir() and force_calibrate == False:
with open(self._CALIBRATION_FILE_NAME) as json_file:
self.sensor_calibration = json.load(json_file)
print(
"Found {} temp sensors in the calibration file.".format(
len(self.sensor_calibration)
)
)
# if len(self.sensor_calibration) != len(self.sensors) - self.extra_sensors:
# raise RuntimeError(
# "Sensor calibration data does not match the amount found on the bus! This may mean a hardware failure."
# )
else:
print(
"No temperature sensor calibration data. Calculating temperature baseline."
)
baseline = {}
baseline_loops = 3 # loops to perform baseline temperature calculation
for sensor in self.sensors:
baseline[convert_sensor_str(sensor)] = 0
for loop in range(baseline_loops):
self.temp_bus.convert_temp()
time.sleep(1)
for sensor in self.sensors:
baseline[convert_sensor_str(sensor)] += self.temp_bus.read_temp(
sensor
)
for sensor in self.sensors:
baseline[convert_sensor_str(sensor)] /= baseline_loops
print("Temperature baseline calculated, starting calibration.")
for channel in range(len(self.sensors) - self.extra_sensors):
channel += 1
ignore = []
for sensor in self.sensor_calibration.values():
ignore.append(sensor)
calibrated = self.calibrate_channel(channel, baseline, ignore)
if calibrated:
self.sensor_calibration[channel] = calibrated
with open(self._CALIBRATION_FILE_NAME, "w") as outfile:
json.dump(self.sensor_calibration, outfile)
print("Temperature sensors calibrated!")
def calibrate_channel(self, channel, baseline, ignore=None):
"""[Calibrates a specific channel's temperature sensor.]
Args:
channel ([number]): [The channel to calibrate.]
baseline ([number]): [The baseline temperature.]
Returns:
[type]: [A string of the sensor ID.]
"""
baseline_rise = 0.5 # rise above baseline in degrees C required to calibrate
max_calibration_loops = (
20 # max times to look for the temp rise specified above per channel
)
if ignore:
print("Ignoring:")
print(ignore)
print("Please press finger to channel {} sensor.".format(channel))
for x in range(max_calibration_loops):
self.temp_bus.convert_temp()
self.status_leds.set_channel(channel, YELLOW)
time.sleep(0.5)
self.status_leds.set_channel(channel, OFF)
time.sleep(0.5)
for sensor in self.sensors:
# if we should ignore the sensor then continue the loop
if convert_sensor_str(sensor) in ignore:
continue
temperature = self.temp_bus.read_temp(sensor)
if temperature > baseline[convert_sensor_str(sensor)] + baseline_rise:
print(
"Channel {} complete! Mapped to {}.".format(
channel, convert_sensor_str(sensor)
)
)
self.status_leds.set_channel(channel, GREEN)
return convert_sensor_str(sensor)
print("FAILED to calibrated sensor for channel {}.".format(channel))
self.status_leds.set_channel(channel, RED)
return None
def get_temperature(self, channel):
"""[Returns the latest temperature read for the channel.]
Args:
channel ([number]): [Channel to get.]
Returns:
[number]: [Temperature in degrees celcius.]
"""
try:
sensor_id = convert_sensor_byte(self.sensor_calibration[channel])
return self.temp_bus.read_temp(sensor_id)
except Exception as e:
return None
def request_temperatures(self, blocking=False):
"""[Requests a new temperature converion/update from the sensors.]"""
self.temp_bus.convert_temp()
if blocking:
time.sleep(0.75)
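# A minimal usage sketch (assumes a status-LED helper from leds.py that exposes
# set_channel(channel, colour); the StatusLeds name below is hypothetical):
#
#     leds = StatusLeds()
#     sensors = TemperatureSensors(leds)
#     sensors.request_temperatures(blocking=True)   # trigger a conversion and wait
#     print(sensors.get_temperature(1))             # channel 1 temperature in deg C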
| 35.295181
| 129
| 0.587984
|
ff0dd891e013296a27d8f9b39ee4743db9105dc8
| 16,798
|
py
|
Python
|
tsparser/ui.py
|
m4tx/techswarm-receiver
|
cf0a34c8bd2c98ce72ec3ab56231be556a55fb6b
|
[
"MIT"
] | null | null | null |
tsparser/ui.py
|
m4tx/techswarm-receiver
|
cf0a34c8bd2c98ce72ec3ab56231be556a55fb6b
|
[
"MIT"
] | null | null | null |
tsparser/ui.py
|
m4tx/techswarm-receiver
|
cf0a34c8bd2c98ce72ec3ab56231be556a55fb6b
|
[
"MIT"
] | null | null | null |
import _curses, curses
from bisect import bisect_left
import math
import os
from threading import Thread
from time import sleep
from tsparser.utils import Singleton, StatisticDataCollector
class UserInterface(metaclass=Singleton):
"""
User Interface is a singleton. Once run, it renders the UI until the app exits.
"""
def __init__(self, refreshing_frequency=30):
self.__REFRESHING_FREQUENCY = refreshing_frequency
def run(self):
Thread(target=self.__interface_thread, daemon=True).start()
def __interface_thread(self):
try:
self.__init_curses()
while True:
sleep(1/self.__REFRESHING_FREQUENCY)
self.__update_filter()
self.__process_events()
self.__render_frame()
except Exception as err:
error_message = '{}: {}'.format(err.__class__.__name__, err)
StatisticDataCollector().get_logger().log('ui', error_message)
curses.endwin()
print(error_message)
def __init_curses(self):
self.__screen = curses.initscr()
curses.start_color()
curses.curs_set(0)
self.__screen.nodelay(True)
self.__screen.keypad(True)
self.__SCREEN_MINIMAL_SIZE = 24, 80 # lines, cols
self.__last_color = 1
self.__TIMESTAMP_COLOR = self.__init_color_pair(
curses.COLOR_RED, curses.COLOR_BLACK)
self.__MODULE_NAME_COLOR = self.__init_color_pair(
curses.COLOR_BLUE, curses.COLOR_BLACK)
self.__INFO_BAR_DESC_COLOR = self.__init_color_pair(
curses.COLOR_BLACK, curses.COLOR_CYAN)
self.__FILTER_WINDOW_BACKGROUND = self.__init_color_pair(
curses.COLOR_WHITE, curses.COLOR_BLUE)
self.__FILTER_WINDOW_SELECTION = self.__init_color_pair(
curses.COLOR_BLACK, curses.COLOR_CYAN)
self.__logs_auto_scrolling = True
self.__scroll_position = 0
self.__log_index_to_last_line_no = list()
self.__cached_processed_logs = list()
self.__filter_window_active = False
self.__filter = dict()
self.__filter_selected_index = int()
self.__filter_selected_module = str()
StatisticDataCollector().get_logger().log('ui', 'User interface initialized!')
def __init_color_pair(self, fb, bg):
curses.init_pair(self.__last_color, fb, bg)
self.__last_color += 1
return self.__last_color - 1
def __update_filter(self):
for module_name in StatisticDataCollector().get_logger().get_all_modules():
if module_name not in self.__filter:
self.__filter[module_name] = True
def __process_events(self):
while True:
key_code = self.__screen.getch()
if key_code == curses.ERR:
return
if key_code == curses.KEY_RESIZE:
self.__delete_cached_logs()
continue
if self.__filter_window_active:
self.__filter_window_process_event(key_code)
else:
self.__main_window_process_event(key_code)
def __delete_cached_logs(self):
self.__cached_processed_logs.clear()
self.__log_index_to_last_line_no.clear()
def __filter_window_process_event(self, key_code):
if key_code == 27: # escape
self.__filter_window_active = False
elif key_code == ord(' '):
if self.__filter:
self.__filter[self.__filter_selected_module] = not self.__filter[self.__filter_selected_module]
self.__auto_scroll_position = True
self.__delete_cached_logs()
elif key_code == curses.KEY_UP:
self.__filter_selected_index -= 1
if self.__filter_selected_index == -1:
self.__filter_selected_index = len(self.__filter) - 1
elif key_code == curses.KEY_DOWN:
self.__filter_selected_index += 1
if self.__filter_selected_index == len(self.__filter):
self.__filter_selected_index = 0
def __main_window_process_event(self, key_code):
if key_code == curses.KEY_F2:
self.__logs_auto_scrolling = not self.__logs_auto_scrolling
elif key_code == curses.KEY_F3:
self.__filter_window_active = True
self.__filter_selected_index = 0
elif key_code == curses.KEY_F4:
StatisticDataCollector().get_logger().clear_logs()
self.__delete_cached_logs()
elif key_code == curses.KEY_F9:
curses.endwin()
os.kill(os.getpid(), 15)
elif key_code == curses.KEY_UP:
self.__logs_auto_scrolling = False
self.__scroll_position -= 1 # renderer will increase value if it is too small
elif key_code == curses.KEY_DOWN:
if self.__cached_processed_logs:
if self.__scroll_position >= self.__log_index_to_last_line_no[-1]:
self.__logs_auto_scrolling = True
else:
self.__scroll_position += 1
def __render_frame(self):
lines, cols = self.__screen.getmaxyx()
min_lines, min_cols = self.__SCREEN_MINIMAL_SIZE
if lines < min_lines or cols < min_cols:
self.__screen.clear()
self.__screen.addstr('Terminal size should be at least {}x{}!\n'.format(min_cols, min_lines))
self.__screen.refresh()
return
statistics_window_width = 40
logs_windows = self.__screen.subwin(lines - 1, cols - statistics_window_width, 0, 0)
self.__render_logs_window(logs_windows)
statistics_window = self.__screen.subwin(lines - 1, statistics_window_width, 0, cols - statistics_window_width)
self.__render_statistics_window(statistics_window)
info_bar_window = self.__screen.subwin(1, cols, lines - 1, 0)
self.__render_info_bar(info_bar_window)
if self.__filter_window_active:
width, height = 60, 20
filter_window = self.__screen.subwin(height, width, (lines - height) // 2, (cols - width) // 2)
self.__render_filter_window(filter_window)
self.__screen.refresh()
def __render_logs_window(self, window):
window.clear()
self.__draw_entitled_box(window, 'Logs')
sub_win = self.__get_sub_window(window)
lines, cols = sub_win.getmaxyx()
selected_modules = [module_name for module_name in self.__filter if self.__filter[module_name]]
logs = StatisticDataCollector().get_logger().get_logs(selected_modules)
new_logs = logs[len(self.__cached_processed_logs):]
self.__cache_new_log_entries(new_logs, cols)
self.__render_visible_log_entries(sub_win)
def __cache_new_log_entries(self, new_entries, line_width):
for timestamp, module_name, message in new_entries:
timestamp_str = '{:02}:{:02}:{:02}.{:06}'.format(timestamp.hour, timestamp.minute,
timestamp.second, timestamp.microsecond)
module_name = module_name.replace('\n', '<nl> ')
self.__cached_processed_logs.append((timestamp_str, module_name, message))
whole_message = timestamp_str + ' ' + module_name + ' ' + message
lines_needed = 0
for pseudo_line in whole_message.split('\n')[:-1]:
pseudo_line += '\n'
lines_needed += math.ceil(len(pseudo_line) / line_width)
lines_needed += math.ceil(len(whole_message.split('\n')[-1]) / line_width)
previous_entry_last_line = self.__log_index_to_last_line_no[-1] if self.__log_index_to_last_line_no else -1
self.__log_index_to_last_line_no.append(previous_entry_last_line + lines_needed)
def __render_visible_log_entries(self, window):
lines, cols = window.getmaxyx()
lines -= 1 # last line should be empty (the cursor will be there);
# otherwise the cursor would land below the window, which causes a curses error
if not self.__cached_processed_logs:
window.addstr(lines, 0, '(no logs)', curses.A_DIM)
return
if self.__logs_auto_scrolling:
self.__scroll_position = self.__log_index_to_last_line_no[-1]
self.__scroll_position = max(self.__scroll_position, lines - 1)
first_line_no = max(0, self.__scroll_position - lines + 1)
first_log_entry_index = bisect_left(self.__log_index_to_last_line_no, first_line_no)
log_entry_index = first_log_entry_index
while log_entry_index < len(self.__cached_processed_logs):
last_entry_line_no = self.__log_index_to_last_line_no[log_entry_index]
first_entry_line_no = self.__log_index_to_last_line_no[log_entry_index-1] + 1 if log_entry_index > 0 else 0
self.__render_log_entry(window, log_entry_index,
max(0, first_line_no - first_entry_line_no),
max(0, last_entry_line_no - self.__scroll_position))
log_entry_index += 1
if last_entry_line_no >= self.__scroll_position:
break
if self.__scroll_position >= self.__log_index_to_last_line_no[-1]:
info_message = '(end)'
else:
left_lines_count = self.__log_index_to_last_line_no[-1] - self.__scroll_position
left_lines_count = str(left_lines_count) if left_lines_count < 10**9 else '>=10e9'
info_message = '({} more lines)'.format(left_lines_count)
window.addstr(lines, 0, info_message, curses.A_DIM)
def __render_log_entry(self, window, log_entry_index, omitted_first_lines, omitted_last_lines):
lines, cols = window.getmaxyx()
timestamp_str, module_name, message = self.__cached_processed_logs[log_entry_index]
colored_prefix = timestamp_str + ' ' + module_name + ' '
whole_message = colored_prefix + message
entry_lines = list()
while len(whole_message) > 0:
next_new_line = whole_message.find('\n') + 1
if next_new_line == 0:
next_new_line = cols
split_point = min(cols, len(whole_message), next_new_line)
entry_lines.append(whole_message[:split_point])
whole_message = whole_message[split_point:]
if len(entry_lines[-1]) < cols and not entry_lines[-1].endswith('\n'):
entry_lines[-1] += '\n'
if omitted_first_lines + omitted_last_lines >= len(entry_lines):
return
if omitted_first_lines == 0:
if len(colored_prefix) <= cols:
rest_of_first_line = entry_lines[0][len(colored_prefix):]
window.addstr(timestamp_str + ' ', curses.color_pair(self.__TIMESTAMP_COLOR))
window.addstr(module_name + ' ', curses.color_pair(self.__MODULE_NAME_COLOR))
window.addstr(rest_of_first_line)
else:
window.addstr(entry_lines[0])
entry_lines = entry_lines[max(1, omitted_first_lines):len(entry_lines)-omitted_last_lines]
for line in entry_lines:
window.addstr(line)
def __render_statistics_window(self, window):
window.clear()
self.__draw_entitled_box(window, 'Statistics')
sub_win = self.__get_sub_window(window)
stats_to_display = self.__prepare_stats()
for name, value in stats_to_display:
sub_win.addstr('{}: {}\n'.format(name, value))
self.__render_progress_window(window)
@staticmethod
def __prepare_stats():
def timedelta_to_str(timedelta_obj):
seconds = timedelta_obj.total_seconds()
if seconds < 1:
return '<1 sec'
seconds = int(seconds)
result = '{} sec'.format(seconds % 60)
if seconds >= 60:
result = '{} min {}'.format(seconds // 60, result)
return result
def data_amount_to_str(data_amount):
steps = (
(1, 'B'),
(10**3, 'kB'),
(10**6, 'MB'),
(10**9, 'GB')
)
last_good_result = '0 B'
for bound, unit in steps:
if data_amount < bound:
return last_good_result
last_good_result = '{:.3f} {}'.format(data_amount / bound, unit)
return last_good_result
sdc = StatisticDataCollector()
stats_scheme = (
('Time since last receiving', timedelta_to_str(sdc.get_time_since_last_data_receiving())),
('Time since start', timedelta_to_str(sdc.get_time_since_start())),
('Data receiving speed', data_amount_to_str(sdc.get_data_receiving_speed())+'/s'),
('Total data received', data_amount_to_str(sdc.get_total_data_received())),
('Queued requests', str(sdc.get_count_of_queued_requests())),
('Total sent requests', str(sdc.get_total_count_of_sent_requests()))
)
return stats_scheme
def __render_progress_window(self, window):
sdc = StatisticDataCollector()
progress = sdc.get_progress()
title = sdc.get_progress_title()
subtitle = sdc.get_progress_subtitle()
if progress == -1:
return
# Create progress window
lines, cols = window.getmaxyx()
beg_y, beg_x = window.getbegyx()
progress_win = window.subwin(beg_y + lines - 5, beg_x)
self.__draw_entitled_box(progress_win, 'Progress',
0, 0, 0, 0, curses.ACS_SSSB, curses.ACS_SBSS)
# Subwindow
sub_win = self.__get_sub_window(progress_win)
lines, cols = sub_win.getmaxyx()
# Draw window content
sub_win.insstr(0, int(cols / 2 - len(title) / 2), title)
self.__draw_progress_bar(progress, sub_win, 1, cols)
sub_win.insstr(2, int(cols / 2 - len(subtitle) / 2), subtitle)
@staticmethod
def __draw_progress_bar(progress, window, y, width):
progress_str = '{}%'.format(progress)
s = ' ' * width
x_pos = int(width / 2 - len(progress_str) / 2)
s = s[:x_pos] + progress_str + s[x_pos:]
for i in range(width):
fill = i / width < progress / 100
window.insch(y, i, s[i],
curses.A_REVERSE if fill else curses.A_NORMAL)
@staticmethod
def __draw_entitled_box(window, title, *border_args):
window.border(*border_args)
window.addstr(0, 1, title)
@staticmethod
def __get_sub_window(window, margin=1):
lines, cols = window.getmaxyx()
beg_y, beg_x = window.getbegyx()
return window.subwin(lines - margin * 2, cols - margin * 2, beg_y + margin, beg_x + margin)
def __render_info_bar(self, window):
window.clear()
info_bar_scheme = (
('F2', '{} auto scrolling'.format('Disable' if self.__logs_auto_scrolling else 'Enable')),
('F3', 'Filter'),
('F4', 'Clear'),
('F9', 'Exit'),
('↑↓', 'Scroll')
)
for key, description in info_bar_scheme:
window.addstr(key)
window.addstr(description, curses.color_pair(self.__INFO_BAR_DESC_COLOR))
def __render_filter_window(self, window):
window.clear()
window.bkgd(' ', curses.color_pair(self.__FILTER_WINDOW_BACKGROUND))
self.__draw_entitled_box(window, 'Filter')
sub_win = self.__get_sub_window(window)
sub_win.addstr('Please use arrows, space and escape to navigate.\n\n')
self.__render_filter_list(sub_win)
def __render_filter_list(self, window):
lines, cols = window.getmaxyx()
for i, (module_name, is_checked) in enumerate(self.__filter.items()):
color = self.__FILTER_WINDOW_BACKGROUND
is_entry_selected = self.__filter_selected_index == i
if is_entry_selected:
color = self.__FILTER_WINDOW_SELECTION
self.__filter_selected_module = module_name
prefix = '[x] ' if is_checked else '[ ] '
module_name_needed_length = cols - len(prefix)
if len(module_name) > module_name_needed_length:
module_name = module_name[:module_name_needed_length-3] + '...'
else:
module_name += ' ' * (module_name_needed_length - len(module_name))
try: # TODO if there are many modules (False on May 18th, 2015), implement scrolling for this window
window.addstr(prefix + module_name, curses.color_pair(color))
except _curses.error:
break
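# A minimal usage sketch (UserInterface is a singleton; refreshing_frequency is in Hz):
#
#     UserInterface(refreshing_frequency=30).run()   # starts the curses UI in a daemon thread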
| 44.439153
| 119
| 0.625134
|
e2a316bf5b0432533771df84ad8e6b880e422544
| 990
|
py
|
Python
|
src/training.py
|
Sirius207/Subtractor
|
ebd1c57a7e3392f0516125db30fee5c2bc9e6936
|
[
"MIT"
] | null | null | null |
src/training.py
|
Sirius207/Subtractor
|
ebd1c57a7e3392f0516125db30fee5c2bc9e6936
|
[
"MIT"
] | null | null | null |
src/training.py
|
Sirius207/Subtractor
|
ebd1c57a7e3392f0516125db30fee5c2bc9e6936
|
[
"MIT"
] | null | null | null |
def train(DATA, BATCH_SIZE, trainingOutputPath, model):
x_train = DATA[0][0]
y_train = DATA[0][1]
x_val = DATA[1][0]
y_val = DATA[1][1]
training_log = list()
with open(trainingOutputPath, 'w') as output:
output.write('loss,acc,val_loss,val_acc\n')
for iteration in range(200):
print()
print('-' * 50)
print('Iteration', iteration)
history_callback = model.fit(x_train, y_train,
batch_size=BATCH_SIZE,
epochs=1,
validation_data=(x_val, y_val))
training_log.append(history_callback.history)
output.write(str(history_callback.history['loss'][0]) + ',')
output.write(str(history_callback.history['acc'][0]) + ',')
output.write(str(history_callback.history['val_loss'][0]) + ',')
output.write(str(history_callback.history['val_acc'][0]) + '\n')
output.close()
return model
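# A minimal usage sketch (assumes a compiled Keras model and numpy arrays; all names
# below are hypothetical):
#
#     DATA = ((x_train, y_train), (x_val, y_val))
#     model = train(DATA, BATCH_SIZE=128,
#                   trainingOutputPath='logs/training.csv', model=model)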
| 38.076923
| 76
| 0.568687
|
500432371e1577edeb28e4aaa9403f8e52a213aa
| 45,158
|
py
|
Python
|
dff_rfcn/core/module.py
|
faisalnazir/Deep-Feature-Flow
|
489959fa4dc2a287ec28237d9eb4eec46ebce698
|
[
"MIT"
] | null | null | null |
dff_rfcn/core/module.py
|
faisalnazir/Deep-Feature-Flow
|
489959fa4dc2a287ec28237d9eb4eec46ebce698
|
[
"MIT"
] | null | null | null |
dff_rfcn/core/module.py
|
faisalnazir/Deep-Feature-Flow
|
489959fa4dc2a287ec28237d9eb4eec46ebce698
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
"""A `MutableModule` implement the `BaseModule` API, and allows input shape
varying with training iterations. If shapes vary, executors will rebind,
using shared arrays from the initial module binded with maximum shape.
"""
import time
import logging
import warnings
from mxnet import context as ctx
from mxnet.initializer import Uniform, InitDesc
from mxnet.module.base_module import BaseModule, _check_input_names, _parse_data_desc, _as_list
from mxnet.model import _create_kvstore, _initialize_kvstore, _update_params, _update_params_on_kvstore, load_checkpoint, BatchEndParam
from mxnet import metric
from .DataParallelExecutorGroup import DataParallelExecutorGroup
from mxnet import ndarray as nd
from mxnet import optimizer as opt
class Module(BaseModule):
"""Module is a basic module that wrap a `Symbol`. It is functionally the same
as the `FeedForward` model, except under the module API.
Parameters
----------
symbol : Symbol
data_names : list of str
Default is `('data')` for a typical model used in image classification.
label_names : list of str
Default is `('softmax_label')` for a typical model used in image
classification.
logger : Logger
Default is `logging`.
context : Context or list of Context
Default is `cpu()`.
work_load_list : list of number
Default `None`, indicating uniform workload.
fixed_param_names: list of str
Default `None`, indicating no network parameters are fixed.
state_names : list of str
states are similar to data and label, but not provided by data iterator.
Instead they are initialized to 0 and can be set by set_states()
"""
def __init__(self, symbol, data_names=('data',), label_names=('softmax_label',),
logger=logging, context=ctx.cpu(), work_load_list=None,
fixed_param_names=None, state_names=None):
super(Module, self).__init__(logger=logger)
if isinstance(context, ctx.Context):
context = [context]
self._context = context
if work_load_list is None:
work_load_list = [1] * len(self._context)
assert len(work_load_list) == len(self._context)
self._work_load_list = work_load_list
self._symbol = symbol
data_names = list(data_names) if data_names is not None else []
label_names = list(label_names) if label_names is not None else []
state_names = list(state_names) if state_names is not None else []
fixed_param_names = list(fixed_param_names) if fixed_param_names is not None else []
_check_input_names(symbol, data_names, "data", True)
_check_input_names(symbol, label_names, "label", False)
_check_input_names(symbol, state_names, "state", True)
_check_input_names(symbol, fixed_param_names, "fixed_param", True)
arg_names = symbol.list_arguments()
input_names = data_names + label_names + state_names
self._param_names = [x for x in arg_names if x not in input_names]
self._fixed_param_names = fixed_param_names
self._aux_names = symbol.list_auxiliary_states()
self._data_names = data_names
self._label_names = label_names
self._state_names = state_names
self._output_names = symbol.list_outputs()
self._arg_params = None
self._aux_params = None
self._params_dirty = False
self._optimizer = None
self._kvstore = None
self._update_on_kvstore = None
self._updater = None
self._preload_opt_states = None
self._grad_req = None
self._exec_group = None
self._data_shapes = None
self._label_shapes = None
@staticmethod
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
"""Create a model from previously saved checkpoint.
Parameters
----------
prefix : str
path prefix of saved model files. You should have
"prefix-symbol.json", "prefix-xxxx.params", and
optionally "prefix-xxxx.states", where xxxx is the
epoch number.
epoch : int
epoch to load.
load_optimizer_states : bool
whether to load optimizer states. Checkpoint needs
to have been made with save_optimizer_states=True.
data_names : list of str
Default is `('data')` for a typical model used in image classification.
label_names : list of str
Default is `('softmax_label')` for a typical model used in image
classification.
logger : Logger
Default is `logging`.
context : Context or list of Context
Default is `cpu()`.
work_load_list : list of number
Default `None`, indicating uniform workload.
fixed_param_names: list of str
Default `None`, indicating no network parameters are fixed.
"""
sym, args, auxs = load_checkpoint(prefix, epoch)
mod = Module(symbol=sym, **kwargs)
mod._arg_params = args
mod._aux_params = auxs
mod.params_initialized = True
if load_optimizer_states:
mod._preload_opt_states = '%s-%04d.states'%(prefix, epoch)
return mod
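# A minimal loading sketch (assumes "mymodel-symbol.json" and "mymodel-0010.params"
# written by save_checkpoint below; the prefix and epoch are hypothetical):
#
#     mod = Module.load('mymodel', 10, data_names=('data',), label_names=('softmax_label',))
#     # bind() must still be called (and init_optimizer() for training) before use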
def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
"""Save current progress to checkpoint.
Use mx.callback.module_checkpoint as epoch_end_callback to save during training.
Parameters
----------
prefix : str
The file prefix to checkpoint to
epoch : int
The current epoch number
save_optimizer_states : bool
Whether to save optimizer states for continue training
"""
self._symbol.save('%s-symbol.json'%prefix)
param_name = '%s-%04d.params' % (prefix, epoch)
self.save_params(param_name)
logging.info('Saved checkpoint to \"%s\"', param_name)
if save_optimizer_states:
state_name = '%s-%04d.states' % (prefix, epoch)
self.save_optimizer_states(state_name)
logging.info('Saved optimizer state to \"%s\"', state_name)
def _reset_bind(self):
"""Internal function to reset binded state."""
self.binded = False
self._exec_group = None
self._data_shapes = None
self._label_shapes = None
@property
def data_names(self):
"""A list of names for data required by this module."""
return self._data_names
@property
def label_names(self):
"""A list of names for labels required by this module."""
return self._label_names
@property
def output_names(self):
"""A list of names for the outputs of this module."""
return self._output_names
@property
def data_shapes(self):
"""Get data shapes.
Returns
-------
A list of `(name, shape)` pairs.
"""
assert self.binded
return self._data_shapes
@property
def label_shapes(self):
"""Get label shapes.
Returns
-------
A list of `(name, shape)` pairs. The return value could be `None` if
the module does not need labels, or if the module is not binded for
training (in this case, label information is not available).
"""
assert self.binded
return self._label_shapes
@property
def output_shapes(self):
"""Get output shapes.
Returns
-------
A list of `(name, shape)` pairs.
"""
assert self.binded
return self._exec_group.get_output_shapes()
def get_params(self):
"""Get current parameters.
Returns
-------
`(arg_params, aux_params)`, each a dictionary of name to parameters (in
`NDArray`) mapping.
"""
assert self.binded and self.params_initialized
if self._params_dirty:
self._sync_params_from_devices()
return (self._arg_params, self._aux_params)
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False):
"""Initialize the parameters and auxiliary states.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not None, should be a dictionary of existing arg_params. Initialization
will be copied from that.
aux_params : dict
If not None, should be a dictionary of existing aux_params. Initialization
will be copied from that.
allow_missing : bool
If true, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If true, will force re-initialize even if already initialized.
"""
if self.params_initialized and not force_init:
warnings.warn("Parameters already initialized and force_init=False. "
"init_params call ignored.", stacklevel=2)
return
assert self.binded, 'call bind before initializing the parameters'
def _impl(name, arr, cache):
"""Internal helper for parameter initialization"""
if cache is not None:
if name in cache:
cache_arr = cache[name]
# just in case the cached array is just the target itself
if cache_arr is not arr:
cache_arr.copyto(arr)
else:
if not allow_missing:
raise RuntimeError("%s is not presented" % name)
if initializer is not None:
initializer(name, arr)
else:
initializer(name, arr)
attrs = self._symbol.attr_dict()
for name, arr in list(self._arg_params.items()):
desc = InitDesc(name, attrs.get(name, None))
_impl(desc, arr, arg_params)
for name, arr in list(self._aux_params.items()):
desc = InitDesc(name, attrs.get(name, None))
_impl(desc, arr, aux_params)
self.params_initialized = True
self._params_dirty = False
# copy the initialized parameters to devices
self._exec_group.set_params(self._arg_params, self._aux_params)
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True):
"""Assign parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If true, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If true, will force re-initialize even if already initialized.
Examples
--------
An example of setting module parameters::
>>> sym, arg_params, aux_params = \
>>> mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
if not allow_missing:
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
return
if self.params_initialized and not force_init:
warnings.warn("Parameters already initialized and force_init=False. "
"set_params call ignored.", stacklevel=2)
return
self._exec_group.set_params(arg_params, aux_params)
# because we didn't update self._arg_params, they are dirty now.
self._params_dirty = True
self.params_initialized = True
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Bind the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is `data_iter.provide_data`.
label_shapes : list of (str, tuple)
Typically is `data_iter.provide_label`.
for_training : bool
Default is `True`. Whether the executors should be bind for training.
inputs_need_grad : bool
Default is `False`. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is `False`. This function does nothing if the executors are already
binded. But with this `True`, the executors will be forced to rebind.
shared_module : Module
Default is `None`. This is used in bucketing. When not `None`, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
"""
# force rebinding is typically used when one want to switch from
# training to prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already binded, ignoring bind()')
return
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.binded = True
self._grad_req = grad_req
if not for_training:
assert not inputs_need_grad
else:
pass
# this is not True, as some module might not contains a loss function
# that consumes the labels
# assert label_shapes is not None
# self._data_shapes, self._label_shapes = _parse_data_desc(
# self.data_names, self.label_names, data_shapes, label_shapes)
self._data_shapes, self._label_shapes = list(zip(*[_parse_data_desc(self.data_names, self.label_names, data_shape, label_shape)
for data_shape, label_shape in zip(data_shapes, label_shapes)]))
if self._label_shapes.count(None) == len(self._label_shapes):
self._label_shapes = None
if shared_module is not None:
assert isinstance(shared_module, Module) and \
shared_module.binded and shared_module.params_initialized
shared_group = shared_module._exec_group
else:
shared_group = None
self._exec_group = DataParallelExecutorGroup(self._symbol, self._context,
self._work_load_list, self._data_shapes,
self._label_shapes, self._param_names,
for_training, inputs_need_grad,
shared_group, logger=self.logger,
fixed_param_names=self._fixed_param_names,
grad_req=grad_req,
state_names=self._state_names)
# self._total_exec_bytes = self._exec_group._total_exec_bytes
if shared_module is not None:
self.params_initialized = True
self._arg_params = shared_module._arg_params
self._aux_params = shared_module._aux_params
elif self.params_initialized:
# if the parameters are already initialized, we are re-binding
# so automatically copy the already initialized params
self._exec_group.set_params(self._arg_params, self._aux_params)
else:
assert self._arg_params is None and self._aux_params is None
param_arrays = [
nd.zeros(x[0].shape, dtype=x[0].dtype)
for x in self._exec_group.param_arrays
]
self._arg_params = {name:arr for name, arr in list(zip(self._param_names, param_arrays))}
aux_arrays = [
nd.zeros(x[0].shape, dtype=x[0].dtype)
for x in self._exec_group.aux_arrays
]
self._aux_params = {name:arr for name, arr in list(zip(self._aux_names, aux_arrays))}
if shared_module is not None and shared_module.optimizer_initialized:
self.borrow_optimizer(shared_module)
def reshape(self, data_shapes, label_shapes=None):
"""Reshape the module for new input shapes.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is `data_iter.provide_data`.
label_shapes : list of (str, tuple)
Typically is `data_iter.provide_label`.
"""
assert self.binded
# self._data_shapes, self._label_shapes = _parse_data_desc(
# self.data_names, self.label_names, data_shapes, label_shapes)
self._data_shapes, self._label_shapes = list(zip(*[_parse_data_desc(self.data_names, self.label_names, data_shape, label_shape)
for data_shape, label_shape in zip(data_shapes, label_shapes)]))
self._exec_group.reshape(self._data_shapes, self._label_shapes)
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Install and initialize optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default `False`, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring...')
return
(kvstore, update_on_kvstore) = \
_create_kvstore(kvstore, len(self._context), self._arg_params)
batch_size = self._exec_group.batch_size
if kvstore and 'dist' in kvstore.type and '_sync' in kvstore.type:
batch_size *= kvstore.num_workers
rescale_grad = 1.0/batch_size
if isinstance(optimizer, str):
idx2name = {}
if update_on_kvstore:
idx2name.update(enumerate(self._exec_group.param_names))
else:
for k in range(len(self._context)):
idx2name.update({i*len(self._context)+k: n
for i, n in enumerate(self._exec_group.param_names)})
optimizer_params = dict(optimizer_params)
if 'rescale_grad' not in optimizer_params:
optimizer_params['rescale_grad'] = rescale_grad
optimizer = opt.create(optimizer,
sym=self.symbol, param_idx2name=idx2name,
**optimizer_params)
else:
assert isinstance(optimizer, opt.Optimizer)
if optimizer.rescale_grad != rescale_grad:
#pylint: disable=no-member
warnings.warn(
"Optimizer created manually outside Module but rescale_grad " +
"is not normalized to 1.0/batch_size/num_workers (%s vs. %s). "%(
optimizer.rescale_grad, rescale_grad) +
"Is this intended?", stacklevel=2)
self._optimizer = optimizer
self._kvstore = kvstore
self._update_on_kvstore = update_on_kvstore
self._updater = None
if kvstore:
# copy initialized local parameters to kvstore
_initialize_kvstore(kvstore=kvstore,
param_arrays=self._exec_group.param_arrays,
arg_params=self._arg_params,
param_names=self._param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(self._optimizer)
else:
self._updater = opt.get_updater(optimizer)
self.optimizer_initialized = True
if self._preload_opt_states is not None:
self.load_optimizer_states(self._preload_opt_states)
self._preload_opt_states = None
def borrow_optimizer(self, shared_module):
"""Borrow optimizer from a shared module. Used in bucketing, where exactly the same
optimizer (esp. kvstore) is used.
Parameters
----------
shared_module : Module
"""
assert shared_module.optimizer_initialized
self._optimizer = shared_module._optimizer
self._kvstore = shared_module._kvstore
self._update_on_kvstore = shared_module._update_on_kvstore
self._updater = shared_module._updater
self.optimizer_initialized = True
def forward(self, data_batch, is_train=None):
"""Forward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is `None`, which means `is_train` takes the value of `self.for_training`.
"""
assert self.binded and self.params_initialized
self._exec_group.forward(data_batch, is_train)
def backward(self, out_grads=None):
"""Backward computation.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.binded and self.params_initialized
self._exec_group.backward(out_grads=out_grads)
def update(self):
"""Update parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
self._params_dirty = True
if self._update_on_kvstore:
_update_params_on_kvstore(self._exec_group.param_arrays,
self._exec_group.grad_arrays,
self._kvstore)
else:
_update_params(self._exec_group.param_arrays,
self._exec_group.grad_arrays,
updater=self._updater,
num_device=len(self._context),
kvstore=self._kvstore)
def get_outputs(self, merge_multi_context=True):
"""Get outputs of the previous forward computation.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look as if they came from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized
return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look as if they came from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[grad1, grad2]`. Otherwise, it
is like `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._exec_group.get_input_grads(merge_multi_context=merge_multi_context)
def get_states(self, merge_multi_context=True):
"""Get states from all devices
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the states
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look as if they came from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized
return self._exec_group.get_states(merge_multi_context=merge_multi_context)
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
self._exec_group.set_states(states, value)
def update_metric(self, eval_metric, labels):
"""Evaluate and accumulate evaluation metric on outputs of the last forward computation.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically `data_batch.label`.
"""
self._exec_group.update_metric(eval_metric, labels)
def _sync_params_from_devices(self):
"""Synchronize parameters from devices to CPU. This function should be called after
calling `update` that updates the parameters on the devices, before one can read the
latest parameters from `self._arg_params` and `self._aux_params`.
"""
self._exec_group.get_params(self._arg_params, self._aux_params)
self._params_dirty = False
def save_optimizer_states(self, fname):
"""Save optimizer (updater) state to file
Parameters
----------
fname : str
Path to output states file.
"""
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.save_optimizer_states(fname)
else:
with open(fname, 'wb') as fout:
fout.write(self._updater.get_states())
def load_optimizer_states(self, fname):
"""Load optimizer (updater) state from file
Parameters
----------
fname : str
Path to input states file.
"""
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.load_optimizer_states(fname)
else:
self._updater.set_states(open(fname, 'rb').read())
def install_monitor(self, mon):
""" Install monitor on all executors """
assert self.binded
self._exec_group.install_monitor(mon)
class MutableModule(BaseModule):
"""A mutable module is a module that supports variable input data.
Parameters
----------
symbol : Symbol
data_names : list of str
label_names : list of str
logger : Logger
context : Context or list of Context
work_load_list : list of number
max_data_shapes : list of (name, shape) tuple, designating inputs whose shapes may vary
max_label_shapes : list of (name, shape) tuple, designating inputs whose shapes may vary
fixed_param_prefix : list of str, indicating fixed parameters
"""
def __init__(self, symbol, data_names, label_names,
logger=logging, context=ctx.cpu(), work_load_list=None,
max_data_shapes=None, max_label_shapes=None, fixed_param_prefix=None):
super(MutableModule, self).__init__(logger=logger)
self._symbol = symbol
self._data_names = data_names
self._label_names = label_names
self._context = context
self._work_load_list = work_load_list
self._curr_module = None
self._max_data_shapes = max_data_shapes
self._max_label_shapes = max_label_shapes
self._fixed_param_prefix = fixed_param_prefix
fixed_param_names = list()
if fixed_param_prefix is not None:
for name in self._symbol.list_arguments():
for prefix in self._fixed_param_prefix:
if name.startswith(prefix):
fixed_param_names.append(name)
self._fixed_param_names = fixed_param_names
self._preload_opt_states = None
def _reset_bind(self):
self.binded = False
self._curr_module = None
@property
def data_names(self):
return self._data_names
@property
def output_names(self):
return self._symbol.list_outputs()
@property
def data_shapes(self):
assert self.binded
return self._curr_module.data_shapes
@property
def label_shapes(self):
assert self.binded
return self._curr_module.label_shapes
@property
def output_shapes(self):
assert self.binded
return self._curr_module.output_shapes
def get_params(self):
assert self.binded and self.params_initialized
return self._curr_module.get_params()
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False):
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
self._curr_module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init)
self.params_initialized = True
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'):
# in case we already initialized params, keep it
if self.params_initialized:
arg_params, aux_params = self.get_params()
# force rebinding is typically used when one wants to switch from
# the training phase to the prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already binded, ignoring bind()')
return
assert shared_module is None, 'shared_module for MutableModule is not supported'
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.binded = True
max_shapes_dict = dict()
if self._max_data_shapes is not None:
max_shapes_dict.update(dict(self._max_data_shapes[0]))
if self._max_label_shapes is not None:
max_shapes_dict.update(dict(self._max_label_shapes[0]))
max_data_shapes = list()
for name, shape in data_shapes[0]:
if name in max_shapes_dict:
max_data_shapes.append((name, max_shapes_dict[name]))
else:
max_data_shapes.append((name, shape))
max_label_shapes = list()
if not label_shapes.count(None) == len(label_shapes):
for name, shape in label_shapes[0]:
if name in max_shapes_dict:
max_label_shapes.append((name, max_shapes_dict[name]))
else:
max_label_shapes.append((name, shape))
if len(max_label_shapes) == 0:
max_label_shapes = None
module = Module(self._symbol, self._data_names, self._label_names, logger=self.logger,
context=self._context, work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names)
module.bind([max_data_shapes for _ in range(len(self._context))], [max_label_shapes for _ in range(len(self._context))],
for_training, inputs_need_grad, force_rebind=False, shared_module=None)
self._curr_module = module
# copy back saved params, if already initialized
if self.params_initialized:
self.set_params(arg_params, aux_params)
def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
"""Save current progress to checkpoint.
Use mx.callback.module_checkpoint as epoch_end_callback to save during training.
Parameters
----------
prefix : str
The file prefix to checkpoint to
epoch : int
The current epoch number
save_optimizer_states : bool
Whether to save optimizer states so that training can be continued later
"""
self._curr_module.save_checkpoint(prefix, epoch, save_optimizer_states)
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
self._curr_module._preload_opt_states = self._preload_opt_states
self._curr_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
self.optimizer_initialized = True
def fit(self, train_data, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local',
optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
eval_end_callback=None,
eval_batch_end_callback=None, initializer=Uniform(0.01),
arg_params=None, aux_params=None, allow_missing=False,
force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
validation_metric=None, monitor=None, prefix=None):
"""Train the module parameters.
Parameters
----------
train_data : DataIter
eval_data : DataIter
If not `None`, it will be used as the validation set to evaluate the performance
after each epoch.
eval_metric : str or EvalMetric
Default `'acc'`. The performance measure used to display during training.
epoch_end_callback : function or list of function
Each callback will be called with the current `epoch`, `symbol`, `arg_params`
and `aux_params`.
batch_end_callback : function or list of function
Each callback will be called with a `BatchEndParam`.
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The parameters for the optimizer constructor.
The default value is not a `dict`, just to avoid pylint warning on dangerous
default values.
eval_end_callback : function or list of function
These will be called at the end of each full evaluation, with the metrics over
the entire evaluation set.
eval_batch_end_callback : function or list of function
These will be called at the end of each minibatch during evaluation
initializer : Initializer
Will be called to initialize the module parameters if not already initialized.
arg_params : dict
Default `None`, if not `None`, should be existing parameters from a trained
model or loaded from a checkpoint (previously saved model). In this case,
the value here will be used to initialize the module parameters, unless they
are already initialized by the user via a call to `init_params` or `fit`.
`arg_params` has higher priority than `initializer`.
aux_params : dict
Default `None`. Similar to `arg_params`, except for auxiliary states.
allow_missing : bool
Default `False`. Indicates whether missing parameters are allowed when `arg_params`
and `aux_params` are not `None`. If this is `True`, the missing parameters
will be initialized via the `initializer`.
force_rebind : bool
Default `False`. Whether to force rebinding the executors if they are already bound.
force_init : bool
Default `False`. Indicates whether initialization should be forced even if the
parameters are already initialized.
begin_epoch : int
Default `0`. Indicates the starting epoch. Usually, if we are resuming from a
checkpoint saved at a previous training phase at epoch N, then we should specify
this value as N+1.
num_epoch : int
Number of epochs to run training.
Examples
--------
An example of using fit for training::
>>> #Assume training dataIter and validation dataIter are ready
>>> mod.fit(train_data=train_dataiter, eval_data=val_dataiter,
optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
num_epoch=10)
"""
assert num_epoch is not None, 'please specify number of epochs'
self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True, force_rebind=force_rebind)
if monitor is not None:
self.install_monitor(monitor)
self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
self.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params)
if validation_metric is None:
validation_metric = eval_metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
################################################################################
# training loop
################################################################################
for epoch in range(begin_epoch, num_epoch):
tic = time.time()
eval_metric.reset()
for nbatch, data_batch in enumerate(train_data):
if monitor is not None:
monitor.tic()
self.forward_backward(data_batch)
self.update()
self.update_metric(eval_metric, data_batch.label)
if monitor is not None:
monitor.toc_print()
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
# one epoch of training is finished
for name, val in eval_metric.get_name_value():
self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
toc = time.time()
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
# sync aux params across devices
arg_params, aux_params = self.get_params()
self.set_params(arg_params, aux_params)
if epoch_end_callback is not None:
for callback in _as_list(epoch_end_callback):
callback(epoch, self.symbol, arg_params, aux_params)
#----------------------------------------
# evaluation on validation set
if eval_data:
res = self.score(eval_data, validation_metric,
score_end_callback=eval_end_callback,
batch_end_callback=eval_batch_end_callback, epoch=epoch)
#TODO: pull this into default
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
def forward(self, data_batch, is_train=None):
assert self.binded and self.params_initialized
# get current_shapes
if self._curr_module.label_shapes is not None:
current_shapes = [dict(self._curr_module.data_shapes[i] + self._curr_module.label_shapes[i]) for i in range(len(self._context))]
else:
current_shapes = [dict(self._curr_module.data_shapes[i]) for i in range(len(self._context))]
# get input_shapes
if is_train:
input_shapes = [dict(data_batch.provide_data[i] + data_batch.provide_label[i]) for i in range(len(self._context))]
else:
input_shapes = [dict(data_batch.provide_data[i]) for i in range(len(data_batch.provide_data))]
# decide if shape changed
shape_changed = len(current_shapes) != len(input_shapes)
for pre, cur in zip(current_shapes, input_shapes):
for k, v in list(pre.items()):
if v != cur[k]:
shape_changed = True
if shape_changed:
# self._curr_module.reshape(data_batch.provide_data, data_batch.provide_label)
module = Module(self._symbol, self._data_names, self._label_names,
logger=self.logger, context=[self._context[i] for i in range(len(data_batch.provide_data))],
work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names)
module.bind(data_batch.provide_data, data_batch.provide_label, self._curr_module.for_training,
self._curr_module.inputs_need_grad, force_rebind=False,
shared_module=self._curr_module)
self._curr_module = module
self._curr_module.forward(data_batch, is_train=is_train)
def backward(self, out_grads=None):
assert self.binded and self.params_initialized
self._curr_module.backward(out_grads=out_grads)
def update(self):
assert self.binded and self.params_initialized and self.optimizer_initialized
self._curr_module.update()
def get_outputs(self, merge_multi_context=True):
assert self.binded and self.params_initialized
return self._curr_module.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._curr_module.get_input_grads(merge_multi_context=merge_multi_context)
def update_metric(self, eval_metric, labels):
assert self.binded and self.params_initialized
self._curr_module.update_metric(eval_metric, labels)
def install_monitor(self, mon):
""" Install monitor on all executors """
assert self.binded
self._curr_module.install_monitor(mon)
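# Illustrative sketch, not part of the original module: the `fit` method above
# wires together bind -> init_params -> init_optimizer and then loops over
# forward / backward / update / update_metric.  The hypothetical helper below
# (its name and arguments are illustrative only) spells out that inner loop for
# a single epoch; `module`, `train_iter` and `eval_metric` are assumed to be an
# already constructed (Mutable)Module, a DataIter and an EvalMetric.
def _one_epoch_sketch(module, train_iter, eval_metric):
    """Run one training epoch using only the methods defined in this file."""
    module.bind(data_shapes=train_iter.provide_data,
                label_shapes=train_iter.provide_label,
                for_training=True)
    if not module.params_initialized:
        module.init_params()
    if not module.optimizer_initialized:
        module.init_optimizer()
    eval_metric.reset()
    for data_batch in train_iter:
        module.forward(data_batch, is_train=True)   # forward pass
        module.backward()                           # compute gradients
        module.update()                             # apply one optimizer step
        module.update_metric(eval_metric, data_batch.label)
    train_iter.reset()
    return eval_metric.get_name_value()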
| 42.046555
| 140
| 0.617011
|
9d21c1bd4182c9790ccb22da6a62d458212b393b
| 719
|
py
|
Python
|
PacoteDownload/Mundo 3 do curso/Desafio 101.py
|
Gabriel-ER/CursoEmVideodoYoutube-Python-
|
0733ce05f28f8f87603270ef5ab7cb51c8f2c5ac
|
[
"MIT"
] | null | null | null |
PacoteDownload/Mundo 3 do curso/Desafio 101.py
|
Gabriel-ER/CursoEmVideodoYoutube-Python-
|
0733ce05f28f8f87603270ef5ab7cb51c8f2c5ac
|
[
"MIT"
] | null | null | null |
PacoteDownload/Mundo 3 do curso/Desafio 101.py
|
Gabriel-ER/CursoEmVideodoYoutube-Python-
|
0733ce05f28f8f87603270ef5ab7cb51c8f2c5ac
|
[
"MIT"
] | null | null | null |
def voto(ano_de_nascimento, analfabeto='N'):
from datetime import datetime, date
analfabeto = analfabeto.upper()
# the year can also be obtained with the commented alternative below:
idade = date.today().year - ano_de_nascimento
# idade = datetime.now().year - ano_de_nascimento
print(f'Idade: {idade} anos.')
if idade < 16:
return 'Voto proibido'
if 16 <= idade < 18 or idade >= 70:
return f'Com {idade} anos, o Voto é Opcional'
if 18 <= idade < 70:
if analfabeto == 'S':
return 'Voto opcional por ser analfabeto.'
else:
return f'Com {idade} anos, o Voto é obrigatório '
ano_de_nascimento = int(input('Ano de nascimento: '))
print(voto(ano_de_nascimento, 'n'))
| 32.681818
| 61
| 0.62726
|
1967dd77d7645901712ca6f47decb61d96d08853
| 11,797
|
py
|
Python
|
tests/test_unit.py
|
derrickorama/image_optim
|
5d2a776409f48b43d26e311e58b72ca58992f3df
|
[
"MIT"
] | 2
|
2016-10-19T13:04:25.000Z
|
2021-08-21T13:50:40.000Z
|
tests/test_unit.py
|
derrickorama/image_optim
|
5d2a776409f48b43d26e311e58b72ca58992f3df
|
[
"MIT"
] | null | null | null |
tests/test_unit.py
|
derrickorama/image_optim
|
5d2a776409f48b43d26e311e58b72ca58992f3df
|
[
"MIT"
] | 1
|
2017-12-07T11:57:21.000Z
|
2017-12-07T11:57:21.000Z
|
# -*- coding: utf-8 -*-
import os
import shutil
import subprocess
import unittest
import nose
from .context import ImageOptim, NoImagesOptimizedError
class ImageOptimTests(unittest.TestCase):
"""\"optimize\" method tests"""
jpg_file_orig = os.path.join(os.path.dirname(__file__), 'assets', 'jpg-example.jpg')
png_file_orig = os.path.join(os.path.dirname(__file__), 'assets', 'png-example.png')
gif_file_orig = os.path.join(os.path.dirname(__file__), 'assets', 'gif-example.gif')
@classmethod
def setUpClass(self):
# Get original file size
self.jpg_file_orig_size = os.path.getsize(self.jpg_file_orig)
self.png_file_orig_size = os.path.getsize(self.png_file_orig)
self.gif_file_orig_size = os.path.getsize(self.gif_file_orig)
def setUp(self):
# Copy original JPG file
self.jpg_file = os.path.join(os.path.dirname(__file__), 'assets', 'jpg-example-temp.jpg')
self.png_file = os.path.join(os.path.dirname(__file__), 'assets', 'png-example-temp.png')
self.gif_file = os.path.join(os.path.dirname(__file__), 'assets', 'gif-example-temp.gif')
shutil.copyfile(self.jpg_file_orig, self.jpg_file)
shutil.copyfile(self.png_file_orig, self.png_file)
shutil.copyfile(self.gif_file_orig, self.gif_file)
def tearDown(self):
os.remove(self.jpg_file)
os.remove(self.png_file)
os.remove(self.gif_file)
def test_optimizes_jpg_image(self):
'''ImageOptim optimizes JPG files'''
image_optim = ImageOptim()
def done(results):
self.assertGreater(self.jpg_file_orig_size, os.path.getsize(self.jpg_file))
image_optim.optimize(self.jpg_file, done)
def test_optimizes_png_image(self):
'''ImageOptim optimizes PNG files'''
image_optim = ImageOptim()
def done(results):
self.assertGreater(self.png_file_orig_size, os.path.getsize(self.png_file))
image_optim.optimize(self.png_file, done)
def test_optimizes_gif_image(self):
'''ImageOptim optimizes GIF files'''
image_optim = ImageOptim()
def done(results):
self.assertGreater(self.gif_file_orig_size, os.path.getsize(self.gif_file))
image_optim.optimize(self.gif_file, done)
def test_optimize_returns_results_json_in_callback(self):
'''ImageOptim returns results in JSON format when callback is provided'''
image_optim = ImageOptim()
def done(results):
self.assertIn('images', results)
self.assertIn('totals', results)
image_optim.optimize(self.jpg_file, done)
def test_optimize_returns_results_json_without_callback(self):
'''ImageOptim returns results in JSON format when callback is not provided'''
image_optim = ImageOptim()
results = image_optim.optimize(self.jpg_file)
self.assertIn('images', results)
self.assertIn('totals', results)
class ExceptionTests(unittest.TestCase):
"""Exception tests"""
def test_non_zero(self):
'''ImageOptim raises an exception if image_optim returns a non-zero return code'''
image_optim = ImageOptim()
self.assertRaises(subprocess.CalledProcessError, image_optim.optimize, 'idontexist.jpg')
def test_no_images_optimized(self):
'''ImageOptim raises a NoImagesOptimizedError if image_optim returns neither stdout nor stderr'''
image_optim = ImageOptim()
self.assertRaises(NoImagesOptimizedError, image_optim.optimize, os.path.join(os.path.dirname(__file__), 'assets', 'empty'))
def test_image_optim_errors(self):
'''ImageOptim raises exceptions when image_optim utility encounters an error'''
text_file = os.path.join(os.path.dirname(__file__), 'assets', 'non-image.txt')
image_optim = ImageOptim()
self.assertRaises(subprocess.CalledProcessError, image_optim.optimize, text_file)
class DirectoryOptimizeTests(unittest.TestCase):
"""directory optimization tests"""
directory_orig = os.path.join(os.path.dirname(__file__), 'assets')
@classmethod
def get_directory_size(self, directory):
total_size = 0
for dirpath, dirnames, filenames in os.walk(directory):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
@classmethod
def setUpClass(self):
# Copy original assets directory
self.directory = os.path.join(os.path.dirname(__file__), 'assets-temp')
shutil.copytree(self.directory_orig, self.directory)
# Copy JPG file to be excluded
excluded_original_path = os.path.join(os.path.dirname(__file__), 'assets', 'jpg-example.jpg')
self.excluded_temp_path = os.path.join(os.path.dirname(__file__), 'assets-temp', 'excluded.jpg')
shutil.copyfile(excluded_original_path, self.excluded_temp_path)
# Get original size for comparison
self.excluded_original_size = os.path.getsize(self.excluded_temp_path)
# Get original total directory size
self.directory_orig_size = self.get_directory_size(self.directory)
image_optim = ImageOptim()
self.results = image_optim.optimize(self.directory, exclude='excluded.*')
@classmethod
def tearDownClass(self):
shutil.rmtree(self.directory)
def test_optimizes_directory(self):
'''ImageOptim optimizes a directory of images'''
self.assertGreater(self.directory_orig_size, self.get_directory_size(self.directory))
def test_includes_image_results(self):
'''ImageOptim returns optimization results for each image'''
self.assertEqual(len(self.results['images']), 3)
def test_excludes_defined_excluded_glob(self):
'''ImageOptim excludes images as defined by the exclude glob'''
self.assertEqual(self.excluded_original_size, os.path.getsize(self.excluded_temp_path))
class ResultsInterpreterTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.image_optim = ImageOptim()
# Image results
def test_image_savings_ratio(self):
'''Interpreter gets savings ratio for each image'''
stdout = b''' 5.3% 32B ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['ratioSavings'], 5.3)
def test_image_savings_size_bytes(self):
'''Interpreter gets savings size for each image in bytes when stdout displays bytes'''
stdout = b''' 5.3% 32B ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['sizeSavings'], 32)
def test_image_savings_size_kilobytes(self):
'''Interpreter gets savings size for each image in bytes when stdout displays kilobytes'''
stdout = b''' 5.3% 32K ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['sizeSavings'], 32768)
def test_image_savings_size_megabytes(self):
'''Interpreter gets savings size for each image in bytes when stdout displays megabytes'''
stdout = b''' 5.3% 32M ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['sizeSavings'], 33554432)
def test_image_path(self):
'''Interpreter gets path to each image'''
stdout = b''' 5.3% 32M ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['path'], './path/to/image.jpg')
def test_image_savings_size_float(self):
'''Interpreter gets savings size for each image in bytes when stdout displays floats'''
stdout = b''' 5.3% 32.25K ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['sizeSavings'], 33024)
def test_image_savings_size_float_with_remainder(self):
'''Interpreter gets savings size for each image in bytes when stdout displays floats that don\'t equal a whole number of bytes'''
stdout = b''' 5.3% 32.55K ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['sizeSavings'], 33332)
def test_no_savings(self):
'''Interpreter sets ratioSavings and sizeSavings to 0 when no optimization occurs'''
stdout = b''' ------ ./path/to/image.jpg\nTotal: ------'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['ratioSavings'], 0)
self.assertEqual(results['images'][0]['sizeSavings'], 0)
self.assertEqual(results['images'][0]['path'], './path/to/image.jpg')
def test_files_with_spaces(self):
'''Interpreter correctly parses output that includes file names with spaces'''
stdout = b''' 5.3% 32.25K ./path/to/image filename with spaces.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['images'][0]['path'], './path/to/image filename with spaces.jpg')
# Totals
def test_total_savings_ratio(self):
'''Interpreter gets total savings ratio'''
stdout = b''' 5.3% 32B ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['totals']['ratioSavings'], 11.57)
def test_total_savings_size_bytes(self):
'''Interpreter gets total savings size in bytes when stdout displays bytes'''
stdout = b''' 5.3% 32B ./path/to/image.jpg\nTotal: 11.57% 191B'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['totals']['sizeSavings'], 191)
def test_total_savings_size_kilobytes(self):
'''Interpreter gets total savings size in bytes when stdout displays kilobytes'''
stdout = b''' 5.3% 32B ./path/to/image.jpg\nTotal: 11.57% 191K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['totals']['sizeSavings'], 195584)
def test_total_savings_size_megabytes(self):
'''Interpreter gets total savings size in bytes when stdout displays megabytes'''
stdout = b''' 5.3% 32B ./path/to/image.jpg\nTotal: 11.57% 191M'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['totals']['sizeSavings'], 200278016)
def test_total_no_savings(self):
'''Interpreter sets ratioSavings and sizeSavings to 0 when no optimization occurs'''
stdout = b''' ------ ./path/to/image.jpg\nTotal: ------'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['totals']['ratioSavings'], 0)
self.assertEqual(results['totals']['sizeSavings'], 0)
def test_total_savings_size_float(self):
'''Interpreter gets total savings size in bytes when stdout displays floats'''
stdout = b''' 5.3% 32K ./path/to/image.jpg\nTotal: 11.57% 191.25K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['totals']['sizeSavings'], 195840)
def test_total_savings_size_float_with_remainder(self):
'''Interpreter gets total savings size in bytes when stdout displays floats that don\'t equal a whole number of bytes'''
stdout = b''' 5.3% 32.55K ./path/to/image.jpg\nTotal: 11.57% 191.55K'''
results = self.image_optim.interpret(stdout)
self.assertEqual(results['totals']['sizeSavings'], 196148)
if __name__ == '__main__':
nose.main()
| 40.539519
| 133
| 0.675595
|
6da7587bb073507a827c60dd162d9b83b77bc4e8
| 2,532
|
py
|
Python
|
rpython/translator/test/test_interactive.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
rpython/translator/test/test_interactive.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | null | null | null |
rpython/translator/test/test_interactive.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
from rpython.translator.interactive import Translation
import py
def test_simple_annotate():
def f(x,y):
return x+y
t = Translation(f, [int, int])
assert t.context is t.driver.translator
assert t.config is t.driver.config is t.context.config
s = t.annotate()
assert s.knowntype == int
t = Translation(f, [int, int])
s = t.annotate()
assert s.knowntype == int
def test_simple_rtype():
def f(x,y):
return x+y
t = Translation(f, [int, int])
t.annotate()
t.rtype()
assert 'rtype_lltype' in t.driver.done
def test_simple_backendopt():
def f(x, y):
return x,y
t = Translation(f, [int, int], backend='c')
t.backendopt()
assert 'backendopt_lltype' in t.driver.done
def test_simple_source():
def f(x, y):
return x,y
t = Translation(f, [int, int], backend='c')
t.annotate()
t.source()
assert 'source_c' in t.driver.done
t = Translation(f, [int, int])
t.source_c()
assert 'source_c' in t.driver.done
t = Translation(f, [int, int])
py.test.raises(Exception, "t.source()")
def test_disable_logic():
def f(x,y):
return x+y
t = Translation(f, [int, int])
t.disable(['backendopt'])
t.source_c()
assert 'backendopt' not in t.driver.done
def test_simple_compile_c():
import ctypes
def f(x,y):
return x+y
t = Translation(f, [int, int])
t.source(backend='c')
t.compile()
dll = ctypes.CDLL(str(t.driver.c_entryp))
f = dll.pypy_g_f
assert f(2, 3) == 5
def test_simple_rtype_with_type_system():
def f(x,y):
return x+y
t = Translation(f, [int, int])
t.rtype(type_system='lltype')
assert 'rtype_lltype' in t.driver.done
t = Translation(f, [int, int])
t.rtype(type_system='ootype')
assert 'rtype_ootype' in t.driver.done
t = Translation(f, [int, int], type_system='ootype')
t.rtype()
assert 'rtype_ootype' in t.driver.done
t = Translation(f, [int, int])
t.rtype(backend='cli')
assert 'rtype_ootype' in t.driver.done
t = Translation(f, [int, int], backend='cli', type_system='ootype')
t.rtype()
assert 'rtype_ootype' in t.driver.done
t = Translation(f, [int, int], type_system='lltype')
t.annotate()
py.test.raises(Exception, "t.rtype(backend='cli')")
t = Translation(f, [int, int], backend='cli')
t.annotate()
py.test.raises(Exception, "t.rtype(type_system='lltype')")
| 21.641026
| 71
| 0.603081
|
31f057021cfdb445f46a8b994d0c0ae4c7b335dc
| 3,185
|
py
|
Python
|
keywords.py
|
deokhwaStory/naver_keywords
|
d38bab651183d5087edf7db8ca3a32eb964d00ed
|
[
"MIT"
] | null | null | null |
keywords.py
|
deokhwaStory/naver_keywords
|
d38bab651183d5087edf7db8ca3a32eb964d00ed
|
[
"MIT"
] | null | null | null |
keywords.py
|
deokhwaStory/naver_keywords
|
d38bab651183d5087edf7db8ca3a32eb964d00ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import json
import pymongo
from bs4 import BeautifulSoup
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# In[2]:
base = declarative_base()
class NaverKeywords(base):
__tablename__ = "naver"
id = Column(Integer, primary_key=True)
rank = Column(Integer, nullable=False)
keyword = Column(String(50), nullable=False)
rdate = Column(TIMESTAMP, nullable=False)
def __init__(self, rank, keyword):
self.rank = rank
self.keyword = keyword
def __repr__(self):
return "<NaverKeywords {}, {}>".format(self.rank, self.keyword)
# In[3]:
# Write the class
# constructor, crawling function, mysql save, mongodb save, slack send, and run function
# __init__, crawling, mysql_save, mongo_save, send_slack, run
# In[4]:
class NaverKeywordsCrawling:
def __init__(self, base, ip="15.164.163.11", pw="dssf", database="terraform"):
self.mysql_client = create_engine("mysql://root:{}@{}/{}?charset=utf8".format(pw, ip, database))
self.mongo_client = pymongo.MongoClient('mongodb://{}:27017'.format(ip))
self.datas = None
self.base = base
def crawling(self):
response = requests.get("https://www.naver.com/")
dom = BeautifulSoup(response.content, "html.parser")
keywords = dom.select(".ah_roll_area > .ah_l > .ah_item")
datas = []
for keyword in keywords:
rank = keyword.select_one(".ah_r").text
keyword = keyword.select_one(".ah_k").text
datas.append((rank, keyword))
self.datas = datas
def mysql_save(self):
# make table
self.base.metadata.create_all(self.mysql_client)
# parsing keywords
keywords = [NaverKeywords(rank, keyword) for rank, keyword in self.datas]
# make session
maker = sessionmaker(bind=self.mysql_client)
session = maker()
# save datas
session.add_all(keywords)
session.commit()
# close session
session.close()
def mongo_save(self):
# parse keyword documents
keywords = [{"rank": rank, "keyword": keyword} for rank, keyword in self.datas]
# insert keywords
self.mongo_client.terraform.naver_keywords.insert(keywords)
def send_slack(self, msg, channel="#program", username="provision_bot" ):
webhook_URL = "https://hooks.slack.com/services/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"
payload = {
"channel": channel,
"username": username,
"icon_emoji": ":provision:",
"text": msg,
}
response = requests.post(
webhook_URL,
data = json.dumps(payload),
)
return response
def run(self):
# crawling
self.crawling()
# save datas to db
self.mysql_save()
self.mongo_save()
# send msg
self.send_slack("naver crawling done! TTTT ")
# In[5]:
nk = NaverKeywordsCrawling(base)
nk
# In[6]:
nk.run()
# In[ ]:
| 23.07971
| 104
| 0.6
|
0dede1a051a4efc1743ac075bd64382b654e5f72
| 22,958
|
py
|
Python
|
stable_baselines/a2c/utils.py
|
hzm2016/stable-baselines
|
ebd5cf79ecf8d263becafe293c2034ffcb9bcce4
|
[
"MIT"
] | 2
|
2019-12-02T08:59:50.000Z
|
2021-08-20T12:38:25.000Z
|
stable_baselines/a2c/utils.py
|
hzm2016/stable-baselines-option-critic
|
02935592d7bceb77b49bd6deef836c67a23e408b
|
[
"MIT"
] | null | null | null |
stable_baselines/a2c/utils.py
|
hzm2016/stable-baselines-option-critic
|
02935592d7bceb77b49bd6deef836c67a23e408b
|
[
"MIT"
] | null | null | null |
import os
from collections import deque
import numpy as np
import tensorflow as tf
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def sample(logits):
"""
Creates a sampling Tensor for non-deterministic policies
when using a categorical distribution.
It uses the Gumbel-max trick: http://amid.fish/humble-gumbel
:param logits: (TensorFlow Tensor) The input probability for each action
:return: (TensorFlow Tensor) The sampled action
"""
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
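# Illustrative sketch, not part of the original module: `sample` relies on the
# Gumbel-max trick, argmax(logits - log(-log(U))) with U ~ Uniform(0, 1), which
# draws an index with probability softmax(logits).  The helper below is a
# hypothetical demo (it is not used elsewhere) that checks this empirically
# using the module-level `np` import.
def _demo_gumbel_max_trick(n_draws=100000):
    logits = np.array([1.0, 2.0, 0.5])
    noise = np.random.uniform(size=(n_draws, logits.size))
    draws = np.argmax(logits - np.log(-np.log(noise)), axis=1)
    empirical = np.bincount(draws, minlength=logits.size) / float(n_draws)
    target = np.exp(logits - logits.max())
    target /= target.sum()
    return empirical, target  # the two distributions should be close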
def calc_entropy(logits):
"""
Calculates the entropy of the output values of the network
:param logits: (TensorFlow Tensor) The input probability for each action
:return: (TensorFlow Tensor) The Entropy of the output values of the network
"""
# Compute softmax
a_0 = logits - tf.reduce_max(logits, 1, keepdims=True)
exp_a_0 = tf.exp(a_0)
z_0 = tf.reduce_sum(exp_a_0, 1, keepdims=True)
p_0 = exp_a_0 / z_0
return tf.reduce_sum(p_0 * (tf.log(z_0) - a_0), 1)
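# Illustrative sketch, not part of the original module: `calc_entropy` computes
# sum(p * (log z - a0)), which equals the usual -sum(p * log p) once p is the
# max-shifted softmax.  The hypothetical helper below (demo only, uses the `np`
# imported at the top of this module) checks that both forms agree.
def _demo_entropy_equivalence():
    logits = np.array([[2.0, 0.5, -1.0]])
    a_0 = logits - logits.max(axis=1, keepdims=True)
    exp_a_0 = np.exp(a_0)
    z_0 = exp_a_0.sum(axis=1, keepdims=True)
    p_0 = exp_a_0 / z_0
    entropy_from_logits = (p_0 * (np.log(z_0) - a_0)).sum(axis=1)
    entropy_from_probs = -(p_0 * np.log(p_0)).sum(axis=1)
    return entropy_from_logits, entropy_from_probs  # should match closely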
def calc_entropy_softmax(action_proba):
"""
Calculates the softmax entropy of the output values of the network
:param action_proba: (TensorFlow Tensor) The input probability for each action
:return: (TensorFlow Tensor) The softmax entropy of the output values of the network
"""
return - tf.reduce_sum(action_proba * tf.log(action_proba + 1e-6), axis=1)
def mse(pred, target):
"""
Returns the Mean squared error between prediction and target
:param pred: (TensorFlow Tensor) The predicted value
:param target: (TensorFlow Tensor) The target value
:return: (TensorFlow Tensor) The Mean squared error between prediction and target
"""
return tf.reduce_mean(tf.square(pred - target))
def ortho_init(scale=1.0):
"""
Orthogonal initialization for the policy weights
:param scale: (float) Scaling factor for the weights.
:return: (function) an initialization function for the weights
"""
# _ortho_init(shape, dtype, partition_info=None)
def _ortho_init(shape, *_, **_kwargs):
"""Intialize weights as Orthogonal matrix.
Orthogonal matrix initialization [1]_. For n-dimensional shapes where
n > 2, the n-1 trailing axes are flattened. For convolutional layers, this
corresponds to the fan-in, so this makes the initialization usable for
both dense and convolutional layers.
References
----------
.. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli.
"Exact solutions to the nonlinear dynamics of learning in deep
linear neural networks." arXiv preprint arXiv:1312.6120 (2013).
"""
# lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
gaussian_noise = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(gaussian_noise, full_matrices=False)
weights = u if u.shape == flat_shape else v # pick the one with the correct shape
weights = weights.reshape(shape)
return (scale * weights[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
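# Illustrative sketch, not part of the original module: for a 2-D weight shape
# the initializer returned by `ortho_init` above yields (scaled) orthonormal
# columns, i.e. W.T @ W ~= scale**2 * I.  The hypothetical helper below checks
# that property using the module-level `np` import.
def _demo_ortho_init_is_orthogonal(n_in=64, n_out=32, scale=1.0):
    weights = ortho_init(scale)((n_in, n_out))
    gram = weights.T @ weights          # expected to be close to scale**2 * I
    return np.allclose(gram, (scale ** 2) * np.eye(n_out), atol=1e-4)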
def conv(input_tensor, scope, *, n_filters, filter_size, stride,
pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
"""
Creates a 2d convolutional layer for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the convolution
:param scope: (str) The TensorFlow variable scope
:param n_filters: (int) The number of filters
:param filter_size: (Union[int, [int], tuple<int, int>]) The filter size for the squared kernel matrix,
or the height and width of kernel filter if the input is a list or tuple
:param stride: (int) The stride of the convolution
:param pad: (str) The padding type ('VALID' or 'SAME')
:param init_scale: (int) The initialization scale
:param data_format: (str) The data format for the convolution weights
:param one_dim_bias: (bool) If the bias should be one-dimensional or not
:return: (TensorFlow Tensor) 2d convolutional layer
"""
if isinstance(filter_size, list) or isinstance(filter_size, tuple):
assert len(filter_size) == 2, \
"Filter size must have 2 elements (height, width), {} were given".format(len(filter_size))
filter_height = filter_size[0]
filter_width = filter_size[1]
else:
filter_height = filter_size
filter_width = filter_size
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, n_filters]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, n_filters, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [n_filters] if one_dim_bias else [1, n_filters, 1, 1]
n_input = input_tensor.get_shape()[channel_ax].value
wshape = [filter_height, filter_width, n_input, n_filters]
with tf.variable_scope(scope):
weight = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
bias = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
bias = tf.reshape(bias, bshape)
return bias + tf.nn.conv2d(input_tensor, weight, strides=strides, padding=pad, data_format=data_format)
def linear(input_tensor, scope, n_hidden, *, init_scale=1.0, init_bias=0.0):
"""
Creates a fully connected layer for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the fully connected layer
:param scope: (str) The TensorFlow variable scope
:param n_hidden: (int) The number of hidden neurons
:param init_scale: (int) The initialization scale
:param init_bias: (int) The initialization offset bias
:return: (TensorFlow Tensor) fully connected layer
"""
with tf.variable_scope(scope):
print(input_tensor.get_shape())
n_input = input_tensor.get_shape()[1]
weight = tf.get_variable("w", [n_input, n_hidden], initializer=ortho_init(init_scale))
bias = tf.get_variable("b", [n_hidden], initializer=tf.constant_initializer(init_bias))
return tf.matmul(input_tensor, weight) + bias
def batch_to_seq(tensor_batch, n_batch, n_steps, flat=False):
"""
Transform a batch of Tensors, into a sequence of Tensors for recurrent policies
:param tensor_batch: (TensorFlow Tensor) The input tensor to unroll
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_steps: (int) The number of steps to run for each environment
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) sequence of Tensors for recurrent policies
"""
if flat:
tensor_batch = tf.reshape(tensor_batch, [n_batch, n_steps])
else:
tensor_batch = tf.reshape(tensor_batch, [n_batch, n_steps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=n_steps, value=tensor_batch)]
def seq_to_batch(tensor_sequence, flat=False):
"""
Transform a sequence of Tensors, into a batch of Tensors for recurrent policies
:param tensor_sequence: (TensorFlow Tensor) The input tensor to batch
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) batch of Tensors for recurrent policies
"""
shape = tensor_sequence[0].get_shape().as_list()
if not flat:
assert len(shape) > 1
n_hidden = tensor_sequence[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=tensor_sequence), [-1, n_hidden])
else:
return tf.reshape(tf.stack(values=tensor_sequence, axis=1), [-1])
def lstm(input_tensor, mask_tensor, cell_state_hidden, scope, n_hidden, init_scale=1.0, layer_norm=False):
"""
Creates a Long Short-Term Memory (LSTM) cell for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the LSTM cell
:param mask_tensor: (TensorFlow Tensor) The mask tensor for the LSTM cell
:param cell_state_hidden: (TensorFlow Tensor) The state tensor for the LSTM cell
:param scope: (str) The TensorFlow variable scope
:param n_hidden: (int) The number of hidden neurons
:param init_scale: (int) The initialization scale
:param layer_norm: (bool) Whether to apply Layer Normalization or not
:return: (TensorFlow Tensor) LSTM cell
"""
_, n_input = [v.value for v in input_tensor[0].get_shape()]
with tf.variable_scope(scope):
weight_x = tf.get_variable("wx", [n_input, n_hidden * 4], initializer=ortho_init(init_scale))
weight_h = tf.get_variable("wh", [n_hidden, n_hidden * 4], initializer=ortho_init(init_scale))
bias = tf.get_variable("b", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
if layer_norm:
# Gain and bias of layer norm
gain_x = tf.get_variable("gx", [n_hidden * 4], initializer=tf.constant_initializer(1.0))
bias_x = tf.get_variable("bx", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
gain_h = tf.get_variable("gh", [n_hidden * 4], initializer=tf.constant_initializer(1.0))
bias_h = tf.get_variable("bh", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
gain_c = tf.get_variable("gc", [n_hidden], initializer=tf.constant_initializer(1.0))
bias_c = tf.get_variable("bc", [n_hidden], initializer=tf.constant_initializer(0.0))
cell_state, hidden = tf.split(axis=1, num_or_size_splits=2, value=cell_state_hidden)
for idx, (_input, mask) in enumerate(zip(input_tensor, mask_tensor)):
cell_state = cell_state * (1 - mask)
hidden = hidden * (1 - mask)
if layer_norm:
gates = _ln(tf.matmul(_input, weight_x), gain_x, bias_x) \
+ _ln(tf.matmul(hidden, weight_h), gain_h, bias_h) + bias
else:
gates = tf.matmul(_input, weight_x) + tf.matmul(hidden, weight_h) + bias
in_gate, forget_gate, out_gate, cell_candidate = tf.split(axis=1, num_or_size_splits=4, value=gates)
in_gate = tf.nn.sigmoid(in_gate)
forget_gate = tf.nn.sigmoid(forget_gate)
out_gate = tf.nn.sigmoid(out_gate)
cell_candidate = tf.tanh(cell_candidate)
cell_state = forget_gate * cell_state + in_gate * cell_candidate
if layer_norm:
hidden = out_gate * tf.tanh(_ln(cell_state, gain_c, bias_c))
else:
hidden = out_gate * tf.tanh(cell_state)
input_tensor[idx] = hidden
cell_state_hidden = tf.concat(axis=1, values=[cell_state, hidden])
return input_tensor, cell_state_hidden
def _ln(input_tensor, gain, bias, epsilon=1e-5, axes=None):
"""
Apply layer normalisation.
:param input_tensor: (TensorFlow Tensor) The input tensor for the Layer normalization
:param gain: (TensorFlow Tensor) The scale tensor for the Layer normalization
:param bias: (TensorFlow Tensor) The bias tensor for the Layer normalization
:param epsilon: (float) The epsilon value for floating point calculations
:param axes: (tuple, list or int) The axes to apply the mean and variance calculation
:return: (TensorFlow Tensor) a normalizing layer
"""
if axes is None:
axes = [1]
mean, variance = tf.nn.moments(input_tensor, axes=axes, keep_dims=True)
input_tensor = (input_tensor - mean) / tf.sqrt(variance + epsilon)
input_tensor = input_tensor * gain + bias
return input_tensor
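# Illustrative sketch, not part of the original module: `_ln` normalises each
# row to zero mean and unit variance, then applies a learned gain and bias.
# The hypothetical helper below mirrors the same arithmetic in NumPy for a 2-D
# batch so the transformation is easy to inspect outside a TensorFlow graph.
def _demo_layer_norm_numpy(batch, gain=1.0, bias=0.0, epsilon=1e-5):
    batch = np.asarray(batch, dtype=np.float64)
    mean = batch.mean(axis=1, keepdims=True)
    variance = batch.var(axis=1, keepdims=True)
    normalised = (batch - mean) / np.sqrt(variance + epsilon)
    return normalised * gain + bias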
def lnlstm(input_tensor, mask_tensor, cell_state, scope, n_hidden, init_scale=1.0):
"""
Creates a LSTM with Layer Normalization (lnlstm) cell for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the LSTM cell
:param mask_tensor: (TensorFlow Tensor) The mask tensor for the LSTM cell
:param cell_state: (TensorFlow Tensor) The state tensor for the LSTM cell
:param scope: (str) The TensorFlow variable scope
:param n_hidden: (int) The number of hidden neurons
:param init_scale: (int) The initialization scale
:return: (TensorFlow Tensor) lnlstm cell
"""
return lstm(input_tensor, mask_tensor, cell_state, scope, n_hidden, init_scale, layer_norm=True)
def conv_to_fc(input_tensor):
"""
Reshapes a Tensor from a convolutional network to a Tensor for a fully connected network
:param input_tensor: (TensorFlow Tensor) The convolutional input tensor
:return: (TensorFlow Tensor) The fully connected output tensor
"""
n_hidden = np.prod([v.value for v in input_tensor.get_shape()[1:]])
input_tensor = tf.reshape(input_tensor, [-1, n_hidden])
return input_tensor
def discount_with_dones(rewards, dones, gamma):
"""
Apply the discount value to the reward, where the environment is not done
:param rewards: ([float]) The rewards
:param dones: ([bool]) Whether an environment is done or not
:param gamma: (float) The discount value
:return: ([float]) The discounted rewards
"""
discounted = []
ret = 0 # Return: discounted reward
for reward, done in zip(rewards[::-1], dones[::-1]):
ret = reward + gamma * ret * (1. - done) # fixed off by one bug
discounted.append(ret)
return discounted[::-1]
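# Illustrative sketch, not part of the original module: a small worked example
# for `discount_with_dones`.  With gamma=0.99 the return is propagated
# backwards and reset whenever `done` is set, so
#     discount_with_dones([1, 1, 1, 1], [False, False, True, False], 0.99)
# gives approximately [2.9701, 1.99, 1.0, 1.0]; the third step ends an episode,
# so the final reward does not leak into the first three returns.
def _demo_discount_with_dones():
    return discount_with_dones([1, 1, 1, 1], [False, False, True, False], 0.99)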
def make_path(path):
"""
For a given path, create the folders if they do not exist
:param path: (str) The path
:return: (None) `os.makedirs` returns None, so there is no meaningful return value
"""
return os.makedirs(path, exist_ok=True)
def constant(_):
"""
Returns a constant value for the Scheduler
:param _: ignored
:return: (float) 1
"""
return 1.
def linear_schedule(progress):
"""
Returns a linear value for the Scheduler
:param progress: (float) Current progress status (in [0, 1])
:return: (float) 1 - progress
"""
return 1 - progress
def middle_drop(progress):
"""
Returns a linear value with a drop near the middle to a constant value for the Scheduler
:param progress: (float) Current progress status (in [0, 1])
:return: (float) 1 - progress if (1 - progress) >= 0.75 else 0.075
"""
eps = 0.75
if 1 - progress < eps:
return eps * 0.1
return 1 - progress
def double_linear_con(progress):
"""
Returns a linear value (x2) with a flattened tail for the Scheduler
:param progress: (float) Current progress status (in [0, 1])
:return: (float) 1 - progress*2 if (1 - progress*2) >= 0.125 else 0.125
"""
progress *= 2
eps = 0.125
if 1 - progress < eps:
return eps
return 1 - progress
def double_middle_drop(progress):
"""
Returns a linear value with two drops near the middle to a constant value for the Scheduler
:param progress: (float) Current progress status (in [0, 1])
:return: (float) if 0.75 <= 1 - p: 1 - p, if 0.25 <= 1 - p < 0.75: 0.75, if 1 - p < 0.25: 0.125
"""
eps1 = 0.75
eps2 = 0.25
if 1 - progress < eps1:
if 1 - progress < eps2:
return eps2 * 0.5
return eps1 * 0.1
return 1 - progress
SCHEDULES = {
'linear': linear_schedule,
'constant': constant,
'double_linear_con': double_linear_con,
'middle_drop': middle_drop,
'double_middle_drop': double_middle_drop
}
class Scheduler(object):
def __init__(self, initial_value, n_values, schedule):
"""
Update a value every iteration, with a specific curve
:param initial_value: (float) initial value
:param n_values: (int) the total number of iterations
:param schedule: (function) the curve you wish to follow for your value
"""
self.step = 0.
self.initial_value = initial_value
self.nvalues = n_values
self.schedule = SCHEDULES[schedule]
def value(self):
"""
Update the Scheduler, and return the current value
:return: (float) the current value
"""
current_value = self.initial_value * self.schedule(self.step / self.nvalues)
self.step += 1.
return current_value
def value_steps(self, steps):
"""
Get a value for a given step
:param steps: (int) The current number of iterations
:return: (float) the value for the current number of iterations
"""
return self.initial_value * self.schedule(steps / self.nvalues)
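# Illustrative sketch, not part of the original module: typical `Scheduler`
# usage.  Each call to `value()` advances the internal step and rescales the
# initial value with the chosen curve; with the 'linear' schedule the value
# decays from `initial_value` towards zero over `n_values` calls.  The numbers
# below are hypothetical demo values.
def _demo_scheduler_usage():
    scheduler = Scheduler(initial_value=0.01, n_values=100, schedule='linear')
    first_values = [scheduler.value() for _ in range(3)]
    # first_values == [0.01, 0.0099, 0.0098] up to floating point rounding
    return first_values, scheduler.value_steps(50)  # value at step 50 is 0.005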
class EpisodeStats:
def __init__(self, n_steps, n_envs):
"""
Calculates the episode statistics
:param n_steps: (int) The number of steps to run for each environment
:param n_envs: (int) The number of environments
"""
self.episode_rewards = []
for _ in range(n_envs):
self.episode_rewards.append([])
self.len_buffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.n_steps = n_steps
self.n_envs = n_envs
def feed(self, rewards, masks):
"""
Update the latest reward and mask
:param rewards: ([float]) The new rewards for the new step
:param masks: ([float]) The new masks for the new step
"""
rewards = np.reshape(rewards, [self.n_envs, self.n_steps])
masks = np.reshape(masks, [self.n_envs, self.n_steps])
for i in range(0, self.n_envs):
for j in range(0, self.n_steps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
reward_length = len(self.episode_rewards[i])
reward_sum = sum(self.episode_rewards[i])
self.len_buffer.append(reward_length)
self.rewbuffer.append(reward_sum)
self.episode_rewards[i] = []
def mean_length(self):
"""
Returns the average length of each episode
:return: (float)
"""
if self.len_buffer:
return np.mean(self.len_buffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
"""
Returns the average reward of each episode
:return: (float)
"""
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
# For ACER
def get_by_index(input_tensor, idx):
"""
Return the input tensor, offset by a certain value
:param input_tensor: (TensorFlow Tensor) The input tensor
:param idx: (int) The index offset
:return: (TensorFlow Tensor) the offset tensor
"""
assert len(input_tensor.get_shape()) == 2
assert len(idx.get_shape()) == 1
idx_flattened = tf.range(0, input_tensor.shape[0]) * input_tensor.shape[1] + idx
offset_tensor = tf.gather(tf.reshape(input_tensor, [-1]), # flatten input
idx_flattened) # use flattened indices
return offset_tensor
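# Illustrative sketch, not part of the original module: `get_by_index` picks
# one element per row of a 2-D tensor by flattening and gathering.  The
# hypothetical helper below shows the same flattened-index arithmetic in NumPy;
# it is equivalent to `table[np.arange(len(idx)), idx]`.
def _demo_get_by_index_numpy(table, idx):
    table = np.asarray(table)
    idx = np.asarray(idx)
    idx_flattened = np.arange(table.shape[0]) * table.shape[1] + idx
    return table.reshape(-1)[idx_flattened]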
def check_shape(tensors, shapes):
"""
Verifies the tensors match the given shape, will raise an error if the shapes do not match
:param tensors: ([TensorFlow Tensor]) The tensors that should be checked
:param shapes: ([list]) The list of shapes for each tensor
"""
i = 0
for (tensor, shape) in zip(tensors, shapes):
assert tensor.get_shape().as_list() == shape, "id " + str(i) + " shape " + str(tensor.get_shape()) + str(shape)
i += 1
def avg_norm(tensor):
"""
Return an average of the L2 normalization of the batch
:param tensor: (TensorFlow Tensor) The input tensor
:return: (TensorFlow Tensor) Average L2 normalization of the batch
"""
return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(tensor), axis=-1)))
def gradient_add(grad_1, grad_2, param, verbose=0):
"""
Sum two gradients
:param grad_1: (TensorFlow Tensor) The first gradient
:param grad_2: (TensorFlow Tensor) The second gradient
:param param: (TensorFlow parameters) The trainable parameters
:param verbose: (int) verbosity level
:return: (TensorFlow Tensor) the sum of the gradients
"""
if verbose > 1:
print([grad_1, grad_2, param.name])
if grad_1 is None and grad_2 is None:
return None
elif grad_1 is None:
return grad_2
elif grad_2 is None:
return grad_1
else:
return grad_1 + grad_2
def q_explained_variance(q_pred, q_true):
"""
Calculates the explained variance of the Q value
:param q_pred: (TensorFlow Tensor) The predicted Q value
:param q_true: (TensorFlow Tensor) The expected Q value
:return: (TensorFlow Tensor) the explained variance of the Q value
"""
_, var_y = tf.nn.moments(q_true, axes=[0, 1])
_, var_pred = tf.nn.moments(q_true - q_pred, axes=[0, 1])
check_shape([var_y, var_pred], [[]] * 2)
return 1.0 - (var_pred / var_y)
def total_episode_reward_logger(rew_acc, rewards, masks, writer, steps):
"""
Calculates the cumulative episode reward and writes it to the TensorFlow log
:param rew_acc: (np.array float) the total running reward
:param rewards: (np.array float) the rewards
:param masks: (np.array bool) the end of episodes
:param writer: (TensorFlow Session.writer) the writer to log to
:param steps: (int) the current timestep
:return: (np.array float) the updated total running reward
"""
with tf.variable_scope("environment_info", reuse=True):
for env_idx in range(rewards.shape[0]):
dones_idx = np.sort(np.argwhere(masks[env_idx]))
if len(dones_idx) == 0:
rew_acc[env_idx] += sum(rewards[env_idx])
else:
rew_acc[env_idx] += sum(rewards[env_idx, :dones_idx[0, 0]])
summary = tf.Summary(value=[tf.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[0, 0])
for k in range(1, len(dones_idx[:, 0])):
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[k-1, 0]:dones_idx[k, 0]])
summary = tf.Summary(value=[tf.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[k, 0])
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[-1, 0]:])
return rew_acc
| 38.199667
| 119
| 0.657069
|
9bd86b099d537cf76d0c8758bfc0b278027259e2
| 2,275
|
py
|
Python
|
python/gff/gff2orthocluster.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 10
|
2016-01-13T00:39:30.000Z
|
2020-11-30T05:56:19.000Z
|
python/gff/gff2orthocluster.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 1
|
2017-02-09T22:46:49.000Z
|
2017-02-09T22:46:49.000Z
|
python/gff/gff2orthocluster.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 10
|
2015-10-09T00:29:16.000Z
|
2019-06-09T05:32:15.000Z
|
#!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # command line argument handling
from low import * # custom functions, written by myself
import gff3
from collections import defaultdict
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f gff3 file" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-f': args['gff'] = value
if not args.has_key('gff'):
stderr( "gff argument missing." )
show_help()
elif not file_exists( args.get('gff') ):
stderr( "gff does not exist." )
show_help()
return args
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main( args ):
fo = open(args['gff'])
for line in fo:
if line.startswith("#"): continue
if len(line.strip()) == 0: continue
if len(line.split("\t")) != 9: continue
gf = gff3.GeneFeature(line.rstrip())
if gf.type != "gene": continue
id = gf.get_attributes()['ID']
if gf.strand == '+': strand = '1'
else: strand = "-1"
print string.join([id, gf.seqid, str(gf.start), str(gf.stop), strand], "\t")
fo.close()
# =============================================================================
args = handle_arguments()
main( args )
| 31.597222
| 83
| 0.487912
|
5b6bc0d7712ead86ff983e74a015b350ce708419
| 38,484
|
py
|
Python
|
Day_13/AoCUtils.py
|
Uklusi/AdventOfCode2019
|
4f9bd09183129b78258647fa2b0a42d84d9c0820
|
[
"MIT"
] | null | null | null |
Day_13/AoCUtils.py
|
Uklusi/AdventOfCode2019
|
4f9bd09183129b78258647fa2b0a42d84d9c0820
|
[
"MIT"
] | null | null | null |
Day_13/AoCUtils.py
|
Uklusi/AdventOfCode2019
|
4f9bd09183129b78258647fa2b0a42d84d9c0820
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import hashlib
from typing import Sequence, Iterable, Optional, Callable, Union, Literal, TypeVar, NamedTuple, NoReturn, Any, NewType, cast, overload
from collections.abc import Iterable
from io import TextIOWrapper
from copy import copy, deepcopy
from math import gcd
from functools import partial
from itertools import product
from queue import PriorityQueue
import re
import numpy as np
# Type aliases (for type hinting)
Numeric = Union[int, float]
MaybeSlice = Union[slice, int]
DirectionType = Union[str, int]
LogFileType = Union[str, TextIOWrapper]
DoubleSlice = Union[tuple[MaybeSlice, MaybeSlice], MaybeSlice]
T = TypeVar('T')
NewType = NewType
# Utility functions (debug, logging, prettify, etc...)
def rotations(l: Sequence[T]) -> list[Sequence[T]]:
cycled = list(l) + list(l)
n = len(l)
return [cycled[i:i+n] for i in range(n)]
def stringify(l: Iterable[Any]) -> list[str]:
return [str(o) for o in l]
def prettify(l: Iterable[Any]) -> str:
return "\n".join( stringify(l) )
def sign(x: Numeric) -> int:
return 1 if x > 0 else 0 if x == 0 else -1
def docstring(s: str) -> str:
slist = s.strip().split("\n")
slist = [l.strip() for l in slist]
    return prettify(slist)
def printLogFactory(logFile: LogFileType) -> Callable[..., None]:
def printLog(*t: Any) -> None:
"""
printLog function.
Takes an arbitrary number of inputs and writes them to a log file
If logFile is stdout, writes to stdout
"""
if isinstance(logFile, str):
print(*t)
else:
s = " ".join( stringify(t) )
logFile.write(s)
logFile.write("\n")
return printLog
# Useful variables
inf = float("inf")
false = False
true = True
# maze characters
solid = "\u2588"
empty = " "
path = "·"
# Positioning classes
class XY(NamedTuple):
x: int
y: int
class NormalizedVector(NamedTuple):
vx: float
vy: float
def dirToNum(direction: DirectionType) -> int:
"""
A function to read various direction inputs and return a standardized version
(a number from 0 to 3, with up as 0 and proceeding clockwise)
"""
if isinstance(direction, int):
return direction % 4
direction = direction.upper()
if direction in ["N", "U", "^", "0"]:
return 0
elif direction in ["E", "R", ">", "1"]:
return 1
elif direction in ["S", "D", "V", "2"]:
return 2
elif direction in ["W", "L", "<", "3", "-1"]:
return 3
else:
raise(Exception(f"DirectionError: {direction}"))
def dirToName(direction: DirectionType) -> str:
"""
A function to read a direction and returning a standardized version
(the letters U, R, D, L for up, right, down, left)
"""
direction = dirToNum(direction)
if direction == 0:
return "U"
elif direction == 1:
return "R"
elif direction == 2:
return "D"
else: # direction == 3:
return "L"
def dirToArrow(direction: DirectionType) -> str:
"""
A function to read a direction and returning a standardized version
(the arrows ^ > v < for up, right, down, left)
"""
direction = dirToNum(direction)
if direction == 0:
return "^"
elif direction == 1:
return ">"
elif direction == 2:
return "v"
else: # direction == 3:
return "<"
_PositionGeneric = Union["Position", "Agent", "MapPosition", "MapAgent"]
_BaseR2Generic = Union["_BaseR2Class", "HexGrid", "Vector", _PositionGeneric]
class _BaseR2Class():
"""
Class used to implement common methods between Vector and Position.
Implements hash, eq, gt (reading order: smaller y, greater x), ge,
coords (using stdcoords method) copy, repr (using str method)
Assuming stdcoords and __str__ methods and reverseY and upVertSign properties
All classes deriving from _BaseR2Class can be compared, returning equal if they have the same coordinates
"""
def __init__(self) -> None:
self.reverseY = False
self.upVertSign = False
self.x = 0
self.y = 0
def stdcoords(self, inverted: bool = False) -> tuple[int, int]:
return (self.x, self.y)
def __hash__(self) -> int:
return hash(self.stdcoords())
def __eq__(self: _BaseR2Generic, other: _BaseR2Generic) -> bool:
return self.stdcoords() == other.stdcoords()
def __gt__(self: _BaseR2Generic, other: _BaseR2Generic) -> Union[bool, NoReturn]:
(sx, sy, *_) = self.stdcoords()
(ox, oy, *_) = other.stdcoords()
sign = -self.upVertSign
s = (sign * sy, sx)
o = (sign * oy, ox)
return s > o
def __ge__(self: _BaseR2Generic, other: _BaseR2Generic) -> Union[bool, NoReturn]:
try:
gt = self > other
except:
raise(NotImplementedError)
return gt or self == other
def coords(self, inverted: bool = False) -> tuple[int, int]:
inverted = inverted ^ self.reverseY
return self.stdcoords(inverted=inverted)
def __str__(self) -> str:
return str(self.stdcoords())
def __repr__(self) -> str:
return str(self)
def copy(self: _BaseR2Generic) -> _BaseR2Generic:
return copy(self)
class Vector(_BaseR2Class):
"""
Vector class: used to indicate offsets.
Inherits from _BaseR2Class
    As a mathematician, I wanted to separate the affine plane Z^2 (class Position)
and the vectorial space Z^2 underneath (class Vector)
(Yes, I know Z is not a field and Position is technically a module, but let it slide)
Syntax is Vector(x, y, reverseY=False). [Hashable]
    Vector has properties vx and vy, in order to distinguish them from Position's x and y
reverseY=True means that going down increases the y coordinate.
It also makes the coordinates display as <y, x>, as if the coordinates are from a matrix.
This way it is easier to find points when debugging.
A vector has a distance() (Manhattan/L1 distance from <0,0>)
and a length() (Euclidean/L2 distance from <0,0>)
A vector can be standardized to a direction with the direction() method,
returning a new vector with gcd(vx, vy) = 1.
This is the preferred method for getting directions, as floating point comparisons are not perfect.
However, in case a direction in the unit circle is needed,
the direction method has a normalized parameter (default False)
"""
def __init__(
self,
x: int = 0,
y: int = 0,
reverseY: bool = False
) -> None:
self.upVertSign = -1 if reverseY else 1
self.reverseY = reverseY
self.vx = x
self.vy = y
def __add__(self, other: Vector) -> Vector:
return Vector(
self.vx + other.vx,
self.vy + other.vy,
reverseY = self.reverseY
)
def __sub__(self, other: Vector) -> Vector:
return Vector(
self.vx - other.vx,
self.vy - other.vy,
reverseY = self.reverseY
)
def __neg__(self) -> Vector:
return Vector(
-self.vx,
-self.vy,
reverseY=self.reverseY
)
def __rmul__(self, n: int) -> Vector:
return Vector(n*self.vx, n*self.vy, reverseY=self.reverseY)
def __mul__(self, n: int) -> Vector:
return n * self
def __str__(self) -> str:
(x, y) = self.coords()
return f"<{x}, {y}>"
def stdcoords(self, inverted: bool = False) -> tuple[int, int]:
if inverted:
return (self.vy, self.vx)
return (self.vx, self.vy)
def distance(self) -> int:
return abs(self.vx) + abs(self.vy)
def length(self) -> float:
return ( (self.vx) ** 2 + (self.vy) ** 2 ) ** (1/2)
def direction(self, normalized: bool = False) -> Union[Vector,NormalizedVector]:
if self == Vector(0,0):
return self
elif normalized:
d = self.length()
return NormalizedVector(self.vx / d, self.vy / d)
else:
d = gcd(self.vx, self.vy)
return Vector(self.vx // d, self.vy // d)
def VectorDir(direction: DirectionType, n: int = 1, reverseY: bool = False) -> Vector:
"""
Helper function for class Vector
Used to construct Vectors starting with a direction and the number of steps
"""
upVertSign = -1 if reverseY else 1
x = 0
y = 0
direction = dirToNum(direction)
if direction % 2 == 0:
y = n * upVertSign * (1 - direction)
else:
x = n * (2 - direction)
return Vector(x, y, reverseY=reverseY)
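# Editor's note: the demo below is not part of the original module; it is a minimal
# usage sketch for Vector and VectorDir with purely illustrative values.
def _vectorUsageExample() -> None:
    # Two steps to the right plus one step up (standard coordinates, y grows upward).
    v = VectorDir("R", n=2) + VectorDir("U")
    assert v == Vector(2, 1)
    assert v.distance() == 3              # Manhattan (L1) length
    assert v.direction() == Vector(2, 1)  # gcd(2, 1) == 1, so already reduced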
_PositionVar = TypeVar("_PositionVar", bound="Position")
class Position(_BaseR2Class):
"""
Position class: used to indicate positions in the affine plane Z^2.
Inherits from _BaseR2Class
This class is complemented by the Vector class, which is a prerequisite for this class.
Syntax is Position(x, y, reverseY=False). [Hashable]
Position has properties x and y (the coordinates)
reverseY=True means that going down increases the y coordinate.
It also makes the coordinates display as (y, x), as if the coordinates are from a matrix.
This way it is easier to find points when debugging.
A Position can only be added with a Vector, not with other Positions.
However, two Positions can be subtracted: p1 - p2 is the Vector v such that p2 + v == p1.
A Position has a method adjacent(includeCorners=False), to return all adjacent Positions.
If includeCorners is False, only the four positions sharing an edge are returned,
while if it is True, the list also includes the 4 Positions sharing a vertex.
A Position p has a distance(q=Position(0,0)) and a length(q=Position(0,0)),
implemented as the corresponding method for the Vector (p - q)
"""
def __init__(
self,
x: int = 0,
y: int = 0,
reverseY: bool = False
) -> None:
self.x = x
self.y = y
self.reverseY = reverseY
self.upVertSign = -1 if reverseY else 1
def __add__(self: _PositionVar, vector: Vector) -> _PositionVar:
return type(self)(
self.x + vector.vx,
self.y + vector.vy,
reverseY = self.reverseY
)
@overload
def __sub__(self: _PositionVar, other: Vector) -> _PositionVar:
return type(self)(0,0)
@overload
def __sub__(self, other: _PositionGeneric) -> Vector:
return Vector(0,0)
def __sub__(self, other: Union[Vector, _PositionGeneric]):
if isinstance(other, Vector):
return self + (-other)
return Vector(
self.x - other.x,
self.y - other.y,
reverseY = self.reverseY
)
def __str__(self) -> str:
return str(self.coords())
def stdcoords(self, inverted: bool = False) -> tuple[int, int]:
if inverted:
return (self.y, self.x)
return (self.x, self.y)
def adjacent(self: _PositionVar, includeCorners: bool = False) -> list[_PositionVar]:
if includeCorners:
return [self + Vector(i, j) for (i,j) in product([-1,0,1], repeat=2) if (i,j) != (0,0)]
return [self + VectorDir(direction=d) for d in [0,1,2,3] ]
def distance(self, other: Optional[_PositionGeneric] = None) -> int:
if other is None:
other = Position(0,0)
return (self - other).distance()
def length(self, other: Optional[_PositionGeneric] = None) -> float:
if other is None:
other = Position(0,0)
return (self - other).length()
class Agent(Position):
"""
Agent class: represents a movable entity in a 2D grid.
Inherits from Position
Syntax is Agent(x, y, direction=0, reverseY=False) [not hashable]
Agent is mutable, so hash is not implemented.
An agent can turnRight, turnLeft, turnReverse or turn(numOfRightTurns)
An agent can also move, both in the direction he is currently facing and in another direction
It also has a moveTo method, moving it to the position specified,
    and a position method, returning the Position corresponding to the agent's current position
"""
def __init__(
self,
x: int = 0,
y: int = 0,
direction: DirectionType = 0,
reverseY: bool = False
) -> None:
super().__init__(x, y, reverseY=reverseY)
self.direction = dirToNum(direction)
def __add__(self, vector: Vector) -> Agent:
return Agent(
self.x + vector.vx,
self.y + vector.vy,
direction = self.direction,
reverseY = self.reverseY
)
def turn(self, direction: Optional[DirectionType] = 1) -> None:
if direction is None:
return
dirNum = dirToNum(direction)
self.direction = (self.direction + dirNum) % 4
def turnRight(self) -> None:
self.turn(1)
def turnLeft(self) -> None:
self.turn(-1)
def turnReverse(self) -> None:
self.turn(2)
def moveTo(self, target: _PositionGeneric) -> None:
self.x = target.x
self.y = target.y
def move(self, n: int = 1, direction: Optional[DirectionType] = None) -> None:
if direction is None:
direction = self.direction
self.moveTo( self + VectorDir(n=n, direction=direction, reverseY=self.reverseY) )
def position(self) -> Position:
return Position(self.x, self.y, reverseY=self.reverseY)
def _inbound(n: int, nmin: Numeric, nmax: Numeric) -> int:
"""
Helper function for MapPosition class
"""
result = n if n < nmax else nmax
result = result if result > nmin else nmin
return cast(int, result)
class MapPosition(Position):
"""
MapPosition class: represents a Position on a possibly limited, possibly not fully traversable 2D plane.
Inherits from Position
    Syntax is MapPosition(x, y, reverseY=True, frame=None, xmin=-inf, xmax=inf, ymin=-inf, ymax=inf, occupied=lambda p: False) [Hashable]
A MapPosition assumes reverseY, contrary to the Position class, because usually a map is limited.
The limits can be specified via frame (a view of the observable portion of the map),
setting min to 0 and max to the effective max coordinate in the frame,
    or via the parameters. occupied is a function that takes a MapPosition as input
    and returns whether that position is occupied (i.e. an agent cannot move there).
A MapPosition has a method isValid checking for validity, and also separate methods
for the two ways a position can be invalid.
MapPosition.adjacent only returns valid positions.
"""
def __init__(
self,
x: int = 0,
y: int = 0,
reverseY: bool = True,
frame: Optional[Sequence[Sequence[Any]]] = None,
xmin: Numeric = -inf,
xmax: Numeric = inf,
ymin: Numeric = -inf,
ymax: Numeric = inf,
occupied: Callable[[_PositionGeneric], bool] = lambda p: False
) -> None:
super().__init__(
x,
y,
reverseY = reverseY
)
if frame is not None:
self.xmin = 0
self.xmax = len(frame[0]) - 1
self.ymin = 0
self.ymax = len(frame) - 1
else:
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self._occupiedFunction = occupied
def __add__(self, vector: Vector) -> _PositionGeneric:
return MapPosition(
self.x + vector.vx,
self.y + vector.vy,
reverseY = self.reverseY,
xmin = self.xmin,
xmax = self.xmax,
ymin = self.ymin,
ymax = self.ymax,
occupied = self._occupiedFunction
)
def isOccupied(self) -> bool:
return self._occupiedFunction(self)
def isEmpty(self) -> bool:
return not self.isOccupied()
def isInLimits(self) -> bool:
return (
self.x == _inbound(self.x, self.xmin, self.xmax) and
self.y == _inbound(self.y, self.ymin, self.ymax)
)
def isValid(self) -> bool:
return self.isEmpty() and self.isInLimits()
def adjacent(self: _PositionVar, includeCorners: bool = False) -> list[_PositionVar]:
ret = super().adjacent(includeCorners=includeCorners)
T1 = type(self)
return cast(list[T1], [p for p in ret if p.isValid()])
class MapAgent(MapPosition, Agent):
"""
MapAgent class: represents an agent on a MapPosition.
Inherits from MapPosition, Agent
    Syntax is MapAgent(x, y, direction=0, reverseY=True, frame=None, xmin=-inf, xmax=inf, ymin=-inf, ymax=inf, occupied=lambda p: False) [Not hashable]
MapAgent inherits both from MapPosition and Agent, with the following changes:
- A MapAgent has a mapPosition method returning the corresponding MapPosition
    - The move method now proceeds one step at a time and checks for validity before moving.
"""
def __init__(
self,
x: int = 0,
y: int = 0,
direction: DirectionType = 0,
reverseY: bool = True,
frame: Optional[Sequence[Sequence[Any]]] = None,
xmin: Numeric = -inf,
xmax: Numeric = inf,
ymin: Numeric = -inf,
ymax: Numeric = inf,
occupied: Callable[[Position], bool] = lambda p: False
):
Agent.__init__(
self,
x,
y,
direction = direction,
reverseY = reverseY
)
MapPosition.__init__(
self,
x,
y,
reverseY = reverseY,
frame = frame,
xmin = xmin,
xmax = xmax,
ymin = ymin,
ymax = ymax,
occupied = occupied
)
self.direction = dirToNum(direction)
def __add__(self, vector: Vector) -> MapAgent:
return MapAgent(
self.x + vector.vx,
self.y + vector.vy,
reverseY = self.reverseY,
direction = self.direction,
xmin = self.xmin,
xmax = self.xmax,
ymin = self.ymin,
ymax = self.ymax,
occupied = self._occupiedFunction
)
def move(self, n: int = 1, direction: Optional[DirectionType] = None) -> None:
if direction is None:
direction = self.direction
if n != 1:
for _ in range(n):
self.move(n=1, direction=direction)
return
v = VectorDir(direction=direction, reverseY=self.reverseY)
newpos = (self + v)
if newpos.isValid():
super().move(n=1, direction=direction)
def mapPosition(self) -> MapPosition:
return MapPosition(
self.x,
self.y,
reverseY = self.reverseY,
xmin = self.xmin,
xmax = self.xmax,
ymin = self.ymin,
ymax = self.ymax,
occupied = self._occupiedFunction
)
def breakHexDirections(tape: str) -> list[str]:
"""
Helper method to HexGrid: breaks a tape containing directions into a list of directions
"""
directionsRE = re.compile("NE|NW|N|SE|SW|S|UL|UR|U|DL|DR|D")
return directionsRE.findall(tape.upper())
class HexGrid(_BaseR2Class):
"""
HexGrid class: a coordinate system for an hexagonal grid.
Inherits from _BaseR2Class (except gt)
Syntax: HexGrid(x, y) [Hashable]
    HexGrid is hashable while mutable: pay attention to what you do.
    HexGrid is basically an unholy union of Agent and Vector, but on a hex grid.
This means that a position is represented as a 2d coordinate, but not all coordinates are acceptable.
    I am using a system like in the figure below, with axes NE and SW, and with N and S as the third axis.
· |
·+y +x
· O
·-x -y
· |
HexGrids can be summed and subtracted.
They can also move using the directions above, or move to a specific position.
HexGrids have an adjacent() method, returning six HexGrids,
and a distance(other=HexGrid(0,0)) method, returning the minimum number of steps
required to go from self to other
"""
def __init__(self, x: int = 0, y: int = 0) -> None:
super().__init__()
self.x = x
self.y = y
def __add__(self, other: HexGrid) -> HexGrid:
return HexGrid(self.x + other.x, self.y + other.y)
def __sub__(self, other: HexGrid) -> HexGrid:
return HexGrid(self.x - other.x, self.y - other.y)
def __rmul__(self, n: int) -> HexGrid:
return HexGrid(n*self.x, n*self.y)
def __mul__(self, n: int) -> HexGrid:
return n * self
def __str__(self) -> str:
return "Hex" + str(self.coords())
def __gt__(self, other: Any) -> NoReturn:
raise(Exception("Class not ordered"))
def move(self, n: int = 1, direction: Optional[str] = None) -> None:
if direction is None:
raise(Exception("DirectionError: None"))
for _ in range(n):
direction = direction.upper()
if direction in ["N", "U"]:
self.x += 1
self.y += 1
elif direction in ["NE", "UR"]:
self.x += 1
elif direction in ["NW", "UL"]:
self.y += 1
elif direction in ["S", "D"]:
self.x += -1
self.y += -1
elif direction in ["SE", "DR"]:
self.y += -1
elif direction in ["SW", "DL"]:
self.x += -1
else:
raise(Exception(f"DirectionError: {direction}"))
def moveFromTape(self, tape: list[str] = []) -> None:
for direction in tape:
self.move(direction=direction)
def moveTo(self, target: HexGrid) -> None:
self.x = target.x
self.y = target.y
def adjacent(self) -> list[HexGrid]:
return [self + HexGrid(i,j) for (i,j) in [(1,0), (0,1), (1,1), (-1,0), (0,-1), (-1,-1)]]
def distance(self, other: Optional[HexGrid] = None) -> int:
if other is None:
other = HexGrid(0,0)
x = self.x - other.x
y = self.y - other.y
if x * y <= 0:
return abs(x) + abs(y)
else:
return max(abs(x), abs(y))
class PositionNDim():
"""
    PositionNDim class: an n-dimensional Position-like class (n >= 3)
Syntax: PositionNDim(x, y, z, ...) / PositionNDim([x, y, z, ...])
PositionNDim is the translation of Vector and Position in a multidimensional environment.
If self.numDimensions = 3 or 4 the class has the properties x, y, z (w). These are not to be modified.
"""
def __init__(self, coordOrList: Union[int, Iterable[int]], *otherCoords: int) -> None:
if isinstance(coordOrList, int):
coords = (coordOrList, ) + otherCoords
self.coordinates = tuple(coords)
else:
self.coordinates = tuple(coordOrList)
self.numDimensions = len(self.coordinates)
if self.numDimensions <= 4:
self.x = self.coordinates[0]
self.y = self.coordinates[1]
self.z = self.coordinates[2]
if self.numDimensions == 4:
self.w = self.coordinates[3]
def __add__(self, other: PositionNDim) -> PositionNDim:
return PositionNDim(
[self.coordinates[i] + other.coordinates[i] for i in range(self.numDimensions)]
)
def __sub__(self, other: PositionNDim) -> PositionNDim:
return PositionNDim(
[self.coordinates[i] - other.coordinates[i] for i in range(self.numDimensions)]
)
def __rmul__(self, n: int) -> PositionNDim:
return PositionNDim( [n*c for c in self.coordinates] )
def __hash__(self) -> int:
return hash(self.coords())
def __eq__(self, other: PositionNDim) -> bool:
return self.coords() == other.coords()
def __str__(self) -> str:
return str(self.coords())
def __repr__(self) -> str:
return str(self)
def stdcoords(self) -> tuple[int, ...]:
return tuple(self.coordinates)
def coords(self) -> tuple[int, ...]:
return self.stdcoords()
def copy(self) -> PositionNDim:
return copy(self)
def adjacent(self, includeCorners: bool = False) -> list[PositionNDim]:
if includeCorners:
return [
self + PositionNDim(vals) for vals in product([-1,0,1], repeat=self.numDimensions) \
if vals != (0,) * self.numDimensions
]
return [
self + PositionNDim(vals) for vals in \
rotations([1] + [0] * (self.numDimensions - 1)) + \
rotations([-1] + [0] * (self.numDimensions - 1))
]
def distance(self, other: Optional[PositionNDim] = None) -> int:
if other is None:
other = PositionNDim( ([0] * self.numDimensions) )
s = self - other
return sum(map(lambda n: abs(n), s.coordinates))
def length(self, other: Optional[PositionNDim] = None) -> float:
if other is None:
other = PositionNDim( ([0] * self.numDimensions) )
s = self - other
return sum(map(lambda n: n**2, s.coordinates)) ** (1/2)
class GameOfLife():
"""
GameOfLife class: a naive implementation of Conway's Game of Life in a limited space
Syntax: GameOfLife(data, on="#", off=".") [Not hashable]
GameOfLife is a representation of a set rectangular portion of the 2D plane as a Game of Life automaton.
state is an iterable of fixed length iterables, representing a rectangle in the plane.
It is assumed that the elements of state are either on or off,
and that the positions outside the state are off and will always remain off.
The step method progresses the automaton by a cycle,
    while the image(origChars=False) method returns an Image object representing the current state.
If origChars is True, the characters used to init the object are used;
Otherwise, the solid and empty characters are used
"""
def __init__(self, data: Iterable[Iterable[T]], on: T = "#", off: T =".") -> None:
self.on = on
self.off = off
        self.state = [[1 if c == on else 0 for c in s] for s in data]
def __str__(self) -> str:
return "\n".join(["".join([solid if bit else empty for bit in s]) for s in self.state])
def __repr__(self) -> str:
return str(self)
def _neighs(self, p: Position) -> list[MapPosition]:
q = MapPosition(p.x, p.y, frame=self.state)
return q.adjacent(includeCorners=True)
def step(self) -> None:
n = len(self.state)
m = len(self.state[0])
newstate = deepcopy(self.state)
for i in range(n):
for j in range(m):
onNeighs = 0
for p in self._neighs(Position(i,j)):
onNeighs += self.state[p.x][p.y]
if self.state[i][j] and onNeighs in [2,3]:
newstate[i][j] = 1
elif not self.state[i][j] and onNeighs == 3:
newstate[i][j] = 1
else:
newstate[i][j] = 0
self.state = newstate
def image(self, origChars: bool = False) -> Image:
on = str(self.on) if origChars else solid
off = str(self.off) if origChars else empty
return Image([[on if n == 1 else off for n in l] for l in self.state])
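# Editor's note: the demo below is not part of the original module; it is a small
# sketch of GameOfLife on a square grid containing a 2x2 "block", a known still life.
def _gameOfLifeExample() -> None:
    block = GameOfLife(["....", ".##.", ".##.", "...."])
    before = str(block)
    block.step()
    assert str(block) == before  # the block pattern survives one step unchanged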
# Image-based classes
def _setDoubleSlice(key:DoubleSlice) -> tuple[slice, slice]:
"""
Helper method for overloading getitem with two dimensions
"""
if isinstance(key, tuple):
y = key[0]
x = key[1]
else:
y = key
x = slice(None)
if not isinstance(x, slice):
x = slice(x, x+1 or None)
if not isinstance(y, slice):
y = slice(y, y+1 or None)
return (y,x)
def _sliceToRange(item: slice, minRange: int, maxRange: int) -> range:
"""
Helper method to transform a slice object into a range object
"""
(start, stop, step) = cast(tuple[Union[int, None], ...], (item.start, item.stop, item.step))
return range(start or minRange, stop or maxRange, step or 1)
class Image():
"""
Image class: Used to represent a 2D image using numpy arrays
Syntax: Image(imageAsIter) [Hashable]
Image takes an iterable of iterables (assumed rectangular)
and uses it to construct a 2D numpy array called pixels.
This array has shape image.shape = (y, x), having y rows and x columns.
Properties image.ishape = (x, y) and image.nshape = {x: x, y: y} are also available.
Images can be concatenated horizontally with + and vertically with &.
    Since this rebuilds the array every time, if you need to concatenate an entire row or column of images,
    please use the imageConcat helper function.
str(image), and image.image() return a string, having newlines separating the lines
and are thus ideal for printing to the terminal (or file)
Slicing an image returns the corresponding part of the image as an Image object,
selecting the rows first and the columns second.
For example, image[0] is the first row, while image[:,0] is the first column.
You can also use negative-based ranges.
Images can be copied with image.copy(),
rotated with image.rotate(n=1, clockwise=False, copy=False)
and flipped with image.flip(ud=False, copy=False)
The four rotations of an image are available using image.rotations(),
while all the variations (rotated and flipped) are available using image.variations()
"""
def __init__(self, image: Iterable[Iterable[Any]]) -> None:
self.pixels: np.ndarray = np.array([list(r) for r in image]) # type: ignore
def __eq__(self, other: Image) -> bool:
if self.shape != other.shape:
return False
return bool(cast(np.ndarray, self.pixels == other.pixels).all())
def __add__(self, other: Image) -> Image:
return Image(np.concatenate((self.pixels, other.pixels), axis=1))
def __and__(self, other: Image) -> Image:
return Image(np.concatenate((self.pixels, other.pixels), axis=0))
def __getitem__(self, key: DoubleSlice) -> Image:
(yslice, xslice) = _setDoubleSlice(key)
return Image(self.pixels[yslice, xslice])
def __str__(self) -> str:
return self.image()
def __repr__(self) -> str:
return self.image()
def __hash__(self) -> int:
return hash(self.image())
@property
def shape(self) -> tuple[int, int]:
return cast(tuple[int, int], self.pixels.shape)
@property
def ishape(self) -> tuple[int, int]:
s = self.shape
return (s[1], s[0])
@property
def nshape(self) -> XY:
return XY(*self.ishape)
def copy(self) -> Image:
return Image(self.pixels)
def image(self) -> str:
return "\n".join(["".join(stringify(row)) for row in self.pixels])
@overload
def rotate(self, n: int = 0, clockwise: bool = False, copy: Literal[False] = False) -> None:
return
@overload
def rotate(self, n: int = 0, clockwise: bool = False, copy: Literal[True] = True) -> Image:
return Image([[""]])
def rotate(self, n: int = 1, clockwise: bool = False, copy: bool = False):
if clockwise:
k = -n
else:
k = n
i = np.rot90(self.pixels, k)
if copy:
return Image(i)
else:
self.pixels = i
@overload
def flip(self, ud: bool = False, copy: Literal[False] = False) -> None:
return
@overload
def flip(self, ud: bool = False, copy: Literal[True] = True) -> Image:
return Image([[""]])
def flip(self, ud: bool = False, copy: bool = False):
if ud:
i = np.flipud(self.pixels)
else:
i = np.fliplr(self.pixels)
if copy:
return Image(i)
else:
self.pixels = i
def rotations(self) -> list[Image]:
i1 = self.rotate(0, copy=True)
i2 = self.rotate(1, copy=True)
i3 = self.rotate(2, copy=True)
i4 = self.rotate(3, copy=True)
return [i1, i2, i3, i4]
def variations(self) -> list[Image]:
i1 = self.flip(copy=True)
return self.rotations() + i1.rotations()
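# Editor's note: the demo below is not part of the original module; it is a tiny
# sketch of the Image slicing and rotation API on a 2x2 picture.
def _imageUsageExample() -> None:
    img = Image(["ab", "cd"])
    assert img.shape == (2, 2)
    assert str(img[0]) == "ab"       # first row
    assert str(img[:, 0]) == "a\nc"  # first column
    # One counter-clockwise quarter turn: the last column becomes the first row.
    assert str(img.rotate(copy=True)) == "bd\nac"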
def imageConcat(imageIter: Iterable[Union[Image, np.ndarray]], vertical: bool = False) -> Image:
"""
    This is a helper function to concatenate several images in one pass.
"""
imageList = list(imageIter)
for i in range(len(imageList)):
if isinstance(imageList[i], Image):
imageList[i] = cast(Image, imageList[i]).pixels
imageList = cast(list[np.ndarray], imageList)
if vertical:
axis = 0
else:
axis = 1
    return Image(np.concatenate(imageList, axis=axis))
class Map():
"""
Map class: a window on the state of a Position-based 2D plane representation.
Syntax: Map(visual, frame=None, xmin, ymin, xmax, ymax) [not hashable]
Map uses the frame, if available, or the min and max parameters otherwise,
to determine the looking zone (default 10×10, starting from (0,0)).
    If frame is not None, xmin and ymin are assumed to be 0 and the max values come from the frame.
visual is the key of the Map: visual takes a Position
and returns the character to display in that position.
visual is called once for each position in the frame when requesting an image.
The image is requested via slicing or the image() method (returning an Image object) or via str (returning str(map.image()))
"""
def __init__(
self,
visual: Callable[[Position], str] = lambda p: ".",
frame: Optional[Sequence[Sequence[Any]]] = None,
xmin: int = 0,
xmax: int = 9,
ymin: int = 0,
ymax: int = 9
):
self._visualFunction = visual
if frame is not None:
self.xmin = 0
self.xmax = len(frame[0])
self.ymin = 0
self.ymax = len(frame)
else:
self.xmin = xmin
self.xmax = xmax + 1
self.ymin = ymin
self.ymax = ymax + 1
def __getitem__(self, key: DoubleSlice) -> Image:
(yslice, xslice) = _setDoubleSlice(key)
yrange = _sliceToRange(yslice, self.ymin, self.ymax)
xrange = _sliceToRange(xslice, self.xmin, self.xmax)
visualRepr = [[self._visualFunction(Position(x,y)) for x in xrange] for y in yrange]
return Image(visualRepr)
def image(self) -> Image:
return self[:,:]
def __str__(self) -> str:
return str(self.image())
def __repr__(self) -> str:
return str(self)
# Random classes and functions
class LinkedList():
"""
LinkedList class: an implementation of a circular double linked list
Syntax: LinkedList(data) [not hashable]
LinkedList is the starting node in a linked list.
It starts with linked.data = data, linked.next = linked.prev = linked.
linked.add(data) adds the specified data AFTER the node pointed by linked, and returns the new node
linked.delete() removes the pointed node, and returns the node BEFORE the deleted one, if any.
If the deleted node was the only one, returns None.
    Note that linked.add(data).delete() leaves the list unchanged (it returns the original node).
The list can be traversed by using linked.next, linked.prev or linked.move(n=1)
    linked.move(n) takes the number of steps to move forward; if n is negative, it moves backward
"""
def __init__(self, data: Any) -> None:
self.data = data
self.next = self
self.prev = self
def add(self, othData: Any) -> LinkedList:
other = LinkedList(othData)
other.prev = self
other.next = self.next
self.next.prev = other
self.next = other
return other
def delete(self) -> Optional[LinkedList]:
if self.next == self:
del(self)
return None
else:
self.next.prev = self.prev
self.prev.next = self.next
ret = self.prev
del(self)
return ret
def move(self, n: int = 1) -> LinkedList:
ret = self
if n > 0:
for _ in range(n):
ret = ret.next
elif n < 0:
for _ in range(-n):
ret = ret.prev
return ret
def __eq__(self, other: LinkedList) -> bool:
return self is other
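# Editor's note: the demo below is not part of the original module; it builds the
# circular list 1 <-> 2 <-> 3 and walks it in both directions.
def _linkedListExample() -> None:
    head = LinkedList(1)
    head.add(2).add(3)            # list is now 1 <-> 2 <-> 3 (circular)
    assert head.move(3) is head   # three steps forward wrap around to the start
    assert head.prev.data == 3    # one step backwards reaches the last element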
# Easier md5
def md5(string: str) -> str:
return hashlib.md5(string.encode()).hexdigest()
def aStar(
start: _PositionGeneric,
goal: _PositionGeneric,
distanceFunction: Callable[[_PositionGeneric, _PositionGeneric], Numeric] = lambda p, q: p.distance(q),
includeCorners: bool = False
) -> Numeric:
"""
A* Traversing algorithm.
Usage: aStar(start, goal, *distanceFunction, *includeCorners)
Assuming start and goal as instances of class Position
or at least assuming that they are ordered, hashable and
with a method called adjacent with parameter includeCorners.
    If called without specifying a distance function,
it also assumes that there is a method called distance(otherPosition)
"""
estimate = partial(distanceFunction, goal)
openSet: PriorityQueue[tuple[Numeric, _PositionGeneric]] = PriorityQueue()
distance: dict[_PositionGeneric, Numeric] = {start: 0}
openSet.put((estimate(start) + distance[start], start))
while not openSet.empty():
if goal in distance:
return distance[goal]
(_, current) = openSet.get()
for p in current.adjacent(includeCorners=includeCorners):
tentativeDistance = distance[current] + distanceFunction(current, p)
if p not in distance or distance[p] > tentativeDistance:
distance[p] = tentativeDistance
openSet.put((estimate(p) + distance[p], p))
return -1
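# Editor's note: the demo below is not part of the original module; it is a small
# usage sketch of aStar on an open (unbounded) grid where every step costs 1.
def _aStarExample() -> None:
    start = Position(0, 0)
    goal = Position(3, 4)
    # With no obstacles the A* result equals the Manhattan distance.
    assert aStar(start, goal) == 7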
| 32.976864
| 151
| 0.595832
|
dd2db19a062e9d80eda287acce37bd759705d4b3
| 1,847
|
py
|
Python
|
core/migrations/0030_abouttheeditorpage_editor.py
|
PARINetwork/pari
|
08e4c42475ab62b37f3c3ab3ae6369acc2685ecc
|
[
"BSD-3-Clause"
] | 35
|
2015-10-04T17:07:20.000Z
|
2022-03-23T08:10:13.000Z
|
core/migrations/0030_abouttheeditorpage_editor.py
|
PARINetwork/pari
|
08e4c42475ab62b37f3c3ab3ae6369acc2685ecc
|
[
"BSD-3-Clause"
] | 322
|
2015-07-31T17:06:47.000Z
|
2022-02-10T07:17:55.000Z
|
core/migrations/0030_abouttheeditorpage_editor.py
|
PARINetwork/pari
|
08e4c42475ab62b37f3c3ab3ae6369acc2685ecc
|
[
"BSD-3-Clause"
] | 14
|
2016-05-09T10:50:20.000Z
|
2021-05-08T14:48:51.000Z
|
# Generated by Django 2.2 on 2021-06-07 10:54
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('core', '0029_auto_20201204_1216'),
]
operations = [
migrations.CreateModel(
name='AboutTheEditorPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('page_title', models.CharField(max_length=250)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Editor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('name', models.CharField(max_length=250)),
('title', models.CharField(max_length=250)),
('description', wagtail.core.fields.RichTextField()),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.Image')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='editor', to='core.AboutTheEditorPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| 40.152174
| 191
| 0.59935
|
1434130e0a919ba49284a2d8e13d2e03c2fbfa2d
| 4,524
|
py
|
Python
|
src/tools/antool/syscalltable.py
|
krzycz/pmemfile
|
a1b9897a90cd223e24c10c4a7558235986f0fad3
|
[
"BSD-3-Clause"
] | 82
|
2017-06-30T13:54:44.000Z
|
2022-03-13T02:51:28.000Z
|
src/tools/antool/syscalltable.py
|
krzycz/pmemfile
|
a1b9897a90cd223e24c10c4a7558235986f0fad3
|
[
"BSD-3-Clause"
] | 40
|
2017-05-12T13:27:14.000Z
|
2017-11-16T19:47:40.000Z
|
src/tools/antool/syscalltable.py
|
krzycz/pmemfile
|
a1b9897a90cd223e24c10c4a7558235986f0fad3
|
[
"BSD-3-Clause"
] | 15
|
2017-05-12T12:32:01.000Z
|
2022-02-28T14:09:11.000Z
|
#!/usr/bin/python3
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from utils import *
from syscallinfo import *
########################################################################################################################
# SyscallTable
########################################################################################################################
class SyscallTable:
def __init__(self):
self.log_sctbl = logging.getLogger("syscalltable")
self.table = []
####################################################################################################################
def valid_index(self, ind):
if ind < len(self.table):
i = ind
else:
i = len(self.table) - 1
return i
####################################################################################################################
def get(self, ind):
i = self.valid_index(ind)
return self.table[i]
####################################################################################################################
def name(self, ind):
i = self.valid_index(ind)
return self.table[i].name
####################################################################################################################
# read_syscall_table -- read the syscall table from the file
####################################################################################################################
def read_syscall_table(self, fh):
fmt = 'I4sP32sIIIiI6s6s'
size_fmt = struct.calcsize(fmt)
size_check, = read_fmt_data(fh, 'i')
if size_check != size_fmt:
self.log_sctbl.error("wrong format of syscalls table:")
self.log_sctbl.error(" format size : {0:d}".format(size_fmt))
self.log_sctbl.error(" data size : {0:d}".format(size_check))
return -1
count, = read_fmt_data(fh, 'i')
self.log_sctbl.debug("format of syscall table OK, reading {0:d} records...".format(count))
for i in range(count):
try:
data = read_fmt_data(fh, fmt)
num, num_str, pname, name, length, nargs, mask, avail, nstrargs, positions, _padding = data
bname = bytes(name)
sname = str(bname.decode(errors="ignore"))
name = sname.split('\0')[0]
name = name[4:]
syscall = SyscallInfo(name, mask, nargs, nstrargs)
self.table.append(syscall)
except EndOfFile:
break
except CriticalError as err:
print("ERROR: {0:s}".format(err.message), file=stderr)
exit(-1)
except: # pragma: no cover
print("ERROR: unexpected error", file=stderr)
raise
self.log_sctbl.debug("read {0:d} records of syscall table.".format(count))
return 0
| 41.127273
| 120
| 0.521662
|
05e717ee1f19cfd348b43dd802012e128a07d155
| 78
|
py
|
Python
|
pos.py
|
galleu/autobookdl
|
18802f4dbb1a42a2a19d953bcfd0ea04ff5a037a
|
[
"MIT"
] | null | null | null |
pos.py
|
galleu/autobookdl
|
18802f4dbb1a42a2a19d953bcfd0ea04ff5a037a
|
[
"MIT"
] | null | null | null |
pos.py
|
galleu/autobookdl
|
18802f4dbb1a42a2a19d953bcfd0ea04ff5a037a
|
[
"MIT"
] | null | null | null |
import pyautogui
while 1:
pos = pyautogui.position()
print(pos)
| 13
| 31
| 0.628205
|
ae96cf9db54ba6f25d391416577898ab8f4405a1
| 1,585
|
py
|
Python
|
server/config/asgi.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | 1
|
2021-10-03T05:44:32.000Z
|
2021-10-03T05:44:32.000Z
|
server/config/asgi.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | null | null | null |
server/config/asgi.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | null | null | null |
"""
ASGI config for config project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
from ariadne.asgi import GraphQL
from channels.routing import URLRouter
from django.urls import path, re_path
from users.auth import BasicAuthBackend
from schema import schema
from users.jwt import jwt_middleware
from iam.middleware import TokenAuthMiddleware
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
def get_context_value(request):
return {
"request": request,
"cookies": request.scope.get("cookies", {}),
"user": request.scope.get("user"),
"session": request.scope.get("session"),
}
application = TokenAuthMiddleware(URLRouter(
[
#path("graphql/", GraphQL(schema, debug=True, middleware=[jwt_middleware])),
path("graphql/", GraphQL(schema, debug=True, context_value=get_context_value)),
re_path(r"", get_asgi_application()),
]
))
'''
import os
from django.core.asgi import get_asgi_application
from ariadne.asgi import GraphQL
from channels.http import AsgiHandler
from channels.routing import URLRouter
from django.urls import path, re_path
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
#application = get_asgi_application()
from .schema import schema
application = URLRouter([
path("graphql/", GraphQL(schema, debug=True)),
re_path(r"", AsgiHandler),
])
'''
| 25.15873
| 87
| 0.738801
|
c8aa3c326e492a6e7f4b922ef309b583d1dd22c4
| 138
|
py
|
Python
|
app/__init__.py
|
vax521/PublicationMS
|
10d03fbd67d25e68497d2ae2c7a0d53cc8f0248e
|
[
"MIT"
] | 3
|
2019-06-15T09:47:30.000Z
|
2022-01-16T07:06:06.000Z
|
app/__init__.py
|
vax521/PublicationMS
|
10d03fbd67d25e68497d2ae2c7a0d53cc8f0248e
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
vax521/PublicationMS
|
10d03fbd67d25e68497d2ae2c7a0d53cc8f0248e
|
[
"MIT"
] | 3
|
2018-07-03T03:22:10.000Z
|
2019-06-15T09:48:14.000Z
|
from flask import Flask
app = Flask(__name__)
# Read configuration information from the config file
app.config.from_object('config')
from app import views
from app import retrieval
| 17.25
| 32
| 0.797101
|
72c284f28543ba85421744da067a5d0205e21380
| 4,449
|
py
|
Python
|
combs_pub/rotamer/rotamer.py
|
npolizzi/combs_pub
|
a649d6104d7fb81cd75d9a838d630fda1ec5cc4e
|
[
"MIT"
] | null | null | null |
combs_pub/rotamer/rotamer.py
|
npolizzi/combs_pub
|
a649d6104d7fb81cd75d9a838d630fda1ec5cc4e
|
[
"MIT"
] | null | null | null |
combs_pub/rotamer/rotamer.py
|
npolizzi/combs_pub
|
a649d6104d7fb81cd75d9a838d630fda1ec5cc4e
|
[
"MIT"
] | 2
|
2020-11-02T23:04:27.000Z
|
2021-07-11T16:41:36.000Z
|
import prody as pr
__all__ = ['get_chi', 'calc_rotamer']
#### ADAPTED FROM LENNA PETERSON https://gist.github.com/lennax/0f5f65ddbfa278713f58 #####
def get_chi(residue_sele, atoms_list):
'''Take parsedvdm and a list of 4 atoms and calculate dihedral in prody
Helper function for rotamer()'''
prody_list = [residue_sele.select('name %s' % name) for name in atoms_list]
return pr.calcDihedral(prody_list[0], prody_list[1], prody_list[2], prody_list[3])
chi_dict = dict(
chi1=dict(
ARG=['N', 'CA', 'CB', 'CG'],
ASN=['N', 'CA', 'CB', 'CG'],
ASP=['N', 'CA', 'CB', 'CG'],
CYS=['N', 'CA', 'CB', 'SG'],
GLN=['N', 'CA', 'CB', 'CG'],
GLU=['N', 'CA', 'CB', 'CG'],
HIS=['N', 'CA', 'CB', 'CG'],
ILE=['N', 'CA', 'CB', 'CG1'],
LEU=['N', 'CA', 'CB', 'CG'],
LYS=['N', 'CA', 'CB', 'CG'],
MET=['N', 'CA', 'CB', 'CG'],
PHE=['N', 'CA', 'CB', 'CG'],
PRO=['N', 'CA', 'CB', 'CG'],
SER=['N', 'CA', 'CB', 'OG'],
THR=['N', 'CA', 'CB', 'OG1'],
TRP=['N', 'CA', 'CB', 'CG'],
TYR=['N', 'CA', 'CB', 'CG'],
VAL=['N', 'CA', 'CB', 'CG1'],
),
chi2=dict(
ARG=['CA', 'CB', 'CG', 'CD'],
ASN=['CA', 'CB', 'CG', 'OD1'],
ASP=['CA', 'CB', 'CG', 'OD1'],
GLN=['CA', 'CB', 'CG', 'CD'],
GLU=['CA', 'CB', 'CG', 'CD'],
HIS=['CA', 'CB', 'CG', 'ND1'],
ILE=['CA', 'CB', 'CG1', 'CD1'],
LEU=['CA', 'CB', 'CG', 'CD1'],
LYS=['CA', 'CB', 'CG', 'CD'],
MET=['CA', 'CB', 'CG', 'SD'],
PHE=['CA', 'CB', 'CG', 'CD1'],
PRO=['CA', 'CB', 'CG', 'CD'],
TRP=['CA', 'CB', 'CG', 'CD1'],
TYR=['CA', 'CB', 'CG', 'CD1'],
),
chi3=dict(
ARG=['CB', 'CG', 'CD', 'NE'],
GLN=['CB', 'CG', 'CD', 'OE1'],
GLU=['CB', 'CG', 'CD', 'OE1'],
LYS=['CB', 'CG', 'CD', 'CE'],
MET=['CB', 'CG', 'SD', 'CE'],
),
chi4=dict(
ARG=['CG', 'CD', 'NE', 'CZ'],
LYS=['CG', 'CD', 'CE', 'NZ'],
),
chi5=dict(
ARG=['CD', 'NE', 'CZ', 'NH1'],
)
)
alt_chi_dict = dict(
chi1=dict(VAL=['N', 'CA', 'CB', 'CG2']),
chi2=dict(
ASP=['CA', 'CB', 'CG', 'OD2'],
LEU=['CA', 'CB', 'CG', 'CD2'],
PHE=['CA', 'CB', 'CG', 'CD2'],
TYR=['CA', 'CB', 'CG', 'CD2'],
),
)
def calc_rotamer(prody_pdb, resnum, chid, segment):
'''Calculates dihedrals for all the chi angles in the vdm residue (vdmires).
Returns nested list of all the chi angles for the vdm ires. Empty list for ALA, GLY, and vdms that
fail the 'try' statement. If successful, ex of nested list is:
[[chi1, altchi1], [chi2], [chi3], [chi4] ] '''
resi_sele = prody_pdb.select('segment %s and chain %s and resnum `%s` and (not element H) and (not name C) and (not name O)'
% (segment, chid, resnum))
restype = resi_sele.getResnames()[0]
if restype == 'ALA' or restype == 'GLY':
return []
chi_list = []
# format is nested list, ex: [[chi1, altchi1], [chi2], [chi3], [chi4] ]
for chi in ['chi1', 'chi2', 'chi3', 'chi4']:
try:
ls = []
dihedral = get_chi(resi_sele, chi_dict[chi][restype])
ls.append(dihedral)
try:
dihedral = get_chi(resi_sele, alt_chi_dict[chi][restype])
ls.append(dihedral)
except:
pass # if there are no alt chis
chi_list.append(ls)
except:
pass # if there are no chi3's, chi4's, etc
return chi_list
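# Editor's note: the usage sketch below is not part of the original module. The file
# name "protein.pdb", chain "A", segment "A" and residue number 42 are placeholders
# only; substitute identifiers that actually exist in your structure.
def _example_calc_rotamer():
    pdb = pr.parsePDB("protein.pdb")
    chis = calc_rotamer(pdb, resnum=42, chid="A", segment="A")
    print(chis)  # nested list of chi dihedrals, e.g. [[chi1], [chi2], ...]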
| 39.723214
| 128
| 0.365475
|
7808f1fe279cfff61b8016ed37ddb3ce9f42285d
| 3,343
|
py
|
Python
|
pyntnclick/utils.py
|
CTPUG/pyntnclick
|
36082e663caba121b851b19f205c78e44936598b
|
[
"MIT"
] | 3
|
2020-01-02T10:11:14.000Z
|
2021-11-16T08:43:08.000Z
|
pyntnclick/utils.py
|
CTPUG/pyntnclick
|
36082e663caba121b851b19f205c78e44936598b
|
[
"MIT"
] | 1
|
2019-09-08T07:07:46.000Z
|
2019-09-08T07:07:46.000Z
|
pyntnclick/utils.py
|
CTPUG/pyntnclick
|
36082e663caba121b851b19f205c78e44936598b
|
[
"MIT"
] | 1
|
2020-08-29T20:04:52.000Z
|
2020-08-29T20:04:52.000Z
|
# Misc utils I don't know where else to put
from __future__ import print_function, division
import sys
import pygame
from pygame.color import Color
from pygame.colordict import THECOLORS
from pygame.locals import SRCALPHA
from pygame.surface import Surface
if sys.version_info.major == 2:
str_type = basestring # noqa: available in Python 2
else:
str_type = str
def list_scenes(scene_module, scene_list):
"""List the scenes in the state"""
print("Available scenes and details:")
for scene in scene_list:
scenemod = __import__(
'%s.%s' % (scene_module, scene), fromlist=[scene])
if scenemod.SCENES:
print(" * %s" % scene)
else:
print(" * %s (details only)" % scene)
for detailcls in getattr(scenemod, 'DETAIL_VIEWS', []):
print(" - %s" % detailcls.NAME)
def draw_rect_image(surface, color, rect, thickness):
"""Draw a rectangle with lines thickness wide"""
# top
surface.fill(color, (rect.left, rect.top, rect.width, thickness))
# bottom
surface.fill(
color, (rect.left, rect.bottom - thickness, rect.width, thickness))
# left
surface.fill(color, (rect.left, rect.top, thickness, rect.height))
# right
surface.fill(
color, (rect.right - thickness, rect.top, thickness, rect.height))
def convert_color(color):
"""Give me a pygame Color, dammit"""
if isinstance(color, pygame.Color):
return color
if isinstance(color, str_type):
return pygame.Color(color)
return pygame.Color(*color)
def lookup_debug_color(number):
"""Choose a unique colour for this number, to aid debugging"""
return Color(list(THECOLORS.keys())[number])
def render_text(
text, fontname, font_size, color, bg_color, resource, size,
centre=True):
"""Render the text so it will fit in the given size, reducing font
size as needed.
Note that this does not do any text wrapping."""
done = False
width, height = size
color = convert_color(color)
bg_color = convert_color(bg_color)
surface = Surface(size, SRCALPHA)
if resource.CONVERT_ALPHA:
surface = surface.convert_alpha()
else:
# Don't actually render the text when testing
return surface
surface.fill(bg_color)
while not done and font_size > 0:
# We bail at font_size 1 and just clip in that case, since we're
# out of good options
font = resource.get_font(fontname, font_size)
text_surf = font.render(text, True, color)
if (text_surf.get_width() > width or text_surf.get_height() > height):
font_size -= 1
else:
done = True
if centre:
# Centre the text in the rect
x = max(0, (width - text_surf.get_width()) // 2)
y = max(0, (height - text_surf.get_height()) // 2)
else:
x = y = 0
surface.blit(text_surf, (x, y))
return surface
def make_reversible_list(seq):
"""Turns a list of images into a symmetric sequence that runs through
the list first forward and then backwards.
i.e. Given the sequence [a, b, c, d, e], it will return the sequence
[a, b, c, d, e, d, c, b].
This is intended as a helper for constructing looping animations."""
return seq + seq[-2:0:-1]
| 31.242991
| 78
| 0.639545
|
8ac393a1f2b0b7f49f7738801c95b92737645ced
| 2,956
|
py
|
Python
|
apps/win/mintty/mintty.py
|
hsnyder/knausj_talon
|
b25d7155d1c43aa8dd3cdb24164fb2c154d794e7
|
[
"Unlicense"
] | 4
|
2020-07-26T00:31:29.000Z
|
2021-06-24T15:07:02.000Z
|
apps/win/mintty/mintty.py
|
hsnyder/knausj_talon
|
b25d7155d1c43aa8dd3cdb24164fb2c154d794e7
|
[
"Unlicense"
] | 1
|
2020-09-19T15:50:02.000Z
|
2020-09-19T17:08:14.000Z
|
apps/win/mintty/mintty.py
|
hsnyder/knausj_talon
|
b25d7155d1c43aa8dd3cdb24164fb2c154d794e7
|
[
"Unlicense"
] | 1
|
2021-09-20T14:45:08.000Z
|
2021-09-20T14:45:08.000Z
|
from talon import Context, Module, actions, imgui, settings, ui
import os
import subprocess
mod = Module()
mod.apps.mintty = """
os: windows
and app.name: Terminal
os: windows
and app.name: mintty.exe
"""
ctx = Context()
ctx.matches = r"""
app: mintty
"""
directories_to_remap = {}
directories_to_exclude = {}
setting_cyg_path = mod.setting(
"cygpath",
type=str,
default="C:\\cygwin64\\bin\\cygpath.exe",
desc="Path to cygpath.exe",
)
def get_win_path(cyg_path):
path = ""
try:
path = (
subprocess.check_output([setting_cyg_path.get(), "-w", cyg_path])
.strip(b"\n")
.decode()
)
except:
path = ""
return path
@ctx.action_class("user")
class user_actions:
def file_manager_current_path():
path = ui.active_window().title
path = get_win_path(path)
if path in directories_to_remap:
            path = directories_to_remap[path]
if path in directories_to_exclude:
path = ""
return path
def file_manager_show_properties():
"""Shows the properties for the file"""
def file_manager_open_directory(path: str):
"""opens the directory that's already visible in the view"""
actions.insert("cd ")
path = '"{}"'.format(path)
actions.insert(path)
actions.key("enter")
def file_manager_select_directory(path: str):
"""selects the directory"""
actions.insert(path)
def file_manager_new_folder(name: str):
"""Creates a new folder in a gui filemanager or inserts the command to do so for terminals"""
name = '"{}"'.format(name)
actions.insert("mkdir " + name)
def file_manager_open_file(path: str):
"""opens the file"""
actions.insert(path)
actions.key("enter")
def file_manager_select_file(path: str):
"""selects the file"""
actions.insert(path)
def file_manager_open_volume(volume: str):
"""file_manager_open_volume"""
actions.user.file_manager_open_directory(volume)
def terminal_list_directories():
actions.insert("ls")
actions.key("enter")
def terminal_list_all_directories():
actions.insert("ls -a")
actions.key("enter")
def terminal_change_directory(path: str):
actions.insert("cd {}".format(path))
if path:
actions.key("enter")
def terminal_change_directory_root():
"""Root of current drive"""
actions.insert("cd /")
actions.key("enter")
def terminal_clear_screen():
"""Clear screen"""
actions.key("ctrl-l")
def terminal_run_last():
actions.key("up enter")
def terminal_kill_all():
actions.key("ctrl-c")
actions.insert("y")
actions.key("enter")
| 25.482759
| 102
| 0.586604
|
99e3bd926e1075700581eb63e4f89bb7c74d0a75
| 17,183
|
py
|
Python
|
_build/jupyter_execute/contents/background/03_basic_hydrogeology.py
|
prabhasyadav/iGW-I
|
eba32830f32f1109a7bee600c65832af0e7183fa
|
[
"CC-BY-4.0"
] | null | null | null |
_build/jupyter_execute/contents/background/03_basic_hydrogeology.py
|
prabhasyadav/iGW-I
|
eba32830f32f1109a7bee600c65832af0e7183fa
|
[
"CC-BY-4.0"
] | null | null | null |
_build/jupyter_execute/contents/background/03_basic_hydrogeology.py
|
prabhasyadav/iGW-I
|
eba32830f32f1109a7bee600c65832af0e7183fa
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[4]:
# required libraries
import numpy as np
import matplotlib.pyplot as plt
import panel as pn
import pandas as pd
pn.extension("katex")
#from IPython.display import Image, Video
#import warnings
#warnings.filterwarnings('ignore')
# ## Lecture 1 - Course Introduction/Water Cycle ##
#
# _(The contents presented in this section were re-developed principally by Dr. P. K. Yadav. The original contents were developed by Prof. Rudolf Liedl)_
# ### Contents of “Groundwater Module ” ###
#
# The entire contents of this interactive book can be listed with the following points:
#
# + general overview
# + types of aquifers, properties of aquifers
# + forces and pressure in the subsurface
# + laws of groundwater flow and some applications (e.g. groundwater wells)
# + quantification of aquifer parameter values via pumping tests
# + transport of chemicals (solutes) in groundwater
# + retardation and degradation of chemicals in groundwater
# + groundwater modelling
#
# ### Suggested Literature: ###
#
#
# + Brassington R. (1988): Field hydrogeology, Wiley & Sons.
#
# + Domenico P. A., Schwartz F. W. (1990): Physical and chemical hydrogeology, Wiley & Sons.
#
# + Fetter C. W. (2001): Applied hydrogeology, Prentice Hall.
#
# + Freeze R. A., Cherry J. A. (1979): Groundwater, Prentice Hall.
#
# + Heath R. C. (1987): Basic groundwater hydrology, USGS Water Supply Paper 2220.
#
# + Price M. (1996): Introducing groundwater, Chapman and Hall.
#
# Additional literature details are provided in the text when used.
# ### What is Hydrogeology? ###
#
# _**Hydrogeology**_ is the study of the laws governing the movement of subterranean water, the mechanical, chemical, and thermal interaction of this water with the porous solid, and the transport of energy and chemical constituents by the flow.
# (Domenico and Schwartz, 1990)
#
#
# The global reliance on groundwater for drinking water has made hydrogeology a very important academic subject. It is also a very active research field. Therefore, several **techniques** and **methods** are now available to explore and understand **hydrogeological processes**. The methods and techniques can be broadly categorized into:
#
# 1. Field works
# 2. Laboratory experiments
# 3. Computer modeling
#
#
# _Computer modelling_ is often the most economical method, but its usefulness relies on data obtained from _field work_ and _laboratory experiments_. Thus, the sequence of techniques/methods to be adopted depends on the available site information.
#
# In[2]:
im1 = pn.pane.PNG("images/L01_f_1c.png", width=250)
im2 = pn.pane.PNG("images/L01_f_1b.png", width=275)
im3 = pn.pane.PNG("images/L01_f_1a.png", width=280)
pn.Row(im1, im2, im3)
# ### Example: Groundwater Extraction Well
#
# Groundwater is extracted using a groundwater well, applying _hydrogeological_ methods and techniques. The procedure can be summarized in the following steps:
#
# 1. The appropriate extraction location is identified
# 2. Drilling machines are used to explore the subsurface structure, i.e., well logs are obtained. This process is also called well logging.
# 3. Well logs are studied in detail to identify the characteristics of the subsurface, e.g., how thick the aquifer is, or to identify environmental consequences of water extraction.
# 4. The construction of the well begins
#
# Groundwater extraction using wells is a challenge when aquifers are located very deep below the surface, e.g., in deserts.
#
# In[3]:
video1 = pn.pane.Video("images/L01_f_2.mp4", width=600, height=400, loop=False)
video1
# In[4]:
#gif_pane = pn.pane.GIF('images/L01_f_2.gif', width=500)
#gif_pane
video2 = pn.pane.Video("images/L01_f_3.mp4", width=600, height=400, loop=False)
#Video("images/L01_f_3.mp4", width=600, embed=True)
video2
pn1 = pn.pane.Markdown("""
**Wells** are placed in the layer or the part of the aquifer that allows feasible extraction of groundwater.
The extraction leads to a drop of the groundwater level. To ensure that the extraction is sustainable,
the drop in the level has to be monitored. Quite often this is done through _computer modelling_. There
already exist several computer models that can use the well log (also called borehole) data and provide
good estimates of the effects of extraction. The _computer models_ are also able to predict the effects
at larger scales, e.g., regional scales. _Computer models_ are often used these days by agencies to determine
quantities such as **travel time**, **capture zones** or **isochrones**, which are used for deciding on
groundwater extraction programmes.
""")
pn.Row(pn1, video2)
# ### Groundwater and Global Water Cycle ###
#
# The water bodies that exist on Earth are connected, and they function as a cycle, called the **Global Water Cycle**. It is estimated that over 57,700 km$^3$ of water actively participates in the cycle each year. **Precipitation** and **evaporation** are the two main components of the cycle, in which **temperature** plays the critical role. In the cycle, **groundwater** receives water from _precipitation_; it then contributes to _evaporation_ through subsurface flow or, mostly, through human intervention (e.g., use for drinking water).
#
# The water cycle provides an approach to judge the sustainability of groundwater extraction. Extraction is sustainable if the extraction rate approximately equals the replenishment rate. Often the replenishment rate of groundwater is much slower, and this has led to groundwater stress in many parts of the world.
# In[2]:
#gif_pane = pn.pane.GIF('images/L01_f_2.gif', width=500)
#gif_pane
video3 = pn.pane.Video("images/L01_f_4.mp4", width=600, height=400, loop=False)
#Video("images/L01_f_3.mp4", width=600, embed=True)
video3
# In[3]:
#gif_pane = pn.pane.GIF('images/L01_f_2.gif', width=500)
#gif_pane
fig5 = pn.pane.PNG("images/L01_f_5.png", width=600)
#Video("images/L01_f_3.mp4", width=600, embed=True)
pn1 = pn.pane.Markdown("""
### Water balance by continents
Groundwater receives water from the _infiltration_ of **runoff** water.
""")
pn.Row(pn1, fig5)
# ### The Hydrological Balance ###
#
# Since _groundwater_ is part of the global water cycle, the balance of the cycle becomes an important topic. In general:
#
# + The _hydrological balance_ provides a relationship between various flow rates for a certain area. It is based on the conservation of water volume.
# + expressed in words: _inflow_ equals _outflow_ plus _change in storage_
# + expressed by a formula:
#
# $$
# P = ET + R + \Delta S
# $$
#
# ```{margin} Where,
# $P$ = _Precipitation,_ <br> $ET$ = _Evapotranspiration,_ <br> $R$ = _Runoff,_ and <br> $\Delta S$ = _Change in Storage_
#
# ```
#
# The _change in storage_ can be interpreted in the following way (a short numerical sketch follows this list):
#
# + change in storage $\Delta S > 0$ : Water volume is increasing with time in the investigation area.
#
# + change in storage $\Delta S < 0$: Water volume is decreasing with time in the investigation area.
#
# + change in storage $\Delta S = 0$: Water volume does not change with time in the investigation area (steady-state or stationary situation, i.e. inflow equals outflow).
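#
# A minimal numerical sketch of the balance above (purely illustrative values, not data from a real catchment): given $P$, $ET$ and $R$ for an area, the change in storage follows directly from $\Delta S = P - ET - R$.
# In[ ]:
P_bal = 700    # mm/a, precipitation (illustrative value)
ET_bal = 450   # mm/a, evapotranspiration (illustrative value)
R_bal = 200    # mm/a, total runoff (illustrative value)
dS_bal = P_bal - ET_bal - R_bal   # mm/a, change in storage
if dS_bal > 0:
    state = "water volume increasing"
elif dS_bal < 0:
    state = "water volume decreasing"
else:
    state = "steady state (inflow equals outflow)"
print("Delta S = {} mm/a -> {}".format(dS_bal, state))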
# ### Water Volume
#
#
# **So how much water do we have?**<br>
# It is estimated* that the total volume of water on Earth amounts to ca. 1 358 710 150 km$^3$ ($\approx 10^{18}$ m$^3$).
#
# <img src="images/L01_f_6.png" alt="Water volume in Earth" class="bg-primary" width="200px">
#
# The total volume of fresh water on Earth amounts to ca. $38\times 10^6$ km$^3$ ($\approx 10^{16}$ m$^3$).<br><br>
#
# _*Gleick P. (1996): Water re-sources, in: Schneider S. H. (ed.), Encyclopedia of climate and weather 2, Oxford Univ. Press._
#
# ### Volume of Available Fresh Water ###
#
# **Fresh water** is water with low concentrations of dissolved salts and other total dissolved solids; i.e., sea/ocean water and brackish water are not fresh water. Human activities (e.g., drinking water supply) are directly dependent on fresh water.
#
# **So how much _fresh water_ do we have?**
#
#
# It is estimated* that the total volume of available fresh water (liquid) on Earth amounts to ca. 8 831 600 km$^3$ ($\approx 10^{16}$ m$^3$).
#
# <img src="images/L01_f_7.png" alt="Fresh water volume in Earth" class="bg-primary" width="200px">
#
#
# _*Gleick P. (1996): Water re-sources, in: Schneider S. H. (ed.), Encyclopedia of climate and weather 2, Oxford Univ. Press._
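#
# The orders of magnitude quoted above follow from the unit conversion 1 km$^3$ = $10^{9}$ m$^3$; the short check below is plain arithmetic on the two figures quoted above.
# In[ ]:
KM3_TO_M3 = 1e9                  # 1 km³ = (1000 m)³ = 1e9 m³
total_water_km3 = 1358710150     # total volume of water on Earth, km³ (figure quoted above)
fresh_water_km3 = 8831600        # available liquid fresh water, km³ (figure quoted above)
print("Total water: {0:1.2E} m³".format(total_water_km3*KM3_TO_M3))   # ≈ 1.4E+18 m³, i.e. ~10^18 m³
print("Fresh water: {0:1.2E} m³".format(fresh_water_km3*KM3_TO_M3))   # ≈ 8.8E+15 m³, i.e. ~10^16 m³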
# ### Continental distribution of fresh water components ###
#
# <img src="images/L01_f_8.png" alt="Fresh water in different continents" class="bg-primary" width="500px">
#
# ### Volume and Mass Budget
#
#
# The very basics of volume and mass budgets - let us start with the term _budget._
#
# **Budget** = quantitative comparison of _growth_ (or _production_) and _loss_ in a system
#
# Budgets can be put together for various quantities:
# + energy
# + mass <span style="color:blue">$\leftarrow$ needed to quantify transport of solutes in groundwater</span>
# + volume <span style="color:blue">$\leftarrow$ needed to quantify groundwater flow</span>
# + momentum
# + electric charge
# + number of inhabitants
# + animal population
# + money (bank account!)
# + and many others
#
# In this course we focus on _Mass Budget_ and _Volume Budget._
# ### Volume Budget ###
#
# As discussed in the last topic, a _budget_ represents a change (e.g., growth and loss). Thus, it is most suitable to quantify the **volume budget** in terms of a change between two different states (e.g., times $t$). More formally, the **volume budget** ($\Delta V$) can be obtained from:
#
# $$
# \Delta V = Q_{in} \cdot \Delta t - Q_{out} \cdot \Delta t
# $$
# <br>
# with, <br>
#
# $\Delta t$ = time interval [T] <br>
# $\Delta V$ = change of volume in the system [L$^3$] <br>
# $Q_{in}$ = volumetric rate of flow into the system [L$^3$/T] <br>
# $Q_{out}$ = volumetric rate of flow out of the system [L$^3$/T] <br>
#
# The following points have to be considered when using the above equation (a minimal numerical sketch follows this list):
#
# + Inflow and outflow may each consist of several individual components. <br>
#
# + $\Delta V = 0$ (no change in volume) is tantamount to steady-state or stationary (= time-independent) conditions. <br>
#
# + For steady-state conditions we have: $Q_{in} = Q_{out}$
#
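# A minimal sketch of the volume budget equation itself, with purely illustrative inflow/outflow rates (the catchment example below then adds realistic components and unit conversions):
# In[ ]:
Q_in_demo = 12.0    # m³/s, total rate of inflow into the system (illustrative value)
Q_out_demo = 10.5   # m³/s, total rate of outflow out of the system (illustrative value)
dt_demo = 86400     # s, time interval (one day)
dV_demo = Q_in_demo*dt_demo - Q_out_demo*dt_demo   # m³, change of volume in the system
print("Delta V over one day: {0:1.1E} m³".format(dV_demo))   # > 0: volume increases; = 0: steady state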
# #### Water Budget for a Catchment ####
#
# The equation provided for the _volume budget_ looks simple, but in practice it is quite complicated, as several _inflow_ and _outflow_ components must be considered. Quantifying these components can be a challenging task.
#
# For quantifying water budget of a catchment, one has to consider the following components:
#
# **To be considered:**
# + precipitation
# + evapotranspiration
# + surface runoff
# + subsurface runoff
#
# Among the above components, the quantification of evapotranspiration and subsurface runoff has a very high level of uncertainty.
#
# ```{image} images/L01_f_9.png
# :height: 500px
# :align: center
# :name: Water-Budget
# ```
# ### Example: Estimation of Subsurface Runoff ###
#
# Most numbers used in the example do not refer to the catchment shown before!
#
# For the calculation, the following four steps are to be followed:
#
# + Step 1: determine the rate of inflow in m³/a
# + Step 2: determine the rate of outflow due to evapotranspiration (ET·A) in m³/a
# + Step 3: express the rate of outflow due to surface runoff in m³/a
# + Step 4: determine the rate of outflow due to subsurface runoff
#
# An Example:<br>
# For given data, determine the rate of outflow Qout,sub due to subsurface runoff for steady-state conditions
# In[4]:
A = 4500 # km², catchment area
P = 550 # mm/a, precipitation
ET = 200 # mm/a, evapotranspiration
Qout_surf = 40 # m³/s, surface runoff
Delta_V = 0 # m³, change in volume = 0 Steady-state conditions
#Volume budget in this example: P·A = ET·A + Qout,surf + Qout,sub
#Step 1
Qin = P*A*10**3 #m³/a, 10^3 for unit conversion
#step 2:
ET_A = ET*A*10**3 #m³/a, 10^3 for unit conversion
#Step 3:
Qout_surf = Qout_surf *365*24*3600 # m³/a
# step 4:
Qout_sub = Qin - ET_A - Qout_surf # m³/a
print("The rate of inflow, Qin is {0:1.1E}".format(Qin),"m\u00b3/a \n"); print("The outflow rate due to Evapotranspiration is {0:1.1E}".format(ET_A),"m\u00b3/a \n")
print("The surface outflow rate, Q_out_surf in m\u00b3/a is {0:1.1E}".format(Qout_surf),"m\u00b3/a \n");print("The subsurface outflow rate, Qout_surf in m\u00b3/a is {0:1.1E}".format(Qout_sub),"m\u00b3/a \n")
# ### Mass Budget ###
#
# The **mass budget** is quantified similarly to the _volume budget._ Mathematically, the _mass budget_ is:
#
# $$\Delta M = J_{in}\cdot \Delta t - J_{out} \cdot \Delta t$$
#
# with <br>
# $\Delta t$ = time interval [T]<br>
# $\Delta M$ = change of mass in the system [M]<br>
# $J_{in}$ = rate of mass flow into the system [M/T]<br>
# $J_{out}$ = rate of mass flow out of the system [M/T]
#
# Similar to _volume budget,_ the following points have to be considered in quantifying mass budget:
#
# + Inflow and outflow may each consist of several individual components.<br>
# + $\Delta M$ = 0 (no change in mass) is tantamount to steady-state or stationary <br>(= time-independent) conditions.<br>
# + For steady-state conditions we have: $J_{in}$= $J_{out}$
# ### Example of Mass Budget: Radioactive Decay ###
#
# Consider a decay chain comprising the three chemicals: **A**, **B** and **C**
#
# + decay chain: A $\rightarrow$ B $\rightarrow$ C <br>
# + 30% of $\text{A}$ and 20% of $\text{B}$ decay each year.<br>
#
# + decay rate of $\text{A}$ = production rate of $\text{B}$ = $0.3 \cdot a^{-1}\cdot M_A$ <br>
#
# + decay rate of $\text{B}$ = production rate of $\text{C}$ = $0.2\cdot a^{-1}\cdot M_B$ <br>
#
# + mass budgets for $\text{A}$, $\text{B}$ and $\text{C}$:<br>
#
#
#
# \begin{equation*}
# \begin{split}
# \Delta M_A &= -0.3 \text{ a}^{-1} \cdot M_A \cdot \Delta t \\
# \Delta M_B &= 0.3 \text{ a}^{-1} \cdot M_A \cdot \Delta t - 0.2 \text{ a}^{-1} \cdot M_B \cdot \Delta t \\
# \Delta M_C &= 0.2 \text{ a}^{-1} \cdot M_B \cdot \Delta t
# \end{split}
# \end{equation*}
#
#
# + Similar equations hold for quantitative descriptions of some chemical reactions which correspond to the type A $\rightarrow$ B $\rightarrow$ C
# In[2]:
def mass_bal(n_simulation, MA, MB, MC, R_A, R_B):
    # Explicit time stepping of the mass budgets for the decay chain A -> B -> C
    A = np.zeros(n_simulation)
    B = np.zeros(n_simulation)
    C = np.zeros(n_simulation)
    time = np.arange(n_simulation)
    A[0], B[0], C[0] = MA, MB, MC  # initial masses
    for i in range(0, n_simulation-1):
        A[i+1] = A[i]-R_A*A[i]            # loss of A by decay
        B[i+1] = B[i]+R_A*A[i]-R_B*B[i]   # production from A, loss by decay
        C[i+1] = C[i]+R_B*B[i]            # production from B
    summ = A+B+C  # total mass (conserved)
    d = {"Mass_A": A, "Mass_B": B, "Mass_C": C, "Total Mass": summ}
    df = pd.DataFrame(d) # Generating result table
    label = ["Mass A (g)", "Mass B (g)", "Mass C (g)"]
    fig = plt.figure(figsize=(6,4))
    plt.plot(time, A, time, B, time, C, linewidth=3); # plotting the results
    plt.xlabel("Time [Time Unit]"); plt.ylabel("Mass [g]") # placing axis labels
    plt.legend(label, loc=0);plt.grid(); plt.xlim([0,20]); plt.ylim(bottom=0) # legends, grids, x,y limits
    plt.show() # display plot
    df_pane = pn.pane.DataFrame(df)  # panel wrapper (kept from the original; not returned)
    return print(df.round(2))
N = widgets.BoundedIntText(value=20,min=0,max=100,step=1,description= 'Δ t (day)',disabled=False)
A = widgets.BoundedFloatText(value=100,min=0,max=1000.0,step=1,description='M<sub>A</sub> (kg)',disabled=False)
B = widgets.BoundedFloatText(value=5,min=0,max=1000.0,step=1,description='M<sub>B</sub> (kg)',disabled=False)
C = widgets.BoundedFloatText(value=10,min=0,max=1000,step=0.1,description='M<sub>C</sub> (kg)',disabled=False)
RA = widgets.BoundedFloatText(value=0.2,min=0,max=100,step=0.1,description='R<sub>A</sub> (day<sup>-1 </sup>)',disabled=False)
RB = widgets.BoundedFloatText(value=0.2,min=0,max=100,step=0.1,description='R<sub>B</sub> (day<sup>-1 </sup>)',disabled=False)
interactive_plot = widgets.interactive(mass_bal, n_simulation = N, MA=A, MB=B, MC=C, R_A=RA, R_B=RB,)
output = interactive_plot.children[-1]
#output.layout.height = '350px'
interactive_plot
# ### Comparison of Mass and Volume Budgets ###
#
# **mass budget**: $\Delta M = J_{in} \cdot \Delta t - J_{out} \cdot \Delta t$
#
# **volume budget**: $\Delta V = Q_{in} \cdot \Delta t - Q_{out} \cdot \Delta t $
#
#
# + Mass and volume budgets are equivalent if there is no change of density $\rho$ [M/L$^3$] with time. In this case the well known relationship $\Delta M$ = $\rho \cdot \Delta V$ holds and each equation given above can be directly transformed into the other one.
#
#
# + If density changes have to be considered (e.g. for gas flow), the mass budget equation remains valid but the volume budget equation must be modified because $\Delta M = \rho \cdot \Delta V + \Delta \rho \cdot V$ with $\Delta \rho$= change in density.
#
#
# + Cases with changing density have proven to be more easily tractable if the mass budget equation is used.
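#
# The constant-density case can be illustrated with a short sketch (illustrative numbers only): a volume change computed from the volume budget is converted into the equivalent mass change via $\Delta M = \rho \cdot \Delta V$.
# In[ ]:
rho_w = 1000.0    # kg/m³, density of water (assumed constant)
Q_in_w = 2.0      # m³/s, inflow (illustrative value)
Q_out_w = 1.5     # m³/s, outflow (illustrative value)
dt_w = 3600.0     # s, time interval (one hour)
dV_w = (Q_in_w - Q_out_w)*dt_w   # m³, volume budget
dM_w = rho_w*dV_w                # kg, equivalent mass budget for constant density
print("Delta V = {0:1.1E} m³ -> Delta M = {1:1.1E} kg".format(dV_w, dM_w))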
| 39.683603
| 532
| 0.695047
|
b71e6b3004af1e4050973b36f2a38d03ed10f61c
| 5,180
|
py
|
Python
|
epochalypse.py
|
pstirparo/utils
|
8fcf56e08f173afd615a5394b7eeee7f72b3509c
|
[
"Apache-2.0"
] | 31
|
2016-11-20T14:24:05.000Z
|
2022-02-04T16:41:04.000Z
|
epochalypse.py
|
pstirparo/utils
|
8fcf56e08f173afd615a5394b7eeee7f72b3509c
|
[
"Apache-2.0"
] | 2
|
2016-11-20T17:51:11.000Z
|
2020-05-13T09:50:23.000Z
|
epochalypse.py
|
pstirparo/utils
|
8fcf56e08f173afd615a5394b7eeee7f72b3509c
|
[
"Apache-2.0"
] | 8
|
2017-05-31T18:12:57.000Z
|
2021-05-03T14:13:00.000Z
|
#! /usr/bin/python
# -*- coding: iso-8859-15 -*-
#############################################################################
## ##
## Epochalypse.py --- Utility to convert epoch timestamps ##
## ##
## Copyright 2018 Pasquale Stirparo, @pstirparo ##
## New License is: Apache 2.0 ##
## ##
#############################################################################
__description__ = 'Epochalypse - timestamp converter utility'
__author__ = 'Pasquale Stirparo, @pstirparo'
__version__ = '0.5.1'
import sys
import time
from datetime import datetime
import argparse
import binascii
# The number of seconds between January 1, 1904 and Jan 1, 1970.
HFS_OFFSET = 2082844800
# The number of seconds between January 1, 1970 and January 1, 2001.
# Apple Safari also uses Cocoa timestamp
COCOA_OFFSET = 978307200
# The difference between Jan 1, 1601 and Jan 1, 1970 in micro seconds
# WebKit timestamp is used by Google Chrome and Opera
WEBKIT_OFFSET = 11644473600 * 1000000
# The difference between Jan 1, 1601 and Jan 1, 1970 in 100 nano seconds
NTFS_OFFSET = 11644473600 * 10000000
# The difference between Jan 1, 1980 and Jan 1, 1970 in seconds.
FAT_OFFSET = 315532800
# No offset calculation needed for APFS, as represent the number of nano
# seconds since January 1, 1970 (same as standard Unix epoch)
# No offset calculation needed for FireFox timestamp, as represent the number
# of microseconds since January 1, 1970 (same as standard Unix epoch)
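# A quick sanity check of the offsets above (illustrative number, not part of the
# original tool): a WebKit timestamp of 13076999783000000 microseconds equals
# (13076999783000000 - WEBKIT_OFFSET) / 1e6 = 1432526183 seconds after the Unix
# epoch; fromEpoch() below performs exactly this conversion for raw WebKit values.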
def fromEpoch(epoch):
print('Epoch Time input to be converted: %.6f' % epoch)
try:
print('Unix: ' + datetime.utcfromtimestamp(epoch).isoformat(" ") + ' UTC')
except:
print('Unix: -')
try:
print('COCOA: ' + datetime.utcfromtimestamp(
epoch + COCOA_OFFSET).isoformat(" ") + ' UTC')
except:
print('COCOA: -')
try:
print('FAT: ' + datetime.utcfromtimestamp(epoch + FAT_OFFSET).isoformat(
" ") + ' UTC')
except:
print('FAT: -')
try:
print('HFS+: ' + datetime.utcfromtimestamp(epoch - HFS_OFFSET).isoformat(
" ") + ' UTC')
except:
print('HFS+: -')
try:
# Webkit timestamp calculation
wk = datetime.utcfromtimestamp(float(epoch - WEBKIT_OFFSET) / 1000000)
print('WebKit: ' + wk.isoformat(" ") + ' UTC')
except:
print('WebKit: -')
try:
# ntfs time calculation
ntfs = datetime.utcfromtimestamp(float(epoch - NTFS_OFFSET) / 10000000)
print('NTFS: ' + ntfs.isoformat(" ") + ' UTC')
except:
print('NTFS: -')
try:
# APFS time calculation
apfs = datetime.utcfromtimestamp(float(epoch) / 1000000000)
print('APFS: ' + apfs.isoformat(" ") + ' UTC')
except:
print('APFS: -')
try:
# Firefox timestamp, number of microseconds since January 1, 1970 UTC
ff = datetime.utcfromtimestamp(float(epoch) / 1000000)
print('FireFox: ' + ff.isoformat(" ") + ' UTC')
except:
print('FireFox: -')
def fromHex(hextime):
return(float.fromhex(hextime))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epoch', dest='epoch_input', default=False,
help='Epoch time to be converted.', metavar='timestamp')
parser.add_argument('-r', '--revhex', action='store_true', default=False,
help='Reverse hex bytes (for little endian input), use it together with -x.')
parser.add_argument('-x', '--hex', dest='hexadecimal_input', default=False,
                        help='Hexadecimal timestamp value to be converted.', metavar='hex_timestamp')
parser.add_argument('-v', '--version', action='version', version= '%(prog)s ' + __version__)
try:
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
if args.revhex and not args.hexadecimal_input:
print("\nInput parameters Error: Use the -r option together with the hex one, -rx.\n")
parser.print_help()
sys.exit(1)
except Exception as e:
parser.print_help()
sys.exit(1)
print('\n##########################################################')
print('# #')
print('# Epochalypse - Epoch timestamp converter utility #')
print('# by Pasquale Stirparo, @pstirparo #')
print('# #')
print('##########################################################\n')
try:
if args.epoch_input:
fromEpoch(float(args.epoch_input))
print('')
elif args.hexadecimal_input:
hex_text = args.hexadecimal_input.replace(' ', '')
if args.revhex:
hex_text = binascii.hexlify(binascii.unhexlify(hex_text)[::-1]).decode()
epoch = fromHex(hex_text)
fromEpoch(epoch)
print('')
except ValueError as e:
print("[ERROR] Input value not valid.\n")
sys.exit(1)
except:
sys.exit(1)
if __name__ == "__main__":
main()
| 35
| 94
| 0.567375
|
6d0be790d6942b3ca72c59ec3ee7b794ad44f591
| 1,753
|
py
|
Python
|
setup.py
|
mcsekrish/deepracer-utils
|
a97302d4457f87cb0d04c8f57403628c888945c7
|
[
"MIT-0"
] | null | null | null |
setup.py
|
mcsekrish/deepracer-utils
|
a97302d4457f87cb0d04c8f57403628c888945c7
|
[
"MIT-0"
] | null | null | null |
setup.py
|
mcsekrish/deepracer-utils
|
a97302d4457f87cb0d04c8f57403628c888945c7
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python3
from setuptools import setup, find_packages
from os import path
import versioneer
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='deepracer-utils',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(include=["deepracer", "deepracer.*"]),
description='A set of tools for working with DeepRacer training',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/aws-deepracer-community/deepracer-utils/',
author='AWS DeepRacer Community',
classifiers=[
'Development Status :: 3 - Alpha',
# Pick your license as you wish
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: Log Analysis'
],
keywords='aws deepracer awsdeepracer',
python_requires='>=3.6.*, <4',
install_requires=[
'boto3>=1.12.0',
'python-dateutil<3.0.0,>=2.1',
'numpy>=1.18.0',
'shapely>=1.7.0',
'matplotlib>=3.1.0',
'pandas>=1.0.0',
'scikit-learn>=0.22.0',
'joblib>=0.17.0'
],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
project_urls={
'Bug Reports':
'https://github.com/aws-deepracer-community/deepracer-utils/issues',
'Source':
'https://github.com/aws-deepracer-community/deepracer-utils/',
},
)
| 30.224138
| 76
| 0.621791
|
4d8677b463794e178dfca8a5c7982527c0d37b38
| 301
|
py
|
Python
|
data/multilingual/Latn.TUR/Sans_8/pdf_to_json_test_Latn.TUR_Sans_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.TUR/Sans_8/pdf_to_json_test_Latn.TUR_Sans_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.TUR/Sans_8/pdf_to_json_test_Latn.TUR_Sans_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TUR/Sans_8/udhr_Latn.TUR_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.1
| 71
| 0.810631
|
efc4f9b5b8d816eed4b389d524122b6db49806f5
| 2,415
|
py
|
Python
|
data/p4VQE/R2/benchmark/startQiskit_Class63.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R2/benchmark/startQiskit_Class63.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R2/benchmark/startQiskit_Class63.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.y(input_qubit[1]) # number=6
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.rx(2.7457519792374794,input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.swap(input_qubit[1],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_Class63.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.443182
| 118
| 0.636439
|
d1eb115028cf3746ebd666b2e68a1d841e91b27e
| 1,685
|
py
|
Python
|
perfkitbenchmarker/version.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 3
|
2018-04-28T13:06:14.000Z
|
2020-06-09T02:39:44.000Z
|
perfkitbenchmarker/version.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 1
|
2021-09-09T07:43:25.000Z
|
2021-09-09T10:47:56.000Z
|
perfkitbenchmarker/version.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 6
|
2019-06-11T18:59:57.000Z
|
2021-03-02T19:14:42.000Z
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PerfKitBenchmarker version."""
import os.path
import subprocess
import perfkitbenchmarker
import pkg_resources
_STATIC_VERSION_FILE = 'version.txt'
def _GetVersion():
"""Gets the version from git or the static version file."""
# Try to pull the version from git.
root_dir = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(root_dir, '.git')
try:
version = subprocess.check_output(['git', '--git-dir', git_dir,
'describe', '--always'],
stderr=subprocess.STDOUT,
universal_newlines=True)
except (OSError, subprocess.CalledProcessError):
# Could not get the version from git. Resort to contents of the static
# version file.
try:
version = pkg_resources.resource_string(perfkitbenchmarker.__name__,
_STATIC_VERSION_FILE)
except IOError:
# Could not determine version.
return 'unknown'
return version.rstrip('\n')
VERSION = _GetVersion()
| 34.387755
| 74
| 0.673591
|
85cbda925354a1f04106e530720ae8f32525526f
| 421
|
py
|
Python
|
example_metrics.py
|
preact/preact-python
|
d2464a206a7e7733889cc9bfe965988218199674
|
[
"MIT"
] | 1
|
2017-12-03T09:00:24.000Z
|
2017-12-03T09:00:24.000Z
|
example_metrics.py
|
preact/preact-python
|
d2464a206a7e7733889cc9bfe965988218199674
|
[
"MIT"
] | null | null | null |
example_metrics.py
|
preact/preact-python
|
d2464a206a7e7733889cc9bfe965988218199674
|
[
"MIT"
] | null | null | null |
from preact import log_metrics_bulk
metricsdata = {
'data': [
{
'account_id': '517eda23c82561f72a000005',
'name': 'total-gigabytes-utilized',
'value': 691.751,
'ts': 1428527783
},
{
'account_id': '533dd27f4443aec9e4000001',
'name': 'total-gigabytes-utilized',
'value': 913.751,
'ts': 1428527783
}
]
}
resp = log_metrics_bulk(metricsdata)
print(resp)
| 18.304348
| 47
| 0.60095
|
a3b48af6cfe177d33c6fb49425812453c005fcc7
| 1,235
|
py
|
Python
|
tasks.py
|
shad0w008/Scanver
|
cbe03d38422641e83275bfbc8d1ae93f42c3f387
|
[
"Apache-2.0"
] | 22
|
2019-01-25T02:39:08.000Z
|
2021-09-19T06:44:20.000Z
|
tasks.py
|
killvxk/Scanver
|
b23ac4753241f95e28587b82ae66672dd3eb6b7e
|
[
"Apache-2.0"
] | null | null | null |
tasks.py
|
killvxk/Scanver
|
b23ac4753241f95e28587b82ae66672dd3eb6b7e
|
[
"Apache-2.0"
] | 13
|
2019-06-11T09:32:34.000Z
|
2021-06-28T15:58:32.000Z
|
#!/usr/bin/env python3
# encoding=utf-8
#codeby 道长且阻
#email @ydhcui/QQ664284092
from core import scan
from service import app,TaskManage
@TaskManage.task(tasktype='-1')
def handwork():
    '''Manual entry'''
pass
@TaskManage.task(tasktype='-1')
def automatic():
    '''Batch import'''
pass
@TaskManage.task(tasktype='host')
def portscan(Q):
    '''Asset scan'''
s = scan.ServiceScan(Q)
s.start()
@TaskManage.task(tasktype='host')
def hostscan(Q):
'''主机扫描'''
s = scan.HostsScan(Q)
s.start()
@TaskManage.task(tasktype='host')
def pluginscan(Q):
'''插件扫描'''
s = scan.PluginsScan(Q)
s.start()
@TaskManage.task(tasktype='web')
def bugscan(Q):
'''网站扫描'''
s = scan.HttpScan(Q)
s.start()
#@TaskManage.task(tasktype='web')
def domainscan(Q):
    '''Domain scan'''
s = scan.DomainScan(Q)
s.start()
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
cmd = sys.argv[1]
if cmd == 'init':
TaskManage.load()
TaskManage.init()
elif cmd == 'start':
TaskManage.loop()
else:
app.start()
else:
print('''scanol start ...
cmd: python3 tasks.py init|start|worker <celery argv>
''')
| 18.712121
| 61
| 0.567611
|
ce4051a44f2fae8f90e26b773ffdccc8e5062477
| 2,184
|
py
|
Python
|
entity/models.py
|
userlocalhost/airone-1
|
8aabeabb65fd2117876380f1f69a04f0cf39889d
|
[
"MIT"
] | null | null | null |
entity/models.py
|
userlocalhost/airone-1
|
8aabeabb65fd2117876380f1f69a04f0cf39889d
|
[
"MIT"
] | null | null | null |
entity/models.py
|
userlocalhost/airone-1
|
8aabeabb65fd2117876380f1f69a04f0cf39889d
|
[
"MIT"
] | null | null | null |
from django.db import models
from airone.lib.acl import ACLObjType
from acl.models import ACLBase
from webhook.models import Webhook
class EntityAttr(ACLBase):
# This parameter is needed to make a relationship to the corresponding Entity at importing
parent_entity = models.ForeignKey("Entity", on_delete=models.SET_NULL, null=True)
type = models.IntegerField(default=0)
is_mandatory = models.BooleanField(default=False)
referral = models.ManyToManyField(ACLBase, default=[], related_name="referred_attr_base")
index = models.IntegerField(default=0)
    # When this parameter is set, all entries which are related to the parent_entity will be analyzed
# at the dashboard of entity
is_summarized = models.BooleanField(default=False)
    # When an entry is deleted, another entry that is referred to from this Attribute will also be deleted,
    # if this parameter is set.
is_delete_in_chain = models.BooleanField(default=False)
def __init__(self, *args, **kwargs):
super(ACLBase, self).__init__(*args, **kwargs)
self.objtype = ACLObjType.EntityAttr
def is_updated(self, name, is_mandatory, is_delete_in_chain, index, refs):
# checks each parameters that are different between current object parameters
if (
self.name != name
or self.is_mandatory != is_mandatory
or self.is_delete_in_chain != is_delete_in_chain
or self.index != int(index)
or sorted([x.id for x in self.referral.all()]) != sorted(refs)
):
return True
# This means that all specified parameters are same with current object's ones.
return False
class Entity(ACLBase):
STATUS_TOP_LEVEL = 1 << 0
STATUS_CREATING = 1 << 1
STATUS_EDITING = 1 << 2
note = models.CharField(max_length=200)
attrs = models.ManyToManyField(EntityAttr)
    # This indicates information about where to send requests for notification
webhooks = models.ManyToManyField(Webhook, default=[], related_name="registered_entity")
def __init__(self, *args, **kwargs):
super(Entity, self).__init__(*args, **kwargs)
self.objtype = ACLObjType.Entity
| 37.655172
| 99
| 0.702381
|
334e5746085f8fc60a167720a3f39ee1ba379b78
| 513
|
py
|
Python
|
setup.py
|
akush07/Forest-Fire-Prediction
|
37d152dc96f16e1adaaaf6c06d3959b5aaa6807c
|
[
"MIT"
] | null | null | null |
setup.py
|
akush07/Forest-Fire-Prediction
|
37d152dc96f16e1adaaaf6c06d3959b5aaa6807c
|
[
"MIT"
] | null | null | null |
setup.py
|
akush07/Forest-Fire-Prediction
|
37d152dc96f16e1adaaaf6c06d3959b5aaa6807c
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='The Aim is to predict forest fire before it happens based on dataset that contains tree observations from four areas of the Roosevelt National Forest in Colorado. All observations are cartographic variables (no remote sensing) from 30 meter x 30 meter sections of forest. There are over half a million measurements total!',
author='Abhishek Kushwaha',
license='MIT',
)
| 46.636364
| 340
| 0.758285
|
b18ddc38a345da07f5b59e6d0544f92b041721a1
| 367
|
py
|
Python
|
{{cookiecutter.package_name}}/src/{{cookiecutter.app_slug}}/models.py
|
StuartMacKay/cookiecutter-django-library
|
230550d690b6321f283fa73992953a982ec83c86
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.package_name}}/src/{{cookiecutter.app_slug}}/models.py
|
StuartMacKay/cookiecutter-django-library
|
230550d690b6321f283fa73992953a982ec83c86
|
[
"BSD-3-Clause"
] | 3
|
2020-10-06T13:46:18.000Z
|
2020-10-06T13:56:02.000Z
|
{{cookiecutter.package_name}}/src/{{cookiecutter.app_slug}}/models.py
|
StuartMacKay/cookiecutter-django-library
|
230550d690b6321f283fa73992953a982ec83c86
|
[
"BSD-3-Clause"
] | null | null | null |
{% if cookiecutter.create_copyright_notice == "y" %}
{%- include "resources/copyright/header.txt" %}
{% endif -%}
from django.db import models
from django.utils.translation import gettext_lazy as _
{% if cookiecutter.create_project == "y" %}
class Example(models.Model):
name = models.CharField(verbose_name=_("Name"), max_length=32, blank=True)
{%- endif %}
| 30.583333
| 78
| 0.713896
|
ab7470f2e871220c3ced7b21bfa03b033d46f191
| 636
|
py
|
Python
|
LeetCodeSolutions/python/301_Remove_Invalid_Parentheses.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | 1
|
2017-03-27T13:38:37.000Z
|
2017-03-27T13:38:37.000Z
|
LeetCodeSolutions/python/301_Remove_Invalid_Parentheses.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
LeetCodeSolutions/python/301_Remove_Invalid_Parentheses.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
class Solution(object):
def removeInvalidParentheses(self, s):
"""
:type s: str
:rtype: List[str]
"""
def is_valid(s):
ctr = 0
for c in s:
if c == '(':
ctr += 1
elif c == ')':
ctr -= 1
if ctr < 0:
return False
return ctr == 0
level = {s}
while True:
            valid = list(filter(is_valid, level))
if valid:
return valid
level = {s[:i] + s[i + 1:] for s in level for i in range(len(s))}
| 26.5
| 77
| 0.361635
|
f32caa55c950d356164aaa87c397651f6918ed44
| 1,087
|
py
|
Python
|
tests/utils.py
|
miracum/ahd2fhir
|
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
|
[
"Apache-2.0"
] | 3
|
2021-11-23T16:24:21.000Z
|
2022-03-30T07:59:03.000Z
|
tests/utils.py
|
miracum/ahd2fhir
|
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
|
[
"Apache-2.0"
] | 40
|
2021-05-27T14:26:33.000Z
|
2022-03-29T14:29:33.000Z
|
tests/utils.py
|
miracum/ahd2fhir
|
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
|
[
"Apache-2.0"
] | 1
|
2021-06-30T11:11:01.000Z
|
2021-06-30T11:11:01.000Z
|
import json
from typing import Callable
from fhir.resources.attachment import Attachment
from fhir.resources.documentreference import DocumentReference, DocumentReferenceContent
from fhir.resources.reference import Reference
def get_empty_document_reference():
docref = DocumentReference.construct()
docref.status = "current"
cnt = DocumentReferenceContent.construct()
cnt.attachment = Attachment.construct()
docref.content = [cnt]
subject_ref = Reference.construct()
subject_ref.reference = "Patient/Test"
subject_ref.type = "Patient"
docref.subject = subject_ref
docref.id = "empty-document"
return docref
def map_resources(ahd_json_path: str, ahd_type: str, func: Callable) -> list:
with open(f"tests/resources/ahd/{ahd_json_path}") as file:
ahd_payload = json.load(file)
resources = []
for val in ahd_payload:
if val["type"] == ahd_type:
resource = func(val, get_empty_document_reference())
if resource is not None:
resources.append(resource)
return resources
| 31.057143
| 88
| 0.713891
|
ec934d694356fb09414b8fd7fc12be50aa472e2a
| 496
|
py
|
Python
|
yumi-control/client/yumi_control/yumi_utils.py
|
0aqz0/egm-control
|
f139980d11f43ae05c306d84dba5b581bf440a51
|
[
"MIT"
] | 1
|
2022-03-30T22:57:01.000Z
|
2022-03-30T22:57:01.000Z
|
yumi-control/client/yumi_control/yumi_utils.py
|
0aqz0/egm-control
|
f139980d11f43ae05c306d84dba5b581bf440a51
|
[
"MIT"
] | 1
|
2021-11-25T12:11:50.000Z
|
2021-11-25T12:11:50.000Z
|
yumi-control/client/yumi_control/yumi_utils.py
|
0aqz0/yumi-control
|
f139980d11f43ae05c306d84dba5b581bf440a51
|
[
"MIT"
] | null | null | null |
import numpy as np
from yumipy import YuMiState
def rad2degree(angle):
return angle * 180.0 / np.pi
def clamp_to_limits(joint_angle, lower_bound, upper_bound):
return np.clip(joint_angle, lower_bound, upper_bound)
def set_new_target(target):
state = YuMiState()
state.joint1 = target[0]
state.joint2 = target[1]
state.joint7 = target[2]
state.joint3 = target[3]
state.joint4 = target[4]
state.joint5 = target[5]
state.joint6 = target[6]
return state
| 26.105263
| 59
| 0.699597
|
7ef21108459851cad510a01f8a916f537ad86df1
| 2,523
|
py
|
Python
|
ratings/migrations/0001_initial.py
|
vivekthedev/djangosnippets.org
|
1bb3446de36ec5e4135dc9946eb8902875890a6c
|
[
"BSD-3-Clause"
] | 244
|
2016-08-26T05:25:48.000Z
|
2022-03-17T22:51:58.000Z
|
ratings/migrations/0001_initial.py
|
vivekthedev/djangosnippets.org
|
1bb3446de36ec5e4135dc9946eb8902875890a6c
|
[
"BSD-3-Clause"
] | 193
|
2016-08-25T15:38:20.000Z
|
2022-03-01T11:02:22.000Z
|
ratings/migrations/0001_initial.py
|
vivekthedev/djangosnippets.org
|
1bb3446de36ec5e4135dc9946eb8902875890a6c
|
[
"BSD-3-Clause"
] | 67
|
2016-10-22T10:43:09.000Z
|
2022-03-23T23:15:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("contenttypes", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="RatedItem",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("score", models.FloatField(default=0, db_index=True)),
("hashed", models.CharField(max_length=40, editable=False, db_index=True)),
("object_id", models.IntegerField()),
(
"content_type",
models.ForeignKey(
related_name="rated_items",
to="contenttypes.ContentType",
on_delete=models.CASCADE,
),
),
(
"user",
models.ForeignKey(
related_name="rateditems",
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
),
),
],
options={
"abstract": False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name="SimilarItem",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("object_id", models.IntegerField()),
("similar_object_id", models.IntegerField()),
("score", models.FloatField(default=0)),
(
"content_type",
models.ForeignKey(
related_name="similar_items",
to="contenttypes.ContentType",
on_delete=models.CASCADE,
),
),
(
"similar_content_type",
models.ForeignKey(
related_name="similar_items_set",
to="contenttypes.ContentType",
on_delete=models.CASCADE,
),
),
],
options={},
bases=(models.Model,),
),
]
| 34.561644
| 114
| 0.45541
|
f5f85a4d348e2e4ba00458cd3e4e25d56fe1f2f4
| 111,332
|
py
|
Python
|
virtual/lib/python3.8/site-packages/django/db/models/sql/query.py
|
ShirQUillE-SandE/the-neighborhood-101
|
fda09cb0481d1cd902f5e13b7ed61ed96772121d
|
[
"MIT"
] | 5
|
2021-09-05T16:11:12.000Z
|
2022-03-20T12:28:42.000Z
|
virtual/lib/python3.8/site-packages/django/db/models/sql/query.py
|
ShirQUillE-SandE/the-neighborhood-101
|
fda09cb0481d1cd902f5e13b7ed61ed96772121d
|
[
"MIT"
] | 21
|
2021-02-04T01:37:44.000Z
|
2022-03-12T01:00:55.000Z
|
virtual/lib/python3.8/site-packages/django/db/models/sql/query.py
|
ShirQUillE-SandE/the-neighborhood-101
|
fda09cb0481d1cd902f5e13b7ed61ed96772121d
|
[
"MIT"
] | 27
|
2021-11-10T08:44:10.000Z
|
2022-03-30T08:19:46.000Z
|
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import inspect
import sys
import warnings
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function')
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_for_no_key_update = False
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
self.annotations = {} # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
self._filtered_relations = {}
self.explain_query = False
self.explain_format = None
self.explain_options = {}
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, 'target', None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@property
def has_select_fields(self):
return bool(self.select or self.annotation_select_mask or self.extra_select_mask)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
Return a copy of the current Query. A lightweight alternative to
        deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
obj.combined_queries = tuple(query.clone() for query in self.combined_queries)
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property
try:
del obj.base_table
except AttributeError:
pass
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
# Its already a Ref to subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
else:
# Reuse aliases of expressions already selected in subquery.
for col_alias, selected_annotation in self.annotation_select.items():
if selected_annotation is expr:
new_expr = Ref(col_alias, expr)
break
else:
                    # An expression that is not selected in the subquery.
if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference column or another aggregate. Select it
# under a non-conflicting alias.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_expr = Ref(col_alias, expr)
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = [
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
]
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
self.distinct or self.combinator):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
if not self.is_sliced and not self.distinct_fields:
# Queries with distinct_fields need ordering and when a limit
# is applied we must take the slice from the ordered query.
# Otherwise no need for ordering.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
has_existing_aggregate_annotations = any(
annotation for annotation in existing_annotations
if getattr(annotation, 'contains_aggregate', True)
)
if inner_query.default_cols and has_existing_aggregate_annotations:
inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.alias_map}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
annotation_select_mask = inner_query.annotation_select_mask
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None] * len(outer_query.annotation_select)
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
def has_filters(self):
return self.where
def exists(self, using, limit=True):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == 'union':
limit_combined = connections[using].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
for combined_query in q.combined_queries
)
q.clear_ordering(True)
if limit:
q.set_limits(high=1)
q.add_extra({'a': 1}, None, None, None, None, None)
q.set_extra_mask(['a'])
return q
def has_results(self, using):
q = self.exists(using)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
q.explain_query = True
q.explain_format = format
q.explain_options = options
compiler = q.get_compiler(using=using)
return '\n'.join(compiler.explain_query())
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert not self.is_sliced, \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Combine subquery aliases to ensure alias relabelling properly
# handles subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
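# A rough example (hypothetical ``Book`` model assumed): combine() backs the
# QuerySet & and | operators, e.g.
#     Book.objects.filter(author__name='A') | Book.objects.filter(rating__gte=4)
# merges the right-hand query's joins and WHERE tree into the left-hand query
# with an OR connector, reusing joins where possible and letting the
# JoinPromoter decide which of them must become LEFT OUTER joins.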
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
if name in self._filtered_relations:
name = self._filtered_relations[name].relation_name
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.local_fields:
if field not in values:
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
seen.setdefault(model, set())
for model, values in seen.items():
callback(target, model, values)
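# For illustration (hypothetical ``Book`` model assumed): for
#     Book.objects.defer('summary')
# deferred_loading is ({'summary'}, True) and the callback receives every
# concrete Book field except ``summary``; for Book.objects.only('title') it is
# ({'title'}, False) and the callback receives only the primary key and
# ``title``.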
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = filtered_relation.alias if filtered_relation is not None else table_name
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Promote recursively the join type of given aliases and its children to
an outer join. Only promote the join if it is nullable or the parent
join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
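# A rough example: with an OR such as
#     Book.objects.filter(Q(chapter__title='x') | Q(title='y'))
# the chapter join is promoted to LEFT OUTER so books without chapters can
# still match the second branch, while a plain AND filter on the same relation
# would keep (or demote it back to) an INNER join.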
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, outer_query):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
self.change_aliases({
alias: '%s%d' % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
})
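# For illustration: when a subquery's aliases could clash with the outer
# query's (both default to the 'T' prefix), bump_prefix() picks the next free
# prefix and relabels the subquery's aliases, e.g. T1, T2, ... become U0, U1, ...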
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None, reuse_with_filtered_relation=False):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a
sql.datastructures.BaseTable or Join.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
The 'reuse_with_filtered_relation' parameter is used when computing
FilteredRelation instances.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
if reuse_with_filtered_relation and reuse:
reuse_aliases = [
a for a, j in self.alias_map.items()
if a in reuse and j.equals(join, with_filtered_relation=False)
]
else:
reuse_aliases = [
a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None, this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
# It's safe to drop ordering if the queryset isn't using slicing,
# distinct(*fields) or select_for_update().
if (self.low_mark == 0 and self.high_mark is None and
not self.distinct_fields and
not self.select_for_update):
clone.clear_ordering(True)
clone.where.resolve_expression(query, *args, **kwargs)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, 'external_aliases'):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
(isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or
(isinstance(table, BaseTable) and table.table_name != table.table_alias)
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def as_sql(self, compiler, connection):
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = '(%s)' % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(
self, reuse=can_reuse, allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, '_make'): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
'Invalid lookup "%s" for model "%s".' %
(lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
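# For illustration: for a lookup string such as 'author__name__icontains'
# (with no matching annotation), solve_lookup_type() returns roughly
#     (['icontains'], ['author', 'name'], False)
# i.e. the trailing lookup parts, the field path, and no annotation expression.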
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (isinstance(value, Query) and not value.has_select_fields and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if (
hasattr(expression, 'resolve_expression') and
not getattr(expression, 'filterable', True)
):
raise NotSupportedError(
expression.__class__.__name__ + ' is disallowed in the filter '
'clause.'
)
if hasattr(expression, 'get_source_expressions'):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The 'lookups' argument is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ['exact']
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
if lhs.field.is_relation:
raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = 'exact'
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup('isnull')(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookup_name == 'exact' and lookup.rhs == ''):
return lhs.get_lookup('isnull')(lhs, True)
return lookup
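# A rough example: for lookups ['date', 'gte'] on a DateTimeField column,
# build_lookup() first applies the 'date' transform to the lhs and then
# returns a 'gte' lookup instance comparing the transformed lhs against rhs;
# a bare ``None`` rhs is rewritten to an 'isnull' lookup as noted above.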
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for the name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups())
if suggested_lookups:
suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups)
else:
suggestion = '.'
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, allow_joins=True, split_subq=True,
reuse_with_filtered_relation=False, check_filterable=True):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
Note that add_filter will not do any negating itself; that is done
higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
will be reused.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
if hasattr(filter_expr, 'resolve_expression'):
if not getattr(filter_expr, 'conditional', False):
raise TypeError('Cannot filter against a non-conditional expression.')
condition = self.build_lookup(
['exact'], filter_expr.resolve_expression(self, allow_joins=allow_joins), True
)
clause = self.where_class()
clause.add(condition, AND)
return clause, []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}
if check_filterable:
self.check_filterable(value)
clause = self.where_class()
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None:
require_outer = True
if lookup_type != 'isnull':
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup('isnull')
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup('isnull')
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.)
existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True,
check_filterable=True):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, allow_joins=allow_joins,
split_subq=split_subq, check_filterable=check_filterable,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child, reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child, can_reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True, split_subq=False,
reuse_with_filtered_relation=True,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(filtered_relation.relation_name)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
filtered_relation = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts, opts, allow_many, fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted([
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
])
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info(filtered_relation)
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
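# For illustration (hypothetical ``Book`` model with an ``author`` FK assumed):
# names_to_path(['author', 'name', 'icontains'], opts) returns the PathInfo
# for the Book -> Author join, Author.name as both final field and target, and
# ['icontains'] as the unresolved remainder (a likely lookup).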
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
reuse_with_filtered_relation=False):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None, in which case all joins are reusable, or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
The 'reuse_with_filtered_relation' flag restricts join reuse to the
'can_reuse' set; it is used when resolving FilteredRelation conditions.
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot], opts, allow_many, fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(transform, name=name, previous=final_transformer)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(
opts.db_table, alias, table_alias, INNER, join.join_field,
nullable, filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
alias = self.join(
connection, reuse=reuse,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contains the PathInfos
used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
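# For illustration: for Book.objects.filter(author__id=1), setup_joins() first
# prepares the Book -> Author join, but trim_joins() notices the target column
# is already available locally as book.author_id, unrefs the join, and the
# filter compares book.author_id directly.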
@classmethod
def _gen_cols(cls, exprs, include_external=False):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(getattr(expr, 'get_external_cols', None)):
yield from expr.get_external_cols()
else:
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
'Joined field references are not permitted in '
'this query'
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse)
targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if not allow_joins and len(join_list) > 1:
raise FieldError('Joined field references are not permitted in this query')
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_expr = (filter_lhs, OuterRef(filter_rhs))
elif isinstance(filter_rhs, F):
filter_expr = (filter_lhs, OuterRef(filter_rhs.name))
# Generate the inner query.
query = Query(self.model)
query._filtered_relations = self._filtered_relations
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to keep the subquery as simple as possible -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup('exact')
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
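# For illustration: Parent.objects.exclude(child__name='foo') can't be
# expressed with a plain JOIN (a parent may have other children), so
# split_exclude() builds the NOT EXISTS subquery from the docstring above and,
# when the trimmed prefix crosses a LEFT OUTER join, ORs in an IS NULL branch.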
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
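# For illustration: queryset slicing composes through set_limits(), e.g.
#     qs[10:30][5:10]
# first sets (low_mark, high_mark) = (10, 30); the second slice then clamps to
# (15, 20). Equal marks collapse the query to an empty result via set_empty().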
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += col,
self.values_select += name,
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there are no existing joins, use an outer join.
join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted([
*get_field_names_from_opts(opts), *self.extra,
*self.annotation_select, *self._filtered_relations
])
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if '.' in item and ORDER_PATTERN.match(item):
warnings.warn(
'Passing raw column aliases to order_by() is '
'deprecated. Wrap %r in a RawSQL expression before '
'passing it to order_by().' % item,
category=RemovedInDjango40Warning,
stacklevel=3,
)
continue
if item == '?':
continue
if item.startswith('-'):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
# names_to_path() validates the lookup. A descriptive
# FieldError will be raised if it's not.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, 'resolve_expression'):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Remove any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = ()
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update({
field.column
for field in model._meta.local_concrete_fields
})
seen_models.add(model)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
signature = inspect.signature(annotation.get_group_by_cols)
if 'alias' not in signature.parameters:
annotation_class = annotation.__class__
msg = (
'`alias=None` must be added to the signature of '
'%s.%s.get_group_by_cols().'
) % (annotation_class.__module__, annotation_class.__qualname__)
warnings.warn(msg, category=RemovedInDjango40Warning)
group_by_cols = annotation.get_group_by_cols()
else:
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set data structure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
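# For illustration (hypothetical ``Book`` model assumed): chained deferrals
# update deferred_loading incrementally, e.g.
#     Book.objects.defer('summary').only('title')
# ends with ({'title'}, False): only() replaces the immediate-load set while
# removing any names that were already deferred.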
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items()
if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map
if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
return (
connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
field.empty_strings_allowed
) or field.null
def get_order_dir(field, default='ASC'):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
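# Illustrative note (not part of the original module): under the default ordering,
# get_order_dir('-pub_date') returns ('pub_date', 'DESC') while
# get_order_dir('pub_date') returns ('pub_date', 'ASC'); 'pub_date' is just a
# hypothetical field name.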
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if rel_a join doesn't produce any results is null (for example
# reverse foreign key or null value in direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
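# Illustrative sketch (not part of the original module) of the voting logic above,
# assuming a filter such as Q(rel_a__col=1) | Q(rel_b__col=2): the OR clause has
# num_children == 2 and each of the rel_a/rel_b joins collects a single vote, so
# votes < num_children and both joins are promoted to LEFT OUTER. If a later AND
# condition (for example rel_a__col__gte=0) votes for rel_a again, that join is
# demoted back to INNER, exactly as the comments above describe.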
| 44.657842
| 119
| 0.607139
|
05eb26966dc97e6f024460ad70bd374d28990323
| 1,564
|
py
|
Python
|
pymnn/pip_package/MNN/nn/__init__.py
|
stephehuang/MNN
|
e8d9ee89aca3e8247745fb89b338eca2ad9b583d
|
[
"Apache-2.0"
] | 3
|
2020-09-26T03:40:17.000Z
|
2021-12-26T06:58:11.000Z
|
pymnn/pip_package/MNN/nn/__init__.py
|
stephehuang/MNN
|
e8d9ee89aca3e8247745fb89b338eca2ad9b583d
|
[
"Apache-2.0"
] | 2
|
2020-06-19T08:04:43.000Z
|
2020-10-23T03:34:44.000Z
|
pymnn/pip_package/MNN/nn/__init__.py
|
stephehuang/MNN
|
e8d9ee89aca3e8247745fb89b338eca2ad9b583d
|
[
"Apache-2.0"
] | 1
|
2021-06-06T06:54:53.000Z
|
2021-06-06T06:54:53.000Z
|
from _mnncengine._nn import *
import _mnncengine._expr as _F
import _mnncengine._nn as _nn
def load_module_from_file(file_name, for_training):
m = _F.load_as_dict(file_name)
inputs_outputs = _F.get_inputs_and_outputs(m)
inputs = []
for key in inputs_outputs[0].keys():
inputs.append(inputs_outputs[0][key])
outputs = []
for key in inputs_outputs[1].keys():
outputs.append(inputs_outputs[1][key])
module = _nn.load_module(inputs, outputs, for_training)
return module
class Module(_nn._Module):
def __init__(self):
super(Module, self).__init__()
self._children = {}
def forward(self, x):
raise NotImplementedError
def __call__(self, x):
raise NotImplementedError("__call__ not implemented, please use 'forward' method in subclasses")
def __setattr__(self, name, value):
self.__dict__[name] = value
def remove_from(dicts):
if name in dicts:
del dicts[name]
if isinstance(value, (Module, _nn._Module)):
remove_from(self._children)
value.set_name(name)
self._children[name] = value
self._register_submodules([value])
return
class FixModule(object):
def __init__(self, module):
super(FixModule, self).__init__()
self.module = module
def forward(self, x):
self.module.train(False)
return self.module.forward(x)
def __call__(self, x):
self.module.train(False)
return self.module(x)
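# Minimal usage sketch (not part of the original module); "model.mnn" and
# some_input_var are hypothetical placeholders, and only the helpers defined
# above are used:
#
#     net = load_module_from_file("model.mnn", for_training=False)
#     frozen = FixModule(net)          # forces evaluation mode on every call
#     output = frozen(some_input_var)  # some_input_var: an MNN expression/Var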
| 25.225806
| 104
| 0.63491
|
06293df65eda2c5a0384f0bf78ed2766aca79169
| 1,772
|
py
|
Python
|
usage.py
|
mdettling/dns-tools
|
9a7edc6835bf5800bb52d3d40fbcc8f48b3841ea
|
[
"MIT"
] | null | null | null |
usage.py
|
mdettling/dns-tools
|
9a7edc6835bf5800bb52d3d40fbcc8f48b3841ea
|
[
"MIT"
] | null | null | null |
usage.py
|
mdettling/dns-tools
|
9a7edc6835bf5800bb52d3d40fbcc8f48b3841ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 devsecurity.io <dns-tools@devsecurity.io>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import glob
def rchop(string, suffix):
if string.endswith(suffix):
return string[:-len(suffix)]
return string
def main():
sys.stderr.write("Usage: docker run --rm -i -v <local volume>:<container volume> devsecurity/dns-tools:<tag> <command>\n\n")
sys.stderr.write("Tags:\n")
sys.stderr.write("\tlatest\n\n")
sys.stderr.write("Commands:\n")
command_files = glob.glob("*.py")
command_files.remove("usage.py")
commands = [ rchop(x, ".py") for x in command_files ]
for command in commands:
sys.stderr.write("\t%s\n" % command)
if __name__ == "__main__":
main()
| 33.433962
| 128
| 0.73307
|
45647ba876b6668d6a6db51437e11e7709c845aa
| 13,139
|
py
|
Python
|
optuna/multi_objective/trial.py
|
majiang/optuna
|
aeeb201f0ab362c47a580dc2e4ed7346bd8501fe
|
[
"MIT"
] | null | null | null |
optuna/multi_objective/trial.py
|
majiang/optuna
|
aeeb201f0ab362c47a580dc2e4ed7346bd8501fe
|
[
"MIT"
] | null | null | null |
optuna/multi_objective/trial.py
|
majiang/optuna
|
aeeb201f0ab362c47a580dc2e4ed7346bd8501fe
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
from optuna import multi_objective
from optuna._deprecated import deprecated
from optuna._study_direction import StudyDirection
from optuna.distributions import BaseDistribution
from optuna.trial import FrozenTrial
from optuna.trial import Trial
from optuna.trial import TrialState
CategoricalChoiceType = Union[None, bool, int, float, str]
@deprecated("2.4.0", "4.0.0")
class MultiObjectiveTrial(object):
"""A trial is a process of evaluating an objective function.
This object is passed to an objective function and provides interfaces to get parameter
suggestion, manage the trial's state, and set/get user-defined attributes of the trial.
Note that the direct use of this constructor is not recommended.
This object is seamlessly instantiated and passed to the objective function behind
the :func:`optuna.multi_objective.study.MultiObjectiveStudy.optimize()` method;
hence library users do not care about instantiation of this object.
Args:
trial:
A :class:`~optuna.trial.Trial` object.
"""
def __init__(self, trial: Trial):
self._trial = trial
# TODO(ohta): Optimize the code below to eliminate the `MultiObjectiveStudy` construction.
# See also: https://github.com/optuna/optuna/pull/1054/files#r407982636
self._n_objectives = multi_objective.study.MultiObjectiveStudy(trial.study).n_objectives
def suggest_float(
self,
name: str,
low: float,
high: float,
*,
step: Optional[float] = None,
log: bool = False,
) -> float:
"""Suggest a value for the floating point parameter.
Please refer to the documentation of :func:`optuna.trial.Trial.suggest_float`
for further details.
"""
return self._trial.suggest_float(name, low, high, step=step, log=log)
def suggest_uniform(self, name: str, low: float, high: float) -> float:
"""Suggest a value for the continuous parameter.
Please refer to the documentation of :func:`optuna.trial.Trial.suggest_uniform`
for further details.
"""
return self._trial.suggest_uniform(name, low, high)
def suggest_loguniform(self, name: str, low: float, high: float) -> float:
"""Suggest a value for the continuous parameter.
Please refer to the documentation of :func:`optuna.trial.Trial.suggest_loguniform`
for further details.
"""
return self._trial.suggest_loguniform(name, low, high)
def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float:
"""Suggest a value for the discrete parameter.
Please refer to the documentation of :func:`optuna.trial.Trial.suggest_discrete_uniform`
for further details.
"""
return self._trial.suggest_discrete_uniform(name, low, high, q)
def suggest_int(self, name: str, low: int, high: int, step: int = 1, log: bool = False) -> int:
"""Suggest a value for the integer parameter.
Please refer to the documentation of :func:`optuna.trial.Trial.suggest_int`
for further details.
"""
return self._trial.suggest_int(name, low, high, step=step, log=log)
def suggest_categorical(
self, name: str, choices: Sequence[CategoricalChoiceType]
) -> CategoricalChoiceType:
"""Suggest a value for the categorical parameter.
Please refer to the documentation of :func:`optuna.trial.Trial.suggest_categorical`
for further details.
"""
return self._trial.suggest_categorical(name, choices)
def report(self, values: Sequence[float], step: int) -> None:
"""Report intermediate objective function values for a given step.
The reported values are used by the pruners to determine whether this trial should be
pruned.
.. seealso::
Please refer to :class:`~optuna.pruners.BasePruner`.
.. note::
The reported values are converted to ``float`` type by applying ``float()``
function internally. Thus, it accepts all float-like types (e.g., ``numpy.float32``).
If the conversion fails, a ``TypeError`` is raised.
Args:
values:
Intermediate objective function values for a given step.
step:
Step of the trial (e.g., Epoch of neural network training).
"""
# TODO(ohta): Allow users reporting a subset of target values.
# See https://github.com/optuna/optuna/pull/1054/files#r401594785 for the detail.
if len(values) != self._n_objectives:
            raise ValueError(
                "The number of the intermediate values {} at step {} is mismatched with "
                "the number of the objectives {}.".format(len(values), step, self._n_objectives)
            )
for i, value in enumerate(values):
self._trial.report(value, self._n_objectives * (step + 1) + i)
def _report_complete_values(self, values: Sequence[float]) -> None:
if len(values) != self._n_objectives:
            raise ValueError(
                "The number of the values {} is mismatched with "
                "the number of the objectives {}.".format(len(values), self._n_objectives)
            )
for i, value in enumerate(values):
self._trial.report(value, i)
def set_user_attr(self, key: str, value: Any) -> None:
"""Set user attributes to the trial.
Please refer to the documentation of :func:`optuna.trial.Trial.set_user_attr`
for further details.
"""
self._trial.set_user_attr(key, value)
def set_system_attr(self, key: str, value: Any) -> None:
"""Set system attributes to the trial.
Please refer to the documentation of :func:`optuna.trial.Trial.set_system_attr`
for further details.
"""
self._trial.set_system_attr(key, value)
@property
def number(self) -> int:
"""Return trial's number which is consecutive and unique in a study.
Returns:
A trial number.
"""
return self._trial.number
@property
def params(self) -> Dict[str, Any]:
"""Return parameters to be optimized.
Returns:
A dictionary containing all parameters.
"""
return self._trial.params
@property
def distributions(self) -> Dict[str, BaseDistribution]:
"""Return distributions of parameters to be optimized.
Returns:
A dictionary containing all distributions.
"""
return self._trial.distributions
@property
def user_attrs(self) -> Dict[str, Any]:
"""Return user attributes.
Returns:
A dictionary containing all user attributes.
"""
return self._trial.user_attrs
@property
def system_attrs(self) -> Dict[str, Any]:
"""Return system attributes.
Returns:
A dictionary containing all system attributes.
"""
return self._trial.system_attrs
@property
def datetime_start(self) -> Optional[datetime]:
"""Return start datetime.
Returns:
Datetime where the :class:`~optuna.trial.Trial` started.
"""
return self._trial.datetime_start
# TODO(ohta): Add `to_single_objective` method.
# This method would be helpful to use the existing pruning
# integrations for multi-objective optimization.
def _get_values(self) -> List[Optional[float]]:
trial = self._trial.study._storage.get_trial(self._trial._trial_id)
return [trial.intermediate_values.get(i) for i in range(self._n_objectives)]
@deprecated("2.4.0", "4.0.0")
class FrozenMultiObjectiveTrial(object):
"""Status and results of a :class:`~optuna.multi_objective.trial.MultiObjectiveTrial`.
Attributes:
number:
Unique and consecutive number of
:class:`~optuna.multi_objective.trial.MultiObjectiveTrial` for each
:class:`~optuna.multi_objective.study.MultiObjectiveStudy`.
Note that this field uses zero-based numbering.
state:
:class:`~optuna.trial.TrialState` of the
:class:`~optuna.multi_objective.trial.MultiObjectiveTrial`.
values:
Objective values of the :class:`~optuna.multi_objective.trial.MultiObjectiveTrial`.
datetime_start:
Datetime where the :class:`~optuna.multi_objective.trial.MultiObjectiveTrial` started.
datetime_complete:
Datetime where the :class:`~optuna.multi_objective.trial.MultiObjectiveTrial` finished.
params:
Dictionary that contains suggested parameters.
distributions:
Dictionary that contains the distributions of :attr:`params`.
user_attrs:
Dictionary that contains the attributes of the
:class:`~optuna.multi_objective.trial.MultiObjectiveTrial` set with
:func:`optuna.multi_objective.trial.MultiObjectiveTrial.set_user_attr`.
intermediate_values:
Intermediate objective values set with
:func:`optuna.multi_objective.trial.MultiObjectiveTrial.report`.
"""
def __init__(self, n_objectives: int, trial: FrozenTrial):
self.n_objectives = n_objectives
self._trial = trial
self.values = tuple(trial.intermediate_values.get(i) for i in range(n_objectives))
intermediate_values: Dict[int, List[Optional[float]]] = {}
for key, value in trial.intermediate_values.items():
if key < n_objectives:
continue
step = key // n_objectives - 1
if step not in intermediate_values:
intermediate_values[step] = list(None for _ in range(n_objectives))
intermediate_values[step][key % n_objectives] = value
self.intermediate_values = {k: tuple(v) for k, v in intermediate_values.items()}
@property
def number(self) -> int:
return self._trial.number
@property
def _trial_id(self) -> int:
return self._trial._trial_id
@property
def state(self) -> TrialState:
return self._trial.state
@property
def datetime_start(self) -> Optional[datetime]:
return self._trial.datetime_start
@property
def datetime_complete(self) -> Optional[datetime]:
return self._trial.datetime_complete
@property
def params(self) -> Dict[str, Any]:
return self._trial.params
@property
def user_attrs(self) -> Dict[str, Any]:
return self._trial.user_attrs
@property
def system_attrs(self) -> Dict[str, Any]:
return self._trial.system_attrs
@property
def last_step(self) -> Optional[int]:
if len(self.intermediate_values) == 0:
return None
else:
return max(self.intermediate_values.keys())
@property
def distributions(self) -> Dict[str, BaseDistribution]:
return self._trial.distributions
def _dominates(
self,
other: "multi_objective.trial.FrozenMultiObjectiveTrial",
directions: List[StudyDirection],
) -> bool:
if len(self.values) != len(other.values):
raise ValueError("Trials with different numbers of objectives cannot be compared.")
if len(self.values) != len(directions):
raise ValueError(
"The number of the values and the number of the objectives are mismatched."
)
if self.state != TrialState.COMPLETE:
return False
if other.state != TrialState.COMPLETE:
return True
values0 = [_normalize_value(v, d) for v, d in zip(self.values, directions)]
values1 = [_normalize_value(v, d) for v, d in zip(other.values, directions)]
if values0 == values1:
return False
return all([v0 <= v1 for v0, v1 in zip(values0, values1)])
def __eq__(self, other: Any) -> bool:
if not isinstance(other, FrozenMultiObjectiveTrial):
return NotImplemented
return self._trial == other._trial
def __lt__(self, other: Any) -> bool:
if not isinstance(other, FrozenMultiObjectiveTrial):
return NotImplemented
return self._trial < other._trial
def __le__(self, other: Any) -> bool:
if not isinstance(other, FrozenMultiObjectiveTrial):
return NotImplemented
return self._trial <= other._trial
def __hash__(self) -> int:
return hash(self._trial)
# TODO(ohta): Implement `__repr__` method.
def _normalize_value(value: Optional[float], direction: StudyDirection) -> float:
if value is None:
value = float("inf")
if direction is StudyDirection.MAXIMIZE:
value = -value
return value
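# Illustrative note (not part of the original module): _normalize_value maps every
# objective onto a "smaller is better" scale so that _dominates() can compare trials
# uniformly; for example _normalize_value(None, StudyDirection.MINIMIZE) is
# float("inf") and _normalize_value(3.0, StudyDirection.MAXIMIZE) is -3.0.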
| 33.347716
| 99
| 0.643885
|
cd43012772e2d2ca2aed819c9c52c439b40a78aa
| 4,595
|
py
|
Python
|
snapflow/testing/utils.py
|
icedevml/snapflow
|
329dae3f8eaa70d3a26d38a505faeb45d8eecb57
|
[
"BSD-3-Clause"
] | null | null | null |
snapflow/testing/utils.py
|
icedevml/snapflow
|
329dae3f8eaa70d3a26d38a505faeb45d8eecb57
|
[
"BSD-3-Clause"
] | null | null | null |
snapflow/testing/utils.py
|
icedevml/snapflow
|
329dae3f8eaa70d3a26d38a505faeb45d8eecb57
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
import tempfile
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Dict, Iterator, List, Optional
from commonmodel.base import Schema, SchemaLike
from dcp.data_format.handler import get_handler_for_name, infer_schema_for_name
from dcp.data_format.formats.memory.records import PythonRecordsHandler
from dcp.storage.base import Storage
from dcp.storage.database.utils import get_tmp_sqlite_db_url
from dcp.utils.common import rand_str
from dcp.utils.data import read_csv, read_json, read_raw_string_csv
from pandas import DataFrame
from snapflow import DataBlock, Environment, Graph, _Snap
from snapflow.core.module import SnapflowModule
from snapflow.core.node import DataBlockLog, Node, SnapLog
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import select
def display_snap_log(env: Environment):
for dbl in env.md_api.execute(
select(DataBlockLog).order_by(DataBlockLog.created_at)
):
print(f"{dbl.snap_log.snap_key:30} {dbl.data_block_id:4} {dbl.direction}")
def str_as_dataframe(
env: Environment,
test_data: str,
module: Optional[SnapflowModule] = None,
nominal_schema: Optional[Schema] = None,
) -> DataFrame:
# TODO: add conform_dataframe_to_schema option
if test_data.endswith(".csv"):
if module is None:
raise
with module.open_module_file(test_data) as f:
raw_records = list(read_csv(f.readlines()))
elif test_data.endswith(".json"):
if module is None:
raise
with module.open_module_file(test_data) as f:
raw_records = [read_json(line) for line in f]
else:
# Raw str csv
raw_records = list(read_raw_string_csv(test_data))
tmp = "_test_obj_" + rand_str()
env._local_python_storage.get_api().put(tmp, raw_records)
if nominal_schema is None:
auto_schema = infer_schema_for_name(tmp, env._local_python_storage)
nominal_schema = auto_schema
else:
PythonRecordsHandler().cast_to_schema(
tmp, env._local_python_storage, nominal_schema
)
df = DataFrame.from_records(raw_records)
return df
@dataclass
class DataInput:
data: str
schema: Optional[SchemaLike] = None
module: Optional[SnapflowModule] = None
def as_dataframe(self, env: Environment):
schema = None
if self.schema:
schema = env.get_schema(self.schema)
return str_as_dataframe(
env, self.data, module=self.module, nominal_schema=schema
)
def get_schema_key(self) -> Optional[str]:
if not self.schema:
return None
if isinstance(self.schema, str):
return self.schema
return self.schema.key
@contextmanager
def produce_snap_output_for_static_input(
snap: _Snap,
params: Dict[str, Any] = None,
input: Any = None,
inputs: Any = None,
env: Optional[Environment] = None,
module: Optional[SnapflowModule] = None,
target_storage: Optional[Storage] = None,
upstream: Any = None, # TODO: DEPRECATED
) -> Iterator[List[DataBlock]]:
inputs = input or inputs or upstream
if env is None:
db = get_tmp_sqlite_db_url()
env = Environment(metadata_storage=db)
if target_storage:
target_storage = env.add_storage(target_storage)
with env.md_api.begin():
g = Graph(env)
input_datas = inputs
input_nodes: Dict[str, Node] = {}
pi = snap.get_interface()
if not isinstance(inputs, dict):
assert len(pi.get_non_recursive_inputs()) == 1
input_datas = {pi.get_non_recursive_inputs()[0].name: inputs}
for inpt in pi.inputs:
if inpt.from_self:
continue
assert inpt.name is not None
input_data = input_datas[inpt.name]
if isinstance(input_data, str):
input_data = DataInput(data=input_data)
n = g.create_node(
key=f"_input_{inpt.name}",
snap="core.import_dataframe",
params={
"dataframe": input_data.as_dataframe(env),
"schema": input_data.get_schema_key(),
},
)
input_nodes[inpt.name] = n
test_node = g.create_node(
key=f"{snap.name}", snap=snap, params=params, inputs=input_nodes
)
blocks = env.produce(
test_node, to_exhaustion=False, target_storage=target_storage
)
yield blocks
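# Hypothetical usage sketch (not part of the original module); my_snap and the raw
# CSV string are made-up placeholders, and only the helper defined above is used:
#
#     with produce_snap_output_for_static_input(
#         my_snap, input="a,b\n1,2\n3,4\n"
#     ) as blocks:
#         print(blocks)  # list of DataBlock objects produced by the snap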
| 34.548872
| 82
| 0.661371
|
ba088b116143638256294faf166bf093b5de23c4
| 8,582
|
py
|
Python
|
software_scripts/0-dnapi.py
|
19zhangt/sRNA_analysis_Maize
|
8a7688a30f02f0a86b05bd820a8c7d7d110b2767
|
[
"MIT"
] | 20
|
2016-10-18T13:33:57.000Z
|
2021-12-18T02:04:00.000Z
|
software_scripts/0-dnapi.py
|
19zhangt/sRNA_analysis_Maize
|
8a7688a30f02f0a86b05bd820a8c7d7d110b2767
|
[
"MIT"
] | 2
|
2016-10-19T21:06:39.000Z
|
2017-03-23T18:04:41.000Z
|
software_scripts/0-dnapi.py
|
19zhangt/sRNA_analysis_Maize
|
8a7688a30f02f0a86b05bd820a8c7d7d110b2767
|
[
"MIT"
] | 6
|
2016-11-10T04:23:14.000Z
|
2022-03-10T12:20:11.000Z
|
#! /usr/bin/env python
import sys
import os.path
import re
import uuid
import signal
import fileinput
import subprocess
from argparse import ArgumentParser
import dnapilib
from dnapilib.io_utils import get_file_obj
from dnapilib.apred import adapter_prediction
from dnapilib.apred import iterative_adapter_prediction
from dnapilib.exhaust import rm_temp_dir
from dnapilib.exhaust import fastq_input_prep
from dnapilib.exhaust import map_clean_reads
from dnapilib.exhaust import make_stats_report
TEMP_DIR = None
MAP_TO_GENOME = False
SAMPLE_NUM = 50000
def convert_interval(s_in, s_op, func):
"""Return range of kmers or filtering ratios.
"""
msg = "bad {}: {} {}"
try:
s = list(map(func, s_in.split(":")))
except:
raise Exception(msg.format("value", s_op, s_in))
if len(s) == 1:
return s
if len(s) == 3:
beg, end, interval = s
values = []
while beg < end:
values.append(beg)
beg += interval
values.append(end)
return values
else:
raise Exception(msg.format("interval", s_op, s_in))
def parse_args():
"""Return options and required arguments.
"""
parser = ArgumentParser(
usage="%(prog)s [options] FASTQ",
description="Predict or evaluate 3'adapter sequence(s)",
epilog="Report bug to: Junko Tsuji <jnktsj@gmail.com>")
parser.add_argument("FASTQ",
type=str,
help="including stdin or compressed file {zip,gz,tar,bz}")
parser.add_argument("--version", action="version",
version="%(prog)s {}".format(dnapilib.__version__))
predop = parser.add_argument_group("adapter prediction parameters")
predop.add_argument("-k",
metavar="[KMER_BEG:KMER_END:INCREMENT | KMER_LEN]",
default="9:11:2",
help="range of kmers or a single kmer to predict 3'adapters "
"(default: %(default)s)")
predop.add_argument("-r",
metavar="[RATIO_BEG:RATIO_END:INTCREMENT | RATIO]",
default="1.2:1.4:0.1",
help="range of ratios or a single ratio to filter less abundant kmers"
" (default: %(default)s)")
predop.add_argument("--show-all",
action="store_true",
help="show other candidates if any")
exhaop = parser.add_argument_group("exhaustive adapter search")
exhaop.add_argument("--map-command",
metavar="COMMAND",
default=None,
help="read mapping command to be tested")
exhaop.add_argument("--subsample-rate",
metavar="FLOAT",
default=1.0, type=float,
help="subsampling fraction of reads (default: %(default)s)")
exhaop.add_argument("--output-dir",
metavar="DIRECTORY",
default="./dnapi_out",
help="output directory to write report and cleansed reads"
" (default: ./dnapi_out)")
exhaop.add_argument("--no-output-files",
action="store_true",
help="only display report and suppress output files")
exhaop.add_argument("--temp-dir",
metavar="DIRECTORY",
default="/tmp",
help="place to make temporary directory (default: %(default)s)")
evalop = parser.add_argument_group("evaluation of candidate adapters")
evalop.add_argument("--adapter-seq",
dest="seq", nargs="+",
default=None,
help="list of 3'adapters for evaluation")
adrmop = parser.add_argument_group("adapter removal parameters")
adrmop.add_argument("--prefix-match",
metavar="LENGTH",
default=7, type=int,
help="3'adapter match length to trim (default: %(default)s)")
adrmop.add_argument("--min-len",
metavar="LENGTH",
default=16, type=int,
help="minimum read length to keep for mapping (default: %(default)s)")
adrmop.add_argument("--max-len",
metavar="LENGTH",
default=36, type=int,
help="maximum read length to keep for mapping (default: %(default)s)")
adrmop.add_argument("--trim-5p",
metavar="LENGTH",
default=0, type=int,
help="trim specified number of bases from 5'ends after adapter removal"
" (default: %(default)s)")
adrmop.add_argument("--trim-3p",
metavar="LENGTH",
default=0, type=int,
help="trim specified number of bases from 3'ends after adapter removal"
" (default: %(default)s)")
args = parser.parse_args()
if args.map_command:
err_find = "can't find {}"
soft = os.path.expanduser(args.map_command.split()[0])
if os.path.dirname(soft):
if not os.path.exists(soft):
raise Exception(err_find.format(soft))
else:
try:
subprocess.call("which {}".format(soft).split())
except OSError:
raise Exception(err_find.format(soft))
if not re.findall("@in", args.map_command):
raise Exception("can't locate input argument: @in")
if not re.findall("@out", args.map_command):
raise Exception("can't locate output argument: @out")
if args.prefix_match <= 0:
raise Exception("bad value: --prefix-match")
if args.min_len <= 0:
raise Exception("bad value: --min-len")
if args.max_len <= 0:
raise Exception("bad value: --max-len")
if args.trim_5p < 0:
raise Exception("bad value: --trim-5p")
if args.trim_3p < 0:
raise Exception("bad value: --trim-3p")
if args.subsample_rate <= 0 or 1 < args.subsample_rate:
raise Exception("bad subsampling rate")
global MAP_TO_GENOME
MAP_TO_GENOME = True
return args
def main():
args = parse_args()
fastq = args.FASTQ
Ks = convert_interval(args.k, "-k", int)
Rs = convert_interval(args.r, "-r", float)
if not MAP_TO_GENOME:
if len(Ks) > 1 or len(Rs) > 1:
adapts = iterative_adapter_prediction(fastq, Rs, Ks, SAMPLE_NUM)
else:
adapts = adapter_prediction(fastq, Rs[0], Ks[0], SAMPLE_NUM)
if args.show_all:
for x in adapts:
print("{}\tscore={:.2f}".format(*x))
else:
print(adapts[0][0])
else:
global TEMP_DIR
TEMP_DIR = "{}/DNApi_tmp_{}".format(
args.temp_dir, str(uuid.uuid4()))
subprocess.call(("mkdir {}".format(TEMP_DIR)).split())
original_fastq = fastq
fastq, total_read, sd = fastq_input_prep(
fastq, args.subsample_rate, TEMP_DIR)
if args.seq:
adapts = set(args.seq)
setstr = ["user-input" for i in range(len(adapts))]
else:
msg = "warning: predicted adapter is too short (<{0}): '{1}'\n" \
+ "warning: '{1}' will not be further investigated\n"
params = {}
for k in Ks:
for r in Rs:
aout = adapter_prediction(fastq, r, k, SAMPLE_NUM)[0][0]
if len(aout) < args.prefix_match:
                        sys.stderr.write(msg.format(args.prefix_match, aout))
continue
aseq = aout[: args.prefix_match+5]
params.setdefault(aseq,[]).append("{}:{:.1f}".format(k,r))
adapts = list(params.keys())
setstr = [';'.join(s) for s in params.values()]
adapts.append("RAW_INPUT")
setstr.append("NO_TREATMENT")
if not adapts:
raise Exception("no valid adapters to further process")
table = []
for i, aseq in enumerate(adapts):
cnts = map_clean_reads(
fastq, aseq[:args.prefix_match], args.trim_5p,
args.trim_3p, args.min_len, args.max_len,
args.map_command, TEMP_DIR)
read_stats = [c / total_read * 100 for c in cnts]
table.append([aseq, cnts[0], read_stats[0],
cnts[1], read_stats[1], setstr[i]])
make_stats_report(
table, total_read, args.subsample_rate, args.prefix_match,
sd, original_fastq, args.output_dir, TEMP_DIR, args.no_output_files)
if __name__ == "__main__":
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
main()
except KeyboardInterrupt:
rm_temp_dir(TEMP_DIR)
except Exception as e:
prog = os.path.basename(sys.argv[0])
rm_temp_dir(TEMP_DIR)
sys.exit("{}: error: {}".format(prog, str(e)))
finally:
rm_temp_dir(TEMP_DIR)
| 35.172131
| 80
| 0.589257
|
bdd5bab6a96a311f2d63df79346ac77da721f92a
| 3,132
|
py
|
Python
|
octant/python-gsw/gsw/utilities/utilities.py
|
kthyng/octant
|
65591d87797fa74e0c092d5f50fb0cd703eb412e
|
[
"BSD-3-Clause"
] | null | null | null |
octant/python-gsw/gsw/utilities/utilities.py
|
kthyng/octant
|
65591d87797fa74e0c092d5f50fb0cd703eb412e
|
[
"BSD-3-Clause"
] | null | null | null |
octant/python-gsw/gsw/utilities/utilities.py
|
kthyng/octant
|
65591d87797fa74e0c092d5f50fb0cd703eb412e
|
[
"BSD-3-Clause"
] | 1
|
2019-05-03T22:14:19.000Z
|
2019-05-03T22:14:19.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
__all__ = [
'match_args_return',
'Dict2Struc',
'Cache_npz',
'read_data',
'strip_mask',
]
class match_args_return(object):
r"""Function decorator to homogenize input arguments and to make the output
match the original input with respect to scalar versus array, and masked
versus ndarray.
"""
def __init__(self, func):
self.func = func
self.__wrapped__ = func
self.__doc__ = func.__doc__
self.__name__ = func.__name__
def __call__(self, *args, **kw):
p = kw.get('p', None)
if p is not None:
args = list(args)
args.append(p)
self.array = np.any([hasattr(a, '__iter__') for a in args])
self.masked = np.any([np.ma.isMaskedArray(a) for a in args])
newargs = [np.ma.atleast_1d(np.ma.masked_invalid(a)) for a in args]
newargs = [a.astype(np.float) for a in newargs]
if p is not None:
kw['p'] = newargs.pop()
ret = self.func(*newargs, **kw)
if not self.masked:
ret = np.ma.filled(ret, np.nan)
if not self.array:
ret = ret[0]
return ret
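# Hypothetical usage sketch (not part of the original module): decorating a function
# lets callers pass scalars, sequences, or masked arrays interchangeably.
#
#     @match_args_return
#     def double(x):
#         return 2 * x
#
#     double(3.0)         # returns a scalar
#     double([1.0, 2.0])  # returns an ndarray; invalid/masked inputs come back as NaN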
class Dict2Struc(object):
r"""Open variables from a dictionary in a "matlab-like-structure"."""
def __init__(self, adict):
for k in adict.files:
self.__dict__[k] = adict[k]
class Cache_npz(object):
def __init__(self):
self._cache = dict()
self._default_path = os.path.join(os.path.dirname(__file__), 'data')
def __call__(self, fname, datadir=None):
if datadir is None:
datadir = self._default_path
fpath = os.path.join(datadir, fname)
try:
return self._cache[fpath]
except KeyError:
pass
d = np.load(fpath)
ret = Dict2Struc(d)
self._cache[fpath] = ret
return ret
_npz_cache = Cache_npz()
def read_data(fname, datadir=None):
r"""Read variables from a numpy '.npz' file into a minimal class providing
attribute access. A cache is used to avoid re-reading the same file."""
return _npz_cache(fname, datadir=datadir)
def strip_mask(*args):
r"""Process the standard arguments for efficient calculation.
Return unmasked arguments, plus a mask.
    The first argument, SA, is handled specially so that negative values can be
    masked out (for arrays) or clamped to zero (for scalars) before calculation.
This could be absorbed into a decorator, but it would
require redefining functions to take the additional
mask argument or kwarg.
"""
mask = np.ma.getmaskarray(args[-1])
SA = args[0]
if SA.shape:
SA = np.ma.asarray(SA)
SA[SA < 0] = np.ma.masked
for a in args[:-1]:
mask = np.ma.mask_or(mask, np.ma.getmask(a))
newargs = [SA.filled(0)]
elif SA < 0:
SA = 0
for a in args[1:-1]:
mask = np.ma.mask_or(mask, np.ma.getmask(a))
newargs = [SA]
newargs.extend([np.ma.filled(a, 0) for a in args[1:]])
newargs.append(mask)
return newargs
| 28.733945
| 79
| 0.591635
|
8b04499cd4bb1b13e5a30b799e74bbac965a2197
| 2,738
|
py
|
Python
|
makeRsaKeys.py
|
HoshinoTouko/LearnCryptography
|
580dd67bd08ffc586b0ed32838180ea20613ab40
|
[
"BSD-3-Clause"
] | null | null | null |
makeRsaKeys.py
|
HoshinoTouko/LearnCryptography
|
580dd67bd08ffc586b0ed32838180ea20613ab40
|
[
"BSD-3-Clause"
] | null | null | null |
makeRsaKeys.py
|
HoshinoTouko/LearnCryptography
|
580dd67bd08ffc586b0ed32838180ea20613ab40
|
[
"BSD-3-Clause"
] | null | null | null |
# RSA Key Generator
# http://inventwithpython.com/hacking (BSD Licensed)
import random, sys, os, rabinMiller, cryptomath
def main():
# create a public/private keypair with 1024 bit keys
print('Making key files...')
makeKeyFiles('al_sweigart', 1024)
print('Key files made.')
def generateKey(keySize):
# Creates a public/private key pair with keys that are keySize bits in
# size. This function may take a while to run.
# Step 1: Create two prime numbers, p and q. Calculate n = p * q.
print('Generating p prime...')
p = rabinMiller.generateLargePrime(keySize)
print('Generating q prime...')
q = rabinMiller.generateLargePrime(keySize)
n = p * q
# Step 2: Create a number e that is relatively prime to (p-1)*(q-1).
print('Generating e that is relatively prime to (p-1)*(q-1)...')
while True:
# Keep trying random numbers for e until one is valid.
e = random.randrange(2 ** (keySize - 1), 2 ** (keySize))
if cryptomath.gcd(e, (p - 1) * (q - 1)) == 1:
break
# Step 3: Calculate d, the mod inverse of e.
print('Calculating d that is mod inverse of e...')
d = cryptomath.findModInverse(e, (p - 1) * (q - 1))
publicKey = (n, e)
privateKey = (n, d)
print('Public key:', publicKey)
print('Private key:', privateKey)
return (publicKey, privateKey)
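# Worked toy example (not part of the original script, and far too small to be
# secure): with p = 61 and q = 53, n = 3233 and (p-1)*(q-1) = 3120; picking e = 17
# (relatively prime to 3120) gives d = 2753, since 17 * 2753 = 46801 = 15*3120 + 1.
# The public key is then (3233, 17) and the private key is (3233, 2753).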
def makeKeyFiles(name, keySize):
# Creates two files 'x_pubkey.txt' and 'x_privkey.txt' (where x is the
    # value in name) with the n,e and n,d integers written in them,
# delimited by a comma.
# Our safety check will prevent us from overwriting our old key files:
if os.path.exists('%s_pubkey.txt' % (name)) or os.path.exists('%s_privkey.txt' % (name)):
sys.exit('WARNING: The file %s_pubkey.txt or %s_privkey.txt already exists! Use a different name or delete these files and re-run this program.' % (name, name))
publicKey, privateKey = generateKey(keySize)
print()
print('The public key is a %s and a %s digit number.' % (len(str(publicKey[0])), len(str(publicKey[1]))))
print('Writing public key to file %s_pubkey.txt...' % (name))
fo = open('%s_pubkey.txt' % (name), 'w')
fo.write('%s,%s,%s' % (keySize, publicKey[0], publicKey[1]))
fo.close()
print()
    print('The private key is a %s and a %s digit number.' % (len(str(privateKey[0])), len(str(privateKey[1]))))
print('Writing private key to file %s_privkey.txt...' % (name))
fo = open('%s_privkey.txt' % (name), 'w')
fo.write('%s,%s,%s' % (keySize, privateKey[0], privateKey[1]))
fo.close()
# If makeRsaKeys.py is run (instead of imported as a module) call
# the main() function.
if __name__ == '__main__':
main()
| 36.506667
| 168
| 0.637327
|
1b1eafbdd1a8bcc102342bf0262680dfa5cf1e56
| 5,156
|
py
|
Python
|
modules/PiDisplaySleep/click-listener/detect_sound.py
|
benwulmer/magicmirror
|
f61534893dc7101a563d8af82f500834e5bda88a
|
[
"MIT"
] | null | null | null |
modules/PiDisplaySleep/click-listener/detect_sound.py
|
benwulmer/magicmirror
|
f61534893dc7101a563d8af82f500834e5bda88a
|
[
"MIT"
] | null | null | null |
modules/PiDisplaySleep/click-listener/detect_sound.py
|
benwulmer/magicmirror
|
f61534893dc7101a563d8af82f500834e5bda88a
|
[
"MIT"
] | null | null | null |
from collections import deque
from datetime import datetime
import time
from pathlib import Path
from typing import Union
import os
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
import librosa
import numpy as np
import sounddevice as sd
from scipy.spatial import distance
from scipy.signal import fftconvolve
from scipy.io import wavfile
import json
import matplotlib.pyplot as plt
from os import popen,system
from time import sleep
wifi_connected = False
display_on = False
ip="192.168.86.26"
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
class AudioHandler:
def __init__(self) -> None:
self.DIST_THRESHOLD = 75
self.sr = 44100
self.sec = 2
self.smoothing = 10
self.num_samples = 2
self.samples = []
for i in range(self.num_samples):
with open('click' + str(i+1) + '.json', 'r') as file:
self.samples.append(np.array(json.load(file)))
self.sample_mfccs = []
self.historical_mfcc = []
for i in range(3):
with open("./mfcc_features/mfcc" + str(i + 1) + ".json", 'r') as file:
features = np.array(json.load(file))
self.sample_mfccs.append(features)
self.historical_mfcc.append([])
def get_rule_vote(self, sample):
sample = np.absolute(sample)
std = np.std(sample)
avg = np.mean(sample)
upper_threshold = avg + 10 * std
lower_threshold = avg + 7 * std
onPeak = False
onePeakTime = 0
onePeak = False
for i in range(int(len(sample) / self.smoothing)):
sample_subset = sample[i*self.smoothing:(i+1) * self.smoothing]
amp = np.mean(sample_subset)
if amp > upper_threshold and not onPeak:
onPeak = True
if not onePeak:
onePeak = True
elif onePeakTime > i * self.smoothing - self.sr / 2:
return True
elif amp < lower_threshold and onPeak:
if onePeak and onPeak:
onePeakTime = i * self.smoothing
onPeak = False
return False
def get_mfcc_vote(self, sample):
mfcc_features = self.compute_mean_mfcc(sample, self.sr)
votes = 0
for i in range(3):
d, path = fastdtw(self.sample_mfccs[i], mfcc_features, dist=euclidean)
self.historical_mfcc[i].append(d)
if len(self.historical_mfcc[i]) <= 3:
continue
avg = np.mean(self.historical_mfcc[i])
std = np.std(self.historical_mfcc[i])
if d < avg - std:
votes += 1
return votes > self.num_samples / 2.0
def get_correlation_vote(self, sample):
votes = 0
for i in range(self.num_samples):
correlation = self.correlation_similarity(sample, self.samples[i])
if correlation > 3:
votes += 1
return votes > self.num_samples / 2.0
def start_detection(self) -> None:
        # wifi_connected / display_on are module-level flags, so declare them
        # global before they are reassigned inside this loop.
        global wifi_connected, display_on
        j = 0
        timeout = 12 * 60 * 60  # [seconds]
        timeout_start = time.time()
        print("starting")
        prev_sound = np.array([])
        nmap_out = ""
while time.time() < timeout_start + timeout:
j += 1
sound_record = sd.rec(
int(self.sec * self.sr),
samplerate=self.sr,
channels=1,
blocking=True,
).flatten()
print("")
print("results")
print("mfcc", self.get_mfcc_vote(sound_record))
print("rule", self.get_rule_vote(sound_record))
print("correlation", self.get_correlation_vote(sound_record))
# with open("./mfcc_features/mfcc" + str(j) + ".json", 'w') as outfile:
# json.dump(features, outfile,cls = NumpyEncoder)
# print(j)
if j % 4 == 0:
nmap_out=str(popen('nmap -sP '+ip).read())
if j % 5 == 0:
j = 0
if nmap_out.find('latency') == -1 and wifi_connected:
wifi_connected = False
if display_on:
display_on = False
system('vcgencmd display_power 0')
elif nmap_out.find('latency') > 1 and not wifi_connected:
wifi_connected = True
if not display_on:
display_on = True
system('vcgencmd display_power 1') #Bash command to turn on the display
def correlation_similarity(self, sample, recording):
corr = fftconvolve(sample, recording)
return max(abs(corr))
def compute_mean_mfcc(self, audio, sr, dtype="float32"):
mfcc_features = librosa.feature.mfcc(audio, sr=sr, dtype=dtype, n_mfcc=20)
return np.mean(mfcc_features, axis=1)
if __name__ == '__main__':
AudioHandler().start_detection()
| 34.373333
| 95
| 0.561288
|
a401d37a3904e37f2bb54984a0e752e075c0f428
| 133
|
py
|
Python
|
allennlp_models/lm/util/beam_search_generators/__init__.py
|
shunk031/allennlp-models
|
d37c5fadeef9326808a84dda0bcfd210a078d6b1
|
[
"Apache-2.0"
] | 402
|
2020-03-11T22:58:35.000Z
|
2022-03-29T09:05:27.000Z
|
allennlp_models/lm/util/beam_search_generators/__init__.py
|
staceywhitmore-inl/allennlp-models
|
1e89d5e51cb45f3e77a48d4983bf980088334fac
|
[
"Apache-2.0"
] | 116
|
2020-03-11T01:26:57.000Z
|
2022-03-25T13:03:56.000Z
|
allennlp_models/lm/util/beam_search_generators/__init__.py
|
staceywhitmore-inl/allennlp-models
|
1e89d5e51cb45f3e77a48d4983bf980088334fac
|
[
"Apache-2.0"
] | 140
|
2020-03-11T00:51:35.000Z
|
2022-03-29T09:05:36.000Z
|
from .beam_search_generator import BeamSearchGenerator
from .transformer_beam_search_generator import TransformerBeamSearchGenerator
| 44.333333
| 77
| 0.924812
|
05b0b4e60a309cc022447c70ebc4175baf9aba64
| 1,004
|
py
|
Python
|
qiskit/util.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 1
|
2021-08-29T08:00:01.000Z
|
2021-08-29T08:00:01.000Z
|
qiskit/util.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 6
|
2021-07-19T21:31:40.000Z
|
2022-03-30T07:10:09.000Z
|
qiskit/util.py
|
Elliot-Coupe/qiskit-terra
|
8a604e156ba4c2fa099b1c24cd941f59b9408398
|
[
"Apache-2.0"
] | 1
|
2020-04-15T07:23:23.000Z
|
2020-04-15T07:23:23.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Common utilities for Qiskit."""
# Deprecated: for backwards compatibility to be removed in a future release
from qiskit.utils.deprecation import deprecate_arguments
from qiskit.utils.deprecation import deprecate_function
from qiskit.utils.multiprocessing import is_main_process
from qiskit.utils.multiprocessing import local_hardware_info
from qiskit.utils.units import apply_prefix
__all__ = [
"deprecate_arguments",
"deprecate_function",
"is_main_process",
"local_hardware_info",
"apply_prefix",
]
| 33.466667
| 77
| 0.77988
|
4cbfda5a81b400875aa935ee82eab189c6db3a06
| 2,108
|
py
|
Python
|
kv_tool.py
|
TempAccountNull/Xbox_360_Crypto
|
be40b69bf841badcec77ac01bd641d43bebd7fa3
|
[
"BSD-3-Clause"
] | null | null | null |
kv_tool.py
|
TempAccountNull/Xbox_360_Crypto
|
be40b69bf841badcec77ac01bd641d43bebd7fa3
|
[
"BSD-3-Clause"
] | null | null | null |
kv_tool.py
|
TempAccountNull/Xbox_360_Crypto
|
be40b69bf841badcec77ac01bd641d43bebd7fa3
|
[
"BSD-3-Clause"
] | 1
|
2021-06-29T01:29:56.000Z
|
2021-06-29T01:29:56.000Z
|
#!/usr/bin/env python3
from enum import IntEnum
from pathlib import Path
from struct import pack_into
from XeCrypt import XeCryptKeyVaultEncrypt
class ConsoleType(IntEnum):
RETAIL_PHAT = 0
RETAIL_SLIM = 1
TEST_KIT = 2
DEVKIT = 3
def main() -> None:
cpu_key = bytes.fromhex("A55F6604990DD4736DE6A0E09FC576F1")
dvd_key = bytes.fromhex("C7F720142AB22847757398FEB4AECDD1")
console_type = ConsoleType.DEVKIT
print("CPU key: " + cpu_key.hex().upper())
print("DVD key: " + dvd_key.hex().upper())
# create fuse buffer 12 lines by 8 bytes
fuse_data = bytearray(12 * 8)
pack_into("8s", fuse_data, 0, bytes.fromhex("C0FFFFFFFFFFFFFF")) # line #1
pack_into("1s", fuse_data, 0x38, b"\xF0") # line #8
# read the KV
kv_path = Path("KV/banned.bin")
kv_data = bytearray(kv_path.read_bytes())
# update the DVD key
pack_into("16s", kv_data, 0x100, dvd_key)
# encrypt the KV with the specified CPU key
kv_data = XeCryptKeyVaultEncrypt(cpu_key, kv_data)
# update console type
pack_into("6s", fuse_data, 8, bytes.fromhex("0F0F0F0F0F0F"))
if console_type == ConsoleType.TEST_KIT:
pack_into("2s", fuse_data, 0xE, bytes.fromhex("F00F"))
elif console_type == ConsoleType.DEVKIT:
pack_into("2s", fuse_data, 0xE, bytes.fromhex("0F0F"))
elif console_type == ConsoleType.RETAIL_PHAT:
pack_into("2s", fuse_data, 0xE, bytes.fromhex("0FF0"))
elif console_type == ConsoleType.RETAIL_SLIM:
pack_into("2s", fuse_data, 0xE, bytes.fromhex("F0F0"))
# update CPU key in fuses
pack_into("8s8s8s8s", fuse_data, 0x18, cpu_key[:8], cpu_key[:8], cpu_key[8:16], cpu_key[8:16])
# setup fuse path
fuse_path = Path("Output/Zero/fuses.bin")
# write fuses
fuse_path.write_bytes(fuse_data)
# setup KV path
kv_path = Path("Output/Zero/kv_enc.bin")
# write the KV
kv_path.write_bytes(kv_data)
# print fuse lines
print()
print("Fuses:")
for i in range(12):
print(fuse_data[i * 8:(i * 8) + 8].hex().upper())
# print output paths
print()
print(f"KV written to \"{str(kv_path.absolute())}\"!")
print(f"Fuses written to \"{str(fuse_path.absolute())}\"!")
if __name__ == "__main__":
main()
| 29.277778
| 95
| 0.709677
|
3514e0b84678b114a24f158ff22a57bfec39f0bb
| 436
|
py
|
Python
|
chunk/main.py
|
andrewgryan/jinja-playground
|
dff19beb4573f9d0c828084eb9afdbcd0c761fdd
|
[
"BSD-3-Clause"
] | null | null | null |
chunk/main.py
|
andrewgryan/jinja-playground
|
dff19beb4573f9d0c828084eb9afdbcd0c761fdd
|
[
"BSD-3-Clause"
] | null | null | null |
chunk/main.py
|
andrewgryan/jinja-playground
|
dff19beb4573f9d0c828084eb9afdbcd0c761fdd
|
[
"BSD-3-Clause"
] | null | null | null |
import jinja2
syntax = """
{% set CHUNKS=[] %}
{% for I in range(N) %}
{% do CHUNKS.append(ITEMS[I::N]) %}
{% endfor %}
{% for I in range(N) %}
[[task_{{ '%02d' % I }}]]
[[[environment]]]
HOURS={{ CHUNKS[I] | join(' ') }}
{% endfor %}
"""
environment = jinja2.Environment(extensions=['jinja2.ext.do'])
items = [1, 2, 3, 4, 5]
N = 2
print(environment.from_string(syntax).render(
N=N,
ITEMS=items))
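# Expected output sketch (not part of the original script): with ITEMS = [1, 2, 3, 4, 5]
# and N = 2, ITEMS[0::2] is [1, 3, 5] and ITEMS[1::2] is [2, 4], so the rendered text
# contains task_00 with HOURS=1 3 5 and task_01 with HOURS=2 4 (whitespace from the
# template tags aside).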
| 19.818182
| 62
| 0.53211
|
db8dabdf0f1fa840bf2633f5620ab6db235d0d87
| 69
|
py
|
Python
|
coding/contains_duplicate/starter.py
|
alexanderywang/tech-interview-questions
|
a0098eda33db73d10787e198d1f105532ea32c09
|
[
"MIT"
] | null | null | null |
coding/contains_duplicate/starter.py
|
alexanderywang/tech-interview-questions
|
a0098eda33db73d10787e198d1f105532ea32c09
|
[
"MIT"
] | null | null | null |
coding/contains_duplicate/starter.py
|
alexanderywang/tech-interview-questions
|
a0098eda33db73d10787e198d1f105532ea32c09
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
    def hasDuplicate(self, nums: List[int]) -> bool:
        # Starter stub: return True when any value appears in nums more than once.
        pass
| 23
| 52
| 0.681159
|
357a907b004d480559de30f5ea0a6e53cf43ffc9
| 1,243
|
py
|
Python
|
app/__init__.py
|
imdreamer2018/blogByFlask
|
87f374e40b45c95cb87402c3fe1bf4df226a297d
|
[
"MIT"
] | 2
|
2020-05-18T00:56:25.000Z
|
2020-05-18T00:56:27.000Z
|
app/__init__.py
|
imdreamer2018/SimpleBlog
|
87f374e40b45c95cb87402c3fe1bf4df226a297d
|
[
"MIT"
] | 4
|
2020-05-16T13:22:44.000Z
|
2020-05-16T13:22:47.000Z
|
app/__init__.py
|
imdreamer2018/SimpleBlog
|
87f374e40b45c95cb87402c3fe1bf4df226a297d
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask,render_template
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from config import config
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_pagedown import PageDown
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
migrate = Migrate()
pagedown = PageDown()
def create_app():
config_name = os.getenv('FLASK_CONFIG') or 'default'
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
migrate.init_app(app,db)
login_manager.init_app(app)
pagedown.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .api import api as api_blueprint
app.register_blueprint(api_blueprint,url_prefix='/api/v1')
return app
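# Hypothetical usage sketch (not part of the original module): the application
# factory above is normally driven from a small launcher script, for example:
#
#     from app import create_app
#     app = create_app()
#     if __name__ == "__main__":
#         app.run()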
| 28.906977
| 63
| 0.741754
|
3a82e07b26ad5409b0c8fa34c903638fb7d12cfb
| 306
|
py
|
Python
|
lasio/exceptions.py
|
ae3e/lasio
|
8c539b7437c4ee852140df2d9e3be78cc14d38dc
|
[
"MIT"
] | 8
|
2020-01-27T19:29:37.000Z
|
2021-10-02T17:55:01.000Z
|
lasio/exceptions.py
|
markderry/lasio
|
006a2358b0123efdc11255803408f94ce8291c04
|
[
"MIT"
] | 7
|
2020-06-18T11:42:50.000Z
|
2020-09-27T13:22:51.000Z
|
lasio/exceptions.py
|
markderry/lasio
|
006a2358b0123efdc11255803408f94ce8291c04
|
[
"MIT"
] | 6
|
2020-05-16T14:03:27.000Z
|
2021-08-05T15:00:29.000Z
|
class LASDataError(Exception):
'''Error during reading of numerical data from LAS file.'''
pass
class LASHeaderError(Exception):
'''Error during reading of header data from LAS file.'''
pass
class LASUnknownUnitError(Exception):
'''Error of unknown unit in LAS file.'''
pass
| 18
| 63
| 0.689542
|
7cd092768bcdae3c458a454ce4dc748f85a0b32c
| 5,495
|
py
|
Python
|
homeassistant/generated/config_flows.py
|
tggm/core
|
8401c1ae533736877476996ec6bd385fb5e41164
|
[
"Apache-2.0"
] | 3
|
2021-11-05T13:24:19.000Z
|
2022-01-08T12:17:09.000Z
|
homeassistant/generated/config_flows.py
|
tggm/core
|
8401c1ae533736877476996ec6bd385fb5e41164
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/generated/config_flows.py
|
tggm/core
|
8401c1ae533736877476996ec6bd385fb5e41164
|
[
"Apache-2.0"
] | 2
|
2022-01-10T18:08:10.000Z
|
2022-01-21T19:42:34.000Z
|
"""Automatically generated by hassfest.
To update, run python3 -m script.hassfest
"""
# fmt: off
FLOWS = [
"abode",
"accuweather",
"acmeda",
"adax",
"adguard",
"advantage_air",
"aemet",
"agent_dvr",
"airly",
"airnow",
"airthings",
"airtouch4",
"airvisual",
"alarmdecoder",
"almond",
"ambee",
"amberelectric",
"ambiclimate",
"ambient_station",
"apple_tv",
"arcam_fmj",
"asuswrt",
"atag",
"august",
"aurora",
"awair",
"axis",
"azure_devops",
"blebox",
"blink",
"bmw_connected_drive",
"bond",
"bosch_shc",
"braviatv",
"broadlink",
"brother",
"bsblan",
"buienradar",
"canary",
"cast",
"cert_expiry",
"climacell",
"cloudflare",
"co2signal",
"coinbase",
"control4",
"coolmaster",
"coronavirus",
"crownstone",
"daikin",
"deconz",
"denonavr",
"devolo_home_control",
"dexcom",
"dialogflow",
"directv",
"dlna_dmr",
"doorbird",
"dsmr",
"dunehd",
"dynalite",
"eafm",
"ecobee",
"econet",
"efergy",
"elgato",
"elkm1",
"emonitor",
"emulated_roku",
"enocean",
"enphase_envoy",
"environment_canada",
"epson",
"esphome",
"ezviz",
"faa_delays",
"fireservicerota",
"fjaraskupan",
"flick_electric",
"flipr",
"flo",
"flume",
"flunearyou",
"flux_led",
"forecast_solar",
"forked_daapd",
"foscam",
"freebox",
"freedompro",
"fritz",
"fritzbox",
"fritzbox_callmonitor",
"garages_amsterdam",
"gdacs",
"geofency",
"geonetnz_quakes",
"geonetnz_volcano",
"gios",
"glances",
"goalzero",
"gogogate2",
"google_travel_time",
"gpslogger",
"gree",
"growatt_server",
"guardian",
"habitica",
"hangouts",
"harmony",
"heos",
"hisense_aehw4a1",
"hive",
"hlk_sw16",
"home_connect",
"home_plus_control",
"homekit",
"homekit_controller",
"homematicip_cloud",
"honeywell",
"huawei_lte",
"hue",
"huisbaasje",
"hunterdouglas_powerview",
"hvv_departures",
"hyperion",
"ialarm",
"iaqualink",
"icloud",
"ifttt",
"insteon",
"ios",
"iotawatt",
"ipma",
"ipp",
"iqvia",
"islamic_prayer_times",
"isy994",
"izone",
"juicenet",
"keenetic_ndms2",
"kmtronic",
"kodi",
"konnected",
"kostal_plenticore",
"kraken",
"kulersky",
"life360",
"lifx",
"litejet",
"litterrobot",
"local_ip",
"locative",
"logi_circle",
"lookin",
"luftdaten",
"lutron_caseta",
"lyric",
"mailgun",
"mazda",
"melcloud",
"met",
"met_eireann",
"meteo_france",
"meteoclimatic",
"metoffice",
"mikrotik",
"mill",
"minecraft_server",
"mobile_app",
"modem_callerid",
"modern_forms",
"monoprice",
"motion_blinds",
"motioneye",
"mqtt",
"mullvad",
"mutesync",
"myq",
"mysensors",
"nam",
"nanoleaf",
"neato",
"nest",
"netatmo",
"netgear",
"nexia",
"nfandroidtv",
"nightscout",
"nmap_tracker",
"notion",
"nuheat",
"nuki",
"nut",
"nws",
"nzbget",
"octoprint",
"omnilogic",
"ondilo_ico",
"onewire",
"onvif",
"opengarage",
"opentherm_gw",
"openuv",
"openweathermap",
"ovo_energy",
"owntracks",
"ozw",
"p1_monitor",
"panasonic_viera",
"philips_js",
"pi_hole",
"picnic",
"plaato",
"plex",
"plugwise",
"plum_lightpad",
"point",
"poolsense",
"powerwall",
"profiler",
"progettihwsw",
"prosegur",
"ps4",
"pvpc_hourly_pricing",
"rachio",
"rainforest_eagle",
"rainmachine",
"recollect_waste",
"renault",
"rfxtrx",
"ring",
"risco",
"rituals_perfume_genie",
"rointe",
"roku",
"roomba",
"roon",
"rpi_power",
"ruckus_unleashed",
"samsungtv",
"screenlogic",
"sense",
"sentry",
"sharkiq",
"shelly",
"shopping_list",
"sia",
"simplisafe",
"sma",
"smappee",
"smart_meter_texas",
"smarthab",
"smartthings",
"smarttub",
"smhi",
"sms",
"solaredge",
"solarlog",
"soma",
"somfy",
"somfy_mylink",
"sonarr",
"songpal",
"sonos",
"speedtestdotnet",
"spider",
"spotify",
"squeezebox",
"srp_energy",
"starline",
"stookalert",
"subaru",
"surepetcare",
"switchbot",
"switcher_kis",
"syncthing",
"syncthru",
"synology_dsm",
"system_bridge",
"tado",
"tasmota",
"tellduslive",
"tibber",
"tile",
"toon",
"totalconnect",
"tplink",
"traccar",
"tractive",
"tradfri",
"transmission",
"tuya",
"twentemilieu",
"twilio",
"twinkly",
"unifi",
"upb",
"upcloud",
"upnp",
"uptimerobot",
"velbus",
"vera",
"verisure",
"vesync",
"vilfo",
"vizio",
"vlc_telnet",
"volumio",
"wallbox",
"watttime",
"waze_travel_time",
"wemo",
"whirlpool",
"wiffi",
"wilight",
"withings",
"wled",
"wolflink",
"xbox",
"xiaomi_aqara",
"xiaomi_miio",
"yale_smart_alarm",
"yamaha_musiccast",
"yeelight",
"youless",
"zerproc",
"zha",
"zwave",
"zwave_js"
]
| 16.20944
| 41
| 0.51465
|
3c2d134679155e01b30f8afb4d3307bd30aedd2f
| 295
|
py
|
Python
|
src/spaceone/monitoring/info/webhook_info.py
|
xellos00/plugin-amorepacific-monitoring-webhook
|
d31287c67a2481e055d070a0f254b0a1f8ba75fa
|
[
"Apache-2.0"
] | 1
|
2022-02-11T02:01:44.000Z
|
2022-02-11T02:01:44.000Z
|
src/spaceone/monitoring/info/webhook_info.py
|
xellos00/plugin-amorepacific-monitoring-webhook
|
d31287c67a2481e055d070a0f254b0a1f8ba75fa
|
[
"Apache-2.0"
] | 2
|
2022-02-10T05:32:56.000Z
|
2022-03-17T12:17:20.000Z
|
src/spaceone/monitoring/info/webhook_info.py
|
xellos00/plugin-amorepacific-monitoring-webhook
|
d31287c67a2481e055d070a0f254b0a1f8ba75fa
|
[
"Apache-2.0"
] | 2
|
2021-08-18T01:38:48.000Z
|
2022-02-11T01:47:48.000Z
|
__all__ = ['WebhookPluginInfo']
from spaceone.api.monitoring.plugin import webhook_pb2, event_pb2
from spaceone.core.pygrpc.message_type import *
def WebhookPluginInfo(result):
result['metadata'] = change_struct_type(result['metadata'])
return webhook_pb2.WebhookPluginInfo(**result)
| 29.5
| 65
| 0.789831
|
eef0f306744ad69413024145579d8d5092d8ce88
| 303
|
py
|
Python
|
data/multilingual/Latn.BUM/Mono_12/pdf_to_json_test_Latn.BUM_Mono_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.BUM/Mono_12/pdf_to_json_test_Latn.BUM_Mono_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.BUM/Mono_12/pdf_to_json_test_Latn.BUM_Mono_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.BUM/Mono_12/udhr_Latn.BUM_Mono_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3
| 73
| 0.811881
|
59a81d0db1d9008b27b8156d01aefd938ea154aa
| 4,508
|
py
|
Python
|
Server/server.py
|
FredricM/ECE422
|
528bf8a3193394a3318de651ed0abc0bc2eab585
|
[
"MIT"
] | null | null | null |
Server/server.py
|
FredricM/ECE422
|
528bf8a3193394a3318de651ed0abc0bc2eab585
|
[
"MIT"
] | null | null | null |
Server/server.py
|
FredricM/ECE422
|
528bf8a3193394a3318de651ed0abc0bc2eab585
|
[
"MIT"
] | null | null | null |
import socket
import sys
import threading
import os
import getpass
import collections
from os.path import isfile, join
class mdict(dict):
def __setitem__(self, key, value):
"""add the given value to the list of values for this key"""
self.setdefault(key, []).append(value)
def createHome(name, username):
if not os.path.exists(username+"home"):
os.makedirs(username+"home")
print "created home directory for " + username
def addUser(name, sock, username, database):
if username in database:
sock.send( username +" already exists, please pick another name/")
else:
sock.send("input your password/")
password = sock.recv(256)
database[username] = password
createHome("name", username)
sock.send( username +" has been created/"+ directory)
print database
def createFile(name, sock, filename):
item = os.path.abspath(directory)
completeName = os.path.join(item, filename+".txt")
if os.path.isfile(completeName):
print(item)
sock.send("already exists/" + directory)
else:
new_file = open(completeName, "w")
new_file.close()
sock.send("file created/" + directory)
def createGroup(name, sock, groupName, groupbase):
if groupName in groupbase:
sock.send("this group already exists/")
else:
groupbase[groupName]="root"
sock.send("successfully created new group/")
print groupbase
def addToGroup(name, sock, group, groupbase, database ):
if group not in groupbase:
sock.send("group doesn't exist/")
else:
sock.send("enter member")
member = sock.recv(256)
if member not in database:
sock.send("user doesn't exist/")
elif member in groupbase[group]:
sock.send("user already in group/")
else:
groupbase[group] = (member)
sock.send("member added to the group/")
print groupbase[group]
return True
def login(name, sock, username, database):
global user
global directory
if username not in database:
sock.send("user doesn't exist/")
else:
sock.send("enter password: /")
password = sock.recv(256)
if database[username] == password:
user = username
directory = user+"home"
sock.send("welcome " + user + "/" + directory)
else:
sock.send("incorrect password/")
def logout(name, sock):
global user
global directory
sock.send("goodbye " + user + "/")
user = ""
directory = ""
database = {"root" : "ece492"}
groupbase = mdict()
user = ""
directory = ""
def server_program():
# Create a TCP/IP socket
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket = socket.socket()
# Bind the socket to the port
server_address = ('localhost', 10008)
print >>sys.stderr, 'starting up on %s port %s' % server_address
server_socket.bind(server_address)
# Listen for incoming connections
server_socket.listen(5)
conn, address = server_socket.accept() # accept new connection
print("Connection from: " + str(address))
while True:
# receive data stream. it won't accept data packet greater than 1024 bytes
data = conn.recv(1024).decode()
if not data:
# if data is not received break
break
elif data[:3] == "add" and len(user) == 0:
addUser("name", conn, data[4:], database)
elif data[:6] == "create" and len(user) != 0:
createFile("name", conn, data[7:])
elif data[:6] == "newgrp" and len(user) == 0:
createGroup("name", conn, data[7:] , groupbase)
elif data[:6] == "memadd" and len(user) == 0:
addToGroup("name", conn, data[7:], groupbase, database)
elif data[:5] == "login" and len(user) == 0:
login("name", conn, data[6:], database)
print(user + "+" + directory)
elif data[:6] == "logout" and len(user) != 0:
logout("name", conn)
else:
server_message = "not a command you can do/" + directory
conn.send(server_message.encode()) # send data to the client
conn.close() # close the connection
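# --- Added illustrative sketch (not part of the original file) ---
# A minimal client for the text protocol handled above: commands such as
# "login <user>" are sent as plain strings and replies come back as
# "message/working-directory". The credentials below are the defaults
# hard-coded in `database` and are used purely for illustration.
def _example_client(host='localhost', port=10008):
    client = socket.socket()
    client.connect((host, port))
    client.sendall(b"login root")
    print(client.recv(256).decode())   # expect the password prompt
    client.sendall(b"ece492")
    print(client.recv(256).decode())   # expect "welcome root/<home directory>"
    client.close()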
if __name__ == '__main__':
server_program()
| 26.994012
| 82
| 0.582076
|
ad9290f565451aba1978ddd76827e2f5a1b6d649
| 23,725
|
py
|
Python
|
datashader/mpl_ext.py
|
goncaloperes/datashader
|
1ae52b65ec8a79920e5db9c6c04487f254428553
|
[
"BSD-3-Clause"
] | 706
|
2019-11-15T07:50:54.000Z
|
2022-03-31T10:55:04.000Z
|
datashader/mpl_ext.py
|
goncaloperes/datashader
|
1ae52b65ec8a79920e5db9c6c04487f254428553
|
[
"BSD-3-Clause"
] | 223
|
2019-11-15T19:32:54.000Z
|
2022-03-31T20:46:21.000Z
|
datashader/mpl_ext.py
|
goncaloperes/datashader
|
1ae52b65ec8a79920e5db9c6c04487f254428553
|
[
"BSD-3-Clause"
] | 106
|
2019-12-05T12:32:54.000Z
|
2022-03-31T15:50:00.000Z
|
from collections import OrderedDict
import warnings
from matplotlib.image import _ImageBase
from matplotlib.patches import Patch
from matplotlib.transforms import Bbox, TransformedBbox, BboxTransform
import matplotlib as mpl
import numpy as np
from . import reductions
from . import transfer_functions as tf
from .colors import Sets1to3
from .core import bypixel, Canvas
__all__ = ["ScalarDSArtist", "CategoricalDSArtist", "alpha_colormap", "dsshow"]
def uint32_to_uint8(img):
"""Cast a uint32 raster to a 4-channel uint8 RGBA array."""
return img.view(dtype=np.uint8).reshape(img.shape + (4,))
def uint8_to_uint32(img):
"""Cast a 4-channel uint8 RGBA array to uint32 raster"""
return img.view(dtype=np.uint32).reshape(img.shape[:-1])
def to_ds_image(binned, rgba):
if binned.ndim == 2:
return tf.Image(uint8_to_uint32(rgba), coords=binned.coords, dims=binned.dims)
elif binned.ndim == 3:
return tf.Image(
uint8_to_uint32(rgba),
dims=binned.dims[:-1],
coords=OrderedDict(
[
(binned.dims[1], binned.coords[binned.dims[1]]),
(binned.dims[0], binned.coords[binned.dims[0]]),
]
),
)
else:
raise ValueError("Aggregate must be 2D or 3D.")
def compute_mask(binned):
# Use datashader's rules for masking aggregates
# mask == True --> invalid
if np.issubdtype(binned.dtype, np.bool_):
mask = ~binned
elif binned.dtype.kind == "u":
mask = binned == 0
else:
mask = np.isnan(binned)
return mask
def alpha_colormap(color, min_alpha=40, max_alpha=255, N=256):
"""
Generate a transparency-based monochromatic colormap.
Parameters
----------
color : str or tuple
Color name, hex code or RGB tuple.
min_alpha, max_alpha: int
Values between 0 - 255 representing the range of alpha values to use for
colormapped pixels that contain data.
Returns
-------
:class:`matplotlib.colors.LinearSegmentedColormap`
"""
for a in (min_alpha, max_alpha):
if a < 0 or a > 255:
raise ValueError("Alpha values must be integers between 0 and 255")
r, g, b = mpl.colors.to_rgb(color)
return mpl.colors.LinearSegmentedColormap(
"_datashader_alpha",
{
"red": [(0.0, r, r), (1.0, r, r)],
"green": [(0.0, g, g), (1.0, g, g)],
"blue": [(0.0, b, b), (1.0, b, b)],
"alpha": [
(0.0, min_alpha / 255, min_alpha / 255),
(1.0, max_alpha / 255, max_alpha / 255),
],
},
N=N,
)
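# --- Added illustrative sketch (not part of the original module) ---
# One hedged example of combining ``alpha_colormap`` with ``dsshow`` (defined
# later in this module): the colour stays fixed and only transparency encodes
# the aggregate value. The dataframe columns 'x' and 'y' are assumptions.
def _example_alpha_colormap(df):
    import datashader as ds  # assumed importable, as in the dsshow docstring
    cmap = alpha_colormap("navy", min_alpha=40, max_alpha=255)
    return dsshow(df, ds.Point("x", "y"), cmap=cmap)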
class EqHistNormalize(mpl.colors.Normalize):
def __init__(self, vmin=None, vmax=None, clip=False, nbins=256 ** 2, ncolors=256):
super(EqHistNormalize, self).__init__(vmin, vmax, clip)
self._nbins = nbins
self._bin_edges = None
self._ncolors = ncolors
self._color_bins = np.linspace(0, 1, ncolors)
def _binning(self, data, n=256):
if np.ma.is_masked(data):
data = data[~data.mask]
low = data.min() if self.vmin is None else self.vmin
high = data.max() if self.vmax is None else self.vmax
nbins = self._nbins
eq_bin_edges = np.linspace(low, high, nbins + 1)
hist, _ = np.histogram(data, eq_bin_edges)
eq_bin_centers = np.convolve(eq_bin_edges, [0.5, 0.5], mode="valid")
cdf = np.cumsum(hist)
cdf_max = cdf[-1]
norm_cdf = cdf / cdf_max
# Iteratively find as many finite bins as there are colors
finite_bins = n - 1
binning = []
iterations = 0
guess = n * 2
while (finite_bins != n) and (iterations < 4) and (finite_bins != 0):
ratio = guess / finite_bins
if ratio > 1000:
# Abort if distribution is extremely skewed
break
guess = np.round(max(n * ratio, n))
# Interpolate
palette_edges = np.arange(0, guess)
palette_cdf = norm_cdf * (guess - 1)
binning = np.interp(palette_edges, palette_cdf, eq_bin_centers)
# Evaluate binning
uniq_bins = np.unique(binning)
finite_bins = len(uniq_bins) - 1
iterations += 1
if finite_bins == 0:
binning = [low] + [high] * (n - 1)
else:
binning = binning[-n:]
if finite_bins != n:
warnings.warn(
"EqHistColorMapper warning: Histogram equalization did not converge."
)
return binning
def __call__(self, data, clip=None):
mask = np.ma.getmask(data)
result = self.process_value(data)[0]
# Preserve the mask after normalization if there is one
return np.ma.masked_array(result, mask)
def process_value(self, data):
if self._bin_edges is None:
raise ValueError("Not usable until eq_hist has been computed")
isscalar = np.isscalar(data)
data = np.array([data]) if isscalar else data
interped = np.interp(data, self._bin_edges, self._color_bins)
return interped, isscalar
def inverse(self, value):
if self._bin_edges is None:
raise ValueError("Not invertible until eq_hist has been computed")
return np.interp([value], self._color_bins, self._bin_edges)[0]
def autoscale(self, A):
super(EqHistNormalize, self).autoscale(A)
self._bin_edges = self._binning(A, self._ncolors)
def autoscale_None(self, A):
super(EqHistNormalize, self).autoscale_None(A)
self._bin_edges = self._binning(A, self._ncolors)
def scaled(self):
return super(EqHistNormalize, self).scaled() and self._bin_edges is not None
class DSArtist(_ImageBase):
def __init__(
self,
ax,
df,
glyph,
aggregator,
agg_hook,
shade_hook,
plot_width,
plot_height,
x_range,
y_range,
width_scale,
height_scale,
origin="lower",
interpolation="none",
**kwargs
):
super().__init__(ax, origin=origin, interpolation=interpolation, **kwargs)
self.axes = ax
self.df = df
self.glyph = glyph
self.aggregator = aggregator
self.agg_hook = agg_hook
self.shade_hook = shade_hook
self.plot_width = plot_width
self.plot_height = plot_height
self.width_scale = width_scale
self.height_scale = height_scale
if x_range is None:
x_col = glyph.x_label
x_range = (df[x_col].min(), df[x_col].max())
if y_range is None:
y_col = glyph.y_label
y_range = (df[y_col].min(), df[y_col].max())
ax.set_xlim(x_range)
ax.set_ylim(y_range)
def aggregate(self, x_range, y_range):
"""Aggregate data in given range to the window dimensions."""
dims = self.axes.patch.get_window_extent().bounds
if self.plot_width is None:
plot_width = int(int(dims[2] + 0.5) * self.width_scale)
else:
plot_width = self.plot_width
if self.plot_height is None:
plot_height = int(int(dims[3] + 0.5) * self.height_scale)
else:
plot_height = self.plot_height
canvas = Canvas(
plot_width=plot_width,
plot_height=plot_height,
x_range=x_range,
y_range=y_range,
)
binned = bypixel(self.df, canvas, self.glyph, self.aggregator)
return binned
def shade(self, binned):
"""Convert an aggregate into an RGBA array."""
raise NotImplementedError
def make_image(self, renderer, magnification=1.0, unsampled=True):
"""
Normalize, rescale, and colormap this image's data for rendering using
*renderer*, with the given *magnification*.
If *unsampled* is True, the image will not be scaled, but an
appropriate affine transformation will be returned instead.
Returns
-------
image : (M, N, 4) uint8 array
The RGBA image, resampled unless *unsampled* is True.
x, y : float
The upper left corner where the image should be drawn, in pixel
space.
trans : Affine2D
The affine transformation from image to pixel space.
"""
x1, x2, y1, y2 = self.get_extent()
bbox = Bbox(np.array([[x1, y1], [x2, y2]]))
trans = self.get_transform()
transformed_bbox = TransformedBbox(bbox, trans)
if (
self.plot_width is not None
or self.plot_height is not None
or self.width_scale != 1.0
or self.height_scale != 1.0
):
unsampled = False
# Aggregate
binned = self.aggregate([x1, x2], [y1, y2])
if self.agg_hook is not None:
binned = self.agg_hook(binned)
self.set_ds_data(binned)
# Normalize and color to make an RGBA array
rgba = self.shade(binned)
if self.shade_hook is not None:
img = to_ds_image(binned, rgba)
img = self.shade_hook(img)
rgba = uint32_to_uint8(img.data)
self.set_array(rgba)
return self._make_image(
rgba,
bbox,
transformed_bbox,
self.axes.bbox,
magnification=magnification,
unsampled=unsampled,
)
def set_ds_data(self, binned):
"""
Set the aggregate data for the bounding box currently displayed.
Should be a :class:`xarray.DataArray`.
"""
self._ds_data = binned
def get_ds_data(self):
"""
Return the aggregated, pre-shaded :class:`xarray.DataArray` backing the
bounding box currently displayed.
"""
return self._ds_data
def get_extent(self):
"""Return the image extent as tuple (left, right, bottom, top)"""
(x1, x2), (y1, y2) = self.axes.get_xlim(), self.axes.get_ylim()
return x1, x2, y1, y2
def get_cursor_data(self, event):
"""
Return the aggregated data at the event position or *None* if the
event is outside the bounds of the current view.
"""
xmin, xmax, ymin, ymax = self.get_extent()
if self.origin == "upper":
ymin, ymax = ymax, ymin
arr = self.get_ds_data().data
data_extent = Bbox([[ymin, xmin], [ymax, xmax]])
array_extent = Bbox([[0, 0], arr.shape[:2]])
trans = BboxTransform(boxin=data_extent, boxout=array_extent)
y, x = event.ydata, event.xdata
i, j = trans.transform_point([y, x]).astype(int)
# Clip the coordinates at array bounds
if not (0 <= i < arr.shape[0]) or not (0 <= j < arr.shape[1]):
return None
else:
return arr[i, j]
class ScalarDSArtist(DSArtist):
def __init__(
self,
ax,
df,
glyph,
aggregator,
agg_hook=None,
shade_hook=None,
plot_width=None,
plot_height=None,
x_range=None,
y_range=None,
width_scale=1.0,
height_scale=1.0,
norm=None,
cmap=None,
alpha=None,
**kwargs
):
super().__init__(
ax,
df,
glyph,
aggregator,
agg_hook,
shade_hook,
plot_width,
plot_height,
x_range,
y_range,
width_scale,
height_scale,
**kwargs
)
self._vmin = norm.vmin
self._vmax = norm.vmax
self.set_norm(norm)
self.set_cmap(cmap)
self.set_alpha(alpha)
# Aggregate the current view
binned = self.aggregate(self.axes.get_xlim(), self.axes.get_ylim())
if self.agg_hook is not None:
binned = self.agg_hook(binned)
self.set_ds_data(binned)
# Placeholder until self.make_image
self.set_array(np.eye(2))
def shade(self, binned):
# Mask missing data in the greyscale array
mask = compute_mask(binned.data)
A = np.ma.masked_array(binned.data, mask)
# Rescale the norm to the current array
self.set_array(A)
self.norm.vmin = self._vmin
self.norm.vmax = self._vmax
self.autoscale_None()
# Make the image with matplotlib
return self.to_rgba(A, bytes=True, norm=True)
def get_ds_image(self):
binned = self.get_ds_data()
rgba = self.to_rgba(self.get_array(), bytes=True, norm=True)
return to_ds_image(binned, rgba)
def get_legend_elements(self):
return None
class CategoricalDSArtist(DSArtist):
def __init__(
self,
ax,
df,
glyph,
aggregator,
agg_hook=None,
shade_hook=None,
plot_width=None,
plot_height=None,
x_range=None,
y_range=None,
width_scale=1.0,
height_scale=1.0,
color_key=None,
alpha_range=(40, 255),
color_baseline=None,
**kwargs
):
super().__init__(
ax,
df,
glyph,
aggregator,
agg_hook,
shade_hook,
plot_width,
plot_height,
x_range,
y_range,
width_scale,
height_scale,
**kwargs
)
self._color_key = color_key
self._alpha_range = alpha_range
self._color_baseline = color_baseline
# Aggregate the current view
binned = self.aggregate(self.axes.get_xlim(), self.axes.get_ylim())
if self.agg_hook is not None:
binned = self.agg_hook(binned)
self.set_ds_data(binned)
# Placeholder until self.make_image
self.set_array(np.eye(2))
def shade(self, binned):
# Make the blended image with datashader
img = tf.shade(
binned,
color_key=self._color_key,
min_alpha=self._alpha_range[0],
alpha=self._alpha_range[1],
color_baseline=self._color_baseline,
)
rgba = uint32_to_uint8(img.data)
return rgba
def get_ds_image(self):
binned = self.get_ds_data()
rgba = self.get_array()
return to_ds_image(binned, rgba)
def get_legend_elements(self):
"""
Return legend elements to display the color code for each category.
"""
if not isinstance(self._color_key, dict):
binned = self.get_ds_data()
categories = binned.coords[binned.dims[2]].data
color_dict = dict(zip(categories, self._color_key))
else:
color_dict = self._color_key
return [
Patch(facecolor=color, edgecolor="none", label=category)
for category, color in color_dict.items()
]
def dsshow(
df,
glyph,
aggregator=reductions.count(),
agg_hook=None,
shade_hook=None,
plot_width=None,
plot_height=None,
x_range=None,
y_range=None,
width_scale=1.0,
height_scale=1.0,
*,
norm=None,
cmap=None,
alpha=None,
vmin=None,
vmax=None,
color_key=Sets1to3,
alpha_range=(40, 255),
color_baseline=None,
ax=None,
fignum=None,
aspect=None,
**kwargs
):
"""
Display the output of a data shading pipeline applied to a dataframe.
The plot will respond to changes in the data space bounds displayed, such
as pan/zoom events. Both scalar and categorical pipelines are supported.
Parameters
----------
df : pandas.DataFrame, dask.DataFrame
Dataframe to apply the datashading pipeline to.
glyph : Glyph
The glyph to bin by.
aggregator : Reduction, optional, default: :class:`~.count`
The reduction to compute per-pixel.
agg_hook : callable, optional
A callable that takes the computed aggregate as an argument, and
returns another aggregate. This can be used to do preprocessing before
the aggregate is converted to an image.
shade_hook : callable, optional
A callable that takes the image output of the shading pipeline, and
returns another :class:`~.Image` object. See :func:`~.dynspread` and
:func:`~.spread` for examples.
plot_width, plot_height : int, optional
Grid dimensions, i.e. the width and height of the output aggregates in
pixels. Default is to use the native width and height dimensions of
the axes bounding box.
x_range, y_range : pair of float, optional
A tuple representing the initial bounds inclusive space ``[min, max]``
along the axis. If None, the initial bounds will encompass all of the
data along the axis.
height_scale : float, optional
Factor by which to scale the height of the image in pixels relative to
the height of the display space in pixels.
width_scale : float, optional
Factor by which to scale the width of the image in pixels relative to
the width of the display space in pixels.
norm : str or :class:`matplotlib.colors.Normalize`, optional
For scalar aggregates, a matplotlib norm to normalize the
aggregate data to [0, 1] before colormapping. The datashader arguments
'linear', 'log', 'cbrt' and 'eq_hist' are also supported and correspond
to equivalent matplotlib norms. Default is the linear norm.
cmap : str or list or :class:`matplotlib.cm.Colormap`, optional
For scalar aggregates, a matplotlib colormap name or instance.
Alternatively, an iterable of colors can be passed and will be converted
to a colormap. For a single-color, transparency-based colormap, see
:func:`alpha_colormap`.
alpha : float
For scalar aggregates, the alpha blending value, between 0
(transparent) and 1 (opaque).
vmin, vmax : float, optional
For scalar aggregates, the data range that the colormap covers.
If vmin or vmax is None (default), the colormap autoscales to the
range of data in the area displayed, unless the corresponding value is
already set in the norm.
color_key : dict or iterable, optional
For categorical aggregates, the colors to use for blending categories.
See `tf.shade`.
alpha_range : pair of int, optional
For categorical aggregates, the minimum and maximum alpha values in
[0, 255] to use to indicate data values of non-empty pixels. The
default range is (40, 255).
color_baseline : float, optional
For categorical aggregates, the baseline for calculating how
categorical data mixes to determine the color of a pixel. See
`tf.shade` for more information.
Other Parameters
----------------
ax : `matplotlib.Axes`, optional
Axes to draw into. If *None*, create a new figure or use ``fignum`` to
draw into an existing figure.
fignum : None or int or False, optional
If *None* and ``ax`` is *None*, create a new figure window with
automatic numbering.
If a nonzero integer and ``ax`` is *None*, draw into the figure with
the given number (create it if it does not exist).
If 0, use the current axes (or create one if it does not exist).
aspect : {'equal', 'auto'} or float, default: ``rcParams["image.aspect"]``
The aspect ratio of the axes.
**kwargs
All other kwargs are passed to the artist.
Returns
-------
:class:`ScalarDSArtist` or :class:`CategoricalDSArtist`
Notes
-----
If the aggregation is scalar/single-category (i.e. generates a 2D scalar
mappable), the artist can be used to make a colorbar. See example.
If the aggregation is multi-category (i.e. generates a 3D array with
two or more components that get composited to form an image), you can use
the :meth:`CategoricalDSArtist.get_legend_elements` method to obtain patch
handles that can be used to make a legend. See example.
Examples
--------
Generate two Gaussian point clouds and (1) plot the density as a
quantitative map and (2) color the points by category.
.. plot::
:context: close-figs
>>> import pandas as pd
>>> import datashader as ds
>>> import matplotlib.pyplot as plt
>>> from datashader.mpl_ext import dsshow
>>> n = 10000
>>> df = pd.DataFrame({
... 'x': np.r_[np.random.randn(n) - 1, np.random.randn(n) + 1],
... 'y': np.r_[np.random.randn(n), np.random.randn(n)],
... 'c': pd.Categorical(np.r_[['cloud 1'] * n, ['cloud 2'] * n])
... })
>>> da1 = dsshow(
... df,
... ds.Point('x', 'y'),
... aspect='equal'
... )
>>> plt.colorbar(da1); # doctest: +SKIP
>>> da2 = dsshow(
... df,
... ds.Point('x', 'y'),
... ds.count_cat('c'),
... aspect='equal'
... )
>>> plt.legend(handles=da2.get_legend_elements()); # doctest: +SKIP
"""
import matplotlib.pyplot as plt
if fignum == 0:
ax = plt.gca()
elif ax is None:
# Make appropriately sized figure.
fig = plt.figure(fignum)
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
if isinstance(aggregator, reductions.by):
artist = CategoricalDSArtist(
ax,
df,
glyph,
aggregator,
agg_hook,
shade_hook,
plot_width=plot_width,
plot_height=plot_height,
x_range=x_range,
y_range=y_range,
width_scale=width_scale,
height_scale=height_scale,
color_key=color_key,
alpha_range=alpha_range,
color_baseline=color_baseline,
**kwargs
)
else:
if cmap is not None:
if isinstance(cmap, list):
cmap = mpl.colors.LinearSegmentedColormap.from_list("_datashader", cmap)
if norm is None:
norm = mpl.colors.Normalize()
elif isinstance(norm, str):
if norm == "linear":
norm = mpl.colors.Normalize()
elif norm == "log":
norm = mpl.colors.LogNorm()
elif norm == "cbrt":
norm = mpl.colors.PowerNorm(1 / 3)
elif norm == "eq_hist":
norm = EqHistNormalize()
if not isinstance(norm, mpl.colors.Normalize):
raise ValueError(
"`norm` must be one of 'linear', 'log', 'cbrt', 'eq_hist', "
"or a matplotlib norm instance."
)
if vmin is not None:
norm.vmin = vmin
if vmax is not None:
norm.vmax = vmax
artist = ScalarDSArtist(
ax,
df,
glyph,
aggregator,
agg_hook,
shade_hook,
plot_width=plot_width,
plot_height=plot_height,
x_range=x_range,
y_range=y_range,
width_scale=width_scale,
height_scale=height_scale,
norm=norm,
cmap=cmap,
alpha=alpha,
**kwargs
)
ax.add_artist(artist)
if aspect is None:
aspect = plt.rcParams["image.aspect"]
ax.set_aspect(aspect)
return artist
| 32.278912
| 89
| 0.582044
|
6132220a9657b8015cde10327f13569fa8dc7d52
| 10,397
|
py
|
Python
|
plugins/modules/oci_database_data_guard_association_facts.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 108
|
2020-05-19T20:46:10.000Z
|
2022-03-25T14:10:01.000Z
|
plugins/modules/oci_database_data_guard_association_facts.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 90
|
2020-06-14T22:07:11.000Z
|
2022-03-07T05:40:29.000Z
|
plugins/modules/oci_database_data_guard_association_facts.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 42
|
2020-08-30T23:09:12.000Z
|
2022-03-25T16:58:01.000Z
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_data_guard_association_facts
short_description: Fetches details about one or multiple DataGuardAssociation resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple DataGuardAssociation resources in Oracle Cloud Infrastructure
- Lists all Data Guard associations for the specified database.
- If I(data_guard_association_id) is specified, the details of a single DataGuardAssociation will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
database_id:
description:
- The database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
type: str
required: true
data_guard_association_id:
description:
- The Data Guard association's L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to get a specific data_guard_association.
type: str
aliases: ["id"]
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific data_guard_association
oci_database_data_guard_association_facts:
# required
database_id: "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx"
data_guard_association_id: "ocid1.dataguardassociation.oc1..xxxxxxEXAMPLExxxxxx"
- name: List data_guard_associations
oci_database_data_guard_association_facts:
# required
database_id: "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
data_guard_associations:
description:
- List of DataGuardAssociation resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Data Guard association.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the reporting database.
returned: on success
type: str
sample: "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx"
role:
description:
- The role of the reporting database in this Data Guard association.
returned: on success
type: str
sample: PRIMARY
lifecycle_state:
description:
- The current state of the Data Guard association.
returned: on success
type: str
sample: PROVISIONING
lifecycle_details:
description:
- Additional information about the current lifecycleState, if available.
returned: on success
type: str
sample: lifecycle_details_example
peer_db_system_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the DB system containing the associated
peer database.
returned: on success
type: str
sample: "ocid1.peerdbsystem.oc1..xxxxxxEXAMPLExxxxxx"
peer_db_home_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Database Home containing the associated peer
database.
returned: on success
type: str
sample: "ocid1.peerdbhome.oc1..xxxxxxEXAMPLExxxxxx"
peer_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the associated peer database.
returned: on success
type: str
sample: "ocid1.peerdatabase.oc1..xxxxxxEXAMPLExxxxxx"
peer_data_guard_association_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the peer database's Data Guard association.
returned: on success
type: str
sample: "ocid1.peerdataguardassociation.oc1..xxxxxxEXAMPLExxxxxx"
peer_role:
description:
- The role of the peer database in this Data Guard association.
returned: on success
type: str
sample: PRIMARY
apply_lag:
description:
- The lag time between updates to the primary database and application of the redo data on the standby database,
as computed by the reporting database.
- "Example: `9 seconds`"
returned: on success
type: str
sample: apply_lag_example
apply_rate:
description:
- The rate at which redo logs are synced between the associated databases.
- "Example: `180 Mb per second`"
returned: on success
type: str
sample: apply_rate_example
protection_mode:
description:
- The protection mode of this Data Guard association. For more information, see
L(Oracle Data Guard Protection Modes,http://docs.oracle.com/database/122/SBYDB/oracle-data-guard-protection-modes.htm#SBYDB02000)
in the Oracle Data Guard documentation.
returned: on success
type: str
sample: MAXIMUM_AVAILABILITY
transport_type:
description:
- The redo transport type used by this Data Guard association. For more information, see
L(Redo Transport Services,http://docs.oracle.com/database/122/SBYDB/oracle-data-guard-redo-transport-services.htm#SBYDB00400)
in the Oracle Data Guard documentation.
returned: on success
type: str
sample: SYNC
time_created:
description:
- The date and time the Data Guard association was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"database_id": "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx",
"role": "PRIMARY",
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example",
"peer_db_system_id": "ocid1.peerdbsystem.oc1..xxxxxxEXAMPLExxxxxx",
"peer_db_home_id": "ocid1.peerdbhome.oc1..xxxxxxEXAMPLExxxxxx",
"peer_database_id": "ocid1.peerdatabase.oc1..xxxxxxEXAMPLExxxxxx",
"peer_data_guard_association_id": "ocid1.peerdataguardassociation.oc1..xxxxxxEXAMPLExxxxxx",
"peer_role": "PRIMARY",
"apply_lag": "apply_lag_example",
"apply_rate": "apply_rate_example",
"protection_mode": "MAXIMUM_AVAILABILITY",
"transport_type": "SYNC",
"time_created": "2013-10-20T19:20:30+01:00"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database import DatabaseClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataGuardAssociationFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"database_id",
"data_guard_association_id",
]
def get_required_params_for_list(self):
return [
"database_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_data_guard_association,
database_id=self.module.params.get("database_id"),
data_guard_association_id=self.module.params.get(
"data_guard_association_id"
),
)
def list_resources(self):
optional_list_method_params = []
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_data_guard_associations,
database_id=self.module.params.get("database_id"),
**optional_kwargs
)
DataGuardAssociationFactsHelperCustom = get_custom_class(
"DataGuardAssociationFactsHelperCustom"
)
class ResourceFactsHelper(
DataGuardAssociationFactsHelperCustom, DataGuardAssociationFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
database_id=dict(type="str", required=True),
data_guard_association_id=dict(aliases=["id"], type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="data_guard_association",
service_client_class=DatabaseClient,
namespace="database",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(data_guard_associations=result)
if __name__ == "__main__":
main()
| 36.868794
| 152
| 0.652977
|
8c68f97661b9492e385d0dfda2523ddabb3c5383
| 1,827
|
py
|
Python
|
cbmc_viewer/runt.py
|
natasha-jeppu/aws-viewer-for-cbmc
|
456883a2db2cb043dfe0d859ed005ae50cb42a8d
|
[
"Apache-2.0"
] | null | null | null |
cbmc_viewer/runt.py
|
natasha-jeppu/aws-viewer-for-cbmc
|
456883a2db2cb043dfe0d859ed005ae50cb42a8d
|
[
"Apache-2.0"
] | null | null | null |
cbmc_viewer/runt.py
|
natasha-jeppu/aws-viewer-for-cbmc
|
456883a2db2cb043dfe0d859ed005ae50cb42a8d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Run a command with simple error handling."""
import logging
import subprocess
import sys
def run(cmd, cwd=None, ignored=None, encoding=None):
"""Run command cmd in directory cwd.
The argument 'ignored' may be a list of integers giving command
return codes that are acceptable and can be ignored.
The argument 'encoding' is a character encoding for the text
string captured as stdout and stderr. The default encoding for
modern Python is utf-8, but source code written on Windows
platforms uses a different character encoding. In this case,
'latin1' is a reasonable choice, since it agrees with utf-8 on the
ascii character set. The wikipedia page on latin1 goes so far as
to say latin1 is "often assumed to be the encoding of 8-bit text
on Unix and Microsoft Windows...".
"""
kwds = {
'cwd': cwd,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'universal_newlines': True,
}
# encoding keyword argument was introduced in Python 3.6
if sys.version_info >= (3, 6):
kwds['encoding'] = encoding
logging.debug('run: cmd: %s', cmd)
logging.debug('run: kwds: %s', kwds)
result = subprocess.run(cmd, **kwds, check=False)
if result.returncode:
logging.debug('Failed to run command: %s', ' '.join(cmd))
logging.debug('Failed return code: %s', result.returncode)
logging.debug('Failed stdout: %s', result.stdout)
logging.debug('Failed stderr: %s', result.stderr)
if ignored is None or result.returncode not in ignored:
result.check_returncode()
logging.debug('Ignoring failure to run command: %s', cmd)
return result.stdout
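# --- Added illustrative sketch (not part of the original module) ---
# Minimal usage of run(): capture a command's output while tolerating an
# "acceptable" non-zero exit code, using the latin1 decoding discussed in the
# docstring. The grep invocation is an assumption chosen only for illustration.
def _example_run(search_root="."):
    # grep exits with 1 when it simply finds no matches, so treat 1 as acceptable
    return run(["grep", "-r", "TODO", search_root], ignored=[1], encoding="latin1")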
| 35.134615
| 70
| 0.67433
|
95af194cca1c8044486f9423ec993532c757f229
| 2,446
|
py
|
Python
|
protonfixes/gamefixes/600720.py
|
Citiroller/protonfixes
|
6e0116bd1cd2172b6f0ff9905667bbc59595cdb7
|
[
"BSD-2-Clause"
] | null | null | null |
protonfixes/gamefixes/600720.py
|
Citiroller/protonfixes
|
6e0116bd1cd2172b6f0ff9905667bbc59595cdb7
|
[
"BSD-2-Clause"
] | null | null | null |
protonfixes/gamefixes/600720.py
|
Citiroller/protonfixes
|
6e0116bd1cd2172b6f0ff9905667bbc59595cdb7
|
[
"BSD-2-Clause"
] | null | null | null |
""" Game fix for Trackmania² Lagoon
"""
#pylint: disable=C0103
import os
import shutil
from protonfixes import util
from protonfixes.logger import log
mania_planet_games = [228760, 229870, 232910, 233050, 233070,
243360, 264660, 264850, 600720, 600730]
def main():
""" Create a ManiaPlanet folder in compatdata and link the prefixes for every game_bottle.
With this games ManiaPlanet games can be switched while in game. (Same as in windows now)
"""
game_proton_bottle = os.path.dirname(os.path.dirname(util.protonprefix()))
compdata_folder = os.path.dirname(game_proton_bottle)
mania_planet_pfx = os.path.join(compdata_folder, "ManiaPlanet")
if not os.path.exists(mania_planet_pfx):
log("Could not find ManiaPlanet directory.")
log("Creating new folder and symlinking games to it.")
pfx_folder = os.path.join(game_proton_bottle, "pfx")
os.rename(pfx_folder, mania_planet_pfx)
os.symlink(mania_planet_pfx, pfx_folder)
for game_id in mania_planet_games:
game_pfx = os.path.join(compdata_folder, str(game_id), "pfx")
log("Checking {}".format(game_id))
if not os.path.exists(game_pfx):
log("No prefix for {} found, skipping.".format(game_id))
continue
if os.path.islink(game_pfx):
log("{} is already a symlink, skipping.".format(game_id))
continue
log("Copying contents of {} to ManiaPlanet folder.".format(game_id))
for src_dir, dirs, files in os.walk(game_pfx):
dst_dir = src_dir.replace(game_pfx, mania_planet_pfx, 1)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file) or not os.path.exists(src_file):
continue
try:
shutil.move(src_file, dst_file)
log("Moving {} to {}".format(src_file, dst_file))
except FileNotFoundError:
# FIXME: paths with special chars (&, whitespace) do not work!
log("Can't move {}. Continuing anyway.".format(src_file))
log("Removing {}".format(game_pfx))
shutil.rmtree(game_pfx)
log("Symlinking {} prefix to ManiaPlanet folder.".format(game_id))
os.symlink(mania_planet_pfx, game_pfx)
log("All DONE")
| 41.457627
| 97
| 0.631235
|
003d0c72db78aca52471fb7e4d393f00f863e43f
| 91
|
py
|
Python
|
tests/test_archivator.py
|
seik/archivator
|
c0dc59b193eadf4f3b9c2713969b0a8a3b53598d
|
[
"MIT"
] | 1
|
2020-06-11T11:51:38.000Z
|
2020-06-11T11:51:38.000Z
|
tests/test_archivator.py
|
seik/archivator
|
c0dc59b193eadf4f3b9c2713969b0a8a3b53598d
|
[
"MIT"
] | 6
|
2020-04-03T20:04:09.000Z
|
2020-04-21T06:12:11.000Z
|
tests/test_archivator.py
|
seik/archivator
|
c0dc59b193eadf4f3b9c2713969b0a8a3b53598d
|
[
"MIT"
] | null | null | null |
from archivator import __version__
def test_version():
assert __version__ == "0.1.0"
| 15.166667
| 34
| 0.725275
|
1ace656c0ef233be51c7eb19ae7a1eaabb404d5a
| 8,048
|
py
|
Python
|
visualize_results.py
|
akazako1/CollinsLab_ScrunchingAnalysis
|
91509671fdada9b59f0e3e027b989afc53e5d45d
|
[
"MIT"
] | 1
|
2021-11-04T01:10:21.000Z
|
2021-11-04T01:10:21.000Z
|
visualize_results.py
|
akazako1/CollinsLab_ScrunchingAnalysis
|
91509671fdada9b59f0e3e027b989afc53e5d45d
|
[
"MIT"
] | null | null | null |
visualize_results.py
|
akazako1/CollinsLab_ScrunchingAnalysis
|
91509671fdada9b59f0e3e027b989afc53e5d45d
|
[
"MIT"
] | null | null | null |
# code for displaying multiple images in one figure
# import libraries
import cv2 as cv
import numpy as np
import matplotlib
#matplotlib.use('Qt5Agg') # Apple doesn't like Tkinter (TkAgg backend) so I needed to change the backend to 'Qt5Agg'
from matplotlib import pyplot as plt
import read_input as rin
import data_collection
import skimage.measure as skmeasure
import glob
import read_input
def plot_mean_line(data, time):
y_mean = [np.mean(data)] * len(time)
plt.plot(time, y_mean, label='Mean', linestyle='--')
plt.show()
def displayVideo(filtered_imgs, outpath):
frameSize = filtered_imgs[0].shape[:2]
fourcc = cv.VideoWriter_fourcc('M','J','P','G')
video = cv.VideoWriter(outpath, fourcc, 10, frameSize, False) #10 fps
for i, img in enumerate(filtered_imgs):
img = np.uint8(img)*250
img = cv.putText(img, str(i), (30, 30), cv.FONT_HERSHEY_COMPLEX, 1, (200, 0, 0), 2)
video.write(img)
video.release()
cv.destroyAllWindows()
""" Creates and saves a video from raw images for a particular well
NOTE: change the output path every time -- otherwise the movie would be corrupted
"""
def displayFullVideo(start_frame, last_frame, scale_percent = 100, fps = 5, filepath='/Users/Arina/', outpath=None):
if outpath==None:
outpath = filepath + "/fullPlate.avi"
img_array = []
for i in range(start_frame, last_frame):
newPath = filepath + "/" + str(i) + ".jpeg"
print(newPath)
img = cv.imread(newPath)
if img is not None:
img = cv.putText(img, str(start_frame+i), (50, 200), cv.FONT_HERSHEY_COMPLEX, 2, (200, 0, 0), 3)
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
if scale_percent != 100:
dsize = (width, height)
img = cv.resize(img, dsize) # resize image
img_array.append(img)
else:
continue
fourcc = cv.VideoWriter_fourcc('M','J','P','G')
#fourcc = cv.VideoWriter_fourcc(*'X264')
video = cv.VideoWriter(outpath, fourcc, fps, (width, height))
for img in img_array:
video.write(img)
video.release()
cv.destroyAllWindows()
""" Creates and saves a video from raw images for a particular well """
def displayOrigVideo(start_frame, last_frame, filepath, wellNum, outpath='project.avi', fps=5):
img_array = []
#for filename in sorted(glob.glob('/Users/Arina/Desktop/02/results/well_1/*.png'), key=numericalSort):
for i in range(start_frame, last_frame):
newPath = filepath + "/" + "results/" + "well_" + str(wellNum) + "/croppedImage_" + str(i + 1) + ".png"
im = cv.imread(newPath)
if im is not None:
img_array.append(im)
else:
continue
frameSize = img_array[0].shape[:2]
fourcc = cv.VideoWriter_fourcc('M','J','P','G')
video = cv.VideoWriter(outpath, fourcc, fps, frameSize, False) #10 fps
for img in img_array:
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
video.write(img)
video.release()
cv.destroyAllWindows()
"""
Assumptions: 10 fps;
"""
def plotMAL(major_axis_lengths, MAL = True, title="Plot of MAL over time", outpath = "MAL plot", show = True):
if MAL == True:
time = np.arange(start=0, stop=(len(major_axis_lengths))/5, step = 0.2)
plt.plot(time, major_axis_lengths)
plt.title(title)
plt.ylabel('major axis length, pix')
plt.xlabel('time, s')
plt.legend(['MAL, in pix'])
plt.savefig(outpath)
plt.show()
plt.close()
if not show:
plt.close('all')
def plotAxes(img):
""" This is my variation of plotting major/minor axes on the image """
label_image = skmeasure.label(img)
axis_major, major_len = data_collection.inertia(label_image, "major")
axis_minor, minor_len = data_collection.inertia(label_image, "minor")
x_coord_axis_major = (axis_major[0][0], axis_major[1][0])
y_coord_axis_major = (axis_major[0][1], axis_major[1][1])
x_coord_axis_minor = (axis_minor[0][0], axis_minor[1][0])
y_coord_axis_minor = (axis_minor[0][1], axis_minor[1][1])
plt.show()
fig, ax = plt.subplots()
ax.imshow(img)
ax.plot(x_coord_axis_major, y_coord_axis_major, '-', linewidth=2)
ax.imshow(img)
ax.plot(x_coord_axis_minor, y_coord_axis_minor, '-', linewidth=2)
""" This is Alex's version """
axis_major2, inertia, skewness, kurt, vari = data_collection.inertia2(label_image, "major")
axis_minor2, inertia, skewness, kurt, vari = data_collection.inertia2(label_image, "minor")
x_coord_axis_major2 = (axis_major2[1][1], axis_major2[0][1])
y_coord_axis_major2 = (axis_major2[1][0], axis_major2[0][0])
x_coord_axis_minor2 = (axis_minor2[1][1], axis_minor2[0][1])
y_coord_axis_minor2 = (axis_minor2[0][0], axis_minor2[1][0])
plt.show()
fig, ax = plt.subplots()
ax.imshow(img)
ax.plot(x_coord_axis_major2, y_coord_axis_major2, '-', linewidth=2)
ax.imshow(img)
ax.plot(x_coord_axis_minor2, y_coord_axis_minor2, '-', linewidth=2)
"""
Creates a pane of images to display
needs some editing
"""
def showImgs():
# create figure
fig = plt.figure(figsize=(50, 35))
# setting values to rows and column variables
rows = 6
columns = 8
imgs = []
print(len(imgs))
# reading images
for i in range(rows*columns):
imgs = rin.read_input(1, 48, filepath="/Users/Arina/Desktop/9")
# Adds a subplot at the i-th position
fig.add_subplot(rows, columns, i+1)
# showing image
plt.imshow(imgs[i])
plt.axis('off')
plt.title("Well number " + str(i))
#showImgs()
"""
## Identify the worm
oneWellImg = skan.pre.threshold(cropped)
plt.imshow(np.uint8(cropped))
plt.show()
plt.imshow(np.uint8(oneWellImg))
plt.show()
skeleton = morphology.skeletonize(oneWellImg)
#cv.imshow("skeleton", skeleton)
plt.imshow(skeleton)
plt.show()
#skel_obj = skan.Skeleton(oneWellImg)
#skel_obj.path_lengths(0)
def find_contours():
contours, _ = cv.findContours(img, cv.RETR_TREE, cv.CHAIN_APPROX_NONE) #or cv.CHAIN_APPROX_SIMPLE?
cnt = contours[0]
M = cv.moments(cnt) #can be used to calculate other params
contours = contours[0].reshape(-1, 2) #Reshape to 2D matrices
img_copied = img.copy() #draw the points as individual circles in the image
for (x, y) in contours:
cv.circle(img_copied, (x, y), 1, (255, 0, 0), 3)
cv.imshow("contours", img_copied)
cv.waitKey(5)
def displaySkeletons(image, skeleton):
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(image, return_distance=True)
# Compare with other skeletonization algorithms
skeleton = skeletonize(image)
skeleton_lee = skeletonize(image, method='lee')
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('original')
ax[0].axis('off')
ax[1].imshow(dist_on_skel, cmap='magma')
ax[1].contour(image, [0.5], colors='w')
ax[1].set_title('medial_axis')
ax[1].axis('off')
ax[2].imshow(skeleton, cmap=plt.cm.gray)
ax[2].set_title('skeletonize')
ax[2].axis('off')
ax[3].imshow(skeleton_lee, cmap=plt.cm.gray)
ax[3].set_title("skeletonize (Lee 94)")
ax[3].axis('off')
fig.tight_layout()
plt.show()
"""
displayFullVideo(1, 1510, scale_percent = 80, fps=10, filepath='/Users/Arina/Downloads/17/17', outpath='/Users/Arina/Downloads/mefl_17_0910_25uM.avi')
displayFullVideo(1, 1510, scale_percent = 80, fps=10, filepath='/Users/Arina/Downloads/18', outpath='/Users/Arina/Downloads/mefl_18_0910_25uM.avi')
displayFullVideo(1, 1510, scale_percent = 80, fps=10, filepath='/Users/Arina/Downloads/19', outpath='/Users/Arina/Downloads/mefl_19_0910_50uM.avi')
| 34.246809
| 150
| 0.657058
|
7acf434c8b36235251ca20345b06982397f1c9e5
| 11,039
|
py
|
Python
|
mbrl/diagnostics/visualize_model_preds.py
|
pecey/mbrl-lib
|
ebca518b35a1370dbaede2a1c96fcde714bc5489
|
[
"MIT"
] | 1
|
2021-06-17T08:35:31.000Z
|
2021-06-17T08:35:31.000Z
|
mbrl/diagnostics/visualize_model_preds.py
|
pecey/mbrl-lib
|
ebca518b35a1370dbaede2a1c96fcde714bc5489
|
[
"MIT"
] | 1
|
2021-08-13T12:37:03.000Z
|
2021-08-14T16:34:31.000Z
|
mbrl/diagnostics/visualize_model_preds.py
|
pecey/mbrl-lib
|
ebca518b35a1370dbaede2a1c96fcde714bc5489
|
[
"MIT"
] | 1
|
2021-12-13T10:50:33.000Z
|
2021-12-13T10:50:33.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import pathlib
from typing import Generator, List, Optional, Tuple, cast
import gym.wrappers
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import torch
import mbrl
import mbrl.models
import mbrl.planning
import mbrl.util.common
import mbrl.util.mujoco
VisData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
class Visualizer:
def __init__(
self,
lookahead: int,
results_dir: str,
agent_dir: Optional[str],
num_steps: Optional[int] = None,
num_model_samples: int = 1,
model_subdir: Optional[str] = None,
):
self.lookahead = lookahead
self.results_path = pathlib.Path(results_dir)
self.model_path = self.results_path
self.vis_path = self.results_path / "diagnostics"
if model_subdir:
self.model_path /= model_subdir
            # If model subdir is a child of diagnostics, remove "diagnostics" before
# appending to vis_path. This can happen, for example, if Finetuner
# generated this model with a model_subdir
if "diagnostics" in model_subdir:
model_subdir = pathlib.Path(model_subdir).name
self.vis_path /= model_subdir
pathlib.Path.mkdir(self.vis_path, parents=True, exist_ok=True)
self.num_model_samples = num_model_samples
self.num_steps = num_steps
self.cfg = mbrl.util.common.load_hydra_cfg(self.results_path)
self.env, term_fn, reward_fn = mbrl.util.mujoco.make_env(self.cfg)
self.reward_fn = reward_fn
self.dynamics_model = mbrl.util.common.create_one_dim_tr_model(
self.cfg,
self.env.observation_space.shape,
self.env.action_space.shape,
model_dir=self.model_path,
)
self.model_env = mbrl.models.ModelEnv(
self.env,
self.dynamics_model,
term_fn,
reward_fn,
generator=torch.Generator(self.dynamics_model.device),
)
self.agent: mbrl.planning.Agent
if agent_dir is None:
self.agent = mbrl.planning.RandomAgent(self.env)
else:
agent_cfg = mbrl.util.common.load_hydra_cfg(agent_dir)
if (
agent_cfg.algorithm.agent._target_
== "mbrl.planning.TrajectoryOptimizerAgent"
):
agent_cfg.algorithm.agent.planning_horizon = lookahead
self.agent = mbrl.planning.create_trajectory_optim_agent_for_model(
self.model_env,
agent_cfg.algorithm.agent,
num_particles=agent_cfg.algorithm.num_particles,
)
else:
self.agent = mbrl.planning.load_agent(agent_dir, self.env)
self.fig = None
self.axs: List[plt.Axes] = []
self.lines: List[plt.Line2D] = []
self.writer = animation.FFMpegWriter(
fps=15, metadata=dict(artist="Me"), bitrate=1800
)
        # The total reward obtained while building the visualization
self.total_reward = 0
def get_obs_rewards_and_actions(
self, obs: np.ndarray, use_mpc: bool = False
) -> VisData:
if use_mpc:
# When using MPC, rollout model trajectories to see the controller actions
model_obses, model_rewards, actions = mbrl.util.common.rollout_model_env(
self.model_env,
obs,
plan=None,
agent=self.agent,
num_samples=self.num_model_samples,
)
# Then evaluate in the environment
real_obses, real_rewards, _ = mbrl.util.mujoco.rollout_mujoco_env(
cast(gym.wrappers.TimeLimit, self.env),
obs,
self.lookahead,
agent=None,
plan=actions,
)
else:
# When not using MPC, rollout the agent on the environment and get its actions
real_obses, real_rewards, actions = mbrl.util.mujoco.rollout_mujoco_env(
cast(gym.wrappers.TimeLimit, self.env),
obs,
self.lookahead,
agent=self.agent,
)
# Then see what the model would predict for this
model_obses, model_rewards, _ = mbrl.util.common.rollout_model_env(
self.model_env,
obs,
agent=None,
plan=actions,
num_samples=self.num_model_samples,
)
return real_obses, real_rewards, model_obses, model_rewards, actions
def vis_rollout(self, use_mpc: bool = False) -> Generator:
obs = self.env.reset()
done = False
i = 0
while not done:
vis_data = self.get_obs_rewards_and_actions(obs, use_mpc=use_mpc)
action = self.agent.act(obs)
next_obs, reward, done, _ = self.env.step(action)
self.total_reward += reward
obs = next_obs
i += 1
if self.num_steps and i == self.num_steps:
break
yield vis_data
def set_data_lines_idx(
self,
plot_idx: int,
data_idx: int,
real_data: np.ndarray,
model_data: np.ndarray,
):
def adjust_ylim(ax, array):
ymin, ymax = ax.get_ylim()
real_ymin = array.min() - 0.5 * np.abs(array.min())
real_ymax = array.max() + 0.5 * np.abs(array.max())
if real_ymin < ymin or real_ymax > ymax:
self.axs[plot_idx].set_ylim(min(ymin, real_ymin), max(ymax, real_ymax))
self.axs[plot_idx].figure.canvas.draw()
def fix_array_len(array):
if len(array) < self.lookahead + 1:
new_array = np.ones((self.lookahead + 1,) + tuple(array.shape[1:]))
new_array *= array[-1]
new_array[: len(array)] = array
return new_array
return array
x_data = range(self.lookahead + 1)
if real_data.ndim == 1:
real_data = real_data[:, None]
if model_data.ndim == 2:
model_data = model_data[:, :, None]
real_data = fix_array_len(real_data)
model_data = fix_array_len(model_data)
adjust_ylim(self.axs[plot_idx], real_data[:, data_idx])
adjust_ylim(self.axs[plot_idx], model_data.mean(1)[:, data_idx])
self.lines[4 * plot_idx].set_data(x_data, real_data[:, data_idx])
model_obs_mean = model_data[:, :, data_idx].mean(axis=1)
model_obs_min = model_data[:, :, data_idx].min(axis=1)
model_obs_max = model_data[:, :, data_idx].max(axis=1)
self.lines[4 * plot_idx + 1].set_data(x_data, model_obs_mean)
self.lines[4 * plot_idx + 2].set_data(x_data, model_obs_min)
self.lines[4 * plot_idx + 3].set_data(x_data, model_obs_max)
def plot_func(self, data: VisData):
real_obses, real_rewards, model_obses, model_rewards, actions = data
num_plots = len(real_obses[0]) + 1
assert len(self.lines) == 4 * num_plots
for i in range(num_plots - 1):
self.set_data_lines_idx(i, i, real_obses, model_obses)
self.set_data_lines_idx(num_plots - 1, 0, real_rewards, model_rewards)
return self.lines
def create_axes(self):
num_plots = self.env.observation_space.shape[0] + 1
num_cols = int(np.ceil(np.sqrt(num_plots)))
num_rows = int(np.ceil(num_plots / num_cols))
fig, axs = plt.subplots(num_rows, num_cols)
fig.text(
0.5, 0.04, f"Time step (lookahead of {self.lookahead} steps)", ha="center"
)
fig.text(
0.04,
0.17,
"Predictions (blue/red) and ground truth (black).",
ha="center",
rotation="vertical",
)
axs = axs.reshape(-1)
lines = []
for i, ax in enumerate(axs):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_xlim(0, self.lookahead)
if i < num_plots:
(real_line,) = ax.plot([], [], "k")
(model_mean_line,) = ax.plot([], [], "r" if i == num_plots - 1 else "b")
(model_ub_line,) = ax.plot(
[], [], "r" if i == num_plots - 1 else "b", linewidth=0.5
)
(model_lb_line,) = ax.plot(
[], [], "r" if i == num_plots - 1 else "b", linewidth=0.5
)
lines.append(real_line)
lines.append(model_mean_line)
lines.append(model_lb_line)
lines.append(model_ub_line)
self.fig = fig
self.axs = axs
self.lines = lines
def run(self, use_mpc: bool):
self.create_axes()
ani = animation.FuncAnimation(
self.fig,
self.plot_func,
frames=lambda: self.vis_rollout(use_mpc=use_mpc),
blit=True,
interval=100,
save_count=self.num_steps,
repeat=False,
)
save_path = self.vis_path / f"rollout_{type(self.agent).__name__}_policy.mp4"
ani.save(save_path, writer=self.writer)
print(f"Video saved at {save_path}.")
        print(f"Total reward obtained was: {self.total_reward}.")
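# Illustrative command line for this diagnostic (a sketch only; the script name
# and paths are assumptions, not taken from the repository), mirroring the
# argparse options defined under the __main__ guard below:
#
#   python <this_script>.py --experiments_dir /path/to/experiment \
#       --agent_dir /path/to/agent --lookahead 25 --num_steps 200 \
#       --num_model_samples 35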
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--experiments_dir",
type=str,
default=None,
help="The directory where the original experiment was run.",
)
parser.add_argument("--lookahead", type=int, default=25)
parser.add_argument(
"--agent_dir",
type=str,
default=None,
help="The directory where the agent configuration and data is stored. "
"If not provided, a random agent will be used.",
)
parser.add_argument("--num_steps", type=int, default=200)
parser.add_argument(
"--model_subdir",
type=str,
default=None,
help="Can be used to point to models generated by other diagnostics tools.",
)
parser.add_argument(
"--num_model_samples",
type=int,
default=35,
help="Number of samples from the model, to visualize uncertainty.",
)
args = parser.parse_args()
visualizer = Visualizer(
lookahead=args.lookahead,
results_dir=args.experiments_dir,
agent_dir=args.agent_dir,
num_steps=args.num_steps,
num_model_samples=args.num_model_samples,
model_subdir=args.model_subdir,
)
use_mpc = isinstance(visualizer.agent, mbrl.planning.TrajectoryOptimizerAgent)
visualizer.run(use_mpc=use_mpc)
| 36.3125
| 90
| 0.585107
|
557ff981605dca12cb04f3bf278557843873e0c1
| 3,612
|
py
|
Python
|
pytype/pytd/parse/parser_test.py
|
yang/pytype-hack
|
093d69c9a6f6144f0209cdeb8eed6c7e832bbee0
|
[
"Apache-2.0"
] | null | null | null |
pytype/pytd/parse/parser_test.py
|
yang/pytype-hack
|
093d69c9a6f6144f0209cdeb8eed6c7e832bbee0
|
[
"Apache-2.0"
] | null | null | null |
pytype/pytd/parse/parser_test.py
|
yang/pytype-hack
|
093d69c9a6f6144f0209cdeb8eed6c7e832bbee0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes for testing the PYTD parser."""
import os
import sys
import textwrap
from pytype.pytd import pytd
from pytype.pytd.parse import parser
from pytype.pytd.parse import visitors
import unittest
class ParserTest(unittest.TestCase):
"""Test utility class. Knows how to parse PYTD and compare source code."""
def setUp(self):
self.parser = parser.TypeDeclParser()
def Parse(self, src, version=None):
# TODO(kramm): Using self.parser here breaks tests. Why?
tree = parser.TypeDeclParser(version=version).Parse(textwrap.dedent(src))
tree.Visit(visitors.VerifyVisitor())
return tree
def ToAST(self, src_or_tree):
# TODO(pludemann): The callers are not consistent in how they use this
# and in most (all?) cases they know whether they're
# passing in a source string or parse tree. It would
    #                    be better if all the callers were consistent.
if isinstance(src_or_tree, basestring):
# Put into a canonical form (removes comments, standard indents):
return self.Parse(src_or_tree + "\n")
else: # isinstance(src_or_tree, tuple):
src_or_tree.Visit(visitors.VerifyVisitor())
return src_or_tree
def AssertSourceEquals(self, src_or_tree_1, src_or_tree_2):
# Strip leading "\n"s for convenience
ast1 = self.ToAST(src_or_tree_1)
ast2 = self.ToAST(src_or_tree_2)
src1 = pytd.Print(ast1).strip() + "\n"
src2 = pytd.Print(ast2).strip() + "\n"
# Verify printed versions are the same and ASTs are the same.
    # TODO(pludemann): Find out why some tests confuse NamedType and
# ClassType and fix the tests so that this conversion isn't
# needed.
ast1 = ast1.Visit(visitors.ClassTypeToNamedType())
ast2 = ast2.Visit(visitors.ClassTypeToNamedType())
if src1 != src2 or not ast1.ASTeq(ast2):
# Due to differing opinions on the form of debug output, allow an
# environment variable to control what output you want. Set
# PY_UNITTEST_DIFF to get diff output.
if os.getenv("PY_UNITTEST_DIFF"):
self.maxDiff = None # for better diff output (assertMultiLineEqual)
self.assertMultiLineEqual(src1, src2)
else:
sys.stdout.flush()
sys.stderr.flush()
print >>sys.stderr, "Source files or ASTs differ:"
print >>sys.stderr, "-" * 36, " Actual ", "-" * 36
print >>sys.stderr, textwrap.dedent(src1).strip()
print >>sys.stderr, "-" * 36, "Expected", "-" * 36
print >>sys.stderr, textwrap.dedent(src2).strip()
print >>sys.stderr, "-" * 80
if not ast1.ASTeq(ast2):
print >>sys.stderr, "Actual AST:", ast1
print >>sys.stderr, "Expect AST:", ast2
self.fail("source files differ")
def ApplyVisitorToString(self, data, visitor):
tree = self.Parse(data)
new_tree = tree.Visit(visitor)
return pytd.Print(new_tree)
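# Usage sketch (hypothetical subclass; the class name and pyi snippet are
# illustrative assumptions, not part of pytype): concrete tests typically
# subclass ParserTest and round-trip declarations through Parse/Print.
#
#   class RoundTripTest(ParserTest):
#
#     def testRoundTrip(self):
#       src = """
#         def f(x: int) -> str
#       """
#       self.AssertSourceEquals(src, self.Parse(src))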
| 41.045455
| 80
| 0.67608
|
1303e52d511702c1ef294ce7544e1cbe234eaeea
| 550
|
py
|
Python
|
Climate.py
|
ZhiXingHeYi-0712/Solar-Radiation
|
84020c446fcb62cadc844033b76197cf4d1b8071
|
[
"MIT"
] | null | null | null |
Climate.py
|
ZhiXingHeYi-0712/Solar-Radiation
|
84020c446fcb62cadc844033b76197cf4d1b8071
|
[
"MIT"
] | null | null | null |
Climate.py
|
ZhiXingHeYi-0712/Solar-Radiation
|
84020c446fcb62cadc844033b76197cf4d1b8071
|
[
"MIT"
] | null | null | null |
from enum import Enum
class Climate(Enum):
TROPICS = 1
MID_LATITUDE_SUMMER = 2
MID_LATITUDE_WINTER = 3
COLD_ZONE_SUMMER = 4
NONE_TYPE = 5
def getDetermination(type):
if (type == Climate.TROPICS):
return (0.95, 0.98, 1.02)
elif (type == Climate.MID_LATITUDE_SUMMER):
return (0.97, 0.99, 1.02)
elif (type == Climate.MID_LATITUDE_WINTER):
return (1.03, 1.01, 1.00)
elif (type == Climate.COLD_ZONE_SUMMER):
return (0.99, 0.99, 1.01)
else:
        raise ValueError("Climate type not found.")
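# Minimal usage sketch: look up the correction-factor tuple for a climate type.
# The variable names below are illustrative assumptions; the tuple simply follows
# the order returned by getDetermination above.
if __name__ == "__main__":
    r0, r1, rk = getDetermination(Climate.MID_LATITUDE_SUMMER)
    print(r0, r1, rk)  # expected output: 0.97 0.99 1.02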
| 26.190476
| 47
| 0.614545
|
18657e7d11d89d275ac9a0fc7e4e7b26806d95b3
| 399
|
py
|
Python
|
decorators/lab/3_even_numbers.py
|
Minkov/python-oop-2020-06
|
63b830a42b7abfac5bee576a81ee7626c47a80bc
|
[
"MIT"
] | 3
|
2020-07-04T11:32:42.000Z
|
2020-08-14T08:43:25.000Z
|
decorators/lab/3_even_numbers.py
|
Minkov/python-oop-2020-06
|
63b830a42b7abfac5bee576a81ee7626c47a80bc
|
[
"MIT"
] | null | null | null |
decorators/lab/3_even_numbers.py
|
Minkov/python-oop-2020-06
|
63b830a42b7abfac5bee576a81ee7626c47a80bc
|
[
"MIT"
] | 2
|
2020-07-09T07:17:37.000Z
|
2021-02-22T22:55:52.000Z
|
def even_numbers(func):
def is_even(number):
return number % 2 == 0
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
# return [x for x in result if is_even(x)]
return list(filter(is_even, result))
return wrapper
@even_numbers
def get_numbers(numbers):
return numbers
print(get_numbers([1, 2, 3, 4, 5])) # [2, 4]
| 19.95
| 51
| 0.573935
|
9bd10940e7cf37b456846860caa3e9c2c967b6fd
| 636
|
py
|
Python
|
export_table.py
|
gkovacs/invideo-quizzes-analysis-las2016
|
6ec8686ef0d3ffa5e994f8dec41590fea87e9539
|
[
"MIT"
] | null | null | null |
export_table.py
|
gkovacs/invideo-quizzes-analysis-las2016
|
6ec8686ef0d3ffa5e994f8dec41590fea87e9539
|
[
"MIT"
] | null | null | null |
export_table.py
|
gkovacs/invideo-quizzes-analysis-las2016
|
6ec8686ef0d3ffa5e994f8dec41590fea87e9539
|
[
"MIT"
] | null | null | null |
import MySQLdb
import sqlite3
from pandas.io.sql import read_frame
import json
datastage_login = json.load(open('datastage_login.json'))
databases = {}
tablename = 'VideoInteraction'
databasename = 'Edx'
def getTableName():
return tablename
def getDatabaseName():
return databasename
def getDatabase():
if databasename not in databases:
databases[databasename] = MySQLdb.connect(host='datastage.stanford.edu', user=datastage_login['user'], passwd=datastage_login['passwd'], db=databasename)
return databases[databasename]
data = read_frame('select * from ' + getTableName() + ' limit 100', getDatabase())
print data
| 23.555556
| 157
| 0.761006
|
0d7c49c465c298a52945ddf6755a55f726315c49
| 2,040
|
py
|
Python
|
infrastructure-provisioning/src/general/scripts/azure/ssn_create_datalake.py
|
ofuks/DLab
|
460804a2559843d099936fe40373093f9bf9edcb
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/azure/ssn_create_datalake.py
|
ofuks/DLab
|
460804a2559843d099936fe40373093f9bf9edcb
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/azure/ssn_create_datalake.py
|
ofuks/DLab
|
460804a2559843d099936fe40373093f9bf9edcb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import argparse
import json
from dlab.fab import *
from dlab.actions_lib import *
from dlab.meta_lib import *
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--datalake_name', type=str, default='')
parser.add_argument('--datalake_tags', type=str, default='{"empty":"string"}')
parser.add_argument('--resource_group_name', type=str, default='')
parser.add_argument('--region', type=str, default='')
args = parser.parse_args()
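# Illustrative invocation (a sketch only; resource names and region are
# assumptions, not taken from any DLab configuration):
#   python ssn_create_datalake.py --resource_group_name my-rg --region westus2 \
#       --datalake_tags '{"Name": "ssn-datalake"}'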
if __name__ == "__main__":
try:
check_datalake = False
datalake_tags = json.loads(args.datalake_tags)
for datalake in AzureMeta().list_datalakes(args.resource_group_name):
            if datalake_tags["Name"] == datalake.tags["Name"]:
check_datalake = True
print("REQUESTED DATA LAKE {} ALREADY EXISTS".format(datalake.name))
if not check_datalake:
datalake_name = id_generator().lower()
print("Creating DataLake {}.".format(datalake_name))
datalake = AzureActions().create_datalake_store(args.resource_group_name, datalake_name, args.region,
datalake_tags)
print("DATA LAKE {} has been created".format(datalake_name))
except:
sys.exit(1)
| 37.777778
| 113
| 0.626961
|
0cf1b47f317c73d9be46ee3367d72fb072b5bb8d
| 370
|
py
|
Python
|
__init__.py
|
fwinstead/rules-of-acquisition-skill
|
f0682f9ecafa02aa6e30d11b101704cd8060aea1
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
fwinstead/rules-of-acquisition-skill
|
f0682f9ecafa02aa6e30d11b101704cd8060aea1
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
fwinstead/rules-of-acquisition-skill
|
f0682f9ecafa02aa6e30d11b101704cd8060aea1
|
[
"Apache-2.0"
] | null | null | null |
from mycroft import MycroftSkill, intent_file_handler
class RulesOfAcquisition(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('acquisition.of.rules.intent')
def handle_acquisition_of_rules(self, message):
self.speak_dialog('acquisition.of.rules')
def create_skill():
return RulesOfAcquisition()
| 23.125
| 55
| 0.754054
|
008a2020e9d2909d2beb1b577a214cd5f8c64c03
| 63
|
py
|
Python
|
scia/handlers/__init__.py
|
c-crogeo/scia
|
7652668f8a7befc1f300415d6d3b1e7fc3a45bc3
|
[
"MIT"
] | 1
|
2020-12-29T00:11:57.000Z
|
2020-12-29T00:11:57.000Z
|
scia/handlers/__init__.py
|
c-crogeo/scia
|
7652668f8a7befc1f300415d6d3b1e7fc3a45bc3
|
[
"MIT"
] | null | null | null |
scia/handlers/__init__.py
|
c-crogeo/scia
|
7652668f8a7befc1f300415d6d3b1e7fc3a45bc3
|
[
"MIT"
] | null | null | null |
from .timedRotatingFileHandler import TimedRotatingFileHandler
| 31.5
| 62
| 0.920635
|
d8d0854ad6d263d1b0e7c5182b4c144246b0d0f3
| 460
|
py
|
Python
|
Solutions1/is_palindrome.py
|
mohamedsugal/Leetcode-Solutions
|
c67720af4cb36d07d758c57efffac7a28e4f8b9f
|
[
"MIT"
] | 3
|
2020-11-12T06:51:44.000Z
|
2021-09-19T00:26:33.000Z
|
Solutions1/is_palindrome.py
|
mohamedsugal/Leetcode-Solutions
|
c67720af4cb36d07d758c57efffac7a28e4f8b9f
|
[
"MIT"
] | null | null | null |
Solutions1/is_palindrome.py
|
mohamedsugal/Leetcode-Solutions
|
c67720af4cb36d07d758c57efffac7a28e4f8b9f
|
[
"MIT"
] | null | null | null |
string = "A man, a plan, a canal"
def is_palindrome(string):
left = 0
right = len(string) - 1
while left < right:
if not string[left].isalnum():
left += 1
elif not string[right].isalnum():
right -= 1
else:
if string[left].lower() != string[right].lower():
return False
left += 1
right -= 1
return True
print(is_palindrome(string))
| 23
| 62
| 0.495652
|
6591d51f2a37f882de36566a7bb22169808e2059
| 2,570
|
py
|
Python
|
tensorflow/contrib/learn/python/learn/ops/array_ops.py
|
285219011/hello-world
|
dfb71ea206eb9f61e5d97c9727caa1a6449e39cb
|
[
"Apache-2.0"
] | 6
|
2017-04-25T01:30:41.000Z
|
2019-12-11T15:08:46.000Z
|
tensorflow/contrib/learn/python/learn/ops/array_ops.py
|
PaulTR/tensorflow
|
84bcff1e814ee5697b5980535583737f8e81d82f
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/learn/python/learn/ops/array_ops.py
|
PaulTR/tensorflow
|
84bcff1e814ee5697b5980535583737f8e81d82f
|
[
"Apache-2.0"
] | 8
|
2017-04-17T23:39:12.000Z
|
2019-05-11T14:06:31.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow ops for array / tensor manipulation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import math_ops
def split_squeeze(dim, num_split, tensor_in):
"""Splits input on given dimension and then squeezes that dimension.
Args:
dim: Dimension to split and squeeze on.
num_split: integer, the number of ways to split.
tensor_in: Input tensor of shape [N1, N2, .. Ndim, .. Nx].
Returns:
List of tensors [N1, N2, .. Ndim-1, Ndim+1, .. Nx].
"""
return [array_ops_.squeeze(t, squeeze_dims=[dim])
for t in array_ops_.split(dim, num_split, tensor_in)]
def expand_concat(dim, inputs):
"""Expands inputs on given dimension and then concatenates them.
Args:
dim: Dimension to expand and concatenate on.
inputs: List of tensors of the same shape [N1, ... Nx].
Returns:
A tensor of shape [N1, .. Ndim, ... Nx]
"""
return array_ops_.concat(dim, [array_ops_.expand_dims(t, dim)
for t in inputs])
def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
"""Encodes indices from given tensor as one-hot tensor.
TODO(ilblackdragon): Ideally implementation should be
part of TensorFlow with Eigen-native operation.
Args:
tensor_in: Input tensor of shape [N1, N2].
num_classes: Number of classes to expand index into.
on_value: Tensor or float, value to fill-in given index.
off_value: Tensor or float, value to fill-in everything else.
Returns:
Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
tensor.
"""
return array_ops_.one_hot(
math_ops.cast(tensor_in, dtypes.int64), num_classes, on_value, off_value)
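# Shape walkthrough (illustrative comments only, since these helpers target the
# TF1 graph API; shapes follow directly from the docstrings above):
#   split_squeeze(1, 3, x)      -> list of 3 tensors of shape [2],  for x of shape [2, 3]
#   expand_concat(1, [a, b, c]) -> tensor of shape [2, 3],          for a/b/c of shape [2]
#   one_hot_matrix(ids, 5)      -> tensor of shape [N1, N2, 5],     for integer ids of shape [N1, N2]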
| 35.205479
| 80
| 0.700778
|
c7f2d09278cdd74aa9659c7b94c45516e258985f
| 2,591
|
py
|
Python
|
aliyun-python-sdk-pvtz/setup.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-pvtz/setup.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-pvtz/setup.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
#!/usr/bin/python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
from setuptools import setup, find_packages
import os
import sys
"""
setup module for pvtz.
Created on 7/3/2015
@author: alex
"""
PACKAGE = "aliyunsdkpvtz"
NAME = "aliyun-python-sdk-pvtz"
DESCRIPTION = "The pvtz module of Aliyun Python sdk."
AUTHOR = "Aliyun"
AUTHOR_EMAIL = "aliyun-developers-efficiency@list.alibaba-inc.com"
URL = "http://develop.aliyun.com/sdk/python"
TOPDIR = os.path.dirname(__file__) or "."
VERSION = __import__(PACKAGE).__version__
desc_file = open("README.rst")
try:
LONG_DESCRIPTION = desc_file.read()
finally:
desc_file.close()
requires = []
if sys.version_info < (3, 3):
requires.append("aliyun-python-sdk-core>=2.0.2")
else:
requires.append("aliyun-python-sdk-core-v3>=2.3.5")
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache",
url=URL,
keywords=["aliyun","sdk","pvtz"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=requires,
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development",
)
)
| 30.482353
| 69
| 0.66731
|
41e9d4efbce1242031deeb9508e7199988f6685d
| 6,957
|
py
|
Python
|
dvc/config_schema.py
|
NigelVanHattum/dvc
|
356f9355fd5156595cfd4c786231ce2f04a5e046
|
[
"Apache-2.0"
] | null | null | null |
dvc/config_schema.py
|
NigelVanHattum/dvc
|
356f9355fd5156595cfd4c786231ce2f04a5e046
|
[
"Apache-2.0"
] | 81
|
2021-04-13T08:02:09.000Z
|
2022-03-30T16:10:17.000Z
|
dvc/config_schema.py
|
NigelVanHattum/dvc
|
356f9355fd5156595cfd4c786231ce2f04a5e046
|
[
"Apache-2.0"
] | null | null | null |
import os
from urllib.parse import urlparse
from funcy import walk_values
from voluptuous import (
All,
Any,
Coerce,
Invalid,
Lower,
Optional,
Range,
Schema,
)
Bool = All(
Lower,
Any("true", "false"),
lambda v: v == "true",
msg="expected true or false",
)
def supported_cache_type(types):
"""Checks if link type config option consists only of valid values.
Args:
types (list/string): type(s) of links that dvc should try out.
"""
if types is None:
return None
if isinstance(types, str):
types = [typ.strip() for typ in types.split(",")]
unsupported = set(types) - {"reflink", "hardlink", "symlink", "copy"}
if unsupported:
raise Invalid(
"Unsupported cache type(s): {}".format(", ".join(unsupported))
)
return types
def Choices(*choices):
"""Checks that value belongs to the specified set of values
Args:
*choices: pass allowed values as arguments, or pass a list or
tuple as a single argument
"""
return Any(*choices, msg="expected one of {}".format(", ".join(choices)))
def ByUrl(mapping):
schemas = walk_values(Schema, mapping)
def validate(data):
if "url" not in data:
raise Invalid("expected 'url'")
parsed = urlparse(data["url"])
# Windows absolute paths should really have scheme == "" (local)
if os.name == "nt" and len(parsed.scheme) == 1 and parsed.netloc == "":
return schemas[""](data)
if parsed.scheme not in schemas:
raise Invalid(f"Unsupported URL type {parsed.scheme}://")
return schemas[parsed.scheme](data)
return validate
class RelPath(str):
pass
REMOTE_COMMON = {
"url": str,
"checksum_jobs": All(Coerce(int), Range(1)),
"jobs": All(Coerce(int), Range(1)),
Optional("no_traverse"): Bool, # obsoleted
"verify": Bool,
}
LOCAL_COMMON = {
"type": supported_cache_type,
Optional("protected", default=False): Bool, # obsoleted
"shared": All(Lower, Choices("group")),
Optional("slow_link_warning", default=True): Bool,
}
HTTP_COMMON = {
"auth": All(Lower, Choices("basic", "digest", "custom")),
"custom_auth_header": str,
"user": str,
"password": str,
"ask_password": Bool,
"ssl_verify": Bool,
"method": str,
}
WEBDAV_COMMON = {
"user": str,
"password": str,
"ask_password": Bool,
"token": str,
"cert_path": str,
"key_path": str,
"timeout": Coerce(int),
}
SCHEMA = {
"core": {
"remote": Lower,
"checksum_jobs": All(Coerce(int), Range(1)),
Optional("interactive", default=False): Bool,
Optional("analytics", default=True): Bool,
Optional("hardlink_lock", default=False): Bool,
Optional("no_scm", default=False): Bool,
Optional("autostage", default=False): Bool,
Optional("experiments"): Bool, # obsoleted
Optional("check_update", default=True): Bool,
},
"cache": {
"local": str,
"s3": str,
"gs": str,
"hdfs": str,
"webhdfs": str,
"ssh": str,
"azure": str,
# This is for default local cache
"dir": str,
**LOCAL_COMMON,
},
"remote": {
str: ByUrl(
{
"": {**LOCAL_COMMON, **REMOTE_COMMON},
"s3": {
"region": str,
"profile": str,
"credentialpath": str,
"configpath": str,
"endpointurl": str,
"access_key_id": str,
"secret_access_key": str,
"session_token": str,
Optional("listobjects", default=False): Bool, # obsoleted
Optional("use_ssl", default=True): Bool,
Optional("ssl_verify", default=True): Bool,
"sse": str,
"sse_kms_key_id": str,
"acl": str,
"grant_read": str,
"grant_read_acp": str,
"grant_write_acp": str,
"grant_full_control": str,
**REMOTE_COMMON,
},
"gs": {
"projectname": str,
"credentialpath": str,
**REMOTE_COMMON,
},
"ssh": {
"type": supported_cache_type,
"port": Coerce(int),
"user": str,
"password": str,
"ask_password": Bool,
"keyfile": str,
"timeout": Coerce(int),
"gss_auth": Bool,
"allow_agent": Bool,
**REMOTE_COMMON,
},
"hdfs": {"user": str, **REMOTE_COMMON},
"webhdfs": {
"hdfscli_config": str,
"webhdfs_token": str,
"user": str,
"webhdfs_alias": str,
**REMOTE_COMMON,
},
"azure": {
"connection_string": str,
"sas_token": str,
"account_name": str,
"account_key": str,
"tenant_id": str,
"client_id": str,
"client_secret": str,
**REMOTE_COMMON,
},
"oss": {
"oss_key_id": str,
"oss_key_secret": str,
"oss_endpoint": str,
**REMOTE_COMMON,
},
"gdrive": {
"gdrive_use_service_account": Bool,
"gdrive_client_id": str,
"gdrive_client_secret": str,
"gdrive_user_credentials_file": str,
"gdrive_service_account_user_email": str,
"gdrive_service_account_json_file_path": str,
Optional("gdrive_trash_only", default=False): Bool,
**REMOTE_COMMON,
},
"http": {**HTTP_COMMON, **REMOTE_COMMON},
"https": {**HTTP_COMMON, **REMOTE_COMMON},
"webdav": {**WEBDAV_COMMON, **REMOTE_COMMON},
"webdavs": {**WEBDAV_COMMON, **REMOTE_COMMON},
"remote": {str: object}, # Any of the above options are valid
}
)
},
"state": {
"row_limit": All(Coerce(int), Range(1)), # obsoleted
"row_cleanup_quota": All(Coerce(int), Range(0, 100)), # obsoleted
},
# section for experimental features
"feature": {
# enabled by default. It's of no use, kept for backward compatibility.
Optional("parametrization", default=True): Bool
},
}
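# Validation sketch (illustrative; the remote name and values are assumptions):
# the schema is typically compiled with voluptuous and applied to a parsed
# config dict, roughly
#
#   from voluptuous import Schema
#   compiled = Schema(SCHEMA)
#   compiled({"core": {"remote": "myremote", "autostage": "true"}})
#
# where string booleans such as "true"/"false" are coerced by the Bool
# validator defined at the top of this module.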
| 30.647577
| 79
| 0.485698
|
ae891c44e9afe40bd1d5d05c6b84daad300a7357
| 2,032
|
py
|
Python
|
utils/optim.py
|
Lednik7/data_fusion
|
2cac8ee2ca6c144218731795bc118f6c355bd477
|
[
"MIT"
] | 1
|
2022-01-23T10:18:16.000Z
|
2022-01-23T10:18:16.000Z
|
utils/optim.py
|
Lednik7/data_fusion
|
2cac8ee2ca6c144218731795bc118f6c355bd477
|
[
"MIT"
] | null | null | null |
utils/optim.py
|
Lednik7/data_fusion
|
2cac8ee2ca6c144218731795bc118f6c355bd477
|
[
"MIT"
] | null | null | null |
import logging
import torch
import transformers
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
NO_DECAY = ['bias', 'LayerNorm.weight']
def is_backbone(name):
    return 'backbone' in name  # or 'pooler' in name
def needs_decay(name):
return not any(word in name for word in NO_DECAY)
def get_optimizer(model, lr, weight_decay, head_factor):
grouped_parameters = [
{
'params': [
param for name, param in model.named_parameters() if is_backbone(name) and needs_decay(name)
],
'lr': lr,
'weight_decay': weight_decay,
},
{
'params': [
param for name, param in model.named_parameters() if is_backbone(name) and not needs_decay(name)
],
'lr': lr,
'weight_decay': 0.,
},
{
'params': [param for name, param in model.named_parameters() if not is_backbone(name)],
'lr': lr * head_factor,
'weight_decay': weight_decay,
}
]
logger.info(f'\t Head parameters with factor {head_factor}:')
for name, _ in model.named_parameters():
if not is_backbone(name):
logger.info(f'\t \t {name}')
return torch.optim.AdamW(grouped_parameters, lr=lr)
def get_scheduler(optimizer, dataloader, num_epochs, accum_steps, warmup_params):
epoch_size = len(dataloader)
num_training_steps = int(epoch_size * num_epochs / accum_steps)
num_warmup_steps = warmup_params.get(
'num_steps', int(num_training_steps * warmup_params['percentage'])
)
msg = '\t Linear warmup schedule with {} warmup steps out of {} total steps.'
logger.info(msg.format(num_warmup_steps, num_training_steps))
return transformers.get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps
)
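# Usage sketch (the toy model, dataloader and warmup settings are assumptions
# for illustration; only the naming convention matters: parameters whose names
# contain "backbone" get the base learning rate, everything else is scaled by
# head_factor):
#
#   model = torch.nn.ModuleDict({"backbone": torch.nn.Linear(8, 8),
#                                "head": torch.nn.Linear(8, 2)})
#   optimizer = get_optimizer(model, lr=1e-4, weight_decay=0.01, head_factor=10.0)
#   scheduler = get_scheduler(optimizer, dataloader, num_epochs=3,
#                             accum_steps=1, warmup_params={"percentage": 0.1})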
| 30.787879
| 113
| 0.619587
|
f86244d22553511da6cc06c0fa5273f886302272
| 669
|
py
|
Python
|
seeds/user_seeder.py
|
tamert/sentry-discord-webhook
|
a3b36b52c7e7b1a3d67e1f57185f748df1348412
|
[
"MIT"
] | null | null | null |
seeds/user_seeder.py
|
tamert/sentry-discord-webhook
|
a3b36b52c7e7b1a3d67e1f57185f748df1348412
|
[
"MIT"
] | null | null | null |
seeds/user_seeder.py
|
tamert/sentry-discord-webhook
|
a3b36b52c7e7b1a3d67e1f57185f748df1348412
|
[
"MIT"
] | null | null | null |
from flask_seeder import Seeder, Faker, generator
from models import User
import hashlib
class UserSeeder(Seeder):
# run() will be called by Flask-Seeder
def run(self):
# Create a new Faker and tell it how to create User objects
faker = Faker(
cls=User,
            init={
                "id": generator.Sequence(),
                "name": generator.Name(),
                "email": "admin@admin.com",
                "password": hashlib.md5("secret".encode()).hexdigest(),
"role": "admin"
}
)
for user in faker.create(1):
print("Adding user: %s" % user)
self.db.session.add(user)
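# Note (assuming Flask-Seeder is initialized on the application as usual):
# seeders in this package are discovered and executed via the CLI command
#   flask seed run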
| 26.76
| 67
| 0.521674
|
eb866ffad6a03c485ce918b2acc772feee8d5038
| 3,245
|
py
|
Python
|
main.py
|
Davidyz/AutoStacker
|
9f637891b9379b166e41597bcd44a8011561beea
|
[
"MIT"
] | null | null | null |
main.py
|
Davidyz/AutoStacker
|
9f637891b9379b166e41597bcd44a8011561beea
|
[
"MIT"
] | null | null | null |
main.py
|
Davidyz/AutoStacker
|
9f637891b9379b166e41597bcd44a8011561beea
|
[
"MIT"
] | null | null | null |
from modules import algo, imageRW, gps
import sys, os
from typing import List, Optional, Union
class ArgumentError(Exception):
pass
def parseArgs(args: List[str], config: dict[str, Union[str, int, None]] = {'source':None, 'target': None, 'group-size': 1, 'mode': 'mean'}) -> Optional[dict]:
'''
parse the command line arguments:
-s: specify the source directory of the images. this directory may contain images and a gps track file.
-t: specify the name of output directory or file.
-group-size (-gs): specify the number of images per stack. if not specified, all photos are merged into one stack.
-mode (-m): specify the mode of stacking. has to be one of 'mean', 'max'.
-track: specify a gps track that is not in the source directory.
'''
if args == []:
for i in config.keys():
if config.get(i) == None:
raise ArgumentError("Please check the command line arguments.")
return config
elif args[0] == '-h':
# print help messages
pass
elif args[0] == '-s':
if os.path.isdir(args[1]):
config['source'] = args[1]
return parseArgs(args[2:], config)
else:
raise ArgumentError("The source directory does not exist.")
elif args[0] == '-t':
if os.path.isdir(args[1]) or args[1].split('.')[-1].lower() in imageRW.SUFFIX:
config['target'] = args[1]
return parseArgs(args[2:], config)
else:
raise ArgumentError("The target has to be an existing directory or a file name with suffix jpg or tiff.")
elif args[0] in ('-group-size', '-gs'):
try:
config['group-size'] = int(args[1])
return parseArgs(args[2:], config)
except Exception:
print("Invalid input for group size. Please check your input.")
sys.exit()
    elif args[0] in ('-mode', '-m'):
if args[1] in algo.ALGORITHMS.keys():
config['mode'] = args[1]
return parseArgs(args[2:], config)
else:
raise ArgumentError("The stacking mode is not supported.")
    elif args[0] == '-track':
        if args[1].split('.')[-1] in gps.GPS_SUFFIX:
            config['gpsFile'] = args[1]
            return parseArgs(args[2:], config)
        else:
            raise ArgumentError("The gps track file format is not supported.")
    else:
        raise ArgumentError("Cannot recognize the input {}.".format(args[0]))
def main():
config: Optional[dict] = parseArgs(sys.argv[1:])
if config.get('gpsFile') == None:
config['gpsFile'] = gps.findGPSTrack(config['source'])
    if imageRW.countImage(config['source']) % int(config['group-size']):
print("Warning: The group size is not a factor of the total number of images. The quality of the last image might be affected.")
sourceGen = imageRW.read(config['source'])
    targetGen = algo.ALGORITHMS[config['mode']](sourceGen, config['group-size'])
if config.get('gpsFile') != None:
gpsTrack: dict = gps.getTrack(str(config.get('gpsFile')))
targetGen = gps.setGPS(gpsTrack, targetGen)
for image in targetGen:
image.setPath(os.path.join(config['target'], )) # need to work out the new image names.
image.write()
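# Intended command-line usage, following the parseArgs docstring above (paths
# and values are illustrative assumptions):
#   python main.py -s ./photos -t ./stacked -gs 5 -m mean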
if __name__ == '__main__':
pass
| 41.602564
| 158
| 0.605547
|
fa904ba94bed2b6689060566145f1cc3eba67c25
| 7,202
|
py
|
Python
|
udp-py/udp/ba/helper.py
|
moky/WormHole
|
6b2b79274274f6764e0d519d384eb65489f4ca56
|
[
"MIT"
] | 5
|
2020-05-24T03:35:00.000Z
|
2021-06-05T00:27:54.000Z
|
udp-py/udp/ba/helper.py
|
moky/WormHole
|
6b2b79274274f6764e0d519d384eb65489f4ca56
|
[
"MIT"
] | null | null | null |
udp-py/udp/ba/helper.py
|
moky/WormHole
|
6b2b79274274f6764e0d519d384eb65489f4ca56
|
[
"MIT"
] | 2
|
2020-09-11T05:29:11.000Z
|
2022-03-13T15:45:22.000Z
|
# -*- coding: utf-8 -*-
#
# BA: Byte Array
#
# Written in 2021 by Moky <albert.moky@gmail.com>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2021 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
from abc import ABC, abstractmethod
from typing import Optional, Union
from .utils import array_concat, array_find
from .utils import array_set, array_update, array_insert, array_remove
from .utils import int_from_buffer, int_to_buffer
from .array import set_data_helper, set_mutable_helper, set_integer_helper
from .array import ByteArray, MutableByteArray, Endian
from .data import Data
class ByteArrayHelper(ABC):
@abstractmethod
def adjust(self, index: int, size: int) -> int:
""" Adjust the index with range [0, size) """
        raise NotImplementedError
@abstractmethod
def adjust_e(self, index: int, size: int) -> int:
""" Adjust the index with range [0, size), throws when index too small """
        raise NotImplementedError
@abstractmethod
def concat(self, left: ByteArray, right: ByteArray) -> ByteArray:
""" left + right """
        raise NotImplementedError
@abstractmethod
def find(self, sub: ByteArray, data: ByteArray, start: int, end: int) -> int:
""" Search sub data """
        raise NotImplementedError
class MutableByteArrayHelper(ABC):
@abstractmethod
def set(self, value: int, index: int, data: MutableByteArray) -> int:
""" Set value to data at index, return new size """
        raise NotImplementedError
@abstractmethod
def update(self, src: ByteArray, index: int, data: MutableByteArray) -> int:
""" Update src to data at index, return new size """
        raise NotImplementedError
@abstractmethod
def insert(self, src: ByteArray, index: int, data: MutableByteArray) -> int:
""" Insert src to data at index, return new size """
        raise NotImplementedError
@abstractmethod
def remove(self, index: int, data: MutableByteArray) -> (Optional[int], int, int):
""" Remove element at index and return its value, new offset & size """
        raise NotImplementedError
class IntegerHelper(ABC):
@abstractmethod
def get_value(self, buffer: Union[bytes, bytearray], offset: int, size: int, endian: Endian) -> int:
""" Get integer value from data with range [offset, offset + size) """
        raise NotImplementedError
@abstractmethod
def set_value(self, value: int, buffer: Union[bytearray], offset: int, size: int, endian: Endian):
""" Set integer value into data with range [offset, offset + size) """
        raise NotImplementedError
#
# Implementations
#
class DefaultByteArrayHelper(ByteArrayHelper):
""" Default ByteArrayHelper """
# Override
def adjust(self, index: int, size: int) -> int:
if index < 0:
index += size # count from right hand
if index < 0:
return 0 # too small
elif index > size:
return size # too big
return index
# Override
def adjust_e(self, index: int, size: int) -> int:
if index < 0:
index += size # count from right hand
if index < 0:
# too small
raise IndexError('error index: %d, size: %d' % (index - size, size))
return index
# Override
def concat(self, left: ByteArray, right: ByteArray) -> ByteArray:
buffer, offset, size = array_concat(left_buffer=left.buffer, left_offset=left.offset, left_size=left.size,
right_buffer=right.buffer, right_offset=right.offset, right_size=right.size)
return Data(buffer=buffer, offset=offset, size=size)
# Override
def find(self, sub: ByteArray, data: ByteArray, start: int, end: int) -> int:
if 0 < start or end < data.size:
# slice
data = Data(buffer=data.buffer, offset=(data.offset+start), size=(end-start))
# searching within the range [start, end)
pos = array_find(sub_buffer=sub.buffer, sub_offset=sub.offset, sub_size=sub.size,
buffer=data.buffer, offset=data.offset, size=data.size)
if pos == -1:
return -1
else:
return pos + start
class DefaultMutableByteArrayHelper(MutableByteArrayHelper):
""" Default MutableByteArrayHelper """
# Override
def set(self, value: int, index: int, data: MutableByteArray) -> int:
return array_set(index=index, value=value,
buffer=data.buffer, offset=data.offset, size=data.size)
# Override
def update(self, src: ByteArray, index: int, data: MutableByteArray) -> int:
return array_update(index=index, src=src.buffer, src_offset=src.offset, src_size=src.size,
buffer=data.buffer, offset=data.offset, size=data.size)
# Override
def insert(self, src: ByteArray, index: int, data: MutableByteArray) -> int:
return array_insert(index=index, src=src.buffer, src_offset=src.offset, src_size=src.size,
buffer=data.buffer, offset=data.offset, size=data.size)
# Override
def remove(self, index: int, data: MutableByteArray) -> (Optional[int], int, int):
return array_remove(index=index, buffer=data.buffer, offset=data.offset, size=data.size)
class DefaultIntegerHelper(IntegerHelper):
""" Default IntegerHelper """
# Override
def get_value(self, buffer: Union[bytes, bytearray], offset: int, size: int, endian: Endian) -> int:
return int_from_buffer(buffer=buffer, offset=offset, size=size, endian=endian)
# Override
def set_value(self, value: int, buffer: Union[bytearray], offset: int, size: int, endian: Endian):
int_to_buffer(value=value, buffer=buffer, offset=offset, size=size, endian=endian)
# set default helpers
set_data_helper(helper=DefaultByteArrayHelper())
set_mutable_helper(helper=DefaultMutableByteArrayHelper())
set_integer_helper(helper=DefaultIntegerHelper())
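# Behaviour sketch for the default helpers (results follow directly from the
# implementations above; the byte strings are illustrative assumptions):
#
#   helper = DefaultByteArrayHelper()
#   helper.adjust(-1, 10)   # -> 9   (negative index counts from the right)
#   helper.adjust(-12, 10)  # -> 0   (clamped at the low end)
#   helper.adjust(12, 10)   # -> 10  (clamped at the high end)
#
#   left = Data(buffer=b'hello ', offset=0, size=6)
#   right = Data(buffer=b'world', offset=0, size=5)
#   joined = helper.concat(left, right)         # b'hello world' as a Data view
#   helper.find(right, joined, 0, joined.size)  # -> 6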
| 38.513369
| 120
| 0.651347
|
712e08263f8a785926b30d75778f9da308e985f9
| 32,150
|
py
|
Python
|
desktop/core/src/desktop/middleware.py
|
ajay25/hue
|
5733a1605c1a6055a052012d6ee1c24a48658300
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/middleware.py
|
ajay25/hue
|
5733a1605c1a6055a052012d6ee1c24a48658300
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/middleware.py
|
ajay25/hue
|
5733a1605c1a6055a052012d6ee1c24a48658300
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import object
import inspect
import json
import logging
import mimetypes
import os.path
import re
import socket
import tempfile
import time
import traceback
import kerberos
import django.db
import django.views.static
import django_prometheus
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, BACKEND_SESSION_KEY, authenticate, load_backend, login
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core import exceptions, urlresolvers
from django.http import HttpResponseNotAllowed, HttpResponseForbidden
from django.urls import resolve
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext as _
from django.utils.http import urlquote, is_safe_url
from hadoop import cluster
from useradmin.models import User
import desktop.views
from desktop import appmanager, metrics
from desktop.auth.backend import is_admin
from desktop.conf import AUTH, HTTP_ALLOWED_METHODS, ENABLE_PROMETHEUS, KNOX, DJANGO_DEBUG_MODE, AUDIT_EVENT_LOG_DIR, \
SERVER_USER, REDIRECT_WHITELIST, SECURE_CONTENT_SECURITY_POLICY, ENABLE_CONNECTORS
from desktop.context_processors import get_app_name
from desktop.lib import apputil, i18n, fsmanager
from desktop.lib.django_util import JsonResponse, render, render_json
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.log import get_audit_logger
from desktop.log.access import access_log, log_page_hit, access_warn
LOG = logging.getLogger(__name__)
MIDDLEWARE_HEADER = "X-Hue-Middleware-Response"
# Views inside Django that don't require login
# (see LoginAndPermissionMiddleware)
DJANGO_VIEW_AUTH_WHITELIST = [
django.views.static.serve,
desktop.views.is_alive,
]
if ENABLE_PROMETHEUS.get():
DJANGO_VIEW_AUTH_WHITELIST.append(django_prometheus.exports.ExportToDjangoView)
class AjaxMiddleware(object):
"""
Middleware that augments request to set request.ajax
for either is_ajax() (looks at HTTP headers) or ?format=json
GET parameters.
"""
def process_request(self, request):
request.ajax = request.is_ajax() or request.GET.get("format", "") == "json"
return None
class ExceptionMiddleware(object):
"""
If exceptions know how to render themselves, use that.
"""
def process_exception(self, request, exception):
tb = traceback.format_exc()
logging.info("Processing exception: %s: %s" % (
i18n.smart_unicode(exception), i18n.smart_unicode(tb))
)
if isinstance(exception, PopupException):
return exception.response(request)
if isinstance(exception, StructuredException):
if request.ajax:
response = render_json(exception.response_data)
response[MIDDLEWARE_HEADER] = 'EXCEPTION'
response.status_code = getattr(exception, 'error_code', 500)
return response
else:
response = render("error.mako", request, {
'error': exception.response_data.get("message"),
'is_embeddable': request.GET.get('is_embeddable', False),
})
response.status_code = getattr(exception, 'error_code', 500)
return response
return None
class ClusterMiddleware(object):
"""
Manages setting request.fs and request.jt
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""
Sets request.fs and request.jt on every request to point to the configured filesystem.
"""
request.fs_ref = request.GET.get('fs', view_kwargs.get('fs', 'default'))
if "fs" in view_kwargs:
del view_kwargs["fs"]
request.fs = fsmanager.get_filesystem(request.fs_ref)
if request.user.is_authenticated():
if request.fs is not None:
request.fs.setuser(request.user.username)
# Deprecated
request.jt = None
class NotificationMiddleware(object):
"""
Manages setting request.info and request.error
"""
def process_view(self, request, view_func, view_args, view_kwargs):
def message(title, detail=None):
if detail is None:
detail = ''
else:
detail = '<br/>%s' % detail
return '%s %s' % (title, detail)
def info(title, detail=None):
messages.info(request, message(title, detail))
def error(title, detail=None):
messages.error(request, message(title, detail))
def warn(title, detail=None):
messages.warning(request, message(title, detail))
request.info = info
request.error = error
request.warn = warn
class AppSpecificMiddleware(object):
@classmethod
def augment_request_with_app(cls, request, view_func):
"""Inject the app name into the request for use in later-stage middleware"""
if not hasattr(request, "_desktop_app"):
module = inspect.getmodule(view_func)
request._desktop_app = apputil.get_app_for_module(module)
if not request._desktop_app and not module.__name__.startswith('django.'):
logging.debug("no app for view func: %s in %s" % (view_func, module))
def __init__(self):
self.middlewares_by_app = {}
for app in appmanager.DESKTOP_APPS:
self.middlewares_by_app[app.name] = self._load_app_middleware(app)
def _get_middlewares(self, app, type):
return self.middlewares_by_app.get(app, {}).get(type, [])
def process_view(self, request, view_func, view_args, view_kwargs):
self.augment_request_with_app(request, view_func)
if not request._desktop_app:
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'view'):
ret = middleware(request, view_func, view_args, view_kwargs)
if ret: return ret # Short circuit
return ret
def process_response(self, request, response):
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for request.")
return response
for middleware in reversed(self._get_middlewares(request._desktop_app, 'response')):
response = middleware(request, response)
return response
def process_exception(self, request, exception):
# We have the app that we stuffed in there
if not hasattr(request, '_desktop_app'):
logging.debug("No desktop_app known for exception.")
return None
# Run the middlewares
ret = None
for middleware in self._get_middlewares(request._desktop_app, 'exception'):
ret = middleware(request, exception)
if ret: return ret # short circuit
return ret
def _load_app_middleware(cls, app):
app_settings = app.settings
if not app_settings:
return
mw_classes = app_settings.__dict__.get('MIDDLEWARE_CLASSES', [])
result = {'view': [], 'response': [], 'exception': []}
for middleware_path in mw_classes:
# This code brutally lifted from django.core.handlers
try:
dot = middleware_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured(_('%(module)s isn\'t a middleware module.') % {'module': middleware_path})
mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
try:
mod = __import__(mw_module, {}, {}, [''])
except ImportError as e:
raise exceptions.ImproperlyConfigured(_('Error importing middleware %(module)s: "%(error)s".') % {'module': mw_module, 'error': e})
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured(_('Middleware module "%(module)s" does not define a "%(class)s" class.') % {'module': mw_module, 'class':mw_classname})
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
# End brutal code lift
# We need to make sure we don't have a process_request function because we don't know what
# application will handle the request at the point process_request is called
if hasattr(mw_instance, 'process_request'):
raise exceptions.ImproperlyConfigured(_('AppSpecificMiddleware module "%(module)s" has a process_request function' + \
' which is impossible.') % {'module': middleware_path})
if hasattr(mw_instance, 'process_view'):
result['view'].append(mw_instance.process_view)
if hasattr(mw_instance, 'process_response'):
result['response'].insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
result['exception'].insert(0, mw_instance.process_exception)
return result
class LoginAndPermissionMiddleware(object):
"""
Middleware that forces all views (except those that opt out) through authentication.
"""
def process_request(self, request):
# When local user login, oidc middleware refresh token if oidc_id_token_expiration doesn't exists!
if request.session.get('_auth_user_backend', '') == 'desktop.auth.backend.AllowFirstUserDjangoBackend' \
and 'desktop.auth.backend.OIDCBackend' in AUTH.BACKEND.get():
request.session['oidc_id_token_expiration'] = time.time() + 300
def process_view(self, request, view_func, view_args, view_kwargs):
"""
We also perform access logging in ``process_view()`` since we have the view function,
which tells us the log level. The downside is that we don't have the status code,
which isn't useful for status logging anyways.
"""
request.ts = time.time()
request.view_func = view_func
access_log_level = getattr(view_func, 'access_log_level', None)
# Skip loop for oidc
if request.path in ['/oidc/authenticate/', '/oidc/callback/', '/oidc/logout/', '/hue/oidc_failed/']:
return None
# Skip views not requiring login
# If the view has "opted out" of login required, skip
if hasattr(view_func, "login_notrequired"):
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
# There are certain django views which are also opt-out, but
# it would be evil to go add attributes to them
if view_func in DJANGO_VIEW_AUTH_WHITELIST:
log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
return None
# If user is logged in, check that he has permissions to access the app
if request.user.is_active and request.user.is_authenticated():
AppSpecificMiddleware.augment_request_with_app(request, view_func)
# Until Django 1.3 which resolves returning the URL name, just do a match of the name of the view
try:
access_view = 'access_view:%s:%s' % (request._desktop_app, resolve(request.path)[0].__name__)
except Exception as e:
access_log(request, 'error checking view perm: %s' % e, level=access_log_level)
access_view = ''
app_accessed = request._desktop_app
app_libs_whitelist = ("desktop", "home", "home2", "about", "hue", "editor", "notebook", "indexer", "404", "500", "403")
if not ENABLE_CONNECTORS.get():
# Accessing an app can access an underlying other app.
# e.g. impala or spark uses code from beeswax and so accessing impala shows up as beeswax here.
# Here we trust the URL to be the real app we need to check the perms.
ui_app_accessed = get_app_name(request)
if app_accessed != ui_app_accessed and ui_app_accessed not in ('logs', 'accounts', 'login'):
app_accessed = ui_app_accessed
if app_accessed and \
app_accessed not in app_libs_whitelist and \
not (
is_admin(request.user) or
request.user.has_hue_permission(action="access", app=app_accessed) or
request.user.has_hue_permission(action=access_view, app=app_accessed)
) and \
not (app_accessed == '__debug__' and DJANGO_DEBUG_MODE.get()):
access_log(request, 'permission denied', level=access_log_level)
return PopupException(
_("You do not have permission to access the %(app_name)s application.") % {'app_name': app_accessed.capitalize()},
error_code=401
).response(request)
else:
if not hasattr(request, 'view_func'):
log_page_hit(request, view_func, level=access_log_level)
return None
logging.info("Redirecting to login page: %s", request.get_full_path())
access_log(request, 'login redirection', level=access_log_level)
no_idle_backends = (
"libsaml.backend.SAML2Backend",
"desktop.auth.backend.SpnegoDjangoBackend",
"desktop.auth.backend.KnoxSpnegoDjangoBackend"
)
if request.ajax and all(no_idle_backend not in AUTH.BACKEND.get() for no_idle_backend in no_idle_backends):
# Send back a magic header which causes Hue.Request to interpose itself
# in the ajax request and make the user login before resubmitting the
# request.
response = HttpResponse("/* login required */", content_type="text/javascript")
response[MIDDLEWARE_HEADER] = 'LOGIN_REQUIRED'
return response
else:
if request.GET.get('is_embeddable'):
return JsonResponse({'url': "%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME, urlquote('/hue' + request.get_full_path().replace('is_embeddable=true', '').replace('&&','&')))}) # Remove embeddable so redirect from & to login works. Login page is not embeddable
else:
return HttpResponseRedirect("%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME, urlquote(request.get_full_path())))
def process_response(self, request, response):
if hasattr(request, 'ts') and hasattr(request, 'view_func'):
log_page_hit(request, request.view_func, level=logging.INFO, start_time=request.ts, response=response)
return response
class JsonMessage(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __str__(self):
return json.dumps(self.kwargs)
class AuditLoggingMiddleware(object):
def __init__(self):
self.impersonator = SERVER_USER.get()
if not AUDIT_EVENT_LOG_DIR.get():
LOG.info('Unloading AuditLoggingMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
response['audited'] = False
try:
if hasattr(request, 'audit') and request.audit is not None:
self._log_message(request, response)
response['audited'] = True
except Exception as e:
LOG.error('Could not audit the request: %s' % e)
return response
def _log_message(self, request, response=None):
audit_logger = get_audit_logger()
audit_logger.debug(JsonMessage(**{
'username': self._get_username(request),
'impersonator': self.impersonator,
'ipAddress': self._get_client_ip(request),
'operation': request.audit['operation'],
'operationText': request.audit.get('operationText', ''),
'eventTime': self._milliseconds_since_epoch(),
'allowed': self._get_allowed(request, response),
'service': get_app_name(request),
'url': request.path
}))
def _get_client_ip(self, request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
x_forwarded_for = x_forwarded_for.split(',')[0]
return request.META.get('HTTP_CLIENT_IP') or x_forwarded_for or request.META.get('REMOTE_ADDR')
def _get_username(self, request):
username = 'anonymous'
if request.audit.get('username', None):
username = request.audit.get('username')
elif hasattr(request, 'user') and not request.user.is_anonymous():
username = request.user.get_username()
return username
def _milliseconds_since_epoch(self):
return int(time.time() * 1000)
def _get_allowed(self, request, response=None):
allowed = response.status_code != 401
if 'allowed' in request.audit:
return request.audit['allowed']
return allowed
try:
import tidylib
_has_tidylib = True
except Exception as ex:
# The exception type is not ImportError. It's actually an OSError.
logging.warn("Failed to import tidylib (for debugging). Is libtidy installed?")
_has_tidylib = False
class HtmlValidationMiddleware(object):
"""
If configured, validate output html for every response.
"""
def __init__(self):
self._logger = logging.getLogger('HtmlValidationMiddleware')
if not _has_tidylib:
logging.error("HtmlValidationMiddleware not activatived: Failed to import tidylib.")
return
# Things that we don't care about
self._to_ignore = (
re.compile('- Warning: <.*> proprietary attribute "data-'),
re.compile('- Warning: trimming empty'),
re.compile('- Info:'),
)
# Find the directory to write tidy html output
try:
self._outdir = os.path.join(tempfile.gettempdir(), 'hue_html_validation')
if not os.path.isdir(self._outdir):
os.mkdir(self._outdir, 0o755)
except Exception as ex:
self._logger.exception('Failed to get temp directory: %s', (ex,))
self._outdir = tempfile.mkdtemp(prefix='hue_html_validation-')
# Options to pass to libtidy. See
# http://tidy.sourceforge.net/docs/quickref.html
self._options = {
'show-warnings': 1,
'output-html': 0,
'output-xhtml': 1,
'char-encoding': 'utf8',
'output-encoding': 'utf8',
'indent': 1,
'wrap': 0,
}
def process_response(self, request, response):
if not _has_tidylib or not self._is_html(request, response):
return response
html, errors = tidylib.tidy_document(response.content,
self._options,
keep_doc=True)
if not errors:
return response
# Filter out what we care about
err_list = errors.rstrip().split('\n')
err_list = self._filter_warnings(err_list)
if not err_list:
return response
try:
fn = urlresolvers.resolve(request.path)[0]
fn_name = '%s.%s' % (fn.__module__, fn.__name__)
except:
LOG.exception('failed to resolve url')
fn_name = '<unresolved_url>'
# Write the two versions of html out for offline debugging
filename = os.path.join(self._outdir, fn_name)
result = "HTML tidy result: %s [%s]:" \
"\n\t%s" \
"\nPlease see %s.orig %s.tidy\n-------" % \
(request.path, fn_name, '\n\t'.join(err_list), filename, filename)
    open(filename + '.orig', 'w').write(i18n.smart_str(response.content))
    open(filename + '.tidy', 'w').write(i18n.smart_str(html))
    open(filename + '.info', 'w').write(i18n.smart_str(result))
self._logger.error(result)
return response
def _filter_warnings(self, err_list):
"""A hacky way to filter out things that we don't care about."""
res = [ ]
for err in err_list:
for ignore in self._to_ignore:
if ignore.search(err):
break
else:
res.append(err)
return res
def _is_html(self, request, response):
return not request.is_ajax() and \
'html' in response['Content-Type'] and \
200 <= response.status_code < 300
class ProxyMiddleware(object):
def __init__(self):
if not 'desktop.auth.backend.AllowAllBackend' in AUTH.BACKEND.get():
LOG.info('Unloading ProxyMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
return response
def process_request(self, request):
view_func = resolve(request.path)[0]
if view_func in DJANGO_VIEW_AUTH_WHITELIST:
return
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise exceptions.ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the SpnegoUserMiddleware class.")
if request.GET.get('user.name'):
try:
username = request.GET.get('user.name')
user = authenticate(username=username, password='')
if user:
request.user = user
login(request, user)
msg = 'Successful login for user: %s' % request.user.username
else:
msg = 'Failed login for user: %s' % request.user.username
request.audit = {
'operation': 'USER_LOGIN',
'username': request.user.username,
'operationText': msg
}
return
except:
LOG.exception('Unexpected error when authenticating')
return
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError:
pass
return username
class SpnegoMiddleware(object):
"""
  Based on the WSGI SPNEGO middleware class posted here:
http://code.activestate.com/recipes/576992/
"""
def __init__(self):
if not set(AUTH.BACKEND.get()).intersection(
set(['desktop.auth.backend.SpnegoDjangoBackend', 'desktop.auth.backend.KnoxSpnegoDjangoBackend'])
):
LOG.info('Unloading SpnegoMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
if 'GSS-String' in request.META:
response['WWW-Authenticate'] = request.META['GSS-String']
elif 'Return-401' in request.META:
response = HttpResponse("401 Unauthorized", content_type="text/plain",
status=401)
response['WWW-Authenticate'] = 'Negotiate'
response.status = 401
return response
def process_request(self, request):
"""
The process_request() method needs to communicate some state to the
process_response() method. The two options for this are to return an
HttpResponse object or to modify the META headers in the request object. In
order to ensure that all of the middleware is properly invoked, this code
    currently uses the latter approach. The following headers are currently used:
GSS-String:
This means that GSS authentication was successful and that we need to pass
this value for the WWW-Authenticate header in the response.
Return-401:
This means that the SPNEGO backend is in use, but we didn't get an
AUTHORIZATION header from the client. The way that the protocol works
(http://tools.ietf.org/html/rfc4559) is by having the first response to an
un-authenticated request be a 401 with the WWW-Authenticate header set to
Negotiate. This will cause the browser to re-try the request with the
AUTHORIZATION header set.
"""
view_func = resolve(request.path)[0]
if view_func in DJANGO_VIEW_AUTH_WHITELIST:
return
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise exceptions.ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the SpnegoUserMiddleware class.")
if 'HTTP_AUTHORIZATION' in request.META:
type, authstr = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if type == 'Negotiate':
try:
result, context = kerberos.authGSSServerInit('HTTP')
if result != 1:
return
gssstring=''
r=kerberos.authGSSServerStep(context,authstr)
if r == 1:
gssstring=kerberos.authGSSServerResponse(context)
request.META['GSS-String'] = 'Negotiate %s' % gssstring
else:
kerberos.authGSSServerClean(context)
return
username = kerberos.authGSSServerUserName(context)
kerberos.authGSSServerClean(context)
# In Trusted knox proxy, Hue must expect following:
# Trusted knox user: KNOX_PRINCIPAL
# Trusted knox proxy host: KNOX_PROXYHOSTS
if 'desktop.auth.backend.KnoxSpnegoDjangoBackend' in AUTH.BACKEND.get():
knox_verification = False
principals = self.clean_principal(KNOX.KNOX_PRINCIPAL.get())
principal = self.clean_principal(username)
if principal.intersection(principals):
# This may contain chain of reverse proxies, e.g. knox proxy, hue load balancer
# Compare hostname on both HTTP_X_FORWARDED_HOST & KNOX_PROXYHOSTS. Both of these can be configured to use either hostname or IPs and we have to normalize to one or the other
req_hosts = self.clean_host(request.META['HTTP_X_FORWARDED_HOST'])
knox_proxy = self.clean_host(KNOX.KNOX_PROXYHOSTS.get())
if req_hosts.intersection(knox_proxy):
knox_verification = True
else:
access_warn(request, 'Failed to verify provided host %s with %s ' % (req_hosts, knox_proxy))
else:
access_warn(request, 'Failed to verify provided username %s with %s ' % (principal, principals))
# If knox authentication failed then generate 401 (Unauthorized error)
if not knox_verification:
request.META['Return-401'] = ''
return
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
user = authenticate(username=username, request=request)
if user:
request.user = user
login(request, user)
msg = 'Successful login for user: %s' % request.user.username
else:
msg = 'Failed login for user: %s' % request.user.username
request.audit = {
'operation': 'USER_LOGIN',
'username': request.user.username,
'operationText': msg
}
access_warn(request, msg)
return
except:
LOG.exception('Unexpected error when authenticating against KDC')
return
else:
request.META['Return-401'] = ''
return
else:
if not request.user.is_authenticated():
request.META['Return-401'] = ''
return
def clean_host(self, pattern):
hosts = []
if pattern:
pattern_list = pattern if isinstance(pattern, list) else pattern.split(',')
for hostport in pattern_list:
host = hostport.split(':')[0].strip()
try:
hosts.append(socket.gethostbyaddr(host)[0])
except Exception:
LOG.exception('Could not resolve host addr %s' % host)
hosts.append(host)
return set(hosts)
def clean_principal(self, pattern):
principals = []
if pattern:
pattern_list = pattern if isinstance(pattern, list) else pattern.split(',')
for principal_host in pattern_list:
principal = principal_host.split('/')[0].strip()
principals.append(principal)
return set(principals)
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_str)
try:
username = backend.clean_username(username, request)
except AttributeError:
pass
return username
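# A minimal client-side sketch of the SPNEGO exchange described in
# SpnegoMiddleware.process_request() above (RFC 4559). Illustrative only: it
# assumes the pykerberos package and a valid Kerberos ticket, and is not part
# of the middleware itself.
def _spnego_client_token_example(host):
  import kerberos as krb
  result, context = krb.authGSSClientInit('HTTP@%s' % host)
  if result != 1:
    raise RuntimeError('Could not initialize the GSSAPI client context')
  # Answer the server's "WWW-Authenticate: Negotiate" challenge by sending
  # "Authorization: Negotiate <token>" on the retried request.
  krb.authGSSClientStep(context, '')
  token = krb.authGSSClientResponse(context)
  krb.authGSSClientClean(context)
  return 'Negotiate %s' % token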
class HueRemoteUserMiddleware(RemoteUserMiddleware):
"""
Middleware to delegate authentication to a proxy server. The proxy server
will set an HTTP header (defaults to Remote-User) with the name of the
authenticated user. This class extends the RemoteUserMiddleware class
built into Django with the ability to configure the HTTP header and to
unload the middleware if the RemoteUserDjangoBackend is not currently
in use.
"""
def __init__(self):
if not 'desktop.auth.backend.RemoteUserDjangoBackend' in AUTH.BACKEND.get():
LOG.info('Unloading HueRemoteUserMiddleware')
raise exceptions.MiddlewareNotUsed
self.header = AUTH.REMOTE_USER_HEADER.get()
class EnsureSafeMethodMiddleware(object):
"""
Middleware to white list configured HTTP request methods.
"""
def process_request(self, request):
if request.method not in HTTP_ALLOWED_METHODS.get():
return HttpResponseNotAllowed(HTTP_ALLOWED_METHODS.get())
class EnsureSafeRedirectURLMiddleware(object):
"""
Middleware to white list configured redirect URLs.
"""
def process_response(self, request, response):
if response.status_code in (301, 302, 303, 305, 307, 308) and response.get('Location') and not hasattr(response, 'redirect_override'):
redirection_patterns = REDIRECT_WHITELIST.get()
location = response['Location']
if any(regexp.match(location) for regexp in redirection_patterns):
return response
if is_safe_url(location, request.get_host()):
return response
if request.path in ['/oidc/authenticate/', '/oidc/callback/', '/oidc/logout/', '/hue/oidc_failed/']:
return response
response = render("error.mako", request, {
'error': _('Redirect to %s is not allowed.') % response['Location'],
'is_embeddable': request.GET.get('is_embeddable', False),
})
response.status_code = 403
return response
else:
return response
class MetricsMiddleware(object):
"""
Middleware to track the number of active requests.
"""
def process_request(self, request):
self._response_timer = metrics.response_time.time()
metrics.active_requests.inc()
def process_exception(self, request, exception):
self._response_timer.stop()
metrics.request_exceptions.inc()
def process_response(self, request, response):
self._response_timer.stop()
metrics.active_requests.dec()
return response
class ContentSecurityPolicyMiddleware(object):
def __init__(self, get_response=None):
self.secure_content_security_policy = SECURE_CONTENT_SECURITY_POLICY.get()
if not self.secure_content_security_policy:
LOG.info('Unloading ContentSecurityPolicyMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
if self.secure_content_security_policy and not 'Content-Security-Policy' in response:
response["Content-Security-Policy"] = self.secure_content_security_policy
return response
class MimeTypeJSFileFixStreamingMiddleware(object):
"""
  Middleware to detect and fix the ".js" mimetype. SLES 11 SP4 is an example OS that detects js
  files as "text/x-js"; if the strict X-Content-Type-Options=nosniff header is set, the browser
  then fails to execute the javascript file.
"""
def __init__(self):
jsmimetypes = ['application/javascript', 'application/ecmascript']
if mimetypes.guess_type("dummy.js")[0] in jsmimetypes:
LOG.info('Unloading MimeTypeJSFileFixStreamingMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
if request.path_info.endswith('.js'):
response['Content-Type'] = "application/javascript"
return response
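# A minimal standalone sketch (not part of the middleware) of the mimetype
# issue worked around above: on some systems mimetypes.guess_type() maps ".js"
# to "text/x-js", and registering the correct type fixes it process-wide.
def _ensure_js_mimetype_example():
  import mimetypes
  if mimetypes.guess_type("dummy.js")[0] not in ('application/javascript', 'application/ecmascript'):
    # add_type() updates the global mimetypes registry for this process only
    mimetypes.add_type('application/javascript', '.js')
  return mimetypes.guess_type("dummy.js")[0]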
| 37.210648
| 272
| 0.69042
|
5f968955b150f7158a05d93e18ea9772a199547a
| 1,250
|
py
|
Python
|
src/pymap3d/tests/test_vincenty_vreckon.py
|
wrlssqi/pymap3d
|
bd91a5ff4e9066eb33fead3006ba9de191e2c5e5
|
[
"BSD-2-Clause"
] | 116
|
2020-02-23T02:04:18.000Z
|
2022-03-29T00:19:37.000Z
|
src/pymap3d/tests/test_vincenty_vreckon.py
|
wrlssqi/pymap3d
|
bd91a5ff4e9066eb33fead3006ba9de191e2c5e5
|
[
"BSD-2-Clause"
] | 30
|
2017-04-25T12:43:50.000Z
|
2020-01-27T07:56:18.000Z
|
src/pymap3d/tests/test_vincenty_vreckon.py
|
wrlssqi/pymap3d
|
bd91a5ff4e9066eb33fead3006ba9de191e2c5e5
|
[
"BSD-2-Clause"
] | 53
|
2017-04-25T12:25:40.000Z
|
2020-01-17T12:32:22.000Z
|
import pytest
from pytest import approx
import pymap3d.vincenty as vincenty
ll0 = [10, 20]
lat2 = [10.02137267, 10.01917819]
lon2 = [20.0168471, 20.0193493]
az2 = [218.00292856, 225.00336316]
sr1 = [3e3, 1e3]
az1 = [38, 45]
lat3 = (10.02137267, 10.00639286)
lon3 = (20.0168471, 20.00644951)
az3 = (218.00292856, 225.0011203)
@pytest.mark.parametrize(
"lat,lon,srange,az,lato,lono",
[
(0, 0, 0, 0, 0, 0),
(0, 0, 1.001875e7, 90, 0, 90),
(0, 0, 1.001875e7, 270, 0, 270),
(0, 0, 1.001875e7, -90, 0, 270),
(0, 0, 2.00375e7, 90, 0, 180),
(0, 0, 2.00375e7, 270, 0, 180),
(0, 0, 2.00375e7, -90, 0, 180),
],
)
def test_unit(lat, lon, srange, az, lato, lono):
lat1, lon1 = vincenty.vreckon(lat, lon, srange, az)
assert lat1 == approx(lato)
assert isinstance(lat1, float)
assert lon1 == approx(lono, rel=0.001)
assert isinstance(lon1, float)
def test_az_vector():
pytest.importorskip("numpy")
a, b = vincenty.vreckon(*ll0, sr1[0], az1)
assert a == approx(lat2)
assert b == approx(lon2)
def test_both_vector():
pytest.importorskip("numpy")
a, b = vincenty.vreckon(10, 20, sr1, az1)
assert a == approx(lat3)
assert b == approx(lon3)
| 24.509804
| 55
| 0.6008
|
122dbb985bcd529a29aa213eeabbb97ca07d5257
| 913
|
py
|
Python
|
examples/django/testsite/urls.py
|
wangsha/graphene-file-upload
|
70efa3238ba5155ee5d2f4a2dbbb519884bdca59
|
[
"MIT"
] | 276
|
2018-04-06T10:08:12.000Z
|
2022-03-07T01:14:27.000Z
|
examples/django/testsite/urls.py
|
wangsha/graphene-file-upload
|
70efa3238ba5155ee5d2f4a2dbbb519884bdca59
|
[
"MIT"
] | 49
|
2018-04-05T23:48:57.000Z
|
2022-03-17T12:43:10.000Z
|
examples/django/testsite/urls.py
|
wangsha/graphene-file-upload
|
70efa3238ba5155ee5d2f4a2dbbb519884bdca59
|
[
"MIT"
] | 42
|
2018-05-29T06:51:59.000Z
|
2022-03-03T15:27:27.000Z
|
"""testsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from graphene_file_upload.django import FileUploadGraphQLView
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^graphql', FileUploadGraphQLView.as_view(graphiql=True)),
]
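# A hypothetical client-side sketch (not part of this example project) showing
# how a file could be sent to the /graphql endpoint above using the GraphQL
# multipart request convention that FileUploadGraphQLView accepts. The mutation
# name "myUpload", the endpoint URL, and the use of the requests library are
# assumptions for illustration only.
def _upload_example(path):
    import json
    import requests  # assumed to be installed; illustration only
    operations = json.dumps({
        "query": "mutation($file: Upload!) { myUpload(file: $file) { ok } }",
        "variables": {"file": None},
    })
    file_map = json.dumps({"0": ["variables.file"]})
    with open(path, "rb") as fh:
        return requests.post(
            "http://localhost:8000/graphql",
            data={"operations": operations, "map": file_map},
            files={"0": fh},
        )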
| 36.52
| 77
| 0.730559
|
0281d01c3c0465970e5631e5bab2cfb4c97cd81a
| 1,587
|
py
|
Python
|
fluent_pages/utils/db.py
|
masschallenge/django-fluent-pages
|
8beb083d89fba935ef3bfeda8cacf566f28b1334
|
[
"Apache-2.0"
] | null | null | null |
fluent_pages/utils/db.py
|
masschallenge/django-fluent-pages
|
8beb083d89fba935ef3bfeda8cacf566f28b1334
|
[
"Apache-2.0"
] | 1
|
2021-03-24T18:53:10.000Z
|
2021-03-24T18:53:10.000Z
|
fluent_pages/utils/db.py
|
masschallenge/django-fluent-pages
|
8beb083d89fba935ef3bfeda8cacf566f28b1334
|
[
"Apache-2.0"
] | null | null | null |
"""
Custom generic managers
"""
from django.db import models
from django.db.models.query import QuerySet
# Based on django-queryset-transform.
# This object however, operates on a per-object instance
# without breaking the result generators
class DecoratingQuerySet(QuerySet):
"""
An enhancement of the QuerySet which allows objects to be decorated
with extra properties before they are returned.
"""
def __init__(self, *args, **kwargs):
super(DecoratingQuerySet, self).__init__(*args, **kwargs)
self._decorate_funcs = []
def _clone(self, klass=None, setup=False, **kw):
c = super(DecoratingQuerySet, self)._clone(klass, setup, **kw)
c._decorate_funcs = self._decorate_funcs
return c
def decorate(self, fn):
"""
Register a function which will decorate a retrieved object before it's returned.
"""
if fn not in self._decorate_funcs:
self._decorate_funcs.append(fn)
return self
def iterator(self):
"""
        Overridden iterator which applies the decorate functions to each object before yielding it.
"""
base_iterator = super(DecoratingQuerySet, self).iterator()
for obj in base_iterator:
# Apply the decorators
for fn in self._decorate_funcs:
fn(obj)
yield obj
class DecoratorManager(models.Manager):
"""
    The manager class which ensures the enhanced DecoratingQuerySet object is used.
"""
def get_query_set(self):
return DecoratingQuerySet(self.model)
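# A minimal usage sketch (hypothetical model and field names): decorate()
# registers a callback that is applied to every object as the queryset is
# iterated, without breaking the generator behaviour described above.
def decorate_example(model_class):
    # model_class is assumed to be a Django model whose default manager is a
    # DecoratorManager and which has a "title" field.
    def add_upper_title(obj):
        obj.upper_title = obj.title.upper()
    return model_class.objects.get_query_set().decorate(add_upper_title)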
| 27.842105
| 89
| 0.657845
|
38d72429228c88544db0a5291176e0be42f473c8
| 7,703
|
py
|
Python
|
pyassim/util_functions.py
|
ZoneTsuyoshi/pyassim
|
1b40ce914a7b1e4ec6e240a6d67a19a22e431137
|
[
"MIT"
] | null | null | null |
pyassim/util_functions.py
|
ZoneTsuyoshi/pyassim
|
1b40ce914a7b1e4ec6e240a6d67a19a22e431137
|
[
"MIT"
] | null | null | null |
pyassim/util_functions.py
|
ZoneTsuyoshi/pyassim
|
1b40ce914a7b1e4ec6e240a6d67a19a22e431137
|
[
"MIT"
] | null | null | null |
# Copyright (c) The pyakalman developers.
# All rights reserved.
"""
Utility functions
"_determine_dimensionality", "_parse_observations", and "_last_dims" functions are originally provided in pykalman.
The other functions are original in this package.
"""
import math
import numpy as np
try:
import cupy
except:
pass
def judge_xp_type(xp_type = "numpy"):
if xp_type in ["numpy", False]:
return np
elif xp_type in ["cupy", True]:
return cupy
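# A small illustrative example: the returned module exposes the numpy API, so
# array code can be written once and dispatched to numpy or cupy.
def _judge_xp_type_example():
    xp = judge_xp_type("numpy")
    return xp.arange(3).sum()  # -> 3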
def _determine_dimensionality(variables, default = None, xp_type = "numpy"):
"""Derive the dimensionality of the state space
Parameters
----------
variables : list of ({None, array}, conversion function, index)
variables, functions to convert them to arrays, and indices in those
arrays to derive dimensionality from.
default : {None, int}
default dimensionality to return if variables is empty
Returns
-------
dim : int
dimensionality of state space as derived from variables or default.
"""
xp = judge_xp_type(xp_type)
# gather possible values based on the variables
candidates = []
for (v, converter, idx) in variables:
if v is not None:
v = converter(v)
candidates.append(v.shape[idx])
# also use the manually specified default
if default is not None:
candidates.append(default)
# ensure consistency of all derived values
# If dimensionality of candidates doesn't have consistency,
# raise ValueError
if len(candidates) == 0:
return 1
else:
if not xp.all(xp.array(candidates) == candidates[0]):
print(candidates)
raise ValueError(
"The shape of all " +
"parameters is not consistent. " +
"Please re-check their values."
)
return candidates[0]
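# A small illustrative example: the state dimension is read from the supplied
# 3x3 transition matrix and must agree with the manual default.
def _determine_dimensionality_example():
    F = np.eye(3)
    return _determine_dimensionality([(F, np.asarray, -2)], default=3)  # -> 3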
def _parse_observations(obs, xp_type="numpy"):
"""Safely convert observations to their expected format"""
xp = judge_xp_type(xp_type)
obs = xp.ma.atleast_2d(obs)
    # If the second axis is the longer one, swap the first and second axes
if obs.shape[0] == 1 and obs.shape[1] > 1:
obs = obs.T
    # Mask missing (NaN) values
obs = xp.ma.array(obs, mask = xp.isnan(obs))
return obs
def _last_dims(X, t, ndims = 2, xp_type="numpy"):
"""Extract the final dimensions of `X`
Extract the final `ndim` dimensions at index `t` if `X` has >= `ndim` + 1
dimensions, otherwise return `X`.
Parameters
----------
X : array with at least dimension `ndims`
t : int
index to use for the `ndims` + 1th dimension
ndims : int, optional
number of dimensions in the array desired
Returns
-------
Y : array with dimension `ndims`
the final `ndims` dimensions indexed by `t`
"""
xp = judge_xp_type(xp_type)
X = xp.asarray(X)
if len(X.shape) == ndims + 1:
return X[t]
elif len(X.shape) == ndims:
return X
else:
raise ValueError(("X only has %d dimensions when %d" +
" or more are required") % (len(X.shape), ndims))
def _log_sum_exp(a, axis=None, keepdims=False, xp_type="numpy"):
"""Calculate logsumexp like as scipy.special.logsumexp
"""
xp = judge_xp_type(xp_type)
a_max = a.max(axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~xp.isfinite(a_max)] = 0
elif not xp.isfinite(a_max):
a_max = 0
adj_exp = xp.exp(a - a_max)
sum_exp = adj_exp.sum(axis=axis, keepdims=keepdims)
out = xp.log(sum_exp)
if not keepdims:
a_max = xp.squeeze(a_max, axis=axis)
out += a_max
return out
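# A small illustrative example: the log-sum-exp of two zeros is log(2).
def _log_sum_exp_example():
    a = np.array([0.0, 0.0])
    return _log_sum_exp(a)  # -> approximately 0.693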
# calculate transition covariance
def _calc_transition_covariance(G, Q, xp_type="numpy"):
"""Calculate transition covariance
Args:
G [n_time - 1, n_dim_sys, n_dim_noise] or [n_dim_sys, n_dim_noise]
{numpy-array, float}
transition noise matrix
            noise transition matrix [time, state, noise] or [state, noise]
        Q [n_time - 1, n_dim_noise, n_dim_noise] or [n_dim_noise, n_dim_noise]
            {numpy-array, float}
            system transition covariance for times
            system noise covariance matrix [time, noise, noise] or [noise, noise]
"""
    xp = judge_xp_type(xp_type)
    if G.ndim == 2:
GT = G.T
elif G.ndim == 3:
GT = G.transpose(0,2,1)
else:
raise ValueError('The ndim of transition_noise_matrices'
+ ' should be 2 or 3,' + ' but your input is ' + str(G.ndim) + '.')
if Q.ndim == 2 or Q.ndim == 3:
return xp.matmul(G, xp.matmul(Q, GT))
else:
raise ValueError('The ndim of transition_covariance should be 2 or 3,'
+ ' but your input is ' + str(Q.ndim) + '.')
# log prob gauss
def _log_prob_gauss(mean=None, cov=None, pre=None, xp_type="numpy"):
    xp = judge_xp_type(xp_type)
    if mean is not None:
        if cov is not None:
            def func(x):
                if x.ndim==1:
                    x = x.reshape(-1,1)
                # slogdet returns (sign, logabsdet); only the log-determinant is used
                return -0.5 * (len(mean)*math.log(2*math.pi) + xp.linalg.slogdet(cov)[1]
                    + (mean - x).T @ xp.linalg.pinv(cov) @ (mean - x))
        elif pre is not None:
            def func(x):
                if x.ndim==1:
                    x = x.reshape(-1,1)
                return -0.5 * (len(mean)*math.log(2*math.pi) - xp.linalg.slogdet(pre)[1]
                    + (mean - x).T @ pre @ (mean - x))
        else:
            def func(x, cov):
                if x.ndim==1:
                    x = x.reshape(-1,1)
                return -0.5 * (len(mean)*math.log(2*math.pi) + xp.linalg.slogdet(cov)[1]
                    + (mean - x).T @ xp.linalg.pinv(cov) @ (mean - x))
    else:
        if cov is not None:
            def func(x, mean):
                if x.ndim==1:
                    x = x.reshape(-1,1)
                return -0.5 * (len(mean)*math.log(2*math.pi) + xp.linalg.slogdet(cov)[1]
                    + (mean - x).T @ xp.linalg.pinv(cov) @ (mean - x))
        elif pre is not None:
            def func(x, mean):
                if x.ndim==1:
                    x = x.reshape(-1,1)
                return -0.5 * (len(mean)*math.log(2*math.pi) - xp.linalg.slogdet(pre)[1]
                    + (mean - x).T @ pre @ (mean - x))
        else:
            raise ValueError("mean, covariance and precision are all None.")
    return func
# calculate MSE
def mean_squared_error(x, y, xp_type="numpy"):
assert x.shape == y.shape
xp = judge_xp_type(xp_type)
return xp.square(x - y).mean()
# return xp.sqrt(xp.sum(xp.square(x - y))) / x.size
# calculate MAE
def mean_absolute_error(x, y, xp_type="numpy"):
assert x.shape == y.shape
xp = judge_xp_type(xp_type)
return xp.mean(xp.absolute(x - y))
# intersection
def _intersect1d(ar1, ar2, assume_unique=False, return_indices=False, xp_type="numpy"):
xp = judge_xp_type(xp_type)
ar1 = xp.asanyarray(ar1)
ar2 = xp.asanyarray(ar2)
if not assume_unique:
if return_indices:
ar1, ind1 = xp.unique(ar1, return_index=True)
ar2, ind2 = xp.unique(ar2, return_index=True)
else:
ar1 = xp.unique(ar1)
ar2 = xp.unique(ar2)
else:
ar1 = ar1.ravel()
ar2 = ar2.ravel()
aux = xp.concatenate((ar1, ar2))
if return_indices:
aux_sort_indices = xp.argsort(aux)
aux = aux[aux_sort_indices]
else:
aux.sort()
mask = aux[1:] == aux[:-1]
int1d = aux[:-1][mask]
if return_indices:
ar1_indices = aux_sort_indices[:-1][mask]
ar2_indices = aux_sort_indices[1:][mask] - ar1.size
if not assume_unique:
ar1_indices = ind1[ar1_indices]
ar2_indices = ind2[ar2_indices]
return int1d, ar1_indices, ar2_indices
else:
return int1d
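# A small illustrative example mirroring numpy.intersect1d with indices.
def _intersect1d_example():
    values, idx1, idx2 = _intersect1d([1, 3, 5], [5, 1, 7], return_indices=True)
    return values, idx1, idx2  # -> (array([1, 5]), array([0, 2]), array([1, 0]))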
| 30.089844
| 115
| 0.572894
|
ce4d31f42fbee2df90003587ef301e0ae1d611ec
| 485
|
py
|
Python
|
kiss/controllers/page.py
|
stanfeldman/kiss.py
|
badc1941d190e17b538f230d11633c817b668476
|
[
"BSD-4-Clause"
] | 4
|
2016-07-04T15:22:30.000Z
|
2017-03-04T15:12:20.000Z
|
kiss/controllers/page.py
|
stanfeldman/kiss.py
|
badc1941d190e17b538f230d11633c817b668476
|
[
"BSD-4-Clause"
] | null | null | null |
kiss/controllers/page.py
|
stanfeldman/kiss.py
|
badc1941d190e17b538f230d11633c817b668476
|
[
"BSD-4-Clause"
] | 4
|
2018-11-13T05:51:33.000Z
|
2022-02-13T10:52:34.000Z
|
from core import Controller
from kiss.views.templates import TemplateResponse
class PageController(Controller):
"""
    If you just need to show a page, create a PageController and pass it your page and an optional context.
    Use it like other controllers in the urls settings of your app.
"""
def __init__(self, page, context={}):
self.page = page
self.context = context
def get(self, request):
self.context["request"] = request
return TemplateResponse(self.page, self.context)
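# A minimal usage sketch: the template name and context below are hypothetical,
# and the controller would normally be referenced from the urls settings of
# your app, like any other controller.
example_page_controller = PageController("index.html", {"title": "Home"})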
| 30.3125
| 100
| 0.754639
|
209abf1140db1d2c89d9177e5d299feba9953eee
| 22,795
|
py
|
Python
|
src/oci/management_dashboard/models/update_management_saved_search_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/management_dashboard/models/update_management_saved_search_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/management_dashboard/models/update_management_saved_search_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateManagementSavedSearchDetails(object):
"""
Properties of a saved search. Saved search ID must not be provided.
"""
#: A constant which can be used with the type property of a UpdateManagementSavedSearchDetails.
#: This constant has a value of "SEARCH_SHOW_IN_DASHBOARD"
TYPE_SEARCH_SHOW_IN_DASHBOARD = "SEARCH_SHOW_IN_DASHBOARD"
#: A constant which can be used with the type property of a UpdateManagementSavedSearchDetails.
#: This constant has a value of "SEARCH_DONT_SHOW_IN_DASHBOARD"
TYPE_SEARCH_DONT_SHOW_IN_DASHBOARD = "SEARCH_DONT_SHOW_IN_DASHBOARD"
#: A constant which can be used with the type property of a UpdateManagementSavedSearchDetails.
#: This constant has a value of "WIDGET_SHOW_IN_DASHBOARD"
TYPE_WIDGET_SHOW_IN_DASHBOARD = "WIDGET_SHOW_IN_DASHBOARD"
#: A constant which can be used with the type property of a UpdateManagementSavedSearchDetails.
#: This constant has a value of "WIDGET_DONT_SHOW_IN_DASHBOARD"
TYPE_WIDGET_DONT_SHOW_IN_DASHBOARD = "WIDGET_DONT_SHOW_IN_DASHBOARD"
def __init__(self, **kwargs):
"""
Initializes a new UpdateManagementSavedSearchDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this UpdateManagementSavedSearchDetails.
:type display_name: str
:param provider_id:
The value to assign to the provider_id property of this UpdateManagementSavedSearchDetails.
:type provider_id: str
:param provider_version:
The value to assign to the provider_version property of this UpdateManagementSavedSearchDetails.
:type provider_version: str
:param provider_name:
The value to assign to the provider_name property of this UpdateManagementSavedSearchDetails.
:type provider_name: str
:param compartment_id:
The value to assign to the compartment_id property of this UpdateManagementSavedSearchDetails.
:type compartment_id: str
:param is_oob_saved_search:
The value to assign to the is_oob_saved_search property of this UpdateManagementSavedSearchDetails.
:type is_oob_saved_search: bool
:param description:
The value to assign to the description property of this UpdateManagementSavedSearchDetails.
:type description: str
:param nls:
The value to assign to the nls property of this UpdateManagementSavedSearchDetails.
:type nls: object
:param type:
The value to assign to the type property of this UpdateManagementSavedSearchDetails.
Allowed values for this property are: "SEARCH_SHOW_IN_DASHBOARD", "SEARCH_DONT_SHOW_IN_DASHBOARD", "WIDGET_SHOW_IN_DASHBOARD", "WIDGET_DONT_SHOW_IN_DASHBOARD"
:type type: str
:param ui_config:
The value to assign to the ui_config property of this UpdateManagementSavedSearchDetails.
:type ui_config: object
:param data_config:
The value to assign to the data_config property of this UpdateManagementSavedSearchDetails.
:type data_config: list[object]
:param screen_image:
The value to assign to the screen_image property of this UpdateManagementSavedSearchDetails.
:type screen_image: str
:param metadata_version:
The value to assign to the metadata_version property of this UpdateManagementSavedSearchDetails.
:type metadata_version: str
:param widget_template:
The value to assign to the widget_template property of this UpdateManagementSavedSearchDetails.
:type widget_template: str
:param widget_vm:
The value to assign to the widget_vm property of this UpdateManagementSavedSearchDetails.
:type widget_vm: str
:param parameters_config:
The value to assign to the parameters_config property of this UpdateManagementSavedSearchDetails.
:type parameters_config: list[object]
:param drilldown_config:
The value to assign to the drilldown_config property of this UpdateManagementSavedSearchDetails.
:type drilldown_config: list[object]
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateManagementSavedSearchDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateManagementSavedSearchDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'display_name': 'str',
'provider_id': 'str',
'provider_version': 'str',
'provider_name': 'str',
'compartment_id': 'str',
'is_oob_saved_search': 'bool',
'description': 'str',
'nls': 'object',
'type': 'str',
'ui_config': 'object',
'data_config': 'list[object]',
'screen_image': 'str',
'metadata_version': 'str',
'widget_template': 'str',
'widget_vm': 'str',
'parameters_config': 'list[object]',
'drilldown_config': 'list[object]',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'display_name': 'displayName',
'provider_id': 'providerId',
'provider_version': 'providerVersion',
'provider_name': 'providerName',
'compartment_id': 'compartmentId',
'is_oob_saved_search': 'isOobSavedSearch',
'description': 'description',
'nls': 'nls',
'type': 'type',
'ui_config': 'uiConfig',
'data_config': 'dataConfig',
'screen_image': 'screenImage',
'metadata_version': 'metadataVersion',
'widget_template': 'widgetTemplate',
'widget_vm': 'widgetVM',
'parameters_config': 'parametersConfig',
'drilldown_config': 'drilldownConfig',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._display_name = None
self._provider_id = None
self._provider_version = None
self._provider_name = None
self._compartment_id = None
self._is_oob_saved_search = None
self._description = None
self._nls = None
self._type = None
self._ui_config = None
self._data_config = None
self._screen_image = None
self._metadata_version = None
self._widget_template = None
self._widget_vm = None
self._parameters_config = None
self._drilldown_config = None
self._freeform_tags = None
self._defined_tags = None
@property
def display_name(self):
"""
Gets the display_name of this UpdateManagementSavedSearchDetails.
Display name of the saved search.
:return: The display_name of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateManagementSavedSearchDetails.
Display name of the saved search.
:param display_name: The display_name of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._display_name = display_name
@property
def provider_id(self):
"""
Gets the provider_id of this UpdateManagementSavedSearchDetails.
ID of the service (for example log-analytics) that owns the saved search. Each service has a unique ID.
:return: The provider_id of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._provider_id
@provider_id.setter
def provider_id(self, provider_id):
"""
Sets the provider_id of this UpdateManagementSavedSearchDetails.
ID of the service (for example log-analytics) that owns the saved search. Each service has a unique ID.
:param provider_id: The provider_id of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._provider_id = provider_id
@property
def provider_version(self):
"""
Gets the provider_version of this UpdateManagementSavedSearchDetails.
Version of the service that owns this saved search.
:return: The provider_version of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._provider_version
@provider_version.setter
def provider_version(self, provider_version):
"""
Sets the provider_version of this UpdateManagementSavedSearchDetails.
Version of the service that owns this saved search.
:param provider_version: The provider_version of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._provider_version = provider_version
@property
def provider_name(self):
"""
Gets the provider_name of this UpdateManagementSavedSearchDetails.
Name of the service (for example, Logging Analytics) that owns the saved search.
:return: The provider_name of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._provider_name
@provider_name.setter
def provider_name(self, provider_name):
"""
Sets the provider_name of this UpdateManagementSavedSearchDetails.
Name of the service (for example, Logging Analytics) that owns the saved search.
:param provider_name: The provider_name of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._provider_name = provider_name
@property
def compartment_id(self):
"""
Gets the compartment_id of this UpdateManagementSavedSearchDetails.
OCID of the compartment in which the saved search resides.
:return: The compartment_id of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this UpdateManagementSavedSearchDetails.
OCID of the compartment in which the saved search resides.
:param compartment_id: The compartment_id of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def is_oob_saved_search(self):
"""
Gets the is_oob_saved_search of this UpdateManagementSavedSearchDetails.
Determines whether the saved search is an Out-of-the-Box (OOB) saved search. Note that OOB saved searches are only provided by Oracle and cannot be modified.
:return: The is_oob_saved_search of this UpdateManagementSavedSearchDetails.
:rtype: bool
"""
return self._is_oob_saved_search
@is_oob_saved_search.setter
def is_oob_saved_search(self, is_oob_saved_search):
"""
Sets the is_oob_saved_search of this UpdateManagementSavedSearchDetails.
Determines whether the saved search is an Out-of-the-Box (OOB) saved search. Note that OOB saved searches are only provided by Oracle and cannot be modified.
:param is_oob_saved_search: The is_oob_saved_search of this UpdateManagementSavedSearchDetails.
:type: bool
"""
self._is_oob_saved_search = is_oob_saved_search
@property
def description(self):
"""
Gets the description of this UpdateManagementSavedSearchDetails.
Description of the saved search.
:return: The description of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this UpdateManagementSavedSearchDetails.
Description of the saved search.
:param description: The description of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._description = description
@property
def nls(self):
"""
Gets the nls of this UpdateManagementSavedSearchDetails.
JSON that contains internationalization options.
:return: The nls of this UpdateManagementSavedSearchDetails.
:rtype: object
"""
return self._nls
@nls.setter
def nls(self, nls):
"""
Sets the nls of this UpdateManagementSavedSearchDetails.
JSON that contains internationalization options.
:param nls: The nls of this UpdateManagementSavedSearchDetails.
:type: object
"""
self._nls = nls
@property
def type(self):
"""
Gets the type of this UpdateManagementSavedSearchDetails.
Determines how the saved search is displayed in a dashboard.
Allowed values for this property are: "SEARCH_SHOW_IN_DASHBOARD", "SEARCH_DONT_SHOW_IN_DASHBOARD", "WIDGET_SHOW_IN_DASHBOARD", "WIDGET_DONT_SHOW_IN_DASHBOARD"
:return: The type of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this UpdateManagementSavedSearchDetails.
Determines how the saved search is displayed in a dashboard.
:param type: The type of this UpdateManagementSavedSearchDetails.
:type: str
"""
allowed_values = ["SEARCH_SHOW_IN_DASHBOARD", "SEARCH_DONT_SHOW_IN_DASHBOARD", "WIDGET_SHOW_IN_DASHBOARD", "WIDGET_DONT_SHOW_IN_DASHBOARD"]
if not value_allowed_none_or_none_sentinel(type, allowed_values):
raise ValueError(
"Invalid value for `type`, must be None or one of {0}"
.format(allowed_values)
)
self._type = type
@property
def ui_config(self):
"""
Gets the ui_config of this UpdateManagementSavedSearchDetails.
JSON that contains user interface options.
:return: The ui_config of this UpdateManagementSavedSearchDetails.
:rtype: object
"""
return self._ui_config
@ui_config.setter
def ui_config(self, ui_config):
"""
Sets the ui_config of this UpdateManagementSavedSearchDetails.
JSON that contains user interface options.
:param ui_config: The ui_config of this UpdateManagementSavedSearchDetails.
:type: object
"""
self._ui_config = ui_config
@property
def data_config(self):
"""
Gets the data_config of this UpdateManagementSavedSearchDetails.
Array of JSON that contain data source options.
:return: The data_config of this UpdateManagementSavedSearchDetails.
:rtype: list[object]
"""
return self._data_config
@data_config.setter
def data_config(self, data_config):
"""
Sets the data_config of this UpdateManagementSavedSearchDetails.
Array of JSON that contain data source options.
:param data_config: The data_config of this UpdateManagementSavedSearchDetails.
:type: list[object]
"""
self._data_config = data_config
@property
def screen_image(self):
"""
Gets the screen_image of this UpdateManagementSavedSearchDetails.
Screen image of the saved search.
:return: The screen_image of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._screen_image
@screen_image.setter
def screen_image(self, screen_image):
"""
Sets the screen_image of this UpdateManagementSavedSearchDetails.
Screen image of the saved search.
:param screen_image: The screen_image of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._screen_image = screen_image
@property
def metadata_version(self):
"""
Gets the metadata_version of this UpdateManagementSavedSearchDetails.
Version of the metadata.
:return: The metadata_version of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._metadata_version
@metadata_version.setter
def metadata_version(self, metadata_version):
"""
Sets the metadata_version of this UpdateManagementSavedSearchDetails.
Version of the metadata.
:param metadata_version: The metadata_version of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._metadata_version = metadata_version
@property
def widget_template(self):
"""
Gets the widget_template of this UpdateManagementSavedSearchDetails.
Reference to the HTML file of the widget.
:return: The widget_template of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._widget_template
@widget_template.setter
def widget_template(self, widget_template):
"""
Sets the widget_template of this UpdateManagementSavedSearchDetails.
Reference to the HTML file of the widget.
:param widget_template: The widget_template of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._widget_template = widget_template
@property
def widget_vm(self):
"""
Gets the widget_vm of this UpdateManagementSavedSearchDetails.
Reference to the view model of the widget.
:return: The widget_vm of this UpdateManagementSavedSearchDetails.
:rtype: str
"""
return self._widget_vm
@widget_vm.setter
def widget_vm(self, widget_vm):
"""
Sets the widget_vm of this UpdateManagementSavedSearchDetails.
Reference to the view model of the widget.
:param widget_vm: The widget_vm of this UpdateManagementSavedSearchDetails.
:type: str
"""
self._widget_vm = widget_vm
@property
def parameters_config(self):
"""
Gets the parameters_config of this UpdateManagementSavedSearchDetails.
Defines parameters for the saved search.
:return: The parameters_config of this UpdateManagementSavedSearchDetails.
:rtype: list[object]
"""
return self._parameters_config
@parameters_config.setter
def parameters_config(self, parameters_config):
"""
Sets the parameters_config of this UpdateManagementSavedSearchDetails.
Defines parameters for the saved search.
:param parameters_config: The parameters_config of this UpdateManagementSavedSearchDetails.
:type: list[object]
"""
self._parameters_config = parameters_config
@property
def drilldown_config(self):
"""
Gets the drilldown_config of this UpdateManagementSavedSearchDetails.
Drill-down configuration to define the destination of a drill-down action.
:return: The drilldown_config of this UpdateManagementSavedSearchDetails.
:rtype: list[object]
"""
return self._drilldown_config
@drilldown_config.setter
def drilldown_config(self, drilldown_config):
"""
Sets the drilldown_config of this UpdateManagementSavedSearchDetails.
Drill-down configuration to define the destination of a drill-down action.
:param drilldown_config: The drilldown_config of this UpdateManagementSavedSearchDetails.
:type: list[object]
"""
self._drilldown_config = drilldown_config
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateManagementSavedSearchDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this UpdateManagementSavedSearchDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateManagementSavedSearchDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this UpdateManagementSavedSearchDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateManagementSavedSearchDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this UpdateManagementSavedSearchDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateManagementSavedSearchDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this UpdateManagementSavedSearchDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
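# A minimal illustrative sketch (all values are made up): keyword arguments to
# __init__ correspond to the attributes declared in swagger_types above, and
# the type setter only accepts the allowed values listed in its docstring.
def _example_update_details():
    details = UpdateManagementSavedSearchDetails(
        display_name='CPU usage widget',
        type='WIDGET_SHOW_IN_DASHBOARD',
        freeform_tags={'team': 'observability'})
    return details.display_name, details.type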
| 34.642857
| 245
| 0.671989
|
579abe167fc728ccae238a02b41ba237ccd1c875
| 229
|
py
|
Python
|
src/pytezos/michelson/sections/__init__.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 98
|
2019-02-07T16:33:38.000Z
|
2022-03-31T15:53:41.000Z
|
src/pytezos/michelson/sections/__init__.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 152
|
2019-05-20T16:38:56.000Z
|
2022-03-30T14:24:38.000Z
|
src/pytezos/michelson/sections/__init__.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 34
|
2019-07-25T12:03:51.000Z
|
2021-11-11T22:23:38.000Z
|
from pytezos.michelson.sections.code import CodeSection
from pytezos.michelson.sections.parameter import ParameterSection
from pytezos.michelson.sections.storage import StorageSection
from pytezos.michelson.sections.tzt import *
| 45.8
| 65
| 0.873362
|
59169d8f8ea67d4122d217f8f9943b12ac04618f
| 8,625
|
py
|
Python
|
ambari-agent/src/main/python/ambari_agent/Controller.py
|
cglewis/ambari
|
e5d7e08e42baea2ad15784400c6f9e7ebb5f1608
|
[
"Apache-2.0"
] | null | null | null |
ambari-agent/src/main/python/ambari_agent/Controller.py
|
cglewis/ambari
|
e5d7e08e42baea2ad15784400c6f9e7ebb5f1608
|
[
"Apache-2.0"
] | null | null | null |
ambari-agent/src/main/python/ambari_agent/Controller.py
|
cglewis/ambari
|
e5d7e08e42baea2ad15784400c6f9e7ebb5f1608
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import logging.handlers
import signal
import json
import hostname
import sys, traceback
import time
import threading
import urllib2
from urllib2 import Request, urlopen, URLError
import httplib
import ssl
import AmbariConfig
import pprint
import ProcessHelper
from Heartbeat import Heartbeat
from Register import Register
from ActionQueue import ActionQueue
from optparse import OptionParser
from wsgiref.simple_server import ServerHandler
import security
from NetUtil import NetUtil
from random import randrange, randint
logger = logging.getLogger()
class Controller(threading.Thread):
def __init__(self, config, range=120):
threading.Thread.__init__(self)
logger.debug('Initializing Controller RPC thread.')
self.lock = threading.Lock()
self.safeMode = True
self.credential = None
self.config = config
self.hostname = hostname.hostname()
server_secured_url = 'https://' + config.get('server', 'hostname') + ':' + config.get('server', 'secured_url_port')
self.registerUrl = server_secured_url + '/agent/v1/register/' + self.hostname
self.heartbeatUrl = server_secured_url + '/agent/v1/heartbeat/' + self.hostname
self.netutil = NetUtil()
self.responseId = -1
self.repeatRegistration = False
self.cachedconnect = None
self.range = range
def start(self):
self.actionQueue = ActionQueue(self.config)
self.actionQueue.start()
self.register = Register(self.config)
self.heartbeat = Heartbeat(self.actionQueue)
pass
def __del__(self):
logger.info("Server connection disconnected.")
pass
def registerWithServer(self):
retry=False
firstTime=True
registered=False
id = -1
ret = {}
while not registered:
try:
data = json.dumps(self.register.build(id))
logger.info("Registering with the server " + pprint.pformat(data))
response = self.sendRequest(self.registerUrl, data)
ret = json.loads(response)
logger.info("Registered with the server with " + pprint.pformat(ret))
print("Registered with the server")
self.responseId= int(ret['responseId'])
registered = True
if 'statusCommands' in ret.keys():
logger.info("Got status commands on registration " + pprint.pformat(ret['statusCommands']) )
self.addToQueue(ret['statusCommands'])
pass
pass
except Exception, err:
# try a reconnect only after a certain amount of random time
delay = randint(0, self.range)
logger.info("Unable to connect to: " + self.registerUrl, exc_info = True)
""" Sleeping for {0} seconds and then retrying again """.format(delay)
time.sleep(delay)
pass
pass
return ret
def addToQueue(self, commands):
"""Add to the queue for running the commands """
""" Put the required actions into the Queue """
""" Verify if the action is to reboot or not """
if not commands:
logger.debug("No commands from the server : " + pprint.pformat(commands))
else:
"""Only add to the queue if not empty list """
for command in commands:
logger.debug("Adding command to the action queue: \n" +\
pprint.pformat(command))
self.actionQueue.put(command)
pass
pass
pass
# For testing purposes
DEBUG_HEARTBEAT_RETRIES = 0
DEBUG_SUCCESSFULL_HEARTBEATS = 0
DEBUG_STOP_HEARTBITTING = False
def heartbeatWithServer(self):
self.DEBUG_HEARTBEAT_RETRIES = 0
self.DEBUG_SUCCESSFULL_HEARTBEATS = 0
retry = False
certVerifFailed = False
config = AmbariConfig.config
hb_interval = config.get('heartbeat', 'state_interval')
#TODO make sure the response id is monotonically increasing
id = 0
while not self.DEBUG_STOP_HEARTBITTING:
try:
if not retry:
data = json.dumps(self.heartbeat.build(self.responseId, int(hb_interval)))
pass
else:
self.DEBUG_HEARTBEAT_RETRIES += 1
response = self.sendRequest(self.heartbeatUrl, data)
response = json.loads(response)
logger.debug('Got server response: ' + pprint.pformat(response))
serverId=int(response['responseId'])
if 'registrationCommand' in response.keys():
# check if the registration command is None. If none skip
if response['registrationCommand'] is not None:
logger.info("RegistrationCommand received - repeat agent registration")
self.repeatRegistration = True
return
if serverId!=self.responseId+1:
logger.error("Error in responseId sequence - restarting")
self.restartAgent()
else:
self.responseId=serverId
if 'executionCommands' in response.keys():
self.addToQueue(response['executionCommands'])
pass
if 'statusCommands' in response.keys():
self.addToQueue(response['statusCommands'])
pass
if "true" == response['restartAgent']:
logger.error("Got restartAgent command")
self.restartAgent()
else:
logger.info("No commands sent from the Server.")
pass
if retry:
print("Reconnected to the server")
logger.info("Reconnected to the server")
retry=False
certVerifFailed = False
self.DEBUG_SUCCESSFULL_HEARTBEATS += 1
self.DEBUG_HEARTBEAT_RETRIES = 0
except Exception, err:
#randomize the heartbeat
delay = randint(0, self.range)
time.sleep(delay)
if "code" in err:
logger.error(err.code)
else:
logger.error("Unable to connect to: " + self.heartbeatUrl + " due to " + str(err))
logger.debug("Details: " + str(err), exc_info=True)
if not retry:
print("Connection to the server was lost. Reconnecting...")
if 'certificate verify failed' in str(err) and not certVerifFailed:
print("Server certificate verify failed. Did you regenerate server certificate?")
certVerifFailed = True
self.cachedconnect = None # Previous connection is broken now
retry=True
if self.actionQueue.isIdle():
time.sleep(self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC)
else:
time.sleep(self.netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC)
pass
def run(self):
opener = urllib2.build_opener()
urllib2.install_opener(opener)
while True:
self.repeatRegistration = False
self.registerAndHeartbeat()
if not self.repeatRegistration:
break
pass
def registerAndHeartbeat(self):
registerResponse = self.registerWithServer()
message = registerResponse['response']
logger.info("Response from server = " + message)
time.sleep(self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC)
self.heartbeatWithServer()
def restartAgent(self):
ProcessHelper.restartAgent()
pass
def sendRequest(self, url, data):
if self.cachedconnect is None: # Lazy initialization
self.cachedconnect = security.CachedHTTPSConnection(self.config)
req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
response = self.cachedconnect.request(req)
return response
def main(argv=None):
# Allow Ctrl-C
signal.signal(signal.SIGINT, signal.SIG_DFL)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
%(message)s")
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info('Starting Server RPC Thread: %s' % ' '.join(sys.argv))
config = AmbariConfig.config
collector = Controller(config)
collector.start()
collector.run()
if __name__ == '__main__':
main()
| 32.919847
| 119
| 0.679536
|
b6f03431ce9601cc4e9ea79206d093135fa17be9
| 968
|
py
|
Python
|
examples/send_sms_advanced_tracking_ex.py
|
ubidreams/infobip-api-python-client
|
3e585bf00565627bd7da46a2c8f10b860faaeb8b
|
[
"Apache-2.0"
] | null | null | null |
examples/send_sms_advanced_tracking_ex.py
|
ubidreams/infobip-api-python-client
|
3e585bf00565627bd7da46a2c8f10b860faaeb8b
|
[
"Apache-2.0"
] | null | null | null |
examples/send_sms_advanced_tracking_ex.py
|
ubidreams/infobip-api-python-client
|
3e585bf00565627bd7da46a2c8f10b860faaeb8b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from infobip.api.model.sms.mt.send.Tracking import Tracking
from infobip.clients import send_multiple_textual_sms_advanced
from infobip.api.model.sms.mt.send.textual.SMSAdvancedTextualRequest import SMSAdvancedTextualRequest
from infobip.api.model.sms.mt.send.Message import Message
from infobip.api.model.Destination import Destination
from __init__ import configuration
send_sms_client = send_multiple_textual_sms_advanced(configuration)
dest = Destination()
dest.message_id = "message_111"
dest.to = "number1aaa"
message = Message()
message.from_ = "sender1"
message.text = "This is an example message. More information you can find on: http://dev.infobip.com/docs/fully-featured-textual-message"
message.destinations = [dest]
request = SMSAdvancedTextualRequest()
request.messages = [message]
tracking = Tracking()
tracking.set_track("URL")
request.set_tracking(tracking)
response = send_sms_client.execute(request)
print(response)
| 31.225806
| 137
| 0.809917
|
5d10c17c80b0a5c8c2e084a11e9bdc79e13cbb5e
| 3,558
|
py
|
Python
|
sharppy/tests/test_winds.py
|
skovic/SHARPpy
|
19175269ab11fe06c917b5d10376862a4716e1db
|
[
"BSD-3-Clause"
] | 163
|
2015-01-05T06:57:16.000Z
|
2022-03-15T04:19:42.000Z
|
sharppy/tests/test_winds.py
|
skovic/SHARPpy
|
19175269ab11fe06c917b5d10376862a4716e1db
|
[
"BSD-3-Clause"
] | 187
|
2015-01-20T05:30:55.000Z
|
2022-03-28T17:50:38.000Z
|
sharppy/tests/test_winds.py
|
skovic/SHARPpy
|
19175269ab11fe06c917b5d10376862a4716e1db
|
[
"BSD-3-Clause"
] | 110
|
2015-01-06T05:55:47.000Z
|
2022-03-15T18:40:21.000Z
|
import numpy as np
import numpy.ma as ma
import numpy.testing as npt
import sharppy.sharptab.winds as winds
import sharppy.sharptab.utils as utils
import sharppy.sharptab.interp as interp
from sharppy.sharptab.profile import Profile
import test_profile
prof = test_profile.TestProfile().prof
import time
def test_mean_wind():
returned = winds.mean_wind(prof)
correct_u, correct_v = 27.347100616691097, 1.7088123127933754
npt.assert_almost_equal(returned, [correct_u, correct_v])
def test_mean_wind_npw():
returned = winds.mean_wind_npw(prof)
correct_u, correct_v = 31.831128476043443, -0.40994804851302158
npt.assert_almost_equal(returned, [correct_u, correct_v])
def test_sr_wind():
input_stu = 10
input_stv = 10
returned = winds.sr_wind(prof, stu=input_stu, stv=input_stv)
correct_u, correct_v = 17.347100616691126, -8.2911876872066141
npt.assert_almost_equal(returned, [correct_u, correct_v])
def test_sr_wind_npw():
input_stu = 10
input_stv = 10
returned = winds.sr_wind_npw(prof, stu=input_stu, stv=input_stv)
correct_u, correct_v = 21.831128476043443, -10.40994804851302158
npt.assert_almost_equal(returned, [correct_u, correct_v])
def test_wind_shear():
agl1 = 0
agl2 = 1000
msl1 = interp.to_msl(prof, agl1)
msl2 = interp.to_msl(prof, agl2)
pbot = interp.pres(prof, msl1)
ptop = interp.pres(prof, msl2)
correct_u, correct_v = -2.625075135691132, 10.226725739920353
returned = winds.wind_shear(prof, pbot, ptop)
npt.assert_almost_equal(returned, [correct_u, correct_v])
def test_non_parcel_bunkers_motion():
correct = [10.532915762684453, -7.863859696750608,
20.924864405622614, 19.379065415942257]
returned = winds.non_parcel_bunkers_motion(prof)
npt.assert_almost_equal(returned, correct)
def test_helicity():
agl1 = 0.
agl2 = 3000.
input_ru = 10.5329157627
input_rv = -7.86385969675
correct = [284.9218078420389, 302.9305759626597, -18.008768120620786]
returned = winds.helicity(prof, agl1, agl2, stu=input_ru,
stv=input_rv, exact=True)
npt.assert_almost_equal(returned, correct)
correct = [285.00199936592099, 302.99422077416955, -17.992221408248568]
returned = winds.helicity(prof, agl1, agl2, stu=input_ru,
stv=input_rv, exact=False)
npt.assert_almost_equal(returned, correct)
"""
def test_max_wind():
agl1 = 0.
agl2 = 30000
correct = [73.860581475915609, -13.023613325019747, 179.]
returned = winds.max_wind(prof, agl1, agl2)
npt.assert_almost_equal(returned, correct)
correct_u = [73.86058147591561, 73.86058147591561]
correct_v = [-13.023613325019747, -13.023613325019747]
correct_p = [175.0, 172.64]
correct = [correct_u, correct_v, correct_p]
returned = winds.max_wind(prof, agl1, agl2, all=True)
npt.assert_almost_equal(returned, correct)
"""
def test_corfidi_mcs_motion():
correct = [34.597990416506541, -17.61022875300797,
64.319470111830171, -16.945587838431905]
returned = winds.corfidi_mcs_motion(prof)
npt.assert_almost_equal(returned, correct)
def test_mbe_vectors():
correct = [34.597990416506541, -17.61022875300797,
64.319470111830171, -16.945587838431905]
returned = winds.mbe_vectors(prof)
npt.assert_almost_equal(returned, correct)
def test_critical_angle():
correct = [169.2658597]
returned = [winds.critical_angle(prof)]
npt.assert_almost_equal(returned, correct)
| 32.054054
| 75
| 0.717257
|