hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f62f71f11c190d9d97f87487f6832615fa57c22f
| 6,647
|
py
|
Python
|
HMI_1h_synoptic_prep.py
|
bmampaey/SDO-auto-prep
|
ea34bcd70c261aaf158c31937e1ccf3086b48938
|
[
"MIT"
] | null | null | null |
HMI_1h_synoptic_prep.py
|
bmampaey/SDO-auto-prep
|
ea34bcd70c261aaf158c31937e1ccf3086b48938
|
[
"MIT"
] | null | null | null |
HMI_1h_synoptic_prep.py
|
bmampaey/SDO-auto-prep
|
ea34bcd70c261aaf158c31937e1ccf3086b48938
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import os
import logging
import signal
import argparse
from datetime import datetime, timedelta
from dateutil.parser import parse as date_parser
from glob import glob
import Queue
import threading
from IDL import SSWIDL
# The base directory of the input FITS files
input_file_dir = "/data/SDO/public/AIA_HMI_1h_synoptic/"
# The pattern of the input filenames
input_file_pattern = "{channel}/{year:04d}/{month:02d}/{day:02d}/*.fits"
# The base directory of the outpout FITS files
output_file_dir = "/data/SDO/public/AIA_HMI_1h_synoptic/"
# The pattern of the output filenames
output_file_pattern = "{channel}.prepped/{year:04d}/{month:02d}/{day:02d}/{filename}_prepped.fits"
# The default duration to process before now. Must be a timedelta
past_duration = timedelta(days=30)
# The maximum nuber of jobs that can be run before restarting IDL
max_job_count = 4
def aia_prep(jobs, timeout = 120, verbose = False, force = False):
''' aia_prep AIA/HMI FITS files by running sswidl with pexpect '''
# We setup the logging
# Should be changed to use the logging facility
if verbose:
logfile = sys.stdout
else:
logfile = None
# We create the sswidl prompt
idl = SSWIDL(SSW_instruments = ["aia"], IDL_STARTUP_path = "/home/sdo/auto_prep/idl_startup.pro", logfile=logfile)
if not idl.start():
logging.error("Could not start sswidl")
return
# We construct the aia_prep command template
aia_prep_cmd = "aia_prep, '{filename}', 0, outdir = '{outdir}', outfile = '{outfile}', /do_write_fits"
if verbose:
aia_prep_cmd += ", /verbose"
logging.debug("aia_prep command template: %s", aia_prep_cmd)
job = jobs.get()
job_count = 0
while job and not terminate_thread.is_set():
# Check if the file has not already been prepped
if not force and os.path.exists(os.path.join(job['outdir'], job['outfile'])):
logging.info("File %s was already aia_prepped, skipping.", filename)
else:
# Restart sswidl if we ran more than max_job_count, otherwise we get "program area code full" error messages from idl
if job_count > max_job_count:
logging.info('Reached max job count, restarting sswidl')
idl.stop()
if not idl.start():
logging.error("Could not restart sswidl")
return
job_count = 0
# We run aia_prep
logging.info("About to aia_prep file %s", job['filename'])
idl.run(aia_prep_cmd.format(filename = job['filename'], outdir = job['outdir'], outfile = job['outfile']), timeout = timeout)
job_count += 1
job = jobs.get()
# We stop sswidl
idl.stop()
logging.info("Stopping thread")
def terminate_gracefully(signal, frame):
logging.info("Received signal %s: Stopping threads", str(signal))
terminate_thread.set()
if __name__ == "__main__":
# Get the arguments
parser = argparse.ArgumentParser(description='Call the IDL aia_prep on HMI FITS files')
parser.add_argument('--debug', '-d', default=False, action='store_true', help='Debug output to screen')
parser.add_argument('--verbose', '-v', default=False, action='store_true', help='Verbose output to screen')
parser.add_argument('--force', '-f', default=False, action='store_true', help='Force to aia_prep files that have already been prepped')
parser.add_argument('--start_date', '-s', default=(datetime.utcnow() - past_duration).isoformat(), type=str, help='Start date of data to prep')
parser.add_argument('--end_date', '-e', default=datetime.utcnow().isoformat(), type=str, help='End date of data to prep')
parser.add_argument('--number_threads', '-n', default=4, type=int, help='Number of files to prep in parralel')
parser.add_argument('--timeout', '-t', default=120, type=int, help='Timeout for the prepping of 1 file')
parser.add_argument('--channels', '-c', default=['hmi.m_45s', 'hmi.ic_45s'], nargs='+', help='The HMI channels to prep')
args = parser.parse_args()
# Setup the logging
if args.debug:
logging.basicConfig(level = logging.DEBUG, format='%(levelname)-8s: %(message)s')
elif args.verbose:
logging.basicConfig(level = logging.INFO, format='%(levelname)-8s: %(message)s')
else:
logging.basicConfig(level = logging.CRITICAL, format='%(levelname)-8s: %(message)s')
# Parse the start and end date
try:
date = date_parser(args.start_date)
except ValueError, why:
logging.error("Unknown format for start_date %s", args.start_date)
sys.exit(2)
try:
end_date = date_parser(args.end_date)
except ValueError, why:
logging.error("Unknown format for end_date %s", args.end_date)
sys.exit(2)
logging.info("Prepping files from %s to %s", date, end_date)
# The terminate_thread will tell threads to terminate gracefully
terminate_thread = threading.Event()
# We setup the termination signal
signal.signal(signal.SIGINT, terminate_gracefully)
signal.signal(signal.SIGQUIT, terminate_gracefully)
signal.signal(signal.SIGTERM, terminate_gracefully)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
jobs = Queue.Queue()
# We start the threads
threads = list()
for t in range(args.number_threads):
thread = threading.Thread(name="aia_prep_"+str(t), target=aia_prep, args=(jobs, args.timeout, args.verbose, args.force))
thread.start()
threads.append(thread)
# Search the filenames and feed them to the threads
while date < end_date:
for channel in args.channels:
filenames = os.path.join(input_file_dir, input_file_pattern.format(year = date.year, month = date.month, day = date.day, channel = channel))
logging.debug("Looking for files %s", filenames)
filenames = sorted(glob(filenames))
logging.debug("Found files %s", filenames)
for filename in filenames:
output_filename = output_file_pattern.format(year = date.year, month = date.month, day = date.day, channel = channel, filename = os.path.splitext(os.path.basename(filename))[0])
outdir, outfile = os.path.split(os.path.join(output_file_dir, output_filename))
# We make sure the file has not already been preprocessed
if not args.force and os.path.exists(os.path.join(outdir, outfile)):
logging.debug("File %s was already aia_prepped, skipping.", filename)
else:
# Create the outdir if necessary
if not os.path.isdir(outdir):
try:
logging.info("Creating directory %s", outdir)
os.makedirs(outdir)
except Exception, why:
logging.error("Could not create output directory %s", outdir)
continue
logging.info("File %s will be aia_prepped.", filename)
jobs.put({"filename": filename, "outfile": outfile, "outdir": outdir})
date += timedelta(days=1)
for thread in threads:
jobs.put(None)
for thread in threads:
thread.join()
| 37.767045
| 181
| 0.719723
|
c90ea0d7fe68dfd747d0895a38db13a372b6bb37
| 5,613
|
py
|
Python
|
venv/lib/python3.7/site-packages/keyring/tests/test_backend.py
|
margretmwangi/Rblog
|
2d606a858c3313e1d48cdd6a8ce205c8776be754
|
[
"Unlicense"
] | 1
|
2021-04-05T02:52:30.000Z
|
2021-04-05T02:52:30.000Z
|
venv/lib/python3.7/site-packages/keyring/tests/test_backend.py
|
margretmwangi/Rblog
|
2d606a858c3313e1d48cdd6a8ce205c8776be754
|
[
"Unlicense"
] | 2
|
2021-09-13T16:05:02.000Z
|
2021-10-21T21:24:41.000Z
|
venv/lib/python3.7/site-packages/keyring/tests/test_backend.py
|
margretmwangi/Rblog
|
2d606a858c3313e1d48cdd6a8ce205c8776be754
|
[
"Unlicense"
] | 1
|
2021-04-07T09:11:05.000Z
|
2021-04-07T09:11:05.000Z
|
# coding: utf-8
"""
Common test functionality for backends.
"""
from __future__ import unicode_literals
import string
import pytest
from .util import random_string
from keyring import errors
__metaclass__ = type
# unicode only characters
# Sourced from The Quick Brown Fox... Pangrams
# http://www.columbia.edu/~fdc/utf8/
UNICODE_CHARS = (
"זהכיףסתםלשמועאיךתנצחקרפדעץטובבגן"
"ξεσκεπάζωτηνψυχοφθόραβδελυγμία"
"Съешьжеещёэтихмягкихфранцузскихбулокдавыпейчаю"
"Жълтатадюлябешещастливачепухъткойтоцъфназамръзнакатогьон"
)
# ensure no-ascii chars slip by - watch your editor!
assert min(ord(char) for char in UNICODE_CHARS) > 127
def is_ascii_printable(s):
return all(32 <= ord(c) < 127 for c in s)
class BackendBasicTests:
"""Test for the keyring's basic functions. password_set and password_get
"""
DIFFICULT_CHARS = string.whitespace + string.punctuation
def setUp(self):
self.keyring = self.init_keyring()
self.credentials_created = set()
def tearDown(self):
for item in self.credentials_created:
self.keyring.delete_password(*item)
def set_password(self, service, username, password):
# set the password and save the result so the test runner can clean
# up after if necessary.
self.keyring.set_password(service, username, password)
self.credentials_created.add((service, username))
def check_set_get(self, service, username, password):
keyring = self.keyring
# for the non-existent password
assert keyring.get_password(service, username) is None
# common usage
self.set_password(service, username, password)
assert keyring.get_password(service, username) == password
# for the empty password
self.set_password(service, username, "")
assert keyring.get_password(service, username) == ""
def test_password_set_get(self):
password = random_string(20)
username = random_string(20)
service = random_string(20)
self.check_set_get(service, username, password)
def test_difficult_chars(self):
password = random_string(20, self.DIFFICULT_CHARS)
username = random_string(20, self.DIFFICULT_CHARS)
service = random_string(20, self.DIFFICULT_CHARS)
self.check_set_get(service, username, password)
def test_delete_present(self):
password = random_string(20, self.DIFFICULT_CHARS)
username = random_string(20, self.DIFFICULT_CHARS)
service = random_string(20, self.DIFFICULT_CHARS)
self.keyring.set_password(service, username, password)
self.keyring.delete_password(service, username)
assert self.keyring.get_password(service, username) is None
def test_delete_not_present(self):
username = random_string(20, self.DIFFICULT_CHARS)
service = random_string(20, self.DIFFICULT_CHARS)
with pytest.raises(errors.PasswordDeleteError):
self.keyring.delete_password(service, username)
def test_delete_one_in_group(self):
username1 = random_string(20, self.DIFFICULT_CHARS)
username2 = random_string(20, self.DIFFICULT_CHARS)
password = random_string(20, self.DIFFICULT_CHARS)
service = random_string(20, self.DIFFICULT_CHARS)
self.keyring.set_password(service, username1, password)
self.set_password(service, username2, password)
self.keyring.delete_password(service, username1)
assert self.keyring.get_password(service, username2) == password
def test_name_property(self):
assert is_ascii_printable(self.keyring.name)
def test_unicode_chars(self):
password = random_string(20, UNICODE_CHARS)
username = random_string(20, UNICODE_CHARS)
service = random_string(20, UNICODE_CHARS)
self.check_set_get(service, username, password)
def test_unicode_and_ascii_chars(self):
source = (random_string(10, UNICODE_CHARS) + random_string(10)
+ random_string(10, self.DIFFICULT_CHARS))
password = random_string(20, source)
username = random_string(20, source)
service = random_string(20, source)
self.check_set_get(service, username, password)
def test_different_user(self):
"""
Issue #47 reports that WinVault isn't storing passwords for
multiple users. This test exercises that test for each of the
backends.
"""
keyring = self.keyring
self.set_password('service1', 'user1', 'password1')
self.set_password('service1', 'user2', 'password2')
assert keyring.get_password('service1', 'user1') == 'password1'
assert keyring.get_password('service1', 'user2') == 'password2'
self.set_password('service2', 'user3', 'password3')
assert keyring.get_password('service1', 'user1') == 'password1'
def test_credential(self):
keyring = self.keyring
cred = keyring.get_credential('service', None)
assert cred is None
self.set_password('service1', 'user1', 'password1')
self.set_password('service1', 'user2', 'password2')
cred = keyring.get_credential('service1', None)
assert cred is None or (cred.username, cred.password) in (
('user1', 'password1'),
('user2', 'password2'),
)
cred = keyring.get_credential('service1', 'user2')
assert cred is not None
assert (cred.username, cred.password) in (
('user1', 'password1'),
('user2', 'password2'),
)
| 35.301887
| 76
| 0.680563
|
31312070d94436ede8d223a14e9c560f187a6ec5
| 13,374
|
py
|
Python
|
allennlp/modules/token_embedders/pretrained_transformer_embedder.py
|
12seetharaman/allennlp
|
212035f23c4642b3f3bc850316fe0119f2053ab1
|
[
"Apache-2.0"
] | 1
|
2021-06-12T22:01:10.000Z
|
2021-06-12T22:01:10.000Z
|
allennlp/modules/token_embedders/pretrained_transformer_embedder.py
|
12seetharaman/allennlp
|
212035f23c4642b3f3bc850316fe0119f2053ab1
|
[
"Apache-2.0"
] | null | null | null |
allennlp/modules/token_embedders/pretrained_transformer_embedder.py
|
12seetharaman/allennlp
|
212035f23c4642b3f3bc850316fe0119f2053ab1
|
[
"Apache-2.0"
] | null | null | null |
import math
from typing import Optional, Tuple
from overrides import overrides
import torch
import torch.nn.functional as F
from transformers import XLNetConfig
from transformers.modeling_auto import AutoModel
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import batched_index_select
@TokenEmbedder.register("pretrained_transformer")
class PretrainedTransformerEmbedder(TokenEmbedder):
"""
Uses a pretrained model from `transformers` as a `TokenEmbedder`.
Registered as a `TokenEmbedder` with name "pretrained_transformer".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerIndexer`.
max_length : `int`, optional (default = `None`)
If positive, folds input token IDs into multiple segments of this length, pass them
through the transformer model independently, and concatenate the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerIndexer`.
sub_module: `str`, optional (default = `None`)
The name of a submodule of the transformer to be used as the embedder. Some transformers naturally act
as embedders such as BERT. However, other models consist of encoder and decoder, in which case we just
want to use the encoder.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
"""
def __init__(
self,
model_name: str,
max_length: int = None,
sub_module: str = None,
train_parameters: bool = True,
) -> None:
super().__init__()
self.transformer_model = AutoModel.from_pretrained(model_name)
self.config = self.transformer_model.config
if sub_module:
assert hasattr(self.transformer_model, sub_module)
self.transformer_model = getattr(self.transformer_model, sub_module)
self._max_length = max_length
# I'm not sure if this works for all models; open an issue on github if you find a case
# where it doesn't work.
self.output_dim = self.config.hidden_size
tokenizer = PretrainedTransformerTokenizer(model_name)
self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)
self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)
self._num_added_tokens = self._num_added_start_tokens + self._num_added_end_tokens
if not train_parameters:
for param in self.transformer_model.parameters():
param.requires_grad = False
@overrides
def get_output_dim(self):
return self.output_dim
def _number_of_token_type_embeddings(self):
if isinstance(self.config, XLNetConfig):
return 3 # XLNet has 3 type ids
elif hasattr(self.config, "type_vocab_size"):
return self.config.type_vocab_size
else:
return 0
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, e.g. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
segment_concat_mask: `Optional[torch.BoolTensor]`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
# Returns
`torch.Tensor`
Shape: `[batch_size, num_wordpieces, embedding_size]`.
"""
# Some of the huggingface transformers don't support type ids at all and crash when you supply
# them. For others, you can supply a tensor of zeros, and if you don't, they act as if you did.
# There is no practical difference to the caller, so here we pretend that one case is the same
# as another case.
if type_ids is not None:
max_type_id = type_ids.max()
if max_type_id == 0:
type_ids = None
else:
if max_type_id >= self._number_of_token_type_embeddings():
raise ValueError("Found type ids too large for the chosen transformer model.")
assert token_ids.shape == type_ids.shape
fold_long_sequences = self._max_length is not None and token_ids.size(1) > self._max_length
if fold_long_sequences:
batch_size, num_segment_concat_wordpieces = token_ids.size()
token_ids, segment_concat_mask, type_ids = self._fold_long_sequences(
token_ids, segment_concat_mask, type_ids
)
transformer_mask = segment_concat_mask if self._max_length is not None else mask
# Shape: [batch_size, num_wordpieces, embedding_size],
# or if self._max_length is not None:
# [batch_size * num_segments, self._max_length, embedding_size]
# We call this with kwargs because some of the huggingface models don't have the
# token_type_ids parameter and fail even when it's given as None.
# Also, as of transformers v2.5.1, they are taking FloatTensor masks.
parameters = {"input_ids": token_ids, "attention_mask": transformer_mask.float()}
if type_ids is not None:
parameters["token_type_ids"] = type_ids
embeddings = self.transformer_model(**parameters)[0]
if fold_long_sequences:
embeddings = self._unfold_long_sequences(
embeddings, segment_concat_mask, batch_size, num_segment_concat_wordpieces
)
return embeddings
def _fold_long_sequences(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.LongTensor, torch.LongTensor, Optional[torch.LongTensor]]:
"""
We fold 1D sequences (for each element in batch), returned by `PretrainedTransformerIndexer`
that are in reality multiple segments concatenated together, to 2D tensors, e.g.
[ [CLS] A B C [SEP] [CLS] D E [SEP] ]
-> [ [ [CLS] A B C [SEP] ], [ [CLS] D E [SEP] [PAD] ] ]
The [PAD] positions can be found in the returned `mask`.
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, i.e. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_segment_concat_wordpieces].
# Returns:
token_ids: `torch.LongTensor`
Shape: [batch_size * num_segments, self._max_length].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
"""
num_segment_concat_wordpieces = token_ids.size(1)
num_segments = math.ceil(num_segment_concat_wordpieces / self._max_length)
padded_length = num_segments * self._max_length
length_to_pad = padded_length - num_segment_concat_wordpieces
def fold(tensor): # Shape: [batch_size, num_segment_concat_wordpieces]
# Shape: [batch_size, num_segments * self._max_length]
tensor = F.pad(tensor, [0, length_to_pad], value=0)
# Shape: [batch_size * num_segments, self._max_length]
return tensor.reshape(-1, self._max_length)
return fold(token_ids), fold(mask), fold(type_ids) if type_ids is not None else None
def _unfold_long_sequences(
self,
embeddings: torch.FloatTensor,
mask: torch.BoolTensor,
batch_size: int,
num_segment_concat_wordpieces: int,
) -> torch.FloatTensor:
"""
We take 2D segments of a long sequence and flatten them out to get the whole sequence
representation while remove unnecessary special tokens.
[ [ [CLS]_emb A_emb B_emb C_emb [SEP]_emb ], [ [CLS]_emb D_emb E_emb [SEP]_emb [PAD]_emb ] ]
-> [ [CLS]_emb A_emb B_emb C_emb D_emb E_emb [SEP]_emb ]
We truncate the start and end tokens for all segments, recombine the segments,
and manually add back the start and end tokens.
# Parameters
embeddings: `torch.FloatTensor`
Shape: [batch_size * num_segments, self._max_length, embedding_size].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
batch_size: `int`
num_segment_concat_wordpieces: `int`
The length of the original "[ [CLS] A B C [SEP] [CLS] D E F [SEP] ]", i.e.
the original `token_ids.size(1)`.
# Returns:
embeddings: `torch.FloatTensor`
Shape: [batch_size, self._num_wordpieces, embedding_size].
"""
def lengths_to_mask(lengths, max_len, device):
return torch.arange(max_len, device=device).expand(
lengths.size(0), max_len
) < lengths.unsqueeze(1)
device = embeddings.device
num_segments = int(embeddings.size(0) / batch_size)
embedding_size = embeddings.size(2)
# We want to remove all segment-level special tokens but maintain sequence-level ones
num_wordpieces = num_segment_concat_wordpieces - (num_segments - 1) * self._num_added_tokens
embeddings = embeddings.reshape(batch_size, num_segments * self._max_length, embedding_size)
mask = mask.reshape(batch_size, num_segments * self._max_length)
# We assume that all 1s in the mask precede all 0s, and add an assert for that.
# Open an issue on GitHub if this breaks for you.
# Shape: (batch_size,)
seq_lengths = mask.sum(-1)
if not (lengths_to_mask(seq_lengths, mask.size(1), device) == mask).all():
raise ValueError(
"Long sequence splitting only supports masks with all 1s preceding all 0s."
)
# Shape: (batch_size, self._num_added_end_tokens); this is a broadcast op
end_token_indices = (
seq_lengths.unsqueeze(-1) - torch.arange(self._num_added_end_tokens, device=device) - 1
)
# Shape: (batch_size, self._num_added_start_tokens, embedding_size)
start_token_embeddings = embeddings[:, : self._num_added_start_tokens, :]
# Shape: (batch_size, self._num_added_end_tokens, embedding_size)
end_token_embeddings = batched_index_select(embeddings, end_token_indices)
embeddings = embeddings.reshape(batch_size, num_segments, self._max_length, embedding_size)
embeddings = embeddings[
:, :, self._num_added_start_tokens : -self._num_added_end_tokens, :
] # truncate segment-level start/end tokens
embeddings = embeddings.reshape(batch_size, -1, embedding_size) # flatten
# Now try to put end token embeddings back which is a little tricky.
# The number of segment each sequence spans, excluding padding. Mimicking ceiling operation.
# Shape: (batch_size,)
num_effective_segments = (seq_lengths + self._max_length - 1) / self._max_length
# The number of indices that end tokens should shift back.
num_removed_non_end_tokens = (
num_effective_segments * self._num_added_tokens - self._num_added_end_tokens
)
# Shape: (batch_size, self._num_added_end_tokens)
end_token_indices -= num_removed_non_end_tokens.unsqueeze(-1)
assert (end_token_indices >= self._num_added_start_tokens).all()
# Add space for end embeddings
embeddings = torch.cat([embeddings, torch.zeros_like(end_token_embeddings)], 1)
# Add end token embeddings back
embeddings.scatter_(
1, end_token_indices.unsqueeze(-1).expand_as(end_token_embeddings), end_token_embeddings
)
# Now put back start tokens. We can do this before putting back end tokens, but then
# we need to change `num_removed_non_end_tokens` a little.
embeddings = torch.cat([start_token_embeddings, embeddings], 1)
# Truncate to original length
embeddings = embeddings[:, :num_wordpieces, :]
return embeddings
| 45.335593
| 110
| 0.66719
|
748215b6da66f5ae1eff827082cc9066f02f2bd5
| 8,929
|
py
|
Python
|
train.py
|
bhsimon0810/attentive-reader
|
67d70938545523656ec50152a4c7049ec053b54f
|
[
"MIT"
] | null | null | null |
train.py
|
bhsimon0810/attentive-reader
|
67d70938545523656ec50152a4c7049ec053b54f
|
[
"MIT"
] | null | null | null |
train.py
|
bhsimon0810/attentive-reader
|
67d70938545523656ec50152a4c7049ec053b54f
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
import datetime
import numpy as np
import tensorflow as tf
from model import Reader
from dataset import Dataset
from utils import gen_embeddings, load_dict
# Parameters
# ==================================================
# Data loading params
tf.flags.DEFINE_string("train_file", "data/cnn-train.pkl", "Data source for the training data.")
tf.flags.DEFINE_string("dev_file", "data/cnn-dev.pkl", "Data source for the validating data.")
tf.flags.DEFINE_string("word_dict_file", "data/cnn-word-dict.pkl", "Data source for the word dict.")
tf.flags.DEFINE_string("entity_dict_file", "data/cnn-entity-dict.pkl", "Data source for the entity dict.")
# Model Hyperparameters
tf.flags.DEFINE_string("cell_type", "gru", "Type of rnn cell. Choose 'vanilla' or 'lstm' or 'gru' (Default: gru)")
tf.flags.DEFINE_integer("emb_size", 50, "Dimensionality of character embedding (default: 200)")
tf.flags.DEFINE_integer("hid_size", 50, "Dimensionality of rnn cell units (Default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (Default: 64)")
tf.flags.DEFINE_integer("num_epochs", 10, "Number of training epochs (Default: 100)")
tf.flags.DEFINE_float("max_grad_norm", 5.0,
"Maximum value of the global norm of the gradients for clipping (default: 5.0)")
# tf.flags.DEFINE_boolean("debug", True, "Whether it is debug mode i.e. use only first 100 examples")
# tf.flags.DEFINE_integer("display_every", 10, "Number of iterations to display training info.")
# tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps")
# tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store")
tf.flags.DEFINE_float("learning_rate", 1e-3, "Which learning rate to start with. (Default: 1e-3)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{} = {}".format(attr.upper(), value))
print("")
def train():
print('-' * 50)
print('Load data files..')
print('*' * 10 + ' Train')
train_examples = Dataset(FLAGS.train_file)
print('*' * 10 + ' Dev')
dev_examples = Dataset(FLAGS.dev_file)
print('-' * 50)
print('Build dictionary..')
word_dict, entity_dict = load_dict(FLAGS.word_dict_file, FLAGS.entity_dict_file)
print('-' * 50)
# Load embedding file
embeddings = gen_embeddings(word_dict, FLAGS.emb_size, "data/glove.6B.{}d.txt".format(FLAGS.emb_size))
print('-' * 50)
print('Creating TF computation graph...')
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement
)
sess = tf.Session(config=session_conf)
with sess.as_default():
reader = Reader(
cell_type=FLAGS.cell_type,
hid_size=FLAGS.hid_size,
emb_size=FLAGS.emb_size,
vocab_size=len(word_dict),
num_labels=len(entity_dict),
pretrained_embs=embeddings,
l2_reg_lambda=FLAGS.l2_reg_lambda
)
# Define training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
train_op = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(reader.loss, global_step=global_step)
acc, acc_op = tf.metrics.accuracy(labels=reader.y, predictions=reader.predictions, name="metrics/acc")
metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="metrics")
metrics_init_op = tf.variables_initializer(var_list=metrics_vars)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
print("writing to {}\n".format(out_dir))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar("loss", reader.loss)
acc_summary = tf.summary.scalar("accuracy", reader.accuracy)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, acc_summary])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# dev summaries
dev_step = 0
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
# initialize all variables
best_dev_acc = 0.0
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
for epoch in range(FLAGS.num_epochs):
print('-' * 50)
print('{}> epoch: {}'.format(datetime.datetime.now().isoformat(), epoch))
# print('Start training...')
for batch in train_examples.batch_iter(FLAGS.batch_size, desc="Training", shuffle=True):
mb_x1, mb_x1_lengths, mb_x2, mb_x2_lengths, mb_mask, mb_y = batch
feed_dict = {
reader.x1: mb_x1,
reader.x1_lengths: mb_x1_lengths,
reader.x2: mb_x2,
reader.x2_lengths: mb_x2_lengths,
reader.mask: mb_mask,
reader.y: mb_y,
reader.is_training: True,
reader.dropout_keep_prob: FLAGS.dropout_keep_prob
}
_, step, summaries, loss, accuracy, _ = sess.run(
[train_op, global_step, train_summary_op, reader.loss, reader.accuracy, acc_op], feed_dict
)
train_summary_writer.add_summary(summaries, step)
print("training accuracy = {:.2f}".format(sess.run(acc) * 100))
sess.run(metrics_init_op)
# Validating process
for batch in dev_examples.batch_iter(FLAGS.batch_size, desc="Validating", shuffle=False):
dev_step += 1
mb_x1, mb_x1_lengths, mb_x2, mb_x2_lengths, mb_mask, mb_y = batch
feed_dict = {
reader.x1: mb_x1,
reader.x1_lengths: mb_x1_lengths,
reader.x2: mb_x2,
reader.x2_lengths: mb_x2_lengths,
reader.mask: mb_mask,
reader.y: mb_y,
reader.is_training: False,
reader.dropout_keep_prob: 0.0
}
summaries, loss, accuracy, _ = sess.run(
[dev_summary_op, reader.loss, reader.accuracy, acc_op], feed_dict
)
dev_summary_writer.add_summary(summaries, global_step=dev_step)
dev_acc = sess.run(acc) * 100
print("validating accuracy = {:.2f}".format(dev_acc))
# model checkpoint
if dev_acc > best_dev_acc:
best_dev_acc = dev_acc
path = saver.save(sess, checkpoint_prefix)
print("saved model checkpoint to {}".format(path))
print("current best validating accuracy = {:.2f}".format(best_dev_acc))
print("{} optimization finished!".format(datetime.datetime.now()))
print("best validating accuracy = {:.2f}".format(best_dev_acc))
def main(_):
train()
if __name__ == '__main__':
tf.app.run()
| 47.748663
| 125
| 0.603875
|
40e2f3192f0b9e5fbfe9382ffe645eb3dc00f9a6
| 628
|
py
|
Python
|
scripts/quest/q1426e.py
|
cuongdt1994/v204.1
|
9b0e2d05bcb6d6c1cf2341a93267aa1b8c4bf7a5
|
[
"MIT"
] | 9
|
2021-04-26T11:59:29.000Z
|
2021-12-20T13:15:27.000Z
|
scripts/quest/q1426e.py
|
varenty-x/v203.4
|
359d6575ef8256bb2d6df87bf4156c4608243232
|
[
"MIT"
] | null | null | null |
scripts/quest/q1426e.py
|
varenty-x/v203.4
|
359d6575ef8256bb2d6df87bf4156c4608243232
|
[
"MIT"
] | 6
|
2021-07-14T06:32:05.000Z
|
2022-02-06T02:32:56.000Z
|
# [Job Adv] (Lv.30) Gunslinger of the Seven Seas
darkMarble = 4031013
job = "Gunslinger"
sm.setSpeakerID(1090000)
if sm.hasItem(darkMarble, 30):
sm.sendNext("I am impressed, you surpassed the test. Only few are talented enough.\r\n"
"You have proven yourself to be worthy, I shall mold your body into a #b"+ job +"#k.")
else:
sm.sendSayOkay("You have not retrieved the #t"+ darkMarble+"#s yet, I will be waiting.")
sm.dispose()
sm.consumeItem(darkMarble, 30)
sm.completeQuestNoRewards(parentID)
sm.setJob(520) # Gunslinger
sm.addSP(5)
sm.sendNext("You are now a #b"+ job +"#k.")
sm.dispose()
| 29.904762
| 102
| 0.686306
|
bb5e040798b190a627c0307ddee6a117d3a50962
| 138
|
py
|
Python
|
v1/chapter8/5-NltkTokenize.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
v1/chapter8/5-NltkTokenize.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
v1/chapter8/5-NltkTokenize.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
from nltk import word_tokenize
from nltk import Text
tokens = word_tokenize("Here is some not very interesting text")
text = Text(tokens)
| 27.6
| 64
| 0.797101
|
2c4907a110a283541cd4f98f925a3a0777ad2d00
| 11,681
|
py
|
Python
|
zipline/gens/tradesimulation.py
|
npezolano/zipline
|
71effa5e98bd0425ac1863e1861c9b51fbc77242
|
[
"Apache-2.0"
] | 1
|
2016-03-16T12:54:07.000Z
|
2016-03-16T12:54:07.000Z
|
zipline/gens/tradesimulation.py
|
Miles0918/zipline
|
e7a5e097c419bed7816d3cd6c370b5171db37b33
|
[
"Apache-2.0"
] | null | null | null |
zipline/gens/tradesimulation.py
|
Miles0918/zipline
|
e7a5e097c419bed7816d3cd6c370b5171db37b33
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logbook import Logger, Processor
from pandas.tslib import normalize_date
from zipline.finance import trading
from zipline.protocol import (
BarData,
SIDData,
DATASOURCE_TYPE
)
from zipline.gens.utils import hash_args
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def get_hash(self):
"""
There should only ever be one TSC in the system, so
we don't bother passing args into the hash.
"""
return self.__class__.__name__ + hash_args()
def __init__(self, algo, sim_params):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
# ==============
# Algo Setup
# ==============
self.algo = algo
self.algo_start = normalize_date(self.sim_params.first_open)
# ==============
# Snapshot Setup
# ==============
# The algorithm's data as of our most recent event.
# We want an object that will have empty objects as default
# values on missing keys.
self.current_data = BarData()
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
@property
def perf_key(self):
return self.EMISSION_TO_PERF_KEY_MAP[
self.algo.perf_tracker.emission_rate]
def process_event(self, event):
process_trade = self.algo.blotter.process_trade
for txn, order in process_trade(event):
self.algo.perf_tracker.process_event(txn)
self.algo.perf_tracker.process_event(order)
self.algo.perf_tracker.process_event(event)
def transform(self, stream_in):
"""
Main generator work loop.
"""
# Initialize the mkt_close
mkt_open = self.algo.perf_tracker.market_open
mkt_close = self.algo.perf_tracker.market_close
# inject the current algo
# snapshot time to any log record generated.
with self.processor.threadbound():
data_frequency = self.sim_params.data_frequency
self._call_before_trading_start(mkt_open)
for date, snapshot in stream_in:
self.simulation_dt = date
self.on_dt_changed(date)
# If we're still in the warmup period. Use the event to
# update our universe, but don't yield any perf messages,
# and don't send a snapshot to handle_data.
if date < self.algo_start:
for event in snapshot:
if event.type == DATASOURCE_TYPE.SPLIT:
self.algo.blotter.process_split(event)
elif event.type in (DATASOURCE_TYPE.TRADE,
DATASOURCE_TYPE.CUSTOM):
self.update_universe(event)
self.algo.perf_tracker.process_event(event)
else:
message = self._process_snapshot(
date,
snapshot,
self.algo.instant_fill,
)
# Perf messages are only emitted if the snapshot contained
# a benchmark event.
if message is not None:
yield message
# When emitting minutely, we re-iterate the day as a
# packet with the entire days performance rolled up.
if date == mkt_close:
if self.algo.perf_tracker.emission_rate == 'minute':
daily_rollup = self.algo.perf_tracker.to_dict(
emission_type='daily'
)
daily_rollup['daily_perf']['recorded_vars'] = \
self.algo.recorded_vars
yield daily_rollup
tp = self.algo.perf_tracker.todays_performance
tp.rollover()
if mkt_close <= self.algo.perf_tracker.last_close:
before_last_close = \
mkt_close < self.algo.perf_tracker.last_close
try:
mkt_open, mkt_close = \
trading.environment \
.next_open_and_close(mkt_close)
except trading.NoFurtherDataError:
# If at the end of backtest history,
# skip advancing market close.
pass
if (self.algo.perf_tracker.emission_rate
== 'minute'):
self.algo.perf_tracker\
.handle_intraday_market_close(
mkt_open,
mkt_close)
if before_last_close:
self._call_before_trading_start(mkt_open)
elif data_frequency == 'daily':
next_day = trading.environment.next_trading_day(date)
if (next_day is not None
and next_day
< self.algo.perf_tracker.last_close):
self._call_before_trading_start(next_day)
self.algo.portfolio_needs_update = True
self.algo.account_needs_update = True
self.algo.performance_needs_update = True
risk_message = self.algo.perf_tracker.handle_simulation_end()
yield risk_message
def _process_snapshot(self, dt, snapshot, instant_fill):
"""
Process a stream of events corresponding to a single datetime, possibly
returning a perf message to be yielded.
If @instant_fill = True, we delay processing of events until after the
user's call to handle_data, and we process the user's placed orders
before the snapshot's events. Note that this introduces a lookahead
bias, since the user effectively is effectively placing orders that are
filled based on trades that happened prior to the call the handle_data.
If @instant_fill = False, we process Trade events before calling
handle_data. This means that orders are filled based on trades
occurring in the next snapshot. This is the more conservative model,
and as such it is the default behavior in TradingAlgorithm.
"""
# Flags indicating whether we saw any events of type TRADE and type
# BENCHMARK. Respectively, these control whether or not handle_data is
# called for this snapshot and whether we emit a perf message for this
# snapshot.
any_trade_occurred = False
benchmark_event_occurred = False
if instant_fill:
events_to_be_processed = []
for event in snapshot:
if event.type == DATASOURCE_TYPE.TRADE:
self.update_universe(event)
any_trade_occurred = True
elif event.type == DATASOURCE_TYPE.BENCHMARK:
benchmark_event_occurred = True
elif event.type == DATASOURCE_TYPE.CUSTOM:
self.update_universe(event)
elif event.type == DATASOURCE_TYPE.SPLIT:
self.algo.blotter.process_split(event)
if not instant_fill:
self.process_event(event)
else:
events_to_be_processed.append(event)
if any_trade_occurred:
new_orders = self._call_handle_data()
for order in new_orders:
self.algo.perf_tracker.process_event(order)
if instant_fill:
# Now that handle_data has been called and orders have been placed,
# process the event stream to fill user orders based on the events
# from this snapshot.
for event in events_to_be_processed:
self.process_event(event)
if benchmark_event_occurred:
return self.get_message(dt)
else:
return None
def _call_handle_data(self):
"""
Call the user's handle_data, returning any orders placed by the algo
during the call.
"""
self.algo.event_manager.handle_data(
self.algo,
self.current_data,
self.simulation_dt,
)
orders = self.algo.blotter.new_orders
self.algo.blotter.new_orders = []
return orders
def _call_before_trading_start(self, dt):
dt = normalize_date(dt)
self.simulation_dt = dt
self.on_dt_changed(dt)
self.algo.before_trading_start()
def on_dt_changed(self, dt):
if self.algo.datetime != dt:
self.algo.on_dt_changed(dt)
def get_message(self, dt):
"""
Get a perf message for the given datetime.
"""
# Ensure that updated_portfolio has been called at least once for this
# dt before we emit a perf message. This is a no-op if
# updated_portfolio has already been called this dt.
self.algo.updated_portfolio()
self.algo.updated_account()
rvars = self.algo.recorded_vars
if self.algo.perf_tracker.emission_rate == 'daily':
perf_message = \
self.algo.perf_tracker.handle_market_close_daily()
perf_message['daily_perf']['recorded_vars'] = rvars
return perf_message
elif self.algo.perf_tracker.emission_rate == 'minute':
self.algo.perf_tracker.handle_minute_close(dt)
perf_message = self.algo.perf_tracker.to_dict()
perf_message['minute_perf']['recorded_vars'] = rvars
return perf_message
def update_universe(self, event):
"""
Update the universe with new event information.
"""
# Update our knowledge of this event's sid
# rather than use if event.sid in ..., just trying
# and handling the exception is significantly faster
try:
sid_data = self.current_data[event.sid]
except KeyError:
sid_data = self.current_data[event.sid] = SIDData(event.sid)
sid_data.__dict__.update(event.__dict__)
| 37.680645
| 79
| 0.566304
|
a08efd76e80db21932a56bd3069bf91cca0ca074
| 863
|
py
|
Python
|
data_sourcery/sources/images/base.py
|
tiagoprn/data_sourcery
|
4e847f42b228cb2359fa785119a2c21c1a12f656
|
[
"MIT"
] | null | null | null |
data_sourcery/sources/images/base.py
|
tiagoprn/data_sourcery
|
4e847f42b228cb2359fa785119a2c21c1a12f656
|
[
"MIT"
] | 7
|
2021-03-19T00:24:03.000Z
|
2022-01-13T01:12:47.000Z
|
data_sourcery/sources/images/base.py
|
tiagoprn/data_sourcery
|
4e847f42b228cb2359fa785119a2c21c1a12f656
|
[
"MIT"
] | null | null | null |
import os
class BaseImageDownloader:
local_repository_path = ''
remote_path = ''
def __init__(self, remote_path=''):
home_folder = os.environ.get('HOME')
local_repository_home = (f'{home_folder}/.local/'
f'share/data_sourcery')
self.local_repository_path = f'{local_repository_home}/images'
self.remote_path = remote_path
self.create_local_repository_if_not_exists()
def create_local_repository_if_not_exists(self):
if not os.path.exists(self.local_repository_path):
os.makedirs(self.local_repository_path, exist_ok=True)
def _download(self):
"""
Core download logic goes here.
"""
raise NotImplementedError
def download(self):
"""
Main funcion.
"""
raise NotImplementedError
| 27.83871
| 70
| 0.628042
|
8a8033971e3e0561994788b43e15e5dada3c0e08
| 536
|
py
|
Python
|
app/account/authentication.py
|
rogeriopaulos/gep
|
e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2
|
[
"MIT"
] | null | null | null |
app/account/authentication.py
|
rogeriopaulos/gep
|
e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2
|
[
"MIT"
] | 2
|
2021-09-02T04:22:45.000Z
|
2021-09-02T04:52:26.000Z
|
app/account/authentication.py
|
rogeriopaulos/gep
|
e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2
|
[
"MIT"
] | 1
|
2021-09-15T02:16:38.000Z
|
2021-09-15T02:16:38.000Z
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
class EmailAuthBackend(object):
def authenticate(self, username=None, password=None):
try:
user = User.objects.get(email=username)
if user.check_password(password):
return user
return None
except User.DoesNotExist:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
| 24.363636
| 57
| 0.591418
|
e47b42588e2677e296d9a7d716301d0a18bb5c0a
| 2,956
|
py
|
Python
|
word2vec/data/reader.py
|
sdliuyuzhi/word2vec-pytorch
|
2a3aa41983e8b655f1289eb2fc0524c3309f9280
|
[
"MIT"
] | null | null | null |
word2vec/data/reader.py
|
sdliuyuzhi/word2vec-pytorch
|
2a3aa41983e8b655f1289eb2fc0524c3309f9280
|
[
"MIT"
] | null | null | null |
word2vec/data/reader.py
|
sdliuyuzhi/word2vec-pytorch
|
2a3aa41983e8b655f1289eb2fc0524c3309f9280
|
[
"MIT"
] | 2
|
2018-12-23T20:42:28.000Z
|
2021-02-21T02:21:17.000Z
|
import logging
import os
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from word2vec.nlp.corpus import Corpus
from word2vec.nlp.tokenizer import UNK
class DataReader(Dataset):
def __init__(self, data_path, window_size=5, min_count=1, n_negs=6, batch_size=50, padding_idx=0):
self.data_path = data_path
self.window_size = window_size
self.min_count = min_count
self.n_negs = n_negs
self.batch_size = batch_size
self.padding_idx = padding_idx
self.corpus = Corpus(
data_path,
preprocessing=str.lower,
min_count=min_count,
)
self.corpus.profile_data()
self.contexts = []
self.centers = []
self.sliding_read()
def sliding_read(self):
logger = logging.getLogger(__name__)
tokenizer = self.corpus.tokenizer
preprocessing = self.corpus.preprocessing
context = []
sliding_window = []
r = self.window_size
padding_idx = self.padding_idx
logger.info(f"Sliding window {self.window_size}")
with tqdm(total=os.path.getsize(self.data_path)) as pbar:
with open(self.data_path, "r") as rs:
for line_id, line in enumerate(rs):
ids = self.corpus.vocab.encode_idx(
tokenizer(preprocessing(line))
)
len_ids = len(ids)
if len_ids:
sliding_window = [padding_idx] * r + ids + [padding_idx] * r
for i in range(r, r + len_ids):
self.contexts.append(
np.concatenate([
sliding_window[i-r:i],
sliding_window[i+1:i+1+r]
])
)
self.centers.append(sliding_window[i])
pbar.update(len(line))
pbar.set_postfix(line=line_id)
self.size_ = len(self.centers)
self.id_array_ = np.array(range(self.size_), dtype=np.int32)
self.unmask = np.full(self.size_, True, dtype=bool)
self.id_array = self.id_array_[self.unmask]
logger.info("Training data summary:")
logger.info(f"Number of training samples: {self.size_}")
def __len__(self):
return len(self.id_array)
def __getitem__(self, idx):
idx_ = self.id_array[idx]
return self.centers[idx_], self.contexts[idx_], self.corpus.neg_samples(size=self.n_negs)
def subsamples(self):
sub_sample_weights = self.corpus.sub_sample_weights
self.unmask = np.random.uniform(size=self.size_) < np.array([
sub_sample_weights[idx]
for idx in self.centers
])
self.id_array = self.id_array_[self.unmask]
| 35.614458
| 102
| 0.560555
|
fd15467d497238963a24b1819472591464bb3000
| 210
|
py
|
Python
|
template.py
|
fenna/BFVP3INF2_DEMO
|
75f9c86adfdfe20989a63af464a7f537a326bdc5
|
[
"CNRI-Python"
] | null | null | null |
template.py
|
fenna/BFVP3INF2_DEMO
|
75f9c86adfdfe20989a63af464a7f537a326bdc5
|
[
"CNRI-Python"
] | null | null | null |
template.py
|
fenna/BFVP3INF2_DEMO
|
75f9c86adfdfe20989a63af464a7f537a326bdc5
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/env python3
"""
description of the program
"""
__author__ = "my name"
import sys
def main(args):
return 0
if __name__ == "__main__":
exitcode = main(sys.argv)
sys.exit(exitcode)
| 10
| 29
| 0.638095
|
6a5050aff0ffccfbaa9ad42f2a37a24ddb170e80
| 2,187
|
py
|
Python
|
lib/googlecloudsdk/third_party/apis/securitycenter/v1beta1/resources.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/securitycenter/v1beta1/resources.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/googlecloudsdk/third_party/apis/securitycenter/v1beta1/resources.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://securitycenter.googleapis.com/v1beta1/'
DOCS_URL = 'https://console.cloud.google.com/apis/api/securitycenter.googleapis.com/overview'
class Collections(enum.Enum):
"""Collections for all supported apis."""
ORGANIZATIONS = (
'organizations',
'organizations/{organizationsId}',
{},
[u'organizationsId'],
True
)
ORGANIZATIONS_ASSETS = (
'organizations.assets',
'organizations/{organizationsId}/assets/{assetsId}',
{},
[u'organizationsId', u'assetsId'],
True
)
ORGANIZATIONS_OPERATIONS = (
'organizations.operations',
'{+name}',
{
'':
'organizations/{organizationsId}/operations/{operationsId}',
},
[u'name'],
True
)
ORGANIZATIONS_SOURCES = (
'organizations.sources',
'{+name}',
{
'':
'organizations/{organizationsId}/sources/{sourcesId}',
},
[u'name'],
True
)
ORGANIZATIONS_SOURCES_FINDINGS = (
'organizations.sources.findings',
'organizations/{organizationsId}/sources/{sourcesId}/findings/'
'{findingId}',
{},
[u'organizationsId', u'sourcesId', u'findingId'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
| 28.402597
| 93
| 0.656607
|
2c50105e7b020292295531bc31331004869defe3
| 3,059
|
py
|
Python
|
tools/poolout_tool.py
|
ThuYShao/pytorch-worker
|
1d5a576e0ad887d981c0aa06bdae6a23637b870b
|
[
"MIT"
] | null | null | null |
tools/poolout_tool.py
|
ThuYShao/pytorch-worker
|
1d5a576e0ad887d981c0aa06bdae6a23637b870b
|
[
"MIT"
] | null | null | null |
tools/poolout_tool.py
|
ThuYShao/pytorch-worker
|
1d5a576e0ad887d981c0aa06bdae6a23637b870b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'yshao'
import logging
import torch
import json
from torch.autograd import Variable
from timeit import default_timer as timer
from tools.eval_tool import gen_time_str, output_value
logger = logging.getLogger(__name__)
def load_state_keywise(model, pretrained_dict):
logger.info("load state keywise start ...")
model_dict = model.state_dict()
# print(model_dict.keys())
# input("continue?")
tmp_cnt = 0
for k, v in pretrained_dict.items():
# kk = k.replace("module.", "")
# print('k=', k)
# input("continue?")
# print('kk=', kk)
# input("continue?")
if k in model_dict and v.size() == model_dict[k].size():
model_dict[k] = v
tmp_cnt += 1
else:
continue
logger.info('tot #para=%d, load from pretrained #paras=%d' % (len(model_dict), tmp_cnt))
model.load_state_dict(model_dict)
return model
def pool_out(parameters, config, gpu_list, _outname):
model = parameters["model"]
dataset = parameters["test_dataset"]
model.eval()
acc_result = None
total_loss = 0
cnt = 0
total_len = len(dataset)
start_time = timer()
output_info = "Pool_Out"
output_time = config.getint("output", "output_time")
save_step = config.getint("output", "save_step")
step = -1
result = []
for step, data in enumerate(dataset):
for key in data.keys():
if isinstance(data[key], torch.Tensor):
if len(gpu_list) > 0:
data[key] = Variable(data[key].cuda())
else:
data[key] = Variable(data[key])
results = model(data, config, gpu_list, acc_result, "poolout")
result = result + results["output"]
cnt += 1
if step % output_time == 0:
delta_t = timer() - start_time
output_value(0, "poolout", "%d/%d" % (step + 1, total_len), "%s/%s" % (
gen_time_str(delta_t), gen_time_str(delta_t * (total_len - step - 1) / (step + 1))),
"%.3lf" % (total_loss / (step + 1)), output_info, '\r', config)
if save_step > 0 and step % save_step == 0:
out_file = open(_outname, 'w', encoding='utf-8')
for item in result:
tmp_dict = {
'id_': item[0],
'res': item[1]
}
out_line = json.dumps(tmp_dict, ensure_ascii=False) + '\n'
out_file.write(out_line)
out_file.close()
if step == -1:
logger.error("There is no data given to the model in this epoch, check your data.")
raise NotImplementedError
delta_t = timer() - start_time
output_info = "Pool_Out"
output_value(0, "poolout", "%d/%d" % (step + 1, total_len), "%s/%s" % (
gen_time_str(delta_t), gen_time_str(delta_t * (total_len - step - 1) / (step + 1))),
"%.3lf" % (total_loss / (step + 1)), output_info, None, config)
return result
| 31.536082
| 100
| 0.560968
|
3a2326e25eacd6aa055c595478f999562efd9a37
| 60,741
|
py
|
Python
|
src/app/voltdb/voltdb_src/lib/python/voltdbclient.py
|
OpenMPDK/SMDK
|
8f19d32d999731242cb1ab116a4cb445d9993b15
|
[
"BSD-3-Clause"
] | 44
|
2022-03-16T08:32:31.000Z
|
2022-03-31T16:02:35.000Z
|
src/app/voltdb/voltdb_src/lib/python/voltdbclient.py
|
H2O0Lee/SMDK
|
eff49bc17a55a83ea968112feb2e2f2ea18c4ff5
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T02:30:28.000Z
|
2022-03-30T03:40:46.000Z
|
src/app/voltdb/voltdb_src/lib/python/voltdbclient.py
|
H2O0Lee/SMDK
|
eff49bc17a55a83ea968112feb2e2f2ea18c4ff5
|
[
"BSD-3-Clause"
] | 18
|
2022-03-19T04:41:04.000Z
|
2022-03-31T03:32:12.000Z
|
#!/usr/bin/env python3
# This file is part of VoltDB.
# Copyright (C) 2008-2021 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import sys
if sys.hexversion < 0x03060000:
raise Exception("Python version 3.6 or greater is required.")
import array
import socket
import base64, textwrap
import struct
import datetime
import decimal
import hashlib
import re
import math
import os
import stat
try:
import ssl
ssl_available = True
except ImportError as e:
ssl_available = False
ssl_exception = e
try:
import gssapi
kerberos_available = True
except ImportError as e:
kerberos_available = False
kerberos_exception = e
try:
import jks
pyjks_available = True
except ImportError as e:
pyjks_available = False
pyjks_exception = e
logger = None
def use_logging():
import logging
global logger
logger = logging.getLogger()
def error(text):
if logger:
logger.error(text)
else:
print(text)
decimal.getcontext().prec = 38
def int16toBytes(val):
return [val >> 8 & 0xff,
val >> 0 & 0xff]
def int32toBytes(val):
return [val >> 24 & 0xff,
val >> 16 & 0xff,
val >> 8 & 0xff,
val >> 0 & 0xff]
def int64toBytes(val):
return [val >> 56 & 0xff,
val >> 48 & 0xff,
val >> 40 & 0xff,
val >> 32 & 0xff,
val >> 24 & 0xff,
val >> 16 & 0xff,
val >> 8 & 0xff,
val >> 0 & 0xff]
def isNaN(d):
# Per IEEE 754, 'NaN == NaN' must be false,
# so we cannot check for simple equality
if d == None:
return False
else: # routine misnamed, returns true for 'Inf' too
return math.isnan(d) or math.isinf(d)
class ReadBuffer(object):
"""
Read buffer management class.
"""
def __init__(self):
self.clear()
def clear(self):
self._buf = bytes()
self._off = 0
def buffer_length(self):
return len(self._buf)
def remaining(self):
return (len(self._buf) - self._off)
def get_buffer(self):
return self._buf
def append(self, content):
self._buf += content
def shift(self, size):
self._off += size
def read(self, size):
return self._buf[self._off:self._off+size]
def unpack(self, format, size):
try:
values = struct.unpack_from(format, self._buf, self._off)
except struct.error as e:
error('Exception unpacking %d bytes using format "%s": %s' % (size, format, str(e)))
raise e
self.shift(size)
return values
class FastSerializer:
"Primitive type de/serialization in VoltDB formats"
LITTLE_ENDIAN = '<'
BIG_ENDIAN = '>'
ARRAY = -99
# VoltType enumerations
VOLTTYPE_NULL = 1
VOLTTYPE_TINYINT = 3 # int8
VOLTTYPE_SMALLINT = 4 # int16
VOLTTYPE_INTEGER = 5 # int32
VOLTTYPE_BIGINT = 6 # int64
VOLTTYPE_FLOAT = 8 # float64
VOLTTYPE_STRING = 9
VOLTTYPE_TIMESTAMP = 11 # 8 byte long
VOLTTYPE_DECIMAL = 22 # fixed precision decimal
VOLTTYPE_MONEY = 20 # 8 byte long
VOLTTYPE_VOLTTABLE = 21
VOLTTYPE_VARBINARY = 25
VOLTTYPE_GEOGRAPHY_POINT = 26
VOLTTYPE_GEOGRAPHY = 27
# SQL NULL indicator for object type serializations (string, decimal)
NULL_STRING_INDICATOR = -1
NULL_DECIMAL_INDICATOR = -170141183460469231731687303715884105728
NULL_TINYINT_INDICATOR = -128
NULL_SMALLINT_INDICATOR = -32768
NULL_INTEGER_INDICATOR = -2147483648
NULL_BIGINT_INDICATOR = -9223372036854775808
NULL_FLOAT_INDICATOR = -1.7E308
# default decimal scale
DEFAULT_DECIMAL_SCALE = 12
# protocol constants
AUTH_HANDSHAKE_VERSION = 2
AUTH_SERVICE_NAME = 4
AUTH_HANDSHAKE = 5
# procedure call result codes
PROC_OK = 0
# there are assumptions here about datatype sizes which are
# machine dependent. the program exits with an error message
# if these assumptions are not true. it is further assumed
# that host order is little endian. See isNaN().
# default ssl configuration
if (ssl_available):
DEFAULT_SSL_CONFIG = {
'keyfile': None,
'certfile': None,
'cert_reqs': ssl.CERT_NONE,
'ca_certs': None,
'do_handshake_on_connect': True
}
else:
DEFAULT_SSL_CONFIG = {}
def __init__(self, host = None, port = 21212, usessl = False,
username = "", password = "",
kerberos = False,
dump_file_path = None,
connect_timeout = 8,
procedure_timeout = None,
default_timeout = None,
ssl_config_file = None,
ssl_config = DEFAULT_SSL_CONFIG):
"""
:param host: host string for connection or None
:param port: port for connection or None
:param usessl: switch for use ssl or not
:param username: authentication user name for connection or None
:param password: authentication password for connection or None
:param kerberos: use Kerberos authentication
:param dump_file_path: path to optional dump file or None
:param connect_timeout: timeout (secs) or None for authentication (default=8)
:param procedure_timeout: timeout (secs) or None for procedure calls (default=None)
:param default_timeout: default timeout (secs) or None for all other operations (default=None)
:param ssl_config_file: config file that defines java keystore and truststore files
"""
# connect a socket to host, port and get a file object
self.wbuf = array.array('B')
self.host = host
self.port = port
self.usessl = usessl
if kerberos is None:
self.usekerberos = False
else:
self.usekerberos = kerberos
self.kerberosprinciple = None
self.ssl_config = ssl_config
self.ssl_config_file = ssl_config_file
if not dump_file_path is None:
self.dump_file = open(dump_file_path, "wb")
else:
self.dump_file = None
self.default_timeout = default_timeout
self.procedure_timeout = procedure_timeout
self.socket = None
if self.host != None and self.port != None:
ai = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)[0]
# ai = (family, socktype, proto, canonname, sockaddr)
ss = socket.socket(ai[0], ai[1], ai[2])
if self.usessl:
if ssl_available:
self.socket = self.__wrap_socket(ss)
else:
error("ERROR: To use SSL functionality please install the Python ssl module.")
raise ssl_exception
else:
self.socket = ss
self.socket.setblocking(1)
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
try:
self.socket.connect(ai[4])
except Exception:
error("ERROR: Failed to connect to %s port %s" % (ai[4][0], ai[4][1]))
raise
#if self.usessl:
# print 'Cipher suite: ' + str(self.socket.cipher())
# input can be big or little endian
self.inputBOM = self.BIG_ENDIAN # byte order if input stream
self.localBOM = self.LITTLE_ENDIAN # byte order of host
# Type to reader/writer mappings
self.READER = {self.VOLTTYPE_NULL: self.readNull,
self.VOLTTYPE_TINYINT: self.readByte,
self.VOLTTYPE_SMALLINT: self.readInt16,
self.VOLTTYPE_INTEGER: self.readInt32,
self.VOLTTYPE_BIGINT: self.readInt64,
self.VOLTTYPE_FLOAT: self.readFloat64,
self.VOLTTYPE_STRING: self.readString,
self.VOLTTYPE_VARBINARY: self.readVarbinary,
self.VOLTTYPE_TIMESTAMP: self.readDate,
self.VOLTTYPE_DECIMAL: self.readDecimal,
self.VOLTTYPE_GEOGRAPHY_POINT: self.readGeographyPoint,
self.VOLTTYPE_GEOGRAPHY: self.readGeography}
self.WRITER = {self.VOLTTYPE_NULL: self.writeNull,
self.VOLTTYPE_TINYINT: self.writeByte,
self.VOLTTYPE_SMALLINT: self.writeInt16,
self.VOLTTYPE_INTEGER: self.writeInt32,
self.VOLTTYPE_BIGINT: self.writeInt64,
self.VOLTTYPE_FLOAT: self.writeFloat64,
self.VOLTTYPE_STRING: self.writeString,
self.VOLTTYPE_VARBINARY: self.writeVarbinary,
self.VOLTTYPE_TIMESTAMP: self.writeDate,
self.VOLTTYPE_DECIMAL: self.writeDecimal,
self.VOLTTYPE_GEOGRAPHY_POINT: self.writeGeographyPoint,
self.VOLTTYPE_GEOGRAPHY: self.writeGeography}
self.ARRAY_READER = {self.VOLTTYPE_TINYINT: self.readByteArray,
self.VOLTTYPE_SMALLINT: self.readInt16Array,
self.VOLTTYPE_INTEGER: self.readInt32Array,
self.VOLTTYPE_BIGINT: self.readInt64Array,
self.VOLTTYPE_FLOAT: self.readFloat64Array,
self.VOLTTYPE_STRING: self.readStringArray,
self.VOLTTYPE_TIMESTAMP: self.readDateArray,
self.VOLTTYPE_DECIMAL: self.readDecimalArray,
self.VOLTTYPE_GEOGRAPHY_POINT: self.readGeographyPointArray,
self.VOLTTYPE_GEOGRAPHY: self.readGeographyArray}
self.__compileStructs()
# Check if the value of a given type is NULL
self.NULL_DECIMAL_INDICATOR = \
self.__intToBytes(self.__class__.NULL_DECIMAL_INDICATOR, 0)
self.NullCheck = {self.VOLTTYPE_NULL:
lambda x: None,
self.VOLTTYPE_TINYINT:
lambda x: None if x == self.__class__.NULL_TINYINT_INDICATOR else x,
self.VOLTTYPE_SMALLINT:
lambda x: None if x == self.__class__.NULL_SMALLINT_INDICATOR else x,
self.VOLTTYPE_INTEGER:
lambda x: None if x == self.__class__.NULL_INTEGER_INDICATOR else x,
self.VOLTTYPE_BIGINT:
lambda x: None if x == self.__class__.NULL_BIGINT_INDICATOR else x,
self.VOLTTYPE_FLOAT:
lambda x: None if abs(x - self.__class__.NULL_FLOAT_INDICATOR) < 1e307 else x,
self.VOLTTYPE_STRING:
lambda x: None if x == self.__class__.NULL_STRING_INDICATOR else x,
self.VOLTTYPE_VARBINARY:
lambda x: None if x == self.__class__.NULL_STRING_INDICATOR else x,
self.VOLTTYPE_DECIMAL:
lambda x: None if x == self.NULL_DECIMAL_INDICATOR else x}
self.read_buffer = ReadBuffer()
if self.usekerberos:
if not kerberos_available:
raise RuntimeError("Requested Kerberos authentication but unable to import the GSSAPI package.")
if not self.has_ticket():
raise RuntimeError("Requested Kerberos authentication but no valid ticket found. Authenticate with Kerberos first.")
assert not self.socket is None
self.socket.settimeout(connect_timeout)
self.authenticate(str(self.kerberosprinciple), "")
elif not username is None and not password is None and not host is None:
assert not self.socket is None
self.socket.settimeout(connect_timeout)
self.authenticate(username, password)
if self.socket:
self.socket.settimeout(self.default_timeout)
# Front end to SSL socket support.
#
# The SSL config file contains a sequence of key=value lines which
# are used to provide the arguments to the SSL wrap_socket call:
# Key Value Provides arguments
# ------------------ ------------------------ ------------------
# keystore path to JKS keystore keyfile, certfile
# keystorepassword password for keystore --
# truststore path to JKS truststore ca_certs, cert_reqs
# truststorepassword password for truststore --
# cacerts path to PEM cert chain ca_certs, cert_reqs
# ssl_version ignored --
#
# Thus keystore identifies the client (rarely needed), whereas truststore
# and cacerts identify the server. If truststore and cacerts are both
# specified, cacerts takes precedence.
#
# An empty or missing ssl_config_file results in no certificate checks.
def __wrap_socket(self, ss):
parsed_config = {}
jks_config = {}
if self.ssl_config_file:
with open(os.path.expandvars(os.path.expanduser(self.ssl_config_file)), 'r') as f:
for line in f:
try:
l = line.strip()
if l:
k, v = l.split('=', 1)
parsed_config[k.lower()] = v
except:
raise ValueError('Malformed line in SSL config: ' + line)
if ('keystore' in parsed_config and parsed_config['keystore']) or \
('truststore' in parsed_config and parsed_config['truststore']):
self.__convert_jks_files(ss, parsed_config)
if 'cacerts' in parsed_config and parsed_config['cacerts']:
self.ssl_config['ca_certs'] = parsed_config['cacerts']
self.ssl_config['cert_reqs'] = ssl.CERT_REQUIRED
protocol = ssl.PROTOCOL_TLS
return ssl.wrap_socket(ss,
keyfile=self.ssl_config['keyfile'],
certfile=self.ssl_config['certfile'],
server_side=False,
cert_reqs=self.ssl_config['cert_reqs'],
ssl_version=protocol,
ca_certs=self.ssl_config['ca_certs'])
def __get_name_from_path(self, path):
x = re.sub('/','-', path)
y = re.sub('^-', '', x)
tmpdir = os.getenv('TMPDIR', '/tmp')
return tmpdir + '/' + y
def __convert_jks_files(self, ss, jks_config):
if not pyjks_available:
error("To use Java KeyStore please install the 'pyjks' module")
raise pyjks_exception
def write_pem(der_bytes, type, f):
f.write("-----BEGIN %s-----\n" % type)
f.write("\r\n".join(textwrap.wrap(base64.b64encode(der_bytes).decode('ascii'), 64)))
f.write("\n-----END %s-----\n" % type)
# extract key and certs
use_key_cert = False
if 'keystore' in jks_config and jks_config['keystore'] and \
'keystorepassword' in jks_config and jks_config['keystorepassword']:
ks = jks.KeyStore.load(jks_config['keystore'], jks_config['keystorepassword'])
kfname = self.__get_name_from_path(jks_config['keystore'])
keyfilename = kfname + '.key.pem'
keyfile = None
if not os.path.exists(keyfilename):
keyfile = self.__create(keyfilename)
certfilename = kfname + '.cert.pem'
certfile = None
if not os.path.exists(certfilename):
certfile = self.__create(certfilename)
for alias, pk in list(ks.private_keys.items()):
# print("Private key: %s" % pk.alias)
if keyfile is not None:
if pk.algorithm_oid == jks.util.RSA_ENCRYPTION_OID:
write_pem(pk.pkey, "RSA PRIVATE KEY", keyfile)
else:
write_pem(pk.pkey_pkcs8, "PRIVATE KEY", keyfile)
if certfile is not None:
for c in pk.cert_chain:
write_pem(c[1], "CERTIFICATE", certfile)
use_key_cert = True
if keyfile is not None:
keyfile.close()
if certfile is not None:
certfile.close()
if use_key_cert:
self.ssl_config['keyfile'] = keyfilename
self.ssl_config['certfile'] = certfilename
# extract ca certs
use_ca_cert = False
if 'truststore' in jks_config and jks_config['truststore'] and \
'truststorepassword' in jks_config and jks_config['truststorepassword']:
ts = jks.KeyStore.load(jks_config['truststore'], jks_config['truststorepassword'])
tfname = self.__get_name_from_path(jks_config['truststore'])
cafilename = tfname + '.ca.cert.pem'
cafile = None
if not os.path.exists(cafilename):
cafile = self.__create(cafilename)
for alias, c in list(ts.certs.items()):
# print("Certificate: %s" % c.alias)
if cafile is not None:
write_pem(c.cert, "CERTIFICATE", cafile)
cafile.close()
use_ca_cert = True
if use_ca_cert:
self.ssl_config['ca_certs'] = cafilename
self.ssl_config['cert_reqs'] = ssl.CERT_REQUIRED
def __create(self, filename):
f = open(filename, 'w')
os.chmod(filename, stat.S_IRUSR|stat.S_IWUSR)
return f
def __compileStructs(self):
# Compiled structs for each type
self.byteType = lambda length : '%c%db' % (self.inputBOM, length)
self.ubyteType = lambda length : '%c%dB' % (self.inputBOM, length)
self.int16Type = lambda length : '%c%dh' % (self.inputBOM, length)
self.int32Type = lambda length : '%c%di' % (self.inputBOM, length)
self.int64Type = lambda length : '%c%dq' % (self.inputBOM, length)
self.uint64Type = lambda length : '%c%dQ' % (self.inputBOM, length)
self.float64Type = lambda length : '%c%dd' % (self.inputBOM, length)
self.stringType = lambda length : '%c%ds' % (self.inputBOM, length)
self.varbinaryType = lambda length : '%c%ds' % (self.inputBOM, length)
def close(self):
if self.dump_file != None:
self.dump_file.close()
self.socket.close()
def authenticate(self, username, password):
# Requires sending a length preceded username and password even if
# authentication is turned off.
#protocol version
self.writeByte(1)
#sha256
self.writeByte(1)
# service requested
if (self.usekerberos):
self.writeString("kerberos")
else:
self.writeString("database")
if username:
# utf8 encode supplied username or kerberos principal name
self.writeString(username)
else:
# no username, just output length of 0
self.writeString("")
# password supplied, sha-256 hash it
m = hashlib.sha256()
encoded_password = password.encode("utf-8")
m.update(encoded_password)
pwHash = bytearray(m.digest())
self.wbuf.extend(pwHash)
self.prependLength()
self.flush()
# A length, version number, and status code is returned
try:
self.bufferForRead()
except IOError as e:
error("ERROR: Connection failed. Please check that the host, port, and ssl settings are correct.")
raise e
except socket.timeout:
raise RuntimeError("Authentication timed out after %d seconds."
% self.socket.gettimeout())
version = self.readByte()
status = self.readByte()
if (version == self.AUTH_HANDSHAKE_VERSION):
#service name supplied by VoltDB Server
service_string = self.readString().encode('ascii','ignore')
try:
service_name = gssapi.Name(service_string, name_type=gssapi.NameType.kerberos_principal)
ctx = gssapi.SecurityContext(name=service_name, mech=gssapi.MechType.kerberos)
in_token = None
out_token = ctx.step(in_token)
while not ctx.complete:
self.writeByte(self.AUTH_HANDSHAKE_VERSION)
self.writeByte(self.AUTH_HANDSHAKE)
self.wbuf.extend(out_token)
self.prependLength()
self.flush()
try:
self.bufferForRead()
except IOError as e:
error("ERROR: Connection failed. Please check that the host, port, and ssl settings are correct.")
raise e
except socket.timeout:
raise RuntimeError("Authentication timed out after %d seconds."
% self.socket.gettimeout())
version = self.readByte()
status = self.readByte()
if version != self.AUTH_HANDSHAKE_VERSION or status != self.AUTH_HANDSHAKE:
raise RuntimeError("Authentication failed.")
in_token = self.readVarbinaryContent(self.read_buffer.remaining()).tobytes()
out_token = ctx.step(in_token)
try:
self.bufferForRead()
except IOError as e:
error("ERROR: Connection failed. Please check that the host, port, and ssl settings are correct.")
raise e
except socket.timeout:
raise RuntimeError("Authentication timed out after %d seconds."
% self.socket.gettimeout())
version = self.readByte()
status = self.readByte()
except Exception as e:
raise RuntimeError("Authentication failed.")
if status != 0:
raise RuntimeError("Authentication failed.")
self.readInt32()
self.readInt64()
self.readInt64()
self.readInt32()
for x in range(self.readInt32()):
self.readByte()
def has_ticket(self):
'''
Checks to see if the user has a valid ticket.
'''
default_cred = None
retval = False
try:
default_cred = gssapi.creds.Credentials(usage='initiate')
if default_cred.lifetime > 0:
self.kerberosprinciple = str(default_cred.name)
retval = True
else:
error("ERROR: Kerberos principal found but login expired.")
except gssapi.raw.misc.GSSError as e:
error("ERROR: unable to find default principal from Kerberos cache.")
return retval
def setInputByteOrder(self, bom):
# assuming bom is high bit set?
if bom == 1:
self.inputBOM = self.LITTLE_ENDIAN
else:
self.inputBOM = self.BIG_ENDIAN
# recompile the structs
self.__compileStructs()
def prependLength(self):
# write 32 bit array length at offset 0, NOT including the
# size of this length preceding value. This value is written
# in the network order.
ttllen = self.wbuf.buffer_info()[1] * self.wbuf.itemsize
lenBytes = int32toBytes(ttllen)
#lenBytes = struct.pack(self.inputBOM + 'i', ttllen)
[self.wbuf.insert(0, x) for x in lenBytes[::-1]]
def size(self):
"""Returns the size of the write buffer.
"""
return (self.wbuf.buffer_info()[1] * self.wbuf.itemsize)
def flush(self):
if self.socket is None:
error("ERROR: not connected to server.")
raise IOError("No Connection")
if self.dump_file != None:
self.dump_file.write(self.wbuf)
self.dump_file.write(b"\n")
self.socket.sendall(self.wbuf.tobytes())
self.wbuf = array.array('B')
def bufferForRead(self):
if self.socket is None:
error("ERROR: not connected to server.")
raise IOError("No Connection")
# fully buffer a new length preceded message from socket
# read the length. the read until the buffer is completed.
responseprefix = bytes()
while (len(responseprefix) < 4):
responseprefix += self.socket.recv(4 - len(responseprefix))
if responseprefix == b'':
raise IOError("Connection broken")
if self.dump_file != None:
self.dump_file.write(responseprefix)
responseLength = struct.unpack(self.int32Type(1), responseprefix)[0]
self.read_buffer.clear()
remaining = responseLength
while remaining > 0:
message = self.socket.recv(remaining)
self.read_buffer.append(message)
remaining = responseLength - self.read_buffer.buffer_length()
if not self.dump_file is None:
self.dump_file.write(self.read_buffer.get_buffer())
self.dump_file.write(b"\n")
def read(self, type):
if type not in self.READER:
error("ERROR: can't read wire type(%d) yet." % (type))
raise IOError("ERROR: can't read wire type(%d) yet." % (type))
return self.READER[type]()
def write(self, type, value):
if type not in self.WRITER:
error("ERROR: can't write wire type(%d) yet." % (type))
raise IOError("ERROR: can't write wire type(%d) yet." % (type))
return self.WRITER[type](value)
def readWireType(self):
type = self.readByte()
return self.read(type)
def writeWireType(self, type, value):
if type not in self.WRITER:
error("ERROR: can't write wire type(%d) yet." % (type))
raise IOError("ERROR: can't write wire type(%d) yet." % (type))
self.writeByte(type)
return self.write(type, value)
def getRawBytes(self):
return self.wbuf
def writeRawBytes(self, value):
"""Appends the given raw bytes to the end of the write buffer.
"""
self.wbuf.extend(value)
def __str__(self):
return repr(self.wbuf)
def readArray(self, type):
if type not in self.ARRAY_READER:
error("ERROR: can't read wire type(%d) yet." % (type))
raise IOError("ERROR: can't write wire type(%d) yet." % (type))
return self.ARRAY_READER[type]()
def readNull(self):
return None
def writeNull(self, value):
return
def writeArray(self, type, array):
if (not array) or (len(array) == 0) or (not type):
return
if type not in self.ARRAY_READER:
error("ERROR: Unsupported date type (%d)." % (type))
raise IOError("ERROR: Unsupported date type (%d)." % (type))
# serialize arrays of bytes as larger values to support
# strings and varbinary input
if type != FastSerializer.VOLTTYPE_TINYINT:
self.writeInt16(len(array))
else:
self.writeInt32(len(array))
for i in array:
self.WRITER[type](i)
def writeWireTypeArray(self, type, array):
if type not in self.ARRAY_READER:
error("ERROR: can't write wire type(%d) yet." % (type))
raise IOError("ERROR: Unsupported date type (%d)." % (type))
self.writeByte(type)
self.writeArray(type, array)
# byte
def readByteArrayContent(self, cnt):
offset = cnt * struct.calcsize('b')
return self.read_buffer.unpack(self.byteType(cnt), offset)
def readByteArray(self):
length = self.readInt32()
val = self.readByteArrayContent(length)
val = list(map(self.NullCheck[self.VOLTTYPE_TINYINT], val))
return val
def readByte(self):
val = self.readByteArrayContent(1)[0]
return self.NullCheck[self.VOLTTYPE_TINYINT](val)
def readByteRaw(self):
val = self.readByteArrayContent(1)[0]
if val > 127:
return val - 256
else:
return val
def writeByte(self, value):
if value == None:
value = self.__class__.NULL_TINYINT_INDICATOR
if value < 0:
value += 256
self.wbuf.append(value)
# int16
def readInt16ArrayContent(self, cnt):
offset = cnt * struct.calcsize('h')
return self.read_buffer.unpack(self.int16Type(cnt), offset)
def readInt16Array(self):
length = self.readInt16()
val = self.readInt16ArrayContent(length)
val = list(map(self.NullCheck[self.VOLTTYPE_SMALLINT], val))
return val
def readInt16(self):
val = self.readInt16ArrayContent(1)[0]
return self.NullCheck[self.VOLTTYPE_SMALLINT](val)
def writeInt16(self, value):
if value == None:
val = self.__class__.NULL_SMALLINT_INDICATOR
else:
val = value
self.wbuf.extend(int16toBytes(val))
# int32
def readInt32ArrayContent(self, cnt):
offset = cnt * struct.calcsize('i')
return self.read_buffer.unpack(self.int32Type(cnt), offset)
def readInt32Array(self):
length = self.readInt16()
val = self.readInt32ArrayContent(length)
val = list(map(self.NullCheck[self.VOLTTYPE_INTEGER], val))
return val
def readInt32(self):
val = self.readInt32ArrayContent(1)[0]
return self.NullCheck[self.VOLTTYPE_INTEGER](val)
def writeInt32(self, value):
if value == None:
val = self.__class__.NULL_INTEGER_INDICATOR
else:
val = value
self.wbuf.extend(int32toBytes(val))
# int64
def readInt64ArrayContent(self, cnt):
offset = cnt * struct.calcsize('q')
return self.read_buffer.unpack(self.int64Type(cnt), offset)
def readInt64Array(self):
length = self.readInt16()
val = self.readInt64ArrayContent(length)
val = list(map(self.NullCheck[self.VOLTTYPE_BIGINT], val))
return val
def readInt64(self):
val = self.readInt64ArrayContent(1)[0]
return self.NullCheck[self.VOLTTYPE_BIGINT](val)
def writeInt64(self, value):
if value == None:
val = self.__class__.NULL_BIGINT_INDICATOR
else:
val = value
self.wbuf.extend(int64toBytes(val))
# float64
def readFloat64ArrayContent(self, cnt):
offset = cnt * struct.calcsize('d')
return self.read_buffer.unpack(self.float64Type(cnt), offset)
def readFloat64Array(self):
length = self.readInt16()
val = self.readFloat64ArrayContent(length)
val = list(map(self.NullCheck[self.VOLTTYPE_FLOAT], val))
return val
def readFloat64(self):
val = self.readFloat64ArrayContent(1)[0]
return self.NullCheck[self.VOLTTYPE_FLOAT](val)
def writeFloat64(self, value):
if value == None:
val = self.__class__.NULL_FLOAT_INDICATOR
else:
val = float(value)
ba = bytearray(struct.pack(self.float64Type(1), val))
self.wbuf.extend(ba)
# string
def readStringContent(self, cnt):
if cnt == 0:
return ""
offset = cnt * struct.calcsize('c')
val = self.read_buffer.unpack(self.stringType(cnt), offset)
return val[0].decode("utf-8")
def readString(self):
# length preceeded (4 byte value) string
length = self.readInt32()
if self.NullCheck[self.VOLTTYPE_STRING](length) == None:
return None
return self.readStringContent(length)
def readStringArray(self):
retval = []
cnt = self.readInt16()
for i in range(cnt):
retval.append(self.readString())
return tuple(retval)
def writeString(self, value):
if value is None:
self.writeInt32(self.NULL_STRING_INDICATOR)
return
encoded_value = value.encode("utf-8")
ba = bytearray(encoded_value)
self.writeInt32(len(encoded_value))
self.wbuf.extend(ba)
# varbinary
def readVarbinaryContent(self, cnt):
if cnt == 0:
return array.array('B', [])
offset = cnt * struct.calcsize('c')
val = self.read_buffer.unpack(self.varbinaryType(cnt), offset)
return array.array('B', val[0])
def readVarbinary(self):
# length preceeded (4 byte value) string
length = self.readInt32()
if self.NullCheck[self.VOLTTYPE_VARBINARY](length) == None:
return None
return self.readVarbinaryContent(length)
def writeVarbinary(self, value):
if value is None:
self.writeInt32(self.NULL_STRING_INDICATOR)
return
self.writeInt32(len(value))
self.wbuf.extend(value)
# date
# The timestamp we receive from the server is a 64-bit integer representing
# microseconds since the epoch. It will be converted to a datetime object in
# the local timezone.
def readDate(self):
raw = self.readInt64()
if raw == None:
return None
# microseconds before or after Jan 1, 1970 UTC
return datetime.datetime.fromtimestamp(raw/1000000.0)
def readDateArray(self):
retval = []
raw = self.readInt64Array()
for i in raw:
val = None
if i != None:
val = datetime.datetime.fromtimestamp(i/1000000.0)
retval.append(val)
return tuple(retval)
def writeDate(self, value):
if value is None:
val = self.__class__.NULL_BIGINT_INDICATOR
else:
seconds = int(value.strftime("%s"))
val = seconds * 1000000 + value.microsecond
self.wbuf.extend(int64toBytes(val))
def readDecimal(self):
offset = 16 * struct.calcsize('b')
if self.NullCheck[self.VOLTTYPE_DECIMAL](self.read_buffer.read(offset)) == None:
self.read_buffer.shift(offset)
return None
val = list(self.read_buffer.unpack(self.ubyteType(16), offset))
mostSignificantBit = 1 << 7
isNegative = (val[0] & mostSignificantBit) != 0
unscaledValue = -(val[0] & mostSignificantBit) << 120
# Clear the highest bit
# Unleash the powers of the butterfly
val[0] &= ~mostSignificantBit
# Get the 2's complement
for x in range(16):
unscaledValue += val[x] << ((15 - x) * 8)
unscaledValue = [int(x) for x in str(abs(unscaledValue))]
return decimal.Decimal((isNegative, tuple(unscaledValue),
-self.__class__.DEFAULT_DECIMAL_SCALE))
def readDecimalArray(self):
retval = []
cnt = self.readInt16()
for i in range(cnt):
retval.append(self.readDecimal())
return tuple(retval)
def __intToBytes(self, value, sign):
value_bytes = bytes()
if sign == 1:
value = ~value + 1 # 2's complement
# Turn into byte array
while value != 0 and value != -1:
byte = value & 0xff
# flip the high order bits to 1 only if the number is negative and
# this is the highest order byte
if value >> 8 == 0 and sign == 1:
mask = 1 << 7
while mask > 0 and (byte & mask) == 0:
byte |= mask
mask >> 1
value_bytes = struct.pack(self.ubyteType(1), byte) + value_bytes
value = value >> 8
if len(value_bytes) > 16:
raise ValueError("Precision of this decimal is >38 digits");
if sign == 1:
ret = struct.pack(self.ubyteType(1), 0xff)
else:
ret = struct.pack(self.ubyteType(1), 0)
# Pad it
ret *= 16 - len(value_bytes)
ret += value_bytes
return ret
def writeDecimal(self, num):
if num is None:
self.wbuf.extend(self.NULL_DECIMAL_INDICATOR)
return
if not isinstance(num, decimal.Decimal):
raise TypeError("num must be of the type decimal.Decimal")
(sign, digits, exponent) = num.as_tuple()
precision = len(digits)
scale = -exponent
if (scale > self.__class__.DEFAULT_DECIMAL_SCALE):
raise ValueError("Scale of this decimal is %d and the max is 12"
% (scale))
rest = precision - scale
if rest > 26:
raise ValueError("Precision to the left of the decimal point is %d"
" and the max is 26" % (rest))
scale_factor = self.__class__.DEFAULT_DECIMAL_SCALE - scale
unscaled_int = int(decimal.Decimal((0, digits, scale_factor)))
data = self.__intToBytes(unscaled_int, sign)
self.wbuf.extend(data)
def writeDecimalString(self, num):
if num is None:
self.writeString(None)
return
if not isinstance(num, decimal.Decimal):
raise TypeError("num must be of type decimal.Decimal")
self.writeString(num.to_eng_string())
# cash!
def readMoney(self):
# money-unit * 10,000
return self.readInt64()
def readGeographyPoint(self):
# returns a tuple of a pair of doubles representing lat,long
lng = self.readFloat64()
lat = self.readFloat64()
if (lat == Geography.NULL_COORD) and (lon == Geography.NULL_COORD):
return None
return (lng, lat)
def readGeographyPointArray(self):
retval = []
cnt = self.readInt16()
for i in range(cnt):
retval.append(self.readGeographyPoint())
return tuple(retval)
def writeGeographyPoint(self, point):
if point is None:
self.writeFloat64(Geography.NULL_COORD)
self.writeFloat64(Geography.NULL_COORD)
return
if not isinstance(num, tuple):
raise TypeError("point must be a 2-tuple of floats")
if len(tuple) != 2:
raise TypeError("point must be a 2-tuple of floats")
self.writeFloat64(point[0])
self.writeFloat64(point[1])
def readGeography(self):
return Geography.unflatten(self)
def readGeographyArray(self):
retval = []
cnt = self.readInt16()
for i in range(cnt):
retval.append(Geography.unflatten(self))
return tuple(retval)
def writeGeography(self, geo):
if geo is None:
writeInt32(NULL_STRING_INDICATOR)
else:
geo.flatten(self)
class XYZPoint(object):
"""
Google's S2 geometry library uses (x, y, z) representation of polygon vertices,
But the interface we expose to users is (lat, lng). This class is the
internal representation for vertices.
"""
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
@staticmethod
def fromGeographyPoint(p):
latRadians = p[0] * (math.pi / 180) # AKA phi
lngRadians = p[1] * (math.pi / 180) # AKA theta
cosPhi = math.cos(latRadians)
x = math.cos(lngRadians) * cosPhi
y = math.sin(lngRadians) * cosPhi
z = math.sin(latRadians)
return XYZPoint(x, y, z)
def toGeogrpahyPoint(self):
latRadians = math.atan2(self.z, math.sqrt(self.x * self.x + self.y * self.y))
lngRadians = math.atan2(self.y, self.x)
latDegrees = latRadians * (180 / math.pi)
lngDegrees = lngRadians * (180 / math.pi)
return (lngDegrees, latDegrees)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
"""Overrides the default implementation (unnecessary in Python 3)"""
return not self.__eq__(other)
def __str__(self):
p = self.toGeogrpahyPoint()
return "(%s,%s)" % (p[0], p[1])
class Geography(object):
"""
S2-esque geography element representing a polygon for now
"""
EPSILON = 1.0e-12
NULL_COORD = 360.0
def __init__(self, loops=[]):
self.loops = loops
# Serialization format for polygons.
#
# This is the format used by S2 in the EE. Most of the
# metadata (especially lat/lng rect bounding boxes) are
# ignored here in Java.
#
# 1 byte encoding version
# 1 byte boolean owns_loops
# 1 byte boolean has_holes
# 4 bytes number of loops
# And then for each loop:
# 1 byte encoding version
# 4 bytes number of vertices
# ((number of vertices) * sizeof(double) * 3) bytes vertices as XYZPoints
# 1 byte boolean origin_inside
# 4 bytes depth (nesting level of loop)
# 33 bytes bounding box
# 33 bytes bounding box
#
# We use S2 in the EE for all geometric computation, so polygons sent to
# the EE will be missing bounding box and other info. We indicate this
# by passing INCOMPLETE_ENCODING_FROM_JAVA in the version field. This
# tells the EE to compute bounding boxes and other metadata before storing
# the polygon to memory.
# for encoding byte + lat min, lat max, lng min, lng max as doubles
BOUND_LENGTH_IN_BYTES = 33
POLYGON_OVERHEAD_IN_BYTES = 7 + BOUND_LENGTH_IN_BYTES
# 1 byte for encoding version
# 4 bytes for number of vertices
# number of vertices * 8 * 3 bytes for vertices as XYZPoints
# 1 byte for origin_inside_
# 4 bytes for depth_
# length of bound
LOOP_OVERHEAD_IN_BYTES = 10 + BOUND_LENGTH_IN_BYTES
VERTEX_SIZE_IN_BYTES = 24
def serializedSize(self):
length = POLYGON_OVERHEAD_IN_BYTES
for loop in self.loops:
length += loopSerializedSize(loop);
return length
@staticmethod
def loopSerializedSize(loop):
LOOP_OVERHEAD_IN_BYTES + (len(loop) * VERTEX_SIZE_IN_BYTES)
@staticmethod
def unflatten(fs):
length = fs.readInt32() # size
if (length == fs.NULL_STRING_INDICATOR):
return None
version = fs.readByteRaw() # encoding version
fs.readByteRaw() # owns loops
fs.readByteRaw() # has holes
numLoops = fs.readInt32()
loops = []
indexOfOuterRing = 0
for i in range(numLoops):
depth, loop = Geography.__unflattenLoop(fs)
if depth == 0:
indexOfOuterRing = i
loops.append(loop)
Geography.__unflattenBound(fs)
return Geography(loops);
@staticmethod
def __unflattenLoop(fs):
# 1 byte for encoding version
# 4 bytes for number of vertices
# number of vertices * 8 * 3 bytes for vertices as XYZPoints
# 1 byte for origin_inside_
# 4 bytes for depth_
# length of bound
loop = []
fs.readByteRaw() # encoding version
numVertices = fs.readInt32()
for i in range(numVertices):
x = fs.readFloat64()
y = fs.readFloat64()
z = fs.readFloat64()
loop.append(XYZPoint(x, y, z))
fs.readByteRaw() # origin_inside_
depth = fs.readInt32() # depth
Geography.__unflattenBound(fs);
return (depth, loop)
@staticmethod
def __unflattenBound(fs):
fs.readByteRaw() # for encoding version
fs.readFloat64()
fs.readFloat64()
fs.readFloat64()
fs.readFloat64()
def flatten(self, fs):
fs.writeInt32(self.serializedSize()) # prepend length
fs.writeByte(0); # encoding version
fs.writeByte(1); # owns_loops
if len(self.loops) > 1: # has_holes
fs.writeByte(1)
else:
fs.writeByte(0)
fs.writeInt32(len(self.loops))
depth = 0
for loop in self.loops:
Geography.__flattenLoop(loop, depth, fs);
depth = 1;
Geography.__flattenEmptyBound(fs);
@staticmethod
def __flattenLoop(loop, depth, fs):
# 1 byte for encoding version
# 4 bytes for number of vertices
# number of vertices * 8 * 3 bytes for vertices as XYZPoints
# 1 byte for origin_inside_
# 4 bytes for depth_
# length of bound
fs.writeByte(0);
fs.writeInt32(len(loop))
for xyzp in loop:
fs.writeFloat64(xyzp.x)
fs.writeFloat64(xyzp.y)
fs.writeFloat64(xyzp.z)
fs.writeByte(0); # origin_inside
fs.writeInt32(depth); # depth
Geography.__flattenEmptyBound(fs);
@staticmethod
def __flattenEmptyBound(fs):
fs.writeByte(0); # for encoding version
fs.writeFloat64(Geography.NULL_COORD)
fs.writeFloat64(Geography.NULL_COORD)
fs.writeFloat64(Geography.NULL_COORD)
fs.writeFloat64(Geography.NULL_COORD)
@staticmethod
def formatPoint(point):
# auto convert XYZ points
if isinstance(point, XYZPoint):
point = point.toGeogrpahyPoint()
fmt = "{}"
#DecimalFormat df = new DecimalFormat("##0.0###########");
# Explicitly test for differences less than 1.0e-12 and
# force them to be zero. Otherwise you may find a case
# where two points differ in the less significant bits, but
# they format as the same number.
lng = point[0]
if lng < Geography.EPSILON:
lng = 0.0
lat = point[1]
if lat < Geography.EPSILON:
lat = 0.0
return fmt.format(lng) + " " + fmt.format(lat);
@staticmethod
def pointToWKT(point):
# auto convert XYZ points
if isinstance(point, XYZPoint):
point = point.toGeogrpahyPoint()
# This is not GEOGRAPHY_POINT. This is wkt syntax.
return "POINT (" + Geography.formatGeographyPoint(point) + ")"
wktPointMatcher = re.compile(r"^\s*point\s*\(\s*(-?\d+[\.\d*]*)\s+(-?\d+[\.\d*]*)\s*\)", flags=re.IGNORECASE)
@staticmethod
def pointFromWKT(wkt):
if wkt is None:
raise ValueError("None passed to pointFromWKT")
match = re.search()
lngStr = match.group(1)
latStr = match.group(2)
if latStr is None or lngStr is None:
return None
lng = float(lngStr)
lat = float(latStr)
return (lng, lat)
@staticmethod
def geographyFromWKT(wkt):
pass
def __str__(self):
# return representation in Well Known Text (WKT)
wkt = "POLYGON ("
isFirstLoop = True
for loop in self.loops:
if not isFirstLoop:
wkt += ", "
wkt += "("
# iterate backwards
startIdx = len(loop) - 1
endIdx = 0
increment = -1
# reverse direction for first loop
if isFirstLoop:
startIdx = 1
endIdx = len(loop)
increment = 1
wkt += Geography.formatPoint(loop[0]) + ", "
for idx in range(startIdx, endIdx, increment):
xyzp = loop[idx]
wkt += Geography.formatPoint(xyzp) + ", "
# Repeat the start vertex to close the loop as WKT requires.
wkt += Geography.formatPoint(loop[0]) + ")"
isFirstLoop = False
wkt += ")"
return wkt
def __repr__(self):
return self.__str__()
class VoltColumn:
"definition of one VoltDB table column"
def __init__(self, fser = None, type = None, name = None):
if fser != None:
self.type = fser.readByte()
self.name = None
elif type != None and name != None:
self.type = type
self.name = name
def __str__(self):
# If the name is empty, use the default "modified tuples". Has to do
# this because HSQLDB doesn't return a column name if the table is
# empty.
return "(%s: %d)" % (self.name or "modified tuples" ,
self.type)
def __eq__(self, other):
# For now, if we've been through the query on a column with no name,
# just assume that there's no way the types are matching up cleanly
# and there ain't no one for to give us no pain
if (not self.name or not other.name):
return True
return (self.type == other.type and self.name == other.name)
def readName(self, fser):
self.name = fser.readString()
def writeType(self, fser):
fser.writeByte(self.type)
def writeName(self, fser):
fser.writeString(self.name)
class VoltTable:
"definition and content of one VoltDB table"
def __init__(self, fser):
self.fser = fser
self.columns = [] # column definitions
self.tuples = []
def __str__(self):
result = ""
result += "column count: %d\n" % (len(self.columns))
result += "row count: %d\n" % (len(self.tuples))
result += "cols: "
result += ", ".join([str(x) for x in self.columns])
result += "\n"
result += "rows -\n"
result += "\n".join([str(["NULL" if y is None else y for y in x]) for x in self.tuples])
return result
def __getstate__(self):
return (self.columns, self.tuples)
def __setstate__(self, state):
self.fser = None
self.columns, self.tuples = state
def __eq__(self, other):
if len(self.tuples) > 0:
return (self.columns == other.columns) and \
(self.tuples == other.tuples)
return (self.tuples == other.tuples)
# The VoltTable is always serialized in big-endian order.
#
# How to read a table off the wire.
# 1. Read the length of the whole table
# 2. Read the columns
# a. read the column header size
# a. read the column count
# b. read column definitions.
# 3. Read the tuples count.
# a. read the row count
# b. read tuples recording string lengths
def readFromSerializer(self):
# 1.
tablesize = self.fser.readInt32()
limit_position = self.fser.read_buffer._off + tablesize
# 2.
headersize = self.fser.readInt32()
statuscode = self.fser.readByte()
columncount = self.fser.readInt16()
for i in range(columncount):
column = VoltColumn(fser = self.fser)
self.columns.append(column)
list([x.readName(self.fser) for x in self.columns])
# 3.
rowcount = self.fser.readInt32()
for i in range(rowcount):
rowsize = self.fser.readInt32()
# list comprehension: build list by calling read for each column in
# row/tuple
row = [self.fser.read(self.columns[j].type)
for j in range(columncount)]
self.tuples.append(row)
# advance offset to end of table-size on read_buffer
if self.fser.read_buffer._off != limit_position:
self.fser.read_buffer._off = limit_position
return self
def writeToSerializer(self):
table_fser = FastSerializer()
# We have to pack the header into a buffer first so that we can
# calculate the size
header_fser = FastSerializer()
header_fser.writeByte(0)
header_fser.writeInt16(len(self.columns))
list([x.writeType(header_fser) for x in self.columns])
list([x.writeName(header_fser) for x in self.columns])
table_fser.writeInt32(header_fser.size() - 4)
table_fser.writeRawBytes(header_fser.getRawBytes())
table_fser.writeInt32(len(self.tuples))
for i in self.tuples:
row_fser = FastSerializer()
list([row_fser.write(self.columns[x].type, i[x]) for x in range(len(i))])
table_fser.writeInt32(row_fser.size())
table_fser.writeRawBytes(row_fser.getRawBytes())
table_fser.prependLength()
self.fser.writeRawBytes(table_fser.getRawBytes())
class VoltException:
# Volt SerializableException enumerations
VOLTEXCEPTION_NONE = 0
VOLTEXCEPTION_EEEXCEPTION = 1
VOLTEXCEPTION_SQLEXCEPTION = 2
VOLTEXCEPTION_CONSTRAINTFAILURE = 3
VOLTEXCEPTION_GENERIC = 4
def __init__(self, fser):
self.type = self.VOLTEXCEPTION_NONE
self.typestr = "None"
self.message = ""
if fser != None:
self.deserialize(fser)
def deserialize(self, fser):
self.length = fser.readInt32()
if self.length == 0:
self.type = self.VOLTEXCEPTION_NONE
return
self.type = fser.readByte()
# quick and dirty exception skipping
if self.type == self.VOLTEXCEPTION_NONE:
return
self.message = []
self.message_len = fser.readInt32()
for i in range(0, self.message_len):
self.message.append(chr(fser.readByte()))
self.message = ''.join(self.message)
if self.type == self.VOLTEXCEPTION_GENERIC:
self.typestr = "Generic"
elif self.type == self.VOLTEXCEPTION_EEEXCEPTION:
self.typestr = "EE Exception"
# serialized size from EEException.java is 4 bytes
self.error_code = fser.readInt32()
elif self.type == self.VOLTEXCEPTION_SQLEXCEPTION or \
self.type == self.VOLTEXCEPTION_CONSTRAINTFAILURE:
self.sql_state_bytes = []
for i in range(0, 5):
self.sql_state_bytes.append(chr(fser.readByte()))
self.sql_state_bytes = ''.join(self.sql_state_bytes)
if self.type == self.VOLTEXCEPTION_SQLEXCEPTION:
self.typestr = "SQL Exception"
else:
self.typestr = "Constraint Failure"
self.constraint_type = fser.readInt32()
self.table_name = fser.readString()
self.buffer_size = fser.readInt32()
self.buffer = []
for i in range(0, self.buffer_size):
self.buffer.append(fser.readByte())
else:
for i in range(0, self.length - 3 - 2 - self.message_len):
fser.readByte()
error("Python client deserialized unknown VoltException.")
def __str__(self):
msgstr = "VoltException: type: %s\n" % self.typestr
if self.type == self.VOLTEXCEPTION_EEEXCEPTION:
msgstr += " Error code: %d\n" % self.error_code
elif self.type == self.VOLTEXCEPTION_SQLEXCEPTION:
msgstr += " SQL code: "
msgstr += self.sql_state_bytes
elif self.type == self.VOLTEXCEPTION_SQLEXCEPTION:
msgstr += " Constraint violation type: %d\n" + self.constraint_type
msgstr += " on table: %s\n" + self.table_name
return msgstr
class VoltResponse:
"VoltDB called procedure response (ClientResponse.java)"
def __init__(self, fser):
self.fser = fser
self.version = -1
self.clientHandle = -1
self.status = -1
self.statusString = ""
self.appStatus = -1
self.appStatusString = ""
self.roundtripTime = -1
self.exception = None
self.tables = None
if fser != None:
self.deserialize(fser)
def deserialize(self, fser):
# serialization order: response-length, status, roundtripTime, exception,
# tables[], info, id.
fser.bufferForRead()
self.version = fser.readByte()
self.clientHandle = fser.readInt64()
presentFields = fser.readByteRaw();
self.status = fser.readByte()
if presentFields & (1 << 5) != 0:
self.statusString = fser.readString()
else:
self.statusString = None
self.appStatus = fser.readByte()
if presentFields & (1 << 7) != 0:
self.appStatusString = fser.readString()
else:
self.appStatusString = None
self.roundtripTime = fser.readInt32()
if presentFields & (1 << 6) != 0:
self.exception = VoltException(fser)
else:
self.exception = None
# tables[]
tablecount = fser.readInt16()
self.tables = []
for i in range(tablecount):
table = VoltTable(fser)
self.tables.append(table.readFromSerializer())
def __str__(self):
tablestr=""
if self.tables != None:
tablestr = "\n\n".join([str(i) for i in self.tables])
if self.exception is None:
return "Status: %d\nInformation: %s\n%s" % (self.status,
self.statusString,
tablestr)
else:
msgstr = "Status: %d\nInformation: %s\n%s\n" % (self.status,
self.statusString,
tablestr)
msgstr += "Exception: %s" % (self.exception)
return msgstr
class VoltProcedure:
"VoltDB called procedure interface"
def __init__(self, fser, name, paramtypes = []):
self.fser = fser # FastSerializer object
self.name = name # procedure class name
self.paramtypes = paramtypes # list of fser.WIRE_* values
def call(self, params = None, response = True, timeout = None):
self.fser.writeByte(0) # version number
self.fser.writeString(self.name)
self.fser.writeInt64(1) # client handle
self.fser.writeInt16(len(self.paramtypes))
for i in range(len(self.paramtypes)):
try:
iter(params[i]) # Test if this is an array
if isinstance(params[i], str): # String is a special case
raise TypeError
self.fser.writeByte(FastSerializer.ARRAY)
self.fser.writeByte(self.paramtypes[i])
self.fser.writeArray(self.paramtypes[i], params[i])
except TypeError:
self.fser.writeWireType(self.paramtypes[i], params[i])
self.fser.prependLength() # prepend the total length of the invocation
self.fser.flush()
# The timeout in effect for the procedure call is the timeout argument
# if not None or self.procedure_timeout. Exceeding that time will raise
# a timeout exception. Restores the original timeout value when done.
# This default argument usage does not allow overriding with None.
if timeout is None:
timeout = self.fser.procedure_timeout
original_timeout = self.fser.socket.gettimeout()
self.fser.socket.settimeout(timeout)
try:
try:
res = VoltResponse(self.fser)
except socket.timeout:
res = VoltResponse(None)
res.statusString = "timeout: procedure call took longer than %d seconds" % timeout
except IOError as err:
res = VoltResponse(None)
res.statusString = str(err)
finally:
self.fser.socket.settimeout(original_timeout)
return response and res or None
| 36.220036
| 132
| 0.580728
|
0d862190c0eba884a1edccea5f414834e07c2ce6
| 4,079
|
py
|
Python
|
django/contrib/gis/tests/__init__.py
|
Smarsh/django
|
ffb738e0f56027e16564a79b709cbf44596c2335
|
[
"BSD-3-Clause"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
django/contrib/gis/tests/__init__.py
|
aprefontaine/TMScheduler
|
298a332532b9df1d3f6a80b1334630bc106d3b78
|
[
"BSD-3-Clause"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
django/contrib/gis/tests/__init__.py
|
aprefontaine/TMScheduler
|
298a332532b9df1d3f6a80b1334630bc106d3b78
|
[
"BSD-3-Clause"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
import sys
def run_tests(*args, **kwargs):
from django.test.simple import run_tests as base_run_tests
return base_run_tests(*args, **kwargs)
def geo_suite():
"""
Builds a test suite for the GIS package. This is not named
`suite` so it will not interfere with the Django test suite (since
spatial database tables are required to execute these tests on
some backends).
"""
from django.conf import settings
from django.contrib.gis.geos import GEOS_PREPARE
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.utils import HAS_GEOIP
from django.contrib.gis.tests.utils import postgis, mysql
from django.db import connection
from django.utils.importlib import import_module
gis_tests = []
# Adding the GEOS tests.
from django.contrib.gis.geos import tests as geos_tests
gis_tests.append(geos_tests.suite())
# Tests that require use of a spatial database (e.g., creation of models)
test_apps = ['geoapp', 'relatedapp']
if postgis and connection.ops.geography:
# Test geography support with PostGIS 1.5+.
test_apps.append('geogapp')
# Tests that do not require setting up and tearing down a spatial database.
test_suite_names = [
'test_measure',
]
if HAS_GDAL:
# These tests require GDAL.
if not mysql:
test_apps.append('distapp')
# Only PostGIS using GEOS 3.1+ can support 3D so far.
if postgis and GEOS_PREPARE:
test_apps.append('geo3d')
test_suite_names.extend(['test_spatialrefsys', 'test_geoforms'])
test_apps.append('layermap')
# Adding the GDAL tests.
from django.contrib.gis.gdal import tests as gdal_tests
gis_tests.append(gdal_tests.suite())
else:
print >>sys.stderr, "GDAL not available - no tests requiring GDAL will be run."
if HAS_GEOIP and hasattr(settings, 'GEOIP_PATH'):
test_suite_names.append('test_geoip')
# Adding the rest of the suites from the modules specified
# in the `test_suite_names`.
for suite_name in test_suite_names:
tsuite = import_module('django.contrib.gis.tests.' + suite_name)
gis_tests.append(tsuite.suite())
return gis_tests, test_apps
def run_gis_tests(test_labels, **kwargs):
"""
Use this routine as the TEST_RUNNER in your settings in order to run the
GeoDjango test suite. This must be done as a database superuser for
PostGIS, so read the docstring in `run_test()` below for more details.
"""
from django.conf import settings
from django.db.models import loading
from django.contrib.gis.tests.utils import mysql
# Getting initial values.
old_installed = settings.INSTALLED_APPS
old_root_urlconf = settings.ROOT_URLCONF
# Overridding the INSTALLED_APPS with only what we need,
# to prevent unnecessary database table creation.
new_installed = ['django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.gis',
]
# Setting the URLs.
settings.ROOT_URLCONF = 'django.contrib.gis.tests.urls'
# Creating the test suite, adding the test models to INSTALLED_APPS
# so they will be tested.
gis_tests, test_apps = geo_suite()
for test_model in test_apps:
module_name = 'django.contrib.gis.tests.%s' % test_model
new_installed.append(module_name)
# Resetting the loaded flag to take into account what we appended to
# the INSTALLED_APPS (since this routine is invoked through
# django/core/management, it caches the apps; this ensures that syncdb
# will see our appended models)
settings.INSTALLED_APPS = new_installed
loading.cache.loaded = False
kwargs['extra_tests'] = gis_tests
# Running the tests using the GIS test runner.
result = run_tests(test_labels, **kwargs)
# Restoring modified settings.
settings.INSTALLED_APPS = old_installed
settings.ROOT_URLCONF = old_root_urlconf
return result
| 35.163793
| 87
| 0.689385
|
2f09c823ce531a80d03537d90aca6c82534771ee
| 553
|
py
|
Python
|
stanCode_Projects/name_searching_system/test.py
|
kunyi1022/sc-projects
|
0ab0019b2cdc86c434a0acff39b862263dcbc970
|
[
"MIT"
] | null | null | null |
stanCode_Projects/name_searching_system/test.py
|
kunyi1022/sc-projects
|
0ab0019b2cdc86c434a0acff39b862263dcbc970
|
[
"MIT"
] | null | null | null |
stanCode_Projects/name_searching_system/test.py
|
kunyi1022/sc-projects
|
0ab0019b2cdc86c434a0acff39b862263dcbc970
|
[
"MIT"
] | null | null | null |
def main():
year = ''
with open('data/full/baby-1900.txt', 'r') as f:
for line in f:
if len(line) == 5:
year = line[:4]
else:
name_lst = line.split(",")
rank_b = name_lst[0]
name1_b = name_lst[1]
name2_b = name_lst[2]
rank = rank_b.strip()
name1 = name1_b.strip()
name2 = name2_b.strip()
print(f'{year} {rank} {name1} {name2}')
if __name__ == "__main__":
main()
| 26.333333
| 55
| 0.423146
|
e0f53805ab2f5ea01cd2f18208750b866d690319
| 15,976
|
py
|
Python
|
bot/logic/unit_manager/unit_manager.py
|
Scottdecat/HiveMind
|
cbd9de0645d756a63d65918f6c971753e1178652
|
[
"MIT"
] | null | null | null |
bot/logic/unit_manager/unit_manager.py
|
Scottdecat/HiveMind
|
cbd9de0645d756a63d65918f6c971753e1178652
|
[
"MIT"
] | null | null | null |
bot/logic/unit_manager/unit_manager.py
|
Scottdecat/HiveMind
|
cbd9de0645d756a63d65918f6c971753e1178652
|
[
"MIT"
] | null | null | null |
import math
import random
from collections import defaultdict
from math import sqrt
from typing import Union
import bot.injector as injector
from bot.logic.army_strategy_manager.army_strategy_manager_interface import \
ArmyStrategyManagerInterface
from bot.logic.overlord_manager import OverlordManager
from bot.logic.queen_manager.default_queen_manager import DefaultQueenManager
from bot.logic.spending_actions.default_spending_actions import \
DefaultSpendingActions
from bot.logic.unit_manager.priority_calculations import (
enemy_group_priority, unit_desperation_threshold, unit_to_group_priority)
from bot.model.unit_type_abstraction import UnitTypeAbstraction
from bot.services.action_service import ActionService
from bot.services.debug_service import DebugService
from bot.services.state_service import StateService
from bot.services.unit_group_service import UnitGroup, UnitGroupService
from bot.services.unit_type_service import UnitTypeService
from bot.util.mapping_functions import steep_decline
from bot.util.priority_queue import PriorityQueue
from bot.util.unit_type_utils import get_unit_origin_type, is_combat_unit
from sc2 import AbilityId, UnitTypeId
from sc2.position import Point3, Rect
from sc2.unit import Unit
from sc2.units import Units
from .assigned_group import AssignedGroup
from .group_tactics import GroupTactics, distance_from_boundary
from .micro import Micro
from .worker_distribution import WorkerDistributor
class UnitManager:
def __init__(self):
self.state: StateService = injector.inject(StateService)
self.debug_service: DebugService = injector.inject(DebugService)
self.group_service: UnitGroupService = injector.inject(UnitGroupService)
self.action_service: ActionService = injector.inject(ActionService)
self.unit_type: UnitTypeService = injector.inject(UnitTypeService)
self.group_tactics: GroupTactics = GroupTactics()
self.micro: Micro = Micro()
self.worker_distributor: WorkerDistributor = WorkerDistributor()
self.spending_actions = DefaultSpendingActions()
self.overlord_manager = OverlordManager()
self.queen_manager = DefaultQueenManager()
self.assigned_groups = []
self.previously_assigned_units = {}
self.proxy_scouts: Units = Units([])
self.proxy_scout_idx: int = 0
self.expansion_locations: list = []
def on_init(self) -> None:
expansion_locations = list(self.state._bot.expansion_locations.keys())
expansion_locations.append(self.state._bot.enemy_start_locations[0])
expansion_locations.sort(key=lambda p: p.distance_to(self.state._bot.enemy_start_locations[0]))
self.expansion_locations = expansion_locations
async def on_step(self):
unassigned = self.state.own_units
enemy_groups: PriorityQueue = self.state.enemy_groups
self.assigned_groups = self.assign_groups(unassigned, enemy_groups)
builder_units: Units = self.get_builder_units(unassigned, self.state.enemy_groups)
assigned_tags = set()
for g in self.assigned_groups:
assigned_tags = assigned_tags.union(self.group_tactics.manage_group(g))
await self.micro.micro_units(g.group.units, assigned_tags)
unassigned = unassigned.tags_not_in(assigned_tags)
a = await self.spending_actions.build(self.state.build_queue, builder_units)
unassigned = unassigned.tags_not_in(a.tags)
# saturate remaining workers
unassigned_workers = unassigned({UnitTypeId.DRONE, UnitTypeId.SCV, UnitTypeId.PROBE, UnitTypeId.MULE})
self.worker_distributor.distribute_workers(unassigned_workers)
unassigned = unassigned.tags_not_in(unassigned_workers.tags)
unassigned_overlords = unassigned({UnitTypeId.OVERLORD, UnitTypeId.OVERSEER})
await self.overlord_manager.on_step(unassigned_overlords)
unassigned = unassigned.tags_not_in(unassigned_overlords.tags)
unassigned_queens = unassigned({UnitTypeId.QUEEN})
unassigned = unassigned.tags_not_in(await self.queen_manager.on_step(unassigned_queens))
# use remaining units to do cool things
# scout enemy bases with idle units
#TODO ideally, should only reassign idle proxy scouts. However, can't really figure out how to get that working, so just putting this hack for now.
if unassigned.exists:
to_remove = set()
for s in self.proxy_scouts:
s: Unit
tag = s.tag
s = self.state.own_units.find_by_tag(s.tag)
if not s or s.tag not in unassigned.tags or s.is_idle:
to_remove.add(tag)
self.proxy_scouts = self.proxy_scouts.tags_not_in(to_remove)
missing_scouts = 4 - self.proxy_scouts.amount
new_scouts = unassigned({UnitTypeId.ZERGLING, UnitTypeId.ROACH}).sorted(lambda u: u.movement_speed, reverse=True).take(missing_scouts, require_all=False)
for scout in new_scouts:
scout: Unit
self.action_service.add(scout.tag, scout.move(random.choice(self.expansion_locations)))
self.proxy_scouts.append(scout)
unassigned = unassigned.tags_not_in(self.proxy_scouts.tags)
'''
if (int(self.state.getTimeInSeconds()) % 15) == 0:
self.reassign_proxy_scouts()
num_scouting_units = 4
if self.proxy_scouts.amount < num_scouting_units and self.state.mode == 'defend':
unassigned_scouts = unassigned.filter(self.is_scout)
unassigned_scouts = unassigned_scouts.sorted(lambda u: u.movement_speed, reverse=True).take(num_scouting_units - self.proxy_scouts.amount, require_all=False)
self.append_proxy_scouts(unassigned_scouts)
elif self.state.mode == 'attack':
unassigned_scouts = unassigned.filter(self.is_scout)
self.append_proxy_scouts(unassigned_scouts)
unassigned = unassigned.tags_not_in(self.proxy_scouts.tags)
'''
# idle position at nearest base
for unit in unassigned:
unit: Unit
if unit.movement_speed > 0 and unit.type_id not in {UnitTypeId.OVERLORD, UnitTypeId.LARVA}:
pos = unit.position.closest(self.state.own_townhalls).position
if unit.distance_to(pos) < 10:
self.action_service.add(unit.tag, unit.attack(pos))
else:
self.action_service.add(unit.tag, unit.move(pos))
self.debug_service.text_world(f'IDLE', unit.position3d, None, 16)
def priority_apply_unit_modifier(self, priority, enemy_group: UnitGroup, unit: Unit):
if enemy_group.range_hull:
dist = distance_from_boundary(enemy_group.range_hull, unit.position)
else:
dist = unit.position.distance_to(enemy_group.location)
dist = min(self.state.map_diagonal_len, max(0, dist))
dist_mod = dist / self.state.map_diagonal_len
dist_mod = (0.5 + steep_decline(dist_mod)) ** 2
# increase priority by if unit was assigned to this group in the last iteration
percentage_of_previously_assigned = 0
if unit.tag in self.previously_assigned_units:
intersection = enemy_group.units.tags.intersection(self.previously_assigned_units[unit.tag])
percentage_of_previously_assigned = len(intersection) / len(enemy_group.units.tags)
prev_mod = 1 + percentage_of_previously_assigned
return priority * dist_mod * prev_mod
class Temp:
units: Units
value = 0
ground_value = 0
air_value = 0
cloak = 0
retreaters: Units
def unit_activation_function(self, unit: Unit, priority, enemy_group: UnitGroup, oversaturation = 0):
if unit.type_id == UnitTypeId.DRONE:
return priority > 0.1 and enemy_group.units.exclude_type(UnitTypeId.REAPER).exists and (oversaturation == 0 and (
(
unit.distance_to(enemy_group.units.center) < 15
and (self.state.own_townhalls.exists
and (unit.distance_to(self.state.own_townhalls.closest_to(enemy_group.location).position) < 20))
)
or (enemy_group.location.distance_to(self.state.own_natural_position) < 10)
or (
enemy_group.units({UnitTypeId.PHOTONCANNON, UnitTypeId.PYLON}).exists
and enemy_group.location.distance_to_closest(self.state.own_townhalls) < 20
))
) or (
enemy_group.value > 100
and enemy_group.units.exclude_type({UnitTypeId.SCV, UnitTypeId.PROBE, UnitTypeId.DRONE}).exists
and enemy_group.range_hull
and distance_from_boundary(enemy_group.range_hull, unit.position) <= 1
and unit.position.distance_to_closest(self.state.own_townhalls) < 15
)
elif unit.type_id == UnitTypeId.QUEEN:
return priority > 0 and (enemy_group.location.distance_to_closest(self.state.own_townhalls.ready) < 20 or (self.state._bot.has_creep(enemy_group.location) and enemy_group.location.distance_to_closest(self.state.own_townhalls.ready) < 30))
elif unit.type_id in {UnitTypeId.CHANGELING, UnitTypeId.CHANGELINGMARINE, UnitTypeId.CHANGELINGMARINESHIELD, UnitTypeId.CHANGELINGZEALOT, UnitTypeId.CHANGELINGZERGLING, UnitTypeId.CHANGELINGZERGLINGWINGS}:
return False
elif unit.type_id == UnitTypeId.OVERSEER:
return enemy_group.cloak_value > 0
else:
return priority > 0
def assign_groups(self, unassigned: Units, priorities: PriorityQueue):
groups = []
units = unassigned.not_structure.filter(is_combat_unit)
d = {}
for enemy_group in priorities:
t = self.Temp()
t.units = Units([])
t.retreaters = Units([])
d[enemy_group] = t
#assign army units
unit_to_priorities = defaultdict(PriorityQueue)
for unit in units:
for p in priorities.iterate2():
priority = self.priority_apply_unit_modifier(p[1], p[0], unit)
unit_to_priorities[unit].enqueue(p[0], priority)
#sort units so that those who have a very high priority for the first enemy group
#and low priority for the rest are assigned first
def sort_by_diff(unit: Unit):
s = 0
if not unit_to_priorities[unit].isEmpty():
prio = unit_to_priorities[unit].peek2()
enemy_group = prio[0]
priority = prio[1]
percentage_of_previously_assigned = 0
if unit.tag in self.previously_assigned_units:
intersection = enemy_group.units.tags.intersection(self.previously_assigned_units[unit.tag])
percentage_of_previously_assigned = len(intersection) / len(enemy_group.units.tags)
s = 0.5 + (percentage_of_previously_assigned / 2) * priority
return s
if not priorities.isEmpty():
units = units.sorted(sort_by_diff, True)
##
for unit in units:
unit: Unit
sorted_enemy_groups = PriorityQueue()
for p in unit_to_priorities[unit].iterate2():
priority = p[1]
own_val = d[p[0]].value
enemy_val = p[0].value
#should_fight, val = self.group_tactics.evaluate_engagement(d[p[0]], p[0], show_group_vals=True)
#own_val, enemy_val = val
# dont send lings vs voidrays
if not unit.can_attack_air:
# TODO performance: dont recalculate for every unit
priority -= p[0].percentage_of_air_in_group * priority
# group oversaturation
oversaturation = 0
if own_val >= enemy_val:
diff = own_val - enemy_val
oversaturation = max(0.01, diff / (enemy_val if enemy_val else 1))
mult = max(0.01, 1 - oversaturation)
priority *= 0.5 + (mult / 2)
if self.unit_activation_function(unit, priority, p[0], oversaturation):
sorted_enemy_groups.enqueue(p[0], priority)
if sorted_enemy_groups.isEmpty():
continue
enemy_group: UnitGroup = sorted_enemy_groups.peek()
self.debug_service.text_world(f'{round(sorted_enemy_groups.peek2()[1],2)}', Point3((unit.position3d.x, unit.position3d.y - 0.35, unit.position3d.z)), Point3((0, 255, 0)), 12)
if (not unit.can_attack_air and enemy_group.percentage_of_air_in_group > 0.8) or (unit.type_id in {UnitTypeId.DRONE} and d[enemy_group].value > enemy_group.value):
d[enemy_group].retreaters.append(unit)
else:
d[enemy_group].units.append(unit)
# TODO consider cloak values
d[enemy_group].value += self.unit_type.get_unit_combat_value_enemy_group(unit.type_id, enemy_group.units) * sum(self.unit_type.get_resource_value(unit.type_id))
self.previously_assigned_units = {}
for key, value in d.items():
a = AssignedGroup()
a.enemies = key
a.group = self.group_service.create_group(value.units)
a.retreaters = self.group_service.create_group(value.retreaters)
groups.append(a)
for unit in a.group.units:
self.previously_assigned_units[unit.tag] = a.enemies.units.tags
return groups
def get_builder_units(self, own_units: Units, enemy_groups: PriorityQueue) -> {'unit tags'}:
'''Determines if any units in own group are in the ideal conditions to build into a different unit.
Returns all units that can build.'''
origins_build_queue = {get_unit_origin_type(unit_id) for unit_id in self.state.build_queue}.union({UnitTypeId.DRONE, UnitTypeId.PROBE, UnitTypeId.SCV})
return own_units.of_type(origins_build_queue)
def append_proxy_scouts(self, own_units : Units) -> None:
'''Will append a unit even if that unit is already in self.proxy_scouts, so be careful!'''
for unit in own_units:
self.give_scouting_order(unit)
self.proxy_scouts.append(unit)
def give_scouting_order(self, scout: Unit) -> None:
'''Gives a scouting order to the given scout unit.'''
if self.proxy_scout_idx == len(self.expansion_locations) - 1:
self.proxy_scout_idx = 0
pos = self.expansion_locations[self.proxy_scout_idx]
self.proxy_scout_idx += 1
self.action_service.add(scout.tag, scout.move(pos), 10)
def reassign_proxy_scouts(self) -> None:
'''Reassigns proxy scouts that have completed their mission. Deletes proxy scouts who have died.'''
#remove dead scouts from self.proxy_scouts
#TODO only do this when unit dies (on_unit_destroyed), however need to be careful about making this hookable because on_step is explicitly called
self.proxy_scouts = self.proxy_scouts.tags_in({scout.tag for scout in self.proxy_scouts if scout in self.state.own_units})
#assign scouts that are done to a new task
scouts_that_are_done: set = {scout for scout in self.proxy_scouts if scout.is_idle}
for scout in scouts_that_are_done:
self.give_scouting_order(scout)
def is_scout(self, u: Unit) -> bool:
'''Determines whether or not the given unit should be considered a scouting unit.'''
return is_combat_unit(u) and u.type_id not in {UnitTypeId.DRONE, UnitTypeId.QUEEN} and not u in self.proxy_scouts
| 51.535484
| 250
| 0.668315
|
46c6dde37457f1d5ad3558b9acde5292308da09b
| 60,656
|
py
|
Python
|
tests/test_service.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_service.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_service.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
"""Test Service module."""
from os import path
from pathlib import Path
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
import shutil
import oyaml as yaml
import pytest
import onapsdk.constants as const
from onapsdk.exceptions import ParameterError, RequestError, ResourceNotFound, StatusError, ValidationError
from onapsdk.sdc.category_management import ServiceCategory
from onapsdk.sdc.component import Component
from onapsdk.sdc.properties import ComponentProperty, Property
from onapsdk.sdc.service import Service, ServiceInstantiationType
from onapsdk.sdc.sdc_resource import SdcResource
from onapsdk.utils.headers_creator import headers_sdc_operator
from onapsdk.utils.headers_creator import headers_sdc_creator
ARTIFACTS = {
"componentInstances" : [
{
"uniqueId" : "test_unique_id",
"componentName" : "ubuntu16test_VF 0"
}
]
}
COMPONENTS = {
"componentInstances":[
{
"actualComponentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"createdFromCsar":True,
"uniqueId":"bcfa7544-6e3d-4666-93b1-c5973356d069.374f0a98-a280-43f1-9e6c-00b436782ce7.abstract_vsn",
"normalizedName":"abstract_vsn",
"name":"abstract_vsn",
"originType":"CVFC",
"customizationUUID":"971043e1-495b-4b75-901e-3d09baed7521",
"componentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"componentVersion":"1.0",
"toscaComponentName":"org.openecomp.resource.vfc.11111cvfc.abstract.abstract.nodes.vsn",
"componentName":"11111-nodes.vsnCvfc",
"groupInstances":None
}
]
}
COMPONENT = {
"metadata":{
"uniqueId":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"name":"11111-nodes.vsnCvfc",
"version":"1.0",
"isHighestVersion":True,
"creationDate":1594898496259,
"lastUpdateDate":1594898496325,
"description":"Complex node type that is used as nested type in VF",
"lifecycleState":"CERTIFIED",
"tags":[
"11111-nodes.vsnCvfc"
],
"icon":"defaulticon",
"normalizedName":"11111nodesvsncvfc",
"systemName":"11111NodesVsncvfc",
"contactId":"cs0008",
"allVersions":{
"1.0":"374f0a98-a280-43f1-9e6c-00b436782ce7"
},
"isDeleted":None,
"projectCode":None,
"csarUUID":None,
"csarVersion":None,
"importedToscaChecksum":None,
"invariantUUID":"3c027ba1-8d3a-4b59-9394-d748fec5e42c",
"componentType":"RESOURCE",
"name":"Generic",
"normalizedName":"generic",
"uniqueId":"resourceNewCategory.generic",
"icons":None,
"creatorUserId":"cs0008",
"creatorFullName":"Carlos Santana",
"lastUpdaterUserId":"cs0008",
"lastUpdaterFullName":"Carlos Santana",
"archiveTime":0,
"vendorName":"mj",
"vendorRelease":"1.0",
"resourceVendorModelNumber":"",
"resourceType":"CVFC",
"isAbstract":None,
"cost":None,
"licenseType":None,
"toscaResourceName":"org.openecomp.resource.vfc.11111cvfc.abstract.abstract.nodes.vsn",
"derivedFrom":None,
"uuid":"59f05bfb-ccea-4857-8799-6acff59e6344",
"archived":False,
"vspArchived":False,
"groupInstances":None
}
}
COMPONENT_PROPERTIES = [
{
"uniqueId":"3d9a184f-4268-4a0e-9ddd-252e49670013.vf_module_id",
"type":"string",
"required":False,
"definition":False,
"description":"The vFirewall Module ID is provided by ECOMP",
"password":False,
"name":"vf_module_id",
"label":"vFirewall module ID",
"hidden":False,
"immutable":False,
"isDeclaredListInput":False,
"getInputProperty":False,
"empty":False
},{
"uniqueId":"74f79006-ae56-4d58-947e-6a5089000774.skip_post_instantiation_configuration",
"type":"boolean",
"required":False,
"definition":False,
"password":False,
"name":"skip_post_instantiation_configuration",
"value":"true",
"hidden":False,
"immutable":False,
"parentUniqueId":"74f79006-ae56-4d58-947e-6a5089000774",
"isDeclaredListInput":False,
"getInputProperty":False,
"ownerId":"74f79006-ae56-4d58-947e-6a5089000774",
"empty":False
}
]
COMPONENTS_WITH_ALL_ORIGIN_TYPES = {
"componentInstances":[
{
"actualComponentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"createdFromCsar":True,
"uniqueId":"bcfa7544-6e3d-4666-93b1-c5973356d069.374f0a98-a280-43f1-9e6c-00b436782ce7.abstract_vsn",
"normalizedName":"abstract_vsn",
"name":"abstract_vsn",
"originType":"VF",
"customizationUUID":"971043e1-495b-4b75-901e-3d09baed7521",
"componentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"componentVersion":"1.0",
"toscaComponentName":"org.openecomp.resource.vfc.11111cvfc.abstract.abstract.nodes.vsn",
"componentName":"11111-nodes.vsnCvfc",
"groupInstances":None
},
{
"actualComponentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"createdFromCsar":True,
"uniqueId":"bcfa7544-6e3d-4666-93b1-c5973356d069.374f0a98-a280-43f1-9e6c-00b436782ce7.abstract_vsn",
"normalizedName":"abstract_vsn",
"name":"abstract_vsn",
"originType":"PNF",
"customizationUUID":"971043e1-495b-4b75-901e-3d09baed7521",
"componentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"componentVersion":"1.0",
"toscaComponentName":"org.openecomp.resource.vfc.11111cvfc.abstract.abstract.nodes.vsn",
"componentName":"11111-nodes.vsnCvfc",
"groupInstances":None
},
{
"actualComponentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"createdFromCsar":True,
"uniqueId":"bcfa7544-6e3d-4666-93b1-c5973356d069.374f0a98-a280-43f1-9e6c-00b436782ce7.abstract_vsn",
"normalizedName":"abstract_vsn",
"name":"abstract_vsn",
"originType":"VL",
"customizationUUID":"971043e1-495b-4b75-901e-3d09baed7521",
"componentUid":"374f0a98-a280-43f1-9e6c-00b436782ce7",
"componentVersion":"1.0",
"toscaComponentName":"org.openecomp.resource.vfc.11111cvfc.abstract.abstract.nodes.vsn",
"componentName":"11111-nodes.vsnCvfc",
"groupInstances":None
}
]
}
def test_init_no_name():
"""Check init with no names."""
svc = Service()
assert isinstance(svc, SdcResource)
assert svc._identifier is None
assert svc._version is None
assert svc.name == "ONAP-test-Service"
assert svc.headers["USER_ID"] == "cs0008"
assert svc.distribution_status is None
assert svc._distribution_id is None
assert isinstance(svc._base_url(), str)
@mock.patch.object(Service, 'exists')
def test_init_with_name(mock_exists):
"""Check init with no names."""
mock_exists.return_value = False
svc = Service(name="YOLO")
assert svc._identifier == None
assert svc._version == None
assert svc.name == "YOLO"
assert svc.created() == False
assert svc.headers["USER_ID"] == "cs0008"
assert svc.distribution_status is None
assert svc._distribution_id is None
assert isinstance(svc._base_url(), str)
@mock.patch.object(Service, 'exists')
def test_init_with_sdc_values(mock_exists):
"""Check init with no names."""
sdc_values = {'uuid': '12', 'version': '14', 'invariantUUID': '56',
'distributionStatus': 'yes', 'lifecycleState': 'state',
'category': 'Network Service'}
svc = Service(sdc_values=sdc_values)
mock_exists.return_value = True
assert svc._identifier == "12"
assert svc._version == "14"
assert svc.name == "ONAP-test-Service"
assert svc.created()
assert svc.headers["USER_ID"] == "cs0008"
assert svc.distribution_status == "yes"
assert svc._distribution_id is None
assert svc.category_name == "Network Service"
assert isinstance(svc._base_url(), str)
@mock.patch.object(Service, 'get_all')
def test_version_filter(mock_get_all):
"""Check version filter"""
svc_1 = Service(name="test_version_filter")
svc_1.identifier = "1111"
svc_1.unique_uuid = "2222"
svc_1.unique_identifier = "3333"
svc_1.status = const.CERTIFIED
svc_1.version = "1.0"
svc_2 = Service(name="test_version_filter")
svc_2.identifier = "1111"
svc_2.unique_uuid = "2222"
svc_2.unique_identifier = "3333"
svc_2.status = const.DRAFT
svc_2.version = "1.1"
mock_get_all.return_value = [svc_1, svc_2]
svc = Service(name='test_version_filter')
assert svc.exists()
assert svc.version == "1.1"
svc = Service(name='test_version_filter', version='1.0')
assert svc.exists()
assert svc.version == "1.0"
svc = Service(name='test_version_filter', version='-111')
assert not svc.exists()
assert not svc.version
@mock.patch.object(Service, 'get_all')
def test_get_the_latest_version(mock_get_all):
svc_1 = Service(name="test_get_max_version")
svc_1.identifier = "1111"
svc_1.unique_uuid = "2222"
svc_1.unique_identifier = "3333"
svc_1.status = const.CERTIFIED
svc_1.version = "9.0"
svc_2 = Service(name="test_get_max_version")
svc_2.identifier = "1111"
svc_2.unique_uuid = "2222"
svc_2.unique_identifier = "3333"
svc_2.status = const.DRAFT
svc_2.version = "10.0"
mock_get_all.return_value = [svc_1, svc_2]
svc = Service(name='test_get_max_version')
assert svc.version == "10.0"
svc_3 = Service(name="test_get_max_version")
svc_3.identifier = "1111"
svc_3.unique_uuid = "2222"
svc_3.unique_identifier = "3333"
svc_3.status = const.DRAFT
svc_3.version = "10.1"
mock_get_all.return_value = [svc_1, svc_2, svc_3]
svc = Service(name='test_get_max_version')
assert svc.version == "10.1"
svc_4 = Service(name="test_get_max_version")
svc_4.identifier = "1111"
svc_4.unique_uuid = "2222"
svc_4.unique_identifier = "3333"
svc_4.status = const.DRAFT
svc_4.version = "20.0"
mock_get_all.return_value = [svc_1, svc_2, svc_3, svc_4]
svc = Service(name='test_get_max_version')
assert svc.version == "20.0"
svc_5 = Service(name="test_get_max_version")
svc_5.identifier = "1111"
svc_5.unique_uuid = "2222"
svc_5.unique_identifier = "3333"
svc_5.status = const.DRAFT
svc_5.version = "99.0"
svc_6 = Service(name="test_get_max_version")
svc_6.identifier = "1111"
svc_6.unique_uuid = "2222"
svc_6.unique_identifier = "3333"
svc_6.status = const.DRAFT
svc_6.version = "100.0"
mock_get_all.return_value = [svc_1, svc_2, svc_3, svc_4, svc_5, svc_6]
svc = Service(name='test_get_max_version')
assert svc.version == "100.0"
def test_equality_really_equals():
"""Check two vfs are equals if name is the same."""
svc_1 = Service(name="equal")
svc_1.identifier = "1234"
svc_2 = Service(name="equal")
svc_2.identifier = "1235"
assert svc_1 == svc_2
def test_equality_not_equals():
"""Check two vfs are not equals if name is not the same."""
svc_1 = Service(name="equal")
svc_1.identifier = "1234"
svc_2 = Service(name="not_equal")
svc_2.identifier = "1234"
assert svc_1 != svc_2
def test_equality_not_equals_not_same_object():
"""Check a vf and something different are not equals."""
svc_1 = Service(name="equal")
svc_1.identifier = "1234"
svc_2 = SdcResource()
svc_2.name = "equal"
assert svc_1 != svc_2
@mock.patch.object(Service, 'load_metadata')
def test_distribution_id_no_load(mock_load):
svc = Service()
svc.identifier = "1234"
svc._distribution_id = "4567"
assert svc.distribution_id == "4567"
mock_load.assert_not_called()
@mock.patch.object(Service, 'load_metadata')
def test_distribution_id_load(mock_load):
svc = Service()
svc.identifier = "1234"
assert svc.distribution_id is None
mock_load.assert_called_once()
@mock.patch.object(Service, '_check_distributed')
def test_distributed_no_load(mock_check_distributed):
svc = Service()
svc.identifier = "1234"
svc._distributed = True
assert svc.distributed
mock_check_distributed.assert_not_called()
@mock.patch.object(Service, '_check_distributed')
def test_distributed_load(mock_check_distributed):
svc = Service()
svc.identifier = "1234"
assert not svc.distributed
mock_check_distributed.assert_called_once()
def test_distribution_id_setter():
svc = Service()
svc.identifier = "1234"
svc.distribution_id = "4567"
assert svc._distribution_id == "4567"
@mock.patch.object(Service, '_create')
@mock.patch.object(Service, "category", new_callable=mock.PropertyMock)
@mock.patch.object(Service, "exists")
def test_create(mock_exists, mock_category, mock_create):
mock_exists.return_value = False
svc = Service()
svc.create()
mock_create.assert_called_once_with("service_create.json.j2",
name="ONAP-test-Service",
instantiation_type="A-la-carte",
category=svc.category)
mock_create.reset_mock()
svc = Service(instantiation_type=ServiceInstantiationType.MACRO)
svc.create()
mock_create.assert_called_once_with("service_create.json.j2",
name="ONAP-test-Service",
instantiation_type="Macro",
category=svc.category)
@mock.patch.object(Service, 'exists')
@mock.patch.object(Service, 'send_message')
def test_add_resource_not_draft(mock_send, mock_exists):
mock_exists.return_value = False
svc = Service()
resource = SdcResource()
with pytest.raises(StatusError):
svc.add_resource(resource)
mock_send.assert_not_called()
@mock.patch.object(Service, 'load')
@mock.patch.object(Service, 'send_message')
def test_add_resource_bad_result(mock_send, mock_load):
svc = Service()
svc.unique_identifier = "45"
svc.identifier = "93"
svc.status = const.DRAFT
mock_send.return_value = {}
resource = SdcResource()
resource.unique_identifier = "12"
resource.created = MagicMock(return_value=True)
resource.version = "40"
resource.name = "test"
assert svc.add_resource(resource) is None
mock_send.assert_called_once_with(
'POST', 'Add SDCRESOURCE to ServiceProxy',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/45/resourceInstance',
data='{\n "name": "test",\n "componentVersion": "40",\n "posY": 100,\n "posX": 200,\n "uniqueId": "12",\n "originType": "SDCRESOURCE",\n "componentUid": "12",\n "icon": "defaulticon"\n}')
@mock.patch.object(Service, 'load')
@mock.patch.object(Service, 'send_message')
def test_add_resource_OK(mock_send, mock_load):
svc = Service()
svc.unique_identifier = "45"
svc.identifier = "93"
svc.status = const.DRAFT
mock_send.return_value = {'yes': 'indeed'}
resource = SdcResource()
resource.unique_identifier = "12"
resource.created = MagicMock(return_value=True)
resource.version = "40"
resource.name = "test"
result = svc.add_resource(resource)
assert result['yes'] == "indeed"
mock_send.assert_called_once_with(
'POST', 'Add SDCRESOURCE to ServiceProxy',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/45/resourceInstance',
data='{\n "name": "test",\n "componentVersion": "40",\n "posY": 100,\n "posX": 200,\n "uniqueId": "12",\n "originType": "SDCRESOURCE",\n "componentUid": "12",\n "icon": "defaulticon"\n}')
@mock.patch.object(Service, '_verify_action_to_sdc')
def test_checkin(mock_verify):
svc = Service()
svc.checkin()
mock_verify.assert_called_once_with(const.DRAFT, const.CHECKIN, 'lifecycleState')
@mock.patch.object(Service, '_verify_action_to_sdc')
def test_submit(mock_verify):
svc = Service()
svc.submit()
mock_verify.assert_called_once_with(const.CHECKED_IN, const.SUBMIT_FOR_TESTING, 'lifecycleState')
@mock.patch.object(Service, '_verify_action_to_sdc')
def test_certify(mock_verify):
svc = Service()
svc.certify()
mock_verify.assert_called_once_with(
const.CHECKED_IN, const.CERTIFY, 'lifecycleState',
headers=headers_sdc_creator(svc.headers))
@mock.patch.object(Service, '_verify_action_to_sdc')
def test_distribute(mock_verify):
svc = Service()
svc.distribute()
mock_verify.assert_called_once_with(
const.CERTIFIED, const.DISTRIBUTE, 'distribution',
headers=headers_sdc_creator(svc.headers))
@mock.patch.object(Service, '_verify_action_to_sdc')
def test_redistribute(mock_verify):
svc = Service()
svc.redistribute()
mock_verify.assert_called_once_with(
const.DISTRIBUTED, const.DISTRIBUTE, 'distribution',
headers=headers_sdc_creator(svc.headers))
@mock.patch.object(Service, 'send_message')
def test_get_tosca_no_result(mock_send):
if path.exists('/tmp/tosca_files'):
shutil.rmtree('/tmp/tosca_files')
mock_send.return_value = {}
svc = Service()
svc.identifier = "12"
svc.get_tosca()
headers = headers_sdc_creator(svc.headers)
headers['Accept'] = 'application/octet-stream'
mock_send.assert_called_once_with(
'GET', 'Download Tosca Model for ONAP-test-Service',
'https://sdc.api.be.simpledemo.onap.org:30204/sdc/v1/catalog/services/12/toscaModel',
headers=headers)
assert not path.exists('/tmp/tosca_files')
def test_get_tosca_bad_csart(requests_mock):
if path.exists('/tmp/tosca_files'):
shutil.rmtree('/tmp/tosca_files')
svc = Service()
svc.identifier = "12"
with open('tests/data/bad.csar', mode='rb') as file:
file_content = file.read()
requests_mock.get(
'https://sdc.api.be.simpledemo.onap.org:30204/sdc/v1/catalog/services/12/toscaModel',
content=file_content)
svc.get_tosca()
def test_get_tosca_result(requests_mock):
if path.exists('/tmp/tosca_files'):
shutil.rmtree('/tmp/tosca_files')
with open('tests/data/test.csar', mode='rb') as file:
file_content = file.read()
requests_mock.get(
'https://sdc.api.be.simpledemo.onap.org:30204/sdc/v1/catalog/services/12/toscaModel',
content=file_content)
svc = Service()
svc.identifier = "12"
svc.get_tosca()
def test_get_tosca_result_no_service_in_csar(requests_mock):
if path.exists('/tmp/tosca_files'):
shutil.rmtree('/tmp/tosca_files')
with open('tests/data/bad_no_service.csar', mode='rb') as file:
file_content = file.read()
requests_mock.get(
'https://sdc.api.be.simpledemo.onap.org:30204/sdc/v1/catalog/services/12/toscaModel',
content=file_content)
svc = Service()
svc.identifier = "12"
with pytest.raises(ValidationError):
svc.get_tosca()
@mock.patch.object(Service, 'send_message_json')
def test_distributed_api_error(mock_send):
mock_send.side_effect = ResourceNotFound
svc = Service()
svc.distribution_id = "12"
assert not svc.distributed
@mock.patch.object(Service, 'send_message_json')
def test_distributed_not_distributed(mock_send):
mock_send.return_value = {
'distributionStatusList':[
{'omfComponentID': "SO", 'status': "DOWNLOAD_OK"},
{'omfComponentID': "sdnc", 'status': "DOWNLOAD_NOK"},
{'omfComponentID': "aai", 'status': "DOWNLOAD_OK"}]}
svc = Service()
svc.distribution_id = "12"
assert not svc.distributed
mock_send.assert_called_once_with(
'GET', 'Check distribution for ONAP-test-Service',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/distribution/12',
headers=headers_sdc_operator(svc.headers))
@mock.patch.object(Service, 'send_message_json')
def test_distributed_not_distributed(mock_send):
mock_send.return_value = {
'distributionStatusList':[
{'omfComponentID': "SO", 'status': "DOWNLOAD_OK"},
{'omfComponentID': "aai", 'status': "DOWNLOAD_OK"}]}
svc = Service()
svc.distribution_id = "12"
assert not svc.distributed
mock_send.assert_called_once_with(
'GET', 'Check distribution for ONAP-test-Service',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/distribution/12',
headers=headers_sdc_creator(svc.headers))
@mock.patch.object(Service, 'send_message_json')
def test_distributed_distributed(mock_send):
mock_send.return_value = {
'distributionStatusList':[
{'omfComponentID': "SO", 'status': "DOWNLOAD_OK"},
{'omfComponentID': "sdnc", 'status': "DOWNLOAD_OK"},
{'omfComponentID': "aai", 'status': "DOWNLOAD_OK"}]}
svc = Service()
svc.distribution_id = "12"
assert svc.distributed
mock_send.assert_called_once_with(
'GET', 'Check distribution for ONAP-test-Service',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/distribution/12',
headers=headers_sdc_creator(svc.headers))
@mock.patch.object(Service, 'send_message_json')
def test_load_metadata_no_result(mock_send):
mock_send.return_value = {}
svc = Service()
svc.identifier = "1"
svc.load_metadata()
assert svc._distribution_id is None
mock_send.assert_called_once_with(
'GET', 'Get Metadata for ONAP-test-Service',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/1/distribution',
headers=headers_sdc_creator(svc.headers))
@mock.patch.object(Service, 'send_message_json')
def test_load_metadata_bad_json(mock_send):
mock_send.return_value = {'yolo': 'in the wood'}
svc = Service()
svc.identifier = "1"
svc.load_metadata()
assert svc._distribution_id is None
mock_send.assert_called_once_with(
'GET', 'Get Metadata for ONAP-test-Service',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/1/distribution',
headers=headers_sdc_creator(svc.headers))
@mock.patch.object(Service, 'send_message_json')
def test_load_metadata_OK(mock_send):
mock_send.return_value = {'distributionStatusOfServiceList': [
{'distributionID': "11"}, {'distributionID': "12"}]}
svc = Service()
svc.identifier = "1"
svc.load_metadata()
assert svc._distribution_id == "11"
mock_send.assert_called_once_with(
'GET', 'Get Metadata for ONAP-test-Service',
'https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/1/distribution',
headers=headers_sdc_creator(svc.headers))
def test_get_all_url():
assert Service._get_all_url() == "https://sdc.api.be.simpledemo.onap.org:30204/sdc/v1/catalog/services"
@mock.patch.object(Service, '_action_to_sdc')
@mock.patch.object(Service, 'load')
def test_really_submit_request_failed(mock_load, mock_action):
mock_action.side_effect = RequestError
svc = Service()
with pytest.raises(RequestError) as err:
svc._really_submit()
assert err.type == RequestError
mock_load.assert_not_called()
mock_action.assert_called_once_with('Certify', action_type='lifecycleState')
@mock.patch.object(Service, '_action_to_sdc')
@mock.patch.object(Service, 'load')
def test_really_submit_OK(mock_load, mock_action):
mock_action.return_value = "yes"
svc = Service()
svc._really_submit()
mock_load.assert_called_once()
mock_action.assert_called_once_with('Certify', action_type='lifecycleState')
@mock.patch.object(Service, 'load')
@mock.patch.object(Service, '_action_to_sdc')
@mock.patch.object(Service, 'created')
def test_verify_action_to_sdc_not_created(mock_created, mock_action, mock_load):
mock_created.return_value = False
svc = Service()
svc._status = "no_yes"
svc._verify_action_to_sdc("yes", "action", action_type='lifecycleState')
mock_created.assert_called()
mock_action.assert_not_called()
mock_load.assert_not_called()
@mock.patch.object(Service, 'load')
@mock.patch.object(Service, '_action_to_sdc')
@mock.patch.object(Service, 'created')
def test_verify_action_to_sdc_bad_status(mock_created, mock_action, mock_load):
mock_created.return_value = True
svc = Service()
svc._status = "no_yes"
with pytest.raises(StatusError) as err:
svc._verify_action_to_sdc("yes", "action", action_type='lifecycleState')
assert err.type == StatusError
mock_created.assert_called()
mock_action.assert_not_called()
mock_load.assert_not_called()
@mock.patch.object(Service, 'load')
@mock.patch.object(Service, '_action_to_sdc')
@mock.patch.object(Service, 'created')
def test_verify_action_to_sdc_OK(mock_created, mock_action, mock_load):
mock_created.return_value = True
mock_action.return_value = "good"
svc = Service()
svc._status = "yes"
svc._verify_action_to_sdc("yes", "action", action_type='lifecycleState')
mock_created.assert_called()
mock_action.assert_called_once()
mock_load.assert_called_once()
@mock.patch.object(Service, 'distribute')
@mock.patch.object(Service, 'approve')
@mock.patch.object(Service, 'certify')
@mock.patch.object(Service, 'start_certification')
@mock.patch.object(Service, 'submit')
@mock.patch.object(Service, 'checkin')
@mock.patch.object(Service, 'add_resource')
@mock.patch.object(Service, 'create')
def test_onboard_new_service(mock_create, mock_add_resource,
mock_checkin, mock_submit,
mock_start_certification, mock_certify,
mock_approve, mock_distribute):
getter_mock = mock.Mock(wraps=Service.status.fget)
mock_status = Service.status.getter(getter_mock)
with mock.patch.object(Service, 'status', mock_status):
getter_mock.side_effect = [None, const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, None]
service = Service()
service._time_wait = 0
service.onboard()
mock_create.assert_called_once()
mock_add_resource.assert_not_called()
mock_checkin.assert_not_called()
mock_submit.assert_not_called()
mock_start_certification.assert_not_called()
mock_certify.assert_not_called()
mock_approve.assert_not_called()
mock_distribute.assert_not_called()
@mock.patch.object(Service, 'status')
def test_onboard_invalid_status(mock_status):
mock_status.return_value = False
service = Service()
service._time_wait = 0
with pytest.raises(StatusError) as err:
service.onboard()
assert err.type == StatusError
@mock.patch.object(Service, 'distribute')
@mock.patch.object(Service, 'approve')
@mock.patch.object(Service, 'certify')
@mock.patch.object(Service, 'start_certification')
@mock.patch.object(Service, 'submit')
@mock.patch.object(Service, 'checkin')
@mock.patch.object(Service, 'add_resource')
@mock.patch.object(Service, 'create')
def test_onboard_service_no_resources(mock_create,
mock_add_resource, mock_checkin,
mock_submit, mock_start_certification,
mock_certify, mock_approve,
mock_distribute):
getter_mock = mock.Mock(wraps=Service.status.fget)
mock_status = Service.status.getter(getter_mock)
with mock.patch.object(Service, 'status', mock_status):
getter_mock.side_effect = [const.DRAFT, const.DRAFT, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED, None]
service = Service()
service._time_wait = 0
with pytest.raises(ParameterError):
service.onboard()
mock_create.assert_not_called()
mock_add_resource.assert_not_called()
mock_checkin.assert_not_called()
mock_submit.assert_not_called()
mock_start_certification.assert_not_called()
mock_certify.assert_not_called()
mock_approve.assert_not_called()
mock_distribute.assert_not_called()
@mock.patch.object(Service, 'distribute')
@mock.patch.object(Service, 'approve')
@mock.patch.object(Service, 'certify')
@mock.patch.object(Service, 'start_certification')
@mock.patch.object(Service, 'submit')
@mock.patch.object(Service, 'checkin')
@mock.patch.object(Service, 'add_resource')
@mock.patch.object(Service, 'create')
def test_onboard_service_resources(mock_create, mock_add_resource,
mock_checkin, mock_submit,
mock_start_certification, mock_certify,
mock_approve, mock_distribute):
getter_mock = mock.Mock(wraps=Service.status.fget)
mock_status = Service.status.getter(getter_mock)
with mock.patch.object(Service, 'status', mock_status):
getter_mock.side_effect = [const.DRAFT, const.DRAFT, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED, None]
resource = SdcResource()
service = Service(resources=[resource])
service._time_wait = 0
service.onboard()
mock_create.assert_not_called()
mock_add_resource.assert_called_once_with(resource)
mock_checkin.assert_called_once()
mock_submit.assert_not_called()
mock_start_certification.assert_not_called()
mock_certify.assert_not_called()
mock_approve.assert_not_called()
mock_distribute.assert_not_called()
@mock.patch.object(Service, 'distribute')
@mock.patch.object(Service, 'approve')
@mock.patch.object(Service, 'certify')
@mock.patch.object(Service, 'start_certification')
@mock.patch.object(Service, 'submit')
@mock.patch.object(Service, 'checkin')
@mock.patch.object(Service, 'add_resource')
@mock.patch.object(Service, 'create')
def test_onboard_service_several_resources(mock_create,
mock_add_resource, mock_checkin,
mock_submit,
mock_start_certification,
mock_certify, mock_approve,
mock_distribute):
getter_mock = mock.Mock(wraps=Service.status.fget)
mock_status = Service.status.getter(getter_mock)
with mock.patch.object(Service, 'status', mock_status):
getter_mock.side_effect = [const.DRAFT, const.DRAFT, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED, None]
resource1 = SdcResource()
resource2 = SdcResource()
service = Service(resources=[resource1, resource2])
service._time_wait = 0
service.onboard()
mock_create.assert_not_called()
calls = [mock.call(resource1), mock.call(resource2)]
mock_add_resource.assert_has_calls(calls, any_order=True)
assert mock_add_resource.call_count == 2
mock_checkin.assert_called_once()
mock_submit.assert_not_called()
mock_start_certification.assert_not_called()
mock_certify.assert_not_called()
mock_approve.assert_not_called()
mock_distribute.assert_not_called()
@mock.patch.object(Service, 'distribute')
@mock.patch.object(Service, 'approve')
@mock.patch.object(Service, 'certify')
@mock.patch.object(Service, 'start_certification')
@mock.patch.object(Service, 'submit')
@mock.patch.object(Service, 'checkin')
@mock.patch.object(Service, 'add_resource')
@mock.patch.object(Service, 'create')
def test_onboard_service_certifi(mock_create,
mock_add_resource, mock_checkin,
mock_submit, mock_start_certification,
mock_certify, mock_approve,
mock_distribute):
getter_mock = mock.Mock(wraps=Service.status.fget)
mock_status = Service.status.getter(getter_mock)
with mock.patch.object(Service, 'status', mock_status):
getter_mock.side_effect = [const.CHECKED_IN,
const.CHECKED_IN,
const.CHECKED_IN,
const.CHECKED_IN,
const.CHECKED_IN,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, None]
service = Service()
service._time_wait = 0
service.onboard()
mock_create.assert_not_called()
mock_add_resource.assert_not_called()
mock_checkin.assert_not_called()
mock_submit.assert_not_called()
mock_start_certification.assert_not_called()
mock_certify.assert_called_once()
mock_approve.assert_not_called()
mock_distribute.assert_not_called()
@mock.patch.object(Service, 'distribute')
@mock.patch.object(Service, 'certify')
@mock.patch.object(Service, 'checkin')
@mock.patch.object(Service, 'add_resource')
@mock.patch.object(Service, 'create')
def test_onboard_service_distribute(mock_create,
mock_add_resource,
mock_checkin,
mock_certify,
mock_distribute):
getter_mock = mock.Mock(wraps=Service.status.fget)
mock_status = Service.status.getter(getter_mock)
with mock.patch.object(Service, 'status', mock_status):
getter_mock.side_effect = [const.CERTIFIED, const.CERTIFIED, const.CERTIFIED,
const.CERTIFIED, const.CERTIFIED, const.CERTIFIED,
const.CERTIFIED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED, None]
service = Service()
service._time_wait = 0
service.onboard()
mock_create.assert_not_called()
mock_add_resource.assert_not_called()
mock_checkin.assert_not_called()
mock_certify.assert_not_called()
mock_distribute.assert_called_once()
@mock.patch.object(Service, 'distribute')
@mock.patch.object(Service, 'certify')
@mock.patch.object(Service, 'checkin')
@mock.patch.object(Service, 'add_resource')
@mock.patch.object(Service, 'create')
def test_onboard_whole_service(mock_create,
mock_add_resource,
mock_checkin,
mock_certify,
mock_distribute):
getter_mock = mock.Mock(wraps=Service.status.fget)
mock_status = Service.status.getter(getter_mock)
with mock.patch.object(Service, 'status', mock_status):
getter_mock.side_effect = [None, const.DRAFT, const.DRAFT,const.CHECKED_IN,
const.CHECKED_IN, const.CHECKED_IN,
const.CERTIFIED, const.CERTIFIED,
const.CERTIFIED, const.CERTIFIED,
const.CERTIFIED, const.CERTIFIED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, const.DISTRIBUTED,
const.DISTRIBUTED, None]
resource = SdcResource()
service = Service(resources=[resource])
service._time_wait = 0
service.onboard()
mock_create.assert_called_once()
mock_add_resource.assert_called_once_with(resource)
mock_checkin.assert_called_once()
mock_certify.assert_called_once()
mock_distribute.assert_called_once()
@mock.patch("onapsdk.sdc.service.Service.send_message_json")
@mock.patch("onapsdk.sdc.service.SdcResource.import_from_sdc")
@mock.patch("onapsdk.sdc.service.Service.resource_inputs_url", new_callable=mock.PropertyMock)
def test_vnf_vf_modules_one(mock_service_resource_inputs_url, mock_import_from_sdc, mock_send_message_json):
"""Test parsing TOSCA file with one VNF which has associated one VFmodule"""
service = Service(name="test")
mock_send_message_json.side_effect = [{
"componentInstances": [{
"actualComponentUid": "123",
"originType": "VF",
"name": "ubuntu16_VF 0",
"toscaComponentName": "org.openecomp.resource.vf.Ubuntu16Vf",
"createdFromCsar": False,
"uniqueId": "123",
"normalizedName": "123",
"customizationUUID": "123",
"componentUid": "123",
"componentVersion": "123",
"componentName": "123",
"groupInstances": [
{
"name": "ubuntu16_vf0..Ubuntu16Vf..base_ubuntu16..module-0",
"type": "org.openecomp.groups.VfModule",
"groupName": "Ubuntu16Vf..base_ubuntu16..module-0",
"groupUUID": "ed041b38-63fc-486d-9d4d-4e2531bc7e54",
"invariantUUID": "f47c3a9b-6a5f-4d1a-8a0b-b7f56ebb9a90",
"version": "1",
"customizationUUID": "d946ea06-ec4b-4ed2-921a-117e1379b913",
"properties": [
{
"name": "123",
"type": "test type",
"value": "val",
"description": "12234",
},
{
"name": "123",
"type": "test type",
"value": None,
"description": "12234",
}
]
}
]
}]
}, MagicMock()
]
vnfs = list(service.vnfs)
assert len(vnfs) == 1
vnf = vnfs[0]
assert vnf.name == "ubuntu16_VF 0"
assert vnf.node_template_type == "org.openecomp.resource.vf.Ubuntu16Vf"
assert vnf.vf_modules
assert vnf.vf_modules[0].name == "ubuntu16_vf0..Ubuntu16Vf..base_ubuntu16..module-0"
assert len(list(vnf.vf_modules[0].properties)) == 1
@mock.patch("onapsdk.sdc.service.Service.send_message_json")
@mock.patch("onapsdk.sdc.service.SdcResource.import_from_sdc")
@mock.patch("onapsdk.sdc.service.Service.resource_inputs_url", new_callable=mock.PropertyMock)
def test_pnf_modules_one(mock_service_resource_inputs_url, mock_import_from_sdc, mock_send_message_json):
"""Test parsing TOSCA file with one PNF which has associated one PNFmodule"""
service = Service(name="test")
mock_send_message_json.side_effect = [{
"componentInstances": [{
"actualComponentUid": "123",
"originType": "PNF",
"name": "test_pnf_vsp 0",
"toscaComponentName": "org.openecomp.resource.pnf.TestPnfVsp",
"createdFromCsar": False,
"uniqueId": "123",
"normalizedName": "123",
"customizationUUID": "123",
"componentUid": "123",
"componentVersion": "123",
"componentName": "123",
"groupInstances": None
}]
}, MagicMock()
]
pnfs = list(service.pnfs)
assert len(pnfs) == 1
pnf = pnfs[0]
assert pnf.name == "test_pnf_vsp 0"
assert pnf.node_template_type == "org.openecomp.resource.pnf.TestPnfVsp"
@mock.patch("onapsdk.sdc.service.Service.send_message_json")
@mock.patch("onapsdk.sdc.service.SdcResource.import_from_sdc")
@mock.patch("onapsdk.sdc.service.Service.resource_inputs_url", new_callable=mock.PropertyMock)
def test_vnf_vf_modules_two(mock_service_resource_inputs_url, mock_import_from_sdc, mock_send_message_json):
"""Test parsing TOSCA file with two VNF which has associated one VFmodule"""
service = Service(name="test")
mock_send_message_json.side_effect = [{
"componentInstances": [{
"actualComponentUid": "123",
"originType": "VF",
"name": "vFWCL_vPKG-vf 0",
"toscaComponentName": "org.openecomp.resource.vf.VfwclVpkgVf",
"createdFromCsar": False,
"uniqueId": "123",
"normalizedName": "123",
"customizationUUID": "123",
"componentUid": "123",
"componentVersion": "123",
"componentName": "123",
"groupInstances": [
{
"name": "vfwcl_vpkgvf0..VfwclVpkgVf..base_vpkg..module-0",
"type": "org.openecomp.groups.VfModule",
"groupName": "Ubuntu16Vf..base_ubuntu16..module-0",
"groupUUID": "ed041b38-63fc-486d-9d4d-4e2531bc7e54",
"invariantUUID": "f47c3a9b-6a5f-4d1a-8a0b-b7f56ebb9a90",
"version": "1",
"customizationUUID": "d946ea06-ec4b-4ed2-921a-117e1379b913",
"properties": [
{
"name": "123",
"type": "test type",
"value": "val",
"description": "12234",
},
{
"name": "333",
"type": "test type",
"value": "val",
"description": "12234",
},
{
"name": "123",
"type": "test type",
"value": None,
"description": "12234",
}
]
},
{
"name": "vfwcl_vpkgvf0..base_template_dummy_ignore..base_vpkg..module-0",
"type": "org.openecomp.groups.VfModule",
"groupName": "Ubuntu16Vf..base_ubuntu16..module-0",
"groupUUID": "ed041b38-63fc-486d-9d4d-4e2531bc7e54",
"invariantUUID": "f47c3a9b-6a5f-4d1a-8a0b-b7f56ebb9a90",
"version": "1",
"customizationUUID": "d946ea06-ec4b-4ed2-921a-117e1379b913",
"properties": [
{
"name": "123",
"type": "test type",
"value": "val",
"description": "12234",
},
{
"name": "333",
"type": "test type",
"value": "val",
"description": "12234",
},
{
"name": "vf_module_label",
"type": "test type",
"value": "base_template_dummy_ignore",
"description": "12234",
}
]
}
]
},
{
"actualComponentUid": "123",
"originType": "VF",
"name": "vFWCL_vFWSNK-vf 0",
"toscaComponentName": "org.openecomp.resource.vf.VfwclVfwsnkVf",
"createdFromCsar": False,
"uniqueId": "123",
"normalizedName": "123",
"customizationUUID": "123",
"componentUid": "123",
"componentVersion": "123",
"componentName": "123",
"groupInstances": [
{
"name": "vfwcl_vfwsnkvf0..VfwclVfwsnkVf..base_vfw..module-0",
"type": "org.openecomp.groups.VfModule",
"groupName": "Ubuntu16Vf..base_ubuntu16..module-0",
"groupUUID": "ed041b38-63fc-486d-9d4d-4e2531bc7e54",
"invariantUUID": "f47c3a9b-6a5f-4d1a-8a0b-b7f56ebb9a90",
"version": "1",
"customizationUUID": "d946ea06-ec4b-4ed2-921a-117e1379b913",
"properties": [
{
"name": "123",
"type": "test type",
"value": "val",
"description": "12234",
},
{
"name": "123",
"type": "test type",
"value": None,
"description": "12234",
}
]
}
]
}]
}, MagicMock(), MagicMock()
]
vnfs = list(service.vnfs)
assert len(vnfs) == 2
vnf = vnfs[0]
assert vnf.name == "vFWCL_vPKG-vf 0"
assert vnf.node_template_type == "org.openecomp.resource.vf.VfwclVpkgVf"
assert vnf.vf_modules
assert len(vnf.vf_modules) == 1
assert vnf.vf_modules[0].name == "vfwcl_vpkgvf0..VfwclVpkgVf..base_vpkg..module-0"
assert len(list(vnf.vf_modules[0].properties)) == 2
vnf = vnfs[1]
assert vnf.name == "vFWCL_vFWSNK-vf 0"
assert vnf.node_template_type == "org.openecomp.resource.vf.VfwclVfwsnkVf"
assert vnf.vf_modules
assert vnf.vf_modules[0].name == "vfwcl_vfwsnkvf0..VfwclVfwsnkVf..base_vfw..module-0"
assert len(list(vnf.vf_modules[0].properties)) == 1
@mock.patch.object(Service, 'send_message_json')
def test_get_vnf_unique_id(mock_send):
"""Test Service get nf uid with One Vf"""
svc = Service()
svc.unique_identifier = "service_unique_identifier"
mock_send.return_value = ARTIFACTS
unique_id = svc.get_nf_unique_id(nf_name="ubuntu16test_VF 0")
mock_send.assert_called_once_with(
'GET', 'Get nf unique ID',
f"https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/{svc.unique_identifier}")
assert unique_id == 'test_unique_id'
@mock.patch.object(Service, 'send_message_json')
def test_get_vnf_unique_id_not_found(mock_send):
"""Test Service get nf uid with One Vf"""
svc = Service()
svc.unique_identifier = "service_unique_identifier"
artifacts = {"componentInstances": []}
mock_send.return_value = artifacts
with pytest.raises(ResourceNotFound) as err:
svc.get_nf_unique_id(nf_name="ubuntu16test_VF 0")
assert err.type == ResourceNotFound
mock_send.assert_called_once_with(
'GET', 'Get nf unique ID',
f"https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/{svc.unique_identifier}")
@mock.patch.object(Service, 'get_nf_unique_id')
@mock.patch.object(Service, 'load')
@mock.patch.object(Service, 'send_message')
def test_add_artifact_to_vf(mock_send_message, mock_load, mock_add):
"""Test Service add artifact"""
svc = Service()
mock_add.return_value = "54321"
result = svc.add_artifact_to_vf(vnf_name="ubuntu16test_VF 0",
artifact_type="DCAE_INVENTORY_BLUEPRINT",
artifact_name="clampnode.yaml",
artifact="data".encode('utf-8'))
mock_send_message.assert_called()
method, description, url = mock_send_message.call_args[0]
assert method == "POST"
assert description == "Add artifact to vf"
assert url == ("https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/"
f"{svc.unique_identifier}/resourceInstance/54321/artifacts")
@mock.patch.object(Service, 'load')
@mock.patch.object(Service, 'send_message')
def test_add_artifact_to_service(mock_send_message, mock_load):
"""Test Service add artifact"""
svc = Service()
svc.status = const.DRAFT
mycbapath = Path(Path(__file__).resolve().parent, "data/vLB_CBA_Python.zip")
result = svc.add_deployment_artifact(artifact_label="cba",
artifact_type="CONTROLLER_BLUEPRINT_ARCHIVE",
artifact_name="vLB_CBA_Python.zip",
artifact=mycbapath)
mock_send_message.assert_called()
method, description, url = mock_send_message.call_args[0]
assert method == "POST"
assert description == "Add deployment artifact for ONAP-test-Service sdc resource"
assert url == ("https://sdc.api.fe.simpledemo.onap.org:30207/sdc1/feProxy/rest/v1/catalog/services/"
f"{svc.unique_identifier}/artifacts")
@mock.patch("onapsdk.sdc.service.Service.send_message_json")
@mock.patch("onapsdk.sdc.service.SdcResource.import_from_sdc")
@mock.patch("onapsdk.sdc.service.Service.resource_inputs_url", new_callable=mock.PropertyMock)
def test_service_networks(mock_service_resource_inputs_url, mock_import_from_sdc, mock_send_message_json):
mock_send_message_json.side_effect = [{
"componentInstances": [{
"actualComponentUid": "123",
"originType": "VL",
"name": "NeutronNet 0",
"toscaComponentName": "org.openecomp.resource.vl.nodes.heat.network.neutron.Net",
"createdFromCsar": False,
"uniqueId": "123",
"normalizedName": "123",
"customizationUUID": "123",
"componentUid": "123",
"componentVersion": "123",
"componentName": "123",
"groupInstances": None
}]
}, MagicMock()
]
service = Service(name="test")
networks = list(service.networks)
assert len(networks) == 1
network = networks[0]
assert network.name == "NeutronNet 0"
assert network.node_template_type == "org.openecomp.resource.vl.nodes.heat.network.neutron.Net"
@mock.patch.object(Service, '_unzip_csar_file')
def test_tosca_template_no_tosca_model(mock_unzip):
service = Service(name="test")
getter_mock = mock.Mock(wraps=Service.tosca_model.fget)
getter_mock.return_value = False
mock_tosca_model = Service.tosca_model.getter(getter_mock)
with mock.patch.object(Service, 'tosca_model', mock_tosca_model):
service.tosca_template
mock_unzip.assert_not_called()
@mock.patch.object(Service, '_unzip_csar_file')
def test_tosca_template_tosca_model(mock_unzip):
service = Service(name="test")
service._tosca_model = str.encode("test")
service.tosca_template
mock_unzip.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(Service, '_unzip_csar_file')
def test_tosca_template_present(mock_unzip):
service = Service(name="test")
service._tosca_template = "test"
assert service.tosca_template == "test"
mock_unzip.assert_not_called()
@mock.patch.object(Service, 'send_message')
def test_tosca_model(mock_send):
service = Service(name="test")
service.identifier = "toto"
service.tosca_model
mock_send.assert_called_once_with("GET", "Download Tosca Model for test",
"https://sdc.api.be.simpledemo.onap.org:30204/sdc/v1/catalog/services/toto/toscaModel",
headers={'Content-Type': 'application/json', 'Accept': 'application/octet-stream', 'USER_ID': 'cs0008', 'Authorization': 'Basic YWFpOktwOGJKNFNYc3pNMFdYbGhhazNlSGxjc2UyZ0F3ODR2YW9HR21KdlV5MlU=', 'X-ECOMP-InstanceID': 'onapsdk'})
@mock.patch.object(Service, "send_message_json")
def test_add_properties(mock_send_message_json):
service = Service(name="test")
service._identifier = "toto"
service._unique_identifier = "toto"
service._status = const.CERTIFIED
with pytest.raises(StatusError):
service.add_property(Property(name="test", property_type="string"))
service._status = const.DRAFT
service.add_property(Property(name="test", property_type="string"))
mock_send_message_json.assert_called_once()
@mock.patch.object(Service, "send_message_json")
def test_service_components(mock_send_message_json):
service = Service(name="test")
service.unique_identifier = "toto"
mock_send_message_json.return_value = {}
assert len(list(service.components)) == 0
mock_send_message_json.reset_mock()
mock_send_message_json.side_effect = [COMPONENTS, COMPONENT]
components = list(service.components)
assert len(components) == 1
assert mock_send_message_json.call_count == 2
component = components[0]
assert component.actual_component_uid == "374f0a98-a280-43f1-9e6c-00b436782ce7"
assert component.sdc_resource.unique_uuid == "3c027ba1-8d3a-4b59-9394-d748fec5e42c"
def test_component_properties():
sdc_resource = mock.MagicMock()
service = Service(name="test")
service.unique_identifier = "toto"
component = Component(
created_from_csar=False,
actual_component_uid="123",
unique_id="123",
normalized_name="123",
name="123",
origin_type="123",
customization_uuid="123",
tosca_component_name="123",
component_name="123",
component_uid="123",
component_version="123",
sdc_resource=sdc_resource,
parent_sdc_resource=service,
group_instances=None
)
sdc_resource.send_message_json.return_value = {}
assert not len(list(component.properties))
sdc_resource.send_message_json.return_value = COMPONENT_PROPERTIES
properties = list(component.properties)
assert len(properties) == 2
prop1, prop2 = properties
assert prop1.unique_id == "3d9a184f-4268-4a0e-9ddd-252e49670013.vf_module_id"
assert prop1.property_type == "string"
assert prop1.name == "vf_module_id"
assert prop1.value is None
assert prop2.unique_id == "74f79006-ae56-4d58-947e-6a5089000774.skip_post_instantiation_configuration"
assert prop2.property_type == "boolean"
assert prop2.name == "skip_post_instantiation_configuration"
assert prop2.value == "true"
@mock.patch.object(Component, "properties", new_callable=mock.PropertyMock)
def test_component_property_set_value(mock_component_properties):
mock_sdc_resource = mock.MagicMock()
service = Service(name="test")
service.unique_identifier = "toto"
component = Component(
created_from_csar=False,
actual_component_uid="123",
unique_id="123",
normalized_name="123",
name="123",
origin_type="123",
customization_uuid="123",
tosca_component_name="123",
component_name="123",
component_uid="123",
component_version="123",
sdc_resource=mock_sdc_resource,
parent_sdc_resource=service,
group_instances=None
)
mock_component_properties.return_value = [
ComponentProperty(
unique_id="123",
property_type="string",
name="test_property",
component=component
)
]
with pytest.raises(ParameterError):
component.get_property(property_name="non_exists")
prop1 = component.get_property(property_name="test_property")
assert prop1.name == "test_property"
assert prop1.unique_id == "123"
assert prop1.property_type == "string"
assert not prop1.value
prop1.value = "123"
mock_sdc_resource.send_message_json.assert_called_once()
@mock.patch.object(Service, "add_resource")
@mock.patch.object(Service, "add_property")
@mock.patch.object(Service, "declare_input")
def test_declare_resources_and_properties(mock_declare_input, mock_add_property, mock_add_resource):
service = Service(name="test",
resources=[SdcResource()],
properties=[Property(name="test", property_type="string")],
inputs=[Property(name="test", property_type="string")])
service.declare_resources_and_properties()
mock_add_resource.assert_called_once()
mock_add_property.assert_called_once()
mock_declare_input.assert_called_once()
@mock.patch.object(Service, "created")
@mock.patch.object(ServiceCategory, "get")
def test_service_category(mock_resource_category, mock_created):
mock_created.return_value = False
service = Service(name="test")
_ = service.category
mock_resource_category.assert_called_once_with(name="Network Service")
mock_resource_category.reset_mock()
service = Service(name="test", category="test")
_ = service.category
mock_resource_category.assert_called_once_with(name="test")
mock_resource_category.reset_mock()
mock_created.return_value = True
_ = service.category
mock_resource_category.assert_called_once_with(name="test")
def test_service_origin_type():
service = Service(name="test")
assert service.origin_type == "ServiceProxy"
@mock.patch.object(Service, "unique_identifier", new_callable=PropertyMock)
def test_service_metadata_url(mock_uniquie_identifier):
mock_uniquie_identifier.return_value = "1233"
service = Service(name="test")
assert service.metadata_url == f"{service._base_create_url()}/services/1233/filteredDataByParams?include=metadata"
@mock.patch.object(Service, "created")
@mock.patch.object(Service, "send_message_json")
@mock.patch.object(Service, "metadata_url", new_callable=PropertyMock)
def test_service_instantiation_type(mock_metadata_url, mock_send_message_json, mock_created):
mock_created.return_value = False
service = Service(name="test")
assert service.instantiation_type == ServiceInstantiationType.A_LA_CARTE
service = Service(name="test", instantiation_type=ServiceInstantiationType.MACRO)
assert service.instantiation_type == ServiceInstantiationType.MACRO
mock_created.return_value = True
mock_send_message_json.return_value = {"metadata": {"instantiationType": "A-la-carte"}}
service = Service(name="test")
assert service.instantiation_type == ServiceInstantiationType.A_LA_CARTE
mock_send_message_json.return_value = {"metadata": {"instantiationType": "Macro"}}
service = Service(name="test")
assert service.instantiation_type == ServiceInstantiationType.MACRO
@mock.patch.object(Service, "get_all")
def test_service_get_by_unique_uuid(mock_get_all):
mock_get_all.return_value = []
with pytest.raises(ResourceNotFound):
Service.get_by_unique_uuid("test")
mock_service = MagicMock()
mock_service.unique_uuid = "test"
mock_get_all.return_value = [mock_service]
Service.get_by_unique_uuid("test")
@mock.patch.object(Service, "send_message_json")
def test_service_components(mock_send_message_json):
service = Service(name="test")
service.unique_identifier = "toto"
mock_send_message_json.side_effect = [COMPONENTS, COMPONENT, COMPONENTS, COMPONENT, COMPONENTS, COMPONENT]
assert not service.has_vnfs
assert not service.has_pnfs
assert not service.has_vls
mock_send_message_json.side_effect = [COMPONENTS_WITH_ALL_ORIGIN_TYPES, COMPONENT,
COMPONENTS_WITH_ALL_ORIGIN_TYPES, COMPONENT, COMPONENT,
COMPONENTS_WITH_ALL_ORIGIN_TYPES, COMPONENT, COMPONENT, COMPONENT]
assert service.has_vnfs
assert service.has_pnfs
assert service.has_vls
| 40.983784
| 266
| 0.639195
|
c6bc3faa8391507b27fe607d455e4f07486ce67a
| 8,828
|
py
|
Python
|
Code/models/VisualizeLip.py
|
Pooventhiran/VSR
|
de6d23c7fc4633e73a4d9c37e3e55c7561b35525
|
[
"MIT"
] | 4
|
2020-02-03T17:05:08.000Z
|
2021-04-21T12:47:30.000Z
|
Code/models/VisualizeLip.py
|
Pooventhiran/VSR
|
de6d23c7fc4633e73a4d9c37e3e55c7561b35525
|
[
"MIT"
] | null | null | null |
Code/models/VisualizeLip.py
|
Pooventhiran/VSR
|
de6d23c7fc4633e73a4d9c37e3e55c7561b35525
|
[
"MIT"
] | 3
|
2021-10-16T05:38:13.000Z
|
2021-12-29T14:57:10.000Z
|
# Copyright {2017} {Amirsina Torfi}
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The code has minor modifications accordingly for our work
import numpy as np
import cv2
import dlib
import argparse
import os
import skvideo.io
"""
PART1: Construct the argument parse and parse the arguments
"""
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="path to input video file")
ap.add_argument("-f", "--fps", type=int, default=10, help="FPS of output video")
ap.add_argument("-c", "--codec", type=str, default="MJPG", help="codec of output video")
args = vars(ap.parse_args())
args["output"] = os.path.join(os.path.dirname("VisualizeLip.py"), "..", "data")
for spk in os.listdir(args["input"]):
for word in range(1, 11):
for i in range(1, 11):
"""
PART2: Calling and defining required parameters for:
1 - Processing video for extracting each frame.
2 - Lip extraction from frames.
"""
# Dlib requirements.
predictor_path = "./shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
mouth_destination_path = os.path.join(
args["output"], spk, "words", "%02d" % word, "%02d" % i
)
if not os.path.exists(mouth_destination_path):
os.makedirs(mouth_destination_path)
inputparameters = {}
outputparameters = {}
reader = skvideo.io.FFmpegReader(
os.path.join(
args["input"],
spk,
"words",
"%02d" % word,
"%02d" % i,
"word%02dutterance%02d.mp4" % (word, i),
),
inputdict=inputparameters,
outputdict=outputparameters,
)
video_shape = reader.getShape()
(num_frames, h, w, c) = video_shape
print(num_frames, h, w, c)
activation = []
max_counter = 300
total_num_frames = int(video_shape[0])
num_frames = min(total_num_frames, max_counter)
counter = 0
font = cv2.FONT_HERSHEY_SIMPLEX
writer = skvideo.io.FFmpegWriter(
os.path.join(
args["output"],
spk,
"words",
"%02d" % word,
"%02d" % i,
"lip-word%02dutterance%02d.mp4" % (word, i),
)
)
width_crop_max = 0
height_crop_max = 0
for frame in reader.nextFrame():
print("frame_shape:", frame.shape)
if counter > num_frames:
break
detections = detector(frame, 1)
marks = np.zeros((2, 20))
Features_Abnormal = np.zeros((190, 1))
print(len(detections))
if len(detections) > 0:
for k, d in enumerate(detections):
print("Location:", d)
shape = predictor(frame, d)
co = 0
for ii in range(48, 68):
X = shape.part(ii)
A = (X.x, X.y)
marks[0, co] = X.x
marks[1, co] = X.y
co += 1
X_left, Y_left, X_right, Y_right = [
int(np.amin(marks, axis=1)[0]),
int(np.amin(marks, axis=1)[1]),
int(np.amax(marks, axis=1)[0]),
int(np.amax(marks, axis=1)[1]),
]
X_center = (X_left + X_right) / 2.0
Y_center = (Y_left + Y_right) / 2.0
border = 10
X_left_new = X_left - border
Y_left_new = Y_left - border
X_right_new = X_right + border
Y_right_new = Y_right + border
# Width and height for cropping(before and after considering the border).
width_new = X_right_new - X_left_new
height_new = Y_right_new - Y_left_new
width_current = X_right - X_left
height_current = Y_right - Y_left
# Determine the cropping rectangle dimensions(the main purpose is to have a fixed area).
if width_crop_max == 0 and height_crop_max == 0:
width_crop_max = width_new
height_crop_max = height_new
else:
width_crop_max += 1.5 * np.maximum(
width_current - width_crop_max, 0
)
height_crop_max += 1.5 * np.maximum(
height_current - height_crop_max, 0
)
X_left_crop = int(X_center - width_crop_max / 2.0)
X_right_crop = int(X_center + width_crop_max / 2.0)
Y_left_crop = int(Y_center - height_crop_max / 2.0)
Y_right_crop = int(Y_center + height_crop_max / 2.0)
if (
X_left_crop >= 0
and Y_left_crop >= 0
and X_right_crop < w
and Y_right_crop < h
):
mouth = frame[
Y_left_crop:Y_right_crop, X_left_crop:X_right_crop, :
]
mouth_gray = cv2.cvtColor(mouth, cv2.COLOR_RGB2GRAY)
cv2.imwrite(
os.path.join(
mouth_destination_path,
"color_" + "_" + "%03d.jpg" % (counter + 1),
),
mouth_gray,
)
print("The cropped mouth is detected ...")
activation.append(1)
else:
cv2.putText(
frame,
"The full mouth is not detectable. ",
(30, 30),
font,
1,
(0, 255, 255),
2,
)
print("The full mouth is not detectable. ...")
activation.append(0)
else:
cv2.putText(
frame,
"Mouth is not detectable. ",
(30, 30),
font,
1,
(0, 0, 255),
2,
)
print("Mouth is not detectable. ...")
activation.append(0)
if activation[counter] == 1:
cv2.rectangle(
frame,
(X_left_crop, Y_left_crop),
(X_right_crop, Y_right_crop),
(0, 255, 0),
2,
)
print("frame number %d of %d" % (counter, num_frames))
print(
"writing frame %d with activation %d"
% (counter + 1, activation[counter])
)
writer.writeFrame(frame)
counter += 1
writer.close()
| 38.889868
| 113
| 0.415723
|
4f9eb066ebafb2a2d4e3f358e15feb253e4573f0
| 9,657
|
py
|
Python
|
conftest.py
|
wtgee/POCS
|
c88f5d0db3aff8e17c95339ba64249b56b3ceef7
|
[
"MIT"
] | null | null | null |
conftest.py
|
wtgee/POCS
|
c88f5d0db3aff8e17c95339ba64249b56b3ceef7
|
[
"MIT"
] | null | null | null |
conftest.py
|
wtgee/POCS
|
c88f5d0db3aff8e17c95339ba64249b56b3ceef7
|
[
"MIT"
] | null | null | null |
import logging
import os
import stat
import pytest
import tempfile
import shutil
from contextlib import suppress
from _pytest.logging import caplog as _caplog # noqa
from panoptes.pocs import hardware
from panoptes.utils.config.client import set_config
from panoptes.utils.database import PanDB
from panoptes.pocs.utils.logger import get_logger, PanLogger
# TODO download IERS files.
_all_databases = ['file', 'memory']
TESTING_LOG_LEVEL = 'TRACE'
LOGGER_INFO = PanLogger()
logger = get_logger(console_log_file=TESTING_LOG_LEVEL)
logger.enable('panoptes')
# Add a level above TRACE and below DEBUG
logger.level("testing", no=15, icon="🤖", color="<LIGHT-BLUE><white>")
log_fmt = "<lvl>{level:.1s}</lvl> " \
"<light-blue>{time:MM-DD HH:mm:ss.ss!UTC}</>" \
"<blue> ({time:HH:mm:ss.ss})</> " \
"| <c>{name} {function}:{line}</c> | " \
"<lvl>{message}</lvl>"
log_file_path = os.path.expandvars('${PANLOG}/panoptes-testing.log')
startup_message = f' STARTING NEW PYTEST RUN - LOGS: {log_file_path} '
logger.add(log_file_path,
enqueue=True, # multiprocessing
format=log_fmt,
colorize=True,
# TODO decide on these options
backtrace=True,
diagnose=True,
catch=True,
# Start new log file for each testing run.
rotation=lambda msg, _: startup_message in msg,
level=TESTING_LOG_LEVEL)
logger.log('testing', '*' * 25 + startup_message + '*' * 25)
# Make the log file world readable.
os.chmod(log_file_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
def pytest_addoption(parser):
hw_names = ",".join(hardware.get_all_names()) + ' (or all for all hardware)'
db_names = ",".join(_all_databases) + ' (or all for all databases)'
group = parser.getgroup("PANOPTES pytest options")
group.addoption(
"--with-hardware",
nargs='+',
default=[],
help=f"A comma separated list of hardware to test. List items can include: {hw_names}")
group.addoption(
"--without-hardware",
nargs='+',
default=[],
help=f"A comma separated list of hardware to NOT test. List items can include: {hw_names}")
group.addoption(
"--test-databases",
nargs="+",
default=['file'],
help=f"Test databases in the list. List items can include: {db_names}. Note that "
f"travis-ci will test all of "
f"them by default.")
def pytest_collection_modifyitems(config, items):
"""Modify tests to skip or not based on cli options.
Certain tests should only be run when the appropriate hardware is attached.
Other tests fail if real hardware is attached (e.g. they expect there is no
hardware). The names of the types of hardware are in hardware.py, but
include 'mount' and 'camera'. For a test that requires a mount, for
example, the test should be marked as follows:
`@pytest.mark.with_mount`
And the same applies for the names of other types of hardware.
For a test that requires that there be no cameras attached, mark the test
as follows:
`@pytest.mark.without_camera`
"""
# without_hardware is a list of hardware names whose tests we don't want to run.
without_hardware = hardware.get_simulator_names(
simulator=config.getoption('--without-hardware'))
# with_hardware is a list of hardware names for which we have that hardware attached.
with_hardware = hardware.get_simulator_names(simulator=config.getoption('--with-hardware'))
for name in without_hardware:
# User does not want to run tests that interact with hardware called name,
# whether it is marked as with_name or without_name.
if name in with_hardware:
print(f'Warning: {name} in both --with-hardware and --without-hardware')
with_hardware.remove(name)
skip = pytest.mark.skip(reason=f"--without-hardware={name} specified")
with_keyword = f'with_{name}'
without_keyword = f'without_{name}'
for item in items:
if with_keyword in item.keywords or without_keyword in item.keywords:
item.add_marker(skip)
for name in hardware.get_all_names(without=with_hardware):
# We don't have hardware called name, so find all tests that need that
# hardware and mark it to be skipped.
skip = pytest.mark.skip(reason=f"Test needs --with-hardware={name} option to run")
keyword = 'with_' + name
for item in items:
if keyword in item.keywords:
item.add_marker(skip)
def pytest_runtest_logstart(nodeid, location):
"""Signal the start of running a single test item.
This hook will be called before pytest_runtest_setup(),
pytest_runtest_call() and pytest_runtest_teardown() hooks.
Args:
nodeid (str) – full id of the item
location – a triple of (filename, linenum, testname)
"""
with suppress(Exception):
logger.log('testing', '##########' * 8)
logger.log('testing', f' START TEST {nodeid}')
logger.log('testing', '')
def pytest_runtest_logfinish(nodeid, location):
"""Signal the complete finish of running a single test item.
This hook will be called after pytest_runtest_setup(),
pytest_runtest_call() and pytest_runtest_teardown() hooks.
Args:
nodeid (str) – full id of the item
location – a triple of (filename, linenum, testname)
"""
with suppress(Exception):
logger.log('testing', '')
logger.log('testing', f' END TEST {nodeid}')
logger.log('testing', '##########' * 8)
def pytest_runtest_logreport(report):
"""Adds the failure info that pytest prints to stdout into the log."""
if report.skipped or report.outcome != 'failed':
return
with suppress(Exception):
logger.log('testing', '')
logger.log('testing',
f' TEST {report.nodeid} FAILED during {report.when} {report.longreprtext} ')
if report.capstdout:
logger.log('testing',
f'============ Captured stdout during {report.when} {report.capstdout} '
f'============')
if report.capstderr:
logger.log('testing',
f'============ Captured stdout during {report.when} {report.capstderr} '
f'============')
@pytest.fixture(scope='session')
def config_host():
return os.getenv('PANOPTES_CONFIG_HOST', 'localhost')
@pytest.fixture(scope='session')
def config_port():
return os.getenv('PANOPTES_CONFIG_PORT', 6563)
@pytest.fixture(scope='session')
def config_path():
return os.getenv('PANOPTES_CONFIG_FILE', '/var/panoptes/POCS/tests/testing.yaml')
@pytest.fixture
def temp_file(tmp_path):
d = tmp_path
d.mkdir(exist_ok=True)
f = d / 'temp'
yield f
f.unlink(missing_ok=True)
@pytest.fixture(scope='session')
def db_name():
return 'panoptes_testing'
@pytest.fixture(scope='session')
def images_dir(tmpdir_factory):
directory = tmpdir_factory.mktemp('images')
set_config('directories.images', str(directory))
return str(directory)
@pytest.fixture(scope='function', params=_all_databases)
def db_type(request, db_name):
db_list = request.config.option.test_databases
if request.param not in db_list and 'all' not in db_list:
pytest.skip(f"Skipping {request.param} DB, set --test-all-databases=True")
PanDB.permanently_erase_database(request.param, db_name, really='Yes', dangerous='Totally')
return request.param
@pytest.fixture(scope='function')
def db(db_type, db_name):
return PanDB(db_type=db_type, db_name=db_name, connect=True)
@pytest.fixture(scope='function')
def memory_db(db_name):
PanDB.permanently_erase_database('memory', db_name, really='Yes', dangerous='Totally')
return PanDB(db_type='memory', db_name=db_name)
@pytest.fixture(scope='session')
def data_dir():
return os.path.expandvars('${POCS}/tests/data')
@pytest.fixture(scope='function')
def unsolved_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'unsolved.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def solved_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'solved.fits.fz')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def tiny_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'tiny.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def noheader_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'noheader.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def cr2_file(data_dir):
cr2_path = os.path.join(data_dir, 'canon.cr2')
if not os.path.exists(cr2_path):
pytest.skip("No CR2 file found, skipping test.")
return cr2_path
@pytest.fixture()
def caplog(_caplog):
class PropagatedHandler(logging.Handler):
def emit(self, record):
logging.getLogger(record.name).handle(record)
handler_id = logger.add(PropagatedHandler(), format="{message}")
yield _caplog
with suppress(ValueError):
logger.remove(handler_id)
| 34.003521
| 100
| 0.66646
|
f4d5e2f545a684464725314e335c54d55f6a0a6e
| 2,371
|
py
|
Python
|
ops/viewport_display_from_shader.py
|
D4KU/BlenderScripts
|
d847b06ceb9c46de5472e38018b1253dd7397645
|
[
"MIT"
] | 2
|
2020-03-20T03:14:00.000Z
|
2020-03-21T19:49:31.000Z
|
ops/viewport_display_from_shader.py
|
D4KU/BlenderScripts
|
d847b06ceb9c46de5472e38018b1253dd7397645
|
[
"MIT"
] | 2
|
2021-03-02T20:11:23.000Z
|
2021-05-26T21:52:48.000Z
|
ops/viewport_display_from_shader.py
|
D4KU/BlenderScripts
|
d847b06ceb9c46de5472e38018b1253dd7397645
|
[
"MIT"
] | 1
|
2021-03-02T01:19:50.000Z
|
2021-03-02T01:19:50.000Z
|
import bpy
from smorgasbord.common.decorate import register
@register
class ViewportDisplayFromShader(bpy.types.Operator):
bl_idname = "object.viewport_display_from_shader"
bl_label = "Viewport Display from Shader"
bl_description = (
"For each active material of each selected object, search its "
"node tree for a shader node and apply its properties to the "
"material's viewport display properties")
bl_options = {'REGISTER', 'UNDO'}
menus = [bpy.types.MATERIAL_MT_context_menu]
# each supported shader node, ordered by decreasing priority.
# key: node name, value: inputs to read
shader_props = {
"Principled BSDF": ('Base Color', 'Roughness', 'Metallic'),
"Diffuse BSDF": ('Color', 'Roughness'),
"Glass BSDF": ('Color', 'Roughness'),
"Emission": ('Color'),
}
# viewport display attributes of materials
viewport_props = ('diffuse_color', 'roughness', 'metallic')
reverse: bpy.props.BoolProperty(
name="Reverse",
description="Instead, apply to shader node from viewport display",
)
@classmethod
def poll(cls, context):
return bool(context.selected_editable_objects)
def execute(self, context):
for o in context.selected_editable_objects:
mat = o.active_material
if not (mat and mat.node_tree):
continue
for node_name, input_names in self.shader_props.items():
try:
# get shader node
node = mat.node_tree.nodes[node_name]
except KeyError:
# try shader node with next-highest priority
continue
# get input references from their names
inputs = (node.inputs[x] for x in input_names)
# apply matching properties from viewport display to
# shader node, or vice versa
for input, vp_prop in zip(inputs, self.viewport_props):
if self.reverse:
input.default_value = getattr(mat, vp_prop)
else:
setattr(mat, vp_prop, input.default_value)
# we found a fitting shader node, no need to search for
# more
break
return {'FINISHED'}
| 35.38806
| 74
| 0.592999
|
f1b0c8c64b5c89cbc6649eba9483c0c9327d9420
| 7,003
|
py
|
Python
|
inpaint.py
|
HugoSenetaire/vaeac
|
451d34dd4986c52f2f37c508f03ee3db9e7408d3
|
[
"MIT"
] | null | null | null |
inpaint.py
|
HugoSenetaire/vaeac
|
451d34dd4986c52f2f37c508f03ee3db9e7408d3
|
[
"MIT"
] | null | null | null |
inpaint.py
|
HugoSenetaire/vaeac
|
451d34dd4986c52f2f37c508f03ee3db9e7408d3
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
from importlib import import_module
from os import makedirs
from os.path import join
from unicodedata import normalize
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import ToPILImage
from tqdm import tqdm
from datasets import load_dataset, ZipDatasets
from train_utils import extend_batch_tuple
from VAEAC import VAEAC
parser = ArgumentParser(description='Inpaint images using a given model.')
parser.add_argument('--model_dir', type=str, action='store', required=True,
help='Directory with a model and its checkpoints. ' +
'It must be a directory in the root ' +
'of this repository.')
parser.add_argument('--num_samples', type=int, action='store', default=5,
help='Number of different inpaintings per image.')
parser.add_argument('--dataset', type=str, action='store', required=False, default='fashionMNIST_test',
help='The name of dataset of images to inpaint ' +
'(see load_datasets function in datasets.py)')
parser.add_argument('--masks', type=str, action='store', required=False, default= 'fashionMNIST_inpainting_masks',
help='The name of masks dataset of the same length ' +
'as the images dataset. White color (i. e. one ' +
'in each channel) means a pixel to inpaint.')
parser.add_argument('--out_dir', type=str, action='store', required=True,
help='The name of directory where to save ' +
'inpainted images.')
parser.add_argument('--max_image', type=int, action = 'store', default=10)
parser.add_argument('--dataset_root_dir', type= str, action="store", required=True)
parser.add_argument('--use_last_checkpoint', action='store_true',
default=False,
help='By default the model with the best ' +
'validation IWAE (best_checkpoint.tar) is used ' +
'to generate inpaintings. This flag indicates ' +
'that the last model (last_checkpoint.tar) ' +
'should be used instead.')
args = parser.parse_args()
# Default parameters which are not supposed to be changed from user interface
use_cuda = torch.cuda.is_available()
verbose = True
# Non-zero number of workers cause nasty warnings because of some bug in
# multiprocess library. It might be fixed now, so maybe it is time to set it
# to the number of CPU cores in the system.
num_workers = 0
# import the module with the model networks definitions
model_module = import_module(args.model_dir + '.model')
# build VAEAC on top of the imported networks
model = VAEAC(
model_module.reconstruction_log_prob,
model_module.proposal_network,
model_module.prior_network,
model_module.generative_network
)
if use_cuda:
model = model.cuda()
batch_size = model_module.batch_size
sampler = model_module.sampler
mask_generator = model_module.mask_generator
normalize = model_module.normalize
# load the required checkpoint
location = 'cuda' if use_cuda else 'cpu'
checkpoint_path = join(args.model_dir,
'last_checkpoint.tar' if args.use_last_checkpoint
else 'best_checkpoint.tar')
checkpoint = torch.load(checkpoint_path, map_location=location)
model.load_state_dict(checkpoint['model_state_dict'])
# load images and masks datasets, build a dataloader on top of them
dataset = load_dataset(args.dataset, root_dir = args.dataset_root_dir, default_mask= mask_generator, normalize = normalize)
masks = load_dataset(args.masks, root_dir = args.dataset_root_dir, default_mask= mask_generator, normalize=normalize)
dataloader = DataLoader(ZipDatasets(dataset, masks), batch_size=batch_size,
shuffle=False, drop_last=False,
num_workers=num_workers)
# print(model.state_dict()["generative_network.50.weight"])
# saves inpainting to file
def save_img(img, path):
ToPILImage()((img / 2 + 0.5).clamp(0, 1).cpu()).save(path)
# create directory for inpaintings, if not exists
makedirs(args.out_dir, exist_ok=True)
iterator = dataloader
if verbose:
iterator = iterator
image_num = 0
for batch_tuple in iterator:
if image_num > args.max_image :
break
batch, target = batch_tuple[0]
masks = batch_tuple[1]
init_shape = batch.shape[0]
new_batch_tuple = (batch, masks)
# if batch size is less than batch_size, extend it with objects
# from the beginning of the dataset
batch_tuple_extended = extend_batch_tuple(new_batch_tuple, dataloader,
batch_size)
batch_extended, masks_extended = batch_tuple_extended
if use_cuda:
batch_extended = batch_extended.cuda()
masks_extended = masks_extended.cuda()
batch = batch.cuda()
masks = masks.cuda()
# compute imputation distributions parameters
with torch.no_grad():
samples_params = model.generate_samples_params(batch_extended,
masks_extended,
args.num_samples)
samples_params = samples_params[:init_shape]
# save model input, groundtruth and inpaintings to out_dir
for groundtruth, mask, img_samples_params \
in zip(batch, masks, samples_params):
if image_num > args.max_image :
break
# save groundtruth image
save_img(groundtruth,
join(args.out_dir, '%05d_groundtruth.jpg' % image_num))
# to show mask on the model input we use gray color
model_input_visualization = torch.tensor(groundtruth)
model_input_visualization[mask.byte()] = 0.5
# save model input visualization
save_img(mask.byte(),
join(args.out_dir, '%05d_mask.jpg' % image_num))
save_img(model_input_visualization.byte(),
join(args.out_dir, '%05d_input.jpg' % image_num))
# in the model input the unobserved part is zeroed
model_input = torch.tensor(groundtruth)
model_input[mask.byte()] = 0
print("img_sample_params", img_samples_params.shape)
img_samples = sampler(img_samples_params)
print("img_sample", img_samples.shape)
for i, sample in enumerate(img_samples):
sample_filename = join(args.out_dir,
'%05d_sample_%03d.jpg' % (image_num, i))
sample_better_filename = join(args.out_dir,
'%05d_sample_better_%03d.jpg' % (image_num, i))
save_img(sample, sample_filename)
sample[1 - mask.byte()] = 0
sample += model_input
save_img(sample, sample_better_filename)
image_num += 1
| 38.478022
| 123
| 0.651578
|
a22333d22a1a440fc97ce7bf34297be659920ea9
| 148,771
|
py
|
Python
|
pyvista/plotting/plotting.py
|
LucaZampieri/pyvista
|
ae2a7a0559961839c5aa2979228fcdef1f4b188e
|
[
"MIT"
] | null | null | null |
pyvista/plotting/plotting.py
|
LucaZampieri/pyvista
|
ae2a7a0559961839c5aa2979228fcdef1f4b188e
|
[
"MIT"
] | null | null | null |
pyvista/plotting/plotting.py
|
LucaZampieri/pyvista
|
ae2a7a0559961839c5aa2979228fcdef1f4b188e
|
[
"MIT"
] | null | null | null |
"""Pyvista plotting module."""
import collections
import logging
import os
import time
import warnings
from functools import wraps
from threading import Thread
import imageio
import numpy as np
import vtk
from vtk.util import numpy_support as VN
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
import pyvista
import scooby
from pyvista.utilities import (assert_empty_kwargs,
convert_array, convert_string_array, get_array,
is_pyvista_dataset, numpy_to_texture,
raise_not_matching, try_callback, wrap)
from .colors import get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .renderer import Renderer
from .background_renderer import BackgroundRenderer
from .theme import (FONT_KEYS, MAX_N_COLOR_BARS, parse_color,
parse_font_family, rcParams)
from .tools import normalize, opacity_transfer_function
from .widgets import WidgetHelper
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
_ALL_PLOTTERS = {}
def close_all():
"""Close all open/active plotters and clean up memory."""
for key, p in _ALL_PLOTTERS.items():
if not p._closed:
p.close()
p.deep_clean()
_ALL_PLOTTERS.clear()
return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a shape as string descriptor. E.g.:
shape="3|1" means 3 plots on the left and 1 on the right,
shape="4/2" means 4 plots on top of 2 at bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
border_width : float, optional
Width of the border in pixels when enabled.
title : str, optional
Window title of the scalar bar
"""
mouse_position = None
click_position = None
def __new__(cls, *args, **kwargs):
"""Create an instance of base plotter."""
if cls is BasePlotter:
raise TypeError("pyvista.BasePlotter is an abstract class and may not be instantiated.")
return object.__new__(cls)
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=2.0, title=None, splitting_position=None):
"""Initialize base plotter."""
self.image_transparent_background = rcParams['transparent_background']
self.mesh = None
if title is None:
title = rcParams['title']
self.title = str(title)
# by default add border for multiple plots
if border is None:
if shape != (1, 1):
border = True
else:
border = False
# add render windows
self._active_renderer_index = 0
self.renderers = []
if isinstance(shape, str):
if '|' in shape:
n = int(shape.split('|')[0])
m = int(shape.split('|')[1])
rangen = reversed(range(n))
rangem = reversed(range(m))
else:
m = int(shape.split('/')[0])
n = int(shape.split('/')[1])
rangen = range(n)
rangem = range(m)
if splitting_position is None:
splitting_position = rcParams['multi_rendering_splitting_position']
if splitting_position is None:
if n >= m:
xsplit = m/(n+m)
else:
xsplit = 1-n/(n+m)
else:
xsplit = splitting_position
for i in rangen:
arenderer = Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(0, i/n, xsplit, (i+1)/n)
else:
arenderer.SetViewport(i/n, 0, (i+1)/n, xsplit)
self.renderers.append(arenderer)
for i in rangem:
arenderer = Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(xsplit, i/m, 1, (i+1)/m)
else:
arenderer.SetViewport(i/m, xsplit, (i+1)/m, 1)
self.renderers.append(arenderer)
self.shape = (n+m,)
else:
assert_str = '"shape" should be a list, tuple or string descriptor'
assert isinstance(shape, collections.Iterable), assert_str
assert shape[0] > 0, '"shape" must be positive'
assert shape[1] > 0, '"shape" must be positive'
self.shape = shape
for i in reversed(range(shape[0])):
for j in range(shape[1]):
renderer = Renderer(self, border, border_color, border_width)
x0 = i/shape[0]
y0 = j/shape[1]
x1 = (i+1)/shape[0]
y1 = (j+1)/shape[1]
renderer.SetViewport(y0, x0, y1, x1)
self.renderers.append(renderer)
# each render will also have an associated background renderer
self._background_renderers = [None for _ in range(len(self.renderers))]
# This keeps track of scalars names already plotted and their ranges
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
# track if the camera has been setup
# self.camera_set = False
self._first_time = True
# Keep track of the scale
self._labels = []
# Set default style
self._style = vtk.vtkInteractorStyleRubberBandPick()
# this helps managing closed plotters
self._closed = False
# Add self to open plotters
self._id_name = "{}-{}".format(str(hex(id(self))), len(_ALL_PLOTTERS))
_ALL_PLOTTERS[self._id_name] = self
# lighting style
self.lighting = vtk.vtkLightKit()
# self.lighting.SetHeadLightWarmth(1.0)
# self.lighting.SetHeadLightWarmth(1.0)
for renderer in self.renderers:
self.lighting.AddLightsToRenderer(renderer)
renderer.LightFollowCameraOn()
# Key bindings
self.reset_key_events()
#### Manage the active Renderer ####
def loc_to_index(self, loc):
"""Return index of the render window given a location index.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Return
------
idx : int
Index of the render window.
"""
if loc is None:
return self._active_renderer_index
elif isinstance(loc, int):
return loc
elif isinstance(loc, collections.Iterable):
if not len(loc) == 2:
raise AssertionError('"loc" must contain two items')
index_row = loc[0]
index_column = loc[1]
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError('Row index is out of range ({})'.format(self.shape[0]))
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError('Column index is out of range ({})'.format(self.shape[1]))
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
return idxs[index_row, index_column]
def index_to_loc(self, index):
"""Convert a 1D index location to the 2D location on the plotting grid."""
if len(self.shape) == 1:
return index
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
args = np.argwhere(idxs == index)
if len(args) < 1:
raise RuntimeError('Index ({}) is out of range.')
return args[0]
@property
def renderer(self):
"""Return the active renderer."""
return self.renderers[self._active_renderer_index]
def subplot(self, index_row, index_column=None):
"""Set the active subplot.
Parameters
----------
index_row : int
Index of the subplot to activate along the rows.
index_column : int
Index of the subplot to activate along the columns.
"""
if len(self.shape) == 1:
self._active_renderer_index = index_row
return
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError('Row index is out of range ({})'.format(self.shape[0]))
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError('Column index is out of range ({})'.format(self.shape[1]))
self._active_renderer_index = self.loc_to_index((index_row, index_column))
#### Wrap Renderer methods ####
@wraps(Renderer.add_floor)
def add_floor(self, *args, **kwargs):
"""Wrap ``Renderer.add_floor``."""
return self.renderer.add_floor(*args, **kwargs)
@wraps(Renderer.remove_floors)
def remove_floors(self, *args, **kwargs):
"""Wrap ``Renderer.remove_floors``."""
return self.renderer.remove_floors(*args, **kwargs)
@wraps(Renderer.enable_anti_aliasing)
def enable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.enable_anti_aliasing``."""
self.renderer.enable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.disable_anti_aliasing)
def disable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.disable_anti_aliasing``."""
self.renderer.disable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.set_focus)
def set_focus(self, *args, **kwargs):
"""Wrap ``Renderer.set_focus``."""
self.renderer.set_focus(*args, **kwargs)
self.render()
@wraps(Renderer.set_position)
def set_position(self, *args, **kwargs):
"""Wrap ``Renderer.set_position``."""
self.renderer.set_position(*args, **kwargs)
self.render()
@wraps(Renderer.set_viewup)
def set_viewup(self, *args, **kwargs):
"""Wrap ``Renderer.set_viewup``."""
self.renderer.set_viewup(*args, **kwargs)
self.render()
@wraps(Renderer.add_orientation_widget)
def add_orientation_widget(self, *args, **kwargs):
"""Wrap ``Renderer.add_orientation_widget``."""
return self.renderer.add_orientation_widget(*args, **kwargs)
@wraps(Renderer.add_axes)
def add_axes(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes``."""
return self.renderer.add_axes(*args, **kwargs)
@wraps(Renderer.hide_axes)
def hide_axes(self, *args, **kwargs):
"""Wrap ``Renderer.hide_axes``."""
return self.renderer.hide_axes(*args, **kwargs)
@wraps(Renderer.show_axes)
def show_axes(self, *args, **kwargs):
"""Wrap ``Renderer.show_axes``."""
return self.renderer.show_axes(*args, **kwargs)
@wraps(Renderer.update_bounds_axes)
def update_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.update_bounds_axes``."""
return self.renderer.update_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_actor)
def add_actor(self, *args, **kwargs):
"""Wrap ``Renderer.add_actor``."""
return self.renderer.add_actor(*args, **kwargs)
@wraps(Renderer.enable_parallel_projection)
def enable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.enable_parallel_projection``."""
return self.renderer.enable_parallel_projection(*args, **kwargs)
@wraps(Renderer.disable_parallel_projection)
def disable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.disable_parallel_projection``."""
return self.renderer.disable_parallel_projection(*args, **kwargs)
@wraps(Renderer.add_axes_at_origin)
def add_axes_at_origin(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes_at_origin``."""
return self.renderer.add_axes_at_origin(*args, **kwargs)
@wraps(Renderer.show_bounds)
def show_bounds(self, *args, **kwargs):
"""Wrap ``Renderer.show_bounds``."""
return self.renderer.show_bounds(*args, **kwargs)
@wraps(Renderer.add_bounds_axes)
def add_bounds_axes(self, *args, **kwargs):
"""Wrap ``add_bounds_axes``."""
return self.renderer.add_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_bounding_box)
def add_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.add_bounding_box``."""
return self.renderer.add_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounding_box)
def remove_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounding_box``."""
return self.renderer.remove_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounds_axes)
def remove_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounds_axes``."""
return self.renderer.remove_bounds_axes(*args, **kwargs)
@wraps(Renderer.show_grid)
def show_grid(self, *args, **kwargs):
"""Wrap ``Renderer.show_grid``."""
return self.renderer.show_grid(*args, **kwargs)
@wraps(Renderer.set_scale)
def set_scale(self, *args, **kwargs):
"""Wrap ``Renderer.set_scale``."""
return self.renderer.set_scale(*args, **kwargs)
@wraps(Renderer.enable_eye_dome_lighting)
def enable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.enable_eye_dome_lighting``."""
return self.renderer.enable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.disable_eye_dome_lighting)
def disable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.disable_eye_dome_lighting``."""
return self.renderer.disable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.reset_camera)
def reset_camera(self, *args, **kwargs):
"""Wrap ``Renderer.reset_camera``."""
self.renderer.reset_camera(*args, **kwargs)
self.render()
@wraps(Renderer.isometric_view)
def isometric_view(self, *args, **kwargs):
"""Wrap ``Renderer.isometric_view``."""
return self.renderer.isometric_view(*args, **kwargs)
@wraps(Renderer.view_isometric)
def view_isometric(self, *args, **kwarg):
"""Wrap ``Renderer.view_isometric``."""
return self.renderer.view_isometric(*args, **kwarg)
@wraps(Renderer.view_vector)
def view_vector(self, *args, **kwarg):
"""Wrap ``Renderer.view_vector``."""
return self.renderer.view_vector(*args, **kwarg)
@wraps(Renderer.view_xy)
def view_xy(self, *args, **kwarg):
"""Wrap ``Renderer.view_xy``."""
return self.renderer.view_xy(*args, **kwarg)
@wraps(Renderer.view_yx)
def view_yx(self, *args, **kwarg):
"""Wrap ``Renderer.view_yx``."""
return self.renderer.view_yx(*args, **kwarg)
@wraps(Renderer.view_xz)
def view_xz(self, *args, **kwarg):
"""Wrap ``Renderer.view_xz``."""
return self.renderer.view_xz(*args, **kwarg)
@wraps(Renderer.view_zx)
def view_zx(self, *args, **kwarg):
"""Wrap ``Renderer.view_zx``."""
return self.renderer.view_zx(*args, **kwarg)
@wraps(Renderer.view_yz)
def view_yz(self, *args, **kwarg):
"""Wrap ``Renderer.view_yz``."""
return self.renderer.view_yz(*args, **kwarg)
@wraps(Renderer.view_zy)
def view_zy(self, *args, **kwarg):
"""Wrap ``Renderer.view_zy``."""
return self.renderer.view_zy(*args, **kwarg)
@wraps(Renderer.disable)
def disable(self, *args, **kwarg):
"""Wrap ``Renderer.disable``."""
return self.renderer.disable(*args, **kwarg)
@wraps(Renderer.enable)
def enable(self, *args, **kwarg):
"""Wrap ``Renderer.enable``."""
return self.renderer.enable(*args, **kwarg)
@wraps(Renderer.enable_depth_peeling)
def enable_depth_peeling(self, *args, **kwargs):
"""Wrap ``Renderer.enable_depth_peeling``."""
if hasattr(self, 'ren_win'):
result = self.renderer.enable_depth_peeling(*args, **kwargs)
if result:
self.ren_win.AlphaBitPlanesOn()
return result
@wraps(Renderer.disable_depth_peeling)
def disable_depth_peeling(self):
"""Wrap ``Renderer.disable_depth_peeling``."""
if hasattr(self, 'ren_win'):
self.ren_win.AlphaBitPlanesOff()
return self.renderer.disable_depth_peeling()
@wraps(Renderer.get_default_cam_pos)
def get_default_cam_pos(self, *args, **kwargs):
"""Wrap ``Renderer.get_default_cam_pos``."""
return self.renderer.get_default_cam_pos(*args, **kwargs)
@wraps(Renderer.remove_actor)
def remove_actor(self, actor, reset_camera=False):
"""Wrap ``Renderer.remove_actor``."""
for renderer in self.renderers:
renderer.remove_actor(actor, reset_camera)
return True
#### Properties from Renderer ####
@property
def camera(self):
"""Return the active camera of the active renderer."""
return self.renderer.camera
@camera.setter
def camera(self, camera):
"""Set the active camera for the rendering scene."""
self.renderer.camera = camera
@property
def camera_set(self):
"""Return if the camera of the active renderer has been set."""
return self.renderer.camera_set
@camera_set.setter
def camera_set(self, is_set):
"""Set if the camera has been set on the active renderer."""
self.renderer.camera_set = is_set
@property
def bounds(self):
"""Return the bounds of the active renderer."""
return self.renderer.bounds
@property
def length(self):
"""Return the length of the diagonal of the bounding box of the scene."""
return self.renderer.length
@property
def center(self):
"""Return the center of the active renderer."""
return self.renderer.center
@property
def _scalar_bar_slots(self):
"""Return the scalar bar slots of the active renderer."""
return self.renderer._scalar_bar_slots
@property
def _scalar_bar_slot_lookup(self):
"""Return the scalar bar slot lookup of the active renderer."""
return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
"""Set the scalar bar slots of the active renderer."""
self.renderer._scalar_bar_slots = value
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
"""Set the scalar bar slot lookup of the active renderer."""
self.renderer._scalar_bar_slot_lookup = value
@property
def scale(self):
"""Return the scaling of the active renderer."""
return self.renderer.scale
@scale.setter
def scale(self, scale):
"""Set the scaling of the active renderer."""
return self.renderer.set_scale(*scale)
@property
def camera_position(self):
"""Return camera position of the active render window."""
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the active render window."""
self.renderer.camera_position = camera_location
@property
def background_color(self):
"""Return the background color of the first render window."""
return self.renderers[0].GetBackground()
@background_color.setter
def background_color(self, color):
"""Set the background color of all the render windows."""
self.set_background(color)
#### Properties of the BasePlotter ####
@property
def window_size(self):
"""Return the render window size."""
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
"""Set the render window size."""
self.ren_win.SetSize(window_size[0], window_size[1])
@property
def image_depth(self):
"""Return a depth image representing current render window.
Helper attribute for ``get_image_depth``.
"""
return self.get_image_depth()
@property
def image(self):
"""Return an image array of current render window."""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
return self.last_image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
if self.image_transparent_background:
ifilter.SetInputBufferTypeToRGBA()
else:
ifilter.SetInputBufferTypeToRGB()
return self._run_image_filter(ifilter)
#### Everything else ####
def render(self):
"""Render the main window.
If this is called before ``show()``, nothing will happen.
"""
if hasattr(self, 'ren_win') and not self._first_time:
self.ren_win.Render()
# Not sure if this is ever needed but here as a reminder
# if hasattr(self, 'iren') and not self._first_time:
# self.iren.Render()
return
def add_key_event(self, key, callback):
"""Add a function to callback when the given key is pressed.
These are non-unique - thus a key could map to many callback
functions. The callback function must not have any arguments.
Parameters
----------
key : str
The key to trigger the event
callback : callable
A callable that takes no arguments
"""
if not hasattr(callback, '__call__'):
raise TypeError('callback must be callable.')
self._key_press_event_callbacks[key].append(callback)
def _add_observer(self, event, call):
if hasattr(self, 'iren'):
self._observers[event] = self.iren.AddObserver(event, call)
def _remove_observer(self, event):
if hasattr(self, 'iren') and event in self._observers:
self.iren.RemoveObserver(event)
del self._observers[event]
def clear_events_for_key(self, key):
"""Remove the callbacks associated to the key."""
self._key_press_event_callbacks.pop(key)
def store_mouse_position(self, *args):
"""Store mouse position."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.mouse_position = self.iren.GetEventPosition()
def store_click_position(self, *args):
"""Store click position in viewport coordinates."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.click_position = self.iren.GetEventPosition()
self.mouse_position = self.click_position
def track_mouse_position(self):
"""Keep track of the mouse position.
This will potentially slow down the interactor. No callbacks supported
here - use :func:`pyvista.BasePlotter.track_click_position` instead.
"""
if hasattr(self, "iren"):
self._add_observer(vtk.vtkCommand.MouseMoveEvent,
self.store_mouse_position)
def untrack_mouse_position(self):
"""Stop tracking the mouse position."""
self._remove_observer(vtk.vtkCommand.MouseMoveEvent)
def track_click_position(self, callback=None, side="right",
viewport=False):
"""Keep track of the click position.
By default, it only tracks right clicks.
Parameters
----------
callback : callable
A callable method that will use the click position. Passes the
click position as a length two tuple.
side : str
The side of the mouse for the button to track (left or right).
Default is left. Also accepts ``'r'`` or ``'l'``.
viewport: bool
If ``True``, uses the normalized viewport coordinate system
(values between 0.0 and 1.0 and support for HiDPI) when passing the
click position to the callback
"""
if not hasattr(self, "iren"):
return
side = str(side).lower()
if side in ["right", "r"]:
event = vtk.vtkCommand.RightButtonPressEvent
elif side in ["left", "l"]:
event = vtk.vtkCommand.LeftButtonPressEvent
else:
raise TypeError("Side ({}) not supported. Try `left` or `right`".format(side))
def _click_callback(obj, event):
self.store_click_position()
if hasattr(callback, '__call__'):
if viewport:
try_callback(callback, self.click_position)
else:
try_callback(callback, self.pick_click_position())
self._add_observer(event, _click_callback)
def untrack_click_position(self):
"""Stop tracking the click position."""
if hasattr(self, "_click_observer"):
self.iren.RemoveObserver(self._click_observer)
del self._click_observer
def _prep_for_close(self):
"""Make sure a screenshot is acquired before closing.
This doesn't actually close anything! It just preps the plotter for
closing.
"""
# Grab screenshot right before renderer closes
self.last_image = self.screenshot(True, return_img=True)
self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
"""Increment point size and line width of all actors.
For every actor in the scene, increment both its point size and
line width by the given value.
"""
for renderer in self.renderers:
for actor in renderer._actors.values():
if hasattr(actor, "GetProperty"):
prop = actor.GetProperty()
if hasattr(prop, "SetPointSize"):
prop.SetPointSize(prop.GetPointSize() + increment)
if hasattr(prop, "SetLineWidth"):
prop.SetLineWidth(prop.GetLineWidth() + increment)
self.render()
return
def reset_key_events(self):
"""Reset all of the key press events to their defaults."""
self._key_press_event_callbacks = collections.defaultdict(list)
self.add_key_event('q', self._prep_for_close) # Add no matter what
b_left_down_callback = lambda: self._add_observer('LeftButtonPressEvent', self.left_button_down)
self.add_key_event('b', b_left_down_callback)
self.add_key_event('v', lambda: self.isometric_view_interactive())
self.add_key_event('f', self.fly_to_mouse_position)
self.add_key_event('C', lambda: self.enable_cell_picking())
self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
def key_press_event(self, obj, event):
"""Listen for key press event."""
try:
key = self.iren.GetKeySym()
log.debug('Key %s pressed' % key)
self._last_key = key
if key in self._key_press_event_callbacks.keys():
# Note that defaultdict's will never throw a key error
callbacks = self._key_press_event_callbacks[key]
for func in callbacks:
func()
except Exception as e:
log.error('Exception encountered for keypress "%s": %s' % (key, e))
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click."""
# Get 2D click location on window
click_pos = self.iren.GetEventPosition()
# Get corresponding click location in the 3D plot
picker = vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
def update_style(self):
"""Update the camera interactor style."""
if not hasattr(self, '_style'):
self._style = vtk.vtkInteractorStyleTrackballCamera()
if hasattr(self, 'iren'):
return self.iren.SetInteractorStyle(self._style)
def enable_trackball_style(self):
"""Set the interactive style to trackball camera.
The trackball camera is the default interactor style.
"""
self._style = vtk.vtkInteractorStyleTrackballCamera()
return self.update_style()
def enable_trackball_actor_style(self):
"""Set the interactive style to trackball actor.
This allows to rotate actors around the scene.
"""
self._style = vtk.vtkInteractorStyleTrackballActor()
return self.update_style()
def enable_image_style(self):
"""Set the interactive style to image.
Controls:
- Left Mouse button triggers window level events
- CTRL Left Mouse spins the camera around its view plane normal
- SHIFT Left Mouse pans the camera
- CTRL SHIFT Left Mouse dollys (a positional zoom) the camera
- Middle mouse button pans the camera
- Right mouse button dollys the camera.
- SHIFT Right Mouse triggers pick events
"""
self._style = vtk.vtkInteractorStyleImage()
return self.update_style()
def enable_joystick_style(self):
"""Set the interactive style to joystick.
It allows the user to move (rotate, pan, etc.) the camera, the point of
view for the scene. The position of the mouse relative to the center of
the scene determines the speed at which the camera moves, and the speed
of the mouse movement determines the acceleration of the camera, so the
camera continues to move even if the mouse if not moving.
For a 3-button mouse, the left button is for rotation, the right button
for zooming, the middle button for panning, and ctrl + left button for
spinning. (With fewer mouse buttons, ctrl + shift + left button is
for zooming, and shift + left button is for panning.)
"""
self._style = vtk.vtkInteractorStyleJoystickCamera()
return self.update_style()
def enable_zoom_style(self):
"""Set the interactive style to rubber band zoom.
This interactor style allows the user to draw a rectangle in the render
window using the left mouse button. When the mouse button is released,
the current camera zooms by an amount determined from the shorter side
of the drawn rectangle.
"""
self._style = vtk.vtkInteractorStyleRubberBandZoom()
return self.update_style()
def enable_terrain_style(self):
"""Set the interactive style to terrain.
Used to manipulate a camera which is viewing a scene with a natural
view up, e.g., terrain. The camera in such a scene is manipulated by
specifying azimuth (angle around the view up vector) and elevation
(the angle from the horizon).
"""
self._style = vtk.vtkInteractorStyleTerrain()
return self.update_style()
def enable_rubber_band_style(self):
"""Set the interactive style to rubber band picking.
This interactor style allows the user to draw a rectangle in the render
window by hitting 'r' and then using the left mouse button.
When the mouse button is released, the attached picker operates on the
pixel in the center of the selection rectangle. If the picker happens to
be a vtkAreaPicker it will operate on the entire selection rectangle.
When the 'p' key is hit the above pick operation occurs on a 1x1
rectangle. In other respects it behaves the same as its parent class.
"""
self._style = vtk.vtkInteractorStyleRubberBandPick()
return self.update_style()
def hide_axes_all(self):
"""Hide the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.hide_axes()
return
def show_axes_all(self):
"""Show the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.show_axes()
return
def isometric_view_interactive(self):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.GetInteractorStyle()
renderer = interactor.GetCurrentRenderer()
if renderer is None:
renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""Update window, redraw, process messages query.
Parameters
----------
stime : int, optional
Duration of timer that interrupt vtkRenderWindowInteractor in
milliseconds.
force_redraw : bool, optional
Call ``render`` immediately.
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if not hasattr(self, 'iren'):
return
update_rate = self.iren.GetDesiredUpdateRate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.CreateRepeatingTimer(stime)
self.iren.Start()
self.iren.DestroyTimer(self.right_timer_id)
self.render()
Plotter.last_update_time = curr_time
elif force_redraw:
self.render()
def add_mesh(self, mesh, color=None, style=None, scalars=None,
clim=None, show_edges=None, edge_color=None,
point_size=5.0, line_width=None, opacity=1.0,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=True, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None, show_scalar_bar=None,
stitle=None, multi_colors=False, name=None, texture=None,
render_points_as_spheres=None, render_lines_as_tubes=False,
smooth_shading=False, ambient=0.0, diffuse=1.0, specular=0.0,
specular_power=100.0, nan_color=None, nan_opacity=1.0,
culling=None, rgb=False, categories=False,
use_transparency=False, below_color=None, above_color=None,
annotations=None, pickable=True, preference="point",
log_scale=False, **kwargs):
"""Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.
This method is using a mesh representation to view the surfaces
and/or geometry of datasets. For volume rendering, see
:func:`pyvista.BasePlotter.add_volume`.
Parameters
----------
mesh : pyvista.Common or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ
points.
color : string or 3 item list, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
style : string, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``, ``style='points'``.
Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
wireframe of the outer geometry.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active scalars are
used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : string or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also applicable
when style='points'. Default ``5.0``
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a siblge float value is given, it will be
the global opacity of the mesh and uniformly applied everywhere -
should be between 0 and 1. A string can also be specified to map
the scalars range to a predefined opacity transfer function
(options include: 'linear', 'linear_r', 'geom', 'geom_r').
A string could also be used to map a scalars array from the mesh to
the opacity (must have same number of elements as the
``scalars`` argument). Or you can pass a custum made transfer
function that is an array either ``n_colors`` in length or shorter.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
lighting : bool, optional
Enable or disable view direction lighting. Default False.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
result is showing colors that are not present in the color map.
cmap : str, list, optional
Name of the Matplotlib colormap to us when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of the
the scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or boolean, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
render_points_as_spheres : bool, optional
render_lines_as_tubes : bool, optional
smooth_shading : bool, optional
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0
specular : float, optional
The specular lighting coefficient. Default 0.0
specular_power : float, optional
The specular power. Between 0.0 and 128.0
nan_color : string or 3 item list, optional, defaults to gray
The color to use for all ``NaN`` values in the plotted scalar
array.
nan_opacity : float, optional
Opacity of ``NaN`` values. Should be between 0 and 1.
Default 1.0
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults ``False``.
rgb : bool, optional
If an 2 dimensional array is passed as the scalars, plot those
values as RGB(A) colors! ``rgba`` is also accepted alias for this.
Opacity (the A) is optional.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond to
transparency.
below_color : string or 3 item list, optional
Solid color for values below the scalars range (``clim``). This
will automatically set the scalar bar ``below_label`` to
``'Below'``
above_color : string or 3 item list, optional
Solid color for values below the scalars range (``clim``). This
will automatically set the scalar bar ``above_label`` to
``'Above'``
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are the
the string annotations.
pickable : bool
Set whether this mesh is pickable
Return
------
actor: vtk.vtkActor
VTK actor of the mesh.
"""
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not is_pyvista_dataset(mesh):
raise TypeError('Object type ({}) not supported for plotting in PyVista.'.format(type(mesh)))
##### Parse arguments to be used for all meshes #####
if scalar_bar_args is None:
scalar_bar_args = {}
if show_edges is None:
show_edges = rcParams['show_edges']
if edge_color is None:
edge_color = rcParams['edge_color']
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if lighting is None:
lighting = rcParams['lighting']
# supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if render_points_as_spheres is None:
render_points_as_spheres = rcParams['render_points_as_spheres']
if name is None:
name = '{}({})'.format(type(mesh).__name__, mesh.memory_address)
if nan_color is None:
nan_color = rcParams['nan_color']
nan_color = list(parse_color(nan_color))
nan_color.append(nan_opacity)
if color is True:
color = rcParams['color']
if texture is False:
texture = None
if culling is True:
culling = 'backface'
rgb = kwargs.pop('rgba', rgb)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
##### Handle composite datasets #####
if isinstance(mesh, pyvista.MultiBlock):
# first check the scalars
if clim is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalars specified
if isinstance(scalars, str):
clim = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise RuntimeError('scalars array must be given as a string name for multiblock datasets.')
the_arguments = locals()
the_arguments.pop('self')
the_arguments.pop('mesh')
the_arguments.pop('kwargs')
if multi_colors:
# Compute unique colors for each index of the block
if has_matplotlib:
from itertools import cycle
cycler = matplotlib.rcParams['axes.prop_cycle']
colors = cycle(cycler)
else:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
if not is_pyvista_dataset(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_pyvista_dataset(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
# Note that a block can exist but be None type
# or it could have zeros points (be empty) after filtering
continue
# Now check that scalars is available for this dataset
if isinstance(data, vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
## Add to the scene
the_arguments['color'] = color
the_arguments['scalars'] = ts
the_arguments['name'] = next_name
the_arguments['texture'] = None
a = self.add_mesh(data, **the_arguments)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
##### Plot a single PyVista mesh #####
# Compute surface normals if using smooth shading
if smooth_shading:
# extract surface if mesh is exterior
if not isinstance(mesh, pyvista.PolyData):
grid = mesh
mesh = grid.extract_surface()
ind = mesh.point_arrays['vtkOriginalPointIds']
# remap scalars
if isinstance(scalars, np.ndarray):
scalars = scalars[ind]
mesh.compute_normals(cell_normals=False, inplace=True)
if mesh.n_points < 1:
raise RuntimeError('Empty meshes cannot be plotted. Input mesh has zero points.')
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalars components are not vectors/tuples
scalars = mesh.active_scalars_name
# Don't allow plotting of string arrays by default
if scalars is not None:# and np.issubdtype(mesh.active_scalars.dtype, np.number):
if stitle is None:
stitle = scalars
else:
scalars = None
# set main values
self.mesh = mesh
self.mapper = make_mapper(vtk.vtkDataSetMapper)
self.mapper.SetInputData(self.mesh)
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
actor, prop = self.add_actor(self.mapper,
reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable)
# Make sure scalars is a numpy array after this point
original_scalar_name = None
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
original_scalar_name = scalars
scalars = get_array(mesh, scalars,
preference=preference, err=True)
if stitle is None:
stitle = original_scalar_name
if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)):
raise TypeError('Invalid texture type ({})'.format(type(texture)))
if mesh.GetPointData().GetTCoords() is None:
raise AssertionError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# Handle making opacity array =========================================
_custom_opac = False
if isinstance(opacity, str):
try:
# Get array from mesh
opacity = get_array(mesh, opacity,
preference=preference, err=True)
opacity = normalize(opacity)
_custom_opac = True
except:
# Or get opacity transfer function
opacity = opacity_transfer_function(opacity, n_colors)
else:
if scalars.shape[0] != opacity.shape[0]:
raise RuntimeError('Opacity array and scalars array must have the same number of elements.')
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
pass
else:
opacity = opacity_transfer_function(opacity, n_colors)
if use_transparency and np.max(opacity) <= 1.0:
opacity = 1 - opacity
elif use_transparency and isinstance(opacity, np.ndarray):
opacity = 255 - opacity
# Scalars formatting ==================================================
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
# Set the array title for when it is added back to the mesh
if _custom_opac:
title = '__custom_rgba'
elif stitle is None:
title = 'Data'
else:
title = stitle
if scalars is not None:
# if scalars is a string, then get the first array found with that name
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
_using_labels = False
if not np.issubdtype(scalars.dtype, np.number):
# raise TypeError('Non-numeric scalars are currently not supported for plotting.')
# TODO: If str array, digitive and annotate
cats, scalars = np.unique(scalars.astype('|S'), return_inverse=True)
values = np.unique(scalars)
clim = [np.min(values) - 0.5, np.max(values) + 0.5]
title = '{}-digitized'.format(title)
n_colors = len(cats)
scalar_bar_args.setdefault('n_labels', 0)
_using_labels = True
if rgb:
if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')
if scalars.ndim != 1:
if rgb:
pass
elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
scalars = np.linalg.norm(scalars.copy(), axis=1)
title = '{}-normed'.format(title)
else:
scalars = scalars.ravel()
if scalars.dtype == np.bool:
scalars = scalars.astype(np.float)
def prepare_mapper(scalars):
# Scalars interpolation approach
if scalars.shape[0] == mesh.n_points:
self.mesh._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == mesh.n_cells:
self.mesh._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, mesh)
# Common tasks
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
if rgb or _custom_opac:
self.mapper.SetColorModeToDirectScalars()
else:
self.mapper.SetColorModeToMapScalars()
return
prepare_mapper(scalars)
table = self.mapper.GetLookupTable()
if log_scale:
table.SetScaleToLog10()
if _using_labels:
table.SetAnnotations(convert_array(values), convert_string_array(cats))
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if np.any(clim) and not rgb:
self.mapper.scalar_range = clim[0], clim[1]
table.SetNanColor(nan_color)
if above_color:
table.SetUseAboveRangeColor(True)
table.SetAboveRangeColor(*parse_color(above_color, opacity=1))
scalar_bar_args.setdefault('above_label', 'Above')
if below_color:
table.SetUseBelowRangeColor(True)
table.SetBelowRangeColor(*parse_color(below_color, opacity=1))
scalar_bar_args.setdefault('below_label', 'Below')
if cmap is not None:
if not has_matplotlib:
cmap = None
logging.warning('Please install matplotlib for color maps.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
ctable = cmap(np.linspace(0, 1, n_colors))*255
ctable = ctable.astype(np.uint8)
# Set opactities
if isinstance(opacity, np.ndarray) and not _custom_opac:
ctable[:,-1] = opacity
if flip_scalars:
ctable = np.ascontiguousarray(ctable[::-1])
table.SetTable(VN.numpy_to_vtk(ctable))
if _custom_opac:
hue = normalize(scalars, minimum=clim[0], maximum=clim[1])
scalars = cmap(hue)[:, :3]
# combine colors and alpha into a Nx4 matrix
scalars = np.concatenate((scalars, opacity[:, None]), axis=1)
scalars = (scalars * 255).astype(np.uint8)
prepare_mapper(scalars)
else: # no cmap specified
if flip_scalars:
table.SetHueRange(0.0, 0.66667)
else:
table.SetHueRange(0.66667, 0.0)
else:
self.mapper.SetScalarModeToUseFieldData()
# Set actor properties ================================================
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = rcParams['outline_color']
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise Exception('Invalid style. Must be one of the following:\n'
'\t"surface"\n'
'\t"wireframe"\n'
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
if smooth_shading:
prop.SetInterpolationToPhong()
else:
prop.SetInterpolationToFlat()
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
geom = pyvista.single_triangle()
if scalars is not None:
geom = pyvista.Box()
rgb_color = parse_color('black')
geom.points -= geom.center
self._labels.append([geom, label, rgb_color])
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
# Add scalar bar if available
if stitle is not None and show_scalar_bar and (not rgb or _custom_opac):
self.add_scalar_bar(stitle, **scalar_bar_args)
self.renderer.Modified()
return actor
def add_volume(self, volume, scalars=None, clim=None, resolution=None,
opacity='linear', n_colors=256, cmap=None, flip_scalars=False,
reset_camera=None, name=None, ambient=0.0, categories=False,
culling=False, multi_colors=False,
blending='composite', mapper=None,
stitle=None, scalar_bar_args=None, show_scalar_bar=None,
annotations=None, pickable=True, preference="point",
opacity_unit_distance=None, shade=False,
diffuse=0.7, specular=0.2, specular_power=10.0, **kwargs):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If ``scalars`` is
``None``, then the active scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
opacity : string or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custum made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to us when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored sequentially as 'Reds', 'Greens',
'Blues', and 'Grays'.
blending : str, optional
Blending mode for visualisation of the input object(s). Can be
one of 'additive', 'maximum', 'minimum', 'composite', or
'average'. Defaults to 'additive'.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and ``'smart'``.
If ``None`` the ``"volume_mapper"`` in the ``rcParams`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of the
the scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are the
the string annotations.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity transfer
function is defined. Meaning that over that distance, a given
opacity (from the transfer function) is accumulated. This is
adjusted for the actual sampling distance during rendering. By
default, this is the length of the diagonal of the bounding box of
the volume divided by the dimensions.
shade : bool
Default off. If shading is turned on, the mapper may perform
shading calculations - in some cases shading does not apply
(for example, in a maximum intensity projection) and therefore
shading will not be performed even if this flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0
specular : float, optional
The specular lighting coefficient. Default 0.0
specular_power : float, optional
The specular power. Between 0.0 and 128.0
Return
------
actor: vtk.vtkVolume
VTK volume of the input data.
"""
# Handle default arguments
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
if scalar_bar_args is None:
scalar_bar_args = {}
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if culling is True:
culling = 'backface'
if mapper is None:
mapper = rcParams["volume_mapper"]
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1,1,1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError('Object type ({}) not supported for plotting in PyVista.'.format(type(volume)))
else:
# HACK: Make a copy so the original object is not altered.
# Also, place all data on the nodes as issues arise when
# volume rendering on the cells.
volume = volume.cell_data_to_point_data()
if name is None:
name = '{}({})'.format(type(volume).__name__, volume.memory_address)
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(block, resolution=block_resolution, opacity=opacity,
n_colors=n_colors, cmap=color, flip_scalars=flip_scalars,
reset_camera=reset_camera, name=next_name,
ambient=ambient, categories=categories,
culling=culling, clim=clim,
mapper=mapper, pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade, diffuse=diffuse, specular=specular,
specular_power=specular_power)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError('Type {} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.'.format(type(volume)))
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
if stitle is None:
stitle = volume.active_scalars_info[1]
else:
raise RuntimeError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
##############
title = 'Data' if stitle is None else stitle
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars,
preference=preference, err=True)
if stitle is None:
stitle = title
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool or scalars.dtype == np.uint8:
scalars = scalars.astype(np.float)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': vtk.vtkGPUVolumeRayCastMapper,
'open_gl': vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise RuntimeError('Mapper ({}) unknown. Available volume mappers include: {}'.format(mapper, ', '.join(mappers.keys())))
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
###############
scalars = scalars.astype(np.float)
with np.errstate(invalid='ignore'):
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
if cmap is not None:
if not has_matplotlib:
cmap = None
raise RuntimeError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:,3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(VN.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError('Blending mode \'{}\' invalid. '.format(blending) +
'Please choose one ' + 'of \'additive\', '
'\'composite\', \'minimum\' or ' + '\'maximum\'.')
self.mapper.Update()
self.volume = vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(self.volume, reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable)
# Add scalar bar
if stitle is not None and show_scalar_bar:
self.add_scalar_bar(stitle, **scalar_bar_args)
self.renderer.Modified()
return actor
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
2 item list
The new range of scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise RuntimeError('This plotter does not have an active mapper.')
self.mapper.scalar_range = clim
return
# Use the name to find the desired actor
def update_mapper(mapper_helper):
mapper_helper.scalar_range = clim
return
try:
for mh in self._scalar_bar_mappers[name]:
update_mapper(mh)
except KeyError:
raise KeyError('Name ({}) not valid/not found in this plotter.')
return
def clear(self):
"""Clear plot by removing all actors and properties."""
for renderer in self.renderers:
renderer.clear()
for renderer in self._background_renderers:
if renderer is not None:
renderer.clear()
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
self.mesh = None
def link_views(self, views=0):
"""Link the views' cameras.
Parameters
----------
views : int | tuple or list
If ``views`` is int, link the views to the given view
index or if ``views`` is a tuple or a list, link the given
views cameras.
"""
if isinstance(views, int):
for renderer in self.renderers:
renderer.camera = self.renderers[views].camera
elif isinstance(views, collections.Iterable):
for view_index in views:
self.renderers[view_index].camera = \
self.renderers[views[0]].camera
else:
raise TypeError('Expected type is int, list or tuple:'
'{} is given'.format(type(views)))
def unlink_views(self, views=None):
"""Unlink the views' cameras.
Parameters
----------
views : None | int | tuple or list
If ``views`` is None unlink all the views, if ``views``
is int unlink the selected view's camera or if ``views``
is a tuple or a list, unlink the given views cameras.
"""
if views is None:
for renderer in self.renderers:
renderer.camera = vtk.vtkCamera()
renderer.reset_camera()
elif isinstance(views, int):
self.renderers[views].camera = vtk.vtkCamera()
self.renderers[views].reset_camera()
elif isinstance(views, collections.Iterable):
for view_index in views:
self.renderers[view_index].camera = vtk.vtkCamera()
self.renderers[view_index].reset_camera()
else:
raise TypeError('Expected type is None, int, list or tuple:'
'{} is given'.format(type(views)))
def add_scalar_bar(self, title=None, n_labels=5, italic=False,
bold=False, title_font_size=None,
label_font_size=None, color=None,
font_family=None, shadow=False, mapper=None,
width=None, height=None, position_x=None,
position_y=None, vertical=None,
interactive=None, fmt=None, use_opacity=True,
outline=False, nan_annotation=False,
below_label=None, above_label=None,
background_color=None, n_colors=None, fill=False):
"""Create scalar bar using the ranges as set by the last input mesh.
Parameters
----------
title : string, optional
Title of the scalar bar. Default None
n_labels : int, optional
Number of labels to use for the scalar bar.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
title_font_size : float, optional
Sets the size of the title font. Defaults to None and is sized
automatically.
label_font_size : float, optional
Sets the size of the title font. Defaults to None and is sized
automatically.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
width : float, optional
The percentage (0 to 1) width of the window for the colorbar
height : float, optional
The percentage (0 to 1) height of the window for the colorbar
position_x : float, optional
The percentage (0 to 1) along the windows's horizontal
direction to place the bottom left corner of the colorbar
position_y : float, optional
The percentage (0 to 1) along the windows's vertical
direction to place the bottom left corner of the colorbar
interactive : bool, optional
Use a widget to control the size and location of the scalar bar.
use_opacity : bool, optional
Optionally display the opacity mapping on the scalar bar
outline : bool, optional
Optionally outline the scalar bar to make opacity mappings more
obvious.
nan_annotation : bool, optional
Annotate the NaN color
below_label : str, optional
String annotation for values below the scalars range
above_label : str, optional
String annotation for values above the scalars range
background_color : array, optional
The color used for the background in RGB format.
n_colors : int, optional
The maximum number of color displayed in the scalar bar.
fill : bool
Draw a filled box behind the scalar bar with the ``background_color``
Notes
-----
Setting title_font_size, or label_font_size disables automatic font
sizing for both the title and label.
"""
if interactive is None:
interactive = rcParams['interactive']
if font_family is None:
font_family = rcParams['font']['family']
if label_font_size is None:
label_font_size = rcParams['font']['label_size']
if title_font_size is None:
title_font_size = rcParams['font']['title_size']
if color is None:
color = rcParams['font']['color']
if fmt is None:
fmt = rcParams['font']['fmt']
if vertical is None:
if rcParams['colorbar_orientation'].lower() == 'vertical':
vertical = True
# Automatically choose size if not specified
if width is None:
if vertical:
width = rcParams['colorbar_vertical']['width']
else:
width = rcParams['colorbar_horizontal']['width']
if height is None:
if vertical:
height = rcParams['colorbar_vertical']['height']
else:
height = rcParams['colorbar_horizontal']['height']
# check if maper exists
if mapper is None:
if not hasattr(self, 'mapper') or self.mapper is None:
raise Exception('Mapper does not exist. '
'Add a mesh with scalars first.')
mapper = self.mapper
if title:
# Check that this data hasn't already been plotted
if title in list(self._scalar_bar_ranges.keys()):
clim = list(self._scalar_bar_ranges[title])
newrng = mapper.scalar_range
oldmappers = self._scalar_bar_mappers[title]
# get max for range and reset everything
if newrng[0] < clim[0]:
clim[0] = newrng[0]
if newrng[1] > clim[1]:
clim[1] = newrng[1]
for mh in oldmappers:
mh.scalar_range = clim[0], clim[1]
mapper.scalar_range = clim[0], clim[1]
self._scalar_bar_mappers[title].append(mapper)
self._scalar_bar_ranges[title] = clim
# Color bar already present and ready to be used so returning
return
# Automatically choose location if not specified
if position_x is None or position_y is None:
try:
slot = min(self._scalar_bar_slots)
self._scalar_bar_slots.remove(slot)
self._scalar_bar_slot_lookup[title] = slot
except:
raise RuntimeError('Maximum number of color bars reached.')
if position_x is None:
if vertical:
position_x = rcParams['colorbar_vertical']['position_x']
position_x -= slot * (width + 0.2 * width)
else:
position_x = rcParams['colorbar_horizontal']['position_x']
if position_y is None:
if vertical:
position_y = rcParams['colorbar_vertical']['position_y']
else:
position_y = rcParams['colorbar_horizontal']['position_y']
position_y += slot * height
# Adjust to make sure on the screen
if position_x + width > 1:
position_x -= width
if position_y + height > 1:
position_y -= height
# parse color
color = parse_color(color)
# Create scalar bar
self.scalar_bar = vtk.vtkScalarBarActor()
if background_color is not None:
background_color = parse_color(background_color, opacity=1.0)
background_color = np.array(background_color) * 255
self.scalar_bar.GetBackgroundProperty().SetColor(background_color[0:3])
if fill:
self.scalar_bar.DrawBackgroundOn()
lut = vtk.vtkLookupTable()
lut.DeepCopy(mapper.lookup_table)
ctable = vtk_to_numpy(lut.GetTable())
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
ctable = (use_table * alphas) + background_color * (1 - alphas)
lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR))
else:
lut = mapper.lookup_table
self.scalar_bar.SetLookupTable(lut)
if n_colors is not None:
self.scalar_bar.SetMaximumNumberOfColors(n_colors)
if n_labels < 1:
self.scalar_bar.DrawTickLabelsOff()
else:
self.scalar_bar.DrawTickLabelsOn()
self.scalar_bar.SetNumberOfLabels(n_labels)
if nan_annotation:
self.scalar_bar.DrawNanAnnotationOn()
if above_label:
self.scalar_bar.DrawAboveRangeSwatchOn()
self.scalar_bar.SetAboveRangeAnnotation(above_label)
if below_label:
self.scalar_bar.DrawBelowRangeSwatchOn()
self.scalar_bar.SetBelowRangeAnnotation(below_label)
# edit the size of the colorbar
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(position_x, position_y)
if fmt is not None:
self.scalar_bar.SetLabelFormat(fmt)
if vertical:
self.scalar_bar.SetOrientationToVertical()
else:
self.scalar_bar.SetOrientationToHorizontal()
if label_font_size is not None or title_font_size is not None:
self.scalar_bar.UnconstrainedFontSizeOn()
self.scalar_bar.AnnotationTextScalingOn()
label_text = self.scalar_bar.GetLabelTextProperty()
anno_text = self.scalar_bar.GetAnnotationTextProperty()
label_text.SetColor(color)
anno_text.SetColor(color)
label_text.SetShadow(shadow)
anno_text.SetShadow(shadow)
# Set font
label_text.SetFontFamily(parse_font_family(font_family))
anno_text.SetFontFamily(parse_font_family(font_family))
label_text.SetItalic(italic)
anno_text.SetItalic(italic)
label_text.SetBold(bold)
anno_text.SetBold(bold)
if label_font_size:
label_text.SetFontSize(label_font_size)
anno_text.SetFontSize(label_font_size)
# Set properties
if title:
clim = mapper.scalar_range
self._scalar_bar_ranges[title] = clim
self._scalar_bar_mappers[title] = [mapper]
self.scalar_bar.SetTitle(title)
title_text = self.scalar_bar.GetTitleTextProperty()
title_text.SetJustificationToCentered()
title_text.SetItalic(italic)
title_text.SetBold(bold)
title_text.SetShadow(shadow)
if title_font_size:
title_text.SetFontSize(title_font_size)
# Set font
title_text.SetFontFamily(parse_font_family(font_family))
# set color
title_text.SetColor(color)
self._scalar_bar_actors[title] = self.scalar_bar
if interactive is None:
interactive = rcParams['interactive']
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
err_str = 'Interactive scalar bars disabled for multi-renderer plots'
raise Exception(err_str)
if interactive and hasattr(self, 'iren'):
self.scalar_widget = vtk.vtkScalarBarWidget()
self.scalar_widget.SetScalarBarActor(self.scalar_bar)
self.scalar_widget.SetInteractor(self.iren)
self.scalar_widget.SetEnabled(1)
rep = self.scalar_widget.GetRepresentation()
# self.scalar_widget.On()
if vertical is True or vertical is None:
rep.SetOrientation(1) # 0 = Horizontal, 1 = Vertical
else:
rep.SetOrientation(0) # 0 = Horizontal, 1 = Vertical
self._scalar_bar_widgets[title] = self.scalar_widget
if use_opacity:
self.scalar_bar.SetUseOpacity(True)
if outline:
self.scalar_bar.SetDrawFrame(True)
frame_prop = self.scalar_bar.GetFrameProperty()
frame_prop.SetColor(color)
else:
self.scalar_bar.SetDrawFrame(False)
self.add_actor(self.scalar_bar, reset_camera=False, pickable=False)
return self.scalar_bar # return the actor
def update_scalars(self, scalars, mesh=None, render=True):
"""Update scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.Iterable, pyvista.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.render()
return
if isinstance(scalars, str):
# Grab scalars array if name given
scalars = get_array(mesh, scalars)
if scalars is None:
if render:
self.render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise Exception('No active scalars')
s = convert_array(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalars array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.render()
def update_coordinates(self, points, mesh=None, render=True):
"""Update the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
if render:
self.render()
def _clear_ren_win(self):
"""Clear the render window."""
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
def close(self):
"""Close the render window."""
# must close out widgets first
super(BasePlotter, self).close()
# Renderer has an axes widget, so close it
for renderer in self.renderers:
renderer.close()
# Grab screenshots of last render
self.last_image = self.screenshot(None, return_img=True)
self.last_image_depth = self.get_image_depth()
if hasattr(self, 'scalar_widget'):
del self.scalar_widget
# reset scalar bar stuff
self.clear()
self._clear_ren_win()
if hasattr(self, '_style'):
del self._style
if hasattr(self, 'iren'):
# self.iren.RemoveAllObservers()
for obs in self._observers.values():
self.iren.RemoveObservers(obs)
del self._observers
self.iren.TerminateApp()
del self.iren
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
# this helps managing closed plotters
self._closed = True
def deep_clean(self):
"""Clean the plotter of the memory."""
for renderer in self.renderers:
renderer.deep_clean()
for renderer in self._background_renderers:
if renderer is not None:
renderer.deep_clean()
# Do not remove the renderers on the clean
self.mesh = None
self.mapper = None
def add_text(self, text, position='upper_left', font_size=18, color=None,
font=None, shadow=False, name=None, viewport=False):
"""Add text to plot object in the top left corner by default.
Parameters
----------
text : str
The text to add the rendering
position : str, tuple(float)
Position to place the bottom left corner of the text box.
If tuple is used, the position of the text uses the pixel
coordinate system (default). In this case,
it returns a more general `vtkOpenGLTextActor`.
If string name is used, it returns a `vtkCornerAnnotation`
object normally used for fixed labels (like title or xlabel).
Default is to find the top left corner of the rendering window
and place text box up there. Available position: ``'lower_left'``,
``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and
``'left_edge'``
font : string, optional
Font name may be courier, times, or arial
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
viewport: bool
If True and position is a tuple of float, uses
the normalized viewport coordinate system (values between 0.0
and 1.0 and support for HiDPI).
Return
------
textActor : vtk.vtkTextActor
Text actor added to plot
"""
if font is None:
font = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if color is None:
color = rcParams['font']['color']
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
corner_mappings = {
'lower_left': vtk.vtkCornerAnnotation.LowerLeft,
'lower_right': vtk.vtkCornerAnnotation.LowerRight,
'upper_left': vtk.vtkCornerAnnotation.UpperLeft,
'upper_right': vtk.vtkCornerAnnotation.UpperRight,
'lower_edge': vtk.vtkCornerAnnotation.LowerEdge,
'upper_edge': vtk.vtkCornerAnnotation.UpperEdge,
'left_edge': vtk.vtkCornerAnnotation.LeftEdge,
'right_edge': vtk.vtkCornerAnnotation.RightEdge,
}
corner_mappings['ll'] = corner_mappings['lower_left']
corner_mappings['lr'] = corner_mappings['lower_right']
corner_mappings['ul'] = corner_mappings['upper_left']
corner_mappings['ur'] = corner_mappings['upper_right']
corner_mappings['top'] = corner_mappings['upper_edge']
corner_mappings['bottom'] = corner_mappings['lower_edge']
corner_mappings['right'] = corner_mappings['right_edge']
corner_mappings['r'] = corner_mappings['right_edge']
corner_mappings['left'] = corner_mappings['left_edge']
corner_mappings['l'] = corner_mappings['left_edge']
if isinstance(position, (int, str, bool)):
if isinstance(position, str):
position = corner_mappings[position]
elif position is True:
position = corner_mappings['upper_left']
self.textActor = vtk.vtkCornerAnnotation()
# This is how you set the font size with this actor
self.textActor.SetLinearFontScaleFactor(font_size // 2)
self.textActor.SetText(position, text)
else:
self.textActor = vtk.vtkTextActor()
self.textActor.SetInput(text)
self.textActor.SetPosition(position)
if viewport:
self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font])
self.textActor.GetTextProperty().SetShadow(shadow)
self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False)
return self.textActor
def open_movie(self, filename, framerate=24):
"""Establish a connection to the ffmpeg writer.
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See "imagio.get_writer"
framerate : int, optional
Frames per second.
"""
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self.mwriter = imageio.get_writer(filename, fps=framerate)
def open_gif(self, filename):
"""Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in gif.
"""
if filename[-3:] != 'gif':
raise Exception('Unsupported filetype. Must end in .gif')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = imageio.get_writer(filename, mode='I')
def write_frame(self):
"""Write a single frame to the movie file."""
if not hasattr(self, 'mwriter'):
raise AssertionError('This plotter has not opened a movie or GIF file.')
self.mwriter.append_data(self.image)
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = pyvista.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = pyvista.utilities.point_array(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
def get_image_depth(self,
fill_value=np.nan,
reset_camera_clipping_range=True):
"""Return a depth image representing current render window.
Parameters
----------
fill_value : float
Fill value for points in image that don't include objects in scene.
To not use a fill value, pass ``None``.
reset_camera_clipping_range : bool
Reset the camera clipping range to include data in view?
Return
------
image_depth : numpy.ndarray
Image of depth values from camera orthogonal to image plane
Notes
-----
Values in image_depth are negative to adhere to a
right-handed coordinate system.
"""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image_depth'):
zval = self.last_image_depth.copy()
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
# Ensure points in view are within clipping range of renderer?
if reset_camera_clipping_range:
self.renderer.ResetCameraClippingRange()
# Get the z-buffer image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
zbuff = self._run_image_filter(ifilter)[:, :, 0]
# Convert z-buffer values to depth from camera
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
near, far = self.camera.GetClippingRange()
if self.camera.GetParallelProjection():
zval = (zbuff - near) / (far - near)
else:
zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
# Consider image values outside clipping range as nans
args = np.logical_or(zval < -far, np.isclose(zval, -far))
self._image_depth_null = args
if fill_value is not None:
zval[args] = fill_value
return zval
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""Add lines to the plotting object.
Parameters
----------
lines : np.ndarray or pyvista.PolyData
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
width : float, optional
Thickness of lines
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
actor : vtk.vtkActor
Lines actor.
"""
if not isinstance(lines, np.ndarray):
raise Exception('Input should be an array of point segments')
lines = pyvista.lines_from_points(lines)
# Create mapper and add lines
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
self._labels.append([lines, label, rgb_color])
# Create actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color)
actor.GetProperty().SetColor(rgb_color)
actor.GetProperty().LightingOff()
# Add to renderer
self.add_actor(actor, reset_camera=False, name=name, pickable=False)
return actor
def remove_scalar_bar(self):
"""Remove the scalar bar."""
if hasattr(self, 'scalar_bar'):
self.remove_actor(self.scalar_bar, reset_camera=False)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color=None,
font_family=None, shadow=False,
show_points=True, point_color=None, point_size=5,
name=None, shape_color='grey', shape='rounded_rect',
fill_shape=True, margin=3, shape_opacity=1.0,
pickable=False, render_points_as_spheres=False,
tolerance=0.001):
"""Create a point actor with one label from list labels assigned to each point.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : list or str
List of labels. Must be the same length as points. If a string name
is given with a pyvista.Common input for points, then these are fetched.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
font_size : float, optional
Sets the size of the title font. Defaults to 16.
text_color : string or 3 item list, optional
Color of text. Either a string, rgb list, or hex color string.
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
show_points : bool, optional
Controls if points are visible. Default True
point_color : string or 3 item list, optional. Color of points (if visible).
Either a string, rgb list, or hex color string. For example:
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
point_size : float, optional
Size of points (if visible)
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
shape_color : string or 3 item list, optional. Color of points (if visible).
Either a string, rgb list, or hex color string. For example:
shape : str, optional
The string name of the shape to use. Options are ``'rect'`` or
``'rounded_rect'``. If you want no shape, pass ``None``
fill_shape : bool, optional
Fill the shape with the ``shape_color``. Outlines if ``False``.
margin : int, optional
The size of the margin on the label background shape. Default is 3.
shape_opacity : float
The opacity of the shape between zero and one.
tolerance : float
a tolerance to use to determine whether a point label is visible.
A tolerance is usually required because the conversion from world
space to display space during rendering introduces numerical
round-off.
Return
------
labelActor : vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
"""
if font_family is None:
font_family = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if point_color is None:
point_color = rcParams['color']
if text_color is None:
text_color = rcParams['font']['color']
if isinstance(points, (list, tuple)):
points = np.array(points)
if isinstance(points, np.ndarray):
vtkpoints = pyvista.PolyData(points) # Cast to poly data
elif is_pyvista_dataset(points):
vtkpoints = pyvista.PolyData(points.points)
if isinstance(labels, str):
labels = points.point_arrays[labels].astype(str)
else:
raise TypeError('Points type not usable: {}'.format(type(points)))
if len(vtkpoints.points) != len(labels):
raise Exception('There must be one label for each point')
if name is None:
name = '{}({})'.format(type(vtkpoints).__name__, vtkpoints.memory_address)
vtklabels = vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# Only show visible points
vis_points = vtk.vtkSelectVisiblePoints()
vis_points.SetInputData(vtkpoints)
vis_points.SetRenderer(self.renderer)
vis_points.SetTolerance(tolerance)
# Create hierarchy
hier = vtk.vtkPointSetToLabelHierarchy()
hier.SetInputConnection(vis_points.GetOutputPort())
hier.SetLabelArrayName('labels')
# create label mapper
labelMapper = vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(hier.GetOutputPort())
if not isinstance(shape, str):
labelMapper.SetShapeToNone()
elif shape.lower() in 'rect':
labelMapper.SetShapeToRect()
elif shape.lower() in 'rounded_rect':
labelMapper.SetShapeToRoundedRect()
else:
raise RuntimeError('Shape ({}) not understood'.format(shape))
if fill_shape:
labelMapper.SetStyleToFilled()
else:
labelMapper.SetStyleToOutline()
labelMapper.SetBackgroundColor(parse_color(shape_color))
labelMapper.SetBackgroundOpacity(shape_opacity)
labelMapper.SetMargin(margin)
textprop = hier.GetTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
self.remove_actor('{}-points'.format(name), reset_camera=False)
self.remove_actor('{}-labels'.format(name), reset_camera=False)
# add points
if show_points:
style = 'points'
else:
style = 'surface'
self.add_mesh(vtkpoints, style=style, color=point_color,
point_size=point_size, name='{}-points'.format(name),
pickable=pickable,
render_points_as_spheres=render_points_as_spheres)
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
self.add_actor(labelActor, reset_camera=False,
name='{}-labels'.format(name), pickable=False)
return labelActor
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
"""Label the points from a dataset with the values of their scalars.
Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : str
String name of the point data array to use.
fmt : str
String formatter used to format numerical data
"""
if not is_pyvista_dataset(points):
raise TypeError('input points must be a pyvista dataset, not: {}'.format(type(points)))
if not isinstance(labels, str):
raise TypeError('labels must be a string name of the scalars array to use')
if fmt is None:
fmt = rcParams['font']['fmt']
if fmt is None:
fmt = '%.6e'
scalars = points.point_arrays[labels]
phrase = '{} {}'.format(preamble, '%.3e')
labels = [phrase % val for val in scalars]
return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
"""Add points to a mesh."""
kwargs['style'] = 'points'
return self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
"""Add arrows to plotting object."""
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
direction[:,0] *= mag
direction[:,1] *= mag
direction[:,2] *= mag
pdata = pyvista.vector_poly_data(cent, direction)
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
@staticmethod
def _save_image(image, filename, return_img=None):
"""Save a NumPy image array.
This is an internal helper.
"""
if not image.size:
raise Exception('Empty image. Have you run plot() first?')
# write screenshot to file
supported_formats = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
if isinstance(filename, str):
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
if not any([filename.lower().endswith(ext) for ext in supported_formats]):
filename += ".png"
filename = os.path.abspath(os.path.expanduser(filename))
w = imageio.imwrite(filename, image)
if not return_img:
return w
return image
def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True):
"""Save a screenshot of the rendering window as a graphic file.
The supported formats are: '.svg', '.eps', '.ps', '.pdf', '.tex'
"""
if not hasattr(self, 'ren_win'):
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
filename = os.path.abspath(os.path.expanduser(filename))
extension = pyvista.fileio.get_ext(filename)
valid = ['.svg', '.eps', '.ps', '.pdf', '.tex']
if extension not in valid:
raise RuntimeError('Extension ({}) is an invalid choice. Valid options include: {}'.format(extension, ', '.join(valid)))
writer = vtk.vtkGL2PSExporter()
modes = {
'.svg': writer.SetFileFormatToSVG,
'.eps': writer.SetFileFormatToEPS,
'.ps': writer.SetFileFormatToPS,
'.pdf': writer.SetFileFormatToPDF,
'.tex': writer.SetFileFormatToTeX,
}
writer.CompressOff()
writer.SetFilePrefix(filename.replace(extension, ''))
writer.SetInput(self.ren_win)
modes[extension]()
writer.SetTitle(title)
writer.SetWrite3DPropsAsRasterImage(raster)
if painter:
writer.UsePainterSettings()
writer.Update()
return
def screenshot(self, filename=None, transparent_background=None,
return_img=None, window_size=None):
"""Take screenshot at current camera position.
Parameters
----------
filename : str, optional
Location to write image to. If None, no image is written.
transparent_background : bool, optional
Makes the background transparent. Default False.
return_img : bool, optional
If a string filename is given and this is true, a NumPy array of
the image will be returned.
Return
------
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
if transparent_background is None:
transparent_background = rcParams['transparent_background']
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if hasattr(self, 'last_image'):
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise AttributeError('This plotter is closed and unable to save a screenshot.')
self.render()
# debug: this needs to be called twice for some reason,
img = self.image
img = self.image
return self._save_image(img, filename, return_img)
def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False,
size=None, name=None):
"""Add a legend to render window.
Entries must be a list containing one string and color entry for each
item.
Parameters
----------
labels : list, optional
When set to None, uses existing labels as specified by
- add_mesh
- add_lines
- add_points
List containing one entry for each item to be added to the
legend. Each entry must contain two strings, [label,
color], where label is the name of the item to add, and
color is the color of the label to add.
bcolor : list or string, optional
Background color, either a three item 0 to 1 RGB color
list, or a matplotlib color string (e.g. 'w' or 'white'
for a white color). If None, legend background is
disabled.
border : bool, optional
Controls if there will be a border around the legend.
Default False.
size : list, optional
Two float list, each float between 0 and 1. For example
[0.1, 0.1] would make the legend 10% the size of the
entire figure window.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
legend : vtk.vtkLegendBoxActor
Actor for the legend.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, label='My Mesh')
>>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh')
>>> _ = plotter.add_legend()
>>> plotter.show() # doctest:+SKIP
Alternative manual example
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> legend_entries = []
>>> legend_entries.append(['My Mesh', 'w'])
>>> legend_entries.append(['My Other Mesh', 'k'])
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.add_mesh(othermesh, 'k')
>>> _ = plotter.add_legend(legend_entries)
>>> plotter.show() # doctest:+SKIP
"""
self.legend = vtk.vtkLegendBoxActor()
if labels is None:
# use existing labels
if not self._labels:
raise Exception('No labels input.\n\n'
'Add labels to individual items when adding them to'
'the plotting object with the "label=" parameter. '
'or enter them as the "labels" parameter.')
self.legend.SetNumberOfEntries(len(self._labels))
for i, (vtk_object, text, color) in enumerate(self._labels):
self.legend.SetEntry(i, vtk_object, text, parse_color(color))
else:
self.legend.SetNumberOfEntries(len(labels))
legendface = pyvista.single_triangle()
for i, (text, color) in enumerate(labels):
self.legend.SetEntry(i, legendface, text, parse_color(color))
if size:
self.legend.SetPosition2(size[0], size[1])
if bcolor is None:
self.legend.UseBackgroundOff()
else:
self.legend.UseBackgroundOn()
self.legend.SetBackgroundColor(bcolor)
if border:
self.legend.BorderOn()
else:
self.legend.BorderOff()
# Add to renderer
self.add_actor(self.legend, reset_camera=False, name=name, pickable=False)
return self.legend
def set_background(self, color, top=None, all_renderers=True):
"""Set the background color.
Parameters
----------
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
top : string or 3 item list, optional, defaults to None
If given, this will enable a gradient background where the
``color`` argument is at the bottom and the color given in ``top``
will be the color at the top of the renderer.
all_renderers : bool
If True, applies to all renderers in subplots. If False, then
only applies to the active renderer.
"""
if all_renderers:
for renderer in self.renderers:
renderer.set_background(color, top=top)
else:
self.renderer.set_background(color, top=top)
def remove_legend(self):
"""Remove the legend actor."""
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self.render()
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, shift=0.0):
"""Generate an orbital path around the data scene.
Parameters
----------
factor : float
A scaling factor when biulding the orbital extent
n_points : int
number of points on the orbital path
viewup : list(float)
the normal to the orbital plane
shift : float, optional
shift the plane up/down from the center of the scene by this amount
"""
if viewup is None:
viewup = rcParams['camera']['viewup']
center = np.array(self.center)
bnds = np.array(self.bounds)
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
center += np.array(viewup) * shift
return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Move the current camera's focal point to a position point.
The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
"""
if not hasattr(self, 'iren'):
raise AttributeError('This plotter does not have an interactive window')
return self.iren.FlyTo(self.renderer, *point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None,
bkg=True, write_frames=False):
"""Orbit on the given path focusing on the focus point.
Parameters
----------
path : pyvista.PolyData
Path of orbital points. The order in the points is the order of
travel
focus : list(float) of length 3, optional
The point of focus the camera.
step : float, optional
The timestep between flying to each camera position
viewup : list(float)
the normal to the orbital plane
write_frames : bool
Assume a file is open and write a frame on each camera view during
the orbit.
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = rcParams['camera']['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_pyvista_dataset(path):
path = pyvista.PolyData(path)
points = path.points
# Make sure the whole scene is visible
self.camera.SetThickness(path.length)
def orbit():
"""Define the internal thread for running the orbit."""
for point in points:
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
self.renderer.ResetCameraClippingRange()
self.render()
if bkg:
time.sleep(step)
if write_frames:
self.write_frame()
if bkg and isinstance(self, pyvista.BackgroundPlotter):
thread = Thread(target=orbit)
thread.start()
else:
bkg = False
orbit()
return
def export_vtkjs(self, filename, compress_arrays=False):
"""Export the current rendering scene as a VTKjs scene.
It can be used for rendering in a web browser.
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
return export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
def export_obj(self, filename):
"""Export scene to OBJ format."""
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter must still have a render window open.")
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
exporter = vtk.vtkOBJExporter()
exporter.SetFilePrefix(filename)
exporter.SetRenderWindow(self.ren_win)
return exporter.Write()
def __del__(self):
"""Delete the plotter."""
if not self._closed:
self.close()
self.deep_clean()
del self.renderers
def add_background_image(self, image_path, scale=1, auto_resize=True,
as_global=True):
"""Add a background image to a plot.
Parameters
----------
image_path : str
Path to an image file.
scale : float, optional
Scale the image larger or smaller relative to the size of
the window. For example, a scale size of 2 will make the
largest dimension of the image twice as large as the
largest dimension of the render window. Defaults to 1.
auto_resize : bool, optional
Resize the background when the render window changes size.
as_global : bool, optional
When multiple render windows are present, setting
``as_global=False`` will cause the background to only
appear in one window.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.add_background_image(examples.mapfile)
>>> plotter.show() # doctest:+SKIP
"""
# verify no render exists
if self._background_renderers[self._active_renderer_index] is not None:
raise RuntimeError('A background image already exists. '
'Remove it with remove_background_image '
'before adding one')
# Need to change the number of layers to support an additional
# background layer
self.ren_win.SetNumberOfLayers(2)
if as_global:
for renderer in self.renderers:
renderer.SetLayer(1)
view_port = None
else:
self.renderer.SetLayer(1)
view_port = self.renderer.GetViewport()
renderer = BackgroundRenderer(self, image_path, scale, view_port)
self.ren_win.AddRenderer(renderer)
self._background_renderers[self._active_renderer_index] = renderer
# setup autoscaling of the image
if auto_resize and hasattr(self, 'iren'): # pragma: no cover
self._add_observer('ModifiedEvent', renderer.resize)
def remove_background_image(self):
"""Remove the background image from the current subplot."""
renderer = self._background_renderers[self._active_renderer_index]
if renderer is None:
raise RuntimeError('No background image to remove at this subplot')
renderer.deep_clean()
self._background_renderers[self._active_renderer_index] = None
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Example
-------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, color='red')
>>> _ = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show() # doctest:+SKIP
Parameters
----------
off_screen : bool, optional
Renders off screen when True. Useful for automated screenshots.
notebook : bool, optional
When True, the resulting plot is placed inline a jupyter notebook.
Assumes a jupyter console is active. Automatically enables off_screen.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render window.
Can also accept a shape as string descriptor. E.g.:
shape="3|1" means 3 plots on the left and 1 on the right,
shape="4/2" means 4 plots on top of 2 at bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
multi_samples : int
The number of multi-samples used to mitigate aliasing. 4 is a good
default but 8 will have better results with a potential impact on
performance.
line_smoothing : bool
If True, enable line smothing
point_smoothing : bool
If True, enable point smothing
polygon_smoothing : bool
If True, enable polygon smothing
"""
last_update_time = 0.0
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
border=None, border_color='k', border_width=2.0,
window_size=None, multi_samples=None, line_smoothing=False,
point_smoothing=False, polygon_smoothing=False,
splitting_position=None, title=None):
"""Initialize a vtk plotting object."""
super(Plotter, self).__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width,
splitting_position=splitting_position,
title=title)
log.debug('Initializing')
def on_timer(iren, event_id):
"""Exit application if interactive renderer stops."""
if event_id == 'TimerEvent':
self.iren.TerminateApp()
if off_screen is None:
off_screen = pyvista.OFF_SCREEN
if notebook is None:
notebook = scooby.in_ipykernel()
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
if window_size is None:
window_size = rcParams['window_size']
self.__prior_window_size = window_size
if multi_samples is None:
multi_samples = rcParams['multi_samples']
# initialize render window
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.SetMultiSamples(multi_samples)
self.ren_win.SetBorders(True)
if line_smoothing:
self.ren_win.LineSmoothingOn()
if point_smoothing:
self.ren_win.PointSmoothingOn()
if polygon_smoothing:
self.ren_win.PolygonSmoothingOn()
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
else: # Allow user to interact
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.LightFollowCameraOff()
self.iren.SetDesiredUpdateRate(30.0)
self.iren.SetRenderWindow(self.ren_win)
self.enable_trackball_style()
self._observers = {} # Map of events to observers of self.iren
self._add_observer("KeyPressEvent", self.key_press_event)
self.update_style()
# Set background
self.set_background(rcParams['background'])
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
self._add_observer(vtk.vtkCommand.TimerEvent, on_timer)
if rcParams["depth_peeling"]["enabled"]:
if self.enable_depth_peeling():
for renderer in self.renderers:
renderer.enable_depth_peeling()
def show(self, title=None, window_size=None, interactive=True,
auto_close=None, interactive_update=False, full_screen=False,
screenshot=False, return_img=False, use_panel=None, cpos=None,
height=400):
"""Display the plotting window.
Notes
-----
Please use the ``q``-key to close the plotter as some operating systems
(namely Windows) will experience issues saving a screenshot if the
exit button in the GUI is prressed.
Parameters
----------
title : string, optional
Title of plotting window.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
auto_close : bool, optional
Enabled by default. Exits plotting session when user
closes the window when interactive is True.
interactive_update: bool, optional
Disabled by default. Allows user to non-blocking draw,
user should call Update() in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
window_size. Default False.
use_panel : bool, optional
If False, the interactive rendering from panel will not be used in
notebooks
cpos : list(tuple(floats))
The camera position to use
height : int, optional
height for panel pane. Only used with panel.
Return
------
cpos : list
List of camera position, focal point, and view up
"""
if use_panel is None:
use_panel = rcParams['use_panel']
if auto_close is None:
auto_close = rcParams['auto_close']
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter has been closed and cannot be shown.")
# reset unless camera for the first render unless camera is set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set and cpos is None:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
elif cpos is not None:
renderer.camera_position = cpos
self._first_time = False
# if full_screen:
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
self.ren_win.SetSize(window_size[0], window_size[1])
# Render
log.debug('Rendering')
self.render()
# This has to be after the first render for some reason
if title is None:
title = self.title
if title:
self.ren_win.SetWindowName(title)
self.title = title
# Keep track of image for sphinx-gallery
self.last_image = self.screenshot(screenshot, return_img=True)
self.last_image_depth = self.get_image_depth()
disp = None
self.update() # For Windows issues. Resolves #186
# See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270
if interactive and (not self.off_screen):
try: # interrupts will be caught here
log.debug('Starting iren')
self.update_style()
self.iren.Initialize()
if not interactive_update:
self.iren.Start()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
elif self.notebook and use_panel and not hasattr(self, 'volume'):
try:
from panel.pane import VTK as panel_display
disp = panel_display(self.ren_win, sizing_mode='stretch_width',
height=height)
except:
pass
# In the event that the user hits the exit-button on the GUI (on
# Windows OS) then it must be finalized and deleted as accessing it
# will kill the kernel.
# Here we check for that and clean it up before moving on to any of
# the closing routines that might try to still access that
# render window.
if not self.ren_win.IsCurrent():
self._clear_ren_win() # The ren_win is deleted
# proper screenshots cannot be saved if this happens
if not auto_close:
warnings.warn("`auto_close` ignored: by clicking the exit button, you have destroyed the render window and we have to close it out.")
auto_close = True
# NOTE: after this point, nothing from the render window can be accessed
# as if a user presed the close button, then it destroys the
# the render view and a stream of errors will kill the Python
# kernel if code here tries to access that renderer.
# See issues #135 and #186 for insight before editing the
# remainder of this function.
# Get camera position before closing
cpos = self.camera_position
# NOTE: our conversion to panel currently does not support mult-view
# so we should display the static screenshot in notebooks for
# multi-view plots until we implement this feature
# If notebook is true and panel display failed:
if self.notebook and (disp is None or self.shape != (1, 1)):
import PIL.Image
# sanity check
try:
import IPython
except ImportError:
raise Exception('Install IPython to display image in a notebook')
disp = IPython.display.display(PIL.Image.fromarray(self.last_image))
# Cleanup
if auto_close:
self.close()
# Return the notebook display: either panel object or image display
if self.notebook:
return disp
# If user asked for screenshot, return as numpy array after camera
# position
if return_img or screenshot is True:
return cpos, self.last_image
# default to returning last used camera position
return cpos
def plot(self, *args, **kwargs):
"""Create a plotting window.
Present for backwards compatibility.
DEPRECATED: Please use `show()` instead.
"""
logging.warning("`.plot()` is deprecated. Please use `.show()` instead.")
return self.show(*args, **kwargs)
| 37.893785
| 149
| 0.592575
|
8cdb62593b1c4dae23b41a04212ca9d7469cecf0
| 3,611
|
py
|
Python
|
inou/prp/tests/prplib.py
|
realdavidpang/livehd
|
c0462922400d34c0327b4aabb450332bda50f174
|
[
"BSD-3-Clause"
] | 46
|
2018-05-31T23:07:02.000Z
|
2019-09-16T20:21:03.000Z
|
inou/prp/tests/prplib.py
|
realdavidpang/livehd
|
c0462922400d34c0327b4aabb450332bda50f174
|
[
"BSD-3-Clause"
] | 120
|
2018-05-16T23:11:09.000Z
|
2019-09-25T18:52:49.000Z
|
inou/prp/tests/prplib.py
|
realdavidpang/livehd
|
c0462922400d34c0327b4aabb450332bda50f174
|
[
"BSD-3-Clause"
] | 8
|
2018-11-08T18:53:52.000Z
|
2019-09-05T20:04:20.000Z
|
#!/usr/bin/env python3
import argparse
import os
import re
import subprocess
import sys
import yaml
class PrpTest:
"""
Pyrope Test Object
"""
def __init__(self, prp_file):
# Set default values
self.params = {}
self.params['name'] = os.path.basename(prp_file)
self.params['files'] = prp_file
self.params['incdirs'] = os.path.dirname(prp_file)
self.params['top_module'] = 'top'
self.params['defines'] = ''
self.params['type'] = 'parsing'
# Extract parameters in pyrope file
try:
with open(prp_file) as f:
for line in f:
param = re.search(r'^:([a-zA-Z_-]+):\s*(.+)', line)
if param == None:
continue
param_name = param[1]
param_value = param[2]
self.params[param_name] = param_value
except Exception as e:
print('Failed to process "{}"'.format(prp_file))
sys.exit(1)
# Post-process
self.params['files'] = self.params['files'].split()
self.params['incdirs'] = self.params['incdirs'].split()
self.params['type'] = self.params['type'].split()
class PrpRunner:
"""
LiveHD Pyrope Compilation Runner
"""
config : {}
def __init__(self, config_file):
try:
with open(config_file) as f:
self.config = yaml.load(f.read(), yaml.Loader)
except Exception as e:
print(e)
print('Failed to process config file "{}"'.format(config_file))
sys.exit(1)
def lgshell_parse(self, test):
lg_cmd = []
lg_cmd.append('inou.prp')
lg_cmd.append('files:{}'.format(','.join(test.params['files'])))
return lg_cmd
def lgshell_lgraph(self, test):
lg_cmd = self.lgshell_parse(test)
lg_cmd.append('|>')
lg_cmd.append('pass.lnastopt')
lg_cmd.append('|>')
lg_cmd.append('pass.lnast_tolg')
return lg_cmd
def lgshell_lg_compile(self, test):
lg_cmd = self.lgshell_lgraph(test)
lg_cmd.append('|>')
lg_cmd.append('pass.cprop')
lg_cmd.append('|>')
lg_cmd.append('pass.bitwidth')
return lg_cmd
def gen_lgshell_cmd(self, test, mode):
gen_lg_cmd = {
'parsing' : self.lgshell_parse,
'lgraph' : self.lgshell_lgraph,
'compile' : self.lgshell_lg_compile
}
cmd = []
cmd.append(self.config['lgshell']['bin'])
cmd.append(' '.join(gen_lg_cmd[mode](test)))
return cmd
def run(self, tmp_dir, test: PrpTest):
for mode in test.params['type']:
cmd = []
if mode == 'simulation':
pass
else:
cmd = self.gen_lgshell_cmd(test, mode)
proc = subprocess.Popen(
cmd,
cwd=tmp_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
try:
log, _ = proc.communicate()
rc = proc.returncode
except:
proc.kill()
if rc == 0:
print('{} - {} - success'.format(test.params['name'], mode))
else:
print('{} - {} - failed'.format(test.params['name'], mode))
print(log.decode('utf-8', 'ignore'))
return rc
| 27.150376
| 76
| 0.495154
|
7a1fa208a9ed9387f80f6962f41e56431f37be62
| 8,984
|
py
|
Python
|
retratodefases/Cobweb.py
|
Loracio/retrato-de-fases
|
a2d870a69b911af3b78288708cb569c957506940
|
[
"MIT"
] | 3
|
2021-03-22T00:07:28.000Z
|
2021-03-22T12:11:18.000Z
|
retratodefases/Cobweb.py
|
Loracio/retrato-de-fases
|
a2d870a69b911af3b78288708cb569c957506940
|
[
"MIT"
] | null | null | null |
retratodefases/Cobweb.py
|
Loracio/retrato-de-fases
|
a2d870a69b911af3b78288708cb569c957506940
|
[
"MIT"
] | 2
|
2021-03-20T19:00:53.000Z
|
2021-03-22T12:19:52.000Z
|
from inspect import signature
import matplotlib.pyplot as plt
import numpy as np
from .exceptions import exceptions
from .phase_diagrams import Funcion1D
from .sliders import sliders
from .utils import utils
class Cobweb:
"""
Cobweb
------
Class dedicated cobweb plots of 1 dimension maps `x(t+1) = f(x)`.
Methods
-------
plot :
Prepares the plots, compute the values, and plots them.
Returns the axis and the figure.
add_slider :
Adds a `Slider` for the `dF` funcion.
initial_position_slider :
Adds a slider for changing the initial value.
add_funcion :
Adds a funcion to the Cobweb plot.
"""
_name_ = 'Cobweb'
def __init__(self, dF, initial_position, xrange, *, dF_args={None}, yrange=[], max_steps=100, n_points=100, **kargs):
"""
Cobweb
------
Parameters
----------
dF : callable
A dF type funcion.
initial_position : float
Initial x of the iteration.
xrange : list
Range of the x axis in the main plot.
Key Arguments
-------------
dF_args : dict
If necesary, must contain the kargs for the `dF` funcion.
yrange : list
Range of the y axis in the main plot
max_steps : int
Maximun number of poits to be represented.
n_points : int
Number of points in the bisector.
Title : str
Title of the plot.
xlabel : str
x label of the plot.
ylabel : str
y label of the plot.
"""
self.dF = dF
self.dF_args = dF_args
self.initial_position = initial_position
self.xrange = xrange
self.yrange = yrange
self.max_steps = max_steps
self.n_points = n_points
self.Title = kargs['Title'] if kargs.get('Title') else 'Cobweb plot'
self.xlabel = kargs['xlabel'] if kargs.get('xlabel') else r'$X_n$'
self.ylabel = kargs['ylabel'] if kargs.get('ylabel') else r'$X_{n+1}$'
figCobweb, axCobweb = plt.subplots()
figTimeSeries, axTimeSeries = plt.subplots()
self.fig = {
'Cobweb': figCobweb,
'TimeSeries': figTimeSeries
}
self.ax = {
'Cobweb': axCobweb,
'TimeSeries': axTimeSeries
}
self.sliders = {}
self.sliders_fig = False
self.funcions = []
def _prepare_plot(self, min_value, max_value):
self.ax['Cobweb'].set_title(self.Title)
self.ax['Cobweb'].set_xlabel(self.xlabel)
self.ax['Cobweb'].set_ylabel(self.ylabel)
if self.yrange==[]:
self.ax['Cobweb'].set_ylim(bottom= 1.10*min_value,top=1.10*max_value)
else:
self.ax['Cobweb'].set_ylim(self.yrange)
self.ax['Cobweb'].grid()
self.ax['TimeSeries'].set_title('Time Series')
self.ax['TimeSeries'].set_ylabel(r'$x_t$')
self.ax['TimeSeries'].set_xlabel('t')
self.ax['TimeSeries'].set_ylim(self.xrange)
self.ax['TimeSeries'].grid()
def plot(self, *args, **kargs):
"""
Prepares the plots, compute the values and plots them.
Returns
-------
tuple(matplotlib Figure (Cobweb plot), matplotlib Axis (Cobweb plot), matplotlib Figure (Time series), matplotlib Axis (Time series))
"""
bisector = np.linspace(self.xrange[0], self.xrange[1], self.n_points)
func_result = self.dF(bisector, **self.dF_args)
xTimeSeries = []
yTimeSeries = []
self._prepare_plot(np.min(func_result), np.max(func_result))
self.ax['Cobweb'].plot(bisector, func_result, 'b')
self.ax['Cobweb'].plot(bisector, bisector, "k:")
x, y = self.initial_position, self.dF(self.initial_position, **self.dF_args)
self.ax['Cobweb'].plot([x, x], [0, y], 'k:')
self.ax['Cobweb'].scatter(x , 0, color='green')
xTimeSeries.append(0)
yTimeSeries.append(x)
for i in range(self.max_steps):
self.ax['Cobweb'].plot([x, y], [y, y], 'k:')
self.ax['Cobweb'].plot([y, y], [y, self.dF(y, **self.dF_args)], 'k:')
x, y = y, self.dF(y, **self.dF_args)
xTimeSeries.append(i)
yTimeSeries.append(x)
if y>self.xrange[1] or y<self.xrange[0]:
print(f'Warning: cobweb plot got out of range and could not compute {self.max_steps} steps.')
break
self.ax['TimeSeries'].scatter(xTimeSeries , yTimeSeries, color='black', s=10)
self.ax['TimeSeries'].plot(xTimeSeries , yTimeSeries, 'k:')
self.fig['Cobweb'].canvas.draw_idle()
self.fig['TimeSeries'].canvas.draw_idle()
return self.fig['Cobweb'], self.ax['Cobweb'], self.fig['TimeSeries'], self.ax['TimeSeries']
def add_slider(self, param_name, *, valinit=None, valstep=0.1, valinterval=10):
"""
Adds a slider on an existing plot.
Parameters
----------
param_name : str
The string key of the variable. Must be the same as the key in the `dF` funcion.
Key Arguments
-------------
valinit : float
Initial value of the parameter.
valinterval : Union[float, list]
The range of values the slider of the parameter will cover.
valstep : float
Precision in the slider.
"""
self._create_sliders_plot()
self.sliders.update({param_name: sliders.Slider(self, param_name, valinit=valinit, valstep=valstep, valinterval=valinterval)})
self.sliders[param_name].slider.on_changed(self.sliders[param_name])
def _create_sliders_plot(self):
"""
Internally used method. Checks if there is already a sliders plot. If not, it creates it.
"""
if not isinstance(self.sliders_fig, plt.Figure):
self.sliders_fig, self.sliders_ax = plt.subplots()
self.sliders_ax.set_visible(False)
def add_funcion(self, funcion1d, *, n_points=500, xRange=None, dF_args=None, color='g'):
"""
Adds a funcion to the cobweb plot.
Parameters
---------
funcion1d : callable
A dF type funcion.
Key Arguments
------------
n_points : int
Number of points in the funcion representation.
xRange : list
The x range in which the points are calculated.
dF_args : dict
If necesary, must contain the kargs for the `dF` funcion.
color : str
String matplotlib color identifier.
"""
self.funcions.append(Funcion1D(self, funcion1d, n_points=n_points, xRange=xRange, dF_args=None, color=color))
def update_dF_args(self):
for name, slider in self.sliders.items():
if slider.value!= None and name!=r'$x_0$':
self.dF_args[name] = slider.value
if self.sliders.get(r'$x_0$'):
self.initial_position = self.sliders[r'$x_0$'].value
def initial_position_slider(self, *, valinit=None, valstep=0.05, valinterval=None):
"""
Adds a slider for changing initial value on a cobweb plot.
Key Arguments
-------------
valinterval : Union[float, list]
The range of values the slider of the parameter will cover.
valstep : float
Precision in the slider.
"""
if valinit is None:
valinit = self.initial_position
if valinterval is None:
valinterval = list(self.xrange)
self.add_slider(r'$x_0$', valinit=valinit, valstep=valstep, valinterval=valinterval)
@property
def dF(self):
return self._dF
@dF.setter
def dF(self, func):
if not callable(func):
raise exceptions.dFNotCallable(func)
try:
sig = signature(func)
except ValueError:
pass
self._dF = func
@property
def xrange(self):
return self._xrange
@xrange.setter
def xrange(self, value):
if value == None:
self._xrange = None
return
self._xrange = np.array(utils.construct_interval(value, dim=1))
@property
def yrange(self):
return self._yrange
@yrange.setter
def yrange(self, value):
if value == []:
self._yrange = []
return
self._yrange = np.array(utils.construct_interval(value, dim=1))
@property
def dF_args(self):
return self._dF_args
@dF_args.setter
def dF_args(self, value):
if value:
if not isinstance(value, dict):
raise exceptions.dF_argsInvalid(value)
self._dF_args = value
| 29.748344
| 141
| 0.56801
|
c89e9e5feecf379f7049b45e2dcc243b964dd95e
| 961
|
py
|
Python
|
api/cases/migrations/0019_prep_advice_model.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 3
|
2019-05-15T09:30:39.000Z
|
2020-04-22T16:14:23.000Z
|
api/cases/migrations/0019_prep_advice_model.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 85
|
2019-04-24T10:39:35.000Z
|
2022-03-21T14:52:12.000Z
|
api/cases/migrations/0019_prep_advice_model.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 1
|
2021-01-17T11:12:19.000Z
|
2021-01-17T11:12:19.000Z
|
# Generated by Django 2.2.11 on 2020-05-07 11:24
import django
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("teams", "0002_auto_20200307_1805"),
("cases", "0018_auto_20200505_1721"),
]
operations = [
migrations.AlterModelTable(name="advice", table="advice",),
migrations.AddField(
model_name="advice",
name="level",
field=models.CharField(
choices=[("user", "User"), ("team", "Team"), ("final", "Final")], default="user", max_length=30
),
preserve_default=False,
),
migrations.RenameField(model_name="teamadvice", old_name="team", new_name="old_team",),
migrations.AddField(
model_name="advice",
name="team",
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="teams.Team"),
),
]
| 32.033333
| 111
| 0.59001
|
5d01af9fb0f78fd34ac13bc93d2dea6d95f208c7
| 9,602
|
py
|
Python
|
akshare/futures/futures_basis.py
|
dindom999/akshare
|
95b38d3430c71637c3ee9ba799618c20afe4a010
|
[
"MIT"
] | 1
|
2020-10-07T01:19:13.000Z
|
2020-10-07T01:19:13.000Z
|
akshare/futures/futures_basis.py
|
dindom999/akshare
|
95b38d3430c71637c3ee9ba799618c20afe4a010
|
[
"MIT"
] | 1
|
2020-09-07T11:18:55.000Z
|
2020-09-07T11:18:55.000Z
|
akshare/futures/futures_basis.py
|
dindom999/akshare
|
95b38d3430c71637c3ee9ba799618c20afe4a010
|
[
"MIT"
] | 1
|
2021-04-06T17:06:49.000Z
|
2021-04-06T17:06:49.000Z
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/3/24 15:00
Desc: 生意社网站采集大宗商品现货价格及相应基差数据, 数据时间段从 20110104-至今
备注:现期差 = 现货价格 - 期货价格(这里的期货价格为结算价)
黄金为 元/克, 白银为 元/千克, 玻璃现货为 元/平方米, 鸡蛋现货为 元/公斤, 鸡蛋期货为 元/500千克, 其余为 元/吨.
焦炭现货规格是: 一级冶金焦; 焦炭期货规格: 介于一级和二级之间, 焦炭现期差仅供参考.
铁矿石现货价格是: 湿吨, 铁矿石期货价格是: 干吨
网页地址: http://www.100ppi.com/sf/
历史数据可以通过修改 url 地址来获取, 比如: http://www.100ppi.com/sf/day-2017-09-12.html
发现生意社的 bugs:
1. 2018-09-12 周三 数据缺失是因为生意社源数据在该交易日缺失: http://www.100ppi.com/sf/day-2018-09-12.html
"""
import datetime
import re
import time
import warnings
import pandas as pd
from akshare.futures import cons
from akshare.futures.requests_fun import pandas_read_html_link
from akshare.futures.symbol_var import chinese_to_english
calendar = cons.get_calendar()
def futures_spot_price_daily(start_day=None, end_day=None, vars_list=cons.contract_symbols):
"""
获取某段时间大宗商品现货价格及相应基差
:param start_day: str 开始日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象; 默认为当天
:param end_day: str 结束数据 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象; 默认为当天
:param vars_list: list 合约品种如 [RB, AL]; 默认参数为所有商品
:return: pandas.DataFrame
展期收益率数据:
var 商品品种 string
sp 现货价格 float
near_symbol 临近交割合约 string
near_price 临近交割合约结算价 float
dom_symbol 主力合约 string
dom_price 主力合约结算价 float
near_basis 临近交割合约相对现货的基差 float
dom_basis 主力合约相对现货的基差 float
near_basis_rate 临近交割合约相对现货的基差率 float
dom_basis_rate 主力合约相对现货的基差率 float
date 日期 string YYYYMMDD
"""
start_day = (
cons.convert_date(start_day) if start_day is not None else datetime.date.today()
)
end_day = (
cons.convert_date(end_day)
if end_day is not None
else cons.convert_date(cons.get_latest_data_date(datetime.datetime.now()))
)
df_list = []
while start_day <= end_day:
print(start_day)
temp_df = futures_spot_price(start_day, vars_list)
if temp_df is False:
return pd.concat(df_list).reset_index(drop=True)
elif temp_df is not None:
df_list.append(temp_df)
start_day += datetime.timedelta(days=1)
if len(df_list) > 0:
temp_df = pd.concat(df_list)
temp_df.reset_index(drop=True, inplace=True)
return temp_df
def futures_spot_price(date="20200401", vars_list=cons.contract_symbols):
"""
获取某个交易日大宗商品现货价格及相应基差
:param date: 开始日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:param vars_list: 合约品种如RB、AL等列表 为空时为所有商品
:return: pandas.DataFrame
展期收益率数据:
var 商品品种 string
sp 现货价格 float
near_symbol 临近交割合约 string
near_price 临近交割合约结算价 float
dom_symbol 主力合约 string
dom_price 主力合约结算价 float
near_basis 临近交割合约相对现货的基差 float
dom_basis 主力合约相对现货的基差 float
near_basis_rate 临近交割合约相对现货的基差率 float
dom_basis_rate 主力合约相对现货的基差率 float
date 日期 string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2011, 1, 4):
raise Exception("数据源开始日期为 20110104, 请将获取数据时间点设置在 20110104 后")
if date.strftime("%Y%m%d") not in calendar:
warnings.warn(f"{date.strftime('%Y%m%d')}非交易日")
return None
u1 = cons.SYS_SPOT_PRICE_LATEST_URL
u2 = cons.SYS_SPOT_PRICE_URL.format(date.strftime("%Y-%m-%d"))
i = 1
while True:
for url in [u2, u1]:
try:
# url = u2
r = pandas_read_html_link(url)
string = r[0].loc[1, 1]
news = "".join(re.findall(r"[0-9]", string))
if news[3:11] == date.strftime("%Y%m%d"):
records = _check_information(r[1], date)
records.index = records["symbol"]
var_list_in_market = [i for i in vars_list if i in records.index]
temp_df = records.loc[var_list_in_market, :]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
else:
time.sleep(3)
except:
print(f"{date.strftime('%Y-%m-%d')}日生意社数据连接失败,第{str(i)}次尝试,最多5次")
i += 1
if i > 5:
print(
f"{date.strftime('%Y-%m-%d')}日生意社数据连接失败, 如果当前交易日是 2018-09-12, 由于生意社源数据缺失, 无法访问, 否则为重复访问已超过5次,您的地址被网站墙了,请保存好返回数据,稍后从该日期起重试"
)
return False
def _check_information(df_data, date):
"""
进行数据验证和计算模块
:param df_data: pandas.DataFrame 采集的数据
:param date: datetime.date 具体某一天 YYYYMMDD
:return: pandas.DataFrame
中间数据
symbol spot_price near_contract ... near_basis_rate dom_basis_rate date
CU 49620.00 cu1811 ... -0.002418 -0.003426 20181108
RB 4551.54 rb1811 ... -0.013521 -0.134359 20181108
ZN 22420.00 zn1811 ... -0.032114 -0.076271 20181108
AL 13900.00 al1812 ... 0.005396 0.003957 20181108
AU 274.10 au1811 ... 0.005655 0.020430 20181108
WR 4806.25 wr1903 ... -0.180026 -0.237035 20181108
RU 10438.89 ru1811 ... -0.020969 0.084406 20181108
PB 18600.00 pb1811 ... -0.001344 -0.010215 20181108
AG 3542.67 ag1811 ... -0.000754 0.009408 20181108
BU 4045.53 bu1811 ... -0.129904 -0.149679 20181108
HC 4043.33 hc1811 ... -0.035449 -0.088128 20...
"""
df_data = df_data.loc[:, [0, 1, 2, 3, 5, 6]]
df_data.columns = [
"symbol",
"spot_price",
"near_contract",
"near_contract_price",
"dominant_contract",
"dominant_contract_price",
]
records = pd.DataFrame()
for string in df_data["symbol"].tolist():
if string == "PTA":
news = "PTA"
else:
news = "".join(re.findall(r"[\u4e00-\u9fa5]", string))
if news != "" and news not in ["商品", "价格", "上海期货交易所", "郑州商品交易所", "大连商品交易所"]:
symbol = chinese_to_english(news)
record = pd.DataFrame(df_data[df_data["symbol"] == string])
record.loc[:, "symbol"] = symbol
record.loc[:, "spot_price"] = record.loc[:, "spot_price"].astype(float)
if (
symbol == "JD"
): # 鸡蛋现货为元/公斤, 鸡蛋期货为元/500千克, 其余元/吨(http://www.100ppi.com/sf/)
record.loc[:, "spot_price"] = float(record["spot_price"]) * 500
elif (
symbol == "FG"
): # 上表中现货单位为元/平方米, 期货单位为元/吨. 换算公式:元/平方米*80=元/吨(http://www.100ppi.com/sf/959.html)
record.loc[:, "spot_price"] = float(record["spot_price"]) * 80
records = records.append(record)
records.loc[
:, ["near_contract_price", "dominant_contract_price", "spot_price"]
] = records.loc[
:, ["near_contract_price", "dominant_contract_price", "spot_price"]
].astype(
"float"
)
records.loc[:, "near_contract"] = records["near_contract"].replace(
r"[^0-9]*(\d*)$", r"\g<1>", regex=True
)
records.loc[:, "dominant_contract"] = records["dominant_contract"].replace(
r"[^0-9]*(\d*)$", r"\g<1>", regex=True
)
records.loc[:, "near_contract"] = records["symbol"] + records.loc[
:, "near_contract"
].astype("int").astype("str")
records.loc[:, "dominant_contract"] = records["symbol"] + records.loc[
:, "dominant_contract"
].astype("int").astype("str")
records["near_contract"] = records["near_contract"].apply(
lambda x: x.lower()
if x[:-4]
in cons.market_exchange_symbols["shfe"] + cons.market_exchange_symbols["dce"]
else x
)
records.loc[:, "dominant_contract"] = records.loc[:, "dominant_contract"].apply(
lambda x: x.lower()
if x[:-4]
in cons.market_exchange_symbols["shfe"] + cons.market_exchange_symbols["dce"]
else x
)
records.loc[:, "near_contract"] = records.loc[:, "near_contract"].apply(
lambda x: x[:-4] + x[-3:]
if x[:-4] in cons.market_exchange_symbols["czce"]
else x
)
records.loc[:, "dominant_contract"] = records.loc[:, "dominant_contract"].apply(
lambda x: x[:-4] + x[-3:]
if x[:-4] in cons.market_exchange_symbols["czce"]
else x
)
records["near_basis"] = records["near_contract_price"] - records["spot_price"]
records["dom_basis"] = records["dominant_contract_price"] - records["spot_price"]
records["near_basis_rate"] = (
records["near_contract_price"] / records["spot_price"] - 1
)
records["dom_basis_rate"] = (
records["dominant_contract_price"] / records["spot_price"] - 1
)
records.loc[:, "date"] = date.strftime("%Y%m%d")
return records
if __name__ == "__main__":
get_spot_price_daily_df = futures_spot_price_daily(start_day="20200315", end_day="20200325")
print(get_spot_price_daily_df)
get_spot_price_df = futures_spot_price("20200115")
print(get_spot_price_df)
| 40.344538
| 146
| 0.57009
|
7dd5254a4c3ed579f5034c4746f1945447fc235f
| 8,744
|
py
|
Python
|
components/contributions.py
|
n8wachT/BotListBot
|
457160498a90c8d0a63d5a9f7400227e35431b6d
|
[
"MIT"
] | null | null | null |
components/contributions.py
|
n8wachT/BotListBot
|
457160498a90c8d0a63d5a9f7400227e35431b6d
|
[
"MIT"
] | null | null | null |
components/contributions.py
|
n8wachT/BotListBot
|
457160498a90c8d0a63d5a9f7400227e35431b6d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
import logging
import re
from pprint import pprint
from peewee import fn
from telegram import Message as TelegramMessage
from telegram import ParseMode
from telegram.ext import ConversationHandler
import mdformat
import settings
import util
from model import User, Bot, Suggestion, Country
from model.revision import Revision
from util import track_groups
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
def extract_bot_mentions(message: TelegramMessage):
text = message.text
matches = re.findall(settings.REGEX_BOT_IN_TEXT, text)
pprint(matches)
# If it ends in "bot", we can be sure it's a bot.
# Other ones will be thrown away, assuming that we already have all the verified bots
def notify_bot_spam(bot, update, args=None):
tg_user = update.message.from_user
user = User.from_telegram_object(tg_user)
if util.stop_banned(update, user):
return
reply_to = util.original_reply_id(update)
if args:
text = ' '.join(args)
else:
text = update.message.text
command_no_args = len(re.findall(r'^/spam\s*$', text)) > 0 or text.lower().strip() == '/spam@botlistbot'
if command_no_args:
update.message.reply_text(
util.action_hint("Please use this command with an argument. For example:\n/spam @mybot"),
reply_to_message_id=reply_to)
return
# `#spam` is already checked by handler
try:
username = re.match(settings.REGEX_BOT_IN_TEXT, text).groups()[0]
if username == '@' + settings.SELF_BOT_NAME:
log.info("Ignoring {}".format(text))
return
except AttributeError:
if args:
update.message.reply_text(util.failure("Sorry, but you didn't send me a bot `@username`."), quote=True,
parse_mode=ParseMode.MARKDOWN, reply_to_message_id=reply_to)
else:
log.info("Ignoring {}".format(text))
# no bot username, ignore update
pass
return
try:
spam_bot = Bot.get(fn.lower(Bot.username) ** username.lower(), Bot.approved == True)
try:
Suggestion.get(action="spam", subject=spam_bot)
except Suggestion.DoesNotExist:
suggestion = Suggestion(user=user, action="spam", date=datetime.date.today(), subject=spam_bot)
suggestion.save()
update.message.reply_text(util.success("Thank you! We will review your suggestion and mark the bot as spammy."),
reply_to_message_id=reply_to)
except Bot.DoesNotExist:
update.message.reply_text(util.action_hint("The bot you sent me is not in the @BotList."),
reply_to_message_id=reply_to)
return ConversationHandler.END
def notify_bot_offline(bot, update, args=None):
tg_user = update.message.from_user
user = User.from_telegram_object(tg_user)
if util.stop_banned(update, user):
return
reply_to = util.original_reply_id(update)
if args:
text = ' '.join(args)
else:
text = update.message.text
command_no_args = len(re.findall(r'^/offline\s*$', text)) > 0 or text.lower().strip() == '/offline@botlistbot'
if command_no_args:
update.message.reply_text(
util.action_hint("Please use this command with an argument. For example:\n/offline @mybot"),
reply_to_message_id=reply_to)
return
# `#offline` is already checked by handler
try:
username = re.match(settings.REGEX_BOT_IN_TEXT, text).groups()[0]
if username == '@' + settings.SELF_BOT_NAME:
log.info("Ignoring {}".format(text))
return
except AttributeError:
if args:
update.message.reply_text(util.failure("Sorry, but you didn't send me a bot `@username`."), quote=True,
parse_mode=ParseMode.MARKDOWN, reply_to_message_id=reply_to)
else:
log.info("Ignoring {}".format(text))
# no bot username, ignore update
pass
return
def already_reported():
update.message.reply_text(mdformat.none_action("Someone already reported this, thanks anyway 😊"),
reply_to_message_id=reply_to)
try:
offline_bot = Bot.get(fn.lower(Bot.username) ** username.lower(), Bot.approved == True)
if offline_bot.offline:
return already_reported()
if offline_bot.official:
update.message.reply_text(mdformat.none_action("Official bots usually don't go offline for a long time. "
"Just wait a couple hours and it will be back up ;)"),
reply_to_message_id=reply_to)
return
try:
Suggestion.get(action="offline", subject=offline_bot, executed=False)
return already_reported()
except Suggestion.DoesNotExist:
suggestion = Suggestion(user=user, action="offline", value=True, date=datetime.date.today(),
subject=offline_bot)
suggestion.save()
update.message.reply_text(util.success("Thank you! We will review your suggestion and set the bot offline."),
reply_to_message_id=reply_to)
except Bot.DoesNotExist:
update.message.reply_text(
util.action_hint("The bot you sent me is not in the @BotList."), reply_to_message_id=reply_to)
return ConversationHandler.END
@track_groups
def new_bot_submission(bot, update, chat_data, args=None):
tg_user = update.message.from_user
user = User.from_telegram_object(tg_user)
if util.stop_banned(update, user):
return
reply_to = util.original_reply_id(update)
if args:
text = ' '.join(args)
else:
text = update.message.text
command_no_args = len(re.findall(r'^/new\s*$', text)) > 0 or text.lower().strip() == '/new@botlistbot'
if command_no_args:
update.message.reply_text(util.action_hint(
"Please use this command with an argument. For example:\n/new @mybot 🔎"),
reply_to_message_id=reply_to)
return
# `#new` is already checked by handler
try:
username = re.match(settings.REGEX_BOT_IN_TEXT, text).groups()[0]
if username.lower() == '@' + settings.SELF_BOT_NAME.lower():
log.info("Ignoring {}".format(text))
return
except AttributeError:
if args:
update.message.reply_text(util.failure("Sorry, but you didn't send me a bot `@username`."), quote=True,
parse_mode=ParseMode.MARKDOWN, reply_to_message_id=reply_to)
log.info("Ignoring {}".format(text))
# no bot username, ignore update
return
try:
new_bot = Bot.by_username(username)
if new_bot.approved:
update.message.reply_text(
util.action_hint("Sorry fool, but {} is already in the @BotList 😉".format(new_bot.username)),
reply_to_message_id=reply_to)
else:
update.message.reply_text(
util.action_hint("{} has already been submitted. Please have patience...".format(new_bot.username)),
reply_to_message_id=reply_to)
return
except Bot.DoesNotExist:
new_bot = Bot(revision=Revision.get_instance().next, approved=False, username=username, submitted_by=user)
new_bot.inlinequeries = "🔎" in text
new_bot.official = "🔹" in text
# find language
languages = Country.select().execute()
for lang in languages:
if lang.emoji in text:
new_bot.country = lang
new_bot.date_added = datetime.date.today()
description_reg = re.match(settings.REGEX_BOT_IN_TEXT + ' -\s?(.*)', text)
description_notify = ''
if description_reg:
description = description_reg.group(2)
new_bot.description = description
description_notify = ' Your description was included.'
new_bot.save()
if util.is_private_message(update) and util.uid_from_update(update) in settings.MODERATORS:
from components.explore import send_bot_details
send_bot_details(bot, update, chat_data, new_bot)
else:
msg = update.message.reply_text(
util.success("You submitted {} for approval.{}".format(new_bot, description_notify)),
parse_mode=ParseMode.MARKDOWN, reply_to_message_id=reply_to)
return ConversationHandler.END
| 39.745455
| 120
| 0.631404
|
2125d50f0a942780416d3a75b0ee164f257d3cba
| 9,082
|
py
|
Python
|
DroneCode/coordTrans.py
|
RafaelCabanas/ret2018software
|
de810b8a5840a401a87d794cee991939bd6c35dd
|
[
"MIT"
] | null | null | null |
DroneCode/coordTrans.py
|
RafaelCabanas/ret2018software
|
de810b8a5840a401a87d794cee991939bd6c35dd
|
[
"MIT"
] | null | null | null |
DroneCode/coordTrans.py
|
RafaelCabanas/ret2018software
|
de810b8a5840a401a87d794cee991939bd6c35dd
|
[
"MIT"
] | 1
|
2019-07-16T15:44:51.000Z
|
2019-07-16T15:44:51.000Z
|
from tkinter import Tk # to get window and select file.
from tkinter.filedialog import askopenfilename
import sys # to kill program when needed.
import os # to run instruction in the bash.
import pandas # to manipulate csv files.
import math # to use sin and cos
import numpy # to use numpyarrays
import pyproj # library that does coordinate
# transformations.
import matplotlib as mpl # to plot data.
from mpl_toolkits.mplot3d import Axes3D
mpl.use("TKAgg") ### Uncomment this line to use framework TKAgg
# That will be a temporary solution. Trying the files before:
# Use it becuase the python is using the wrong frameowrk and gives an error.
# Creating the following .matplotlib/matplotlibrc file seem to solve the problem:
# Downloaded a sample file that only contains uncommented the following line:
# backend : TKAgg
# The file is in the folder ~/.matplotlib and it has extension .dms
# That doesn't solve the problem.
# I added the same file to the anaconda matplotlib folder.
# /anaconda3/lib/python3.6/site-packages/matplotlib/
# Didn't work either... so I just have to add that line with TKAgg framework...
# Solution maybe here: https://www.codesofinterest.com/2018/05/fixing-matplotlib-pyplot-import-errors.html
import matplotlib.pyplot as plt
####### Function that gets the file name you want to trasnform. #######
# and make a GPS coordinates (Lat, Long, Alt) change to ECEF coordinate
# system.
def GPS_to_ECEF_pyproj_fromFile():
print("Select .CSV file to convert. Make sure latitude, longitude and altitude columns are labled Lat, Lon, Alt")
Tk().withdraw() # we don't want the full GUI, this keeps the root window
# from appearing.
fileName=askopenfilename()
extension=fileName[len(fileName)-3::] # gets extension
print(fileName)
print(extension)
if extension=='csv' or extension!='CSV':
pass
else:
print('Wrong extension of file, try again...')
sys.exit()
# read the csv file into variable DataFrame (This is for files processed with
# our own software, labled "CLEAN" If it is a random CSV file, won't read the right columns
# or you labled them by hand 'Lat', 'Lon', 'Alt')
colnamesNew=['timestamp','Lat','Lon','Alt']
df=pandas.read_csv(fileName,usecols=colnamesNew) # df is of the type DataFrame.
# create new variable that is an array:
gpsLatLonAlt=df.to_numpy(copy=True) # returns a ndarray (Numpy-array, NOT a Numpy-matrix). Options for columns= list, optional.
print(gpsLatLonAlt)
#### Point that links witht the main program, that does the transformatoin
# reading from a CSV file. Here we already have the CSV raw file. CoordTrans
# function reads the clean CSV file, but the user might chose not to produce it.
######## Coordinate transformation ########
ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
GPSxyz=numpy.empty(shape=gpsLatLonAlt.shape)
GPSxyz[:,0]=gpsLatLonAlt[:,0] # timestamp is the same time.
for i in range(gpsLatLonAlt.shape[0]):
GPSxyz[i,1],GPSxyz[i,2],GPSxyz[i,3]= pyproj.transform(lla, ecef, gpsLatLonAlt[i,2], gpsLatLonAlt[i,1], gpsLatLonAlt[i,3], radians=False)
print(GPSxyz)
ECEFcoordinates=pandas.DataFrame(GPSxyz, columns=['timestamp','x','y','z'])
print(ECEFcoordinates)
ECEFcoordinates.to_csv('ECEF_Coordinates_pyProj.csv',columns=['timestamp','x','y','z'])
####### Function that gets the file name you want to trasnform. #######
# and make a GPS coordinates (Lat, Long, Alt) change to ECEF coordinate
# system, using a raw coordinate transformation. Custom made.
def GPS_to_ECEF_custom_fromFile():
print("Select .CSV file to convert. Make sure latitude, longitude and altitude columns are labled Lat, Lon, Alt")
Tk().withdraw() # we don't want the full GUI, this keeps the root window
# from appearing.
fileName=askopenfilename()
extension=fileName[len(fileName)-3::] # gets extension
print(fileName)
print(extension)
if extension=='csv' or extension!='CSV':
pass
else:
print('Wrong extension of file, try again...')
sys.exit()
# read the csv file into variable DataFrame (This is for files processed with
# our own software, labled "CLEAN" If it is a random CSV file, won't read the right columns
# or you labled them by hand 'Lat', 'Lon', 'Alt')
colnamesNew=['timestamp','Lat','Lon','Alt']
df=pandas.read_csv(fileName,usecols=colnamesNew) # df is of the type DataFrame.
# create new variable that is an array:
gpsLatLonAlt=df.to_numpy(copy=True) # returns a ndarray (Numpy-array, NOT a Numpy-matrix). Options for columns= list, optional.
# print(gpsLatLonAlt)
#### Point that links witht the main program, that does the transformatoin
# reading from a CSV file. Here we already have the CSV raw file. CoordTrans
# function reads the clean CSV file, but the user might chose not to produce it.
######## Coordinate transformation ########
GPSxyz=numpy.empty(shape=gpsLatLonAlt.shape)
GPSxyz[:,0]=gpsLatLonAlt[:,0] # timestamp is the same time.
# write latitude and longitude in radians:
gpsLatLonAlt[:,1]=gpsLatLonAlt[:,1]*math.pi/180
gpsLatLonAlt[:,2]=gpsLatLonAlt[:,2]*math.pi/180
# Define variables, like axis ellipsoid and excentriciy
a=6378137.0 # Semi-major axis of ellipsoid of Earth.
# WGS84 Datum. In meters.
inv_f=298.257223563 # Inverse Flattening of ellipsoid of Earth.
# https://confluence.qps.nl/qinsy/en/world-geodetic-system-1984-wgs84-29855173.html#WorldGeodeticSystem1984(WGS84)-WGS84,ITRFandGDA94
f=1/inv_f
e2=1-(1-f)*(1-f) # Eccentricity square; e^2= 2f-f^2
# Creates a numpy array of 1 column and length same as gps matrix rows
N_lat=numpy.empty((numpy.size(gpsLatLonAlt,0),1))
for n in range(numpy.size(gpsLatLonAlt,0)):
N_lat[n,0]=a/math.sqrt(1-e2*math.sin(gpsLatLonAlt[n,1])*math.sin(gpsLatLonAlt[n,1]))
GPSxyz[n,1] = (N_lat[n,0]+gpsLatLonAlt[n,3])*math.cos(gpsLatLonAlt[n,1])*math.cos(gpsLatLonAlt[n,2])
GPSxyz[n,2] = (N_lat[n,0]+gpsLatLonAlt[n,3])*math.cos(gpsLatLonAlt[n,1])*math.sin(gpsLatLonAlt[n,2])
GPSxyz[n,3] = (N_lat[n,0]*(1-e2)+gpsLatLonAlt[n,3])*math.sin(gpsLatLonAlt[n,1])
# print(N_lat)
ECEFcoordinates=pandas.DataFrame(GPSxyz, columns=['timestamp','x','y','z'])
# print(ECEFcoordinates)
ECEFcoordinates.to_csv('ECEF_Coordinates_custom.csv',columns=['timestamp','x','y','z'])
return(GPSxyz,gpsLatLonAlt[0,1],gpsLatLonAlt[0,2])
'''
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(GPSxyz[:,1], GPSxyz[:,2], GPSxyz[:,3], label='parametric curve')
ax.legend()
plt.show()
'''
# Returns GPS coordinates in the ECEF ref system numpyarray, the FIRST Point
# of the file latitude and longitude. This point will be the ORIGIN of the
# ENU reference system in the TLP.
##### function that transforms ECEF coordinate into ENU coordinate, given an
# origin for the reference system.
def ECEF_to_ENU(gpsCoorECEFAsNumpayArray,lat3,lon3):
relativeECEF=numpy.empty(shape=gpsCoorECEFAsNumpayArray.shape)
latRef=lat3
lonRef=lon3
# Use reference point of TLP (Tangent Local Plane) for ENU (East, North, UP)
# reference system at the FIRST point of the file. We can change this to be
# maybe a point at a later time, or after some time has past, or even ask the
# user to introduce the reference lat, lon they might want.
relativeECEF=gpsCoorECEFAsNumpayArray
relativeECEF[:,:]=relativeECEF[:,:]-relativeECEF[0,:]
# Coordinate sytem transformation from ECEF to ENU TLP:
coorENU_rel=numpy.empty(shape=gpsCoorECEFAsNumpayArray.shape)
coorENU_rel[:,0]=relativeECEF[:,0] # Relative timestamp is the same time.
print(coorENU_rel)
for n in range(numpy.size(relativeECEF,0)):
coorENU_rel[n,1]=(-math.sin(lonRef)*relativeECEF[n,1])+(math.cos(lonRef)*relativeECEF[n,2])
coorENU_rel[n,2]=(-math.sin(latRef)*math.cos(lonRef)*relativeECEF[n,1])-(math.sin(latRef)*math.sin(lonRef)*relativeECEF[n,2])+(math.cos(latRef)*relativeECEF[n,3])
coorENU_rel[n,3]=(math.cos(latRef)*math.cos(lonRef)*relativeECEF[n,1])+(math.cos(latRef)*math.sin(lonRef)*relativeECEF[n,2])+(math.sin(latRef)*relativeECEF[n,3])
print(coorENU_rel)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(coorENU_rel[:,1], coorENU_rel[:,2], coorENU_rel[:,3], label='parametric curve')
ax.legend()
plt.show()
if __name__=="__main__":
coorECEF=GPS_to_ECEF_custom_fromFile()
ECEF_to_ENU(coorECEF[0],coorECEF[1],coorECEF[2])
# GPS_to_ECEF_pyproj_fromFile()
| 41.852535
| 170
| 0.676943
|
2d072dff59c23dc8b1840564f14dd0450b4d3844
| 442
|
py
|
Python
|
generate_data.py
|
meggers/pyflix
|
76a322de3e1b8c75988e05e1af343bfbfbd6be10
|
[
"MIT"
] | null | null | null |
generate_data.py
|
meggers/pyflix
|
76a322de3e1b8c75988e05e1af343bfbfbd6be10
|
[
"MIT"
] | null | null | null |
generate_data.py
|
meggers/pyflix
|
76a322de3e1b8c75988e05e1af343bfbfbd6be10
|
[
"MIT"
] | null | null | null |
#generates a csv containing 30,000 1024 byte frames and their order
import os, binascii
csv_file = "server/movie_data.txt"
frame_size = 509
num_frames = 30000
target = open(csv_file, 'wb')
target.truncate()
for index in range(0, num_frames):
frame_no = '{0:05d}'.format(index)
frame_data = binascii.b2a_hex(os.urandom(frame_size))
frame = "{0}{1}\n".format(frame_no, frame_data)
target.write(frame)
target.close()
| 22.1
| 67
| 0.705882
|
67a423f61c8e541f0cdb78f3d01af8eb164660bd
| 1,682
|
py
|
Python
|
sample/sample-python/sample-japanese-tables.py
|
George-/tsduck
|
bcda6aa095c54071c9818ad8c035b5b86389d158
|
[
"BSD-2-Clause"
] | 542
|
2017-06-21T07:40:10.000Z
|
2022-03-29T13:44:39.000Z
|
sample/sample-python/sample-japanese-tables.py
|
George-/tsduck
|
bcda6aa095c54071c9818ad8c035b5b86389d158
|
[
"BSD-2-Clause"
] | 939
|
2017-09-01T21:00:42.000Z
|
2022-03-31T14:39:27.000Z
|
sample/sample-python/sample-japanese-tables.py
|
George-/tsduck
|
bcda6aa095c54071c9818ad8c035b5b86389d158
|
[
"BSD-2-Clause"
] | 167
|
2017-10-30T12:07:29.000Z
|
2022-03-23T11:36:10.000Z
|
#!/usr/bin/env python
#----------------------------------------------------------------------------
#
# TSDuck sample Python application : manipulate PSI/SI tables using various
# options (DTV standard, character sets, etc.)
#
# The input file japanese-tables.bin, located in the same directory as the
# sample source code, contains a TOT and an SDT from the Japanese DTTV.
# The standard is ISDB-T which reuses DVB tables such as TOT and SDT but
# uses different representations for character strings, time reference and
# define ISDB-specific descriptors. When interpreted with the DVB defaults,
# the strings, times and some descriptors are incorrect. The proper settings
# for Japan shall be set before deserializing the tables.
#
#----------------------------------------------------------------------------
import tsduck
# Create a SectionFile.
rep = tsduck.StdErrReport()
duck = tsduck.DuckContext(rep)
file = tsduck.SectionFile(duck)
# Load a binary file containing tables which were capture on a Japanese TS.
file.loadBinary("japanese-tables.bin")
print("Loaded %d bytes, %d sections, %d tables" % (file.binarySize(), file.sectionsCount(), file.tablesCount()))
print()
# Convert to XML.
print("---- XML file content with default DVB settings ----")
print(file.toXML())
# Use typical settings for Japan.
duck.addStandards(tsduck.DuckContext.ISDB | tsduck.DuckContext.JAPAN)
duck.setDefaultCharset("ARIB-STD-B24")
duck.setTimeReference("JST")
# Convert to XML again, see the difference.
print("---- XML file content with Japanese settings ----")
print(file.toXML())
# Deallocate C++ resources (in reverse order from creation).
file.delete()
duck.delete()
rep.delete()
| 36.565217
| 112
| 0.687277
|
466d0c10af31181815eb5f252f30aff7fd6e4ee8
| 2,971
|
py
|
Python
|
qa/rpc-tests/test_framework/coverage.py
|
Bitcoin-OLD/Bitcoin-OLD
|
16627f390aa418a99103843f9d94c48931fad826
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/coverage.py
|
Bitcoin-OLD/Bitcoin-OLD
|
16627f390aa418a99103843f9d94c48931fad826
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/coverage.py
|
Bitcoin-OLD/Bitcoin-OLD
|
16627f390aa418a99103843f9d94c48931fad826
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoinold Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This module contains utilities for doing coverage analysis on the RPC
interface.
It provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper(object):
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__getattr__(
*args, **kwargs)
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
return return_val
@property
def url(self):
return self.auth_service_proxy_instance.url
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `bitcoinold-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
| 27.766355
| 79
| 0.661393
|
ce0c757ffa9b1a80535dc76f7afb0c4e33b8f897
| 6,691
|
py
|
Python
|
result1.py
|
eseJiHeaLim/find_child
|
29596529ccf39241492b092b01baf03b76d0eb3a
|
[
"MIT"
] | null | null | null |
result1.py
|
eseJiHeaLim/find_child
|
29596529ccf39241492b092b01baf03b76d0eb3a
|
[
"MIT"
] | null | null | null |
result1.py
|
eseJiHeaLim/find_child
|
29596529ccf39241492b092b01baf03b76d0eb3a
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import tkinter as tk
import PIL
import os
from PIL import Image
from PIL import ImageTk
#import face_recognition
# 정보
info=[[0]*30 for i in range(4)]
index=0
find_index=-1;
temp_child_name = []
temp_par_name = []
temp_contact1 = []
temp_contact2 = []
# main window
window=tk.Tk()
window.title("main")
window.geometry("640x400+100+100")
window.resizable(False, False)
label=tk.Label(window, text="미아 방지")
label.pack()
width, height = 300, 300
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# take image to upload data
def enroll_take ():
global index
_, frame = cap.read()
frame = cv2.flip(frame, 1)
img_name = "data{}.png".format(index)
cv2.imwrite(os.path.join(img_name), frame)
print("{} written!".format(img_name))
# take image to find child- test data
def enroll_test_take ():
_, frame = cap.read()
frame = cv2.flip(frame, 1)
img_name = "test_data{}.png".format(0)
cv2.imwrite(os.path.join(img_name), frame)
print("{} written!".format(img_name))
# camera preview
def show_frame(lmain):
_, frame = cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
lmain.imgtk = imgtk
lmain.configure(image=imgtk)
# update imformation
def save_child_name(entry_name):
global temp_child_name
temp_child_name=str(entry_name.get())
print(temp_child_name)
def save_par_name(entry_Parname):
global temp_par_name
temp_par_name=str(entry_Parname.get())
def save_contact1(entry_contact1):
global temp_contact1
temp_contact1=str(entry_contact1.get())
def save_contact2(entry_contact2):
global temp_contact2
temp_contact2=str(entry_contact2.get())
# save imformation
def save_input(entry_name,entry_Parname,entry_contact1,entry_contact2):
global index
global info
global temp_child_name
global temp_par_name
global temp_contact1
global temp_contact2
temp_child_name= entry_name.get("1.0","end-1c")
temp_par_name = entry_Parname.get("1.0", "end-1c")
temp_contact1 = entry_contact1.get("1.0", "end-1c")
temp_contact2 = entry_contact2.get("1.0", "end-1c")
info[index][0] = temp_child_name
info[index][1] = temp_par_name
info[index][2] = temp_contact1
info[index][3] = temp_contact2
print(info[index][0])
print(info[index][1])
print(info[index][2])
print(info[index][3])
index=index+1
# enrollment window
def enrollment_information():
popup_enrollInformation=tk.Toplevel(window)
popup_enrollInformation.geometry("800x800+100+100")
popup_enrollInformation.title("정보입력")
image = tk.PhotoImage(file="opencv_frame_0.png")
label = tk.Label(popup_enrollInformation, image=image)
label.pack()
entry_name = tk.Text(popup_enrollInformation,height=1,width=10)
entry_name.pack()
entry_Parname = tk.Text(popup_enrollInformation,height=1,width=10)
entry_Parname.pack()
entry_contact1 = tk.Text(popup_enrollInformation,height=1,width=10)
entry_contact1.pack()
entry_contact2 = tk.Text(popup_enrollInformation,height=1,width=10)
entry_contact2.pack()
buttonCommit = tk.Button(popup_enrollInformation, height=2, text="finish", command=lambda: save_input(entry_name,entry_Parname,entry_contact1,entry_contact2))
buttonCommit.pack()
popup_enrollInformation.mainloop()
# to upload imformation of image window
def enrollment():
popup_enrollment = tk.Toplevel(window)
popup_enrollment.wm_title("enrollment")
popup_enrollment.geometry("640x400+100+100")
popup_enrollment.tkraise(window) # This just tells the message to be on top of the root window.
takeP=tk.Button(popup_enrollment, text="사진찍기", command=enroll_take)
reTakeP=tk.Button(popup_enrollment, text="다시찍기", command=enroll_take)
turnNext = tk.Button(popup_enrollment, text="다음", command=enrollment_information)
takeP.pack()
reTakeP.pack()
turnNext.pack()
lmain = tk.Label(popup_enrollment)
lmain.pack()
lmain.after(1, show_frame(lmain))
popup_enrollment.mainloop()
# show find result window
def show_information():
global find_index
global info
popup_result=tk.Toplevel(window)
popup_result.geometry("800x800+100+100")
popup_result.title("got it ")
# 찾은 이미지
image = tk.PhotoImage(file="test_data0.png")
label = tk.Label(popup_result, image=image)
label.pack()
label_name = tk.Label(popup_result, text="아이 이름 : " + info[find_index][0])
label_name.pack()
label_parname = tk.Label(popup_result, text="부모 이름 : " + info[find_index][1])
label_parname.pack()
label_num1 = tk.Label(popup_result, text="보호자 번호1 : " + info[find_index][2])
label_num1.pack()
label_num2 = tk.Label(popup_result, text="보호자 번호2 : " + info[find_index][3])
label_num2.pack()
popup_result.mainloop()
def check_imformation():
global find_index
picture_of_me = face_recognition.load_image_file("test_data.png")
my_face_encoding = face_recognition.face_encodings(picture_of_me)[0]
for root, dirs, files in os.walk('/home/odroid/Desktop/test_facerecognition/inforamtion'):
for fname in files:
full_fname = os.path.join(root, fname)
unknown_picture = face_recognition.load_image_file(full_fname)
unknown_face_encoding = face_recognition.face_encodings(unknown_picture)[0]
results = face_recognition.compare_faces([my_face_encoding], unknown_face_encoding)
if results[0] == True:
find_index=int(fname.split('.')[0])
else:
print("not exist child")
show_information()
def findPar():
popup_findPar = tk.Toplevel(window)
popup_findPar.wm_title("findPAr")
popup_findPar.geometry("640x400+100+100")
popup_findPar.tkraise(window) # This just tells the message to be on top of the root window.
takenP = tk.Button(popup_findPar, text="사진찍기", command=enroll_test_take)
reTakenP = tk.Button(popup_findPar, text="다시찍기", command=enroll_test_take)
Next = tk.Button(popup_findPar, text="다음", command=check_imformation)
takenP.pack()
reTakenP.pack()
Next.pack()
l = tk.Label(popup_findPar)
l.pack()
l.after(1, show_frame(l))
popup_findPar.mainloop()
# main window upload child imformation
button_enrollment= tk.Button(window, text="미아등록" , command=enrollment)
button_enrollment.pack()
#main window find child button
button_find= tk.Button(window, text="보호자 찾기" , command=findPar)
button_find.pack()
window.mainloop()
| 30.276018
| 162
| 0.712599
|
ef1f30c3924c00344adccaf01b0a31971836ef41
| 32,699
|
py
|
Python
|
code/BERT/eval_mixup_model_OOD.py
|
StevenyzZhang/Guided-Adversarial-Augmentation
|
35602d20a969597bb1b1ddb4e98f65a53d4d21a9
|
[
"Apache-2.0"
] | 1
|
2022-03-22T19:37:45.000Z
|
2022-03-22T19:37:45.000Z
|
code/BERT/eval_mixup_model_OOD.py
|
StevenyzZhang/Guided-Adversarial-Augmentation
|
35602d20a969597bb1b1ddb4e98f65a53d4d21a9
|
[
"Apache-2.0"
] | 1
|
2022-03-22T19:49:43.000Z
|
2022-03-22T19:50:10.000Z
|
code/BERT/eval_mixup_model_OOD.py
|
StevenyzZhang/Guided-Adversarial-Augmentation
|
35602d20a969597bb1b1ddb4e98f65a53d4d21a9
|
[
"Apache-2.0"
] | 1
|
2022-03-22T19:40:57.000Z
|
2022-03-22T19:40:57.000Z
|
import argparse
import glob
import logging as log
import os
import random
import time
import torch.nn.functional as F
import numpy as np
import torch
from eval_utils import f1_score, precision_score, recall_score, classification_report, macro_score
from utils import gen_knn_mix_batch
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import pickle
from transformers import *
from read_data import *
from tensorboardX import SummaryWriter
from bert_models import BertModel4Mix
logger = log.getLogger(__name__)
use_cuda = torch.cuda.is_available()
#CUDA_VISIBLE_DEVICES=6,7
#os.environ["CUDA_VISIBLE_DEVICES"] = "6,7"
MODEL_CLASSES = {"bert": (BertConfig, BertForTokenClassification, BertTokenizer)}
parser = argparse.ArgumentParser(description='PyTorch BaseNER')
parser.add_argument("--data-dir", default = './data', type = str, required = True)
parser.add_argument("--model-type", default = 'bert', type = str)
parser.add_argument("--model-name", default = 'bert-base-multilingual-cased', type = str)
parser.add_argument("--output-dir", default = './german_eval', type = str)
parser.add_argument('--gpu', default='0,1,2,3', type=str, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--train-examples', default = -1, type = int)
parser.add_argument("--labels", default = "", type = str)
parser.add_argument('--config-name', default = '', type = str)
parser.add_argument("--tokenizer-name", default = '', type = str)
parser.add_argument("--max-seq-length", default = 128, type = int)
parser.add_argument("--do-train", action="store_true", help="Whether to run training.")
parser.add_argument("--do-eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument("--do-predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument("--evaluate-during-training", action="store_true", help="Whether to run evaluation during training at each logging step.")
parser.add_argument("--do-lower-case", action="store_true", help="Set this flag if you are using an uncased model.")
parser.add_argument("--batch-size", default = 16, type = int)
parser.add_argument('--eval-batch-size', default = 128, type = int)
parser.add_argument("--gradient-accumulation-steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning-rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight-decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam-epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--num-train-epochs", default=20, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--max-steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument('--warmup-steps', default = 0, type = int, help="Linear warmup over warmup_steps.")
parser.add_argument('--logging-steps', default = 150, type = int, help="Log every X updates steps.")
parser.add_argument("--save-steps", type=int, default=0, help="Save checkpoint every X updates steps.")
parser.add_argument("--eval-all-checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--overwrite-output-dir", action="store_true", help="Overwrite the content of the output directory")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--pad-subtoken-with-real-label", action="store_true", help="give real label to the padded token instead of `-100` ")
parser.add_argument("--subtoken-label-type",default='real', type=str,help="[real|repeat|O] three ways to do pad subtoken with real label. [real] give the subtoken a real label e.g., B -> B I. [repeat] simply repeat the label e.g., B -> B B. [O] give it a O label. B -> B O")
parser.add_argument("--eval-pad-subtoken-with-first-subtoken-only", action="store_true", help="only works when --pad-subtoken-with-real-label is true, in this mode, we only test the prediction of the first subtoken of each word (if the word could be tokenized into multiple subtoken)")
parser.add_argument("--label-sep-cls", action="store_true", help="label [SEP] [CLS] with special labels, but not [PAD]")
# inter mix
parser.add_argument('--mix-layers-set', nargs='+', default = [6,9,12], type=int)
# semi
parser.add_argument("--u-batch-size", default = 64, type = int)
parser.add_argument('--T', type = float, default = 1.0,help='sharpen temperature')
parser.add_argument('--sharp', action='store_true')
parser.add_argument('--weight', type = float, default = 1.0)
parser.add_argument("--log-file", default = "results.csv", type = str,help="the file to store resutls")
parser.add_argument("--optimizer", default = "adam", type = str,help='optimizer')
parser.add_argument('--special-label-weight', default=0, type=float, help='the special_label_weight in training . default 0')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.device = device
args.n_gpu = torch.cuda.device_count()
print("gpu num: ", args.n_gpu)
best_f1 = 0
#print('perform mix: ', args.mix_option)
print("mix layers sets: ", args.mix_layers_set)
def set_seed(args):
logger.info("random seed %s", args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if len(args.gpu) > 0:
torch.cuda.manual_seed_all(args.seed)
def gen_aug_mix_batch(augalpha,augbeta,regalpha,regbeta,minbool,example_to_aug_example,aug_example_to_example_dict,batch,train_dataset,train_size):
"""
Inputs:
batch: the main training batch. a list of features. batch[5]: all_sent_id, a tensor of each sent's id
train_dataset: the whole training set
sent_id_knn_array: the I array from FAISS
knn_mix_ratio: ratio of mix batch's samples are from the cluster, other samples are randomly selected.
train_size: the size of training set
Outputs:
mix_batch: similar to batch. to do mix
"""
#1. fatch the sent_id from batch
sent_id_batch = batch[5]
#2. sample a sent_id for each sent
B_mix_batch=[]
A_mix_batch=[]
regular_nomix_batch=[]
l=0
l_list=[]
if (200-0)<1e-6:
l_reg=1
else:
if regbeta==-1:
l = np.random.beta(regalpha, regalpha)
else:
l = np.random.beta(regalpha, regbeta)
l_reg = max(l, 1-l)
if (augalpha-0)<1e-6:
l_aug=1
else:
if augbeta==-1:
l = np.random.beta(augalpha, augalpha)
else:
l = np.random.beta(augalpha, augbeta)
#if minbool:
# l_aug = min(l, 1-l)
#else:
l_aug = max(l, 1-l)
for sent_id in sent_id_batch:
sentenceid=sent_id.cpu().numpy()
s=int(sentenceid)
if s in example_to_aug_example.keys():
A_mix_batch.append(s)
B_mix_batch.append(example_to_aug_example[s])
l_list.append(l_reg)
elif s in aug_example_to_example_dict.keys():
A_mix_batch.append(s)
B_mix_batch.append(aug_example_to_example_dict[s])
l_list.append(l_aug)
else:
regular_nomix_batch.append(s)
#3. make the batch
B_mix_batch_datapoints = train_dataset[B_mix_batch]
A_mix_batch_datapoints = train_dataset[A_mix_batch]
reg_nomix_batch = train_dataset[regular_nomix_batch]
l_tensor=torch.ones(len(A_mix_batch)).cuda()
j=0
for i in l_list:
l_tensor[j]=i
j=j+1
l_expanded=l_tensor[:, None,None]
return B_mix_batch_datapoints,A_mix_batch_datapoints,reg_nomix_batch,l_expanded
def linear_rampup(current, rampup_length=args.num_train_epochs):
if rampup_length == 0:
return 1.0
else:
current = np.clip(current / rampup_length, 0.0, 1.0)
return float(current)
def train(args,train_dataset, eval_dataset, test_dataset_regular,test_dataset_challenging, model, tokenizer, labels, pad_token_label_id,example_to_aug_example,aug_example_to_example_dict,unlabeled_dataset=None):#example_to_aug_example second to last param
global best_f1
tb_writer = SummaryWriter()
print('tb_writer.logdir',tb_writer.logdir)
train_dataloader = DataLoader(train_dataset, batch_size = args.batch_size, shuffle = True)
labeled_dataloader = train_dataloader
augalpha=200
augbeta=5
regalpha= 200
regbeta=5
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
if args.n_gpu > 1:
print("Somehow got here!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
model = torch.nn.DataParallel(model)
if args.optimizer=='adam':
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer=='sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Total train batch size (w. parallel, accumulation) = %d",
args.batch_size
* args.gradient_accumulation_steps),
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
tr_loss, logging_loss = 0.0, 0.0
#eval_f1 = []
test_f1 = []
test_f1_regular = []
test_f1_challenging = []
model.zero_grad()
minbool=True
train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch')
set_seed(args)
for epoch in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batchorig in enumerate(epoch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batchorig = tuple(t.to(args.device) for t in batchorig)
#inputs_a = {"input_ids": batch[0],"attention_mask": batch[1],'subtoken_ids':batch[4]}
#target_a=batch[3]
# set inputs A and inputs B
batch_b,batch,batch_noaugpair,l = gen_aug_mix_batch(augalpha,augbeta,regalpha,regbeta,minbool,example_to_aug_example,aug_example_to_example_dict,batch=batchorig,train_dataset=train_dataset,train_size=args.train_examples)
inputs_a = {"input_ids": batch[0].to(args.device),"attention_mask": batch[1].to(args.device),'subtoken_ids':batch[4].to(args.device)}
target_a=batch[3].to(args.device)
inputs_c = {"input_ids": batch_noaugpair[0].to(args.device),"attention_mask": batch_noaugpair[1].to(args.device),'subtoken_ids':batch_noaugpair[4].to(args.device)}
target_c=batch_noaugpair[3].to(args.device)
assert len(batch_b)==len(batch)
inputs_b = {"input_ids": batch_b[0],"attention_mask": batch_b[1]}
target_b=batch_b[3]
#else:
# idx=torch.randperm(batch[0].size(0))
# inputs_b = {"input_ids": batch[0][idx],"attention_mask": batch[1][idx]}
# target_b=batch[3][idx]
mix_layer = np.random.choice(args.mix_layers_set, 1)[0]
mix_layer = mix_layer -1
#Mix Aug
inputs_b['input_ids'] = inputs_b['input_ids'].to(args.device)
inputs_b["attention_mask"] = inputs_b["attention_mask"].to(args.device)
target_b = target_b.to(args.device)
# mix the attention mask to be the longer one.
attention_mask = ((inputs_a["attention_mask"]+inputs_b["attention_mask"])>0).type(torch.long)
attention_mask = attention_mask.to(args.device)
outputs,loss_mix = model(inputs_a['input_ids'],target_a,inputs_b['input_ids'],
target_b,l, mix_layer,
attention_mask = attention_mask,
special_label_weight=args.special_label_weight,
subtoken_ids=None,
do_intra_mix=False)
# No mix
outputs_nomix,loss_nomix = model(inputs_c['input_ids'],target_c,
attention_mask = inputs_c["attention_mask"],
special_label_weight=args.special_label_weight,
subtoken_ids=None,
do_intra_mix=False)
loss=torch.cat((loss_mix, loss_nomix), 0)
if args.n_gpu >= 1:
loss = loss.mean()
#print('totalloss mean shape: ',totalloss.shape)
#quit()
#exit()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
print("augalpha ",augalpha)
print("regalpha ",regalpha)
# Log metrics
if (args.evaluate_during_training):
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, eval_dataset, parallel = False, mode="dev", prefix = str(global_step))
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
logger.info("Model name: %s", args.output_dir)
logger.info("Epoch is %s", epoch)
if results['f1'] >= best_f1:
best_f1 = results['f1']
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, test_dataset_regular, parallel = False, mode="test", prefix = str(global_step))
test_f1_regular.append(results['f1'])
results, _ = evaluate(args,model, tokenizer, labels, pad_token_label_id, test_dataset_challenging, parallel = False, mode="test", prefix = str(global_step))
test_f1_challenging.append(results['f1'])
output_dir = os.path.join(args.output_dir, "best")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger.info("Saving best model to %s", output_dir)
logger.info("Epochs trained is %s", epochs_trained)
logger.info("Epoch is %s", epoch)
model_to_save = (
model.module if hasattr(model, "module") else model)
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
logger.info("logging train info!!!")
logger.info("*")
# eval and save the best model based on dev set after each epoch
if (args.evaluate_during_training):
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, eval_dataset, parallel = False, mode="dev", prefix = str(global_step))
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
if results['f1'] >= best_f1:
best_f1 = results['f1']
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, test_dataset_regular, parallel = False, mode="test", prefix = str(global_step))
test_f1_regular.append(results['f1'])
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, test_dataset_challenging, parallel = False, mode="test", prefix = str(global_step))
test_f1_challenging.append(results['f1'])
output_dir = os.path.join(args.output_dir, "best")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger.info("Saving best model to %s", output_dir)
logger.info("Epochs trained is %s", epochs_trained)
logger.info("Epoch is %s", epoch)
model_to_save = (model.module if hasattr(model, "module") else model)
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
logger.info("Epoch is %s", epoch)
args.tb_writer_logdir=tb_writer.logdir
tb_writer.close()
return global_step, tr_loss / global_step, test_f1_regular ,test_f1_challenging
def output_eval_results(out_label_list,preds_list,input_id_list,file_name):
with open(file_name,'w') as fout:
for i in range(len(out_label_list)):
label=out_label_list[i]
pred=preds_list[i]
tokens=input_id_list[i]
for j in range(len(label)):
if tokens[j]=='[PAD]':
continue
fout.write('{}\t{}\t{}\n'.format(tokens[j] ,label[j],pred[j]))
fout.write('\n')
def read_examples_from_ontonotes_file(args, tokenizer, labelslist, pad_token_label_id, mode,
omit_sep_cls_token=False,
pad_subtoken_with_real_label=False):
file_path = os.path.join(args.data_dir, 'ontotest.ner')
guid_index = 0
examples = []
label_map = {"PER": "PER",
"person": "PER",
"PERSON": "PER",
"DOCTOR": "PER",
"PATIENT": "PER",
"LOC": "LOC",
"location": "LOC",
"GPE": "LOC",
"FAC": "LOC",
"HOSPITAL": "LOC",
"CITY": "LOC",
"STATE": "LOC",
"COUNTRY": "LOC",
"LOCATION_OTHER": "LOC",
"ORG": "ORG",
"corporation": "ORG",
"group": "ORG",
"ORGANIZATION": "ORG",
"MISC": "MISC",
"product": "MISC",
"creative-work": "MISC",
"PRODUCT": "MISC",
"NORP": "MISC",
"EVENT": "MISC",
"LANGUAGE": "MISC",
"LAW": "MISC",
"WORK_OF_ART": "MISC"}
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(
guid="{}-{}".format(mode, guid_index), words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
elems = line.strip().split()
label = elems[-1].replace("\n", "")
words.append(elems[0])
if label != "O":
iob_prefix = label[:2]
etype = label[2:]
if etype in label_map:
labels.append(iob_prefix + label_map[etype])
else:
labels.append("O")
else:
labels.append("O")
if words:
examples.append(InputExample(
guid="{}-{}".format(mode, guid_index), words=words, labels=labels))
entityheavyexamples=[]
guid_index=0
for e in examples:
count=0
for l in e.labels:
if l!="O":
count=count+1
if count/len( e.labels)>0.5:
e.guid="{}-{}".format(mode, guid_index)
entityheavyexamples.append(e)
guid_index += 1
if len(entityheavyexamples)>49:
break
print(e.guid," ", e.words)
print(e.labels)
#examples = examples[0]
print(mode)
print('data num: {}'.format(len(examples)))
features = convert_examples_to_features(entityheavyexamples, labelslist, args.max_seq_length, tokenizer,
cls_token = tokenizer.cls_token, sep_token = tokenizer.sep_token, pad_token = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
cls_token_segment_id = 2 if args.model_type in ["xlnet"] else 0,
sequence_a_segment_id = 0, pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id = pad_token_label_id,
omit_sep_cls_token=omit_sep_cls_token,
pad_subtoken_with_real_label=pad_subtoken_with_real_label,
subtoken_label_type=args.subtoken_label_type,
label_sep_cls=args.label_sep_cls)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
all_subtoken_ids = torch.tensor([f.subtoken_ids for f in features], dtype=torch.long)
all_sent_id = torch.tensor([f.sent_id for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_subtoken_ids,all_sent_id)
return dataset
def evaluate(args, model, tokenizer, labels, pad_token_label_id, eval_dataset = None, parallel = False, mode = 'dev', prefix = ''):
#if eval_dataset is None and mode=='dev':
# eval_dataset = read_data(args, tokenizer, labels, pad_token_label_id, mode = mode)
eval_dataloader = DataLoader(eval_dataset, batch_size = args.eval_batch_size, shuffle = False)
if parallel:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", mode + '-' + prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
all_subtoken_ids=None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3],'sent_id' : batch[5]}
inputs["token_type_ids"] = batch[2]
target=inputs['labels']
logits,tmp_eval_loss = model(inputs['input_ids'],target,attention_mask = inputs["attention_mask"],
special_label_weight=args.special_label_weight)
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
#eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
all_subtoken_ids=batch[4].detach().cpu().numpy()
sent_id=inputs['sent_id'].detach().cpu().numpy()
input_ids=inputs['input_ids'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
all_subtoken_ids = np.append(all_subtoken_ids, batch[4].detach().cpu().numpy(), axis=0)
sent_id = np.append(sent_id, inputs['sent_id'].detach().cpu().numpy(), axis=0)
input_ids= np.append(input_ids, inputs['input_ids'].detach().cpu().numpy(), axis=0)
#eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
input_id_list = [[] for _ in range(input_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if args.pad_subtoken_with_real_label or args.label_sep_cls:
if args.eval_pad_subtoken_with_first_subtoken_only:
if all_subtoken_ids[i,j] ==1:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
tid=input_ids[i][j]
input_id_list[i].append(tokenizer.convert_ids_to_tokens([tid])[0])
else:
if all_subtoken_ids[i,j] in [0,1] and out_label_ids[i, j] != pad_token_label_id:# in this case, we consider all the tokens.
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
input_id_list[i].append(tokenizer.convert_ids_to_tokens([input_ids[i][j]]))
else:
if all_subtoken_ids[i,j] in [0,1] and out_label_ids[i, j] != pad_token_label_id:# in this case, we consider all the tokens.
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
input_id_list[i].append(tokenizer.convert_ids_to_tokens([input_ids[i][j]]))
file_name=os.path.join(args.output_dir,'{}_pred_results.tsv'.format(mode))
output_eval_results(out_label_list,preds_list,input_id_list,file_name)
macro_scores=macro_score(out_label_list, preds_list)
results = {
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
'macro_f1':macro_scores['macro_f1'],
'macro_precision':macro_scores['macro_precision'],
'macro_recall':macro_scores['macro_recall']
}
logger.info("***** Eval results %s *****", mode + '-' + prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def main():
global best_f1
if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir):
raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
logger.setLevel(log.INFO)
formatter = log.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
fh = log.FileHandler(args.output_dir +'/' + str(args.train_examples)+'-' + 'log.txt')
fh.setLevel(log.INFO)
fh.setFormatter(formatter)
ch = log.StreamHandler()
ch.setLevel(log.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
logger.info("------NEW RUN-----")
logger.info("device: %s, n_gpu: %s", args.device, args.n_gpu)
set_seed(args)
labels = get_labels(args.labels)
num_labels = len(labels)
args.num_labels=num_labels
pad_token_label_id = CrossEntropyLoss().ignore_index
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name,
num_labels=num_labels,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name,
do_lower_case=args.do_lower_case,
)
model_class = BertModel4Mix(config)
model = model_class.from_pretrained(
args.model_name,
from_tf=bool(".ckpt" in args.model_name),
config=config,
)
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
if args.do_predict:
print("Doing Predict!!!!")
test_dataset_ontonotes = read_examples_from_ontonotes_file(args, tokenizer, labels, pad_token_label_id, mode = 'test', pad_subtoken_with_real_label=args.pad_subtoken_with_real_label)
output_dir = os.path.join(args.output_dir, "best")
tokenizer = tokenizer_class.from_pretrained(output_dir, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(output_dir)
model.to(args.device)
result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id,test_dataset_ontonotes, mode="test", prefix = 'final')
print("ontonotes test set results: ",result)
main()
| 42.083655
| 285
| 0.606502
|
bb84956b71aa41972ca466de5b5e9a9b005df483
| 531
|
py
|
Python
|
algorithm/rating/main.py
|
kosyachniy/dev
|
39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4
|
[
"Apache-2.0"
] | 13
|
2018-12-17T23:30:54.000Z
|
2021-12-29T14:31:43.000Z
|
algorithm/rating/main.py
|
kosyachniy/dev
|
39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4
|
[
"Apache-2.0"
] | 36
|
2018-06-07T21:34:13.000Z
|
2022-03-13T21:01:43.000Z
|
algorithm/rating/main.py
|
kosyachniy/dev
|
39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4
|
[
"Apache-2.0"
] | 2
|
2021-01-03T11:47:20.000Z
|
2021-12-29T14:31:49.000Z
|
a=list()
with open('db.txt', 'r') as file:
for i in file:
a.append(i.strip())
b=input()
interval=len(a)
shift=0
while interval>=1:
t=interval%2
interval//=2
i=interval+t
print('1 - ', a[i+shift-1], ' | 2 - ', b)
r=input()
while r!='1' and r!='2':
r=input()
if r=='1':
shift+=i
else:
if not t:
interval-=1
a.append(b)
for i in range(shift+1,len(a))[::-1]:
a[i], a[i-1]=a[i-1], a[i]
with open('db.txt', 'w') as file:
for i in range(len(a)-1):
print(a[i], file=file)
print(a[len(a)-1], end='', file=file)
| 17.7
| 44
| 0.559322
|
22d4e0e177fd7dc75d591fb0f81815eb6d7dbc3b
| 831
|
py
|
Python
|
server/protocols/volgactf.py
|
suiljex/DestructiveFarm
|
e10d786a927cce6965259bc201dce139976ecb03
|
[
"MIT"
] | 159
|
2018-07-14T18:07:14.000Z
|
2022-03-08T15:47:49.000Z
|
server/protocols/volgactf.py
|
suiljex/DestructiveFarm
|
e10d786a927cce6965259bc201dce139976ecb03
|
[
"MIT"
] | 18
|
2018-10-16T06:34:18.000Z
|
2022-03-17T13:18:26.000Z
|
server/protocols/volgactf.py
|
suiljex/DestructiveFarm
|
e10d786a927cce6965259bc201dce139976ecb03
|
[
"MIT"
] | 65
|
2018-10-14T09:05:29.000Z
|
2022-03-05T17:44:24.000Z
|
from themis.finals.attack.helper import Helper
from themis.finals.attack.result import Result
from server.models import FlagStatus, SubmitResult
RESPONSES = {
FlagStatus.ACCEPTED: [Result.SUCCESS_FLAG_ACCEPTED],
FlagStatus.REJECTED: [Result.ERROR_FLAG_EXPIRED, Result.ERROR_FLAG_YOURS,
Result.ERROR_FLAG_SUBMITTED, Result.ERROR_FLAG_NOT_FOUND],
}
def submit_flags(flags, config):
h = Helper(config['SYSTEM_HOST'])
codes = h.attack(*[item.flag for item in flags])
for item, code in zip(flags, codes):
for status, possible_codes in RESPONSES.items():
if code in possible_codes:
found_status = status
break
else:
found_status = FlagStatus.QUEUED
yield SubmitResult(item.flag, found_status, code.name)
| 30.777778
| 84
| 0.679904
|
d130432b7b712ba50b131cc44ffd84353997f159
| 10,879
|
py
|
Python
|
config/settings/base.py
|
giussepi/wsp-math-exam
|
b1c83029291635dc8387dc6692fb186ae302f5ee
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
giussepi/wsp-math-exam
|
b1c83029291635dc8387dc6692fb186ae302f5ee
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
giussepi/wsp-math-exam
|
b1c83029291635dc8387dc6692fb186ae302f5ee
|
[
"MIT"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (math_class_exams/config/settings/base.py - 3 = math_class_exams/)
APPS_DIR = ROOT_DIR.path('math_class_exams')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
]
LOCAL_APPS = [
'math_class_exams.users.apps.UsersAppConfig',
# Your stuff: custom apps go here
'math_class_exams.assessments',
'math_class_exams.api',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'math_class_exams.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Giussepi LM""", 'mail@mail.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'none'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'math_class_exams.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'math_class_exams.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
# 'LOCATION': 'signal_cache_table',
# }
# }
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_SECONDS = 65
CACHE_MIDDLEWARE_KEY_PREFIX = 'db_cache'
# django-rest_framework
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
)
}
| 38.992832
| 107
| 0.638478
|
d7692a76c8adb07a78687134ea20b43472898d83
| 27,278
|
py
|
Python
|
vnpy/trader/utility.py
|
Jornpen/vnpy
|
5b46e1359d95adcda314f9f529a329925cbbb7d0
|
[
"MIT"
] | null | null | null |
vnpy/trader/utility.py
|
Jornpen/vnpy
|
5b46e1359d95adcda314f9f529a329925cbbb7d0
|
[
"MIT"
] | null | null | null |
vnpy/trader/utility.py
|
Jornpen/vnpy
|
5b46e1359d95adcda314f9f529a329925cbbb7d0
|
[
"MIT"
] | null | null | null |
"""
General utility functions.
"""
import json
import logging
import sys
from pathlib import Path
from typing import Callable, Dict, Tuple, Union, Optional
from decimal import Decimal
from math import floor, ceil
import numpy as np
import talib
from .object import BarData, TickData
from .constant import Exchange, Interval
log_formatter = logging.Formatter('[%(asctime)s] %(message)s')
def extract_vt_symbol(vt_symbol: str) -> Tuple[str, Exchange]:
"""
:return: (symbol, exchange)
"""
symbol, exchange_str = vt_symbol.split(".")
return symbol, Exchange(exchange_str)
def generate_vt_symbol(symbol: str, exchange: Exchange) -> str:
"""
return vt_symbol
"""
return f"{symbol}.{exchange.value}"
def _get_trader_dir(temp_name: str) -> Tuple[Path, Path]:
"""
Get path where trader is running in.
"""
cwd = Path.cwd()
temp_path = cwd.joinpath(temp_name)
# If .vntrader folder exists in current working directory,
# then use it as trader running path.
if temp_path.exists():
return cwd, temp_path
# Otherwise use home path of system.
home_path = Path.home()
temp_path = home_path.joinpath(temp_name)
# Create .vntrader folder under home path if not exist.
if not temp_path.exists():
temp_path.mkdir()
return home_path, temp_path
TRADER_DIR, TEMP_DIR = _get_trader_dir(".vntrader")
sys.path.append(str(TRADER_DIR))
def get_file_path(filename: str) -> Path:
"""
Get path for temp file with filename.
"""
return TEMP_DIR.joinpath(filename)
def get_folder_path(folder_name: str) -> Path:
"""
Get path for temp folder with folder name.
"""
folder_path = TEMP_DIR.joinpath(folder_name)
if not folder_path.exists():
folder_path.mkdir()
return folder_path
def get_icon_path(filepath: str, ico_name: str) -> str:
"""
Get path for icon file with ico name.
"""
ui_path = Path(filepath).parent
icon_path = ui_path.joinpath("ico", ico_name)
return str(icon_path)
def load_json(filename: str) -> dict:
"""
Load data from json file in temp path.
"""
filepath = get_file_path(filename)
if filepath.exists():
with open(filepath, mode="r", encoding="UTF-8") as f:
data = json.load(f)
return data
else:
save_json(filename, {})
return {}
def save_json(filename: str, data: dict) -> None:
"""
Save data into json file in temp path.
"""
filepath = get_file_path(filename)
with open(filepath, mode="w+", encoding="UTF-8") as f:
json.dump(
data,
f,
indent=4,
ensure_ascii=False
)
def round_to(value: float, target: float) -> float:
"""
Round price to price tick value.
"""
value = Decimal(str(value))
target = Decimal(str(target))
rounded = float(int(round(value / target)) * target)
return rounded
def floor_to(value: float, target: float) -> float:
"""
Similar to math.floor function, but to target float number.
"""
value = Decimal(str(value))
target = Decimal(str(target))
result = float(int(floor(value / target)) * target)
return result
def ceil_to(value: float, target: float) -> float:
"""
Similar to math.ceil function, but to target float number.
"""
value = Decimal(str(value))
target = Decimal(str(target))
result = float(int(ceil(value / target)) * target)
return result
def get_digits(value: float) -> int:
"""
Get number of digits after decimal point.
"""
value_str = str(value)
if "e-" in value_str:
_, buf = value_str.split("e-")
return int(buf)
elif "." in value_str:
_, buf = value_str.split(".")
return len(buf)
else:
return 0
class BarGenerator:
"""
For:
1. generating 1 minute bar data from tick data
2. generating x minute bar/x hour bar data from 1 minute data
Notice:
1. for x minute bar, x must be able to divide 60: 2, 3, 5, 6, 10, 15, 20, 30
2. for x hour bar, x can be any number
"""
def __init__(
self,
on_bar: Callable,
window: int = 0,
on_window_bar: Callable = None,
interval: Interval = Interval.MINUTE
):
"""Constructor"""
self.bar: BarData = None
self.on_bar: Callable = on_bar
self.interval: Interval = interval
self.interval_count: int = 0
self.hour_bar: BarData = None
self.window: int = window
self.window_bar: BarData = None
self.on_window_bar: Callable = on_window_bar
self.last_tick: TickData = None
def update_tick(self, tick: TickData) -> None:
"""
Update new tick data into generator.
"""
new_minute = False
# Filter tick data with 0 last price
if not tick.last_price:
return
# Filter tick data with older timestamp
if self.last_tick and tick.datetime < self.last_tick.datetime:
return
if not self.bar:
new_minute = True
elif (
(self.bar.datetime.minute != tick.datetime.minute)
or (self.bar.datetime.hour != tick.datetime.hour)
):
self.bar.datetime = self.bar.datetime.replace(
second=0, microsecond=0
)
self.on_bar(self.bar)
new_minute = True
if new_minute:
self.bar = BarData(
symbol=tick.symbol,
exchange=tick.exchange,
interval=Interval.MINUTE,
datetime=tick.datetime,
gateway_name=tick.gateway_name,
open_price=tick.last_price,
high_price=tick.last_price,
low_price=tick.last_price,
close_price=tick.last_price,
open_interest=tick.open_interest
)
else:
self.bar.high_price = max(self.bar.high_price, tick.last_price)
if tick.high_price > self.last_tick.high_price:
self.bar.high_price = max(self.bar.high_price, tick.high_price)
self.bar.low_price = min(self.bar.low_price, tick.last_price)
if tick.low_price < self.last_tick.low_price:
self.bar.low_price = min(self.bar.low_price, tick.low_price)
self.bar.close_price = tick.last_price
self.bar.open_interest = tick.open_interest
self.bar.datetime = tick.datetime
if self.last_tick:
volume_change = tick.volume - self.last_tick.volume
self.bar.volume += max(volume_change, 0)
turnover_change = tick.turnover - self.last_tick.turnover
self.bar.turnover += max(turnover_change, 0)
self.last_tick = tick
def update_bar(self, bar: BarData) -> None:
"""
Update 1 minute bar into generator
"""
if self.interval == Interval.MINUTE:
self.update_bar_minute_window(bar)
else:
self.update_bar_hour_window(bar)
def update_bar_minute_window(self, bar: BarData) -> None:
""""""
# If not inited, create window bar object
if not self.window_bar:
dt = bar.datetime.replace(second=0, microsecond=0)
self.window_bar = BarData(
symbol=bar.symbol,
exchange=bar.exchange,
datetime=dt,
gateway_name=bar.gateway_name,
open_price=bar.open_price,
high_price=bar.high_price,
low_price=bar.low_price
)
# Otherwise, update high/low price into window bar
else:
self.window_bar.high_price = max(
self.window_bar.high_price,
bar.high_price
)
self.window_bar.low_price = min(
self.window_bar.low_price,
bar.low_price
)
# Update close price/volume/turnover into window bar
self.window_bar.close_price = bar.close_price
self.window_bar.volume += bar.volume
self.window_bar.turnover += bar.turnover
self.window_bar.open_interest = bar.open_interest
# Check if window bar completed
if not (bar.datetime.minute + 1) % self.window:
self.on_window_bar(self.window_bar)
self.window_bar = None
def update_bar_hour_window(self, bar: BarData) -> None:
""""""
# If not inited, create window bar object
if not self.hour_bar:
dt = bar.datetime.replace(minute=0, second=0, microsecond=0)
self.hour_bar = BarData(
symbol=bar.symbol,
exchange=bar.exchange,
datetime=dt,
gateway_name=bar.gateway_name,
open_price=bar.open_price,
high_price=bar.high_price,
low_price=bar.low_price,
volume=bar.volume,
turnover=bar.turnover
)
return
finished_bar = None
# If minute is 59, update minute bar into window bar and push
if bar.datetime.minute == 59:
self.hour_bar.high_price = max(
self.hour_bar.high_price,
bar.high_price
)
self.hour_bar.low_price = min(
self.hour_bar.low_price,
bar.low_price
)
self.hour_bar.close_price = bar.close_price
self.hour_bar.volume += bar.volume
self.hour_bar.turnover += bar.turnover
self.hour_bar.open_interest = bar.open_interest
finished_bar = self.hour_bar
self.hour_bar = None
# If minute bar of new hour, then push existing window bar
elif bar.datetime.hour != self.hour_bar.datetime.hour:
finished_bar = self.hour_bar
dt = bar.datetime.replace(minute=0, second=0, microsecond=0)
self.hour_bar = BarData(
symbol=bar.symbol,
exchange=bar.exchange,
datetime=dt,
gateway_name=bar.gateway_name,
open_price=bar.open_price,
high_price=bar.high_price,
low_price=bar.low_price,
close_price=bar.close_price,
volume=bar.volume,
turnover=bar.turnover
)
# Otherwise only update minute bar
else:
self.hour_bar.high_price = max(
self.hour_bar.high_price,
bar.high_price
)
self.hour_bar.low_price = min(
self.hour_bar.low_price,
bar.low_price
)
self.hour_bar.close_price = bar.close_price
self.hour_bar.volume += bar.volume
self.hour_bar.turnover += bar.turnover
self.hour_bar.open_interest = bar.open_interest
# Push finished window bar
if finished_bar:
self.on_hour_bar(finished_bar)
def on_hour_bar(self, bar: BarData) -> None:
""""""
if self.window == 1:
self.on_window_bar(bar)
else:
if not self.window_bar:
self.window_bar = BarData(
symbol=bar.symbol,
exchange=bar.exchange,
datetime=bar.datetime,
gateway_name=bar.gateway_name,
open_price=bar.open_price,
high_price=bar.high_price,
low_price=bar.low_price
)
else:
self.window_bar.high_price = max(
self.window_bar.high_price,
bar.high_price
)
self.window_bar.low_price = min(
self.window_bar.low_price,
bar.low_price
)
self.window_bar.close_price = bar.close_price
self.window_bar.volume += bar.volume
self.window_bar.turnover += bar.turnover
self.window_bar.open_interest = bar.open_interest
self.interval_count += 1
if not self.interval_count % self.window:
self.interval_count = 0
self.on_window_bar(self.window_bar)
self.window_bar = None
def generate(self) -> Optional[BarData]:
"""
Generate the bar data and call callback immediately.
"""
bar = self.bar
if self.bar:
bar.datetime = bar.datetime.replace(second=0, microsecond=0)
self.on_bar(bar)
self.bar = None
return bar
class ArrayManager(object):
"""
For:
1. time series container of bar data
2. calculating technical indicator value
"""
def __init__(self, size: int = 100):
"""Constructor"""
self.count: int = 0
self.size: int = size
self.inited: bool = False
self.open_array: np.ndarray = np.zeros(size)
self.high_array: np.ndarray = np.zeros(size)
self.low_array: np.ndarray = np.zeros(size)
self.close_array: np.ndarray = np.zeros(size)
self.volume_array: np.ndarray = np.zeros(size)
self.turnover_array: np.ndarray = np.zeros(size)
self.open_interest_array: np.ndarray = np.zeros(size)
def update_bar(self, bar: BarData) -> None:
"""
Update new bar data into array manager.
"""
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.open_array[:-1] = self.open_array[1:]
self.high_array[:-1] = self.high_array[1:]
self.low_array[:-1] = self.low_array[1:]
self.close_array[:-1] = self.close_array[1:]
self.volume_array[:-1] = self.volume_array[1:]
self.turnover_array[:-1] = self.turnover_array[1:]
self.open_interest_array[:-1] = self.open_interest_array[1:]
self.open_array[-1] = bar.open_price
self.high_array[-1] = bar.high_price
self.low_array[-1] = bar.low_price
self.close_array[-1] = bar.close_price
self.volume_array[-1] = bar.volume
self.turnover_array[-1] = bar.turnover
self.open_interest_array[-1] = bar.open_interest
@property
def open(self) -> np.ndarray:
"""
Get open price time series.
"""
return self.open_array
@property
def high(self) -> np.ndarray:
"""
Get high price time series.
"""
return self.high_array
@property
def low(self) -> np.ndarray:
"""
Get low price time series.
"""
return self.low_array
@property
def close(self) -> np.ndarray:
"""
Get close price time series.
"""
return self.close_array
@property
def volume(self) -> np.ndarray:
"""
Get trading volume time series.
"""
return self.volume_array
@property
def turnover(self) -> np.ndarray:
"""
Get trading turnover time series.
"""
return self.turnover_array
@property
def open_interest(self) -> np.ndarray:
"""
Get trading volume time series.
"""
return self.open_interest_array
def sma(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Simple moving average.
"""
result = talib.SMA(self.close, n)
if array:
return result
return result[-1]
def ema(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Exponential moving average.
"""
result = talib.EMA(self.close, n)
if array:
return result
return result[-1]
def kama(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
KAMA.
"""
result = talib.KAMA(self.close, n)
if array:
return result
return result[-1]
def wma(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
WMA.
"""
result = talib.WMA(self.close, n)
if array:
return result
return result[-1]
def apo(
self,
fast_period: int,
slow_period: int,
matype: int = 0,
array: bool = False
) -> Union[float, np.ndarray]:
"""
APO.
"""
result = talib.APO(self.close, fast_period, slow_period, matype)
if array:
return result
return result[-1]
def cmo(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
CMO.
"""
result = talib.CMO(self.close, n)
if array:
return result
return result[-1]
def mom(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
MOM.
"""
result = talib.MOM(self.close, n)
if array:
return result
return result[-1]
def ppo(
self,
fast_period: int,
slow_period: int,
matype: int = 0,
array: bool = False
) -> Union[float, np.ndarray]:
"""
PPO.
"""
result = talib.PPO(self.close, fast_period, slow_period, matype)
if array:
return result
return result[-1]
def roc(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROC.
"""
result = talib.ROC(self.close, n)
if array:
return result
return result[-1]
def rocr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROCR.
"""
result = talib.ROCR(self.close, n)
if array:
return result
return result[-1]
def rocp(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROCP.
"""
result = talib.ROCP(self.close, n)
if array:
return result
return result[-1]
def rocr_100(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROCR100.
"""
result = talib.ROCR100(self.close, n)
if array:
return result
return result[-1]
def trix(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
TRIX.
"""
result = talib.TRIX(self.close, n)
if array:
return result
return result[-1]
def std(self, n: int, nbdev: int = 1, array: bool = False) -> Union[float, np.ndarray]:
"""
Standard deviation.
"""
result = talib.STDDEV(self.close, n, nbdev)
if array:
return result
return result[-1]
def obv(self, array: bool = False) -> Union[float, np.ndarray]:
"""
OBV.
"""
result = talib.OBV(self.close, self.volume)
if array:
return result
return result[-1]
def cci(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Commodity Channel Index (CCI).
"""
result = talib.CCI(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def atr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Average True Range (ATR).
"""
result = talib.ATR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def natr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
NATR.
"""
result = talib.NATR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def rsi(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Relative Strenght Index (RSI).
"""
result = talib.RSI(self.close, n)
if array:
return result
return result[-1]
def macd(
self,
fast_period: int,
slow_period: int,
signal_period: int,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray, np.ndarray],
Tuple[float, float, float]
]:
"""
MACD.
"""
macd, signal, hist = talib.MACD(
self.close, fast_period, slow_period, signal_period
)
if array:
return macd, signal, hist
return macd[-1], signal[-1], hist[-1]
def adx(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ADX.
"""
result = talib.ADX(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def adxr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ADXR.
"""
result = talib.ADXR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def dx(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
DX.
"""
result = talib.DX(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def minus_di(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
MINUS_DI.
"""
result = talib.MINUS_DI(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def plus_di(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
PLUS_DI.
"""
result = talib.PLUS_DI(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def willr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
WILLR.
"""
result = talib.WILLR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def ultosc(
self,
time_period1: int = 7,
time_period2: int = 14,
time_period3: int = 28,
array: bool = False
) -> Union[float, np.ndarray]:
"""
Ultimate Oscillator.
"""
result = talib.ULTOSC(self.high, self.low, self.close, time_period1, time_period2, time_period3)
if array:
return result
return result[-1]
def trange(self, array: bool = False) -> Union[float, np.ndarray]:
"""
TRANGE.
"""
result = talib.TRANGE(self.high, self.low, self.close)
if array:
return result
return result[-1]
def boll(
self,
n: int,
dev: float,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Bollinger Channel.
"""
mid = self.sma(n, array)
std = self.std(n, 1, array)
up = mid + std * dev
down = mid - std * dev
return up, down
def keltner(
self,
n: int,
dev: float,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Keltner Channel.
"""
mid = self.sma(n, array)
atr = self.atr(n, array)
up = mid + atr * dev
down = mid - atr * dev
return up, down
def donchian(
self, n: int, array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Donchian Channel.
"""
up = talib.MAX(self.high, n)
down = talib.MIN(self.low, n)
if array:
return up, down
return up[-1], down[-1]
def aroon(
self,
n: int,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Aroon indicator.
"""
aroon_down, aroon_up = talib.AROON(self.high, self.low, n)
if array:
return aroon_up, aroon_down
return aroon_up[-1], aroon_down[-1]
def aroonosc(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Aroon Oscillator.
"""
result = talib.AROONOSC(self.high, self.low, n)
if array:
return result
return result[-1]
def minus_dm(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
MINUS_DM.
"""
result = talib.MINUS_DM(self.high, self.low, n)
if array:
return result
return result[-1]
def plus_dm(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
PLUS_DM.
"""
result = talib.PLUS_DM(self.high, self.low, n)
if array:
return result
return result[-1]
def mfi(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Money Flow Index.
"""
result = talib.MFI(self.high, self.low, self.close, self.volume, n)
if array:
return result
return result[-1]
def ad(self, array: bool = False) -> Union[float, np.ndarray]:
"""
AD.
"""
result = talib.AD(self.high, self.low, self.close, self.volume)
if array:
return result
return result[-1]
def adosc(
self,
fast_period: int,
slow_period: int,
array: bool = False
) -> Union[float, np.ndarray]:
"""
ADOSC.
"""
result = talib.ADOSC(self.high, self.low, self.close, self.volume, fast_period, slow_period)
if array:
return result
return result[-1]
def bop(self, array: bool = False) -> Union[float, np.ndarray]:
"""
BOP.
"""
result = talib.BOP(self.open, self.high, self.low, self.close)
if array:
return result
return result[-1]
def virtual(func: Callable) -> Callable:
"""
mark a function as "virtual", which means that this function can be override.
any base class should use this or @abstractmethod to decorate all functions
that can be (re)implemented by subclasses.
"""
return func
file_handlers: Dict[str, logging.FileHandler] = {}
def _get_file_logger_handler(filename: str) -> logging.FileHandler:
handler = file_handlers.get(filename, None)
if handler is None:
handler = logging.FileHandler(filename)
file_handlers[filename] = handler # Am i need a lock?
return handler
def get_file_logger(filename: str) -> logging.Logger:
"""
return a logger that writes records into a file.
"""
logger = logging.getLogger(filename)
handler = _get_file_logger_handler(filename) # get singleton handler.
handler.setFormatter(log_formatter)
logger.addHandler(handler) # each handler will be added only once.
return logger
| 27.778004
| 104
| 0.546154
|
1ce849a74243e4388f803a6d34ee744b1b590e35
| 506
|
py
|
Python
|
infrapy/cli/run_assoc.py
|
LANL-Seismoacoustics/infrapy
|
132c1f5f9c074eca7300ab35d23109d8423a9912
|
[
"MIT"
] | 23
|
2020-03-17T18:43:19.000Z
|
2022-03-22T17:47:14.000Z
|
infrapy/cli/run_assoc.py
|
LANL-Seismoacoustics/infrapy
|
132c1f5f9c074eca7300ab35d23109d8423a9912
|
[
"MIT"
] | 10
|
2020-04-28T01:09:35.000Z
|
2022-02-28T06:06:20.000Z
|
infrapy/cli/run_assoc.py
|
LANL-Seismoacoustics/infrapy
|
132c1f5f9c074eca7300ab35d23109d8423a9912
|
[
"MIT"
] | 2
|
2021-03-08T20:29:27.000Z
|
2021-03-28T18:03:39.000Z
|
#!/usr/bin/env python
import sys
from infrapy.database.taskbase.assoc import AssocInfraPy_LANL
def run(config_file):
pdetect = AssocInfraPy_LANL(config_file)
pdetect.database_connecting()
pdetect.data_processing()
if __name__ == '__main__':
try:
config_file = sys.argv[1]
except Exception as ex1:
print('A configuration file is required to start data processing')
sys.exit()
print('run FK, with configuration file:', config_file)
run(config_file)
| 22
| 74
| 0.703557
|
9e895dbce951fb9e601825a8231650772b2f4357
| 1,450
|
py
|
Python
|
add_frac.py
|
HRA1173/homework1
|
befffeffa27ffa87031a94bb3348e229eeed9794
|
[
"Apache-2.0"
] | null | null | null |
add_frac.py
|
HRA1173/homework1
|
befffeffa27ffa87031a94bb3348e229eeed9794
|
[
"Apache-2.0"
] | null | null | null |
add_frac.py
|
HRA1173/homework1
|
befffeffa27ffa87031a94bb3348e229eeed9794
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 19:28:36 2020
@author: User
"""
"""
Aufgabe3.1:
Schreiben Sie eine Funktion add_frac, die zwei Brüche addieren kann.
Dabei werden jeweils Zähler und Nenner als Input an die Funktion übergeben add_frac(Zaehler1,Nenner1,Zaehler2,Nenner2) und als Liste verwaltet.
Die Eingabe soll nicht über einen Konsolen/User-Input erfolgen. Als Input sind nur ganze Zahlen zulässig.
Die Ausgabe ist ein neuer Bruch - es werden also wiederum Zähler und Nenner ausgegeben.
Zusatzaufgabe: Kürzen Sie das Resultat. Dies gelingt am einfachsten, wenn Sie Zähler und Nenner des Resultats durch ihren größten gemeinsamen Teiler (ggT) dividieren.
"""
def add_frac(Zaehler1,Nenner1,Zaehler2,Nenner2):
"""
Diese Funktion addiert zwei Brüche miteinander.
Es dürfen nur Ganzzahlen eingegeben werden.
Die Nenner dürfen nicht null sein.
"""
#Überprüfung der Eingabe auf Ganzzahl
if type(Zaehler1) != int or type(Nenner1) != int or type(Zaehler2) != int or type(Nenner2) != int:
print("Bitte geben Sie nur Ganzzahlen ein!")
#Überprüfung der Nenner auf Nulldivision
elif Nenner1 == 0 or Nenner2 == 0:
print("Fehler bei der Eingabe. Nenner darf nicht Null sein!")
#Bruchaddition
else:
Nenner3 = Nenner1*Nenner2
Zaehler3 = Zaehler1*Nenner2+Zaehler2*Nenner1
print("Ergebnis:", Zaehler3, "/", Nenner3)
| 39.189189
| 167
| 0.70069
|
26105935616a1ae4765be3247bf00a6e3f7d364d
| 8,980
|
py
|
Python
|
kaolin/transforms/meshfunc.py
|
Bob-Yeah/kaolin
|
7ad34f8158000499a30b8dfa14fb3ed86d2e57a6
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-07-21T16:02:47.000Z
|
2020-07-21T16:02:47.000Z
|
kaolin/transforms/meshfunc.py
|
Bob-Yeah/kaolin
|
7ad34f8158000499a30b8dfa14fb3ed86d2e57a6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kaolin/transforms/meshfunc.py
|
Bob-Yeah/kaolin
|
7ad34f8158000499a30b8dfa14fb3ed86d2e57a6
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-10T09:19:19.000Z
|
2021-11-12T08:18:17.000Z
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List, Optional, Type, Union
import numpy as np
import torch
import torch.nn.functional as F
from scipy import ndimage
from kaolin.rep import Mesh, TriangleMesh, QuadMesh
from kaolin import helpers
# Tiny eps
EPS = 1e-6
def sample_triangle_mesh(vertices: torch.Tensor, faces: torch.Tensor,
num_samples: int, eps: float = 1e-10):
r""" Uniformly samples the surface of a mesh.
Args:
vertices (torch.Tensor): Vertices of the mesh (shape:
:math:`N \times 3`, where :math:`N` is the number of vertices)
faces (torch.LongTensor): Faces of the mesh (shape: :math:`F \times 3`,
where :math:`F` is the number of faces).
num_samples (int): Number of points to sample
eps (float): A small number to prevent division by zero
for small surface areas.
Returns:
(torch.Tensor): Uniformly sampled points from the triangle mesh.
Example:
>>> points = sample_triangle_mesh(vertices, faces, 10)
>>> points
tensor([[ 0.0293, 0.2179, 0.2168],
[ 0.2003, -0.3367, 0.2187],
[ 0.2152, -0.0943, 0.1907],
[-0.1852, 0.1686, -0.0522],
[-0.2167, 0.3171, 0.0737],
[ 0.2219, -0.0289, 0.1531],
[ 0.2217, -0.0115, 0.1247],
[-0.1400, 0.0364, -0.1618],
[ 0.0658, -0.0310, -0.2198],
[ 0.1926, -0.1867, -0.2153]])
"""
helpers._assert_tensor(vertices)
helpers._assert_tensor(faces)
helpers._assert_dim_ge(vertices, 2)
helpers._assert_dim_ge(faces, 2)
# We want the last dimension of vertices to be of shape 3.
helpers._assert_shape_eq(vertices, (-1, 3), dim=-1)
dist_uni = torch.distributions.Uniform(torch.zeros((1,), device=vertices.device),
1.)
# calculate area of each face
x1, x2, x3 = torch.split(torch.index_select(
vertices, 0, faces[:, 0]) - torch.index_select(
vertices, 0, faces[:, 1]), 1, dim=1)
y1, y2, y3 = torch.split(torch.index_select(
vertices, 0, faces[:, 1]) - torch.index_select(
vertices, 0, faces[:, 2]), 1, dim=1)
a = (x2 * y3 - x3 * y2) ** 2
b = (x3 * y1 - x1 * y3) ** 2
c = (x1 * y2 - x2 * y1) ** 2
Areas = torch.sqrt(a + b + c) / 2
# percentage of each face w.r.t. full surface area
Areas = Areas / (torch.sum(Areas) + eps)
# define descrete distribution w.r.t. face area ratios caluclated
cat_dist = torch.distributions.Categorical(Areas.view(-1))
face_choices = cat_dist.sample([num_samples])
# from each face sample a point
select_faces = faces[face_choices]
xs = torch.index_select(vertices, 0, select_faces[:, 0])
ys = torch.index_select(vertices, 0, select_faces[:, 1])
zs = torch.index_select(vertices, 0, select_faces[:, 2])
u = torch.sqrt(dist_uni.sample([num_samples]))
v = dist_uni.sample([num_samples])
points = (1 - u) * xs + (u * (1 - v)) * ys + u * v * zs
return points
def normalize(mesh: Type[Mesh], inplace: Optional[bool] = True):
r"""Normalize a mesh such that it is centered at the orgin and has
unit standard deviation.
Args:
mesh (Mesh): Mesh to be normalized.
inplace (bool, optional): Bool to make this operation in-place.
Returns:
(Mesh): Normalized mesh.
"""
if not isinstance(mesh, Mesh):
raise TypeError('Input mesh must be of type Mesh. '
'Got {0} instead.'.format(type(mesh)))
if not inplace:
mesh = mesh.clone()
mesh.vertices = (mesh.vertices - mesh.vertices.mean(-2).unsqueeze(-2))\
/ (mesh.vertices.std(-2).unsqueeze(-2) + EPS)
return mesh
def scale(mesh: Type[Mesh], scf: Union[float, Iterable],
inplace: Optional[bool] = True):
r"""Scale a mesh given a specified scaling factor. A scalar scaling factor
can be provided, in which case it is applied isotropically to all dims.
Optionally, a list/tuple of anisotropic scale factors can be provided per
dimension.
Args:
mesh (Mesh): Mesh to be scaled.
scf (float or iterable): Scaling factor per dimension. If only a single
scaling factor is provided (or a list of size 1 is provided), it is
isotropically applied to all dimensions. Else, a list/tuple of 3
scaling factors is expected, which are applied to the X, Y, and Z
directions respectively.
inplace (bool, optional): Bool to make this operation in-place.
Returns:
(Mesh): Scaled mesh.
"""
if not isinstance(mesh, Mesh):
raise TypeError('Input mesh must be of type Mesh. '
'Got {0} instead.'.format(type(mesh)))
if not inplace:
mesh = mesh.clone()
_scf = []
if isinstance(scf, float) or isinstance(scf, int):
_scf = [scf, scf, scf]
elif isinstance(scf, list) or isinstance(scf, tuple):
if len(scf) == 1:
_scf = [scf[0], scf[0], scf[0]]
elif len(scf) == 3:
_scf = [scf[0], scf[1], scf[2]]
else:
raise ValueError('Exactly 1 or 3 values required for input scf.'
'Got {0} instead.'.format(len(scf)))
else:
raise TypeError('Input scf must be of type int, float, list, or tuple.'
' Got {0} instead.'.format(type(scf)))
_scf = torch.Tensor(_scf).to(mesh.vertices.device).view(1, 3)
mesh.vertices = _scf * mesh.vertices
return mesh
def translate(mesh: Type[Mesh], trans: Union[torch.Tensor, Iterable],
inplace: Optional[bool] = True):
r"""Translate a mesh given a (3D) translation vector.
Args:
mesh (Mesh): Mesh to be normalized.
trans (torch.Tensor or iterable): Translation vector (shape:
torch.Tensor or iterable must have exactly 3 elements).
inplace (bool, optional): Bool to make this operation in-place.
Returns:
(Mesh): Translated mesh.
"""
if not isinstance(mesh, Mesh):
raise TypeError('Input mesh must be of type Mesh. '
'Got {0} instead.'.format(type(mesh)))
if not inplace:
mesh = mesh.clone()
if torch.is_tensor(trans):
if trans.numel() != 3:
raise ValueError('Input trans must contain exactly 3 elements. '
'Got {0} instead.'.format(trans.numel()))
trans = trans.view(1, 3)
elif isinstance(trans, list) or isinstance(trans, tuple):
if len(trans) != 3:
raise ValueError('Exactly 1 or 3 values required for input trans.'
'Got {0} instead.'.format(len(trans)))
trans = torch.Tensor([trans[0], trans[1], trans[2]]).to(
mesh.vertices.device).view(1, 3)
mesh.vertices = mesh.vertices + trans
return mesh
def rotate(mesh: Type[Mesh], rotmat: torch.Tensor,
inplace: Optional[bool] = True):
r"""Rotate a mesh given a 3 x 3 rotation matrix.
Args:
mesh (Mesh): Mesh to be rotated.
rotmat (torch.Tensor): Rotation matrix (shape: :math:`3 \times 3`).
inplace (bool, optional): Bool to make this operation in-place.
Returns:
(Mesh): Rotatted mesh.
"""
if not isinstance(mesh, Mesh):
raise TypeError('Input mesh must be of type Mesh. '
'Got {0} instead.'.format(type(mesh)))
if not inplace:
mesh = mesh.clone()
helpers._assert_tensor(rotmat)
helpers._assert_shape_eq(rotmat, (3, 3))
mesh.vertices = torch.matmul(rotmat, mesh.vertices.t()).t()
return mesh
if __name__ == '__main__':
device = 'cpu'
mesh = TriangleMesh.from_obj('tests/model.obj')
# # Test sample_triangle_mesh
# pts = sample_triangle_mesh(mesh.vertices.to(device),
# mesh.faces.to(device), 10)
# print(pts)
# # Test normalize
# mesh = normalize(mesh)
# # Test scale
# print(mesh.vertices[:10])
# mesh = scale(mesh, [2, 1, 2])
# print(mesh.vertices[:10])
# # Test translate
# print(mesh.vertices[:10])
# mesh = translate(mesh, torch.Tensor([2, 2, 2]))
# print(mesh.vertices[:10])
# # Test rotate
# print(mesh.vertices[:10])
# rmat = 2 * torch.eye(3)
# mesh = rotate(mesh, rmat)
# print(mesh.vertices[:10])
| 34.40613
| 85
| 0.606904
|
9634e3c2887e8eec83a793bad36f4c173832cb08
| 408
|
py
|
Python
|
string-transforms-into-another-string/string-transforms-into-another-string.py
|
QQuinn03/LeetHub
|
51ce21d721f0f524a07ed24266fb5fca473fcaa5
|
[
"MIT"
] | null | null | null |
string-transforms-into-another-string/string-transforms-into-another-string.py
|
QQuinn03/LeetHub
|
51ce21d721f0f524a07ed24266fb5fca473fcaa5
|
[
"MIT"
] | null | null | null |
string-transforms-into-another-string/string-transforms-into-another-string.py
|
QQuinn03/LeetHub
|
51ce21d721f0f524a07ed24266fb5fca473fcaa5
|
[
"MIT"
] | null | null | null |
class Solution:
def canConvert(self, str1: str, str2: str) -> bool:
dic={}
idx=0
while idx<len(str1):
if str1[idx] not in dic:
dic[str1[idx]]=str2[idx]
if dic[str1[idx]]!= str2[idx]:
print(dic[str1[idx]],str2[idx])
return False
idx+=1
return str1 == str2 or len(set(str2)) < 26
| 31.384615
| 55
| 0.460784
|
9a5f65921654368be9c7791f6a1225258b41e8cf
| 25,169
|
py
|
Python
|
airflow/providers/google/cloud/hooks/vision.py
|
Hartorn/airflow
|
a79e2d4c4aa105f3fac5ae6a28e29af9cd572407
|
[
"Apache-2.0"
] | 1
|
2020-07-17T20:06:33.000Z
|
2020-07-17T20:06:33.000Z
|
airflow/providers/google/cloud/hooks/vision.py
|
Hartorn/airflow
|
a79e2d4c4aa105f3fac5ae6a28e29af9cd572407
|
[
"Apache-2.0"
] | 9
|
2021-03-01T21:20:13.000Z
|
2022-03-29T22:28:14.000Z
|
airflow/providers/google/cloud/hooks/vision.py
|
vuppalli/airflow
|
dfe8337ca2d3ed173d9ecc112938271519792c40
|
[
"Apache-2.0"
] | 2
|
2020-03-08T14:12:55.000Z
|
2020-06-10T10:17:32.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Vision Hook.
"""
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from cached_property import cached_property
from google.api_core.retry import Retry
from google.cloud.vision_v1 import ImageAnnotatorClient, ProductSearchClient
from google.cloud.vision_v1.types import (
AnnotateImageRequest, FieldMask, Image, Product, ProductSet, ReferenceImage,
)
from google.protobuf.json_format import MessageToDict
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
ERR_DIFF_NAMES = \
"""The {label} name provided in the object ({explicit_name}) is different than the name created
from the input parameters ({constructed_name}). Please either:
1) Remove the {label} name,
2) Remove the location and {id_label} parameters,
3) Unify the {label} name and input parameters.
"""
ERR_UNABLE_TO_CREATE = \
"""Unable to determine the {label} name. Please either set the name directly
in the {label} object or provide the `location` and `{id_label}` parameters.
"""
class NameDeterminer:
"""
Helper class to determine entity name.
"""
def __init__(self, label: str, id_label: str, get_path: Callable[[str, str, str], str]) -> None:
self.label = label
self.id_label = id_label
self.get_path = get_path
def get_entity_with_name(
self,
entity: Any,
entity_id: Optional[str],
location: Optional[str],
project_id: str
) -> Any:
"""
Check if entity has the `name` attribute set:
* If so, no action is taken.
* If not, and the name can be constructed from other parameters provided, it is created and filled in
the entity.
* If both the entity's 'name' attribute is set and the name can be constructed from other parameters
provided:
* If they are the same - no action is taken
* if they are different - an exception is thrown.
:param entity: Entity
:type entity: any
:param entity_id: Entity id
:type entity_id: str
:param location: Location
:type location: str
:param project_id: The id of Google Cloud Vision project.
:type project_id: str
:return: The same entity or entity with new name
:rtype: str
:raises: AirflowException
"""
entity = deepcopy(entity)
explicit_name = getattr(entity, 'name')
if location and entity_id:
# Necessary parameters to construct the name are present. Checking for conflict with explicit name
constructed_name = self.get_path(project_id, location, entity_id)
if not explicit_name:
entity.name = constructed_name
return entity
if explicit_name != constructed_name:
raise AirflowException(ERR_DIFF_NAMES.format(
label=self.label,
explicit_name=explicit_name,
constructed_name=constructed_name,
id_label=self.id_label)
)
# Not enough parameters to construct the name. Trying to use the name from Product / ProductSet.
if explicit_name:
return entity
else:
raise AirflowException(
ERR_UNABLE_TO_CREATE.format(label=self.label, id_label=self.id_label)
)
class CloudVisionHook(GoogleBaseHook):
"""
Hook for Google Cloud Vision APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
product_name_determiner = NameDeterminer('Product', 'product_id', ProductSearchClient.product_path)
product_set_name_determiner = NameDeterminer(
'ProductSet', 'productset_id', ProductSearchClient.product_set_path
)
def __init__(self, gcp_conn_id: str = 'google_cloud_default', delegate_to: Optional[str] = None) -> None:
super().__init__(gcp_conn_id, delegate_to)
self._client = None
def get_conn(self) -> ProductSearchClient:
"""
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
:rtype: google.cloud.vision_v1.ProductSearchClient
"""
if not self._client:
self._client = ProductSearchClient(
credentials=self._get_credentials(),
client_info=self.client_info
)
return self._client
@cached_property
def annotator_client(self) -> ImageAnnotatorClient:
"""
Creates ImageAnnotatorClient.
:return: Google Image Annotator client object.
:rtype: google.cloud.vision_v1.ImageAnnotatorClient
"""
return ImageAnnotatorClient(credentials=self._get_credentials())
@staticmethod
def _check_for_error(response: Dict) -> None:
if "error" in response:
raise AirflowException(response)
@GoogleBaseHook.fallback_to_default_project_id
def create_product_set(
self,
location: str,
product_set: Union[dict, ProductSet],
project_id: str,
product_set_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> str:
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new ProductSet under the parent: %s', parent)
response = client.create_product_set(
parent=parent,
product_set=product_set,
product_set_id=product_set_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ProductSet created: %s', response.name if response else '')
self.log.debug('ProductSet created:\n%s', response)
if not product_set_id:
# Product set id was generated by the API
product_set_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated ProductSet ID from the response: %s', product_set_id)
return product_set_id
@GoogleBaseHook.fallback_to_default_project_id
def get_product_set(
self,
location: str,
product_set_id: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Dict:
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionGetProductSetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Retrieving ProductSet: %s', name)
response = client.get_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ProductSet retrieved.')
self.log.debug('ProductSet retrieved:\n%s', response)
return MessageToDict(response)
@GoogleBaseHook.fallback_to_default_project_id
def update_product_set(
self,
product_set: Union[dict, ProductSet],
project_id: str,
location: Optional[str] = None,
product_set_id: Optional[str] = None,
update_mask: Union[dict, FieldMask] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Dict:
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductSetOperator`
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
self.log.info('Updating ProductSet: %s', product_set.name)
response = client.update_product_set(
product_set=product_set, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('ProductSet updated: %s', response.name if response else '')
self.log.debug('ProductSet updated:\n%s', response)
return MessageToDict(response)
@GoogleBaseHook.fallback_to_default_project_id
def delete_product_set(
self,
location: str,
product_set_id: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
):
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductSetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Deleting ProductSet: %s', name)
client.delete_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ProductSet with the name [%s] deleted.', name)
@GoogleBaseHook.fallback_to_default_project_id
def create_product(
self,
location: str,
product: Union[dict, Product],
project_id: str,
product_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
):
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new Product under the parent: %s', parent)
response = client.create_product(
parent=parent,
product=product,
product_id=product_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('Product created: %s', response.name if response else '')
self.log.debug('Product created:\n%s', response)
if not product_id:
# Product id was generated by the API
product_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated Product ID from the response: %s', product_id)
return product_id
@GoogleBaseHook.fallback_to_default_project_id
def get_product(
self,
location: str,
product_id: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
):
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionGetProductOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info('Retrieving Product: %s', name)
response = client.get_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('Product retrieved.')
self.log.debug('Product retrieved:\n%s', response)
return MessageToDict(response)
@GoogleBaseHook.fallback_to_default_project_id
def update_product(
self,
product: Union[dict, Product],
project_id: str,
location: Optional[str] = None,
product_id: Optional[str] = None,
update_mask: Optional[Dict[str, FieldMask]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductOperator`
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
self.log.info('Updating ProductSet: %s', product.name)
response = client.update_product(
product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product updated: %s', response.name if response else '')
self.log.debug('Product updated:\n%s', response)
return MessageToDict(response)
@GoogleBaseHook.fallback_to_default_project_id
def delete_product(
self,
location: str,
product_id: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
):
"""
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info('Deleting ProductSet: %s', name)
client.delete_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('Product with the name [%s] deleted:', name)
@GoogleBaseHook.fallback_to_default_project_id
def create_reference_image(
self,
location: str,
product_id: str,
reference_image: Union[Dict, ReferenceImage],
project_id: str,
reference_image_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> str:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateReferenceImageOperator`
"""
client = self.get_conn()
self.log.info('Creating ReferenceImage')
parent = ProductSearchClient.product_path(project=project_id, location=location, product=product_id)
response = client.create_reference_image(
parent=parent,
reference_image=reference_image,
reference_image_id=reference_image_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ReferenceImage created: %s', response.name if response else '')
self.log.debug('ReferenceImage created:\n%s', response)
if not reference_image_id:
# Reference image id was generated by the API
reference_image_id = self._get_autogenerated_id(response)
self.log.info(
'Extracted autogenerated ReferenceImage ID from the response: %s', reference_image_id
)
return reference_image_id
@GoogleBaseHook.fallback_to_default_project_id
def delete_reference_image(
self,
location: str,
product_id: str,
reference_image_id: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Dict:
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
"""
client = self.get_conn()
self.log.info('Deleting ReferenceImage')
name = ProductSearchClient.reference_image_path(
project=project_id, location=location, product=product_id, reference_image=reference_image_id
)
response = client.delete_reference_image(name=name, # pylint: disable=assignment-from-no-return
retry=retry,
timeout=timeout,
metadata=metadata)
self.log.info('ReferenceImage with the name [%s] deleted.', name)
return MessageToDict(response)
@GoogleBaseHook.fallback_to_default_project_id
def add_product_to_product_set(
self,
product_set_id: str,
product_id: str,
project_id: str,
location: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> None:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionAddProductToProductSetOperator`
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Add Product[name=%s] to Product Set[name=%s]', product_name, product_set_name)
client.add_product_to_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product added to Product Set')
@GoogleBaseHook.fallback_to_default_project_id
def remove_product_from_product_set(
self,
product_set_id: str,
product_id: str,
project_id: str,
location: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> None:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionRemoveProductFromProductSetOperator` # pylint: disable=line-too-long # noqa
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Remove Product[name=%s] from Product Set[name=%s]', product_name, product_set_name)
client.remove_product_from_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product removed from Product Set')
def annotate_image(
self,
request: Union[dict, AnnotateImageRequest],
retry: Optional[Retry] = None,
timeout: Optional[float] = None
) -> Dict:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionImageAnnotateOperator`
"""
client = self.annotator_client
self.log.info('Annotating image')
# pylint: disable=no-member
response = client.annotate_image(request=request, retry=retry, timeout=timeout)
self.log.info('Image annotated')
return MessageToDict(response)
@GoogleBaseHook.quota_retry()
def batch_annotate_images(
self,
requests: Union[List[dict], List[AnnotateImageRequest]],
retry: Optional[Retry] = None,
timeout: Optional[float] = None
) -> Dict:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionImageAnnotateOperator`
"""
client = self.annotator_client
self.log.info('Annotating images')
response = client.batch_annotate_images(requests=requests, # pylint: disable=no-member
retry=retry,
timeout=timeout)
self.log.info('Images annotated')
return MessageToDict(response)
@GoogleBaseHook.quota_retry()
def text_detection(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
additional_properties: Optional[Dict] = None
) -> Dict:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectTextOperator`
"""
client = self.annotator_client
self.log.info("Detecting text")
if additional_properties is None:
additional_properties = {}
response = client.text_detection( # pylint: disable=no-member
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response)
self._check_for_error(response)
self.log.info("Text detection finished")
return response
@GoogleBaseHook.quota_retry()
def document_text_detection(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
additional_properties: Optional[Dict] = None
) -> Dict:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionTextDetectOperator`
"""
client = self.annotator_client
self.log.info("Detecting document text")
if additional_properties is None:
additional_properties = {}
response = client.document_text_detection( # pylint: disable=no-member
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response)
self._check_for_error(response)
self.log.info("Document text detection finished")
return response
@GoogleBaseHook.quota_retry()
def label_detection(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
additional_properties: Optional[Dict] = None
) -> Dict:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageLabelsOperator`
"""
client = self.annotator_client
self.log.info("Detecting labels")
if additional_properties is None:
additional_properties = {}
response = client.label_detection( # pylint: disable=no-member
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response)
self._check_for_error(response)
self.log.info("Labels detection finished")
return response
@GoogleBaseHook.quota_retry()
def safe_search_detection(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
additional_properties: Optional[Dict] = None
) -> Dict:
"""
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageSafeSearchOperator`
"""
client = self.annotator_client
self.log.info("Detecting safe search")
if additional_properties is None:
additional_properties = {}
response = client.safe_search_detection( # pylint: disable=no-member
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response)
self._check_for_error(response)
self.log.info("Safe search detection finished")
return response
@staticmethod
def _get_autogenerated_id(response) -> str:
try:
name = response.name
except AttributeError as e:
raise AirflowException('Unable to get name from response... [{}]\n{}'.format(response, e))
if '/' not in name:
raise AirflowException('Unable to get id from name... [{}]'.format(name))
return name.rsplit('/', 1)[1]
| 37.509687
| 154
| 0.644881
|
e800386f478b3677b89454e5cd048391f765608b
| 43,853
|
py
|
Python
|
text/src/autogluon/text/automm/utils.py
|
canerturkmen/autogluon
|
f3e5d0f3f8d0156984dfd3f80fb6832a67875219
|
[
"Apache-2.0"
] | null | null | null |
text/src/autogluon/text/automm/utils.py
|
canerturkmen/autogluon
|
f3e5d0f3f8d0156984dfd3f80fb6832a67875219
|
[
"Apache-2.0"
] | null | null | null |
text/src/autogluon/text/automm/utils.py
|
canerturkmen/autogluon
|
f3e5d0f3f8d0156984dfd3f80fb6832a67875219
|
[
"Apache-2.0"
] | null | null | null |
import pytz
import datetime
import os
import functools
import logging
import pandas as pd
import pickle
import collections
import copy
import torch
from torch import nn
import warnings
from contextlib import contextmanager
from typing import Optional, List, Any, Dict, Tuple, Union
from nptyping import NDArray
from omegaconf import OmegaConf, DictConfig
from autogluon.core.metrics import get_metric
from .models import (
HFAutoModelForTextPrediction,
TimmAutoModelForImagePrediction,
CLIPForImageText,
CategoricalMLP,
NumericalMLP,
MultimodalFusionMLP,
NumericalTransformer,
CategoricalTransformer,
MultimodalFusionTransformer,
)
from .data import (
ImageProcessor,
TextProcessor,
CategoricalProcessor,
NumericalProcessor,
LabelProcessor,
MultiModalFeaturePreprocessor,
)
from .constants import (
ACCURACY, RMSE, R2, PEARSONR, SPEARMANR, ALL_MODALITIES,
IMAGE, TEXT, CATEGORICAL, NUMERICAL,
LABEL, MULTICLASS, BINARY, REGRESSION,
Y_PRED_PROB, Y_PRED, Y_TRUE, AUTOMM,
CLIP, TIMM_IMAGE, HF_TEXT, NUMERICAL_MLP,
CATEGORICAL_MLP, FUSION_MLP, NUMERICAL_TRANSFORMER,
CATEGORICAL_TRANSFORMER, FUSION_TRANSFORMER,
ROC_AUC, AVERAGE_PRECISION, LOG_LOSS,
)
from .presets import (
list_model_presets,
get_preset,
)
logger = logging.getLogger(AUTOMM)
def infer_metrics(
problem_type: Optional[str] = None,
eval_metric_name: Optional[str] = None,
):
"""
Infer the validation metric and the evaluation metric if not provided.
Validation metric is for early-stopping and selecting the best model checkpoints.
Evaluation metric is to report performance to users.
If the evaluation metric is provided, then we use it as the validation metric.
But there are some exceptions that validation metric is different from evaluation metric.
For example, if the provided evaluation metric is `r2`, we set the validation metric as `rmse`
since `torchmetrics.R2Score` may encounter errors for per gpu batch size 1. Another example is
that `torchmetrics.AUROC` requires that both positive and negative examples are available in a mini-batch.
When training a large model, the per gpu batch size is probably small, leading to an incorrect
roc_auc score.
Parameters
----------
problem_type
Type of problem.
eval_metric_name
Name of evaluation metric provided by users.
Returns
-------
validation_metric_name
Name of validation metric.
eval_metric_name
Name of evaluation metric.
"""
if eval_metric_name is not None:
if eval_metric_name.lower() in [R2, PEARSONR, SPEARMANR]:
validation_metric_name = RMSE
elif eval_metric_name.lower() in [ROC_AUC, AVERAGE_PRECISION]:
logger.info(
f"We use {LOG_LOSS} as the validation metric for more stable training. "
f"We avoid using {eval_metric_name} as the validation metric because `torchmetrics` "
f"requires that both positive and negative examples are available in a mini-batch."
f"If the per gpu batch size is too small to cover both, `torchmetrics` would"
f"compute {eval_metric_name} scores incorrectly."
)
validation_metric_name = LOG_LOSS
else:
validation_metric_name = eval_metric_name
return validation_metric_name, eval_metric_name
if problem_type in [MULTICLASS, BINARY]:
eval_metric_name = ACCURACY
elif problem_type == REGRESSION:
eval_metric_name = RMSE
else:
raise NotImplementedError(
f"Problem type: {problem_type} is not supported yet!"
)
validation_metric_name = eval_metric_name
return validation_metric_name, eval_metric_name
def get_config(
config: Union[dict, DictConfig],
overrides: Optional[Union[str, List[str], Dict]] = None,
):
"""
Construct configurations for model, data, optimization, and environment.
It supports to overrides some default configurations.
Parameters
----------
config
A dictionary including four keys: "model", "data", "optimization", and "environment".
If any key is not not given, we will fill in with the default value.
The value of each key can be a string, yaml path, or DictConfig object. For example:
config = {
"model": "fusion_mlp_image_text_tabular",
"data": "default",
"optimization": "adamw",
"environment": "default",
}
or
config = {
"model": "/path/to/model/config.yaml",
"data": "/path/to/data/config.yaml",
"optimization": "/path/to/optimization/config.yaml",
"environment": "/path/to/environment/config.yaml",
}
or
config = {
"model": OmegaConf.load("/path/to/model/config.yaml"),
"data": OmegaConf.load("/path/to/data/config.yaml"),
"optimization": OmegaConf.load("/path/to/optimization/config.yaml"),
"environment": OmegaConf.load("/path/to/environment/config.yaml"),
}
overrides
This is to override some default configurations.
For example, changing the text and image backbones can be done by formatting:
a string
overrides = "model.hf_text.checkpoint_name=google/electra-small-discriminator
model.timm_image.checkpoint_name=swin_small_patch4_window7_224"
or a list of strings
overrides = ["model.hf_text.checkpoint_name=google/electra-small-discriminator",
"model.timm_image.checkpoint_name=swin_small_patch4_window7_224"]
or a dictionary
overrides = {
"model.hf_text.checkpoint_name": "google/electra-small-discriminator",
"model.timm_image.checkpoint_name": "swin_small_patch4_window7_224",
}
Returns
-------
Configurations as a DictConfig object
"""
if config is None:
config = get_preset(list_model_presets()[0])
if not isinstance(config, DictConfig):
all_configs = []
for k, default_value in [('model', 'fusion_mlp_image_text_tabular'),
('data', 'default'),
('optimization', 'adamw'),
('environment', 'default')]:
if k not in config:
config[k] = default_value
for k, v in config.items():
if isinstance(v, dict):
per_config = OmegaConf.create(v)
elif isinstance(v, DictConfig):
per_config = v
elif isinstance(v, str):
if v.lower().endswith((".yaml", ".yml")):
per_config = OmegaConf.load(os.path.expanduser(v))
else:
cur_path = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(cur_path, "configs", k, f"{v}.yaml")
per_config = OmegaConf.load(config_path)
else:
raise ValueError(f"Unknown configuration type: {type(v)}")
all_configs.append(per_config)
config = OmegaConf.merge(*all_configs)
verify_model_names(config.model)
logger.debug(f"overrides: {overrides}")
if overrides is not None:
# avoid manipulating user-provided overrides
overrides = copy.deepcopy(overrides)
# apply customized model names
overrides = parse_dotlist_conf(overrides) # convert to a dict
config.model = customize_model_names(
config=config.model,
customized_names=overrides.get("model.names", None),
)
# remove `model.names` from overrides since it's already applied.
overrides.pop("model.names", None)
# apply all the overrides
config = apply_omegaconf_overrides(config, overrides=overrides, check_key_exist=True)
verify_model_names(config.model)
return config
def verify_model_names(config: DictConfig):
"""
Verify whether provided model names are valid.
Parameters
----------
config
Config should have a attribute `names`, which contains a list of
attribute names, e.g., ["timm_image", "hf_text"]. And each string in
`config.names` should also be a attribute of `config`, e.g, `config.timm_image`.
"""
# must have attribute `names`
assert hasattr(config, "names")
# assure no duplicate names
assert len(config.names) == len(set(config.names))
# verify that strings in `config.names` match the keys of `config`.
keys = list(config.keys())
keys.remove("names")
assert set(config.names).issubset(set(keys)), \
f"`{config.names}` do not match config keys {keys}"
# verify that no name starts with another one
names = sorted(config.names, key=lambda ele: len(ele), reverse=True)
for i in range(len(names)):
if names[i].startswith(tuple(names[i+1:])):
raise ValueError(
f"name {names[i]} starts with one of another name: {names[i+1:]}"
)
def get_name_prefix(
name: str,
prefixes: List[str],
):
"""
Get a name's prefix from some available candidates.
Parameters
----------
name
A name string
prefixes
Available prefixes.
Returns
-------
Prefix of the name.
"""
search_results = [pre for pre in prefixes if name.lower().startswith(pre)]
if len(search_results) == 0:
return None
elif len(search_results) >= 2:
raise ValueError(
f"Model name `{name}` is mapped to multiple models, "
f"which means some names in `{prefixes}` have duplicate prefixes."
)
else:
return search_results[0]
def customize_model_names(
config: DictConfig,
customized_names: Union[str, List[str]],
):
"""
Customize attribute names of `config` with the provided names.
A valid customized name string should start with one available name
string in `config`.
Parameters
----------
config
Config should have a attribute `names`, which contains a list of
attribute names, e.g., ["timm_image", "hf_text"]. And each string in
`config.names` should also be a attribute of `config`, e.g, `config.timm_image`.
customized_names
The provided names to replace the existing ones in `config.names` as well as
the corresponding attribute names. For example, if `customized_names` is
["timm_image_123", "hf_text_abc"], then `config.timm_image` and `config.hf_text`
are changed to `config.timm_image_123` and `config.hf_text_abc`.
Returns
-------
A new config with its first-level attributes customized by the provided names.
"""
if not customized_names:
return config
if isinstance(customized_names, str):
customized_names = OmegaConf.from_dotlist([f'names={customized_names}']).names
new_config = OmegaConf.create()
new_config.names = []
available_prefixes = list(config.keys())
available_prefixes.remove("names")
for per_name in customized_names:
per_prefix = get_name_prefix(
name=per_name,
prefixes=available_prefixes,
)
if per_prefix:
per_config = getattr(config, per_prefix)
setattr(new_config, per_name, copy.deepcopy(per_config))
new_config.names.append(per_name)
else:
logger.debug(
f"Removing {per_name}, which doesn't start with any of these prefixes: {available_prefixes}."
)
if len(new_config.names) == 0:
raise ValueError(
f"No customized name in `{customized_names}` starts with name prefixes in `{available_prefixes}`."
)
return new_config
def select_model(
config: DictConfig,
df_preprocessor: MultiModalFeaturePreprocessor,
):
"""
Filter model config through the detected modalities in the training data.
If MultiModalFeaturePreprocessor can't detect some modality,
this function will remove the models that use this modality. This function is to
maximize the user flexibility in defining the config.
For example, if one uses the "fusion_mlp_image_text_tabular" as the model config template
but the training data don't have images, this function will filter out all the models
using images, such as Swin Transformer and CLIP.
Parameters
----------
config
A DictConfig object. The model config should be accessible by "config.model"
df_preprocessor
A MultiModalFeaturePreprocessor object, which has called .fit() on the training data.
Column names of the same modality are grouped into one list. If a modality's list is empty,
it means the training data don't have this modality.
Returns
-------
Config with some unused models removed.
"""
data_status = {}
for per_modality in ALL_MODALITIES:
data_status[per_modality] = False
if len(df_preprocessor.image_path_names) > 0:
data_status[IMAGE] = True
if len(df_preprocessor.text_feature_names) > 0:
data_status[TEXT] = True
if len(df_preprocessor.categorical_feature_names) > 0:
data_status[CATEGORICAL] = True
if len(df_preprocessor.numerical_feature_names) > 0:
data_status[NUMERICAL] = True
names = config.model.names
if isinstance(names, str):
names = [names]
selected_model_names = []
fusion_model_name = []
for model_name in names:
model_config = getattr(config.model, model_name)
if model_config.data_types is None:
fusion_model_name.append(model_name)
continue
model_data_status = [data_status[d_type] for d_type in model_config.data_types]
if all(model_data_status):
selected_model_names.append(model_name)
else:
delattr(config.model, model_name)
if len(selected_model_names) == 0:
raise ValueError("No model is available for this dataset.")
# only allow no more than 1 fusion model
assert len(fusion_model_name) <= 1
if len(selected_model_names) > 1:
assert len(fusion_model_name) == 1
selected_model_names.extend(fusion_model_name)
else: # remove the fusion model's config make `config.model.names` and the keys of `config.model` consistent.
if len(fusion_model_name) == 1 and hasattr(config.model, fusion_model_name[0]):
delattr(config.model, fusion_model_name[0])
config.model.names = selected_model_names
logger.debug(f"selected models: {selected_model_names}")
return config
def init_df_preprocessor(
config: DictConfig,
column_types: collections.OrderedDict,
label_column: str,
train_df_x: pd.DataFrame,
train_df_y: pd.Series,
):
"""
Initialize the dataframe preprocessor by calling .fit().
Parameters
----------
config
A DictConfig containing only the data config.
column_types
A dictionary that maps column names to their data types.
For example: `column_types = {"item_name": "text", "image": "image_path",
"product_description": "text", "height": "numerical"}`
may be used for a table with columns: "item_name", "brand", "product_description", and "height".
label_column
Name of the column that contains the target variable to predict.
train_df_x
A pd.DataFrame containing only the feature columns.
train_df_y
A pd.Series object containing only the label column.
Returns
-------
Initialized dataframe preprocessor.
"""
df_preprocessor = MultiModalFeaturePreprocessor(
config=config,
column_types=column_types,
label_column=label_column,
)
df_preprocessor.fit(
X=train_df_x,
y=train_df_y,
)
return df_preprocessor
def init_data_processors(
config: DictConfig,
df_preprocessor: MultiModalFeaturePreprocessor,
):
"""
Create the data processors according to the model config. This function creates one processor for
each modality of each model. For example, if one model config contains BERT, ViT, and CLIP, then
BERT would have its own text processor, ViT would have its own image processor, and CLIP would have
its own text and image processors. This is to support training arbitrary combinations of single-modal
and multimodal models since two models may share the same modality but have different processing. Text
sequence length is a good example. BERT's sequence length is generally 512, while CLIP uses sequences of
length 77.
Parameters
----------
config
A DictConfig object. The model config should be accessible by "config.model".
df_preprocessor
The dataframe preprocessor.
Returns
-------
A dictionary with modalities as the keys. Each modality has a list of processors.
Note that "label" is also treated as a modality for convenience.
"""
names = config.model.names
if isinstance(names, str):
names = [names]
data_processors = {
IMAGE: [],
TEXT: [],
CATEGORICAL: [],
NUMERICAL: [],
LABEL: [],
}
for model_name in names:
model_config = getattr(config.model, model_name)
# each model has its own label processor
data_processors[LABEL].append(
LabelProcessor(prefix=model_name)
)
if model_config.data_types is None:
continue
for d_type in model_config.data_types:
if d_type == IMAGE:
data_processors[IMAGE].append(
ImageProcessor(
prefix=model_name,
checkpoint_name=model_config.checkpoint_name,
train_transform_types=model_config.train_transform_types,
val_transform_types=model_config.val_transform_types,
image_column_names=df_preprocessor.image_path_names,
norm_type=model_config.image_norm,
size=model_config.image_size,
max_img_num_per_col=model_config.max_img_num_per_col,
missing_value_strategy=config.data.image.missing_value_strategy,
)
)
elif d_type == TEXT:
data_processors[TEXT].append(
TextProcessor(
prefix=model_name,
tokenizer_name=model_config.tokenizer_name,
checkpoint_name=model_config.checkpoint_name,
text_column_names=df_preprocessor.text_feature_names,
max_len=model_config.max_text_len,
insert_sep=model_config.insert_sep,
text_segment_num=model_config.text_segment_num,
stochastic_chunk=model_config.stochastic_chunk,
)
)
elif d_type == CATEGORICAL:
data_processors[CATEGORICAL].append(
CategoricalProcessor(
prefix=model_name,
categorical_column_names=df_preprocessor.categorical_feature_names,
)
)
elif d_type == NUMERICAL:
data_processors[NUMERICAL].append(
NumericalProcessor(
prefix=model_name,
numerical_column_names=df_preprocessor.numerical_feature_names,
merge=model_config.merge,
)
)
else:
raise ValueError(f"unknown data type: {d_type}")
assert len(data_processors[LABEL]) > 0
# Only keep the modalities with non-empty processors.
data_processors = {k: v for k, v in data_processors.items() if len(v) > 0}
return data_processors
def create_model(
config: DictConfig,
num_classes: int,
num_numerical_columns: Optional[int] = None,
num_categories: Optional[List[int]] = None,
pretrained: Optional[bool] = True,
):
"""
Create models. It supports the auto models of huggingface text and timm image.
Multimodal models, e.g., CLIP, should be added case-by-case since their configs and usages
may be different. It uses MLP for the numerical features, categorical features, and late-fusion.
Parameters
----------
config
A DictConfig object. The model config should be accessible by "config.model".
num_classes
The class number for a classification task. It should be 1 for a regression task.
num_numerical_columns
The number of numerical columns in the training dataframe.
num_categories
The category number for each categorical column in the training dataframe.
pretrained
Whether using the pretrained timm models. If pretrained=True, download the pretrained model.
Returns
-------
A Pytorch model.
"""
names = config.model.names
if isinstance(names, str):
names = [names]
# make sure no duplicate model names
assert len(names) == len(set(names))
logger.debug(f"output_shape: {num_classes}")
all_models = []
for model_name in names:
model_config = getattr(config.model, model_name)
if model_name.lower().startswith(CLIP):
model = CLIPForImageText(
prefix=model_name,
checkpoint_name=model_config.checkpoint_name,
num_classes=num_classes,
)
elif model_name.lower().startswith(TIMM_IMAGE):
model = TimmAutoModelForImagePrediction(
prefix=model_name,
checkpoint_name=model_config.checkpoint_name,
num_classes=num_classes,
mix_choice=model_config.mix_choice,
pretrained=pretrained,
)
elif model_name.lower().startswith(HF_TEXT):
model = HFAutoModelForTextPrediction(
prefix=model_name,
checkpoint_name=model_config.checkpoint_name,
num_classes=num_classes,
)
elif model_name.lower().startswith(NUMERICAL_MLP):
model = NumericalMLP(
prefix=model_name,
in_features=num_numerical_columns,
hidden_features=model_config.hidden_size,
out_features=model_config.hidden_size,
num_layers=model_config.num_layers,
activation=model_config.activation,
dropout_prob=model_config.drop_rate,
normalization=model_config.normalization,
num_classes=num_classes,
)
elif model_name.lower().startswith(NUMERICAL_TRANSFORMER):
model = NumericalTransformer(
prefix=model_name,
in_features=num_numerical_columns,
out_features=model_config.out_features,
d_token=model_config.d_token,
n_blocks=model_config.num_trans_blocks,
attention_n_heads=model_config.num_attn_heads,
attention_dropout=model_config.attention_dropout,
residual_dropout=model_config.residual_dropout,
ffn_dropout=model_config.ffn_dropout,
attention_normalization=model_config.normalization,
ffn_normalization=model_config.normalization,
head_normalization=model_config.normalization,
ffn_activation=model_config.ffn_activation,
head_activation=model_config.head_activation,
num_classes=num_classes,
cls_token=True if len(names) == 1 else False,
)
elif model_name.lower().startswith(CATEGORICAL_MLP):
model = CategoricalMLP(
prefix=model_name,
num_categories=num_categories,
out_features=model_config.hidden_size,
num_layers=model_config.num_layers,
activation=model_config.activation,
dropout_prob=model_config.drop_rate,
normalization=model_config.normalization,
num_classes=num_classes,
)
elif model_name.lower().startswith(CATEGORICAL_TRANSFORMER):
model = CategoricalTransformer(
prefix=model_name,
num_categories=num_categories,
out_features=model_config.out_features,
d_token=model_config.d_token,
n_blocks=model_config.num_trans_blocks,
attention_n_heads=model_config.num_attn_heads,
attention_dropout=model_config.attention_dropout,
residual_dropout=model_config.residual_dropout,
ffn_dropout=model_config.ffn_dropout,
attention_normalization=model_config.normalization,
ffn_normalization=model_config.normalization,
head_normalization=model_config.normalization,
ffn_activation=model_config.ffn_activation,
head_activation=model_config.head_activation,
num_classes=num_classes,
cls_token=True if len(names) == 1 else False,
)
elif model_name.lower().startswith(FUSION_MLP):
fusion_model = functools.partial(
MultimodalFusionMLP,
prefix=model_name,
hidden_features=model_config.hidden_sizes,
num_classes=num_classes,
adapt_in_features=model_config.adapt_in_features,
activation=model_config.activation,
dropout_prob=model_config.drop_rate,
normalization=model_config.normalization,
loss_weight=model_config.weight if hasattr(model_config, "weight") else None,
)
continue
elif model_name.lower().startswith(FUSION_TRANSFORMER):
fusion_model = functools.partial(
MultimodalFusionTransformer,
prefix=model_name,
hidden_features=model_config.hidden_size,
num_classes=num_classes,
n_blocks=model_config.n_blocks,
attention_n_heads=model_config.attention_n_heads,
ffn_d_hidden=model_config.ffn_d_hidden,
attention_dropout=model_config.attention_dropout,
residual_dropout=model_config.residual_dropout,
ffn_dropout=model_config.ffn_dropout,
attention_normalization=model_config.normalization,
ffn_normalization=model_config.normalization,
head_normalization=model_config.normalization,
ffn_activation=model_config.ffn_activation,
head_activation=model_config.head_activation,
adapt_in_features=model_config.adapt_in_features,
loss_weight=model_config.weight if hasattr(model_config, "weight") else None,
)
continue
else:
raise ValueError(f"unknown model name: {model_name}")
all_models.append(model)
if len(all_models) > 1:
# must have one fusion model if there are multiple independent models
return fusion_model(models=all_models)
elif len(all_models) == 1:
return all_models[0]
else:
raise ValueError(f"No available models for {names}")
def save_pretrained_models(
model: nn.Module,
config: DictConfig,
path: str,
) -> DictConfig:
"""
Save the pretrained models and configs to local to make future loading not dependent on Internet access.
By loading local checkpoints, Huggingface doesn't need to download pretrained checkpoints from Internet.
It is called by setting "standalone=True" in "AutoMMPredictor.load()".
Parameters
----------
model
One model.
config
A DictConfig object. The model config should be accessible by "config.model".
path
The path to save pretrained checkpoints.
"""
requires_saving = any([
model_name.lower().startswith((CLIP, HF_TEXT)) for model_name in config.model.names
])
if not requires_saving:
return config
if len(config.model.names) == 1:
model = nn.ModuleList([model])
else: # assumes the fusion model has a model attribute, a nn.ModuleList
model = model.model
for per_model in model:
if per_model.prefix.lower().startswith((CLIP, HF_TEXT)):
per_model.model.save_pretrained(os.path.join(path, per_model.prefix))
model_config = getattr(config.model, per_model.prefix)
model_config.checkpoint_name = os.path.join('local://', per_model.prefix)
return config
def convert_checkpoint_name(
config: DictConfig,
path: str
) -> DictConfig:
"""
Convert the checkpoint name from relative path to absolute path for
loading the pretrained weights in offline deployment.
It is called by setting "standalone=True" in "AutoMMPredictor.load()".
Parameters
----------
config
A DictConfig object. The model config should be accessible by "config.model".
path
The saving path to the pretrained Huggingface models.
"""
for model_name in config.model.names:
if model_name.lower().startswith((CLIP, HF_TEXT)):
model_config = getattr(config.model, model_name)
if model_config.checkpoint_name.startswith('local://'):
model_config.checkpoint_name = os.path.join(path, model_config.checkpoint_name[len('local://'):])
assert os.path.exists(os.path.join(model_config.checkpoint_name, 'config.json')) # guarantee the existence of local configs
assert os.path.exists(os.path.join(model_config.checkpoint_name, 'pytorch_model.bin'))
return config
def save_text_tokenizers(
text_processors: List[TextProcessor],
path: str,
) -> List[TextProcessor]:
"""
Save all the text tokenizers and record their relative paths, which are
the corresponding model names, e.g, hf_text.
Parameters
----------
text_processors
A list of text processors with tokenizers.
path
The root path.
Returns
-------
A list of text processors with tokenizers replaced by their local relative paths.
"""
for per_text_processor in text_processors:
per_path = os.path.join(path, per_text_processor.prefix)
per_text_processor.tokenizer.save_pretrained(per_path)
per_text_processor.tokenizer = per_text_processor.prefix
return text_processors
def load_text_tokenizers(
text_processors: List[TextProcessor],
path: str,
) -> List[TextProcessor]:
"""
Load saved text tokenizers. If text processors already have tokenizers,
then do nothing.
Parameters
----------
text_processors
A list of text processors with tokenizers or their relative paths.
path
The root path.
Returns
-------
A list of text processors with tokenizers loaded.
"""
for per_text_processor in text_processors:
if isinstance(per_text_processor.tokenizer, str):
per_path = os.path.join(path, per_text_processor.tokenizer)
per_text_processor.tokenizer = per_text_processor.get_pretrained_tokenizer(
tokenizer_name=per_text_processor.tokenizer_name,
checkpoint_name=per_path,
)
return text_processors
def make_exp_dir(
root_path: str,
job_name: str,
create: Optional[bool] = True,
):
"""
Creates the exp dir of format e.g.,: root_path/2022_01_01/job_name_12_00_00/
This function is to better organize the training runs. It is recommended to call this
function and pass the returned "exp_dir" to "AutoMMPredictor.fit(save_path=exp_dir)".
Parameters
----------
root_path
The basic path where to create saving directories for training runs.
job_name
The job names to name training runs.
create
Whether to make the directory.
Returns
-------
The formatted directory path.
"""
tz = pytz.timezone('US/Pacific')
ct = datetime.datetime.now(tz=tz)
date_stamp = ct.strftime("%Y_%m_%d")
time_stamp = ct.strftime("%H_%M_%S")
# Group logs by day first
exp_dir = os.path.join(root_path, date_stamp)
# Then, group by run_name and hour + min + sec to avoid duplicates
exp_dir = os.path.join(exp_dir, "_".join([job_name, time_stamp]))
if create:
os.makedirs(exp_dir, mode=0o777, exist_ok=False)
return exp_dir
def average_checkpoints(
checkpoint_paths: List[str],
):
"""
Average a list of checkpoints' state_dicts.
Parameters
----------
checkpoint_paths
A list of model checkpoint paths.
Returns
-------
The averaged state_dict.
"""
avg_state_dict = {}
for per_path in checkpoint_paths:
state_dict = torch.load(per_path, map_location=torch.device("cpu"))["state_dict"]
for key in state_dict:
if key in avg_state_dict:
avg_state_dict[key] += state_dict[key]
else:
avg_state_dict[key] = state_dict[key]
del state_dict
num = torch.tensor(len(checkpoint_paths))
for key in avg_state_dict:
avg_state_dict[key] = avg_state_dict[key] / num.to(avg_state_dict[key])
return avg_state_dict
def compute_score(
metric_data: dict,
metric_name: str,
) -> float:
"""
Use sklearn to compute the score of one metric.
Parameters
----------
metric_data
A dictionary with the groundtruth (Y_TRUE) and predicted values (Y_PRED, Y_PRED_PROB).
The predicted class probabilities are required to compute the roc_auc score.
metric_name
The name of metric to compute.
Returns
-------
Computed score.
"""
metric = get_metric(metric_name)
if metric.name in [ROC_AUC, AVERAGE_PRECISION]:
return metric._sign * metric(metric_data[Y_TRUE], metric_data[Y_PRED_PROB][:, 1])
else:
return metric._sign * metric(metric_data[Y_TRUE], metric_data[Y_PRED])
def parse_dotlist_conf(conf):
"""Parse the config files that is potentially in the dotlist format to a dictionary
Parameters
----------
conf
Apply the conf stored as dotlist, e.g.,
'aaa=a, bbb=b' or ['aaa=a, ', 'bbb=b'] to {'aaa': 'a', 'bbb': b}
Returns
-------
new_conf
"""
if isinstance(conf, str):
conf = conf.split()
need_parse = True
elif isinstance(conf, (list, tuple)):
need_parse = True
elif isinstance(conf, dict):
need_parse = False
else:
raise ValueError(f'Unsupported format of conf={conf}')
if need_parse:
new_conf = dict()
curr_key = None
curr_value = ''
for ele in conf:
if '=' in ele:
key, v = ele.split('=')
if curr_key is not None:
new_conf[curr_key] = curr_value
curr_key = key
curr_value = v
else:
if curr_key is None:
raise ValueError(f'Cannot parse the conf={conf}')
curr_value = curr_value + ' ' + ele
if curr_key is not None:
new_conf[curr_key] = curr_value
return new_conf
else:
return conf
def apply_omegaconf_overrides(
conf: DictConfig,
overrides: Union[List, Tuple, str, Dict, DictConfig],
check_key_exist=True,
):
"""
Apply omegaconf overrides.
Parameters
----------
conf
The base configuration.
overrides
The overrides can be a string or a list.
check_key_exist
Whether to check if all keys in the overrides must exist in the conf.
Returns
-------
new_conf
The updated configuration.
"""
overrides = parse_dotlist_conf(overrides)
def _check_exist_dotlist(C, key_in_dotlist):
if not isinstance(key_in_dotlist, list):
key_in_dotlist = key_in_dotlist.split('.')
if key_in_dotlist[0] in C:
if len(key_in_dotlist) > 1:
return _check_exist_dotlist(C[key_in_dotlist[0]], key_in_dotlist[1:])
else:
return True
else:
return False
if check_key_exist:
for ele in overrides.items():
if not _check_exist_dotlist(conf, ele[0]):
raise KeyError(f'"{ele[0]}" is not found in the config. You may need to check the overrides. '
f'overrides={overrides}')
override_conf = OmegaConf.from_dotlist([f'{ele[0]}={ele[1]}' for ele in overrides.items()])
conf = OmegaConf.merge(conf, override_conf)
return conf
class LogFilter(logging.Filter):
"""
Filter log messages with patterns.
"""
def __init__(self, blacklist: Union[str, List[str]]):
"""
Parameters
----------
blacklist
Patterns to be suppressed in logging.
"""
super().__init__()
if isinstance(blacklist, str):
blacklist = [blacklist]
self._blacklist = blacklist
def filter(self, record):
"""
Check whether to suppress a logging message.
Parameters
----------
record
A logging message.
Returns
-------
If True, no pattern exists in the message, hence printed out.
If False, some pattern is in the message, hence filtered out.
"""
matches = [pattern not in record.msg for pattern in self._blacklist]
return all(matches)
def add_log_filter(target_logger, log_filter):
"""
Add one log filter to the target logger.
Parameters
----------
target_logger
Target logger
log_filter
Log filter
"""
for handler in target_logger.handlers:
handler.addFilter(log_filter)
def remove_log_filter(target_logger, log_filter):
"""
Remove one log filter to the target logger.
Parameters
----------
target_logger
Target logger
log_filter
Log filter
"""
for handler in target_logger.handlers:
handler.removeFilter(log_filter)
@contextmanager
def apply_log_filter(log_filter):
"""
User contextmanager to control the scope of applying one log filter.
Currently, it is to filter some pytorch lightning's log messages.
But we can easily extend it to cover more loggers.
Parameters
----------
log_filter
Log filter.
"""
try:
add_log_filter(logging.getLogger(), log_filter)
add_log_filter(logging.getLogger("pytorch_lightning"), log_filter)
yield
finally:
remove_log_filter(logging.getLogger(), log_filter)
remove_log_filter(logging.getLogger("pytorch_lightning"), log_filter)
def modify_duplicate_model_names(
predictor,
postfix: str,
blacklist: List[str],
):
"""
Modify a predictor's model names if they exist in a blacklist.
Parameters
----------
predictor
An AutoMMPredictor object.
postfix
The postfix used to change the duplicate names.
blacklist
A list of names. The provided predictor can't use model names in the list.
Returns
-------
The predictor guaranteed has no duplicate model names with the backlist names.
"""
model_names = []
for n in predictor._config.model.names:
if n in blacklist:
new_name = f"{n}_{postfix}"
assert new_name not in blacklist
assert new_name not in predictor._config.model.names
# modify model prefix
if n == predictor._model.prefix:
predictor._model.prefix = new_name
else:
assert isinstance(predictor._model.model, nn.ModuleList)
for per_model in predictor._model.model:
if n == per_model.prefix:
per_model.prefix = new_name
break
# modify data processor prefix
for per_modality_processors in predictor._data_processors.values():
for per_processor in per_modality_processors:
if n == per_processor.prefix:
per_processor.prefix = new_name
# modify model config keys
setattr(predictor._config.model, new_name, getattr(predictor._config.model, n))
delattr(predictor._config.model, n)
model_names.append(new_name)
else:
model_names.append(n)
predictor._config.model.names = model_names
return predictor
def assign_feature_column_names(
data_processors: Dict,
df_preprocessor: MultiModalFeaturePreprocessor,
):
"""
Assign feature column names to data processors.
This is to patch the data processors saved by AutoGluon 0.4.0.
Parameters
----------
data_processors
The data processors.
df_preprocessor
The dataframe preprocessor.
Returns
-------
The data processors with feature column names added.
"""
for per_modality in data_processors:
if per_modality == LABEL:
continue
for per_model_processor in data_processors[per_modality]:
# requires_column_info=True is used for feature column distillation.
per_model_processor.requires_column_info = False
if per_modality == IMAGE:
per_model_processor.image_column_names = df_preprocessor.image_path_names
elif per_modality == TEXT:
per_model_processor.text_column_names = df_preprocessor.text_feature_names
elif per_modality == NUMERICAL:
per_model_processor.numerical_column_names = df_preprocessor.numerical_feature_names
elif per_modality == CATEGORICAL:
per_model_processor.categorical_column_names = df_preprocessor.categorical_feature_names
else:
raise ValueError(f"Unknown modality: {per_modality}")
return data_processors
def turn_on_off_feature_column_info(
data_processors: Dict,
flag: bool,
):
"""
Turn on or off returning feature column information in data processors.
Since feature column information is not always required in training models,
we optionally turn this flag on or off.
Parameters
----------
data_processors
The data processors.
flag
True/False
Returns
-------
The data processors with the flag on or off.
"""
for per_modality_processors in data_processors.values():
for per_model_processor in per_modality_processors:
# label processor doesn't have requires_column_info.
if hasattr(per_model_processor, "requires_column_info"):
per_model_processor.requires_column_info = flag
return data_processors
| 35.594968
| 139
| 0.631405
|
fe57a71e81c7fa82099f8ce22ec0d5f651400e1c
| 388
|
py
|
Python
|
maro/cli/utils/params.py
|
KangFengjian/maro
|
2694a75731d5174ba5b33780670ba38d776d8c5a
|
[
"MIT"
] | 1
|
2021-04-16T14:53:47.000Z
|
2021-04-16T14:53:47.000Z
|
maro/cli/utils/params.py
|
KangFengjian/maro
|
2694a75731d5174ba5b33780670ba38d776d8c5a
|
[
"MIT"
] | 2
|
2020-12-15T09:13:43.000Z
|
2020-12-16T08:02:41.000Z
|
maro/cli/utils/params.py
|
KangFengjian/maro
|
2694a75731d5174ba5b33780670ba38d776d8c5a
|
[
"MIT"
] | 1
|
2021-10-01T09:17:43.000Z
|
2021-10-01T09:17:43.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
class GlobalParams:
PARALLELS = 5
LOG_LEVEL = logging.INFO
class GlobalPaths:
MARO_LIB = '~/.maro/lib'
MARO_GRASS_LIB = '~/.maro/lib/grass'
MARO_K8S_LIB = '~/.maro/lib/k8s'
MARO_CLUSTERS = '~/.maro/clusters'
MARO_DATA = '~/.maro/data'
MARO_TEST = '~/.maro/test'
| 19.4
| 40
| 0.654639
|
286d0a8a42c77cc4bbc98b014e78c2d26e36dbbe
| 1,142
|
py
|
Python
|
test/test_del_contact.py
|
Nish1975/python_training
|
55e9149aea2bb274b03c2aa828bfac5fd76b0ce6
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_contact.py
|
Nish1975/python_training
|
55e9149aea2bb274b03c2aa828bfac5fd76b0ce6
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_contact.py
|
Nish1975/python_training
|
55e9149aea2bb274b03c2aa828bfac5fd76b0ce6
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
import random
import time
def test_delete_some_contact(app,db,check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create_contact(
Contact(firstname="Kirill", middlename="A", lastname="Zimin", nickname="kira", title="Mr.",
company="Genesys",
address="Sredij pr.88", home="1111", mobile="2222", work="3333", fax="4444", email="a@mail.ru",
email2="b@mail.ru", email3="c@mail.ru", homepage="www.home.ru", byear="2000", ayear="2017",
address2="Moskovsky", phone2="5555", notes="aaa"))
old_contacts=db.get_contact_list()
contact=random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
print(old_contacts)
print(contact.id)
time.sleep(5)
new_contacts=db.get_contact_list()
print(new_contacts)
assert len(old_contacts)-1 == len(new_contacts)
old_contacts.remove(contact)
assert old_contacts==new_contacts
if check_ui:
assert sorted(new_contacts,key=Contact.id_or_max)==sorted(app.contact.get_contact_list(),key=Contact.id_or_max)
| 42.296296
| 119
| 0.665499
|
9f03b318bff090a113ff1996fc7ca2f1a6803284
| 4,270
|
py
|
Python
|
scripts/dom_comparitor.py
|
twoodcock/pythonDOMComparitor
|
6ac152c6a23fbc4ff981b844c47cb621612a4ccc
|
[
"MIT"
] | 1
|
2020-08-08T22:14:44.000Z
|
2020-08-08T22:14:44.000Z
|
scripts/dom_comparitor.py
|
pythonthings/pythonDOMComparitor
|
6ac152c6a23fbc4ff981b844c47cb621612a4ccc
|
[
"MIT"
] | null | null | null |
scripts/dom_comparitor.py
|
pythonthings/pythonDOMComparitor
|
6ac152c6a23fbc4ff981b844c47cb621612a4ccc
|
[
"MIT"
] | 1
|
2020-04-30T20:45:46.000Z
|
2020-04-30T20:45:46.000Z
|
#!env/bin/python3
import subprocess
import argparse
from htmldom import htmldom
import difflib
import urllib.parse
import os
# force latin1 encoding.
class LatinHtmlDom(htmldom.HtmlDom):
def getEncoding(self, response):
return "iso-8859-1";
def MyURL(uri, path="", query=""):
theURI = urllib.parse.quote(urllib.parse.urljoin(uri, path), safe=':/.')
if (query == None):
query = ""
if isinstance(query, list):
query = [urllib.parse.quote(arg, safe='=/:.') for arg in query]
else:
query = [urllib.parse.quote(query, safe='=/:.&')]
if (len(query) > 0):
theURI += "?" + "&".join(query)
o = urllib.parse.urlparse(theURI)
return o.geturl()
def getFiles(url1, url2):
oldcmd = ["wget", "-O", "url1-output.html", url1];
newcmd = ["wget", "-O", "url2-output.html", url2];
subprocess.call(oldcmd);
subprocess.call(newcmd);
def FindNodes(document, find):
if document:
return document.find(find)
else:
raise Exception("no document loaded")
def rstripList(list): return [x.rstrip("\n") for x in list]
def DOMDiff(url1, url2, findDOM):
print("site1: {}".format(url1));
print("site2: {}".format(url2));
site1Document = LatinHtmlDom(url1)
site1Document.createDom()
site2Document = LatinHtmlDom(url2)
site2Document.createDom()
myRes = ""
for domQuery in findDOM:
print("finddom: " + domQuery + " at " + url1)
print("finddom: " + domQuery + " at " + url2)
site1Text = []
site2Text = []
for node in FindNodes(site1Document, domQuery):
site1Text.append(node.html())
for node in FindNodes(site2Document, domQuery):
site2Text.append(node.html())
count = len(site1Text)
if (count < len(site2Text)):
count = len(site2Text)
for i in range(count):
if not i in site1Text:
site1Text.append("")
if not i in site2Text:
site2Text.append("")
res = list(diff(site1Text[i], site2Text[i], url1, url2))
if (len(res) == 0):
print("{sel} {i}: no differences".format(
sel=domQuery,
i=i
))
else:
print("{sel} {i}: diff:\n{diff}".format(
sel=domQuery,
i=i,
diff="\n".join(rstripList(res))
))
print("done comparing {query}:\n{url1}\n{url2}".format(
query=domQuery,
url1=url1,
url2=url2
))
def diff(old, new, oldStr="old", newStr="new"):
return difflib.unified_diff(
old.split("\n"),
new.split("\n"),
fromfile=oldStr,
tofile=newStr
)
def _cli():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument(
'-get',
required=False,
action='store_true',
help='download the content, stored in ./url1-output.html, ./url2-output.html'
)
parser.add_argument(
'-url1',
type=str,
required=False,
help='url1 URL',
default="file://"+os.path.join(os.getcwd(), 'url1-output.html')
)
parser.add_argument(
'-url2',
type=str,
required=False,
help='url2 URL',
default="file://"+os.path.join(os.getcwd(), 'url2-output.html')
)
parser.add_argument(
'-path',
type=str,
required=False,
help='common path string'
)
parser.add_argument(
'-query',
type=str,
required=False,
help='query string'
)
parser.add_argument(
'-find',
type=str,
required=False,
help='DOM query string',
action='append'
)
return parser.parse_args()
def main():
args = _cli()
print(repr(args.find))
url1 = MyURL(args.url1, args.path, args.query)
url2 = MyURL(args.url2, args.path, args.query)
if args.find == None:
args.find = ['body']
if (args.get):
getFiles(url1, url2)
else:
DOMDiff(url1, url2, args.find);
if __name__ == "__main__": main()
| 27.548387
| 85
| 0.540749
|
bde8757229dab6e08551f5e82676f9fee9b8e466
| 586
|
py
|
Python
|
Driver.py
|
CamIHelpYou/BIC_CalendarSync
|
091b755fbbfa0503419084b715454465e78004aa
|
[
"MIT"
] | null | null | null |
Driver.py
|
CamIHelpYou/BIC_CalendarSync
|
091b755fbbfa0503419084b715454465e78004aa
|
[
"MIT"
] | null | null | null |
Driver.py
|
CamIHelpYou/BIC_CalendarSync
|
091b755fbbfa0503419084b715454465e78004aa
|
[
"MIT"
] | null | null | null |
#!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3
'''
Created on Oct 17, 2018
@author: camli
'''
from HTML import getDates
from Parse import getEvents
from Calendar import modifyCal
import getpass
def main():
user = input("User: ")
password = getpass.getpass("Password: ")
print("Gathering Dates...")
print("If you get stuck here, the user/password is probably incorrect")
dates = getDates(user, password)
print("Creating Events...")
Events = getEvents(dates)
modifyCal(Events)
print("done")
if __name__ == '__main__':
main()
| 20.928571
| 75
| 0.686007
|
3fc00ee3ca6e18cc152881f71f5840646b01aa8b
| 1,079
|
py
|
Python
|
resources/lib/serial.py
|
DamonToumbourou/plugin.audio.serial
|
081189551dcf54ecf5a5a9dd8603e76423f44147
|
[
"MIT"
] | null | null | null |
resources/lib/serial.py
|
DamonToumbourou/plugin.audio.serial
|
081189551dcf54ecf5a5a9dd8603e76423f44147
|
[
"MIT"
] | null | null | null |
resources/lib/serial.py
|
DamonToumbourou/plugin.audio.serial
|
081189551dcf54ecf5a5a9dd8603e76423f44147
|
[
"MIT"
] | null | null | null |
import requests
import re
import urllib2
from bs4 import BeautifulSoup as bs
def get_soup(url):
page = requests.get(url)
soup = bs(page.text, 'html.parser')
return soup
def get_podcast_s1(url):
soup = get_soup(url)
content = soup.find_all('div', {'class': 'node node-episode node-promoted node-teaser view-teaser season-1 live clearfix'})
output = []
for i in content:
label = i.get_text().strip()
path = i.find('a', {'class': 'play'})['data-audio']
item = {
'label': label,
'path': path,
}
output.append(item)
return output
def get_podcast_s2(url):
soup = get_soup(url)
content = soup.find_all('div', {'class': 'wrapper'})
output = []
content = content [:-1]
for i in content:
label = i.find('div', {'class': 'episode'}).get_text()
path = i.find('a', {'class': 'play'})['data-audio']
item = {
'label': label,
'path': path,
}
output.append(item)
return output
| 20.75
| 127
| 0.545876
|
5a199f36d3768516e3c84db287169fca85186bad
| 179,415
|
py
|
Python
|
ryu/ofproto/ofproto_v1_4_parser.py
|
Rashminadig/SDN
|
9945f93156ca488bcad9b95c298d7ddc90873a87
|
[
"Apache-2.0"
] | null | null | null |
ryu/ofproto/ofproto_v1_4_parser.py
|
Rashminadig/SDN
|
9945f93156ca488bcad9b95c298d7ddc90873a87
|
[
"Apache-2.0"
] | null | null | null |
ryu/ofproto/ofproto_v1_4_parser.py
|
Rashminadig/SDN
|
9945f93156ca488bcad9b95c298d7ddc90873a87
|
[
"Apache-2.0"
] | 1
|
2019-02-04T21:41:00.000Z
|
2019-02-04T21:41:00.000Z
|
# Copyright (C) 2012, 2013, 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012, 2013 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import itertools
from ryu.lib import addrconv
from ryu.lib import mac
from ryu import utils
from ofproto_parser import StringifyMixin, MsgBase, msg_pack_into, msg_str_attr
from . import ether
from . import ofproto_parser
from . import ofproto_common
from . import ofproto_v1_4 as ofproto
_MSG_PARSERS = {}
def _set_msg_type(msg_type):
def _set_cls_msg_type(cls):
cls.cls_msg_type = msg_type
return cls
return _set_cls_msg_type
def _register_parser(cls):
'''class decorator to register msg parser'''
assert cls.cls_msg_type is not None
assert cls.cls_msg_type not in _MSG_PARSERS
_MSG_PARSERS[cls.cls_msg_type] = cls.parser
return cls
@ofproto_parser.register_msg_parser(ofproto.OFP_VERSION)
def msg_parser(datapath, version, msg_type, msg_len, xid, buf):
parser = _MSG_PARSERS.get(msg_type)
return parser(datapath, version, msg_type, msg_len, xid, buf)
@_register_parser
@_set_msg_type(ofproto.OFPT_HELLO)
class OFPHello(MsgBase):
"""
Hello message
When connection is started, the hello message is exchanged between a
switch and a controller.
This message is handled by the Ryu framework, so the Ryu application
do not need to process this typically.
========== =========================================================
Attribute Description
========== =========================================================
elements list of ``OFPHelloElemVersionBitmap`` instance
========== =========================================================
"""
def __init__(self, datapath, elements=[]):
super(OFPHello, self).__init__(datapath)
self.elements = elements
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPHello, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
offset = ofproto.OFP_HELLO_HEADER_SIZE
elems = []
while offset < msg.msg_len:
type_, length = struct.unpack_from(
ofproto.OFP_HELLO_ELEM_HEADER_PACK_STR, msg.buf, offset)
# better to register Hello Element classes but currently
# Only VerisonBitmap is supported so let's be simple.
if type_ == ofproto.OFPHET_VERSIONBITMAP:
elem = OFPHelloElemVersionBitmap.parser(msg.buf, offset)
elems.append(elem)
offset += length
msg.elements = elems
return msg
class OFPHelloElemVersionBitmap(StringifyMixin):
"""
Version bitmap Hello Element
========== =========================================================
Attribute Description
========== =========================================================
versions list of versions of OpenFlow protocol a device supports
========== =========================================================
"""
def __init__(self, versions, type_=None, length=None):
super(OFPHelloElemVersionBitmap, self).__init__()
self.type = ofproto.OFPHET_VERSIONBITMAP
self.length = None
self._bitmaps = None
self.versions = versions
@classmethod
def parser(cls, buf, offset):
type_, length = struct.unpack_from(
ofproto.OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR,
buf, offset)
assert type_ == ofproto.OFPHET_VERSIONBITMAP
bitmaps_len = (length -
ofproto.OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE)
offset += ofproto.OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE
bitmaps = []
while bitmaps_len >= 4:
bitmap = struct.unpack_from('!I', buf, offset)
bitmaps.append(bitmap[0])
offset += 4
bitmaps_len -= 4
versions = [i * 32 + shift
for i, bitmap in enumerate(bitmaps)
for shift in range(31) if bitmap & (1 << shift)]
elem = cls(versions)
elem.length = length
elem._bitmaps = bitmaps
return elem
@_register_parser
@_set_msg_type(ofproto.OFPT_ECHO_REQUEST)
class OFPEchoRequest(MsgBase):
"""
Echo request message
This message is handled by the Ryu framework, so the Ryu application
do not need to process this typically.
========== =========================================================
Attribute Description
========== =========================================================
data An arbitrary length data
========== =========================================================
Example::
def send_echo_request(self, datapath, data):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPEchoRequest(datapath, data)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPEchoRequest,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def echo_request_handler(self, ev):
self.logger.debug('OFPEchoRequest received: data=%s',
utils.hex_array(ev.msg.data))
"""
def __init__(self, datapath, data=None):
super(OFPEchoRequest, self).__init__(datapath)
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPEchoRequest, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.data = msg.buf[ofproto.OFP_HEADER_SIZE:]
return msg
def _serialize_body(self):
if self.data is not None:
self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_ERROR)
class OFPErrorMsg(MsgBase):
"""
Error message
The switch notifies controller of problems by this message.
========== =========================================================
Attribute Description
========== =========================================================
type High level type of error
code Details depending on the type
data Variable length data depending on the type and code
========== =========================================================
``type`` attribute corresponds to ``type_`` parameter of __init__.
Types and codes are defined in ``ryu.ofproto.ofproto``.
============================= ===========
Type Code
============================= ===========
OFPET_HELLO_FAILED OFPHFC_*
OFPET_BAD_REQUEST OFPBRC_*
OFPET_BAD_ACTION OFPBAC_*
OFPET_BAD_INSTRUCTION OFPBIC_*
OFPET_BAD_MATCH OFPBMC_*
OFPET_FLOW_MOD_FAILED OFPFMFC_*
OFPET_GROUP_MOD_FAILED OFPGMFC_*
OFPET_PORT_MOD_FAILED OFPPMFC_*
OFPET_TABLE_MOD_FAILED OFPTMFC_*
OFPET_QUEUE_OP_FAILED OFPQOFC_*
OFPET_SWITCH_CONFIG_FAILED OFPSCFC_*
OFPET_ROLE_REQUEST_FAILED OFPRRFC_*
OFPET_METER_MOD_FAILED OFPMMFC_*
OFPET_TABLE_FEATURES_FAILED OFPTFFC_*
OFPET_EXPERIMENTER N/A
============================= ===========
Example::
@set_ev_cls(ofp_event.EventOFPErrorMsg,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
msg = ev.msg
self.logger.debug('OFPErrorMsg received: type=0x%02x code=0x%02x '
'message=%s',
msg.type, msg.code, utils.hex_array(msg.data))
"""
def __init__(self, datapath, type_=None, code=None, data=None):
super(OFPErrorMsg, self).__init__(datapath)
self.type = type_
self.code = code
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
type_, = struct.unpack_from('!H', buffer(buf),
ofproto.OFP_HEADER_SIZE)
if type_ == ofproto.OFPET_EXPERIMENTER:
return OFPErrorExperimenterMsg.parser(datapath, version, msg_type,
msg_len, xid, buf)
msg = super(OFPErrorMsg, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.type, msg.code = struct.unpack_from(
ofproto.OFP_ERROR_MSG_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.data = msg.buf[ofproto.OFP_ERROR_MSG_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
msg_pack_into(ofproto.OFP_ERROR_MSG_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE, self.type, self.code)
self.buf += self.data
class OFPErrorExperimenterMsg(MsgBase):
def __init__(self, datapath, type_=None, exp_type=None, experimenter=None,
data=None):
super(OFPErrorExperimenterMsg, self).__init__(datapath)
self.type = ofproto.OFPET_EXPERIMENTER
self.exp_type = exp_type
self.experimenter = experimenter
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
cls.cls_msg_type = msg_type
msg = super(OFPErrorExperimenterMsg, cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
msg.type, msg.exp_type, msg.experimenter = struct.unpack_from(
ofproto.OFP_ERROR_EXPERIMENTER_MSG_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.data = msg.buf[ofproto.OFP_ERROR_EXPERIMENTER_MSG_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
msg_pack_into(ofproto.OFP_ERROR_EXPERIMENTER_MSG_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.type, self.exp_type, self.experimenter)
self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_ECHO_REPLY)
class OFPEchoReply(MsgBase):
"""
Echo reply message
This message is handled by the Ryu framework, so the Ryu application
do not need to process this typically.
========== =========================================================
Attribute Description
========== =========================================================
data An arbitrary length data
========== =========================================================
Example::
def send_echo_reply(self, datapath, data):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
reply = ofp_parser.OFPEchoReply(datapath, data)
datapath.send_msg(reply)
@set_ev_cls(ofp_event.EventOFPEchoReply,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def echo_reply_handler(self, ev):
self.logger.debug('OFPEchoReply received: data=%s',
utils.hex_array(ev.msg.data))
"""
def __init__(self, datapath, data=None):
super(OFPEchoReply, self).__init__(datapath)
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPEchoReply, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.data = msg.buf[ofproto.OFP_HEADER_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
self.buf += self.data
@_set_msg_type(ofproto.OFPT_FEATURES_REQUEST)
class OFPFeaturesRequest(MsgBase):
"""
Features request message
The controller sends a feature request to the switch upon session
establishment.
This message is handled by the Ryu framework, so the Ryu application
do not need to process this typically.
Example::
def send_features_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPFeaturesRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPFeaturesRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_EXPERIMENTER)
class OFPExperimenter(MsgBase):
"""
Experimenter extension message
============= =========================================================
Attribute Description
============= =========================================================
experimenter Experimenter ID
exp_type Experimenter defined
data Experimenter defined arbitrary additional data
============= =========================================================
"""
def __init__(self, datapath, experimenter=None, exp_type=None, data=None):
super(OFPExperimenter, self).__init__(datapath)
self.experimenter = experimenter
self.exp_type = exp_type
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPExperimenter, cls).parser(datapath, version,
msg_type, msg_len,
xid, buf)
(msg.experimenter, msg.exp_type) = struct.unpack_from(
ofproto.OFP_EXPERIMENTER_HEADER_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.data = msg.buf[ofproto.OFP_EXPERIMENTER_HEADER_SIZE:]
return msg
def _serialize_body(self):
assert self.data is not None
msg_pack_into(ofproto.OFP_EXPERIMENTER_HEADER_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.experimenter, self.exp_type)
self.buf += self.data
@_register_parser
@_set_msg_type(ofproto.OFPT_FEATURES_REPLY)
class OFPSwitchFeatures(MsgBase):
"""
Features reply message
The switch responds with a features reply message to a features
request.
This message is handled by the Ryu framework, so the Ryu application
do not need to process this typically.
Example::
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
msg = ev.msg
self.logger.debug('OFPSwitchFeatures received: '
'datapath_id=0x%016x n_buffers=%d '
'n_tables=%d auxiliary_id=%d '
'capabilities=0x%08x',
msg.datapath_id, msg.n_buffers, msg.n_tables,
msg.auxiliary_id, msg.capabilities)
"""
def __init__(self, datapath, datapath_id=None, n_buffers=None,
n_tables=None, auxiliary_id=None, capabilities=None):
super(OFPSwitchFeatures, self).__init__(datapath)
self.datapath_id = datapath_id
self.n_buffers = n_buffers
self.n_tables = n_tables
self.auxiliary_id = auxiliary_id
self.capabilities = capabilities
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPSwitchFeatures, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.datapath_id,
msg.n_buffers,
msg.n_tables,
msg.auxiliary_id,
msg.capabilities,
msg._reserved) = struct.unpack_from(
ofproto.OFP_SWITCH_FEATURES_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
return msg
@_set_msg_type(ofproto.OFPT_GET_CONFIG_REQUEST)
class OFPGetConfigRequest(MsgBase):
"""
Get config request message
The controller sends a get config request to query configuration
parameters in the switch.
Example::
def send_get_config_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGetConfigRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPGetConfigRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_GET_CONFIG_REPLY)
class OFPGetConfigReply(MsgBase):
"""
Get config reply message
The switch responds to a configuration request with a get config reply
message.
============= =========================================================
Attribute Description
============= =========================================================
flags One of the following configuration flags.
OFPC_FRAG_NORMAL
OFPC_FRAG_DROP
OFPC_FRAG_REASM
OFPC_FRAG_MASK
miss_send_len Max bytes of new flow that datapath should send to the
controller
============= =========================================================
Example::
@set_ev_cls(ofp_event.EventOFPGetConfigReply, MAIN_DISPATCHER)
def get_config_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.flags == ofp.OFPC_FRAG_NORMAL:
flags = 'NORMAL'
elif msg.flags == ofp.OFPC_FRAG_DROP:
flags = 'DROP'
elif msg.flags == ofp.OFPC_FRAG_REASM:
flags = 'REASM'
elif msg.flags == ofp.OFPC_FRAG_MASK:
flags = 'MASK'
else:
flags = 'unknown'
self.logger.debug('OFPGetConfigReply received: '
'flags=%s miss_send_len=%d',
flags, msg.miss_send_len)
"""
def __init__(self, datapath, flags=None, miss_send_len=None):
super(OFPGetConfigReply, self).__init__(datapath)
self.flags = flags
self.miss_send_len = miss_send_len
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPGetConfigReply, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.flags, msg.miss_send_len = struct.unpack_from(
ofproto.OFP_SWITCH_CONFIG_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
return msg
@_set_msg_type(ofproto.OFPT_SET_CONFIG)
class OFPSetConfig(MsgBase):
"""
Set config request message
The controller sends a set config request message to set configuraion
parameters.
============= =========================================================
Attribute Description
============= =========================================================
flags One of the following configuration flags.
OFPC_FRAG_NORMAL
OFPC_FRAG_DROP
OFPC_FRAG_REASM
OFPC_FRAG_MASK
miss_send_len Max bytes of new flow that datapath should send to the
controller
============= =========================================================
Example::
def send_set_config(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPSetConfig(datapath, ofp.OFPC_FRAG_NORMAL, 256)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, miss_send_len=0):
super(OFPSetConfig, self).__init__(datapath)
self.flags = flags
self.miss_send_len = miss_send_len
def _serialize_body(self):
assert self.flags is not None
assert self.miss_send_len is not None
msg_pack_into(ofproto.OFP_SWITCH_CONFIG_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.flags, self.miss_send_len)
class OFPMatch(StringifyMixin):
"""
Flow Match Structure
This class is implementation of the flow match structure having
compose/query API.
You can define the flow match by the keyword arguments.
The following arguments are available.
================ =============== ==================================
Argument Value Description
================ =============== ==================================
in_port Integer 32bit Switch input port
in_phy_port Integer 32bit Switch physical input port
metadata Integer 64bit Metadata passed between tables
eth_dst MAC address Ethernet destination address
eth_src MAC address Ethernet source address
eth_type Integer 16bit Ethernet frame type
vlan_vid Integer 16bit VLAN id
vlan_pcp Integer 8bit VLAN priority
ip_dscp Integer 8bit IP DSCP (6 bits in ToS field)
ip_ecn Integer 8bit IP ECN (2 bits in ToS field)
ip_proto Integer 8bit IP protocol
ipv4_src IPv4 address IPv4 source address
ipv4_dst IPv4 address IPv4 destination address
tcp_src Integer 16bit TCP source port
tcp_dst Integer 16bit TCP destination port
udp_src Integer 16bit UDP source port
udp_dst Integer 16bit UDP destination port
sctp_src Integer 16bit SCTP source port
sctp_dst Integer 16bit SCTP destination port
icmpv4_type Integer 8bit ICMP type
icmpv4_code Integer 8bit ICMP code
arp_op Integer 16bit ARP opcode
arp_spa IPv4 address ARP source IPv4 address
arp_tpa IPv4 address ARP target IPv4 address
arp_sha MAC address ARP source hardware address
arp_tha MAC address ARP target hardware address
ipv6_src IPv6 address IPv6 source address
ipv6_dst IPv6 address IPv6 destination address
ipv6_flabel Integer 32bit IPv6 Flow Label
icmpv6_type Integer 8bit ICMPv6 type
icmpv6_code Integer 8bit ICMPv6 code
ipv6_nd_target IPv6 address Target address for ND
ipv6_nd_sll MAC address Source link-layer for ND
ipv6_nd_tll MAC address Target link-layer for ND
mpls_label Integer 32bit MPLS label
mpls_tc Integer 8bit MPLS TC
mpls_bos Integer 8bit MPLS BoS bit
pbb_isid Integer 24bit PBB I-SID
tunnel_id Integer 64bit Logical Port Metadata
ipv6_exthdr Integer 16bit IPv6 Extension Header pseudo-field
pbb_uca Integer 8bit PBB UCA header field
================ =============== ==================================
Example::
>>> # compose
>>> match = parser.OFPMatch(
... in_port=1,
... eth_type=0x86dd,
... ipv6_src=('2001:db8:bd05:1d2:288a:1fc0:1:10ee',
... 'ffff:ffff:ffff:ffff::'),
... ipv6_dst='2001:db8:bd05:1d2:288a:1fc0:1:10ee')
>>> # query
>>> if 'ipv6_src' in match:
... print match['ipv6_src']
...
('2001:db8:bd05:1d2:288a:1fc0:1:10ee', 'ffff:ffff:ffff:ffff::')
"""
def __init__(self, type_=None, length=None, _ordered_fields=None,
**kwargs):
super(OFPMatch, self).__init__()
self.type = ofproto.OFPMT_OXM
self.length = length
if not _ordered_fields is None:
assert not kwargs
self._fields2 = _ordered_fields
else:
kwargs = dict(ofproto.oxm_normalize_user(k, v) for
(k, v) in kwargs.iteritems())
fields = [ofproto.oxm_from_user(k, v) for (k, v)
in kwargs.iteritems()]
# assumption: sorting by OXM type values makes fields
# meet ordering requirements (eg. eth_type before ipv4_src)
fields.sort()
self._fields2 = [ofproto.oxm_to_user(n, v, m) for (n, v, m)
in fields]
@classmethod
def parser(cls, buf, offset):
"""
Returns an object which is generated from a buffer including the
expression of the wire protocol of the flow match.
"""
match = OFPMatch()
type_, length = struct.unpack_from('!HH', buf, offset)
match.type = type_
match.length = length
# ofp_match adjustment
offset += 4
length -= 4
fields = []
while length > 0:
n, value, mask, field_len = ofproto.oxm_parse(buf, offset)
k, uv = ofproto.oxm_to_user(n, value, mask)
fields.append((k, uv))
offset += field_len
length -= field_len
match._fields2 = fields
return match
def serialize(self, buf, offset):
"""
Outputs the expression of the wire protocol of the flow match into
the buf.
Returns the output length.
"""
fields = [ofproto.oxm_from_user(k, uv) for (k, uv)
in self._fields2]
hdr_pack_str = '!HH'
field_offset = offset + struct.calcsize(hdr_pack_str)
for (n, value, mask) in fields:
field_offset += ofproto.oxm_serialize(n, value, mask, buf,
field_offset)
length = field_offset - offset
msg_pack_into(hdr_pack_str, buf, offset, ofproto.OFPMT_OXM, length)
self.length = length
pad_len = utils.round_up(length, 8) - length
ofproto_parser.msg_pack_into("%dx" % pad_len, buf, field_offset)
return length + pad_len
def __getitem__(self, key):
return dict(self._fields2)[key]
def __contains__(self, key):
return key in dict(self._fields2)
def iteritems(self):
return dict(self._fields2).iteritems()
def get(self, key, default=None):
return dict(self._fields2).get(key, default)
def stringify_attrs(self):
yield "oxm_fields", dict(self._fields2)
def to_jsondict(self):
"""
Returns a dict expressing the flow match.
"""
body = {"oxm_fields": [ofproto.oxm_to_jsondict(k, uv) for k, uv
in self._fields2],
"length": self.length,
"type": self.type}
return {self.__class__.__name__: body}
@classmethod
def from_jsondict(cls, dict_):
"""
Returns an object which is generated from a dict.
Exception raises:
KeyError -- Unknown match field is defined in dict
"""
fields = [ofproto.oxm_from_jsondict(f) for f
in dict_['oxm_fields']]
return OFPMatch(_ordered_fields=fields)
class OFPPropUnknown(StringifyMixin):
def __init__(self, type_=None, length=None, buf=None):
self.buf = buf
@classmethod
def parser(cls, buf):
return cls(buf=buf)
class OFPPropBase(StringifyMixin):
_PACK_STR = '!HH'
# _TYPES = {} must be an attribute of subclass
def __init__(self, type_, length=None):
self.type = type_
self.length = length
@classmethod
def register_type(cls, type_):
def _register_type(subcls):
cls._TYPES[type_] = subcls
return subcls
return _register_type
@classmethod
def parse(cls, buf):
(type_, length) = struct.unpack_from(cls._PACK_STR, buf, 0)
# needs
rest = buf[utils.round_up(length, 8):]
try:
subcls = cls._TYPES[type_]
except KeyError:
subcls = OFPPropUnknown
prop = subcls.parser(buf)
prop.type = type_
prop.length = length
return prop, rest
class OFPPortProp(OFPPropBase):
_TYPES = {}
@OFPPortProp.register_type(ofproto.OFPPDPT_ETHERNET)
class OFPPortDescPropEthernet(StringifyMixin):
def __init__(self, type_=None, length=None, curr=None, advertised=None,
supported=None, peer=None, curr_speed=None, max_speed=None):
self.type = type_
self.length = length
self.curr = curr
self.advertised = advertised
self.supported = supported
self.peer = peer
self.curr_speed = curr_speed
self.max_speed = max_speed
@classmethod
def parser(cls, buf):
ether = cls()
(ether.type, ether.length, ether.curr,
ether.advertised, ether.supported,
ether.peer, ether.curr_speed, ether.max_speed) = struct.unpack_from(
ofproto.OFP_PORT_DESC_PROP_ETHERNET_PACK_STR, buf, 0)
return ether
class OFPMatchField(StringifyMixin):
_FIELDS_HEADERS = {}
@staticmethod
def register_field_header(headers):
def _register_field_header(cls):
for header in headers:
OFPMatchField._FIELDS_HEADERS[header] = cls
return cls
return _register_field_header
def __init__(self, header):
self.header = header
self.n_bytes = ofproto.oxm_tlv_header_extract_length(header)
self.length = 0
@classmethod
def cls_to_header(cls, cls_, hasmask):
# XXX efficiency
inv = dict((v, k) for k, v in cls._FIELDS_HEADERS.iteritems()
if (((k >> 8) & 1) != 0) == hasmask)
return inv[cls_]
@staticmethod
def make(header, value, mask=None):
cls_ = OFPMatchField._FIELDS_HEADERS.get(header)
return cls_(header, value, mask)
@classmethod
def parser(cls, buf, offset):
(header,) = struct.unpack_from('!I', buf, offset)
cls_ = OFPMatchField._FIELDS_HEADERS.get(header)
if cls_:
field = cls_.field_parser(header, buf, offset)
else:
field = OFPMatchField(header)
field.length = (header & 0xff) + 4
return field
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if ofproto.oxm_tlv_header_extract_hasmask(header):
pack_str = '!' + cls.pack_str[1:] * 2
(value, mask) = struct.unpack_from(pack_str, buf, offset + 4)
else:
(value,) = struct.unpack_from(cls.pack_str, buf, offset + 4)
return cls(header, value, mask)
def serialize(self, buf, offset):
if ofproto.oxm_tlv_header_extract_hasmask(self.header):
self.put_w(buf, offset, self.value, self.mask)
else:
self.put(buf, offset, self.value)
def _put_header(self, buf, offset):
ofproto_parser.msg_pack_into('!I', buf, offset, self.header)
self.length = 4
def _put(self, buf, offset, value):
ofproto_parser.msg_pack_into(self.pack_str, buf, offset, value)
self.length += self.n_bytes
def put_w(self, buf, offset, value, mask):
self._put_header(buf, offset)
self._put(buf, offset + self.length, value)
self._put(buf, offset + self.length, mask)
def put(self, buf, offset, value):
self._put_header(buf, offset)
self._put(buf, offset + self.length, value)
def _putv6(self, buf, offset, value):
ofproto_parser.msg_pack_into(self.pack_str, buf, offset,
*value)
self.length += self.n_bytes
def putv6(self, buf, offset, value, mask=None):
self._put_header(buf, offset)
self._putv6(buf, offset + self.length, value)
if mask and len(mask):
self._putv6(buf, offset + self.length, mask)
def oxm_len(self):
return self.header & 0xff
def to_jsondict(self):
# remove some redundant attributes
d = super(OFPMatchField, self).to_jsondict()
v = d[self.__class__.__name__]
del v['header']
del v['length']
del v['n_bytes']
return d
@classmethod
def from_jsondict(cls, dict_):
# just pass the dict around.
# it will be converted by OFPMatch.__init__().
return {cls.__name__: dict_}
def stringify_attrs(self):
f = super(OFPMatchField, self).stringify_attrs
if not ofproto.oxm_tlv_header_extract_hasmask(self.header):
# something like the following, but yield two values (k,v)
# return itertools.ifilter(lambda k, v: k != 'mask', iter())
def g():
for k, v in f():
if k != 'mask':
yield (k, v)
return g()
else:
return f()
@OFPMatchField.register_field_header([ofproto.OXM_OF_IN_PORT])
class MTInPort(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTInPort, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_METADATA,
ofproto.OXM_OF_METADATA_W])
class MTMetadata(OFPMatchField):
pack_str = '!Q'
def __init__(self, header, value, mask=None):
super(MTMetadata, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_IN_PHY_PORT])
class MTInPhyPort(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTInPhyPort, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_ETH_DST,
ofproto.OXM_OF_ETH_DST_W])
class MTEthDst(OFPMatchField):
pack_str = '!6s'
def __init__(self, header, value, mask=None):
super(MTEthDst, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_ETH_SRC,
ofproto.OXM_OF_ETH_SRC_W])
class MTEthSrc(OFPMatchField):
pack_str = '!6s'
def __init__(self, header, value, mask=None):
super(MTEthSrc, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_ETH_TYPE])
class MTEthType(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTEthType, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_VLAN_VID,
ofproto.OXM_OF_VLAN_VID_W])
class MTVlanVid(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTVlanVid, self).__init__(header)
self.value = value
self.mask = mask
@classmethod
def field_parser(cls, header, buf, offset):
m = super(MTVlanVid, cls).field_parser(header, buf, offset)
m.value &= ~ofproto.OFPVID_PRESENT
return m
def serialize(self, buf, offset):
self.value |= ofproto.OFPVID_PRESENT
super(MTVlanVid, self).serialize(buf, offset)
@OFPMatchField.register_field_header([ofproto.OXM_OF_VLAN_PCP])
class MTVlanPcp(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTVlanPcp, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_IP_DSCP])
class MTIPDscp(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTIPDscp, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_IP_ECN])
class MTIPECN(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTIPECN, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_IP_PROTO])
class MTIPProto(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTIPProto, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV4_SRC,
ofproto.OXM_OF_IPV4_SRC_W])
class MTIPV4Src(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTIPV4Src, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV4_DST,
ofproto.OXM_OF_IPV4_DST_W])
class MTIPV4Dst(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTIPV4Dst, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_TCP_SRC])
class MTTCPSrc(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTTCPSrc, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_TCP_DST])
class MTTCPDst(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTTCPDst, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_UDP_SRC])
class MTUDPSrc(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTUDPSrc, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_UDP_DST])
class MTUDPDst(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTUDPDst, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_SCTP_SRC])
class MTSCTPSrc(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTSCTPSrc, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_SCTP_DST])
class MTSCTPDst(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTSCTPDst, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_ICMPV4_TYPE])
class MTICMPV4Type(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTICMPV4Type, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_ICMPV4_CODE])
class MTICMPV4Code(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTICMPV4Code, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_ARP_OP])
class MTArpOp(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTArpOp, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_ARP_SPA,
ofproto.OXM_OF_ARP_SPA_W])
class MTArpSpa(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTArpSpa, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_ARP_TPA,
ofproto.OXM_OF_ARP_TPA_W])
class MTArpTpa(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTArpTpa, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_ARP_SHA,
ofproto.OXM_OF_ARP_SHA_W])
class MTArpSha(OFPMatchField):
pack_str = '!6s'
def __init__(self, header, value, mask=None):
super(MTArpSha, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_ARP_THA,
ofproto.OXM_OF_ARP_THA_W])
class MTArpTha(OFPMatchField):
pack_str = '!6s'
def __init__(self, header, value, mask=None):
super(MTArpTha, self).__init__(header)
self.value = value
self.mask = mask
class MTIPv6(StringifyMixin):
@classmethod
def field_parser(cls, header, buf, offset):
if ofproto.oxm_tlv_header_extract_hasmask(header):
pack_str = '!' + cls.pack_str[1:] * 2
value = struct.unpack_from(pack_str, buf, offset + 4)
return cls(header, list(value[:8]), list(value[8:]))
else:
value = struct.unpack_from(cls.pack_str, buf, offset + 4)
return cls(header, list(value))
def serialize(self, buf, offset):
self.putv6(buf, offset, self.value, self.mask)
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV6_SRC,
ofproto.OXM_OF_IPV6_SRC_W])
class MTIPv6Src(MTIPv6, OFPMatchField):
pack_str = '!8H'
def __init__(self, header, value, mask=None):
super(MTIPv6Src, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV6_DST,
ofproto.OXM_OF_IPV6_DST_W])
class MTIPv6Dst(MTIPv6, OFPMatchField):
pack_str = '!8H'
def __init__(self, header, value, mask=None):
super(MTIPv6Dst, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV6_FLABEL,
ofproto.OXM_OF_IPV6_FLABEL_W])
class MTIPv6Flabel(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTIPv6Flabel, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_MPLS_LABEL])
class MTMplsLabel(OFPMatchField):
pack_str = '!I'
def __init__(self, header, value, mask=None):
super(MTMplsLabel, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_ICMPV6_TYPE])
class MTICMPV6Type(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTICMPV6Type, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_ICMPV6_CODE])
class MTICMPV6Code(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTICMPV6Code, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV6_ND_TARGET])
class MTIPv6NdTarget(MTIPv6, OFPMatchField):
pack_str = '!8H'
def __init__(self, header, value, mask=None):
super(MTIPv6NdTarget, self).__init__(header)
self.value = value
def serialize(self, buf, offset):
self.putv6(buf, offset, self.value)
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV6_ND_SLL])
class MTIPv6NdSll(OFPMatchField):
pack_str = '!6s'
def __init__(self, header, value, mask=None):
super(MTIPv6NdSll, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV6_ND_TLL])
class MTIPv6NdTll(OFPMatchField):
pack_str = '!6s'
def __init__(self, header, value, mask=None):
super(MTIPv6NdTll, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_MPLS_TC])
class MTMplsTc(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTMplsTc, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_MPLS_BOS])
class MTMplsBos(OFPMatchField):
pack_str = '!B'
def __init__(self, header, value, mask=None):
super(MTMplsBos, self).__init__(header)
self.value = value
@OFPMatchField.register_field_header([ofproto.OXM_OF_PBB_ISID,
ofproto.OXM_OF_PBB_ISID_W])
class MTPbbIsid(OFPMatchField):
pack_str = '!3B'
def __init__(self, header, value, mask=None):
super(MTPbbIsid, self).__init__(header)
self.value = value
self.mask = mask
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if ofproto.oxm_tlv_header_extract_hasmask(header):
pack_str = '!' + cls.pack_str[1:] * 2
(v1, v2, v3, m1, m2, m3) = struct.unpack_from(pack_str, buf,
offset + 4)
value = v1 << 16 | v2 << 8 | v3
mask = m1 << 16 | m2 << 8 | m3
else:
(v1, v2, v3,) = struct.unpack_from(cls.pack_str, buf, offset + 4)
value = v1 << 16 | v2 << 8 | v3
return cls(header, value, mask)
def _put(self, buf, offset, value):
ofproto_parser.msg_pack_into(self.pack_str, buf, offset,
(value >> 16) & 0xff,
(value >> 8) & 0xff,
(value >> 0) & 0xff)
self.length += self.n_bytes
@OFPMatchField.register_field_header([ofproto.OXM_OF_TUNNEL_ID,
ofproto.OXM_OF_TUNNEL_ID_W])
class MTTunnelId(OFPMatchField):
pack_str = '!Q'
def __init__(self, header, value, mask=None):
super(MTTunnelId, self).__init__(header)
self.value = value
self.mask = mask
@OFPMatchField.register_field_header([ofproto.OXM_OF_IPV6_EXTHDR,
ofproto.OXM_OF_IPV6_EXTHDR_W])
class MTIPv6ExtHdr(OFPMatchField):
pack_str = '!H'
def __init__(self, header, value, mask=None):
super(MTIPv6ExtHdr, self).__init__(header)
self.value = value
self.mask = mask
@_register_parser
@_set_msg_type(ofproto.OFPT_PACKET_IN)
class OFPPacketIn(MsgBase):
"""
Packet-In message
The switch sends the packet that received to the controller by this
message.
============= =========================================================
Attribute Description
============= =========================================================
buffer_id ID assigned by datapath
total_len Full length of frame
reason Reason packet is being sent.
OFPR_TABLE_MISS
OFPR_APPLY_ACTION
OFPR_INVALID_TTL
OFPR_ACTION_SET
OFPR_GROUP
OFPR_PACKET_OUT
table_id ID of the table that was looked up
cookie Cookie of the flow entry that was looked up
match Instance of ``OFPMatch``
data Ethernet frame
============= =========================================================
Example::
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
ofp = dp.ofproto
if msg.reason == ofp.TABLE_MISS:
reason = 'TABLE MISS'
elif msg.reason == ofp.OFPR_APPLY_ACTION:
reason = 'APPLY ACTION'
elif msg.reason == ofp.OFPR_INVALID_TTL:
reason = 'INVALID TTL'
elif msg.reason == ofp.OFPR_ACTION_SET:
reason = 'ACTION SET'
elif msg.reason == ofp.OFPR_GROUP:
reason = 'GROUP'
elif msg.reason == ofp.OFPR_PACKET_OUT:
reason = 'PACKET OUT'
else:
reason = 'unknown'
self.logger.debug('OFPPacketIn received: '
'buffer_id=%x total_len=%d reason=%s '
'table_id=%d cookie=%d match=%s data=%s',
msg.buffer_id, msg.total_len, reason,
msg.table_id, msg.cookie, msg.match,
utils.hex_array(msg.data))
"""
def __init__(self, datapath, buffer_id=None, total_len=None, reason=None,
table_id=None, cookie=None, match=None, data=None):
super(OFPPacketIn, self).__init__(datapath)
self.buffer_id = buffer_id
self.total_len = total_len
self.reason = reason
self.table_id = table_id
self.cookie = cookie
self.match = match
self.data = data
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPPacketIn, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.buffer_id, msg.total_len, msg.reason,
msg.table_id, msg.cookie) = struct.unpack_from(
ofproto.OFP_PACKET_IN_PACK_STR,
msg.buf, ofproto.OFP_HEADER_SIZE)
msg.match = OFPMatch.parser(msg.buf, ofproto.OFP_PACKET_IN_SIZE -
ofproto.OFP_MATCH_SIZE)
match_len = utils.round_up(msg.match.length, 8)
msg.data = msg.buf[(ofproto.OFP_PACKET_IN_SIZE -
ofproto.OFP_MATCH_SIZE + match_len + 2):]
if msg.total_len < len(msg.data):
# discard padding for 8-byte alignment of OFP packet
msg.data = msg.data[:msg.total_len]
return msg
@_register_parser
@_set_msg_type(ofproto.OFPT_FLOW_REMOVED)
class OFPFlowRemoved(MsgBase):
"""
Flow removed message
When flow entries time out or are deleted, the switch notifies controller
with this message.
================ ======================================================
Attribute Description
================ ======================================================
cookie Opaque controller-issued identifier
priority Priority level of flow entry
reason One of the following values.
OFPRR_IDLE_TIMEOUT
OFPRR_HARD_TIMEOUT
OFPRR_DELETE
OFPRR_GROUP_DELETE
OFPRR_METER_DELETE
OFPRR_EVICTION
table_id ID of the table
duration_sec Time flow was alive in seconds
duration_nsec Time flow was alive in nanoseconds beyond duration_sec
idle_timeout Idle timeout from original flow mod
hard_timeout Hard timeout from original flow mod
packet_count Number of packets that was associated with the flow
byte_count Number of bytes that was associated with the flow
match Instance of ``OFPMatch``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
reason = 'IDLE TIMEOUT'
elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:
reason = 'HARD TIMEOUT'
elif msg.reason == ofp.OFPRR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPRR_GROUP_DELETE:
reason = 'GROUP DELETE'
else:
reason = 'unknown'
self.logger.debug('OFPFlowRemoved received: '
'cookie=%d priority=%d reason=%s table_id=%d '
'duration_sec=%d duration_nsec=%d '
'idle_timeout=%d hard_timeout=%d '
'packet_count=%d byte_count=%d match.fields=%s',
msg.cookie, msg.priority, reason, msg.table_id,
msg.duration_sec, msg.duration_nsec,
msg.idle_timeout, msg.hard_timeout,
msg.packet_count, msg.byte_count, msg.match)
"""
def __init__(self, datapath, cookie=None, priority=None, reason=None,
table_id=None, duration_sec=None, duration_nsec=None,
idle_timeout=None, hard_timeout=None, packet_count=None,
byte_count=None, match=None):
super(OFPFlowRemoved, self).__init__(datapath)
self.cookie = cookie
self.priority = priority
self.reason = reason
self.table_id = table_id
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.packet_count = packet_count
self.byte_count = byte_count
self.match = match
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPFlowRemoved, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
(msg.cookie, msg.priority, msg.reason,
msg.table_id, msg.duration_sec, msg.duration_nsec,
msg.idle_timeout, msg.hard_timeout, msg.packet_count,
msg.byte_count) = struct.unpack_from(
ofproto.OFP_FLOW_REMOVED_PACK_STR0,
msg.buf, ofproto.OFP_HEADER_SIZE)
offset = (ofproto.OFP_FLOW_REMOVED_SIZE - ofproto.OFP_MATCH_SIZE)
msg.match = OFPMatch.parser(msg.buf, offset)
return msg
class OFPPort(StringifyMixin):
_TYPE = {
'ascii': [
'hw_addr',
],
'utf-8': [
# OF spec is unclear about the encoding of name.
# we assumes UTF-8, which is used by OVS.
'name',
]
}
def __init__(self, port_no=None, length=None, hw_addr=None, name=None,
config=None, state=None, properties=None):
super(OFPPort, self).__init__()
self.port_no = port_no
self.length = length
self.hw_addr = hw_addr
self.name = name
self.config = config
self.state = state
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(port_no, length, hw_addr, name, config, state) = struct.unpack_from(
ofproto.OFP_PORT_PACK_STR, buf, offset)
hw_addr = addrconv.mac.bin_to_text(hw_addr)
name = name.rstrip('\0')
props = []
rest = buf[offset + ofproto.OFP_PORT_SIZE:offset + length]
while rest:
p, rest = OFPPortProp.parse(rest)
props.append(p)
ofpport = cls(port_no, length, hw_addr, name, config, state, props)
return ofpport
def _set_stats_type(stats_type, stats_body_cls):
def _set_cls_stats_type(cls):
cls.cls_stats_type = stats_type
cls.cls_stats_body_cls = stats_body_cls
return cls
return _set_cls_stats_type
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMultipartRequest(MsgBase):
def __init__(self, datapath, flags):
super(OFPMultipartRequest, self).__init__(datapath)
self.type = self.__class__.cls_stats_type
self.flags = flags
def _serialize_stats_body(self):
pass
def _serialize_body(self):
msg_pack_into(ofproto.OFP_MULTIPART_REQUEST_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.type, self.flags)
self._serialize_stats_body()
@_set_msg_type(ofproto.OFPT_METER_MOD)
class OFPMeterMod(MsgBase):
"""
Meter modification message
The controller sends this message to modify the meter.
================ ======================================================
Attribute Description
================ ======================================================
command One of the following values.
OFPMC_ADD
OFPMC_MODIFY
OFPMC_DELETE
flags One of the following flags.
OFPMF_KBPS
OFPMF_PKTPS
OFPMF_BURST
OFPMF_STATS
meter_id Meter instance
bands list of the following class instance.
OFPMeterBandDrop
OFPMeterBandDscpRemark
OFPMeterBandExperimenter
================ ======================================================
"""
def __init__(self, datapath, command=ofproto.OFPMC_ADD,
flags=ofproto.OFPMF_KBPS, meter_id=1, bands=[]):
super(OFPMeterMod, self).__init__(datapath)
self.command = command
self.flags = flags
self.meter_id = meter_id
self.bands = bands
def _serialize_body(self):
msg_pack_into(ofproto.OFP_METER_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.command, self.flags, self.meter_id)
offset = ofproto.OFP_METER_MOD_SIZE
for b in self.bands:
b.serialize(self.buf, offset)
offset += b.len
@_set_msg_type(ofproto.OFPT_TABLE_MOD)
class OFPTableMod(MsgBase):
"""
Flow table configuration message
The controller sends this message to configure table state.
================ ======================================================
Attribute Description
================ ======================================================
table_id ID of the table (OFPTT_ALL indicates all tables)
config Bitmap of the following flags.
OFPTC_DEPRECATED_MASK (3)
================ ======================================================
Example::
def send_table_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPTableMod(datapath, 1, 3)
datapath.send_msg(req)
"""
def __init__(self, datapath, table_id, config):
super(OFPTableMod, self).__init__(datapath)
self.table_id = table_id
self.config = config
def _serialize_body(self):
msg_pack_into(ofproto.OFP_TABLE_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.table_id, self.config)
@_register_parser
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMultipartReply(MsgBase):
_STATS_MSG_TYPES = {}
@staticmethod
def register_stats_type(body_single_struct=False):
def _register_stats_type(cls):
assert cls.cls_stats_type is not None
assert cls.cls_stats_type not in OFPMultipartReply._STATS_MSG_TYPES
assert cls.cls_stats_body_cls is not None
cls.cls_body_single_struct = body_single_struct
OFPMultipartReply._STATS_MSG_TYPES[cls.cls_stats_type] = cls
return cls
return _register_stats_type
def __init__(self, datapath, body=None, flags=None):
super(OFPMultipartReply, self).__init__(datapath)
self.body = body
self.flags = flags
@classmethod
def parser_stats_body(cls, buf, msg_len, offset):
body_cls = cls.cls_stats_body_cls
body = []
while offset < msg_len:
entry = body_cls.parser(buf, offset)
body.append(entry)
offset += entry.length
if cls.cls_body_single_struct:
return body[0]
return body
@classmethod
def parser_stats(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = MsgBase.parser.__func__(
cls, datapath, version, msg_type, msg_len, xid, buf)
msg.body = msg.parser_stats_body(msg.buf, msg.msg_len,
ofproto.OFP_MULTIPART_REPLY_SIZE)
return msg
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
type_, flags = struct.unpack_from(
ofproto.OFP_MULTIPART_REPLY_PACK_STR, buffer(buf),
ofproto.OFP_HEADER_SIZE)
stats_type_cls = cls._STATS_MSG_TYPES.get(type_)
msg = super(OFPMultipartReply, stats_type_cls).parser(
datapath, version, msg_type, msg_len, xid, buf)
msg.type = type_
msg.flags = flags
offset = ofproto.OFP_MULTIPART_REPLY_SIZE
body = []
while offset < msg_len:
b = stats_type_cls.cls_stats_body_cls.parser(msg.buf, offset)
body.append(b)
offset += b.length if hasattr(b, 'length') else b.len
if stats_type_cls.cls_body_single_struct:
msg.body = body[0]
else:
msg.body = body
return msg
class OFPDescStats(ofproto_parser.namedtuple('OFPDescStats', (
'mfr_desc', 'hw_desc', 'sw_desc', 'serial_num', 'dp_desc'))):
_TYPE = {
'ascii': [
'mfr_desc',
'hw_desc',
'sw_desc',
'serial_num',
'dp_desc',
]
}
@classmethod
def parser(cls, buf, offset):
desc = struct.unpack_from(ofproto.OFP_DESC_PACK_STR,
buf, offset)
desc = list(desc)
desc = map(lambda x: x.rstrip('\0'), desc)
stats = cls(*desc)
stats.length = ofproto.OFP_DESC_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_DESC, OFPDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPDescStatsRequest(OFPMultipartRequest):
"""
Description statistics request message
The controller uses this message to query description of the switch.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_desc_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPDescStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPDescStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_DESC, OFPDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPDescStatsReply(OFPMultipartReply):
"""
Description statistics reply message
The switch responds with this message to a description statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body Instance of ``OFPDescStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPDescStatsReply, MAIN_DISPATCHER)
def desc_stats_reply_handler(self, ev):
body = ev.msg.body
self.logger.debug('DescStats: mfr_desc=%s hw_desc=%s sw_desc=%s '
'serial_num=%s dp_desc=%s',
body.mfr_desc, body.hw_desc, body.sw_desc,
body.serial_num, body.dp_desc)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPDescStatsReply, self).__init__(datapath, **kwargs)
class OFPTableFeaturesStats(StringifyMixin):
_TYPE = {
'utf-8': [
# OF spec is unclear about the encoding of name.
# we assumes UTF-8.
'name',
]
}
def __init__(self, table_id=None, name=None, metadata_match=None,
metadata_write=None, config=None, max_entries=None,
properties=None, length=None):
super(OFPTableFeaturesStats, self).__init__()
self.length = None
self.table_id = table_id
self.name = name
self.metadata_match = metadata_match
self.metadata_write = metadata_write
self.config = config
self.max_entries = max_entries
self.properties = properties
@classmethod
def parser(cls, buf, offset):
table_features = cls()
(table_features.length, table_features.table_id,
name, table_features.metadata_match,
table_features.metadata_write, table_features.config,
table_features.max_entries
) = struct.unpack_from(ofproto.OFP_TABLE_FEATURES_PACK_STR,
buf, offset)
table_features.name = name.rstrip('\0')
props = []
rest = buf[offset + ofproto.OFP_TABLE_FEATURES_SIZE:
offset + table_features.length]
while rest:
p, rest = OFPTableFeatureProp.parse(rest)
props.append(p)
table_features.properties = props
return table_features
def serialize(self):
# fixup
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
self.length = ofproto.OFP_TABLE_FEATURES_SIZE + len(bin_props)
buf = bytearray()
msg_pack_into(ofproto.OFP_TABLE_FEATURES_PACK_STR, buf, 0,
self.length, self.table_id, self.name,
self.metadata_match, self.metadata_write,
self.config, self.max_entries)
return buf + bin_props
class OFPTableFeatureProp(OFPPropBase):
_TYPES = {}
@classmethod
def get_rest(cls, buf):
(type_, length) = struct.unpack_from(cls._PACK_STR, buf, 0)
offset = struct.calcsize(cls._PACK_STR)
return buf[offset:length]
def serialize(self):
# Body
# serialize_body should be implemented by subclass
body = bytearray()
body += self.serialize_body()
# fixup
self.length = len(body) + struct.calcsize(self._PACK_STR)
# Header
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, self.type, self.length)
buf += body
# Pad
pad_len = utils.round_up(self.length, 8) - self.length
ofproto_parser.msg_pack_into("%dx" % pad_len, buf, len(buf))
return buf
class OFPInstructionId(StringifyMixin):
_PACK_STR = '!HH' # type, len
def __init__(self, type_, len_=None):
self.type = type_
self.len = len_
# XXX experimenter
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, buffer(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
def serialize(self):
# fixup
self.len = struct.calcsize(self._PACK_STR)
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, self.type, self.len)
return buf
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_INSTRUCTIONS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_INSTRUCTIONS_MISS)
class OFPTableFeaturePropInstructions(OFPTableFeatureProp):
def __init__(self, type_=None, length=None, instruction_ids=[]):
super(OFPTableFeaturePropInstructions, self).__init__(type_, length)
self.instruction_ids = instruction_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
i, rest = OFPInstructionId.parse(rest)
ids.append(i)
return cls(instruction_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.instruction_ids:
bin_ids += i.serialize()
return bin_ids
# Implementation note: While OpenFlow 1.3.2 shares the same ofp_action_header
# for flow_mod and table_features, we have separate classes. We named this
# class to match with OpenFlow 1.4's name. (ofp_action_id)
class OFPActionId(StringifyMixin):
_PACK_STR = '!HH' # type, len
def __init__(self, type_, len_=None):
self.type = type_
self.len = len_
# XXX experimenter
@classmethod
def parse(cls, buf):
(type_, len_,) = struct.unpack_from(cls._PACK_STR, buffer(buf), 0)
rest = buf[len_:]
return cls(type_=type_, len_=len_), rest
def serialize(self):
# fixup
self.len = struct.calcsize(self._PACK_STR)
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, self.type, self.len)
return buf
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_ACTIONS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_ACTIONS_MISS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_ACTIONS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_ACTIONS_MISS)
class OFPTableFeaturePropActions(OFPTableFeatureProp):
def __init__(self, type_=None, length=None, action_ids=[]):
super(OFPTableFeaturePropActions, self).__init__(type_, length)
self.action_ids = action_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
i, rest = OFPActionId.parse(rest)
ids.append(i)
return cls(action_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.action_ids:
bin_ids += i.serialize()
return bin_ids
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_NEXT_TABLES)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_NEXT_TABLES_MISS)
class OFPTableFeaturePropNextTables(OFPTableFeatureProp):
_TABLE_ID_PACK_STR = '!B'
def __init__(self, type_=None, length=None, table_ids=[]):
super(OFPTableFeaturePropNextTables, self).__init__(type_, length)
self.table_ids = table_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
(i,) = struct.unpack_from(cls._TABLE_ID_PACK_STR, buffer(rest), 0)
rest = rest[struct.calcsize(cls._TABLE_ID_PACK_STR):]
ids.append(i)
return cls(table_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.table_ids:
bin_id = bytearray()
msg_pack_into(self._TABLE_ID_PACK_STR, bin_id, 0, i)
bin_ids += bin_id
return bin_ids
# Implementation note: OFPOxmId is specific to this implementation.
# It does not have a corresponding structure in the specification.
# (the specification uses plain uint32_t for them.)
#
# i have taken a look at some of software switch implementations
# but they all look broken or incomplete. according to the spec,
# oxm_hasmask should be 1 if a switch supports masking for the type.
# the right value for oxm_length is not clear from the spec.
# update: OpenFlow 1.3.3 "clarified" that oxm_length here is the payload
# length. it's still unclear if it should be doubled for hasmask or not,
# though.
# ofsoftswitch13
# oxm_hasmask always 0
# oxm_length same as ofp_match etc (as without mask)
# linc/of_protocol
# oxm_hasmask always 0
# oxm_length always 0
# ovs:
# table-feature is not implemented
class OFPOxmId(StringifyMixin):
_PACK_STR = '!I' # oxm header
_TYPE = {
'ascii': [
'type',
],
}
def __init__(self, type_, hasmask=False, length=None):
self.type = type_
self.hasmask = hasmask
self.length = length
# XXX experimenter
@classmethod
def parse(cls, buf):
(oxm,) = struct.unpack_from(cls._PACK_STR, buffer(buf), 0)
(type_, _v) = ofproto.oxm_to_user(oxm >> 9, None, None)
hasmask = ofproto.oxm_tlv_header_extract_hasmask(oxm)
length = oxm & 0xff # XXX see the comment on OFPOxmId
rest = buf[4:] # XXX see the comment on OFPOxmId
return cls(type_=type_, hasmask=hasmask, length=length), rest
def serialize(self):
# fixup
self.length = 0 # XXX see the comment on OFPOxmId
(n, _v, _m) = ofproto.oxm_from_user(self.type, None)
oxm = (n << 9) | (self.hasmask << 8) | self.length
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, oxm)
return buf
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_MATCH)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WILDCARDS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_SETFIELD)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_WRITE_SETFIELD_MISS)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_SETFIELD)
@OFPTableFeatureProp.register_type(ofproto.OFPTFPT_APPLY_SETFIELD_MISS)
class OFPTableFeaturePropOxm(OFPTableFeatureProp):
def __init__(self, type_=None, length=None, oxm_ids=[]):
super(OFPTableFeaturePropOxm, self).__init__(type_, length)
self.oxm_ids = oxm_ids
@classmethod
def parser(cls, buf):
rest = cls.get_rest(buf)
ids = []
while rest:
i, rest = OFPOxmId.parse(rest)
ids.append(i)
return cls(oxm_ids=ids)
def serialize_body(self):
bin_ids = bytearray()
for i in self.oxm_ids:
bin_ids += i.serialize()
return bin_ids
@_set_stats_type(ofproto.OFPMP_TABLE_FEATURES, OFPTableFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPTableFeaturesStatsRequest(OFPMultipartRequest):
"""
Table features statistics request message
The controller uses this message to query table features.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPTableFeaturesStats`` instances.
The default is [].
================ ======================================================
"""
def __init__(self, datapath, flags=0, body=[], type_=None):
super(OFPTableFeaturesStatsRequest, self).__init__(datapath, flags)
self.body = body
def _serialize_stats_body(self):
bin_body = bytearray()
for p in self.body:
bin_body += p.serialize()
self.buf += bin_body
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_TABLE_FEATURES, OFPTableFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPTableFeaturesStatsReply(OFPMultipartReply):
"""
Table features statistics reply message
The switch responds with this message to a table features statistics
request.
This implmentation is still incomplete.
Namely, this implementation does not parse ``properties`` list and
always reports it empty.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPTableFeaturesStats`` instance
================ ======================================================
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPTableFeaturesStatsReply, self).__init__(datapath, **kwargs)
@_set_stats_type(ofproto.OFPMP_PORT_DESC, OFPPort)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPPortDescStatsRequest(OFPMultipartRequest):
"""
Port description request message
The controller uses this message to query description of all the ports.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_port_desc_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPPortDescStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_PORT_DESC, OFPPort)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPPortDescStatsReply(OFPMultipartReply):
"""
Port description reply message
The switch responds with this message to a port description request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPPortDescStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x properties=%s' %
(p.port_no, p.hw_addr,
p.name, p.config, p.state, repr(p.properties)))
self.logger.debug('OFPPortDescStatsReply received: %s', ports)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPPortDescStatsReply, self).__init__(datapath, **kwargs)
class OFPQueueProp(OFPPropBase):
_TYPES = {}
class OFPQueueStats(StringifyMixin):
def __init__(self, length=None, port_no=None, queue_id=None,
tx_bytes=None, tx_packets=None, tx_errors=None,
duration_sec=None, duration_nsec=None, properties=None):
super(OFPQueueStats, self).__init__()
self.length = length
self.port_no = port_no
self.queue_id = queue_id
self.tx_bytes = tx_bytes
self.tx_packets = tx_packets
self.tx_errors = tx_errors
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(length, port_no, queue_id, tx_bytes, tx_packets, tx_errors,
duration_sec, duration_nsec) = struct.unpack_from(
ofproto.OFP_QUEUE_STATS_PACK_STR, buf, offset)
props = []
rest = buf[offset + ofproto.OFP_QUEUE_STATS_SIZE:offset + length]
while rest:
p, rest = OFPQueueProp.parse(rest)
props.append(p)
stats = cls(length, port_no, queue_id, tx_bytes, tx_packets, tx_errors,
duration_sec, duration_nsec, props)
return stats
@_set_stats_type(ofproto.OFPMP_QUEUE, OFPQueueStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPQueueStatsRequest(OFPMultipartRequest):
"""
Queue statistics request message
The controller uses this message to query queue statictics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
port_no Port number to read
queue_id ID of queue to read
================ ======================================================
Example::
def send_queue_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPQueueStatsRequest(datapath, 0, ofp.OFPP_ANY,
ofp.OFPQ_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, port_no=ofproto.OFPP_ANY,
queue_id=ofproto.OFPQ_ALL, type_=None):
super(OFPQueueStatsRequest, self).__init__(datapath, flags)
self.port_no = port_no
self.queue_id = queue_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_QUEUE_STATS_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.port_no, self.queue_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_QUEUE, OFPQueueStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPQueueStatsReply(OFPMultipartReply):
"""
Queue statistics reply message
The switch responds with this message to an aggregate flow statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPQueueStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPQueueStatsReply, MAIN_DISPATCHER)
def queue_stats_reply_handler(self, ev):
queues = []
for stat in ev.msg.body:
queues.append('port_no=%d queue_id=%d '
'tx_bytes=%d tx_packets=%d tx_errors=%d '
'duration_sec=%d duration_nsec=%d' %
(stat.port_no, stat.queue_id,
stat.tx_bytes, stat.tx_packets, stat.tx_errors,
stat.duration_sec, stat.duration_nsec,
repr(stat.properties)))
self.logger.debug('QueueStats: %s', queues)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPQueueStatsReply, self).__init__(datapath, **kwargs)
class OFPBucketCounter(StringifyMixin):
def __init__(self, packet_count, byte_count):
super(OFPBucketCounter, self).__init__()
self.packet_count = packet_count
self.byte_count = byte_count
@classmethod
def parser(cls, buf, offset):
packet_count, byte_count = struct.unpack_from(
ofproto.OFP_BUCKET_COUNTER_PACK_STR, buf, offset)
return cls(packet_count, byte_count)
class OFPGroupStats(StringifyMixin):
def __init__(self, length=None, group_id=None, ref_count=None,
packet_count=None, byte_count=None, duration_sec=None,
duration_nsec=None, bucket_stats=None):
super(OFPGroupStats, self).__init__()
self.length = length
self.group_id = group_id
self.ref_count = ref_count
self.packet_count = packet_count
self.byte_count = byte_count
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.bucket_stats = bucket_stats
@classmethod
def parser(cls, buf, offset):
group = struct.unpack_from(ofproto.OFP_GROUP_STATS_PACK_STR,
buf, offset)
group_stats = cls(*group)
group_stats.bucket_stats = []
total_len = group_stats.length + offset
offset += ofproto.OFP_GROUP_STATS_SIZE
while total_len > offset:
b = OFPBucketCounter.parser(buf, offset)
group_stats.bucket_stats.append(b)
offset += ofproto.OFP_BUCKET_COUNTER_SIZE
return group_stats
@_set_stats_type(ofproto.OFPMP_GROUP, OFPGroupStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPGroupStatsRequest(OFPMultipartRequest):
"""
Group statistics request message
The controller uses this message to query statistics of one or more
groups.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
group_id ID of group to read (OFPG_ALL to all groups)
================ ======================================================
Example::
def send_group_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGroupStatsRequest(datapath, 0, ofp.OFPG_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, group_id=ofproto.OFPG_ALL,
type_=None):
super(OFPGroupStatsRequest, self).__init__(datapath, flags)
self.group_id = group_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_GROUP_STATS_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.group_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_GROUP, OFPGroupStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPGroupStatsReply(OFPMultipartReply):
"""
Group statistics reply message
The switch responds with this message to a group statistics request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPGroupStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPGroupStatsReply, MAIN_DISPATCHER)
def group_stats_reply_handler(self, ev):
groups = []
for stat in ev.msg.body:
groups.append('length=%d group_id=%d '
'ref_count=%d packet_count=%d byte_count=%d '
'duration_sec=%d duration_nsec=%d' %
(stat.length, stat.group_id,
stat.ref_count, stat.packet_count,
stat.byte_count, stat.duration_sec,
stat.duration_nsec))
self.logger.debug('GroupStats: %s', groups)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPGroupStatsReply, self).__init__(datapath, **kwargs)
class OFPGroupDescStats(StringifyMixin):
def __init__(self, type_=None, group_id=None, buckets=None, length=None):
super(OFPGroupDescStats, self).__init__()
self.type = type_
self.group_id = group_id
self.buckets = buckets
@classmethod
def parser(cls, buf, offset):
stats = cls()
(stats.length, stats.type, stats.group_id) = struct.unpack_from(
ofproto.OFP_GROUP_DESC_STATS_PACK_STR, buf, offset)
offset += ofproto.OFP_GROUP_DESC_STATS_SIZE
stats.buckets = []
length = ofproto.OFP_GROUP_DESC_STATS_SIZE
while length < stats.length:
bucket = OFPBucket.parser(buf, offset)
stats.buckets.append(bucket)
offset += bucket.len
length += bucket.len
return stats
@_set_stats_type(ofproto.OFPMP_GROUP_DESC, OFPGroupDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPGroupDescStatsRequest(OFPMultipartRequest):
"""
Group description request message
The controller uses this message to list the set of groups on a switch.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_group_desc_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGroupDescStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPGroupDescStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_GROUP_DESC, OFPGroupDescStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPGroupDescStatsReply(OFPMultipartReply):
"""
Group description reply message
The switch responds with this message to a group description request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPGroupDescStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPGroupDescStatsReply, MAIN_DISPATCHER)
def group_desc_stats_reply_handler(self, ev):
descs = []
for stat in ev.msg.body:
descs.append('length=%d type=%d group_id=%d '
'buckets=%s' %
(stat.length, stat.type, stat.group_id,
stat.bucket))
self.logger.debug('GroupDescStats: %s', groups)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPGroupDescStatsReply, self).__init__(datapath, **kwargs)
class OFPGroupFeaturesStats(ofproto_parser.namedtuple('OFPGroupFeaturesStats',
('types', 'capabilities', 'max_groups',
'actions'))):
@classmethod
def parser(cls, buf, offset):
group_features = struct.unpack_from(
ofproto.OFP_GROUP_FEATURES_PACK_STR, buf, offset)
types = group_features[0]
capabilities = group_features[1]
max_groups = list(group_features[2:6])
actions = list(group_features[6:10])
stats = cls(types, capabilities, max_groups, actions)
stats.length = ofproto.OFP_GROUP_FEATURES_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_GROUP_FEATURES, OFPGroupFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPGroupFeaturesStatsRequest(OFPMultipartRequest):
"""
Group features request message
The controller uses this message to list the capabilities of groups on
a switch.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_group_features_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGroupFeaturesStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPGroupFeaturesStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_GROUP_FEATURES, OFPGroupFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPGroupFeaturesStatsReply(OFPMultipartReply):
"""
Group features reply message
The switch responds with this message to a group features request.
================ ======================================================
Attribute Description
================ ======================================================
body Instance of ``OFPGroupFeaturesStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPGroupFeaturesStatsReply, MAIN_DISPATCHER)
def group_features_stats_reply_handler(self, ev):
body = ev.msg.body
self.logger.debug('GroupFeaturesStats: types=%d '
'capabilities=0x%08x max_groups=%s '
'actions=%s',
body.types, body.capabilities,
body.max_groups, body.actions)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPGroupFeaturesStatsReply, self).__init__(datapath, **kwargs)
class OFPMeterBandStats(StringifyMixin):
def __init__(self, packet_band_count, byte_band_count):
super(OFPMeterBandStats, self).__init__()
self.packet_band_count = packet_band_count
self.byte_band_count = byte_band_count
@classmethod
def parser(cls, buf, offset):
band_stats = struct.unpack_from(
ofproto.OFP_METER_BAND_STATS_PACK_STR, buf, offset)
return cls(*band_stats)
class OFPMeterStats(StringifyMixin):
def __init__(self, meter_id=None, flow_count=None, packet_in_count=None,
byte_in_count=None, duration_sec=None, duration_nsec=None,
band_stats=None, len_=None):
super(OFPMeterStats, self).__init__()
self.meter_id = meter_id
self.len = 0
self.flow_count = flow_count
self.packet_in_count = packet_in_count
self.byte_in_count = byte_in_count
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.band_stats = band_stats
@classmethod
def parser(cls, buf, offset):
meter_stats = cls()
(meter_stats.meter_id, meter_stats.len,
meter_stats.flow_count, meter_stats.packet_in_count,
meter_stats.byte_in_count, meter_stats.duration_sec,
meter_stats.duration_nsec) = struct.unpack_from(
ofproto.OFP_METER_STATS_PACK_STR, buf, offset)
offset += ofproto.OFP_METER_STATS_SIZE
meter_stats.band_stats = []
length = ofproto.OFP_METER_STATS_SIZE
while length < meter_stats.len:
band_stats = OFPMeterBandStats.parser(buf, offset)
meter_stats.band_stats.append(band_stats)
offset += ofproto.OFP_METER_BAND_STATS_SIZE
length += ofproto.OFP_METER_BAND_STATS_SIZE
return meter_stats
@_set_stats_type(ofproto.OFPMP_METER, OFPMeterStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMeterStatsRequest(OFPMultipartRequest):
"""
Meter statistics request message
The controller uses this message to query statistics for one or more
meters.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
meter_id ID of meter to read (OFPM_ALL to all meters)
================ ======================================================
Example::
def send_meter_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPMeterStatsRequest(datapath, 0, ofp.OFPM_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, meter_id=ofproto.OFPM_ALL,
type_=None):
super(OFPMeterStatsRequest, self).__init__(datapath, flags)
self.meter_id = meter_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_METER_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.meter_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_METER, OFPMeterStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMeterStatsReply(OFPMultipartReply):
"""
Meter statistics reply message
The switch responds with this message to a meter statistics request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPMeterStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPMeterStatsReply, MAIN_DISPATCHER)
def meter_stats_reply_handler(self, ev):
meters = []
for stat in ev.msg.body:
meters.append('meter_id=0x%08x len=%d flow_count=%d '
'packet_in_count=%d byte_in_count=%d '
'duration_sec=%d duration_nsec=%d '
'band_stats=%s' %
(stat.meter_id, stat.len, stat.flow_count,
stat.packet_in_count, stat.byte_in_count,
stat.duration_sec, stat.duration_nsec,
stat.band_stats))
self.logger.debug('MeterStats: %s', meters)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPMeterStatsReply, self).__init__(datapath, **kwargs)
class OFPMeterBand(StringifyMixin):
def __init__(self, type_, len_):
super(OFPMeterBand, self).__init__()
self.type = type_
self.len = len_
class OFPMeterBandHeader(OFPMeterBand):
_METER_BAND = {}
@staticmethod
def register_meter_band_type(type_, len_):
def _register_meter_band_type(cls):
OFPMeterBandHeader._METER_BAND[type_] = cls
cls.cls_meter_band_type = type_
cls.cls_meter_band_len = len_
return cls
return _register_meter_band_type
def __init__(self):
cls = self.__class__
super(OFPMeterBandHeader, self).__init__(cls.cls_meter_band_type,
cls.cls_meter_band_len)
@classmethod
def parser(cls, buf, offset):
type_, len_, _rate, _burst_size = struct.unpack_from(
ofproto.OFP_METER_BAND_HEADER_PACK_STR, buf, offset)
cls_ = cls._METER_BAND[type_]
assert cls_.cls_meter_band_len == len_
return cls_.parser(buf, offset)
@OFPMeterBandHeader.register_meter_band_type(
ofproto.OFPMBT_DROP, ofproto.OFP_METER_BAND_DROP_SIZE)
class OFPMeterBandDrop(OFPMeterBandHeader):
def __init__(self, rate, burst_size, type_=None, len_=None):
super(OFPMeterBandDrop, self).__init__()
self.rate = rate
self.burst_size = burst_size
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_METER_BAND_DROP_PACK_STR, buf, offset,
self.type, self.len, self.rate, self.burst_size)
@classmethod
def parser(cls, buf, offset):
type_, len_, rate, burst_size = struct.unpack_from(
ofproto.OFP_METER_BAND_DROP_PACK_STR, buf, offset)
assert cls.cls_meter_band_type == type_
assert cls.cls_meter_band_len == len_
return cls(rate, burst_size)
@OFPMeterBandHeader.register_meter_band_type(
ofproto.OFPMBT_DSCP_REMARK,
ofproto.OFP_METER_BAND_DSCP_REMARK_SIZE)
class OFPMeterBandDscpRemark(OFPMeterBandHeader):
def __init__(self, rate, burst_size, prec_level, type_=None, len_=None):
super(OFPMeterBandDscpRemark, self).__init__()
self.rate = rate
self.burst_size = burst_size
self.prec_level = prec_level
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_METER_BAND_DSCP_REMARK_PACK_STR, buf,
offset, self.type, self.len, self.rate,
self.burst_size, self.prec_level)
@classmethod
def parser(cls, buf, offset):
type_, len_, rate, burst_size, prec_level = struct.unpack_from(
ofproto.OFP_METER_BAND_DSCP_REMARK_PACK_STR, buf, offset)
assert cls.cls_meter_band_type == type_
assert cls.cls_meter_band_len == len_
return cls(rate, burst_size, prec_level)
@OFPMeterBandHeader.register_meter_band_type(
ofproto.OFPMBT_EXPERIMENTER,
ofproto.OFP_METER_BAND_EXPERIMENTER_SIZE)
class OFPMeterBandExperimenter(OFPMeterBandHeader):
def __init__(self, rate, burst_size, experimenter, type_=None, len_=None):
super(OFPMeterBandExperimenter, self).__init__()
self.rate = rate
self.burst_size = burst_size
self.experimenter = experimenter
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_METER_BAND_EXPERIMENTER_PACK_STR, buf,
offset, self.type, self.len, self.rate,
self.burst_size, self.experimenter)
@classmethod
def parser(cls, buf, offset):
type_, len_, rate, burst_size, experimenter = struct.unpack_from(
ofproto.OFP_METER_BAND_EXPERIMENTER_PACK_STR, buf, offset)
assert cls.cls_meter_band_type == type_
assert cls.cls_meter_band_len == len_
return cls(rate, burst_size, experimenter)
class OFPMeterConfigStats(StringifyMixin):
def __init__(self, flags=None, meter_id=None, bands=None, length=None):
super(OFPMeterConfigStats, self).__init__()
self.length = None
self.flags = flags
self.meter_id = meter_id
self.bands = bands
@classmethod
def parser(cls, buf, offset):
meter_config = cls()
(meter_config.length, meter_config.flags,
meter_config.meter_id) = struct.unpack_from(
ofproto.OFP_METER_CONFIG_PACK_STR, buf, offset)
offset += ofproto.OFP_METER_CONFIG_SIZE
meter_config.bands = []
length = ofproto.OFP_METER_CONFIG_SIZE
while length < meter_config.length:
band = OFPMeterBandHeader.parser(buf, offset)
meter_config.bands.append(band)
offset += band.len
length += band.len
return meter_config
@_set_stats_type(ofproto.OFPMP_METER_CONFIG, OFPMeterConfigStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMeterConfigStatsRequest(OFPMultipartRequest):
"""
Meter configuration statistics request message
The controller uses this message to query configuration for one or more
meters.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
meter_id ID of meter to read (OFPM_ALL to all meters)
================ ======================================================
Example::
def send_meter_config_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPMeterConfigStatsRequest(datapath, 0,
ofp.OFPM_ALL)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, meter_id=ofproto.OFPM_ALL,
type_=None):
super(OFPMeterConfigStatsRequest, self).__init__(datapath, flags)
self.meter_id = meter_id
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_METER_MULTIPART_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.meter_id)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_METER_CONFIG, OFPMeterConfigStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMeterConfigStatsReply(OFPMultipartReply):
"""
Meter configuration statistics reply message
The switch responds with this message to a meter configuration
statistics request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPMeterConfigStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPMeterConfigStatsReply, MAIN_DISPATCHER)
def meter_config_stats_reply_handler(self, ev):
configs = []
for stat in ev.msg.body:
configs.append('length=%d flags=0x%04x meter_id=0x%08x '
'bands=%s' %
(stat.length, stat.flags, stat.meter_id,
stat.bands))
self.logger.debug('MeterConfigStats: %s', configs)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPMeterConfigStatsReply, self).__init__(datapath, **kwargs)
class OFPMeterFeaturesStats(ofproto_parser.namedtuple('OFPMeterFeaturesStats',
('max_meter', 'band_types', 'capabilities',
'max_band', 'max_color'))):
@classmethod
def parser(cls, buf, offset):
meter_features = struct.unpack_from(
ofproto.OFP_METER_FEATURES_PACK_STR, buf, offset)
stats = cls(*meter_features)
stats.length = ofproto.OFP_METER_FEATURES_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_METER_FEATURES, OFPMeterFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPMeterFeaturesStatsRequest(OFPMultipartRequest):
"""
Meter features statistics request message
The controller uses this message to query the set of features of the
metering subsystem.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_meter_features_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPMeterFeaturesStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, type_=None):
super(OFPMeterFeaturesStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_METER_FEATURES, OFPMeterFeaturesStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPMeterFeaturesStatsReply(OFPMultipartReply):
"""
Meter features statistics reply message
The switch responds with this message to a meter features statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPMeterFeaturesStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPMeterFeaturesStatsReply, MAIN_DISPATCHER)
def meter_features_stats_reply_handler(self, ev):
features = []
for stat in ev.msg.body:
features.append('max_meter=%d band_types=0x%08x '
'capabilities=0x%08x max_band=%d '
'max_color=%d' %
(stat.max_meter, stat.band_types,
stat.capabilities, stat.max_band,
stat.max_color))
self.logger.debug('MeterFeaturesStats: %s', configs)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPMeterFeaturesStatsReply, self).__init__(datapath, **kwargs)
class OFPExperimenterMultipart(ofproto_parser.namedtuple(
'OFPExperimenterMultipart',
('experimenter', 'exp_type', 'data'))):
"""
The body of OFPExperimenterStatsReply multipart messages.
================ ======================================================
Attribute Description
================ ======================================================
experimenter Experimenter ID
exp_type Experimenter defined
data Experimenter defined additional data
================ ======================================================
"""
@classmethod
def parser(cls, buf, offset):
args = struct.unpack_from(
ofproto.OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR, buf,
offset)
args = list(args)
args.append(buf[offset +
ofproto.OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE:])
stats = cls(*args)
stats.length = ofproto.OFP_METER_FEATURES_SIZE
return stats
def serialize(self):
buf = bytearray()
msg_pack_into(ofproto.OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR,
buf, 0,
self.experimenter, self.exp_type)
return buf + self.data
class OFPExperimenterStatsRequestBase(OFPMultipartRequest):
def __init__(self, datapath, flags,
experimenter, exp_type,
type_=None):
super(OFPExperimenterStatsRequestBase, self).__init__(datapath, flags)
self.experimenter = experimenter
self.exp_type = exp_type
@_set_stats_type(ofproto.OFPMP_EXPERIMENTER, OFPExperimenterMultipart)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPExperimenterStatsRequest(OFPExperimenterStatsRequestBase):
"""
Experimenter multipart request message
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
experimenter Experimenter ID
exp_type Experimenter defined
data Experimenter defined additional data
================ ======================================================
"""
def __init__(self, datapath, flags,
experimenter, exp_type, data,
type_=None):
super(OFPExperimenterStatsRequest, self).__init__(datapath, flags,
experimenter,
exp_type, type_)
self.data = data
def _serialize_stats_body(self):
body = OFPExperimenterMultipart(experimenter=self.experimenter,
exp_type=self.exp_type,
data=self.data)
self.buf += body.serialize()
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_EXPERIMENTER, OFPExperimenterMultipart)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPExperimenterStatsReply(OFPMultipartReply):
"""
Experimenter multipart reply message
================ ======================================================
Attribute Description
================ ======================================================
body An ``OFPExperimenterMultipart`` instance
================ ======================================================
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPExperimenterStatsReply, self).__init__(datapath, **kwargs)
class OFPFlowStats(StringifyMixin):
def __init__(self, table_id=None, duration_sec=None, duration_nsec=None,
priority=None, idle_timeout=None, hard_timeout=None,
flags=None, importance=None, cookie=None, packet_count=None,
byte_count=None, match=None, instructions=None,
length=None):
super(OFPFlowStats, self).__init__()
self.length = 0
self.table_id = table_id
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.priority = priority
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.flags = flags
self.importance = importance
self.cookie = cookie
self.packet_count = packet_count
self.byte_count = byte_count
self.match = match
self.instructions = instructions
@classmethod
def parser(cls, buf, offset):
flow_stats = cls()
(flow_stats.length, flow_stats.table_id,
flow_stats.duration_sec, flow_stats.duration_nsec,
flow_stats.priority, flow_stats.idle_timeout,
flow_stats.hard_timeout, flow_stats.flags,
flow_stats.importance, flow_stats.cookie,
flow_stats.packet_count,
flow_stats.byte_count) = struct.unpack_from(
ofproto.OFP_FLOW_STATS_0_PACK_STR, buf, offset)
offset += ofproto.OFP_FLOW_STATS_0_SIZE
flow_stats.match = OFPMatch.parser(buf, offset)
match_length = utils.round_up(flow_stats.match.length, 8)
inst_length = (flow_stats.length - (ofproto.OFP_FLOW_STATS_SIZE -
ofproto.OFP_MATCH_SIZE +
match_length))
offset += match_length
instructions = []
while inst_length > 0:
inst = OFPInstruction.parser(buf, offset)
instructions.append(inst)
offset += inst.len
inst_length -= inst.len
flow_stats.instructions = instructions
return flow_stats
class OFPFlowStatsRequestBase(OFPMultipartRequest):
def __init__(self, datapath, flags, table_id, out_port, out_group,
cookie, cookie_mask, match):
super(OFPFlowStatsRequestBase, self).__init__(datapath, flags)
self.table_id = table_id
self.out_port = out_port
self.out_group = out_group
self.cookie = cookie
self.cookie_mask = cookie_mask
self.match = match
def _serialize_stats_body(self):
offset = ofproto.OFP_MULTIPART_REQUEST_SIZE
msg_pack_into(ofproto.OFP_FLOW_STATS_REQUEST_0_PACK_STR,
self.buf, offset, self.table_id, self.out_port,
self.out_group, self.cookie, self.cookie_mask)
offset += ofproto.OFP_FLOW_STATS_REQUEST_0_SIZE
self.match.serialize(self.buf, offset)
@_set_stats_type(ofproto.OFPMP_FLOW, OFPFlowStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPFlowStatsRequest(OFPFlowStatsRequestBase):
"""
Individual flow statistics request message
The controller uses this message to query individual flow statistics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
table_id ID of table to read
out_port Require matching entries to include this as an output
port
out_group Require matching entries to include this as an output
group
cookie Require matching entries to contain this cookie value
cookie_mask Mask used to restrict the cookie bits that must match
match Instance of ``OFPMatch``
================ ======================================================
Example::
def send_flow_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
match = ofp_parser.OFPMatch(in_port=1)
req = ofp_parser.OFPFlowStatsRequest(datapath, 0,
ofp.OFPTT_ALL,
ofp.OFPP_ANY, ofp.OFPG_ANY,
cookie, cookie_mask,
match)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags=0, table_id=ofproto.OFPTT_ALL,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
cookie=0, cookie_mask=0, match=None, type_=None):
if match is None:
match = OFPMatch()
super(OFPFlowStatsRequest, self).__init__(datapath, flags, table_id,
out_port, out_group,
cookie, cookie_mask, match)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_FLOW, OFPFlowStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPFlowStatsReply(OFPMultipartReply):
"""
Individual flow statistics reply message
The switch responds with this message to an individual flow statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPFlowStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def flow_stats_reply_handler(self, ev):
flows = []
for stat in ev.msg.body:
flows.append('table_id=%s '
'duration_sec=%d duration_nsec=%d '
'priority=%d '
'idle_timeout=%d hard_timeout=%d flags=0x%04x '
'importance=%d cookie=%d packet_count=%d '
'byte_count=%d match=%s instructions=%s' %
(stat.table_id,
stat.duration_sec, stat.duration_nsec,
stat.priority,
stat.idle_timeout, stat.hard_timeout,
stat.flags, stat.importance,
stat.cookie, stat.packet_count, stat.byte_count,
stat.match, stat.instructions))
self.logger.debug('FlowStats: %s', flows)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPFlowStatsReply, self).__init__(datapath, **kwargs)
class OFPAggregateStats(ofproto_parser.namedtuple('OFPAggregateStats', (
'packet_count', 'byte_count', 'flow_count'))):
@classmethod
def parser(cls, buf, offset):
agg = struct.unpack_from(
ofproto.OFP_AGGREGATE_STATS_REPLY_PACK_STR, buf, offset)
stats = cls(*agg)
stats.length = ofproto.OFP_AGGREGATE_STATS_REPLY_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_AGGREGATE, OFPAggregateStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPAggregateStatsRequest(OFPFlowStatsRequestBase):
"""
Aggregate flow statistics request message
The controller uses this message to query aggregate flow statictics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
table_id ID of table to read
out_port Require matching entries to include this as an output
port
out_group Require matching entries to include this as an output
group
cookie Require matching entries to contain this cookie value
cookie_mask Mask used to restrict the cookie bits that must match
match Instance of ``OFPMatch``
================ ======================================================
Example::
def send_aggregate_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
match = ofp_parser.OFPMatch(in_port=1)
req = ofp_parser.OFPAggregateStatsRequest(datapath, 0,
ofp.OFPTT_ALL,
ofp.OFPP_ANY,
ofp.OFPG_ANY,
cookie, cookie_mask,
match)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags, table_id, out_port, out_group,
cookie, cookie_mask, match, type_=None):
super(OFPAggregateStatsRequest, self).__init__(datapath,
flags,
table_id,
out_port,
out_group,
cookie,
cookie_mask,
match)
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto.OFPMP_AGGREGATE, OFPAggregateStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPAggregateStatsReply(OFPMultipartReply):
"""
Aggregate flow statistics reply message
The switch responds with this message to an aggregate flow statistics
request.
================ ======================================================
Attribute Description
================ ======================================================
body Instance of ``OFPAggregateStats``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPAggregateStatsReply, MAIN_DISPATCHER)
def aggregate_stats_reply_handler(self, ev):
body = ev.msg.body
self.logger.debug('AggregateStats: packet_count=%d byte_count=%d '
'flow_count=%d',
body.packet_count, body.byte_count,
body.flow_count)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPAggregateStatsReply, self).__init__(datapath, **kwargs)
class OFPTableStats(ofproto_parser.namedtuple('OFPTableStats', (
'table_id', 'active_count', 'lookup_count',
'matched_count'))):
@classmethod
def parser(cls, buf, offset):
tbl = struct.unpack_from(ofproto.OFP_TABLE_STATS_PACK_STR,
buf, offset)
stats = cls(*tbl)
stats.length = ofproto.OFP_TABLE_STATS_SIZE
return stats
class OFPTableStats(ofproto_parser.namedtuple('OFPTableStats', (
'table_id', 'active_count', 'lookup_count',
'matched_count'))):
@classmethod
def parser(cls, buf, offset):
tbl = struct.unpack_from(ofproto.OFP_TABLE_STATS_PACK_STR,
buf, offset)
stats = cls(*tbl)
stats.length = ofproto.OFP_TABLE_STATS_SIZE
return stats
@_set_stats_type(ofproto.OFPMP_TABLE, OFPTableStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPTableStatsRequest(OFPMultipartRequest):
"""
Table statistics request message
The controller uses this message to query flow table statictics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
================ ======================================================
Example::
def send_table_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPTableStatsRequest(datapath, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags, type_=None):
super(OFPTableStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_TABLE, OFPTableStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPTableStatsReply(OFPMultipartReply):
"""
Table statistics reply message
The switch responds with this message to a table statistics request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPTableStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPTableStatsReply, MAIN_DISPATCHER)
def table_stats_reply_handler(self, ev):
tables = []
for stat in ev.msg.body:
tables.append('table_id=%d active_count=%d lookup_count=%d '
' matched_count=%d' %
(stat.table_id, stat.active_count,
stat.lookup_count, stat.matched_count))
self.logger.debug('TableStats: %s', tables)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPTableStatsReply, self).__init__(datapath, **kwargs)
class OFPPortStatsProp(OFPPropBase):
_TYPES = {}
@OFPPortStatsProp.register_type(ofproto.OFPPSPT_ETHERNET)
class OFPPortStatsPropEthernet(StringifyMixin):
def __init__(self, type_=None, length=None, rx_frame_err=None,
rx_over_err=None, rx_crc_err=None, collisions=None):
self.type = type_
self.length = length
self.rx_frame_err = rx_frame_err
self.rx_over_err = rx_over_err
self.rx_crc_err = rx_crc_err
self.collisions = collisions
@classmethod
def parser(cls, buf):
ether = cls()
(ether.type, ether.length, ether.rx_frame_err, ether.rx_over_err,
ether.rx_crc_err, ether.collisions) = struct.unpack_from(
ofproto.OFP_PORT_STATS_PROP_ETHERNET_PACK_STR, buf, 0)
return ether
class OFPPortStats(StringifyMixin):
def __init__(self, length=None, port_no=None, duration_sec=None,
duration_nsec=None, rx_packets=None, tx_packets=None,
rx_bytes=None, tx_bytes=None, rx_dropped=None,
tx_dropped=None, rx_errors=None, tx_errors=None,
properties=None):
super(OFPPortStats, self).__init__()
self.length = length
self.port_no = port_no
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.rx_packets = rx_packets
self.tx_packets = tx_packets
self.rx_bytes = rx_bytes
self.tx_bytes = tx_bytes
self.rx_dropped = rx_dropped
self.tx_dropped = tx_dropped
self.rx_errors = rx_errors
self.tx_errors = tx_errors
self.properties = properties
@classmethod
def parser(cls, buf, offset):
(length, port_no, duration_sec, duration_nsec, rx_packets,
tx_packets, rx_bytes, tx_bytes, rx_dropped, tx_dropped,
rx_errors, tx_errors) = struct.unpack_from(
ofproto.OFP_PORT_STATS_PACK_STR, buf, offset)
props = []
rest = buf[offset + ofproto.OFP_PORT_STATS_SIZE:offset + length]
while rest:
p, rest = OFPPortStatsProp.parse(rest)
props.append(p)
stats = cls(length, port_no, duration_sec, duration_nsec, rx_packets,
tx_packets, rx_bytes, tx_bytes, rx_dropped, tx_dropped,
rx_errors, tx_errors, props)
return stats
@_set_stats_type(ofproto.OFPMP_PORT_STATS, OFPPortStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPPortStatsRequest(OFPMultipartRequest):
"""
Port statistics request message
The controller uses this message to query information about ports
statistics.
================ ======================================================
Attribute Description
================ ======================================================
flags Zero or ``OFPMPF_REQ_MORE``
port_no Port number to read (OFPP_ANY to all ports)
================ ======================================================
Example::
def send_port_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPPortStatsRequest(datapath, 0, ofp.OFPP_ANY)
datapath.send_msg(req)
"""
def __init__(self, datapath, flags, port_no, type_=None):
super(OFPPortStatsRequest, self).__init__(datapath, flags)
self.port_no = port_no
def _serialize_stats_body(self):
msg_pack_into(ofproto.OFP_PORT_STATS_REQUEST_PACK_STR,
self.buf,
ofproto.OFP_MULTIPART_REQUEST_SIZE,
self.port_no)
@OFPMultipartReply.register_stats_type()
@_set_stats_type(ofproto.OFPMP_PORT_STATS, OFPPortStats)
@_set_msg_type(ofproto.OFPT_MULTIPART_REPLY)
class OFPPortStatsReply(OFPMultipartReply):
"""
Port statistics reply message
The switch responds with this message to a port statistics request.
================ ======================================================
Attribute Description
================ ======================================================
body List of ``OFPPortStats`` instance
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_reply_handler(self, ev):
ports = []
for stat in ev.msg.body:
ports.append(stat.length, stat.port_no,
stat.duration_sec, stat.duration_nsec,
stat.rx_packets, stat.tx_packets,
stat.rx_bytes, stat.tx_bytes,
stat.rx_dropped, stat.tx_dropped,
stat.rx_errors, stat.tx_errors,
repr(stat.properties))
self.logger.debug('PortStats: %s', ports)
"""
def __init__(self, datapath, type_=None, **kwargs):
super(OFPPortStatsReply, self).__init__(datapath, **kwargs)
@_set_msg_type(ofproto.OFPT_BARRIER_REQUEST)
class OFPBarrierRequest(MsgBase):
"""
Barrier request message
The controller sends this message to ensure message dependencies have
been met or receive notifications for completed operations.
Example::
def send_barrier_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPBarrierRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPBarrierRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_BARRIER_REPLY)
class OFPBarrierReply(MsgBase):
"""
Barrier reply message
The switch responds with this message to a barrier request.
Example::
@set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
def barrier_reply_handler(self, ev):
self.logger.debug('OFPBarrierReply received')
"""
def __init__(self, datapath):
super(OFPBarrierReply, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_PORT_STATUS)
class OFPPortStatus(MsgBase):
"""
Port status message
The switch notifies controller of change of ports.
================ ======================================================
Attribute Description
================ ======================================================
reason One of the following values.
OFPPR_ADD
OFPPR_DELETE
OFPPR_MODIFY
desc instance of ``OFPPort``
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def port_status_handler(self, ev):
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
if msg.reason == ofp.OFPPR_ADD:
reason = 'ADD'
elif msg.reason == ofp.OFPPR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPPR_MODIFY:
reason = 'MODIFY'
else:
reason = 'unknown'
self.logger.debug('OFPPortStatus received: reason=%s desc=%s',
reason, msg.desc)
"""
def __init__(self, datapath, reason=None, desc=None):
super(OFPPortStatus, self).__init__(datapath)
self.reason = reason
self.desc = desc
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPPortStatus, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
msg.reason = struct.unpack_from(
ofproto.OFP_PORT_STATUS_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)[0]
msg.desc = OFPPort.parser(msg.buf, ofproto.OFP_PORT_STATUS_DESC_OFFSET)
return msg
@_set_msg_type(ofproto.OFPT_PACKET_OUT)
class OFPPacketOut(MsgBase):
"""
Packet-Out message
The controller uses this message to send a packet out throught the
switch.
================ ======================================================
Attribute Description
================ ======================================================
buffer_id ID assigned by datapath (OFP_NO_BUFFER if none)
in_port Packet's input port or ``OFPP_CONTROLLER``
actions list of OpenFlow action class
data Packet data
================ ======================================================
Example::
def send_packet_out(self, datapath, buffer_id, in_port):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_FLOOD, 0)]
req = ofp_parser.OFPPacketOut(datapath, buffer_id,
in_port, actions)
datapath.send_msg(req)
"""
def __init__(self, datapath, buffer_id=None, in_port=None, actions=None,
data=None, actions_len=None):
assert in_port is not None
super(OFPPacketOut, self).__init__(datapath)
self.buffer_id = buffer_id
self.in_port = in_port
self.actions_len = 0
self.actions = actions
self.data = data
def _serialize_body(self):
self.actions_len = 0
offset = ofproto.OFP_PACKET_OUT_SIZE
for a in self.actions:
a.serialize(self.buf, offset)
offset += a.len
self.actions_len += a.len
if self.data is not None:
assert self.buffer_id == 0xffffffff
self.buf += self.data
msg_pack_into(ofproto.OFP_PACKET_OUT_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.buffer_id, self.in_port, self.actions_len)
@_set_msg_type(ofproto.OFPT_FLOW_MOD)
class OFPFlowMod(MsgBase):
"""
Modify Flow entry message
The controller sends this message to modify the flow table.
================ ======================================================
Attribute Description
================ ======================================================
cookie Opaque controller-issued identifier
cookie_mask Mask used to restrict the cookie bits that must match
when the command is ``OPFFC_MODIFY*`` or
``OFPFC_DELETE*``
table_id ID of the table to put the flow in
command One of the following values.
OFPFC_ADD
OFPFC_MODIFY
OFPFC_MODIFY_STRICT
OFPFC_DELETE
OFPFC_DELETE_STRICT
idle_timeout Idle time before discarding (seconds)
hard_timeout Max time before discarding (seconds)
priority Priority level of flow entry
buffer_id Buffered packet to apply to (or OFP_NO_BUFFER)
out_port For ``OFPFC_DELETE*`` commands, require matching
entries to include this as an output port
out_group For ``OFPFC_DELETE*`` commands, require matching
entries to include this as an output group
flags One of the following values.
OFPFF_SEND_FLOW_REM
OFPFF_CHECK_OVERLAP
OFPFF_RESET_COUNTS
OFPFF_NO_PKT_COUNTS
OFPFF_NO_BYT_COUNTS
importance Eviction precedence
match Instance of ``OFPMatch``
instructions list of ``OFPInstruction*`` instance
================ ======================================================
Example::
def send_flow_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
cookie = cookie_mask = 0
table_id = 0
idle_timeout = hard_timeout = 0
priority = 32768
buffer_id = ofp.OFP_NO_BUFFER
importance = 0
match = ofp_parser.OFPMatch(in_port=1, eth_dst='ff:ff:ff:ff:ff:ff')
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
req = ofp_parser.OFPFlowMod(datapath, cookie, cookie_mask,
table_id, ofp.OFPFC_ADD,
idle_timeout, hard_timeout,
priority, buffer_id,
ofp.OFPP_ANY, ofp.OFPG_ANY,
ofp.OFPFF_SEND_FLOW_REM,
imporotance,
match, inst)
datapath.send_msg(req)
"""
def __init__(self, datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofproto.OFPFC_ADD,
idle_timeout=0, hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
buffer_id=ofproto.OFP_NO_BUFFER,
out_port=0, out_group=0, flags=0, importance=0,
match=None,
instructions=[]):
super(OFPFlowMod, self).__init__(datapath)
self.cookie = cookie
self.cookie_mask = cookie_mask
self.table_id = table_id
self.command = command
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.priority = priority
self.buffer_id = buffer_id
self.out_port = out_port
self.out_group = out_group
self.flags = flags
self.importance = importance
if match is None:
match = OFPMatch()
assert isinstance(match, OFPMatch)
self.match = match
for i in instructions:
assert isinstance(i, OFPInstruction)
self.instructions = instructions
def _serialize_body(self):
msg_pack_into(ofproto.OFP_FLOW_MOD_PACK_STR0, self.buf,
ofproto.OFP_HEADER_SIZE,
self.cookie, self.cookie_mask, self.table_id,
self.command, self.idle_timeout, self.hard_timeout,
self.priority, self.buffer_id, self.out_port,
self.out_group, self.flags, self.importance)
offset = (ofproto.OFP_FLOW_MOD_SIZE -
ofproto.OFP_MATCH_SIZE)
match_len = self.match.serialize(self.buf, offset)
offset += match_len
for inst in self.instructions:
inst.serialize(self.buf, offset)
offset += inst.len
class OFPInstruction(StringifyMixin):
_INSTRUCTION_TYPES = {}
@staticmethod
def register_instruction_type(types):
def _register_instruction_type(cls):
for type_ in types:
OFPInstruction._INSTRUCTION_TYPES[type_] = cls
return cls
return _register_instruction_type
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from('!HH', buf, offset)
cls_ = cls._INSTRUCTION_TYPES.get(type_)
return cls_.parser(buf, offset)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_GOTO_TABLE])
class OFPInstructionGotoTable(OFPInstruction):
"""
Goto table instruction
This instruction indicates the next table in the processing pipeline.
================ ======================================================
Attribute Description
================ ======================================================
table_id Next table
================ ======================================================
"""
def __init__(self, table_id, type_=None, len_=None):
super(OFPInstructionGotoTable, self).__init__()
self.type = ofproto.OFPIT_GOTO_TABLE
self.len = ofproto.OFP_INSTRUCTION_GOTO_TABLE_SIZE
self.table_id = table_id
@classmethod
def parser(cls, buf, offset):
(type_, len_, table_id) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_GOTO_TABLE_PACK_STR,
buf, offset)
return cls(table_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_INSTRUCTION_GOTO_TABLE_PACK_STR,
buf, offset, self.type, self.len, self.table_id)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_WRITE_METADATA])
class OFPInstructionWriteMetadata(OFPInstruction):
"""
Write metadata instruction
This instruction writes the masked metadata value into the metadata field.
================ ======================================================
Attribute Description
================ ======================================================
metadata Metadata value to write
metadata_mask Metadata write bitmask
================ ======================================================
"""
def __init__(self, metadata, metadata_mask, type_=None, len_=None):
super(OFPInstructionWriteMetadata, self).__init__()
self.type = ofproto.OFPIT_WRITE_METADATA
self.len = ofproto.OFP_INSTRUCTION_WRITE_METADATA_SIZE
self.metadata = metadata
self.metadata_mask = metadata_mask
@classmethod
def parser(cls, buf, offset):
(type_, len_, metadata, metadata_mask) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_WRITE_METADATA_PACK_STR,
buf, offset)
return cls(metadata, metadata_mask)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_INSTRUCTION_WRITE_METADATA_PACK_STR,
buf, offset, self.type, self.len, self.metadata,
self.metadata_mask)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_WRITE_ACTIONS,
ofproto.OFPIT_APPLY_ACTIONS,
ofproto.OFPIT_CLEAR_ACTIONS])
class OFPInstructionActions(OFPInstruction):
"""
Actions instruction
This instruction writes/applies/clears the actions.
================ ======================================================
Attribute Description
================ ======================================================
type One of following values.
OFPIT_WRITE_ACTIONS
OFPIT_APPLY_ACTIONS
OFPIT_CLEAR_ACTIONS
actions list of OpenFlow action class
================ ======================================================
``type`` attribute corresponds to ``type_`` parameter of __init__.
"""
def __init__(self, type_, actions=None, len_=None):
super(OFPInstructionActions, self).__init__()
self.type = type_
for a in actions:
assert isinstance(a, OFPAction)
self.actions = actions
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_ACTIONS_PACK_STR,
buf, offset)
offset += ofproto.OFP_INSTRUCTION_ACTIONS_SIZE
actions = []
actions_len = len_ - ofproto.OFP_INSTRUCTION_ACTIONS_SIZE
while actions_len > 0:
a = OFPAction.parser(buf, offset)
actions.append(a)
actions_len -= a.len
offset += a.len
inst = cls(type_, actions)
inst.len = len_
return inst
def serialize(self, buf, offset):
action_offset = offset + ofproto.OFP_INSTRUCTION_ACTIONS_SIZE
if self.actions:
for a in self.actions:
a.serialize(buf, action_offset)
action_offset += a.len
self.len = action_offset - offset
pad_len = utils.round_up(self.len, 8) - self.len
ofproto_parser.msg_pack_into("%dx" % pad_len, buf, action_offset)
self.len += pad_len
msg_pack_into(ofproto.OFP_INSTRUCTION_ACTIONS_PACK_STR,
buf, offset, self.type, self.len)
@OFPInstruction.register_instruction_type([ofproto.OFPIT_METER])
class OFPInstructionMeter(OFPInstruction):
"""
Meter instruction
This instruction applies the meter.
================ ======================================================
Attribute Description
================ ======================================================
meter_id Meter instance
================ ======================================================
"""
def __init__(self, meter_id, type_=None, len_=None):
super(OFPInstructionMeter, self).__init__()
self.type = ofproto.OFPIT_METER
self.len = ofproto.OFP_INSTRUCTION_METER_SIZE
self.meter_id = meter_id
@classmethod
def parser(cls, buf, offset):
(type_, len_, meter_id) = struct.unpack_from(
ofproto.OFP_INSTRUCTION_METER_PACK_STR,
buf, offset)
return cls(meter_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_INSTRUCTION_METER_PACK_STR,
buf, offset, self.type, self.len, self.meter_id)
class OFPActionHeader(StringifyMixin):
def __init__(self, type_, len_):
self.type = type_
self.len = len_
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_HEADER_PACK_STR,
buf, offset, self.type, self.len)
class OFPAction(OFPActionHeader):
_ACTION_TYPES = {}
@staticmethod
def register_action_type(type_, len_):
def _register_action_type(cls):
cls.cls_action_type = type_
cls.cls_action_len = len_
OFPAction._ACTION_TYPES[cls.cls_action_type] = cls
return cls
return _register_action_type
def __init__(self):
cls = self.__class__
super(OFPAction, self).__init__(cls.cls_action_type,
cls.cls_action_len)
@classmethod
def parser(cls, buf, offset):
type_, len_ = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
cls_ = cls._ACTION_TYPES.get(type_)
assert cls_ is not None
return cls_.parser(buf, offset)
@OFPAction.register_action_type(ofproto.OFPAT_OUTPUT,
ofproto.OFP_ACTION_OUTPUT_SIZE)
class OFPActionOutput(OFPAction):
"""
Output action
This action indicates output a packet to the switch port.
================ ======================================================
Attribute Description
================ ======================================================
port Output port
max_len Max length to send to controller
================ ======================================================
"""
def __init__(self, port, max_len=ofproto.OFPCML_MAX,
type_=None, len_=None):
super(OFPActionOutput, self).__init__()
self.port = port
self.max_len = max_len
@classmethod
def parser(cls, buf, offset):
type_, len_, port, max_len = struct.unpack_from(
ofproto.OFP_ACTION_OUTPUT_PACK_STR, buf, offset)
return cls(port, max_len)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_OUTPUT_PACK_STR, buf,
offset, self.type, self.len, self.port, self.max_len)
@OFPAction.register_action_type(ofproto.OFPAT_GROUP,
ofproto.OFP_ACTION_GROUP_SIZE)
class OFPActionGroup(OFPAction):
"""
Group action
This action indicates the group used to process the packet.
================ ======================================================
Attribute Description
================ ======================================================
group_id Group identifier
================ ======================================================
"""
def __init__(self, group_id, type_=None, len_=None):
super(OFPActionGroup, self).__init__()
self.group_id = group_id
@classmethod
def parser(cls, buf, offset):
(type_, len_, group_id) = struct.unpack_from(
ofproto.OFP_ACTION_GROUP_PACK_STR, buf, offset)
return cls(group_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_GROUP_PACK_STR, buf,
offset, self.type, self.len, self.group_id)
@OFPAction.register_action_type(ofproto.OFPAT_SET_QUEUE,
ofproto.OFP_ACTION_SET_QUEUE_SIZE)
class OFPActionSetQueue(OFPAction):
"""
Set queue action
This action sets the queue id that will be used to map a flow to an
already-configured queue on a port.
================ ======================================================
Attribute Description
================ ======================================================
queue_id Queue ID for the packets
================ ======================================================
"""
def __init__(self, queue_id, type_=None, len_=None):
super(OFPActionSetQueue, self).__init__()
self.queue_id = queue_id
@classmethod
def parser(cls, buf, offset):
(type_, len_, queue_id) = struct.unpack_from(
ofproto.OFP_ACTION_SET_QUEUE_PACK_STR, buf, offset)
return cls(queue_id)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_SET_QUEUE_PACK_STR, buf,
offset, self.type, self.len, self.queue_id)
@OFPAction.register_action_type(ofproto.OFPAT_SET_MPLS_TTL,
ofproto.OFP_ACTION_MPLS_TTL_SIZE)
class OFPActionSetMplsTtl(OFPAction):
"""
Set MPLS TTL action
This action sets the MPLS TTL.
================ ======================================================
Attribute Description
================ ======================================================
mpls_ttl MPLS TTL
================ ======================================================
"""
def __init__(self, mpls_ttl, type_=None, len_=None):
super(OFPActionSetMplsTtl, self).__init__()
self.mpls_ttl = mpls_ttl
@classmethod
def parser(cls, buf, offset):
(type_, len_, mpls_ttl) = struct.unpack_from(
ofproto.OFP_ACTION_MPLS_TTL_PACK_STR, buf, offset)
return cls(mpls_ttl)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_MPLS_TTL_PACK_STR, buf,
offset, self.type, self.len, self.mpls_ttl)
@OFPAction.register_action_type(ofproto.OFPAT_DEC_MPLS_TTL,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionDecMplsTtl(OFPAction):
"""
Decrement MPLS TTL action
This action decrements the MPLS TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionDecMplsTtl, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_SET_NW_TTL,
ofproto.OFP_ACTION_NW_TTL_SIZE)
class OFPActionSetNwTtl(OFPAction):
"""
Set IP TTL action
This action sets the IP TTL.
================ ======================================================
Attribute Description
================ ======================================================
nw_ttl IP TTL
================ ======================================================
"""
def __init__(self, nw_ttl, type_=None, len_=None):
super(OFPActionSetNwTtl, self).__init__()
self.nw_ttl = nw_ttl
@classmethod
def parser(cls, buf, offset):
(type_, len_, nw_ttl) = struct.unpack_from(
ofproto.OFP_ACTION_NW_TTL_PACK_STR, buf, offset)
return cls(nw_ttl)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_NW_TTL_PACK_STR, buf, offset,
self.type, self.len, self.nw_ttl)
@OFPAction.register_action_type(ofproto.OFPAT_DEC_NW_TTL,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionDecNwTtl(OFPAction):
"""
Decrement IP TTL action
This action decrements the IP TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionDecNwTtl, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_COPY_TTL_OUT,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionCopyTtlOut(OFPAction):
"""
Copy TTL Out action
This action copies the TTL from the next-to-outermost header with TTL to
the outermost header with TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionCopyTtlOut, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_COPY_TTL_IN,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionCopyTtlIn(OFPAction):
"""
Copy TTL In action
This action copies the TTL from the outermost header with TTL to the
next-to-outermost header with TTL.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionCopyTtlIn, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_PUSH_VLAN,
ofproto.OFP_ACTION_PUSH_SIZE)
class OFPActionPushVlan(OFPAction):
"""
Push VLAN action
This action pushes a new VLAN tag to the packet.
================ ======================================================
Attribute Description
================ ======================================================
ethertype Ether type. The default is 802.1Q. (0x8100)
================ ======================================================
"""
def __init__(self, ethertype=ether.ETH_TYPE_8021Q, type_=None, len_=None):
super(OFPActionPushVlan, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_PUSH_MPLS,
ofproto.OFP_ACTION_PUSH_SIZE)
class OFPActionPushMpls(OFPAction):
"""
Push MPLS action
This action pushes a new MPLS header to the packet.
================ ======================================================
Attribute Description
================ ======================================================
ethertype Ether type
================ ======================================================
"""
def __init__(self, ethertype=ether.ETH_TYPE_MPLS, type_=None, len_=None):
super(OFPActionPushMpls, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_POP_VLAN,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionPopVlan(OFPAction):
"""
Pop VLAN action
This action pops the outermost VLAN tag from the packet.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionPopVlan, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@OFPAction.register_action_type(ofproto.OFPAT_POP_MPLS,
ofproto.OFP_ACTION_POP_MPLS_SIZE)
class OFPActionPopMpls(OFPAction):
"""
Pop MPLS action
This action pops the MPLS header from the packet.
"""
def __init__(self, ethertype=ether.ETH_TYPE_IP, type_=None, len_=None):
super(OFPActionPopMpls, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_POP_MPLS_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_POP_MPLS_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_SET_FIELD,
ofproto.OFP_ACTION_SET_FIELD_SIZE)
class OFPActionSetField(OFPAction):
"""
Set field action
This action modifies a header field in the packet.
================ ======================================================
Attribute Description
================ ======================================================
field Instance of ``OFPMatchField``
================ ======================================================
"""
def __init__(self, field=None, **kwargs):
super(OFPActionSetField, self).__init__()
assert len(kwargs) == 1
key = kwargs.keys()[0]
value = kwargs[key]
assert isinstance(key, (str, unicode))
assert not isinstance(value, tuple) # no mask
self.key = key
self.value = value
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_SET_FIELD_PACK_STR, buf, offset)
(n, value, mask, _len) = ofproto.oxm_parse(buf, offset + 4)
k, uv = ofproto.oxm_to_user(n, value, mask)
action = cls(**{k: uv})
action.len = len_
return action
def serialize(self, buf, offset):
n, value, mask = ofproto.oxm_from_user(self.key, self.value)
len_ = ofproto.oxm_serialize(n, value, mask, buf, offset + 4)
self.len = utils.round_up(4 + len_, 8)
msg_pack_into('!HH', buf, offset, self.type, self.len)
pad_len = self.len - (4 + len_)
ofproto_parser.msg_pack_into("%dx" % pad_len, buf, offset + 4 + len_)
def to_jsondict(self):
return {
self.__class__.__name__: {
'field': ofproto.oxm_to_jsondict(self.key, self.value)
}
}
@classmethod
def from_jsondict(cls, dict_):
k, v = ofproto.oxm_from_jsondict(dict_['field'])
return OFPActionSetField(**{k: v})
def stringify_attrs(self):
yield (self.key, self.value)
@OFPAction.register_action_type(ofproto.OFPAT_PUSH_PBB,
ofproto.OFP_ACTION_PUSH_SIZE)
class OFPActionPushPbb(OFPAction):
"""
Push PBB action
This action pushes a new PBB header to the packet.
================ ======================================================
Attribute Description
================ ======================================================
ethertype Ether type
================ ======================================================
"""
def __init__(self, ethertype, type_=None, len_=None):
super(OFPActionPushPbb, self).__init__()
self.ethertype = ethertype
@classmethod
def parser(cls, buf, offset):
(type_, len_, ethertype) = struct.unpack_from(
ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset)
return cls(ethertype)
def serialize(self, buf, offset):
msg_pack_into(ofproto.OFP_ACTION_PUSH_PACK_STR, buf, offset,
self.type, self.len, self.ethertype)
@OFPAction.register_action_type(ofproto.OFPAT_POP_PBB,
ofproto.OFP_ACTION_HEADER_SIZE)
class OFPActionPopPbb(OFPAction):
"""
Pop PBB action
This action pops the outermost PBB service instance header from
the packet.
"""
def __init__(self, type_=None, len_=None):
super(OFPActionPopPbb, self).__init__()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@classmethod
def parser(cls, buf, offset):
(type_, len_) = struct.unpack_from(
ofproto.OFP_ACTION_HEADER_PACK_STR, buf, offset)
return cls()
@_set_msg_type(ofproto.OFPT_GROUP_MOD)
class OFPGroupMod(MsgBase):
"""
Modify group entry message
The controller sends this message to modify the group table.
================ ======================================================
Attribute Description
================ ======================================================
command One of the following values.
OFPGC_ADD
OFPGC_MODIFY
OFPGC_DELETE
type One of the following values.
OFPGT_ALL
OFPGT_SELECT
OFPGT_INDIRECT
OFPGT_FF
group_id Group identifier
buckets list of ``OFPBucket``
================ ======================================================
``type`` attribute corresponds to ``type_`` parameter of __init__.
Example::
def send_group_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
port = 1
max_len = 2000
actions = [ofp_parser.OFPActionOutput(port, max_len)]
weight = 100
watch_port = 0
watch_group = 0
buckets = [ofp_parser.OFPBucket(weight, watch_port, watch_group,
actions)]
group_id = 1
req = ofp_parser.OFPGroupMod(datapath, ofp.OFPGC_ADD,
ofp.OFPGT_SELECT, group_id, buckets)
datapath.send_msg(req)
"""
def __init__(self, datapath, command, type_, group_id, buckets):
super(OFPGroupMod, self).__init__(datapath)
self.command = command
self.type = type_
self.group_id = group_id
self.buckets = buckets
def _serialize_body(self):
msg_pack_into(ofproto.OFP_GROUP_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.command, self.type, self.group_id)
offset = ofproto.OFP_GROUP_MOD_SIZE
for b in self.buckets:
b.serialize(self.buf, offset)
offset += b.len
class OFPPortModPropEthernet(StringifyMixin):
_PACK_STR = '!HHI' # type, len, advertise
def __init__(self, type_=None, length=None, advertise=None):
self.type = type_
self.advertise = advertise
def serialize(self):
# fixup
self.length = struct.calcsize(self._PACK_STR)
buf = bytearray()
msg_pack_into(self._PACK_STR, buf, 0, self.type, self.length,
self.advertise)
return buf
@_set_msg_type(ofproto.OFPT_PORT_MOD)
class OFPPortMod(MsgBase):
"""
Port modification message
The controller sneds this message to modify the behavior of the port.
================ ======================================================
Attribute Description
================ ======================================================
port_no Port number to modify
hw_addr The hardware address that must be the same as hw_addr
of ``OFPPort`` of ``OFPSwitchFeatures``
config Bitmap of configuration flags.
OFPPC_PORT_DOWN
OFPPC_NO_RECV
OFPPC_NO_FWD
OFPPC_NO_PACKET_IN
mask Bitmap of configuration flags above to be changed
properties List of ``OFPPortProp`` subclass instance
================ ======================================================
Example::
def send_port_mod(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
port_no = 3
hw_addr = 'fa:c8:e8:76:1d:7e'
config = 0
mask = (ofp.OFPPC_PORT_DOWN | ofp.OFPPC_NO_RECV |
ofp.OFPPC_NO_FWD | ofp.OFPPC_NO_PACKET_IN)
advertise = (ofp.OFPPF_10MB_HD | ofp.OFPPF_100MB_FD |
ofp.OFPPF_1GB_FD | ofp.OFPPF_COPPER |
ofp.OFPPF_AUTONEG | ofp.OFPPF_PAUSE |
ofp.OFPPF_PAUSE_ASYM)
properties = ofp_parser.OFPPortModPropEthernet(advertise)
req = ofp_parser.OFPPortMod(datapath, port_no, hw_addr, config,
mask, properties)
datapath.send_msg(req)
"""
_TYPE = {
'ascii': [
'hw_addr',
]
}
def __init__(self, datapath, port_no, hw_addr, config, mask, properties):
super(OFPPortMod, self).__init__(datapath)
self.port_no = port_no
self.hw_addr = hw_addr
self.config = config
self.mask = mask
self.properties = properties
def _serialize_body(self):
bin_props = bytearray()
for p in self.properties:
bin_props += p.serialize()
msg_pack_into(ofproto.OFP_PORT_MOD_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.port_no, addrconv.mac.text_to_bin(self.hw_addr),
self.config,
self.mask)
self.buf += bin_props
class OFPBucket(StringifyMixin):
def __init__(self, weight, watch_port, watch_group, actions, len_=None):
super(OFPBucket, self).__init__()
self.weight = weight
self.watch_port = watch_port
self.watch_group = watch_group
self.actions = actions
@classmethod
def parser(cls, buf, offset):
(len_, weight, watch_port, watch_group) = struct.unpack_from(
ofproto.OFP_BUCKET_PACK_STR, buf, offset)
msg = cls(weight, watch_port, watch_group, [])
msg.len = len_
length = ofproto.OFP_BUCKET_SIZE
offset += ofproto.OFP_BUCKET_SIZE
while length < msg.len:
action = OFPAction.parser(buf, offset)
msg.actions.append(action)
offset += action.len
length += action.len
return msg
def serialize(self, buf, offset):
action_offset = offset + ofproto.OFP_BUCKET_SIZE
action_len = 0
for a in self.actions:
a.serialize(buf, action_offset)
action_offset += a.len
action_len += a.len
self.len = utils.round_up(ofproto.OFP_BUCKET_SIZE + action_len, 8)
msg_pack_into(ofproto.OFP_BUCKET_PACK_STR, buf, offset,
self.len, self.weight, self.watch_port,
self.watch_group)
@_set_msg_type(ofproto.OFPT_ROLE_REQUEST)
class OFPRoleRequest(MsgBase):
"""
Role request message
The controller uses this message to change its role.
================ ======================================================
Attribute Description
================ ======================================================
role One of the following values.
OFPCR_ROLE_NOCHANGE
OFPCR_ROLE_EQUAL
OFPCR_ROLE_MASTER
OFPCR_ROLE_SLAVE
generation_id Master Election Generation ID
================ ======================================================
Example::
def send_role_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPRoleRequest(datapath, ofp.OFPCR_ROLE_EQUAL, 0)
datapath.send_msg(req)
"""
def __init__(self, datapath, role=None, generation_id=None):
super(OFPRoleRequest, self).__init__(datapath)
self.role = role
self.generation_id = generation_id
def _serialize_body(self):
assert self.role is not None
assert self.generation_id is not None
msg_pack_into(ofproto.OFP_ROLE_REQUEST_PACK_STR,
self.buf, ofproto.OFP_HEADER_SIZE,
self.role, self.generation_id)
@_register_parser
@_set_msg_type(ofproto.OFPT_ROLE_REPLY)
class OFPRoleReply(MsgBase):
"""
Role reply message
The switch responds with this message to a role request.
================ ======================================================
Attribute Description
================ ======================================================
role One of the following values.
OFPCR_ROLE_NOCHANGE
OFPCR_ROLE_EQUAL
OFPCR_ROLE_MASTER
OFPCR_ROLE_SLAVE
generation_id Master Election Generation ID
================ ======================================================
Example::
@set_ev_cls(ofp_event.EventOFPRoleReply, MAIN_DISPATCHER)
def role_reply_handler(self, ev):
msg = ev.msg
ofp = dp.ofproto
if msg.role == ofp.OFPCR_ROLE_NOCHANGE:
role = 'NOCHANGE'
elif msg.role == ofp.OFPCR_ROLE_EQUAL:
role = 'EQUAL'
elif msg.role == ofp.OFPCR_ROLE_MASTER:
role = 'MASTER'
elif msg.role == ofp.OFPCR_ROLE_SLAVE:
role = 'SLAVE'
else:
role = 'unknown'
self.logger.debug('OFPRoleReply received: '
'role=%s generation_id=%d',
role, msg.generation_id)
"""
def __init__(self, datapath, role=None, generation_id=None):
super(OFPRoleReply, self).__init__(datapath)
self.role = role
self.generation_id = generation_id
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPRoleReply, cls).parser(datapath, version,
msg_type, msg_len, xid,
buf)
(msg.role, msg.generation_id) = struct.unpack_from(
ofproto.OFP_ROLE_REQUEST_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
return msg
@_set_msg_type(ofproto.OFPT_GET_ASYNC_REQUEST)
class OFPGetAsyncRequest(MsgBase):
"""
Get asynchronous configuration request message
The controller uses this message to query the asynchronous message.
Example::
def send_get_async_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPGetAsyncRequest(datapath)
datapath.send_msg(req)
"""
def __init__(self, datapath):
super(OFPGetAsyncRequest, self).__init__(datapath)
@_register_parser
@_set_msg_type(ofproto.OFPT_GET_ASYNC_REPLY)
class OFPGetAsyncReply(MsgBase):
"""
Get asynchronous configuration reply message
The switch responds with this message to a get asynchronous configuration
request.
================== ====================================================
Attribute Description
================== ====================================================
packet_in_mask 2-element array: element 0, when the controller has a
OFPCR_ROLE_EQUAL or OFPCR_ROLE_MASTER role. element 1,
OFPCR_ROLE_SLAVE role controller.
Bitmasks of following values.
OFPR_NO_MATCH
OFPR_ACTION
OFPR_INVALID_TTL
port_status_mask 2-element array.
Bitmasks of following values.
OFPPR_ADD
OFPPR_DELETE
OFPPR_MODIFY
flow_removed_mask 2-element array.
Bitmasks of following values.
OFPRR_IDLE_TIMEOUT
OFPRR_HARD_TIMEOUT
OFPRR_DELETE
OFPRR_GROUP_DELETE
================== ====================================================
Example::
@set_ev_cls(ofp_event.EventOFPGetAsyncReply, MAIN_DISPATCHER)
def get_async_reply_handler(self, ev):
msg = ev.msg
self.logger.debug('OFPGetAsyncReply received: '
'packet_in_mask=0x%08x:0x%08x '
'port_status_mask=0x%08x:0x%08x '
'flow_removed_mask=0x%08x:0x%08x',
msg.packet_in_mask[0],
msg.packet_in_mask[1],
msg.port_status_mask[0],
msg.port_status_mask[1],
msg.flow_removed_mask[0],
msg.flow_removed_mask[1])
"""
def __init__(self, datapath, packet_in_mask=None, port_status_mask=None,
flow_removed_mask=None):
super(OFPGetAsyncReply, self).__init__(datapath)
self.packet_in_mask = packet_in_mask
self.port_status_mask = port_status_mask
self.flow_removed_mask = flow_removed_mask
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPGetAsyncReply, cls).parser(datapath, version,
msg_type, msg_len,
xid, buf)
(packet_in_mask_m, packet_in_mask_s,
port_status_mask_m, port_status_mask_s,
flow_removed_mask_m, flow_removed_mask_s) = struct.unpack_from(
ofproto.OFP_ASYNC_CONFIG_PACK_STR, msg.buf,
ofproto.OFP_HEADER_SIZE)
msg.packet_in_mask = [packet_in_mask_m, packet_in_mask_s]
msg.port_status_mask = [port_status_mask_m, port_status_mask_s]
msg.flow_removed_mask = [flow_removed_mask_m, flow_removed_mask_s]
return msg
@_set_msg_type(ofproto.OFPT_SET_ASYNC)
class OFPSetAsync(MsgBase):
"""
Set asynchronous configuration message
The controller sends this message to set the asynchronous messages that
it wants to receive on a given OpneFlow channel.
================== ====================================================
Attribute Description
================== ====================================================
packet_in_mask 2-element array: element 0, when the controller has a
OFPCR_ROLE_EQUAL or OFPCR_ROLE_MASTER role. element 1,
OFPCR_ROLE_SLAVE role controller.
Bitmasks of following values.
OFPR_NO_MATCH
OFPR_ACTION
OFPR_INVALID_TTL
port_status_mask 2-element array.
Bitmasks of following values.
OFPPR_ADD
OFPPR_DELETE
OFPPR_MODIFY
flow_removed_mask 2-element array.
Bitmasks of following values.
OFPRR_IDLE_TIMEOUT
OFPRR_HARD_TIMEOUT
OFPRR_DELETE
OFPRR_GROUP_DELETE
================== ====================================================
Example::
def send_set_async(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
packet_in_mask = ofp.OFPR_ACTION | ofp.OFPR_INVALID_TTL
port_status_mask = (ofp.OFPPR_ADD | ofp.OFPPR_DELETE |
ofp.OFPPR_MODIFY)
flow_removed_mask = (ofp.OFPRR_IDLE_TIMEOUT |
ofp.OFPRR_HARD_TIMEOUT |
ofp.OFPRR_DELETE)
req = ofp_parser.OFPSetAsync(datapath,
[packet_in_mask, 0],
[port_status_mask, 0],
[flow_removed_mask, 0])
datapath.send_msg(req)
"""
def __init__(self, datapath,
packet_in_mask, port_status_mask, flow_removed_mask):
super(OFPSetAsync, self).__init__(datapath)
self.packet_in_mask = packet_in_mask
self.port_status_mask = port_status_mask
self.flow_removed_mask = flow_removed_mask
def _serialize_body(self):
msg_pack_into(ofproto.OFP_ASYNC_CONFIG_PACK_STR, self.buf,
ofproto.OFP_HEADER_SIZE,
self.packet_in_mask[0], self.packet_in_mask[1],
self.port_status_mask[0], self.port_status_mask[1],
self.flow_removed_mask[0], self.flow_removed_mask[1])
| 36.296783
| 79
| 0.568498
|
b8d0ebf5afa5ac80f6d42e72c50180cf0816f36c
| 5,971
|
py
|
Python
|
tests/lastfm/commands/test_cmd_add.py
|
Starz0r/pytuber
|
5bb53edde6a39cedec48c4a8f41ba22db21d4727
|
[
"MIT"
] | 8
|
2019-01-27T00:52:20.000Z
|
2021-07-15T15:57:19.000Z
|
tests/lastfm/commands/test_cmd_add.py
|
Starz0r/pytuber
|
5bb53edde6a39cedec48c4a8f41ba22db21d4727
|
[
"MIT"
] | 22
|
2019-01-25T14:57:08.000Z
|
2021-12-13T19:55:04.000Z
|
tests/lastfm/commands/test_cmd_add.py
|
Starz0r/pytuber
|
5bb53edde6a39cedec48c4a8f41ba22db21d4727
|
[
"MIT"
] | 4
|
2019-02-17T09:56:30.000Z
|
2021-04-17T17:53:13.000Z
|
from unittest import mock
from pytuber import cli
from pytuber.core.models import PlaylistManager, Provider
from pytuber.lastfm.models import PlaylistType, UserPlaylistType
from pytuber.lastfm.params import (
ArtistParamType,
CountryParamType,
TagParamType,
UserParamType,
)
from tests.utils import CommandTestCase, PlaylistFixture
class CommandAddTests(CommandTestCase):
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(UserParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_user_playlist(self, create_playlist, convert, fetch_tracks):
convert.return_value = "bbb"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli,
["add", "lastfm", "user-playlist"],
input="\n".join(("aaa", "2", "50", "My Favorite ")),
catch_exceptions=False,
)
expected_output = (
"Last.fm username: aaa",
"Playlist Types",
"[1] User Loved Tracks",
"[2] User Top Tracks",
"[3] User Recent Tracks",
"[4] User Friends Recent Tracks",
"Select a playlist type 1-4: 2",
"Maximum tracks [50]: 50",
"Title: My Favorite ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
dict(
type=UserPlaylistType.USER_TOP_TRACKS,
provider=Provider.lastfm,
arguments=dict(limit=50, username="bbb"),
title="My Favorite",
)
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(PlaylistManager, "set")
def test_chart_playlist(self, create_playlist, fetch_tracks):
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli, ["add", "lastfm", "chart-playlist"], input="50\n "
)
expected_output = (
"Maximum tracks [50]: 50",
"Title: ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
dict(
type=PlaylistType.CHART,
provider=Provider.lastfm,
arguments=dict(limit=50),
title="",
)
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(CountryParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_country_playlist(
self, create_playlist, country_param_type, fetch_tracks
):
country_param_type.return_value = "greece"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli, ["add", "lastfm", "country-playlist"], input=b"gr\n50\n "
)
expected_output = (
"Country Code: gr",
"Maximum tracks [50]: 50",
"Title: ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
dict(
type=PlaylistType.COUNTRY,
provider=Provider.lastfm,
arguments=dict(limit=50, country="greece"),
title="",
)
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(TagParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_tag_playlist(self, create_playlist, convert, fetch_tracks):
convert.return_value = "rock"
create_playlist.return_value = PlaylistFixture.one(synced=111)
result = self.runner.invoke(
cli, ["add", "lastfm", "tag-playlist"], input="rock\n50\n "
)
expected_output = (
"Tag: rock",
"Maximum tracks [50]: 50",
"Title: ",
"Updated playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
dict(
type=PlaylistType.TAG,
provider=Provider.lastfm,
arguments=dict(limit=50, tag="rock"),
title="",
)
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(ArtistParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_artist_playlist(
self, create_playlist, artist_param, fetch_tracks
):
artist_param.return_value = "Queen"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli,
["add", "lastfm", "artist-playlist"],
input="Queen\n50\nQueen....",
catch_exceptions=False,
)
expected_output = (
"Artist: Queen",
"Maximum tracks [50]: 50",
"Title: Queen....",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
dict(
type=PlaylistType.ARTIST,
provider=Provider.lastfm,
arguments=dict(limit=50, artist="Queen"),
title="Queen....",
)
)
fetch_tracks.assert_called_once_with("id_a")
| 35.331361
| 74
| 0.587674
|
fac20aba3508bc35ba4ef852e19adbaa0bf5d979
| 19,965
|
py
|
Python
|
lodbox/testy_mctestface.py
|
teessider/LODBox
|
702bb8309365146b0174e5fe044c683cf4f7679d
|
[
"MIT"
] | 6
|
2019-11-06T15:52:45.000Z
|
2022-01-24T09:09:26.000Z
|
lodbox/testy_mctestface.py
|
teessider/LODBox
|
702bb8309365146b0174e5fe044c683cf4f7679d
|
[
"MIT"
] | null | null | null |
lodbox/testy_mctestface.py
|
teessider/LODBox
|
702bb8309365146b0174e5fe044c683cf4f7679d
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, absolute_import
import os
import fbx
import FbxCommon
import lodbox.fbx_io
import lodbox.scene
file_paths = {'Attributes': "Sphere_Attr.fbx",
'lodGroup': "Sphere_lodGroup.fbx",
'lodGroup_Max': "Sphere_lodGroup_Max.FBX",
'Group_lods': "Sphere_group_lods.fbx",
'MergeSceneTest01': "MergeSceneTest01.FBX",
'MergeSceneTest02': "MergeSceneTest02.FBX",
'MergeSceneTest03': "MergeSceneTest03.FBX",
'MergeSceneTest_Merged': "MergeSceneTest_Merged.FBX",
'test_merged_scenes': os.path.join(os.getcwd(), "test_merged_scenes.fbx")
} # Hardcoded for now.
# The full path gets injected into the DocumentUrl when exporting. If there is just a filename, then it gets exported into the current working directory.
# FbxCommon contains some helper functions to get rid of some of the boilerplate
manager, scene = FbxCommon.InitializeSdkObjects()
global_settings = scene.GetGlobalSettings() # type: fbx.FbxGlobalSettings
ImportFBX = lodbox.fbx_io.import_scene
# FbxCommon.LoadScene(manager, scene, file_paths['lodGroup_Max'])
FbxCommon.LoadScene(manager, scene, file_paths['lodGroup'])
# ImportFBX(manager, scene, file_paths['Group_lods'])
# ImportFBX(manager, scene, file_paths['MergeSceneTest01'])
# FbxCommon.LoadScene(manager, scene, file_paths['MergeSceneTest_Merged'])
root_node = scene.GetRootNode() # type: fbx.FbxNode
# 1st method of getting high-level scene nodes
# Using root_node.GetChildCount(True) (returns number of children with recursion)
# to get all scene nodes but returns None for grandchildren and lower
scene_nodes = [root_node.GetChild(i) for i in range(root_node.GetChildCount())]
print("Total number of nodes in the scene are: {0}\n"
"The root node is: {1}\nScene Units: {2}\n{3}: Z-UP".format(root_node.GetChildCount(True),
root_node.GetName(),
global_settings.GetSystemUnit().GetScaleFactorAsString(),
global_settings.GetOriginalUpAxis()))
for node in scene_nodes:
node_attr = node.GetNodeAttribute()
# # FbxNull > FbxLODGroup # #
# Necessary for making LOD Groups OUTSIDE of 3ds Max and Maya.
# It's not SOO bad in Maya but it is still a black box in terms of scripting.
if isinstance(node_attr, fbx.FbxNull):
# # FbxNull nodes are what 'groups' are in Maya.
# # 3ds Max can create these and can export them but doesn't convert to a native group on import?!
#
# # In order to turn a group into a LOD group, the LOD Group first needs to be created with all the trimmings
# lod_group_attr = fbx.FbxLODGroup.Create(manager, '') # type: fbx.FbxLODGroup
#
# lod_group_attr.WorldSpace.Set(False)
# lod_group_attr.MinMaxDistance.Set(False)
# lod_group_attr.MinDistance.Set(0.0)
# lod_group_attr.MaxDistance.Set(0.0)
#
# child_num = node.GetChildCount()
#
# for x in range(child_num):
# child = node.GetChild(x)
# print(child.GetName())
#
# # # CUSTOM ATTRIBUTE REMOVING # #
# # Because of a MAXScript error on import
# # Custom Attributes should be removed if part of a LOD Group?! (they are still there when the error pops up just not in UI)
# # UPDATE: IT WORKS :D (ONly now no Custom Attributes :( But see lower implementation for full explanation and possible ideas)
# child_properties = []
# child_prop = child.GetFirstProperty() # type: fbx.FbxProperty
# while child_prop.IsValid():
# if child_prop.GetFlag(fbx.FbxPropertyFlags.eUserDefined):
# child_properties.append(child_prop)
# child_prop = child.GetNextProperty(child_prop)
# for prop in child_properties:
# prop.DisconnectAllSrcObject()
# prop.Destroy()
# # # END OF CUSTOM ATTRIBUTE REMOVING # #
#
# # Add some thresholds!
# # LOD Groups produced from Max/Maya do not create thresholds for all the children.
# # They do not make one for the last LOD - not exactly sure why but i have replicated that here with great success!
# # Just use some random values for testing. Doesn't matter with UE4 at least.
# # It won't matter either with Max/Maya as I will add/remove the LOD Group attribute on export/import
# if x == (child_num - 1):
# continue
# elif x == 0:
# threshold = fbx.FbxDistance((x + 1) * 12.0, '')
# else:
# threshold = fbx.FbxDistance(x * 20, '')
#
# lod_group_attr.AddThreshold(threshold)
#
# lod_group_attr.SetDisplayLevel(x, 0) # UseLOD DisplayLevel - Default in Maya :)
#
# print(lod_group_attr.GetThreshold(x), lod_group_attr.GetDisplayLevel(x))
#
# node.SetNodeAttribute(lod_group_attr) # This is VIP!!! Don't forget about this again! xD
lodbox.scene.create_lod_group_attribute(manager, node)
lodbox.fbx_io.export_fbx(manager, scene, lodbox.fbx_io.FBX_VERSION['2014'], "test_lod_group", lodbox.fbx_io.FBX_FORMAT['Binary'])
# # FbxLODGroup > FbxNull # #
# "Extracting" normal meshes out of LOD Groups so don't have to deal with that in 3ds Max/Maya (at least 1st steps)
elif isinstance(node_attr, fbx.FbxLODGroup):
# # Need to parent the old LOD group children to a new empty 'group' node
# # (A node with NULL properties)
# # Make sure it's destroyed as it's not needed anymore ;)
# # Get the children in the group first
# lod_group_nodes = lodbox.scene.get_children(node)
#
# # But 1st - those attributes need to be cleaned up! (for testing purposes)
# # group_props = [] # for seeing all custom properties on all objects
# for group_node in lod_group_nodes:
# print(group_node.GetName())
# # count = 0
#
# # Because of the C++ nature of SDK (and these bindings), a normal for loop is not possible for collecting properties
# # A collection must be made with a while loop
# properties = []
# group_node_prop = group_node.GetFirstProperty() # type: fbx.FbxProperty
# while group_node_prop.IsValid():
# # count += 1
# # Only the User-defined Properties are wanted (defined by user and not by SDK)
# # These are Custom Attributes from Maya (and 3ds Max)
# # AND User-defined Properties from 3ds Max (mmm perhaps something to convert to/from Custom Attributes on export/import?)
# if group_node_prop.GetFlag(fbx.FbxPropertyFlags.eUserDefined):
# properties.append(group_node_prop)
# # group_props.append(properties) # for seeing all custom properties on all objects
# group_node_prop = group_node.GetNextProperty(group_node_prop)
#
# for custom_prop in properties:
# data = custom_prop.GetPropertyDataType() # type: fbx.FbxDataType
#
# if data.GetType() == fbx.eFbxString:
# custom_prop = fbx.FbxPropertyString(custom_prop)
#
# # This is not needed when importing into 3ds Max as it is passed to the UV Channel directly (See Channel Info.. Window).
# # Not sure about Maya but when re-imported it still works without it (when removed after)? - Needs testing with multiple UV channels
# if custom_prop.GetName() == 'currentUVSet':
# # Destroying the property while connected seems to fuck up the rest of the properties so be sure to disconnect it first!
# custom_prop.DisconnectAllSrcObject()
# custom_prop.Destroy()
#
# # This comes from Maya UV set names being injected into the User-Defined Properties in 3ds Max (NOT Custom Attributes) thus creating crap data.
# # Unless cleaned up/converted on import/export or utilised in a meaningful way, this can be removed.
# # Further testing needs to be done with this. (Relates to Custom Attributes and User-Defined Properties earlier talk)
# elif custom_prop.GetName() == 'UDP3DSMAX':
# custom_prop.DisconnectAllSrcObject()
# custom_prop.Destroy()
#
# else:
# print("{}\n type: {}\n\tValue: {}".format(custom_prop.GetName(), data.GetName(), custom_prop.Get()))
#
# elif data.GetType() == fbx.eFbxInt:
# custom_prop = fbx.FbxPropertyInteger1(custom_prop)
#
# # This comes from 3ds Max as well - Not sure where this comes from xD
# # Doesn't seem to have any effect though??
# if custom_prop.GetName() == 'MaxHandle':
# custom_prop.DisconnectAllSrcObject()
# custom_prop.Destroy()
#
# elif custom_prop.HasMinLimit() and custom_prop.HasMaxLimit():
# print("{}\n type: {}\n\tValue: {}\n\tMinLimit: {}\n\tMaxLimit: {}".format(custom_prop.GetName(), data.GetName(),
# custom_prop.Get(), custom_prop.GetMinLimit(),
# custom_prop.GetMaxLimit()))
# else:
# print("{}\n type: {}\n\tValue: {}".format(custom_prop.GetName(), data.GetName(), custom_prop.Get()))
#
# elif data.GetType() == fbx.eFbxBool:
# custom_prop = fbx.FbxPropertyBool1(custom_prop)
# print("{}\n type: {}\n\tValue: {}".format(custom_prop.GetName(), data.GetName(), custom_prop.Get()))
#
# elif data.GetType() == fbx.eFbxDouble: # Number type - Similar to float but instead of 32-bit data type, 64-bit data type.
# custom_prop = fbx.FbxPropertyDouble1(custom_prop)
# if custom_prop.HasMinLimit() and custom_prop.HasMaxLimit():
# print("{}\n type: {}\n\tValue: {}\n\tMinLimit: {}\n\tMaxLimit: {}".format(custom_prop.GetName(), data.GetName(),
# custom_prop.Get(), custom_prop.GetMinLimit(),
# custom_prop.GetMaxLimit()))
# else:
# print("\tValue: {}".format(custom_prop.Get()))
#
# # After All of this, ONLY our Custom Attributes should be left (and NOT any weird 3ds Max stuff xD )
# # Now to finally remove all of them (ONLY FOR TESTING PURPOSES)
# custom_prop.DisconnectAllSrcObject()
# custom_prop.Destroy()
#
# # Now that we have done what wanted to do, it is time to destroy the LOD Group node (the children are safely somewhere else)
# node.DisconnectAllSrcObject()
# node.Destroy()
#
# new_group = fbx.FbxNode.Create(manager, 'group')
# for lod_grp_node in lod_group_nodes:
# new_group.AddChild(lod_grp_node)
#
# root_node.AddChild(new_group) # Make sure it's in the scene!
node = lodbox.scene.convert_node_to_null(manager, node)
node_attr = node.GetNodeAttribute()
# TODO: Now that lod group attr > null is done, extracting meshes as individual files could be done
# (maybe with selection somehow say - only want to change LOD3 and then recombine)
# lodbox.fbx_io.export_fbx(manager, scene, version=lodbox.fbx_io.FBX_VERSION['2014'], filename="test_no_lod_group", file_format=lodbox.fbx_io.FBX_FORMAT['Binary'])
# # EXTRACTING MESHES
lod_group_children = lodbox.scene.get_children(node)
if lod_group_children:
for child in lod_group_children: # type: fbx.FbxNode
# TODO: MAKE A NEW TEMP SCENE FOR EXPORTING EACH THING (OR REUSE EXISTING ONE)
# AS USING SCENE JUST EXPORTED THE SCENE ALL THE TIME :D
lodbox.fbx_io.export_fbx(manager, scene, version=lodbox.fbx_io.FBX_VERSION['2014'], filename=child.GetName(), file_format=lodbox.fbx_io.FBX_FORMAT['Binary'])
else:
raise IndexError
manager.Destroy()
# # Merging Scenes Test # #
# Starting with MergeTestScene01
elif node.GetName() == "Sphere001":
reference_scene = lodbox.scene.merge_scenes(manager, scene, (file_paths['MergeSceneTest02'], file_paths['MergeSceneTest03']))
# # Create a new scene to hold the already imported scene (probably can just the original normally but this is useful for testing ;) )
# reference_scene = fbx.FbxScene.Create(manager, "ReferenceScene")
# # Start moving stuff to new scene (already have the scene nodes in list from above)
# ref_scene_root = reference_scene.GetRootNode() # type: fbx.FbxNode
#
# # Since the default Axis System is Y-Up and because these are brand new settings (its made with a scene along with FbxAnimEvaluator and a Root Node),
# # the axis needs to be set to the same as the original imported scene!
# orig_axis_sys = fbx.FbxAxisSystem(global_settings.GetAxisSystem())
# orig_axis_sys.ConvertScene(reference_scene)
#
# # Because this is a test, the original scene_nodes list is used, otherwise this would be the
# # MergeTestScene01 nodes.
# for x in range(len(scene_nodes)):
# child = scene_nodes[x]
# ref_scene_root.AddChild(child)
# # Although the original Sphere001 is attached to new Reference Scene root node, it is still connected to the old one
# # so the connections need to be removed. And because there could be lots of children, its better to disconnect the root node from the children.
# root_node.DisconnectAllSrcObject()
# print(fbx_obj.GetName(),
# type(fbx_obj), issubclass(type(fbx_obj), (fbx.FbxGlobalSettings, fbx.FbxAnimEvaluator, fbx.FbxAnimStack, fbx.FbxAnimLayer)),
# issubclass(type(fbx_obj), type(source_scene_root)), isinstance(fbx_obj, type(source_scene_root))
# )
#
# # Because the scene Object also has connections to other types of FBX objects, they need to be moved too.
# # (I'm guessing) Also since there is only a single mesh in the FBX, the scene has connections to that too.
# for x in range(scene.GetSrcObjectCount()):
# fbx_obj = scene.GetSrcObject(x) # type: fbx.FbxObject
# print(type(fbx_obj), fbx_obj.ClassId)
# # Don't want to move the root node, the global settings or the Animation Evaluator (at this point)
# # Can use type(fbx_obj), fbx_obj.GetClassId() or fbx_obj.ClassId to type check
# if fbx_obj == root_node or \
# fbx_obj.ClassId == fbx.FbxGlobalSettings.ClassId or \
# type(fbx_obj) == fbx.FbxAnimEvaluator or \
# fbx_obj.ClassId == fbx.FbxAnimStack.ClassId or \
# fbx_obj.ClassId == fbx.FbxAnimLayer.ClassId:
# continue
# else:
# fbx_obj.ConnectDstObject(reference_scene)
#
# # Now the scene can be disconnected as everything has been moved!
# scene.DisconnectAllSrcObject()
#
# print("merged stuff starts from here")
# # Now that the first scene has been moved from the original and disconnected, time to start
# # merging MergeTestScene02.
# # It seems a new scene HAS to be created for each scene (perhaps revisit this at some point?)
# # So start off with creating/loading scene to merge in
# # EDIT: NOPE - I WAS JUST ALWAYS EXPORTING THE ORIGINAL SCENE - The original scene can be used! :D
# FbxCommon.LoadScene(manager, scene, file_paths['MergeSceneTest02'])
# scene_nodes = [root_node.GetChild(i) for i in range(root_node.GetChildCount())]
#
# # Repeat adding the new scene nodes to the reference scene and disconnecting to old one
# for x in range(len(scene_nodes)):
# child = scene_nodes[x]
# ref_scene_root.AddChild(child)
# root_node.DisconnectAllSrcObject()
#
# # # Move other types of scene objects again
# for x in range(scene.GetSrcObjectCount()):
# fbx_obj = scene.GetSrcObject(x) # type: fbx.FbxObject
# # Don't want to move the root node, the global settings or the Animation Evaluator (at this point)
# if fbx_obj == root_node or fbx_obj.GetClassId() == fbx.FbxGlobalSettings.ClassId or type(
# fbx_obj) == fbx.FbxAnimEvaluator or fbx_obj.ClassId == fbx.FbxAnimStack.ClassId or fbx_obj.ClassId == fbx.FbxAnimLayer.ClassId:
# continue
# else:
# fbx_obj.ConnectDstObject(reference_scene)
# scene.DisconnectAllSrcObject() # DON'T FORGET TO DISCONNECT THE ORIGINAL SCENE FROM THE MOVED OBJECTS!
#
# # ## 2nd MERGE STUFF
# FbxCommon.LoadScene(manager, scene, file_paths['MergeSceneTest03'])
# scene_nodes = [root_node.GetChild(i) for i in range(root_node.GetChildCount())]
# for x in range(len(scene_nodes)):
# child = scene_nodes[x]
# ref_scene_root.AddChild(child)
# root_node.DisconnectAllSrcObject()
#
# # Move other types of scene objects again
# for x in range(scene.GetSrcObjectCount()):
# fbx_obj = scene.GetSrcObject(x) # type: fbx.FbxObject
# # Don't want to move the root node, the global settings or the Animation Evaluator (at this point)
# if fbx_obj == root_node or \
# fbx_obj.GetClassId() == fbx.FbxGlobalSettings.ClassId or \
# type(fbx_obj) == fbx.FbxAnimEvaluator or \
# fbx_obj.ClassId == fbx.FbxAnimStack.ClassId or \
# fbx_obj.ClassId == fbx.FbxAnimLayer.ClassId:
# continue
# else:
# print(fbx_obj.GetClassId().GetName())
# fbx_obj.ConnectDstObject(reference_scene)
# scene.DisconnectAllSrcObject() # DON'T FORGET TO DISCONNECT THE ORIGINAL SCENE FROM THE MOVED OBJECTS!
# ## FBX EXPORT ##
# Okay so it works! BUT it seems to be almost double the file size than if I would have exported them from 3ds Max (or Maya)?!
# EDIT: I found the cause :D when comparing the files as ASCII, the FBX version has Tangents and Binormals so that is the extra data :)
# Normally, I don't export these so hence my confusion! I wonder if they can be excluded....?
lodbox.fbx_io.export_fbx(manager, reference_scene, lodbox.fbx_io.FBX_VERSION['2014'], file_paths['test_merged_scenes'], lodbox.fbx_io.FBX_FORMAT['ASCII'])
for x in range(reference_scene.GetSrcObjectCount()):
print(reference_scene.GetSrcObject(x), reference_scene.GetSrcObject(x).GetName())
manager.Destroy()
else:
print(node.GetName(), type(node))
| 60.135542
| 173
| 0.607864
|
875032a11297392038a2e01daca95d99ac6f187c
| 1,738
|
py
|
Python
|
apps/utils/format/obj_format.py
|
zhoujun/sometitle
|
65438cd4cbc528251aefa0a020d93516ccb8ee21
|
[
"BSD-2-Clause"
] | null | null | null |
apps/utils/format/obj_format.py
|
zhoujun/sometitle
|
65438cd4cbc528251aefa0a020d93516ccb8ee21
|
[
"BSD-2-Clause"
] | null | null | null |
apps/utils/format/obj_format.py
|
zhoujun/sometitle
|
65438cd4cbc528251aefa0a020d93516ccb8ee21
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
obj_format.py
~~~~~~~~~~~~~~~~~~~
:author: Finger
:license: BSD, see LICENSE for more details.
"""
import json
import sys
import regex as re
from pymongo.cursor import Cursor
def obj_id_to_str(data, fields=["_id"]):
if isinstance(data, (list, Cursor)):
_data = []
for d in data:
for field in fields:
d[field] = str(d[field])
_data.append(d)
return _data
else:
data_keys = data.keys()
for field in fields:
if field in data_keys:
data[field] = str(data[field])
return data
def json_to_py_seq(json_):
if json_ in [None, "None"]:
return None
elif not isinstance(json_, (list, dict, tuple)) and json_ != "":
if isinstance(json_, (str, bytes)) and json_[0] not in ["{", "["]:
return json_
try:
json_ = json.loads(json_)
except:
json_ = eval(json_)
else:
if isinstance(json_, str):
json_ = eval(json_)
return json_
def str_to_num(str_):
try:
return int(str_)
except:
if str_:
return 1
elif not str_ or str_.lower() == "false":
return 0
class ConfigToClass(object):
def __init__(self, config, key=None):
if not isinstance(config, dict):
print("[Error] config must be a dictionary")
sys.exit(-1)
if key == "value":
for k, v in config.items():
if not re.search(r"^__.*__$", k):
self.__dict__[k] = v["value"]
else:
for k, v in config.items():
self.__dict__[k] = v
| 23.808219
| 74
| 0.506329
|
787caa28b87ef6d4c0d8dd6a59cbaade0de8b6ff
| 762
|
py
|
Python
|
camera.py
|
mcnichol/chicky-cam
|
18a5cc6b7eaff6ff9a39ea55f0634124478f3bcc
|
[
"MIT"
] | null | null | null |
camera.py
|
mcnichol/chicky-cam
|
18a5cc6b7eaff6ff9a39ea55f0634124478f3bcc
|
[
"MIT"
] | null | null | null |
camera.py
|
mcnichol/chicky-cam
|
18a5cc6b7eaff6ff9a39ea55f0634124478f3bcc
|
[
"MIT"
] | null | null | null |
#Modified by smartbuilds.io
#Date: 27.09.20
#Desc: This scrtipt script..
import cv2
from imutils.video.pivideostream import PiVideoStream
import imutils
import time
#import numpy as np
class VideoCamera(object):
def __init__(self, flip = False):
print("Starting Stream...")
self.vs = PiVideoStream().start()
print("Started")
self.flip = flip
time.sleep(2.0)
def __del__(self):
self.vs.stop()
# def flip_if_needed(self, frame):
# if self.flip:
# return np.flip(frame, 0)
# return frame
def get_frame(self):
# frame = self.flip_if_needed(self.vs.read())
frame = self.vs.read()
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
| 23.090909
| 53
| 0.615486
|
498c313f00ec287f5347ee9daf504353d3c3e04a
| 46
|
py
|
Python
|
tests/__init__.py
|
trewjames/receipt-tracker
|
1f1768741f8599252f9bb153f111b245bdc995d4
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
trewjames/receipt-tracker
|
1f1768741f8599252f9bb153f111b245bdc995d4
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
trewjames/receipt-tracker
|
1f1768741f8599252f9bb153f111b245bdc995d4
|
[
"MIT"
] | null | null | null |
"""Unit test package for receipt_tracker."""
| 23
| 45
| 0.717391
|
694ef8be13edab2bc4007422c56c2ce9c126a393
| 3,806
|
py
|
Python
|
moxing_metric.py
|
baobrian/deep-speaker
|
850f160375c4e9d1c95a73c722f8c068aa4bdb6e
|
[
"Apache-2.0"
] | null | null | null |
moxing_metric.py
|
baobrian/deep-speaker
|
850f160375c4e9d1c95a73c722f8c068aa4bdb6e
|
[
"Apache-2.0"
] | null | null | null |
moxing_metric.py
|
baobrian/deep-speaker
|
850f160375c4e9d1c95a73c722f8c068aa4bdb6e
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import shutil
import time
from argparse import ArgumentParser
import random
import numpy as np
from audio_reader import AudioReader
from constants import c
from utils import InputsGenerator
def arg_parse():
arg_p = ArgumentParser()
arg_p.add_argument('--audio_dir', required=True)
arg_p.add_argument('--cache_output_dir', required=True)
arg_p.add_argument('--regenerate_full_cache', action='store_true')
arg_p.add_argument('--update_cache', action='store_true')
arg_p.add_argument('--generate_training_inputs', action='store_true')
arg_p.add_argument('--multi_threading', action='store_true')
arg_p.add_argument('--unseen_speakers') # p225,p226 example.
arg_p.add_argument('--get_embeddings') # p225 example.
return arg_p
def regenerate_full_cache(audio_reader, args):
cache_output_dir = os.path.expanduser(args.cache_output_dir)
print('The directory containing the cache is {}.'.format(cache_output_dir))
print('Going to wipe out and regenerate the cache in 5 seconds. Ctrl+C to kill this script.')
time.sleep(5)
try:
shutil.rmtree(cache_output_dir)
except:
pass
os.makedirs(cache_output_dir)
audio_reader.build_cache()
def generate_cache_from_training_inputs(audio_reader, args):
cache_dir = os.path.expanduser(args.cache_output_dir)
inputs_generator = InputsGenerator(cache_dir=cache_dir,
audio_reader=audio_reader,
max_count_per_class=1000,
speakers_sub_list=None,
multi_threading=args.multi_threading)
inputs_generator.start_generation()
def main():
args = arg_parse().parse_args()
audio_reader = AudioReader(input_audio_dir=args.audio_dir,
output_cache_dir=args.cache_output_dir,
sample_rate=c.AUDIO.SAMPLE_RATE,
multi_threading=args.multi_threading)
# if args.regenerate_full_cache:
# regenerate_full_cache(audio_reader, args)
# exit(1)
#
# if args.update_cache:
# audio_reader.build_cache()
# exit(1)
#
# if args.generate_training_inputs:
# generate_cache_from_training_inputs(audio_reader, args)
# exit(1)
if args.unseen_speakers is not None:
unseen_speakers = [x.strip() for x in args.unseen_speakers.split(',')]
from unseen_speakers import inference_unseen_speakers
list=c.AUDIO.SPEAKERS_TRAINING_SET
scores_vector=np.zeros((len(list)+1,1))
target_label_vector=np.zeros((len(list)+1,1))
for index,compont in enumerate(list):
# compare_speaker=random.choice(speakers)
score=inference_unseen_speakers(audio_reader, unseen_speakers[0],compont)
scores_vector[index]=score
if unseen_speakers[0]==compont:
target_label_vector[index]=1
else:
target_label_vector[index]=0
score = inference_unseen_speakers(audio_reader, unseen_speakers[0], unseen_speakers[1])
scores_vector[index+1]=score
target_label_vector[index+1]=1
np.save('./roc_data/scores_vector.npy',scores_vector)
np.save('./roc_data/target_label_vector.npy',target_label_vector)
print('roc data finished')
exit(1)
# if args.get_embeddings is not None:
# speaker_id = args.get_embeddings.strip()
# from unseen_speakers import inference_embeddings
# inference_embeddings(audio_reader, speaker_id)
# exit(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
main()
| 37.313725
| 97
| 0.665791
|
36f9e24de357a03ead0250507ab5512baefcf46a
| 5,529
|
py
|
Python
|
python/oneflow/test/modules/test_clamp.py
|
LiPengze97/oneflow
|
1c1d2d3faa1c02d20e009046a290cf1095ee12e0
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/modules/test_clamp.py
|
LiPengze97/oneflow
|
1c1d2d3faa1c02d20e009046a290cf1095ee12e0
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/modules/test_clamp.py
|
LiPengze97/oneflow
|
1c1d2d3faa1c02d20e009046a290cf1095ee12e0
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_clamp(test_case, shape, device):
input = flow.tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.clamp(input, 0.1, 0.5)
np_out = np.clip(input.numpy(), 0.1, 0.5)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def _test_tensor_clamp(test_case, shape, device):
input = flow.tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
of_out = input.clamp(0.1, 0.5)
np_out = np.clip(input.numpy(), 0.1, 0.5)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def _test_clamp_scalar_min(test_case, shape, device):
input = flow.tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.clamp(input, 0.1, None)
np_out = np.clip(input.numpy(), 0.1, None)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def _test_clamp_scalar_max(test_case, shape, device):
input = flow.tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.clamp(input, None, 0.5)
np_out = np.clip(input.numpy(), None, 0.5)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def _test_clamp_integral(test_case, shape, device):
input = flow.tensor(np.random.randint(3, 10, shape), device=flow.device(device))
of_out = flow.clamp(input, 1, 5)
np_out = np.clip(input.numpy(), 1, 5)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def _numpy_clamp_grad(arr, min, max):
grad = np.zeros_like(arr)
grad[arr.clip(min, max) == arr] += 1
return grad
def _test_clamp_backward(test_case, shape, device):
x = flow.tensor(
np.random.randn(*shape),
dtype=flow.float32,
device=flow.device(device),
requires_grad=True,
)
y = flow.clamp(x, 0.1, 0.5).sum()
y.backward()
test_case.assertTrue(
np.allclose(
x.grad.numpy(), _numpy_clamp_grad(x.numpy(), 0.1, 0.5), 1e-05, 1e-05
)
)
@flow.unittest.skip_unless_1n1d()
class TestClampModule(flow.unittest.TestCase):
def test_clamp(test_case):
arg_dict = OrderedDict()
arg_dict["fun"] = [
_test_clamp,
_test_tensor_clamp,
_test_clamp_scalar_min,
_test_clamp_scalar_max,
_test_clamp_integral,
_test_clamp_backward,
]
arg_dict["shape"] = [(2,), (2, 3), (2, 4, 5, 6)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(check_graph=False)
def test_clamp_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clamp(input, min=random().to(float), max=random().to(float))
return y
@autotest(check_graph=False)
def test_clamp_min_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clamp(input, min=random().to(float), max=random().to(float))
return y
@autotest(check_graph=False)
def test_clamp_max_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clamp(
input, min=random().to(float), max=random().to(float) | nothing()
)
return y
@autotest(check_graph=False)
def test_clip_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clip(input, min=random().to(float), max=random().to(float))
return y
@autotest(check_graph=False)
def test_clip_min_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clip(input, min=random().to(float), max=random().to(float))
return y
@autotest(check_graph=False)
def test_clip_max_none_flow_with_random_data(test_case):
device = random_device()
input = random_pytorch_tensor().to(device)
y = torch.clip(
input, min=random().to(float), max=random().to(float) | nothing()
)
return y
@autotest(auto_backward=False, check_graph=False)
def test_clamp_with_0shape_data(test_case):
device = random_device()
x = random_pytorch_tensor(4, 2, 1, 0, 3).to(device)
y = torch.clamp(x, min=random().to(float), max=random().to(float))
return y
if __name__ == "__main__":
unittest.main()
| 33.107784
| 84
| 0.660336
|
78ed39f180fe4843acc0fad555f445369065ef27
| 2,426
|
py
|
Python
|
OnlineCourseRegistration/users/urls.py
|
rusheel98/Online-Course-Registration
|
3cdb26be29ce7f409d6d1753d2b8c15fed27f43a
|
[
"MIT"
] | 1
|
2020-09-28T13:08:30.000Z
|
2020-09-28T13:08:30.000Z
|
OnlineCourseRegistration/users/urls.py
|
rusheel98/Online-Course-Registration
|
3cdb26be29ce7f409d6d1753d2b8c15fed27f43a
|
[
"MIT"
] | null | null | null |
OnlineCourseRegistration/users/urls.py
|
rusheel98/Online-Course-Registration
|
3cdb26be29ce7f409d6d1753d2b8c15fed27f43a
|
[
"MIT"
] | null | null | null |
from django.urls import path,include
from django.conf.urls import url
from django.views.generic.base import TemplateView,RedirectView
from . import views
from .views import CourseListView,StudentCourseListView,RegCourseListView,Login,SignUp
#
app_name = 'users'
urlpatterns = [
path('signup/', views.SignUp.as_view(), name='signup'),
path('login/', Login.as_view(), name='in'),
path('login.html/', Login.as_view(), name='in'),
path('profile.html/', TemplateView.as_view(template_name='users/profile.html'), name='profile'),
path('add_sprofile/', views.add_sprofile, name='sprofile'),
path('', views.index, name='index'),
url('^callback/(?P<token>.+)$', views.callback, name='callback'),
path('<int:course_id>/', views.details, name='details'),
path('add_course/', views.add_course, name='add_course'),
path('add_student/', views.add_student, name='add_student'),
path('students/', views.display_students, name='display_students'),
path('audit_course/', views.audit_course, name='audit_course'),
path('<int:course_id>/add_course_details/', views.add_course_details, name='add_course_details'),
path('add_grade/',views.add_grade,name='add_grade'),
path('publish_course_registration/',views.publish_course_registrations, name='publish_course_registrations'),
path('view_registration/',views.view_registration, name='view_registration'),
path('faculty/', views.faculty, name='faculty'),
path('register.html/', CourseListView.as_view(),name='RegCourseList'),
path('<int:course_id>/special_req/', views.special_req, name='special_req'),
path('<int:course_id>/add_course_details/', views.add_course_details, name='add_course_details'),
path('(P<course_id>\d+)/(P<val>\d+)/', views.CourseListView.coursedetails, name='coursedetails'),
path('coursedetails.html/',TemplateView.as_view(template_name='users/coursedetails.html'),name="coursevals"),
path('approve_req/', views.approve_req, name='approve_req'),
path('approve_req/<int:request_id>/special_req_res_acc/', views.special_req_res_acc, name='special_req_res_acc'),
path('approve_req/<int:request_id>/special_req_res_dec/', views.special_req_res_dec, name='special_req_res_dec'),
#path('studenthome.html/',TemplateView.as_view(template_name='users/studentehome.html'),name="studenthome"),
path('studenthome.html/',StudentCourseListView.as_view(),name='MyCourseList'),
path('courselist.html/',RegCourseListView.as_view(),name='regcourselist'),
]
| 57.761905
| 114
| 0.760923
|
0eedcea6542b62281c1ba220383c46cc2735703f
| 5,515
|
py
|
Python
|
lib/pysot/datasets/video.py
|
Existever/PyCFTrackers
|
3221e47aecca40de21ad9be875b2f8d960b4e09c
|
[
"MIT"
] | 231
|
2019-04-01T08:04:40.000Z
|
2020-02-19T10:16:12.000Z
|
lib/pysot/datasets/video.py
|
Existever/PyCFTrackers
|
3221e47aecca40de21ad9be875b2f8d960b4e09c
|
[
"MIT"
] | 18
|
2020-04-17T03:52:02.000Z
|
2021-10-15T13:36:46.000Z
|
lib/pysot/datasets/video.py
|
Existever/PyCFTrackers
|
3221e47aecca40de21ad9be875b2f8d960b4e09c
|
[
"MIT"
] | 63
|
2020-02-24T15:21:12.000Z
|
2022-03-26T21:44:40.000Z
|
import os
import cv2
import re
import numpy as np
import json
from glob import glob
class Video(object):
def __init__(self, name, root, video_dir, init_rect, img_names,
gt_rect, attr, load_img=False):
self.name = name
self.video_dir = video_dir
self.init_rect = init_rect
self.gt_traj = gt_rect
self.attr = attr
self.pred_trajs = {}
self.img_names = [os.path.join(root, x) for x in img_names]
self.imgs = None
if load_img:
self.imgs = [cv2.imread(img_name)
for img_name in self.img_names]
self.width = self.imgs[0].shape[1]
self.height = self.imgs[0].shape[0]
else:
img = cv2.imread(self.img_names[0])
assert img is not None, self.img_names[0]
self.width = img.shape[1]
self.height = img.shape[0]
def load_tracker(self, path, tracker_names=None, store=True):
"""
Args:
path(str): path to result
tracker_name(list): name of tracker
"""
if not tracker_names:
tracker_names = [x.split('/')[-1] for x in glob(path)
if os.path.isdir(x)]
if isinstance(tracker_names, str):
tracker_names = [tracker_names]
for name in tracker_names:
traj_file = os.path.join(path, name, self.name+'.txt')
if os.path.exists(traj_file):
with open(traj_file, 'r') as f :
pred_traj = [list(map(float, x.strip().split(',')))
for x in f.readlines()]
if len(pred_traj) != len(self.gt_traj):
print(name, len(pred_traj), len(self.gt_traj), self.name)
if store:
self.pred_trajs[name] = pred_traj
else:
return pred_traj
else:
print(traj_file)
self.tracker_names = list(self.pred_trajs.keys())
def load_img(self):
if self.imgs is None:
self.imgs = [cv2.imread(x)
for x in self.img_names]
self.width = self.imgs[0].shape[1]
self.height = self.imgs[0].shape[0]
def free_img(self):
self.imgs = None
def __len__(self):
return len(self.img_names)
def __getitem__(self, idx):
if self.imgs is None:
return cv2.imread(self.img_names[idx]), \
self.gt_traj[idx]
else:
return self.imgs[idx], self.gt_traj[idx]
def __iter__(self):
for i in range(len(self.img_names)):
if self.imgs is not None:
yield self.imgs[i], self.gt_traj[i]
else:
yield cv2.imread(self.img_names[i]), \
self.gt_traj[i]
def draw_box(self, roi, img, linewidth, color, name=None):
"""
roi: rectangle or polygon
img: numpy array img
linewith: line width of the bbox
"""
if len(roi) > 6 and len(roi) % 2 == 0:
pts = np.array(roi, np.int32).reshape(-1, 1, 2)
color = tuple(map(int, color))
img = cv2.polylines(img, [pts], True, color, linewidth)
pt = (pts[0, 0, 0], pts[0, 0, 1]-5)
if name:
img = cv2.putText(img, name, pt, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
elif len(roi) == 4:
if not np.isnan(roi[0]):
roi = list(map(int, roi))
color = tuple(map(int, color))
img = cv2.rectangle(img, (roi[0], roi[1]), (roi[0]+roi[2], roi[1]+roi[3]),
color, linewidth)
if name:
img = cv2.putText(img, name, (roi[0], roi[1]-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
return img
def show(self, pred_trajs={}, linewidth=2, show_name=False):
"""
pred_trajs: dict of pred_traj, {'tracker_name': list of traj}
pred_traj should contain polygon or rectangle(x, y, width, height)
linewith: line width of the bbox
"""
assert self.imgs is not None
video = []
cv2.namedWindow(self.name, cv2.WINDOW_NORMAL)
colors = {}
if len(pred_trajs) == 0 and len(self.pred_trajs) > 0:
pred_trajs = self.pred_trajs
for i, (roi, img) in enumerate(zip(self.gt_traj,
self.imgs[self.start_frame:self.end_frame+1])):
img = img.copy()
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
else:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img = self.draw_box(roi, img, linewidth, (0, 255, 0),
'gt' if show_name else None)
for name, trajs in pred_trajs.items():
if name not in colors:
color = tuple(np.random.randint(0, 256, 3))
colors[name] = color
else:
color = colors[name]
img = self.draw_box(traj[0][i], img, linewidth, color,
name if show_name else None)
cv2.putText(img, str(i+self.start_frame), (5, 20),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 0), 2)
cv2.imshow(self.name, img)
cv2.waitKey(40)
video.append(img.copy())
return video
| 37.517007
| 113
| 0.513327
|
c6c84cdb015f3ac2e3d3c7cd1768a8d5dadfcbf6
| 441
|
py
|
Python
|
2187.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | 1
|
2022-01-14T08:45:32.000Z
|
2022-01-14T08:45:32.000Z
|
2187.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
2187.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
notes = [50, 10, 5, 1]
count = 1
while True:
v = int(input())
if v == 0:
break
arr = [0]*4
while v != 0:
for i in range(len(notes)):
if v - notes[i] >= 0:
v -= notes[i]
arr[i] += 1
break
print(f"Teste {count}")
count += 1
for i in range(len(arr)-1):
print(arr[i], end=" ")
print(arr[len(arr)-1])
print()
| 23.210526
| 36
| 0.394558
|
845fdc488b7e5c84879a6469622600199eef4def
| 1,485
|
py
|
Python
|
setup.py
|
christopherahern/lifelines
|
9d7c13468111fc456a66f78a18cb3f7f9b6c9506
|
[
"MIT"
] | 1
|
2020-03-07T07:39:07.000Z
|
2020-03-07T07:39:07.000Z
|
setup.py
|
zhaobeile/lifelines
|
a91113f06b983a8dce923c2cc2242d78b1da4600
|
[
"MIT"
] | null | null | null |
setup.py
|
zhaobeile/lifelines
|
a91113f06b983a8dce923c2cc2242d78b1da4600
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def filepath(fname):
return os.path.join(os.path.dirname(__file__), fname)
exec(compile(open("lifelines/version.py").read(), "lifelines/version.py", "exec"))
with open("README.md") as f:
long_description = f.read()
setup(
name="lifelines",
version=__version__,
author="Cameron Davidson-Pilon",
author_email="cam.davidson.pilon@gmail.com",
description="Survival analysis in Python, including Kaplan Meier, Nelson Aalen and regression",
license="MIT",
keywords="survival analysis statistics data analysis",
url="https://github.com/CamDavidsonPilon/lifelines",
packages=find_packages(),
python_requires=">=3.5",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering",
],
install_requires=[
"numpy>=1.6.0",
"scipy>=1.0,<=1.2.1",
"pandas>=0.23.0",
"matplotlib>=3.0",
"bottleneck>=1.0",
"autograd>=1.2",
],
package_data={"lifelines": ["../README.md", "../README.txt", "../LICENSE", "../MANIFEST.in", "datasets/*"]},
)
| 30.9375
| 112
| 0.628283
|
fea7819c2afa54d95b95d89e91c0f30347de3168
| 1,823
|
py
|
Python
|
app/handlers/users/mathematics/equations.py
|
vitaliy-ukiru/math-bot
|
72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9
|
[
"MIT"
] | 1
|
2021-12-11T07:41:38.000Z
|
2021-12-11T07:41:38.000Z
|
app/handlers/users/mathematics/equations.py
|
vitaliy-ukiru/math-bot
|
72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9
|
[
"MIT"
] | 8
|
2021-05-08T21:48:34.000Z
|
2022-01-20T15:42:00.000Z
|
app/handlers/users/mathematics/equations.py
|
vitaliy-ukiru/math-bot
|
72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9
|
[
"MIT"
] | null | null | null |
__all__ = (
'SOLUTION_METHODS',
)
from math import sqrt
from app.utils.math import biquadratic_roots, view, view_sqrt
def equation_solution(a: float, b: float, c: float) -> str:
d = b ** 2 - 4 * a * c
total_answer = f'<i>D = {b ** 2} - {view(4 * a * c)} = {d}</i>\n'
if d < 0:
total_answer += '\nКорней нет'
elif d == 0:
x = (b / (2 * a)) * -1
total_answer += f'''
D = 0
x = -(b / (2 * a)) = -({b} / (2 * {view(a)})) = {x}'''
elif d > 0:
x1 = (-b + sqrt(d)) / (2 * a)
x2 = (-b - sqrt(d)) / (2 * a)
total_answer += f'''√D = {view_sqrt(d)}
x1 = (-b + √D) / (2a) = ({-b} + {view_sqrt(d)}) / (2 * {view(a)}) = {x1}
x2 = (-b - √D) / (2a) = ({-b} - {view_sqrt(d)}) / (2 * {view(a)}) = {x2}
'''
return total_answer
def biquadratic_solution(a: float, b: float, c: float) -> str:
answer = f'''
{view(a)} * x^4 + {view(b)} * x² + {view(c)} = 0
t = x²
{view(a)} * t² + {view(b)} * t + {view(c)} = 0\n
'''
d = b ** 2 - 4 * a * c
answer += f'<i>D = {b ** 2} - {view(4 * a * c)} = {d}</i>\n'
if d < 0:
answer += '\nКорней нет'
elif d == 0:
t = (b / (2 * a)) * -1
answer += f'''
D = 0
t = -(b / (2 * a) = -({b} / (2 * {view(a)})) = {t}
'''
if t > 0:
answer += f'x² = {t}\n' \
f'x = {view_sqrt(t)}'
else:
answer += f'x² = {t} => нет решения'
elif d > 0:
t1 = (-b + sqrt(d)) / (2 * a)
t2 = (-b - sqrt(d)) / (2 * a)
answer += f'''√D = {sqrt(d)}
t1 = (-b + √D) / (2a) = ({-b} + {sqrt(d)}) / (2 * {view(a)}) = {t1}
t2 = (-b - √D) / (2a) = ({-b} - {sqrt(d)}) / (2 * {view(a)}) = {t2}\n
'''
answer += biquadratic_roots(t1, t2)
return answer
SOLUTION_METHODS = {'eq': equation_solution, 'bq': biquadratic_solution}
| 26.042857
| 72
| 0.410861
|
31d624c5e7a6090428de47478d8d536f2e085b77
| 2,168
|
py
|
Python
|
countries_field/bitfield/forms.py
|
egosko/django-countries-field
|
0710f6d148dfefd5c56767bc5203081e96b8dee4
|
[
"Unlicense"
] | 3
|
2016-02-18T15:06:41.000Z
|
2019-12-25T15:34:28.000Z
|
countries_field/bitfield/forms.py
|
egosko/django-countries-field
|
0710f6d148dfefd5c56767bc5203081e96b8dee4
|
[
"Unlicense"
] | 2
|
2016-02-19T07:54:56.000Z
|
2018-05-15T14:46:31.000Z
|
countries_field/bitfield/forms.py
|
egosko/django-countries-field
|
0710f6d148dfefd5c56767bc5203081e96b8dee4
|
[
"Unlicense"
] | 8
|
2015-03-24T10:27:28.000Z
|
2020-11-30T09:56:19.000Z
|
from __future__ import absolute_import
from django.utils.encoding import force_text
from django.forms import CheckboxSelectMultiple, IntegerField, ValidationError
from .types import BitHandler
class BitFieldCheckboxSelectMultiple(CheckboxSelectMultiple):
def render(self, name, value, attrs=None, choices=()):
if isinstance(value, BitHandler):
value = [k for k, v in value if v]
elif isinstance(value, int):
real_value = []
div = 2
for (k, v) in self.choices:
if value % div != 0:
real_value.append(k)
value -= (value % div)
div *= 2
value = real_value
return super(BitFieldCheckboxSelectMultiple, self).render(
name, value, attrs=attrs, choices=enumerate(choices))
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if initial != data:
return True
initial_set = set([force_text(value) for value in initial])
data_set = set([force_text(value) for value in data])
return data_set != initial_set
class BitFormField(IntegerField):
def __init__(self, choices=(), widget=BitFieldCheckboxSelectMultiple, *args, **kwargs):
if isinstance(kwargs['initial'], int):
iv = kwargs['initial']
l = []
for i in range(0, 63):
if (1 << i) & iv > 0:
l += [choices[i][0]]
kwargs['initial'] = l
self.widget = widget
super(BitFormField, self).__init__(widget=widget, *args, **kwargs)
self.choices = self.widget.choices = choices
def clean(self, value):
if not value:
return 0
# Assume an iterable which contains an item per flag that's enabled
result = BitHandler(0, [k for k, v in self.choices])
for k in value:
try:
setattr(result, str(k), True)
except AttributeError:
raise ValidationError('Unknown choice: %r' % (k,))
return int(result)
| 34.412698
| 91
| 0.571033
|
dfe3d9cd8b242b0d8d82e2758d16b8b8516908f2
| 6,198
|
py
|
Python
|
samples/ThreadedReadProperty.py
|
ChristianTremblay/bacpypes
|
e6c5a805552ddbf9517e6bf43063b4ca20d7266f
|
[
"MIT"
] | 1
|
2021-07-11T02:13:40.000Z
|
2021-07-11T02:13:40.000Z
|
samples/ThreadedReadProperty.py
|
ChristianTremblay/bacpypes
|
e6c5a805552ddbf9517e6bf43063b4ca20d7266f
|
[
"MIT"
] | null | null | null |
samples/ThreadedReadProperty.py
|
ChristianTremblay/bacpypes
|
e6c5a805552ddbf9517e6bf43063b4ca20d7266f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Threaded Read Property
This application has a static list of points that it would like to read. It
starts a thread for each unique device address and reads the points for that
device.
"""
from threading import Thread
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.core import run, stop, deferred
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address
from bacpypes.object import get_datatype
from bacpypes.apdu import ReadPropertyRequest
from bacpypes.primitivedata import Unsigned
from bacpypes.constructeddata import Array
from bacpypes.app import BIPSimpleApplication
from bacpypes.service.device import LocalDeviceObject
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
this_application = None
# point list, set according to your devices
point_list = [
('10.0.1.14', [
('analogValue', 1, 'presentValue'),
('analogValue', 2, 'presentValue'),
]),
('10.0.1.15', [
('analogValue', 1, 'presentValue'),
('analogValue', 2, 'presentValue'),
]),
]
#
# ReadPointListThread
#
@bacpypes_debugging
class ReadPointListThread(Thread):
def __init__(self, device_address, point_list):
if _debug: ReadPointListThread._debug("__init__ %r %r", device_address, point_list)
Thread.__init__(self)
# save the address
self.device_address = Address(device_address)
# turn the point list into a queue
self.point_list = point_list
# make a list of the response values
self.response_values = []
def run(self):
if _debug: ReadPointListThread._debug("run")
global this_application
# loop through the points
for obj_type, obj_inst, prop_id in self.point_list:
# build a request
request = ReadPropertyRequest(
destination=self.device_address,
objectIdentifier=(obj_type, obj_inst),
propertyIdentifier=prop_id,
)
if _debug: ReadPointListThread._debug(" - request: %r", request)
# make an IOCB
iocb = IOCB(request)
if _debug: ReadPointListThread._debug(" - iocb: %r", iocb)
# give it to the application
this_application.request_io(iocb)
# wait for the response
iocb.wait()
if iocb.ioResponse:
apdu = iocb.ioResponse
# find the datatype
datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
if _debug: ReadPointListThread._debug(" - datatype: %r", datatype)
if not datatype:
raise TypeError("unknown datatype")
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
if apdu.propertyArrayIndex == 0:
value = apdu.propertyValue.cast_out(Unsigned)
else:
value = apdu.propertyValue.cast_out(datatype.subtype)
else:
value = apdu.propertyValue.cast_out(datatype)
if _debug: ReadPointListThread._debug(" - value: %r", value)
# save the value
self.response_values.append(value)
if iocb.ioError:
if _debug: ReadPointListThread._debug(" - error: %r", iocb.ioError)
self.response_values.append(iocb.ioError)
if _debug: ReadPointListThread._debug(" - fini")
#
# ThreadSupervisor
#
@bacpypes_debugging
class ThreadSupervisor(Thread):
def __init__(self, thread_list):
if _debug: ThreadSupervisor._debug("__init__ ...")
Thread.__init__(self)
self.thread_list = thread_list
def run(self):
if _debug: ThreadSupervisor._debug("run")
# start them up
for read_thread in self.thread_list:
read_thread.start()
if _debug: ThreadSupervisor._debug(" - all started")
# wait for them to finish
for read_thread in self.thread_list:
read_thread.join()
if _debug: ThreadSupervisor._debug(" - all finished")
# stop the core
stop()
#
# __main__
#
def main():
global this_application
# parse the command line arguments
args = ConfigArgumentParser(description=__doc__).parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(
objectName=args.ini.objectname,
objectIdentifier=int(args.ini.objectidentifier),
maxApduLengthAccepted=int(args.ini.maxapdulengthaccepted),
segmentationSupported=args.ini.segmentationsupported,
vendorIdentifier=int(args.ini.vendoridentifier),
)
# make a simple application
this_application = BIPSimpleApplication(this_device, args.ini.address)
# get the services supported
services_supported = this_application.get_services_supported()
if _debug: _log.debug(" - services_supported: %r", services_supported)
# let the device object know
this_device.protocolServicesSupported = services_supported.value
thread_list = []
# loop through the address and point lists
for addr, points in point_list:
# create a thread
read_thread = ReadPointListThread(addr, points)
if _debug: _log.debug(" - read_thread: %r", read_thread)
thread_list.append(read_thread)
# create a thread supervisor
thread_supervisor = ThreadSupervisor(thread_list)
# start it running when the core is running
deferred(thread_supervisor.start)
_log.debug("running")
run()
# dump out the results
for read_thread in thread_list:
for request, response in zip(read_thread.point_list, read_thread.response_values):
print(request, response)
_log.debug("fini")
if __name__ == "__main__":
main()
| 29.235849
| 91
| 0.645531
|
2f39e331b4ca8607c537a0f0c0fc44f245ae6670
| 25,740
|
py
|
Python
|
Plugins/Utilities/Generate_Diffs.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 25
|
2018-12-10T12:52:11.000Z
|
2022-01-29T14:42:57.000Z
|
Plugins/Utilities/Generate_Diffs.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 4
|
2019-08-01T19:09:11.000Z
|
2022-01-02T01:47:42.000Z
|
Plugins/Utilities/Generate_Diffs.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 6
|
2019-02-16T08:39:04.000Z
|
2021-12-21T06:11:58.000Z
|
from pathlib import Path
from itertools import zip_longest
import difflib
from Framework import Utility_Wrapper
from Framework import Plugin_Log
from Framework import Print
from Framework.File_Manager import XML_File
from Framework.File_Manager.Cat_Reader import Get_Hash_String
from Framework.File_Manager.XML_Diff import Print as XML_Print
# TODO: merge this in with the Game_File system if run as part of
# a script and not from the command line launcher, where the outputs
# would instead be to the corresponding path in the dest extension.
# TODO: maybe add generate_sigs support.
@Utility_Wrapper(uses_paths_from_settings = False)
def Generate_Diffs(
original_dir_path,
modified_dir_path,
output_dir_path,
skip_unchanged = False,
verbose = False,
):
'''
Generate diffs for changes between two xml containing folders,
creating diff patches.
* original_dir_path
- Path to the original xml file that acts as the baseline.
* modified_dir_path
- Path to the modified version of the xml file.
* output_dir_path
- Path to write the diff patch to.
* skip_unchanged
- Bool, skip output for files that are unchanged (removing any
existing diff patch).
- Default will generate empty diff patches.
* verbose
- Bool, print the path of the outputs on succesful writes.
'''
# Cast to paths to be safe.
original_dir_path = Path(original_dir_path).resolve()
modified_dir_path = Path(modified_dir_path).resolve()
output_dir_path = Path(output_dir_path).resolve()
# Gather all xml files from the input directorys.
# Make dicts for ease of use, keyed by relative path from the
# base folder.
#original_paths = {x.relative_to(original_dir_path) : x for x in original_dir_path.glob('**/*.xml')}
modified_paths = {x.relative_to(modified_dir_path) : x for x in modified_dir_path.glob('**/*.xml')}
# Pair off the modified files with originals by name.
# If an original is not found, error.
# Ignore excess originals.
for rel_path, mod_path in modified_paths.items():
orig_path = original_dir_path / rel_path
if not orig_path.exists():
Print('No matching original file found for {}'.format(rel_path.name))
continue
# Set up the output.
out_path = output_dir_path / rel_path
if verbose:
Print('Generating diff for {}'.format(rel_path.name))
# Generate the diff. If this errors, the file will be skipped
# (due to plugin wrapper).
Generate_Diff(
original_file_path = orig_path,
modified_file_path = mod_path,
output_file_path = out_path,
skip_unchanged = skip_unchanged,
verbose = verbose
)
return
@Utility_Wrapper(uses_paths_from_settings = False)
def Generate_Diff(
original_file_path,
modified_file_path,
output_file_path,
skip_unchanged = False,
verbose = False,
):
'''
Generate a diff of changes between two xml files, creating a diff patch.
* original_file_path
- Path to the original xml files that act as the baseline.
* modified_file_path
- Path to the modified versions of the xml files.
* output_file_path
- Path to write the diff patches to.
* skip_unchanged
- Bool, skip output for files that are unchanged (removing any
existing diff patch).
- Default will generate empty diff patches.
* verbose
- Bool, print the path of the outputs on succesful writes.
'''
# Cast to paths to be safe.
original_file_path = Path(original_file_path).resolve()
modified_file_path = Path(modified_file_path).resolve()
output_file_path = Path(output_file_path).resolve()
if (original_file_path == modified_file_path
or output_file_path == original_file_path
or output_file_path == modified_file_path):
raise Exception('Path conflict error')
# List of messages to print out.
messages = []
def Print_Messages():
'Prints all pending messages.'
while messages:
message = messages.pop(0)
# TODO: maybe allow this if Settings are set up, otherwise
# might give an error on eg. missing x4 path.
#Plugin_Log.Print(message)
if verbose:
Print(message)
# Load the original.
base_game_file = XML_File(
# Virtual path doesn't matter, though can be useful for debug,
# so try to fill in something.
virtual_path = output_file_path.name,
binary = original_file_path.read_bytes(),
# Flag as the source; this will trigger diff patch generation later.
from_source = True,
)
# Finish initializing it; no diff patches to wait for.
# This fills in initial node ids.
base_game_file.Delayed_Init()
# Load the modified. Just want the xml nodes, but use a game_file
# for consistent loading format.
temp_game_file = XML_File(
virtual_path = '',
binary = modified_file_path.read_bytes(),
)
# Go ahead and give node ids. Not too important, but might do some
# misc formatting, eg. removing tails.
temp_game_file.Delayed_Init()
# Pick out the roots.
original_root = base_game_file.Get_Root()
modified_root = temp_game_file.Get_Root()
# Start by using a standard text diff library.
# This is very good at matching up exact nodes regardless of their
# parentage. Not so good at handling attribute changes or data
# structure changes.
# Returns a dict pairing original with modified nodes.
text_based_node_matches, changed = Get_Text_Diff_Matches(original_root, modified_root)
# If files match, check arg for skipping the file.
if not changed and skip_unchanged:
messages.append('File unchanged: {}'.format(modified_file_path))
# Check if an output file already exists and delete it.
if output_file_path.exists():
output_file_path.unlink()
messages.append('Removing prior diff: {}'.format(output_file_path))
else:
# Don't need to put the modified root back if there are no changes.
if changed:
# Follow up with a manual traversal of the trees, completing matches.
Match_Trees(original_root, modified_root, text_based_node_matches)
# Put the modified xml back in the game_file.
base_game_file.Update_Root(modified_root)
# Write to file. This will trigger the diff patch generation,
# empty if no changes.
# This also makes the directory if needed.
base_game_file.Write_File(output_file_path)
# The above can be handy as a print message to verify the update.
messages.append('Generated diff written to: {}'.format(output_file_path))
Print_Messages()
return
class Element_Wrap:
'''
Wrapper on xml elements with custom comparison rules.
'''
def __init__(self, xml):
self.xml = xml
self.tag = xml.tag
self.attrib = dict(xml.attrib)
self.text = xml.text
# String version of this element, flattened, for easy comparison.
# TODO: maybe check parent tags as well, for conservative matching.
self.hash_str = '{}{{{}}}{}'.format(
self.tag,
','.join(['{}:{}'.format(k,v) for k,v in sorted(self.attrib.items())]),
self.text)
# Hash to be used.
# Note: just doing something like id(self) gives horrible results;
# the hash appears to be part of the comparison.
self.hash_int = hash(self.hash_str)
return
def __eq__(self, other):
return self.hash_str == other.hash_str
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.hash_int
# Alternative to the above: breaks out the element into a series
# of pieces for finer grain matching.
class Element_Piece:
'''
For better difflib matching, this will be just a piece of an xml
element.
* text
- String, text for this piece.
* is_tag
- Bool, True if this is the tag string.
* xml
- Original XML Element.
'''
def __init__(self, text, is_tag = False, xml = None):
self.text = text
self.is_tag = is_tag
self.xml = xml
# Stich the is_tag flag in the hash/comparison string for
# robustness.
self.hash_str = f'{"_t_" if is_tag else ""}{text}'
self.hash_int = hash(self.hash_str)
return
def __eq__(self, other):
return self.hash_str == other.hash_str
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.hash_int
def Element_To_Pieces(node):
'''
Returns a list of Element_Pieces for a given xml node.
'''
ret_list = [Element_Piece(node.tag, is_tag = True, xml = node)]
for k,v in sorted(node.attrib.items()):
ret_list.append(Element_Piece(f'{k}:{v}'))
if node.text:
ret_list.append(Element_Piece(node.text))
return ret_list
def Get_Text_Diff_Matches(original_root, modified_root):
'''
Identify modifications with the help of a text diff library.
Returns a dict matching original elements to modified elements that
appear to be the same, along with a bool "changed" flag indicating
if any changes were found.
'''
# Flatten out all of the nodes, and wrap them with custom
# match logic.
original_nodes = [Element_Wrap(x) for x in original_root.iter()]
modified_nodes = [Element_Wrap(x) for x in modified_root.iter()]
# -Removed; expanded style didn't help, and just ran a lot slower.
# Alternative: get tighter matching logic by breaking apart tags,
# attributes, and tails (eg. an element expands to 2+ subelements), since
# the difflib really wants to match sequences, and does poorly at matching
# a full xml element that has changes on both sides.
#original_nodes = [y for x in original_root.iter() for y in Element_To_Pieces(x)]
#modified_nodes = [y for x in modified_root.iter() for y in Element_To_Pieces(x)]
# Sequence matcher will pair up the nodes.
matcher = difflib.SequenceMatcher(
# Lambda function that takes an element and returns if it is ignorable.
# Nothing ignorable, so no function.
None,
original_nodes,
modified_nodes,
# There is some weird background algorithm that selects elements
# to ignore based on frequency? Anyway, in practice on a big
# wares file it caused a bunch of matches to be missed, so
# disable it.
autojunk = False)
# Dict pairing original to modified nodes that the sequencer matched.
orig_mod_matches = {}
# get_matching_blocks returns a series of tuples of
# (i, j, n) where a[i:i+n] == b[j:j+n]
# Note: this may end up matching nodes from one parent's child elements
# to those of another parent. However, this is not expected to be a
# problem, since the diff generator just checks for matches under
# an already matched parent.
for orig_base, mod_base, length in matcher.get_matching_blocks():
for offset in range(length):
# -Removed; expanded style didn't help.
#orig_piece = original_nodes[orig_base + offset]
#mod_piece = modified_nodes[mod_base + offset]
## When a non-tag is matched, ignore it. Only care about
## the tags.
#if not orig_piece.is_tag:
# continue
#assert mod_piece.is_tag
#orig_node = orig_piece.xml
#mod_node = mod_piece.xml
orig_node = original_nodes[orig_base + offset].xml
mod_node = modified_nodes[mod_base + offset].xml
orig_mod_matches[orig_node] = mod_node
# Set a flag indicating if there are any mismatches, since the following
# code will match up some nodes that just have attribute changes and
# might make this check think the nodes are unchanged if done later.
if len(orig_mod_matches) == len(original_nodes) == len(modified_nodes):
changed = False
else:
changed = True
# When a node changed attributes, if it had children, they may have
# been matched. Can treat the parent as matched if any children matched.
# This is easiest to do backwards: for all matched nodes, set their
# parents as matched if not already.
# Error if any mod nodes are matched again.
mod_nodes_matched = set([x for x in orig_mod_matches.values()])
# Loop until all orig nodes processed; this list will extend on
# each new match.
orig_nodes_to_check = [x for x in orig_mod_matches]
while orig_nodes_to_check:
orig_node = orig_nodes_to_check.pop(0)
mod_node = orig_mod_matches[orig_node]
# Get their parents.
orig_parent = orig_node.getparent()
mod_parent = mod_node.getparent()
# If reaching the top, skip.
if orig_parent == None or mod_parent == None:
continue
# In some cases, nodes may have been matched across different parents.
# Do some extra validation before trying to match these parents.
if( (orig_parent not in orig_mod_matches)
# Tag can't have changed.
and (orig_parent.tag == mod_parent.tag)
# These nodes should not have existing matches.
and (orig_parent not in orig_mod_matches)
and (mod_parent not in mod_nodes_matched)
):
orig_mod_matches[orig_parent] = mod_parent
# Update dict and set for later loops.
orig_nodes_to_check.append(orig_parent)
mod_nodes_matched.add(mod_parent)
return orig_mod_matches, changed
def Match_Trees(original_root, modified_root, text_based_node_matches):
'''
Manually compare nodes between the xml trees, and try to find matches.
Updates modified_root tail ids directly.
'''
# Gather hashes, with and without attributes included.
attr_hash_dict, no_attr_hash_dict = Fill_Element_Hashes(original_root)
Fill_Element_Hashes(modified_root, attr_hash_dict, no_attr_hash_dict)
# The top level node should always match, so do that directly.
if original_root.tag != modified_root.tag:
Print('Generate_Diffs error: root tag mismatch, {} vs {}'.format(
original_root.tag,
modified_root.tag ))
modified_root.tail = original_root.tail
# Fill in child node matches, recursively.
Match_Children(original_root, modified_root,
attr_hash_dict, no_attr_hash_dict,
text_based_node_matches)
return
def Fill_Element_Hashes(element, attr_hash_dict = None, no_attr_hash_dict = None):
'''
Returns a pair of dicts matching each xml element to a hash string, where
the hash accounts for the node tag, attributes, and the hashes of
all child nodes in order.
The first dict includes node attributes in the hash; the second does
not include attributes (of this node or any children).
'''
# Start a new dict if needed.
if attr_hash_dict == None:
attr_hash_dict = {}
if no_attr_hash_dict == None:
no_attr_hash_dict = {}
# Construct a text string the summarizes this node and child hashes.
# TODO: could maybe loop the attr and no-attr versions.
attr_hash_text = ''
no_attr_hash_text = ''
# Start with the element tag and attributes.
attr_hash_text += 'tag:{},'.format(element.tag)
no_attr_hash_text += 'tag:{},'.format(element.tag)
for attr, value in sorted(element.items()):
attr_hash_text += '{}:{},'.format(attr, value)
# Gather all child hashes.
for child in element.getchildren():
# Costruct the hash if needed (should generally occur).
if child not in attr_hash_dict:
Fill_Element_Hashes(child, attr_hash_dict, no_attr_hash_dict)
# Use the attribute-including hash of the child.
attr_hash_text += attr_hash_dict[child]+','
no_attr_hash_text += no_attr_hash_dict[child]+','
# Shorten it for faster matching, using an md5 hash.
attr_hash = Get_Hash_String(attr_hash_text.encode())
attr_hash_dict[element] = attr_hash
no_attr_hash = Get_Hash_String(no_attr_hash_text.encode())
no_attr_hash_dict[element] = no_attr_hash
return attr_hash_dict, no_attr_hash_dict
def Match_Children(
original_node,
modified_node,
attr_hash_dict,
no_attr_hash_dict,
text_based_node_matches
):
'''
Search the children of the given pair of elements, and copy tags from
the original elements to the modified elements where matches are found.
'''
# This will use code similar to what is in XML_Diff for matching children,
# but modified somewhat to use hashes which may repeat.
# Look for child node changes.
# The approach will be to use a running walk between both child
# lists, matching up node hashes; when there is a mismatch, can
# check if one side's node is present in the other side's list,
# indicating what happened (add or remove).
# Collect the child lists.
# During processing, nodes will get popped off as their match/mismatch
# status is determined. Matches pop off both lists. Mismatches may
# pop off one list depending on if it appears to be an insert or delete.
# Note: use iterchildren instead of children to pick up comments.
orig_children = [x for x in original_node.iterchildren()]
mod_children = [x for x in modified_node.iterchildren()]
# Handy match check functions.
def Is_Attr_Match(orig, mod):
return attr_hash_dict[orig] == attr_hash_dict[mod]
def Is_No_Attr_Match(orig, mod):
return no_attr_hash_dict[orig] == no_attr_hash_dict[mod]
def Is_Text_Diff_Match(orig, mod):
return text_based_node_matches.get(orig) == mod
# Loop while nodes remain in both lists.
# Once one runs out, there are no more matches.
while orig_children and mod_children:
# Sample elements from both lists; don't remove yet.
orig_child = orig_children[0]
mod_child = mod_children[0]
strong_match = False
weak_match = False
# Check if the text diff thinks there is a later match, either direction.
mod_child_in_orig = any( Is_Text_Diff_Match(x, mod_child) for x in orig_children[1:])
orig_child_in_mod = any( Is_Text_Diff_Match(orig_child, x) for x in mod_children[1:])
# Check if there is a perfect match later, either direction.
#mod_child_in_orig = any( Is_Attr_Match(x, mod_child) for x in orig_children[1:])
#orig_child_in_mod = any( Is_Attr_Match(orig_child, x) for x in mod_children[1:])
# If node tags differ, not a match ever.
if orig_child.tag != mod_child.tag:
pass
# If the text diff thinks these match, then treat as a match.
# Note: if just the attributes differed, the backfill pass will
# have set matches on parents of matching nodes, which is
# caught here.
elif Is_Text_Diff_Match(orig_child, mod_child):
# If hashes are exact, save some time with an exact match.
if Is_Attr_Match(orig_child, mod_child):
strong_match = True
else:
# Set to look for nested changes.
weak_match = True
# If either node is a better match to something upcoming,
# based on text_diff, then dont match.
# (Note: this is different than the node being present in the
# text diff pairs, since that check could be confused if the
# text diff tried to match children from different parents,
# or some other mistake.)
elif mod_child_in_orig or orig_child_in_mod:
pass
# If the attributes differed and the node had no children, there
# is no existing match from the text diff, but the nodes may
# have originally been the same.
# In such cases, since neither node matches anything later,
# treat as matched.
# TODO: merge into checks below.
elif len(orig_child) == len(mod_child) == 0:
weak_match = True
# Sometimes the difflib fails to match two nodes even though
# they have the same tag and attributes, perhaps because there
# are a large number of attribute changes among children.
# Can polish up this case somewhat by checking if they match
# without attributes.
# TODO: merge into checks below.
elif Is_No_Attr_Match(orig_child, mod_child):
weak_match = True
else:
# Gather the lists of orig and mod children up until the
# next test_diff match. Note: these lists include the
# current nodes, regardless of if they have text_diff matches,
# since this point is only reached if such text_diff matches
# have no upcoming matching node (eg. they matched children of
# different parents, or something like that).
upcoming_origs = []
for node in orig_children:
if node is orig_child or node not in text_based_node_matches.keys():
upcoming_origs.append(node)
else:
break
upcoming_mods = []
for node in mod_children:
if node is mod_child or node not in text_based_node_matches.values():
upcoming_mods.append(node)
else:
break
# If there is just one node in each list, then these nodes
# probably match, since the next orig and mod elements have
# text_diff matches.
if len(upcoming_origs) == len(upcoming_mods) == 1:
weak_match = True
# TODO: compare possible cross-pairings between lists, and
# try to determine if the current node pair is the best fit,
# or if there is a better fit later.
# Fit quality: count matching tags/attributes/children
# (recursively) for components that are the same or different,
# then match quality is same / (same + different), where something
# like >0.5 can be considered a likely match. This may be tricky
# to do well if there were new children added. Can maybe preparse
# using difflib in some way.
# TODO: on mismatch, use this logic to pick which node to
# consider as inserted (eg. the one that does not have a high
# quality match to a later node in the other list), since the
# text_diff may not have enough info to guide the choice well.
if strong_match:
# Copy over the IDs, for all children as well.
for orig_subnode, mod_subnode in zip_longest(orig_child.iter(),
mod_child.iter()):
assert mod_subnode.tag == orig_subnode.tag
mod_subnode.tail = orig_subnode.tail
# Pop off both lists.
orig_children.remove(orig_child)
mod_children .remove(mod_child)
elif weak_match:
# Copy this top level node id.
mod_child.tail = orig_child.tail
# Process the children of the nodes.
Match_Children(
orig_child,
mod_child,
attr_hash_dict,
no_attr_hash_dict,
text_based_node_matches)
# Pop off both lists.
orig_children.remove(orig_child)
mod_children .remove(mod_child)
else:
# Want to determine if this is an insertion or deletion.
# An insert should advance the mod_children but not the
# orig_children.
# A deletion should do the reverse, advancing only orig_children.
if mod_child_in_orig == True and orig_child_in_mod == False:
# This case suggests a node was removed.
orig_children.remove(orig_child)
elif mod_child_in_orig == False and orig_child_in_mod == True:
# This case suggests a node was added.
mod_children .remove(mod_child)
elif mod_child_in_orig == False and orig_child_in_mod == False:
# Neither node is in the other; remove both.
# TODO: check for a no-attribute match later, and if found,
# just remove one of these.
orig_children.remove(orig_child)
mod_children .remove(mod_child)
# TODO: add more annotation from earlier match checks, which
# can pick up cases where the text_diff didn't match the nodes,
# but above logic can guess which node might have a later match.
else:
# This indicates a reordering.
# Just pick a node to throw out; go with modified node,
# so the original tail is available for matching still
# (maybe slightly better?).
mod_children .remove(mod_child)
return
| 39.29771
| 104
| 0.645532
|
2180d5d372ad5f67d84f358bbf342c6dac9754d2
| 33,187
|
py
|
Python
|
userbot/plugins/animation1.py
|
sakhiofsakshi/catuserbot
|
4703928a4b4184e3118ffae7f853f988117fa66f
|
[
"MIT"
] | 9
|
2021-05-16T23:40:05.000Z
|
2022-03-26T02:08:17.000Z
|
userbot/plugins/animation1.py
|
RamshourieshR/catuserbot
|
b16b5a2531e89058b78ac01d979f01ffd30a37f7
|
[
"MIT"
] | null | null | null |
userbot/plugins/animation1.py
|
RamshourieshR/catuserbot
|
b16b5a2531e89058b78ac01d979f01ffd30a37f7
|
[
"MIT"
] | 47
|
2021-03-16T17:16:25.000Z
|
2022-03-29T12:59:36.000Z
|
from telethon import events
import asyncio
from userbot.utils import admin_cmd
from userbot import ALIVE_NAME
import random, re
from userbot import CMD_HELP
from collections import deque
import importlib.util
import random
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "cat"
@borg.on(admin_cmd(pattern="stupid$"))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 14)
await event.edit("brain")
animation_chars = [
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <)🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠<(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n(> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n <(^_^ <)🗑",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i %14 ])
@borg.on(admin_cmd(pattern=f"bombs$", outgoing=True))
async def _(event):
if event.fwd_from:
return
await event.edit("▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n")
await asyncio.sleep(0.5)
await event.edit("💣💣💣💣 \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n")
await asyncio.sleep(0.5)
await event.edit("▪️▪️▪️▪️ \n💣💣💣💣 \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n")
await asyncio.sleep(0.5)
await event.edit("▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n💣💣💣💣 \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n")
await asyncio.sleep(0.5)
await event.edit("▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n💣💣💣💣 \n▪️▪️▪️▪️ \n")
await asyncio.sleep(0.5)
await event.edit("▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n💣💣💣💣 \n")
await asyncio.sleep(1)
await event.edit("▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n💥💥💥💥 \n")
await asyncio.sleep(0.5)
await event.edit("▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n💥💥💥💥 \n💥💥💥💥 \n")
await asyncio.sleep(0.5)
await event.edit("▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n▪️▪️▪️▪️ \n😵😵😵😵 \n")
await asyncio.sleep(0.5)
await event.edit("`RIP PLOXXX......`")
await asyncio.sleep(2)
@borg.on(admin_cmd(pattern=r"call$"))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 18)
await event.edit("Calling Pavel Durov (ceo of telegram)......")
animation_chars = [
"`Connecting To Telegram Headquarters...`",
"`Call Connected.`",
"`Telegram: Hello This is Telegram HQ. Who is this?`",
f"`Me: Yo this is` {DEFAULTUSER} ,`Please Connect me to my lil bro,Pavel Durov `",
"`User Authorised.`",
"`Calling Shivamani ` `At +916969696969`",
"`Private Call Connected...`",
"`Me: Hello Sir, Please Ban This Telegram Account.`",
"`Shivamani : May I Know Who Is This?`",
f"`Me: Yo Brah, I Am` {DEFAULTUSER} ",
"`Shivamani : OMG!!! Long time no see, Wassup cat...\nI'll Make Sure That Guy Account Will Get Blocked Within 24Hrs.`",
"`Me: Thanks, See You Later Brah.`",
"`Shivamani : Please Don't Thank Brah, Telegram Is Our's. Just Gimme A Call When You Become Free.`",
"`Me: Is There Any Issue/Emergency???`",
"`Shivamani : Yes Sur, There Is A Bug In Telegram v69.6.9.\nI Am Not Able To Fix It. If Possible, Please Help Fix The Bug.`",
"`Me: Send Me The App On My Telegram Account, I Will Fix The Bug & Send You.`",
"`Shivamani : Sure Sur \nTC Bye Bye :)`",
"`Private Call Disconnected.`"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 18])
@borg.on(admin_cmd(pattern=f"kill$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.7
animation_ttl = range(0, 12)
await event.edit("ready to die dude.....")
animation_chars = [
"Fiiiiire",
"( ・ิω・ิ)︻デ═一-->",
"---->____________",
"------>__________",
"-------->_________",
"---------->_______",
"------------>_____",
"-------------->____",
"------------------>",
"------>;(^。^)ノ",
"( ̄ー ̄) DEAD",
"`Targeted user killed by Headshot 😈.😈.😈.😈.😈.😈.😈......`\n '#Sad_Reacts_Online'\n",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 12])
@borg.on(admin_cmd(pattern="wtf$"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.8
animation_ttl = range(0, 5)
await event.edit("wtf")
animation_chars = [
"What",
"What The",
"What The F",
"What The F Brah",
"What The F Brah\nhttps://telegra.ph//file/f3b760e4a99340d331f9b.jpg"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 5 ])
@borg.on(admin_cmd(pattern="ding$"))
async def _(event):
animation_interval = 0.3
animation_ttl = range(0, 30)
animation_chars = [
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬛🔴\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
]
if event.fwd_from:
return
await event.edit("ding..dong..ding..dong ...")
await asyncio.sleep(4)
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
@borg.on(admin_cmd(pattern=f"hypno$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 15)
await event.edit("hypo....")
animation_chars = [
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬛⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬛⬜⬛⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛",
"⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬛\n⬛⬜⬛⬜⬛\n⬛⬜⬜⬜⬛\n⬛⬛⬛⬛⬛",
"⬜⬜⬜\n⬜⬛⬜\n⬜⬜⬜",
"[👉🔴👈])"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 15])
@borg.on(admin_cmd(pattern=r"candy$"))
async def _(event):
if event.fwd_from:
return
deq = deque(list("🍦🍧🍩🍪🎂🍰🧁🍫🍬🍭"))
for _ in range(999):
await asyncio.sleep(0.4)
await event.edit("".join(deq))
deq.rotate(1)
@borg.on(admin_cmd(pattern="gangasta$"))
async def _(event):
await event.edit("EVERyBOdy")
await asyncio.sleep(0.3)
await event.edit("iZ")
await asyncio.sleep(0.2)
await event.edit("GangSTur")
await asyncio.sleep(0.5)
await event.edit("UNtIL ")
await asyncio.sleep(0.2)
await event.edit("I")
await asyncio.sleep(0.3)
await event.edit("ArRivE")
await asyncio.sleep(0.3)
await event.edit("🔥🔥🔥")
await asyncio.sleep(0.3)
await event.edit("EVERyBOdy iZ GangSTur UNtIL I ArRivE 🔥🔥🔥")
@borg.on(admin_cmd(pattern=f"charging$"))
async def timer_blankx(e):
txt=e.text[10:] + '\n\n`Tesla Wireless Charging (beta) Started...\nDevice Detected: Nokia 1100\nBattery Percentage:` '
j=10
k=j
for j in range(j):
await e.edit(txt + str(k))
k=k+10
await asyncio.sleep(1)
await asyncio.sleep(1)
await e.edit("`Tesla Wireless Charging (beta) Completed...\nDevice Detected: Nokia 1100 (Space Grey Varient)\nBattery Percentage:` [100%](https://telegra.ph/file/a45aa7450c8eefed599d9.mp4) ", link_preview=True)
| 140.029536
| 4,035
| 0.124266
|
b337bc2eb1491f1ae84ef879fdbbf5707032fa14
| 12,977
|
py
|
Python
|
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response2006.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response2006.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/QuotesAPIforDigitalPortals/v3/fds/sdk/QuotesAPIforDigitalPortals/model/inline_response2006.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Quotes API For Digital Portals
The quotes API combines endpoints for retrieving security end-of-day, delayed, and realtime prices with performance key figures and basic reference data on the security and market level. The API supports over 20 different price types for each quote and comes with basic search endpoints based on security identifiers and instrument names. Market coverage is included in the *Sample Use Cases* section below. The Digital Portal use case is focused on high-performance applications that are * serving millions of end-users, * accessible by client browsers via the internet, * supporting subscriptions for streamed updates out-of-the-box, * typically combining a wide variety of *for Digital Portals*-APIs into a highly use-case specific solution for customers, * integrated into complex infrastructures such as existing frontend frameworks, authentication services. All APIs labelled *for Digital Portals* have been designed for direct use by client web applications and feature extreme low latency: The average response time across all endpoints is 30 ms whereas 99% of all requests are answered in close to under 300ms. See the Time Series API for Digital Portals for direct access to price histories, and the News API for Digital Portals for searching and fetching related news. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response2006_data import InlineResponse2006Data
from fds.sdk.QuotesAPIforDigitalPortals.model.inline_response200_meta import InlineResponse200Meta
globals()['InlineResponse2006Data'] = InlineResponse2006Data
globals()['InlineResponse200Meta'] = InlineResponse200Meta
class InlineResponse2006(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (InlineResponse2006Data,), # noqa: E501
'meta': (InlineResponse200Meta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'meta': 'meta', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse2006 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (InlineResponse2006Data): [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse2006 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (InlineResponse2006Data): [optional] # noqa: E501
meta (InlineResponse200Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 48.421642
| 1,302
| 0.60245
|
b47d843a6300949080b59c927c7608e67db92759
| 2,259
|
py
|
Python
|
test/scons-time/obj/format-gnuplot.py
|
datalogics-staylor/scons
|
4c48deb6947066e53aac7d86621a7ec17f3b4034
|
[
"MIT"
] | 3
|
2017-01-06T09:26:23.000Z
|
2017-03-04T04:13:20.000Z
|
test/scons-time/obj/format-gnuplot.py
|
datalogics-staylor/scons
|
4c48deb6947066e53aac7d86621a7ec17f3b4034
|
[
"MIT"
] | 2
|
2015-10-27T20:17:24.000Z
|
2016-08-04T21:49:56.000Z
|
test/scons-time/obj/format-gnuplot.py
|
datalogics-staylor/scons
|
4c48deb6947066e53aac7d86621a7ec17f3b4034
|
[
"MIT"
] | 4
|
2015-03-31T16:09:15.000Z
|
2021-08-04T12:41:47.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify the obj --format=gnuplot option.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
test.fake_logfile('foo-000-0.log', 0)
test.fake_logfile('foo-000-1.log', 0)
test.fake_logfile('foo-000-2.log', 0)
test.fake_logfile('foo-001-0.log', 1)
test.fake_logfile('foo-001-1.log', 1)
test.fake_logfile('foo-001-2.log', 1)
expect_notitle = """\
set key bottom left
plot '-' title "Startup" with lines lt 1, \\
'-' title "Full build" with lines lt 2, \\
'-' title "Up-to-date build" with lines lt 3
# Startup
0 20040.000
1 20041.000
e
# Full build
0 20040.000
1 20041.000
e
# Up-to-date build
0 20040.000
1 20041.000
e
"""
expect_title = 'set title "TITLE"\n' + expect_notitle
test.run(arguments = 'obj --fmt gnuplot Node.Node',
stdout=expect_notitle)
test.run(arguments = 'obj --fmt=gnuplot --title TITLE Node.Node',
stdout=expect_title)
test.run(arguments = 'obj --format gnuplot --title TITLE Node.Node',
stdout=expect_title)
test.run(arguments = 'obj --format=gnuplot Node.Node',
stdout=expect_notitle)
test.pass_test()
| 29.337662
| 73
| 0.732625
|
e1cf4c9a574a2e7a81cb735d9a220cc261fc6efc
| 682
|
py
|
Python
|
settings.py
|
tumluliu/tracks-rest-api
|
e70bf473c8cb94a2747c7eac649bf0e4208a1131
|
[
"MIT"
] | 1
|
2019-08-04T01:08:14.000Z
|
2019-08-04T01:08:14.000Z
|
settings.py
|
tumluliu/tracks-rest-api
|
e70bf473c8cb94a2747c7eac649bf0e4208a1131
|
[
"MIT"
] | null | null | null |
settings.py
|
tumluliu/tracks-rest-api
|
e70bf473c8cb94a2747c7eac649bf0e4208a1131
|
[
"MIT"
] | null | null | null |
""" Read config.json file to construct the project-level settings object
"""
import json
import logging
logger = logging.getLogger(__name__)
# TODO: Identify the config.json file in a good way
CONFIG_FILE = "config.json"
with (open(CONFIG_FILE, 'r')) as conf_file:
conf = json.load(conf_file)
logger.debug("Get config from %s: %s", CONFIG_FILE, conf)
PG_DB_CONF = conf["pg_datasource"]["connection"]
logger.debug("Content of ['pg_datasource']['connection'] section: %s",
PG_DB_CONF)
PGBOUNCER_CONF = conf["pg_datasource"]["pgbouncer"]
logger.debug("Content of ['pg_datasource']['pgbouncer'] section: %s",
PGBOUNCER_CONF)
| 34.1
| 74
| 0.683284
|
1e46556d2608ebf16548a8cca36317b3febe8c43
| 12,514
|
py
|
Python
|
seg/lib/utils/flops/jit_handles.py
|
Frank-Abagnal/HRFormer
|
d7d362770de8648f8e0a379a71cee25f42954503
|
[
"MIT"
] | 254
|
2021-08-13T10:05:22.000Z
|
2022-03-25T09:21:45.000Z
|
seg/lib/utils/flops/jit_handles.py
|
Sense-X/HRFormer
|
1245b88b5824fbd8cdb358b5ee909a4e537a2ef5
|
[
"MIT"
] | 17
|
2021-09-08T01:40:49.000Z
|
2022-03-23T10:53:47.000Z
|
seg/lib/utils/flops/jit_handles.py
|
Sense-X/HRFormer
|
1245b88b5824fbd8cdb358b5ee909a4e537a2ef5
|
[
"MIT"
] | 48
|
2021-08-13T14:06:58.000Z
|
2022-03-30T02:41:26.000Z
|
# taken from detectron2 / fvcore with a few modifications
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/analysis.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import typing
from collections import Counter, OrderedDict
import numpy as np
from numpy import prod
from itertools import zip_longest
from numpy import prod
from typing import Any, Callable, List
def get_shape(val: object) -> typing.List[int]:
"""
Get the shapes from a jit value object.
Args:
val (torch._C.Value): jit value object.
Returns:
list(int): return a list of ints.
"""
if val.isCompleteTensor(): # pyre-ignore
r = val.type().sizes() # pyre-ignore
if not r:
r = [1]
return r
elif val.type().kind() in ("IntType", "FloatType"):
return [1]
else:
raise ValueError()
def addmm_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for fully connected layers with torch script.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Count flop for nn.Linear
# inputs is a list of length 3.
input_shapes = [get_shape(v) for v in inputs[1:3]]
# input_shapes[0]: [batch size, input feature dimension]
# input_shapes[1]: [batch size, output feature dimension]
assert len(input_shapes[0]) == 2
assert len(input_shapes[1]) == 2
batch_size, input_dim = input_shapes[0]
output_dim = input_shapes[1][1]
flop = batch_size * input_dim * output_dim
flop_counter = Counter({"addmm": flop})
return flop_counter
def bmm_flop_jit(inputs, outputs):
# Count flop for nn.Linear
# inputs is a list of length 3.
input_shapes = [get_shape(v) for v in inputs]
# input_shapes[0]: [batch size, input feature dimension]
# input_shapes[1]: [batch size, output feature dimension]
assert len(input_shapes[0]) == 3
assert len(input_shapes[1]) == 3
T, batch_size, input_dim = input_shapes[0]
output_dim = input_shapes[1][2]
flop = T * batch_size * input_dim * output_dim
flop_counter = Counter({"bmm": flop})
return flop_counter
def basic_binary_op_flop_jit(inputs, outputs, name):
input_shapes = [get_shape(v) for v in inputs]
# for broadcasting
input_shapes = [s[::-1] for s in input_shapes]
max_shape = np.array(list(zip_longest(*input_shapes, fillvalue=1))).max(1)
flop = prod(max_shape)
flop_counter = Counter({name: flop})
return flop_counter
def rsqrt_flop_jit(inputs, outputs):
input_shapes = [get_shape(v) for v in inputs]
flop = prod(input_shapes[0]) * 2
flop_counter = Counter({"rsqrt": flop})
return flop_counter
def dropout_flop_jit(inputs, outputs):
input_shapes = [get_shape(v) for v in inputs[:1]]
flop = prod(input_shapes[0])
flop_counter = Counter({"dropout": flop})
return flop_counter
def softmax_flop_jit(inputs, outputs):
# from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/profiler/internal/flops_registry.py
input_shapes = [get_shape(v) for v in inputs[:1]]
flop = prod(input_shapes[0]) * 5
flop_counter = Counter({'softmax': flop})
return flop_counter
def _reduction_op_flop_jit(inputs, outputs, reduce_flops=1, finalize_flops=0):
input_shapes = [get_shape(v) for v in inputs]
output_shapes = [get_shape(v) for v in outputs]
in_elements = prod(input_shapes[0])
out_elements = prod(output_shapes[0])
num_flops = (in_elements * reduce_flops
+ out_elements * (finalize_flops - reduce_flops))
return num_flops
def conv_flop_count(
x_shape: typing.List[int],
w_shape: typing.List[int],
out_shape: typing.List[int],
) -> typing.Counter[str]:
"""
This method counts the flops for convolution. Note only multiplication is
counted. Computation for addition and bias is ignored.
Args:
x_shape (list(int)): The input shape before convolution.
w_shape (list(int)): The filter shape.
out_shape (list(int)): The output shape after convolution.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
batch_size, Cin_dim, Cout_dim = x_shape[0], w_shape[1], out_shape[1]
out_size = prod(out_shape[2:])
kernel_size = prod(w_shape[2:])
flop = batch_size * out_size * Cout_dim * Cin_dim * kernel_size
flop_counter = Counter({"conv": flop})
return flop_counter
def conv_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for convolution using torch script.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before convolution.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after convolution.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs of Convolution should be a list of length 12. They represent:
# 0) input tensor, 1) convolution filter, 2) bias, 3) stride, 4) padding,
# 5) dilation, 6) transposed, 7) out_pad, 8) groups, 9) benchmark_cudnn,
# 10) deterministic_cudnn and 11) user_enabled_cudnn.
# + 12) allowTF32CuDNN for PyTorch 1.7
assert len(inputs) == 13
x, w = inputs[:2]
x_shape, w_shape, out_shape = (
get_shape(x),
get_shape(w),
get_shape(outputs[0]),
)
return conv_flop_count(x_shape, w_shape, out_shape)
def einsum_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for the einsum operation. We currently support
two einsum operations: "nct,ncp->ntp" and "ntg,ncg->nct".
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before einsum.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after einsum.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs of einsum should be a list of length 2.
# Inputs[0] stores the equation used for einsum.
# Inputs[1] stores the list of input shapes.
assert len(inputs) == 2
equation = inputs[0].toIValue() # pyre-ignore
# Get rid of white space in the equation string.
equation = equation.replace(" ", "")
# Re-map equation so that same equation with different alphabet
# representations will look the same.
letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys()
mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)}
equation = equation.translate(mapping)
input_shapes_jit = inputs[1].node().inputs() # pyre-ignore
input_shapes = [get_shape(v) for v in input_shapes_jit]
if equation == "abc,abd->acd":
n, c, t = input_shapes[0]
p = input_shapes[-1][-1]
flop = n * c * t * p
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abc,adc->adb":
n, t, g = input_shapes[0]
c = input_shapes[-1][1]
flop = n * t * g * c
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abcd,acd->abc":
n, t, g,c = input_shapes[0]
flop = n * t * g * c
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abcd,abce->acde":
n, t, g,i = input_shapes[0]
c = input_shapes[-1][-1]
flop = n * t * g *i* c
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abcd,aced->abce":
n, t, g ,i= input_shapes[0]
c = input_shapes[-1][-2]
flop = n * t * g * c * i
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abcd,ced->abce":
n, t, g ,i= input_shapes[0]
c = input_shapes[-1][-2]
flop = n * t * g * c * i
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abc,adc->abd":
n, t, g = input_shapes[0]
c = input_shapes[-1][-2]
flop = n * t * g * c
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abcd,ed->abce":
n, t, g, i = input_shapes[0]
c = input_shapes[-1][0]
flop = n * t * g * c * i
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abc,acd->abd":
n, t, g = input_shapes[0]
c = input_shapes[-1][-1]
flop = n * t * g * c
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abcd,abed->abce":
n, t, g, i = input_shapes[0]
c = input_shapes[-1][-2]
flop = n * t * g * c * i
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abcd,abde->abce":
n, t, g, i = input_shapes[0]
c = input_shapes[-1][-1]
flop = n * t * g * c * i
flop_counter = Counter({"einsum": flop})
return flop_counter
else:
raise NotImplementedError("Unsupported einsum operation. {}".format(equation))
"""
def matmul_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
This method counts the flops for matmul.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before matmul.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after matmul.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
# Inputs should be a list of length 2.
# Inputs contains the shapes of two matrices.
print("+_+in matmul")
for v in inputs:
print(get_shape(v))
input_shapes = [get_shape(v) for v in inputs]
assert len(input_shapes) == 2
assert len(input_shapes[1]) == 2
assert input_shapes[0][-1] == input_shapes[1][0]
batch_dim = input_shapes[0][0]
m1_dim, m2_dim = input_shapes[1]
flop = m1_dim * m2_dim * batch_dim
flop_counter = Counter({"matmul": flop})
return flop_counter
"""
def matmul_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
"""
This method counts the flops for matmul.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before matmul.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after matmul.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs should be a list of length 2.
# Inputs contains the shapes of two matrices.
input_shapes = [get_shape(v) for v in inputs]
assert len(input_shapes) == 2, input_shapes
assert input_shapes[0][-1] == input_shapes[1][-2], input_shapes
flop = prod(input_shapes[0]) * input_shapes[-1][-1]
flop_counter = Counter({"matmul": flop})
return flop_counter
def batchnorm_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for batch norm.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before batch norm.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after batch norm.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs[0] contains the shape of the input.
input_shape = get_shape(inputs[0])
assert 2 <= len(input_shape) <= 5
flop = prod(input_shape) * 4
flop_counter = Counter({"batchnorm": flop})
return flop_counter
| 36.063401
| 117
| 0.634649
|
92c543fac364705107a3b02720aeb18a6fd6062d
| 26,894
|
py
|
Python
|
hyperopt_wgan.py
|
lliutianc/gan-flow
|
00922d76a3a78ffbd882bc2eaef46c84d7d34fef
|
[
"MIT"
] | 13
|
2020-06-19T22:11:20.000Z
|
2021-11-03T10:07:26.000Z
|
hyperopt_wgan.py
|
lliutianc/gan-flow
|
00922d76a3a78ffbd882bc2eaef46c84d7d34fef
|
[
"MIT"
] | 1
|
2021-11-03T10:09:02.000Z
|
2021-11-04T01:41:52.000Z
|
hyperopt_wgan.py
|
lliutianc/gan-flow
|
00922d76a3a78ffbd882bc2eaef46c84d7d34fef
|
[
"MIT"
] | null | null | null |
import os
import sys
import argparse
from functools import partial
import time
import matplotlib.pyplot as plt
# import seaborn.apionly as sns
import seaborn as sns
import torch.nn as nn
import torch.nn.utils.spectral_norm as spectral_norm
from torch import autograd
from torch.autograd import Variable
import ray.tune as tune
from ray.tune.schedulers import ASHAScheduler
from residualblock import ResidualBlock
from gu import *
from util import *
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(curPath)
parser = argparse.ArgumentParser()
# action
parser.add_argument(
'--cuda',
type=int,
default=2,
help='Number of CUDA to use if available.')
# data
parser.add_argument('--seed', type=int, default=1, help='Random seed to use.')
parser.add_argument('--gu_num', type=int, default=8,
help='Components of GU clusters.')
# model parameters
parser.add_argument(
'--prior',
type=str,
choices=[
'uniform',
'gaussian'],
default='gaussian',
help='Distribution of prior.')
parser.add_argument(
'--prior_size',
type=int,
default=3,
help='Dimension of prior.')
parser.add_argument('--hidden_size', type=int, default=256,
help='Hidden layer size for GAN/WGAN.')
parser.add_argument(
'--n_hidden',
type=int,
default=3,
help='Number of hidden layers(Residual blocks) in GAN/WGAN.')
parser.add_argument(
'--activation_fn',
type=str,
choices=[
'relu',
'leakyrelu',
'tanh'],
default='leakyrelu',
help='What activation function to use in GAN/WGAN.')
parser.add_argument('--activation_slope', type=float, default=1e-2,
help='Negative slope of LeakyReLU activation function.')
parser.add_argument(
'--no_spectral_norm',
action='store_true',
help='Do not use spectral normalization in critic.')
# parser.add_argument('--no_batch_norm', action='store_true', help='Do not use batch norm')
parser.add_argument(
'--residual_block',
action='store_true',
help='Use residual block')
parser.add_argument('--dropout', action='store_true', help='Use dropout')
parser.add_argument(
'--norm',
type=str,
choices=[
'layer',
'batch',
None],
default='batch',
help='Which normaliztion to be used.')
parser.add_argument(
'--init_method',
type=str,
choices=[
'default',
'xav_u'],
default='default',
help='Use residual block')
# training params
parser.add_argument(
'--batch_size',
type=int,
default=2048,
help='Batch size in training.')
parser.add_argument('--niters', type=int, default=50000,
help='Total iteration numbers in training.')
parser.add_argument(
'--lr',
type=float,
default=1e-4,
help='Learning rate in Adam.')
parser.add_argument(
'--weight_decay',
type=float,
default=1e-6,
help='Weight decay in Adam.')
parser.add_argument('--beta1', type=float, default=0.9, help='Beta 1 in Adam.')
parser.add_argument(
'--beta2',
type=float,
default=0.999,
help='Beta 2 in Adam.')
parser.add_argument(
'--clr',
action='store_true',
help='Use cyclic LR in training.')
parser.add_argument(
'--clr_size_up',
type=int,
default=2000,
help='Size of up step in cyclic LR.')
parser.add_argument('--clr_scale', type=int, default=3,
help='Scale of base lr in cyclic LR.')
parser.add_argument(
'--k',
type=int,
default=5,
help='Update times of critic in each iterations.')
parser.add_argument(
'--l',
type=float,
default=0.1,
help='Coefficient for Gradient penalty.')
parser.add_argument(
'--auto',
action='store_true',
help='Using parameter searching to find the best result.')
parser.add_argument(
'--auto_full',
action='store_true',
help='Using parameter searching to find the best result.')
parser.add_argument(
'--eval_size',
type=int,
default=100000,
help='Sample size in evaluation.')
parser.add_argument(
'--exp_num',
type=int,
default=100,
help='Number of experiments.')
parser.add_argument(
'--eval_est',
action='store_true',
default=False,
help='use w_distance_estimated to choose best model.')
parser.add_argument(
'--log_interval',
type=int,
default=1000,
help='How often to show loss statistics and save models/samples.')
config = { # 'prior': tune.choice(['uniform', 'gaussian']),
'prior_size': tune.choice([1, 3, 5]), 'hidden_size': tune.choice([64, 128, 256]),
'n_hidden': tune.choice([1, 2, 3, 4]), 'activation_slope': 1e-2,
'activation_fn': tune.choice(['relu', 'leakyrelu', 'tanh']), 'init_method': tune.choice(['default', 'xav_u']),
'lr': tune.choice([1e-5, 5e-5, 1e-4, 5e-4, 1e-3]),
'weight_decay': tune.choice([0., 1e-6, 5e-6, 1e-5, 5e-5, 1e-4, 1e-3]),
'beta1': tune.choice([0.5, 0.6, 0.7, 0.8, 0.9]), 'beta2': tune.choice([0.7, 0.8, 0.9, 0.999]),
# In auto_full, these are not used
'clr_scale': tune.choice([2, 3, 4, 5]), 'clr_size_up': tune.choice([2000, 4000, 6000, 8000]),
'k': tune.choice([1, 5, 10, 50, 100]), 'l': tune.choice([0, 1e-2, 1e-1, 1, 10]),
'norm': tune.choice(['batch', None]), 'spect_norm': tune.choice([1, 0]),
# 'spect_norm': 1, # try enforcing spect_norm in critic.
# 'dropout': None,
# 'clr': None,
}
class Generator (nn.Module):
def __init__(
self,
input_size,
n_hidden,
hidden_size,
activation_fn,
activation_slope,
init_method,
norm='batch',
res_block=False,
dropout=False,
dropout_p=0.5):
super().__init__()
# Define activation function.
if activation_fn == 'relu':
activation = nn.ReLU(inplace=True)
elif activation_fn == 'leakyrelu':
activation = nn.LeakyReLU(
inplace=True, negative_slope=activation_slope)
elif activation_fn == 'tanh':
activation = nn.Tanh()
else:
raise NotImplementedError('Check activation_fn.')
if norm == 'batch':
norm = nn.BatchNorm1d
elif norm == 'layer':
norm = nn.LayerNorm
else:
norm = None
modules = [
nn.Linear(
input_size,
hidden_size),
norm(hidden_size)] if norm else [
nn.Linear(
input_size,
hidden_size)]
for _ in range(n_hidden):
# Add dropout.
if dropout:
modules += [nn.Dropout(dropout_p)]
# Add act and layer.
if res_block:
modules += [activation,
ResidualBlock(hidden_size,
hidden_size,
activation,
False,
norm)]
else:
modules += [activation, nn.Linear(hidden_size, hidden_size)]
if norm:
modules += [norm(hidden_size)]
if dropout:
modules += [nn.Dropout(dropout_p)]
modules += [activation, nn.Linear(hidden_size, 1)]
self.model = nn.Sequential(*modules)
self.init_method = init_method
self.model.apply(self.__init)
def forward(self, x):
return self.model(x)
def __init(self, m):
classname = m.__class__.__name__
if self.init_method == 'default':
return
elif self.init_method == 'xav_u':
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight, gain=1)
else:
raise NotImplementedError('Check init_method')
class Critic (nn.Module):
def __init__(
self,
n_hidden,
hidden_size,
activation_fn,
activation_slope,
init_method,
spect_norm=True,
norm='layer',
res_block=False,
dropout=False,
dropout_p=0.5):
super().__init__()
# Define activation function.
if activation_fn == 'relu':
activation = nn.ReLU(inplace=True)
elif activation_fn == 'leakyrelu':
activation = nn.LeakyReLU(
inplace=True, negative_slope=activation_slope)
elif activation_fn == 'tanh':
activation = nn.Tanh()
else:
raise NotImplementedError('Check activation_fn.')
if norm == 'layer':
norm = nn.LayerNorm
else:
norm = None
modules = [
spectral_norm(
nn.Linear(
1,
hidden_size)) if spect_norm else nn.Linear(
1,
hidden_size)]
if norm:
modules += [norm(hidden_size)]
for _ in range(n_hidden):
# Add dropout.
if dropout:
modules += [nn.Dropout(dropout_p)]
# Add act and layer.
if res_block:
modules += [activation,
ResidualBlock(hidden_size,
hidden_size,
activation,
spect_norm,
norm)]
else:
modules += [activation,
spectral_norm(
nn.Linear(
hidden_size,
hidden_size)) if spect_norm else nn.Linear(
hidden_size,
hidden_size)]
if norm:
modules += [norm(hidden_size)]
if dropout:
modules += [nn.Dropout(dropout_p)]
modules += [activation]
modules += [spectral_norm(nn.Linear(hidden_size, 1))
if spect_norm else nn.Linear(hidden_size, 1)]
self.model = nn.Sequential(*modules)
self.init_method = init_method
self.model.apply(self.__init)
def forward(self, x):
return self.model(x)
def __init(self, m):
classname = m.__class__.__name__
if self.init_method == 'default':
return
elif self.init_method == 'xav_u':
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight, gain=1)
else:
raise NotImplementedError('Check init_method')
class WGANTrainer (tune.Trainable):
def _setup(self, config):
self.config = config
self.prior = torch.randn if self.config['prior'] == 'uniform' else partial(
torch.normal, mean=0., std=1.)
self.i = 0
# model
self.generator = Generator(
input_size=config['prior_size'],
n_hidden=config['n_hidden'],
hidden_size=config['hidden_size'],
activation_slope=config['activation_slope'],
init_method=config['init_method'],
activation_fn=config['activation_fn'],
norm=config['norm'],
res_block=config['residual_block'],
dropout=config['dropout']).to(
config['device'])
self.critic = Critic(
n_hidden=config['n_hidden'],
hidden_size=config['hidden_size'],
activation_slope=config['activation_slope'],
init_method=config['init_method'],
activation_fn=config['activation_fn'],
norm=config['norm'],
res_block=config['residual_block'],
dropout=config['dropout'],
spect_norm=config['spect_norm']).to(
config['device'])
# data
if self.config['gu_num'] == 8:
self.dataloader = GausUniffMixture(
n_mixture=self.config['gu_num'],
mean_dist=10,
sigma=2,
unif_intsect=1.5,
unif_ratio=1.,
device=self.config['device'])
else:
self.dataloader = GausUniffMixture(
n_mixture=self.config['gu_num'],
mean_dist=5,
sigma=0.1,
unif_intsect=5,
unif_ratio=3,
device=self.config['device'])
# optimizer
self.optim_g = torch.optim.Adam(
[
p for p in self.generator.parameters() if p.requires_grad],
lr=config['lr'],
betas=(
config['beta1'],
config['beta2']),
weight_decay=config['weight_decay'])
self.optim_c = torch.optim.Adam(
[
p for p in self.critic.parameters() if p.requires_grad],
lr=config['lr'],
betas=(
config['beta1'],
config['beta2']),
weight_decay=config['weight_decay'])
if self.config['clr']:
self.sche_g = torch.optim.lr_scheduler.CyclicLR(
self.optim_g,
base_lr=config['lr'] /
config['clr_scale'],
max_lr=config['lr'],
step_size_up=config['clr_size_up'],
cycle_momentum=False)
self.sche_c = torch.optim.lr_scheduler.CyclicLR(
self.optim_c,
base_lr=config['lr'] /
config['clr_scale'],
max_lr=config['lr'],
step_size_up=config['clr_size_up'],
cycle_momentum=False)
else:
self.sche_g, self.sche_c = None, None
def _train(self):
if self.i == 0:
self.start = time.time()
self.i += 1
self.generator.train()
self.critic.train()
for k in range(self.config['k']):
real = self.dataloader.get_sample(self.config['batch_size'])
prior = self.prior(
size=(
self.config['batch_size'],
self.config['prior_size']),
device=self.config['device'])
fake = self.generator(prior)
loss_c = self.critic(fake.detach()).mean() - \
self.critic(real).mean()
loss_c += self.config["l"] * self._gradient_penalty(real, fake)
self.optim_c.zero_grad()
loss_c.backward()
self.optim_c.step()
if self.sche_c:
self.sche_c.step()
prior = self.prior(
size=(
self.config['batch_size'],
self.config['prior_size']),
device=self.config['device'])
fake = self.generator(prior)
loss_g = - self.critic(fake).mean()
self.optim_g.zero_grad()
loss_g.backward()
self.optim_g.step()
if self.sche_g:
self.sche_g.step()
if self.i % self.config['log_interval'] == 0 and not self.config['auto']:
cur_state_path = os.path.join(model_path, str(self.i))
torch.save(self.generator, cur_state_path + '_' + 'generator.pth')
torch.save(self.critic, cur_state_path + '_' + 'critic.pth')
w_distance_real, w_distance_est = self._evaluate(
display=True, niter=self.i)
logger.info(
f'Iter: {self.i} / {self.config["niters"]}, Time: {round (time.time () - self.start, 4)}, '
f'w_distance_real: {w_distance_real}, w_distance_estimated: {w_distance_est}')
self.start = time.time()
w_distance_real, w_distance_est = self._evaluate(
display=False, niter=self.config['niters'])
return {
'w_distance_estimated': w_distance_est,
'w_distance_real': w_distance_real,
'iteration': self.i}
def _save(self, tmp_checkpoint_dir):
generator_path = os.path.join(tmp_checkpoint_dir, 'generator.pth')
critic_path = os.path.join(tmp_checkpoint_dir, 'critic.pth')
torch.save(self.generator.state_dict(), generator_path)
torch.save(self.critic.state_dict(), critic_path)
return tmp_checkpoint_dir
def _save_whole(self, tmp_checkpoint_dir):
generator_path = os.path.join(tmp_checkpoint_dir, 'generator.pth')
critic_path = os.path.join(tmp_checkpoint_dir, 'critic.pth')
torch.save(self.generator.to('cpu'), generator_path)
torch.save(self.critic.to('cpu'), critic_path)
return tmp_checkpoint_dir
def _restore(self, checkpoint_dir):
generator_path = os.path.join(checkpoint_dir, 'generator.pth')
critic_path = os.path.join(checkpoint_dir, 'critic.pth')
self.generator.load_state_dict(torch.load(generator_path))
self.critic.load_state_dict(torch.load(critic_path))
def _evaluate(self, display, niter):
self.generator.eval()
self.critic.eval()
with torch.no_grad():
real = self.dataloader.get_sample(self.config['eval_size'])
prior = self.prior(
size=(
self.config['eval_size'],
self.config['prior_size']),
device=self.config['device'])
fake = self.generator(prior)
w_distance_est = self.critic(
real).mean() - self.critic(fake).mean()
w_distance_est = abs(round(w_distance_est.item(), 5))
w_distance_real = w_distance(real, fake)
if display:
# save images
real_sample = real.cpu().data.numpy().squeeze()
fake_sample = fake.cpu().data.numpy().squeeze()
plt.cla()
fig = plt.figure(figsize=(FIG_W, FIG_H))
ax = fig.add_subplot(111)
ax.set_facecolor('whitesmoke')
ax.grid(True, color='white', linewidth=2)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().tick_bottom()
kde_num = 200
min_real, max_real = min(real_sample), max(real_sample)
kde_width_real = kde_num * \
(max_real - min_real) / args.eval_size
min_fake, max_fake = min(fake_sample), max(fake_sample)
kde_width_fake = kde_num * \
(max_fake - min_fake) / args.eval_size
sns.kdeplot(
real_sample,
bw=kde_width_real,
label='Data',
color='green',
shade=True,
linewidth=6)
sns.kdeplot(
fake_sample,
bw=kde_width_fake,
label='Model',
color='orange',
shade=True,
linewidth=6)
ax.set_title(
f'True EM Distance: {w_distance_real}, '
f'Est. EM Distance: {w_distance_est}.',
fontsize=FONTSIZE)
ax.legend(loc=2, fontsize=FONTSIZE)
ax.set_ylabel('Estimated Density by KDE', fontsize=FONTSIZE)
ax.tick_params(axis='x', labelsize=FONTSIZE * 0.7)
ax.tick_params(
axis='y',
labelsize=FONTSIZE * 0.5,
direction='in')
cur_img_path = os.path.join(image_path, str(niter) + '.jpg')
plt.tight_layout()
plt.savefig(cur_img_path)
plt.close()
return w_distance_real, w_distance_est
def _gradient_penalty(self, real, fake):
batch_size = fake.size(0)
alpha = torch.rand(size=(batch_size, 1), device=self.config['device'])
alpha = alpha.expand_as(real)
interpolated = alpha * real + (1 - alpha) * fake
interpolated = Variable(
interpolated,
requires_grad=True).to(
self.config['device'])
interpolation_loss = self.critic(interpolated)
gradients = autograd.grad(
outputs=interpolation_loss,
inputs=interpolated,
grad_outputs=torch.ones(
interpolation_loss.size(),
device=self.config['device']),
create_graph=True,
retain_graph=True)[0]
gradients = gradients.view(gradients.size(0), -1)
return ((gradients.norm(2, dim=1) - 1.) ** 2).mean()
if __name__ == '__main__':
args = parser.parse_args()
args.spect_norm = not args.no_spectral_norm
args.eval_real = not args.eval_est
if args.auto or args.auto_full:
args.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
else:
args.device = torch.device(
f'cuda:{args.cuda}' if torch.cuda.is_available() else 'cpu')
if args.auto_full:
# Search over all tweaks, but don't search over clr parameters.
# Further, since ResNet doesn't improve, don't search over it as well.
config = {'prior': tune.choice(['uniform', 'gaussian']), 'prior_size': tune.choice([1, 3, 5]),
'hidden_size': tune.choice([64, 128, 256]), 'n_hidden': tune.choice([1, 2, 3, 4]),
'activation_slope': 1e-2, 'activation_fn': tune.choice(['relu', 'leakyrelu', 'tanh']),
'init_method': tune.choice(['default', 'xav_u']),
'lr': tune.choice([1e-5, 5e-5, 1e-4, 5e-4, 1e-3]),
'weight_decay': tune.choice([0., 1e-6, 5e-6, 1e-5, 5e-5, 1e-4, 1e-3]),
'beta1': tune.choice([0.5, 0.6, 0.7, 0.8, 0.9]), 'beta2': tune.choice([0.7, 0.8, 0.9, 0.999]),
'k': tune.choice([1, 5, 10, 50, 100]), 'l': tune.choice([0, 1e-2, 1e-1, 1, 10]),
'spect_norm': tune.choice([1, 0]),
'norm': tune.choice(['batch', None]), 'dropout': tune.choice([1, 0]), 'clr': tune.choice([1, 0]),
}
# Add constant params in args to config.
dict_args = vars(args)
for key in dict_args:
if key in ['no_batch_norm', 'no_spectral_norm', 'batch_norm']:
# redundant args.
continue
if key in config:
if not args.auto:
# In Manual experiment: overwrite existed config settings.
config[key] = dict_args[key]
else:
config[key] = dict_args[key]
if args.auto:
if not args.clr:
# Reset hyperparameter choices in clr if it is not a tuning field.
config["clr_scale"] = 2
config["clr_size_up"] = 2000
if args.residual_block:
# Set deeper depth of Resnet.
config["n_hidden"] = tune.choice([1, 3, 5, 7])
config['device'] = args.device
# save path
search_type = 'automatic' if args.auto else 'manual'
experiment = f'gu{args.gu_num}/wgan/{args.niters}|' + args.eval_real * 'w_distance_real|' + (
args.eval_est) * 'w_distance_estimated|' + 'resnt|' * args.residual_block + 'fcnet|' * (
not args.residual_block) + f'{args.prior}|' + f'clr|' * args.clr + f'dropout|' * args.dropout + f'{args.activation_fn}|' * (
not args.auto) + f'{args.norm}_norm|' * (not args.auto) + (
'no_' * args.no_spectral_norm + 'spect_norm|') * (not args.auto) + (
'no_' * (args.l != 0) + 'gradient_penalty|') * (
not args.auto) + f'{args.init_method}_init|{args.k}_updates' * (not args.auto)
model_path = os.path.join(curPath, search_type, 'models', experiment)
image_path = os.path.join(curPath, search_type, 'images', experiment)
if args.auto_full:
model_path = os.path.join(
curPath,
search_type,
f'models/gu{args.gu_num}/wgan/{args.niters}|full_new')
image_path = os.path.join(
curPath,
search_type,
f'images/gu{args.gu_num}/wgan/{args.niters}|full_new')
makedirs(model_path, image_path)
log_path = model_path + '/logs'
logger = get_logger(log_path)
logger.info('Trained model will save to: ' + model_path)
logger.info('Result plot will save to : ' + image_path)
logger.info('Search space: ')
logger.info(config)
logger.info(SEP)
logger.info('Start training...')
if args.auto:
if args.eval_est:
sched = ASHAScheduler(
metric='w_distance_estimated',
mode='min',
grace_period=args.niters // 10,
max_t=args.niters,
time_attr="iteration")
else:
sched = ASHAScheduler(
metric='w_distance_real',
mode='min',
grace_period=args.niters // 10,
max_t=args.niters,
time_attr="iteration")
analysis = tune.run(WGANTrainer, name=experiment, scheduler=sched, # search_alg=algo,
stop={"iteration": args.niters}, resources_per_trial={"cpu": 3, "gpu": 1},
num_samples=args.exp_num, checkpoint_at_end=True, config=config)
if args.eval_real:
best_config = analysis.get_best_config(
metric='w_distance_real', mode='min')
best_path = analysis.get_best_logdir(
metric='w_distance_real', mode='min')
else:
best_config = analysis.get_best_config(
metric='w_distance_estimated', mode='min')
best_path = analysis.get_best_logdir(
metric='w_distance_estimated', mode='min')
results = analysis.dataframe()
if args.eval_real:
results.to_csv(model_path + f'results.csv')
else:
results.to_csv(model_path + f'results.csv')
logger.info(f'Best config is: {best_config}')
best_model_dir = retrieve_best_result_from_tune(best_path)
else:
trainer = WGANTrainer(config)
for _ in range(1, config['niters'] + 1):
_ = trainer._train()
best_config = config
best_model_dir = model_path
logger.info(f'Saving to {model_path}')
trainer._save(model_path)
logger.info('Start evaluation...')
eval_trainer = WGANTrainer(best_config)
eval_trainer._restore(best_model_dir)
eval_trainer._evaluate(display=True, niter=args.niters)
logger.info('Saving to: ' + model_path)
eval_trainer._save_whole(model_path)
logger.info('Finish All...')
logger.info(SEP)
| 34.259873
| 132
| 0.548821
|
bb35520069f248479e887d2a0c54108e9e121d80
| 1,090
|
py
|
Python
|
AppPkg/Applications/Python/Python-2.7.2/Lib/encodings/euc_jis_2004.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/encodings/euc_jis_2004.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 30
|
2019-01-04T10:14:56.000Z
|
2020-10-12T14:00:31.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/encodings/euc_jis_2004.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
#
# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jis_2004')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jis_2004',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| 27.25
| 75
| 0.686239
|
8f71601bd22eaa229058ccf4fc955e17ae14aa28
| 7,772
|
py
|
Python
|
pcapkit/const/ipv4/router_alert.py
|
chellvs/PyPCAPKit
|
f8d66f9955904196b71a6143e49ff4ec4c4922dc
|
[
"BSD-3-Clause"
] | 131
|
2018-10-12T09:45:44.000Z
|
2022-03-31T18:58:14.000Z
|
pcapkit/const/ipv4/router_alert.py
|
chellvs/PyPCAPKit
|
f8d66f9955904196b71a6143e49ff4ec4c4922dc
|
[
"BSD-3-Clause"
] | 39
|
2018-08-18T12:15:04.000Z
|
2022-03-07T20:28:08.000Z
|
pcapkit/const/ipv4/router_alert.py
|
chellvs/PyPCAPKit
|
f8d66f9955904196b71a6143e49ff4ec4c4922dc
|
[
"BSD-3-Clause"
] | 23
|
2018-10-12T09:45:52.000Z
|
2022-03-05T15:23:00.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
"""IPv4 Router Alert Option Values"""
from aenum import IntEnum, extend_enum
__all__ = ['RouterAlert']
class RouterAlert(IntEnum):
"""[RouterAlert] IPv4 Router Alert Option Values"""
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_0 = 1
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_1 = 2
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_2 = 3
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_3 = 4
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_4 = 5
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_5 = 6
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_6 = 7
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_7 = 8
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_8 = 9
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_9 = 10
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_10 = 11
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_11 = 12
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_12 = 13
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_13 = 14
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_14 = 15
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_15 = 16
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_16 = 17
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_17 = 18
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_18 = 19
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_19 = 20
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_20 = 21
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_21 = 22
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_22 = 23
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_23 = 24
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_24 = 25
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_25 = 26
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_26 = 27
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_27 = 28
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_28 = 29
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_29 = 30
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_30 = 31
#: Aggregated Reservation Nesting Level [:rfc:`3175`]
Aggregated_Reservation_Nesting_Level_31 = 32
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_0 = 33
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_1 = 34
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_2 = 35
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_3 = 36
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_4 = 37
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_5 = 38
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_6 = 39
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_7 = 40
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_8 = 41
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_9 = 42
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_10 = 43
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_11 = 44
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_12 = 45
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_13 = 46
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_14 = 47
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_15 = 48
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_16 = 49
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_17 = 50
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_18 = 51
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_19 = 52
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_20 = 53
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_21 = 54
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_22 = 55
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_23 = 56
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_24 = 57
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_25 = 58
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_26 = 59
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_27 = 60
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_28 = 61
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_29 = 62
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_30 = 63
#: QoS NSLP Aggregation Levels 0-31 [:rfc:`5974`]
QoS_NSLP_Aggregation_Level_31 = 64
#: NSIS NATFW NSLP [:rfc:`5973`]
NSIS_NATFW_NSLP = 65
#: Reserved [:rfc:`5350`]
Reserved = 65535
@staticmethod
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return RouterAlert(key)
if key not in RouterAlert._member_map_: # pylint: disable=no-member
extend_enum(RouterAlert, key, default)
return RouterAlert[key]
@classmethod
def _missing_(cls, value):
"""Lookup function used when value is not found."""
if not (isinstance(value, int) and 0 <= value <= 65535):
raise ValueError('%r is not a valid %s' % (value, cls.__name__))
if 66 <= value <= 65502:
#: Unassigned
extend_enum(cls, 'Unassigned_%d' % value, value)
return cls(value)
if 65503 <= value <= 65534:
#: Reserved for experimental use [:rfc:`5350`]
extend_enum(cls, 'Reserved for experimental use_%d' % value, value)
return cls(value)
return super()._missing_(value)
| 33.213675
| 79
| 0.701107
|
2672de8e7d463742dbd062a3b586f07c10031ecc
| 12,612
|
py
|
Python
|
tests/json_rpc_tests/rpc_test_framework.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 33
|
2019-05-27T13:04:35.000Z
|
2022-03-17T13:33:05.000Z
|
tests/json_rpc_tests/rpc_test_framework.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 31
|
2019-06-10T01:55:47.000Z
|
2022-03-09T07:27:49.000Z
|
tests/json_rpc_tests/rpc_test_framework.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 25
|
2019-05-13T18:39:24.000Z
|
2021-11-16T03:07:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Module for testing expected JSON RPC input/outputs when the tools service is being used"""
import io
import json
import logging
import os
import re
import threading
from typing import Callable, List, Optional, Tuple
from unittest import mock
from ossdbtoolsservice.hosting.json_message import JSONRPCMessageType
import ossdbtoolsservice.ossdbtoolsservice_main as ossdbtoolsservice_main
from ossdbtoolsservice.utils import constants
class RPCTestMessage:
"""
Class representing an individual JSON RPC message sent as part of an end-to-end integration test
:param method: The name of the JSON RPC method (e.g. 'connection/connect')
:param message_type: The JSONRpcMessageType for the message
:param expect_error_response: Whether the server will respond to this message with an error.
This parameter will be ignored for non-request messages. Default is False.
:param response_verifier: An optional callback that will be called with the response object,
which can be used to verify that the response is the expected one. This parameter will be
ignored for non-request messages. For request messages, if this is not provided, the test will
verify that some response was sent, but will not verify its details.
:param notification_verifiers: An optional list of verifiers that can be used to verify that
the server sent the expected notifications following this message. Each verifier is a tuple
where the first element is a filter function to determine if a given notification was sent in
response to this message, and the second element is an optional verifier that will be called
for each notification that the filter function returns True for. If the message causes the
server to send back notifications, this argument must be provided.
"""
request_id = 0
def __init__(self, method: str, params: str, message_type: JSONRPCMessageType, expect_error_response: bool = False,
response_verifier: Callable[[dict], None] = None,
notification_verifiers: List[Tuple[Callable[[dict], bool], Optional[Callable[[dict], None]]]] = None):
self.method = method
self.params = json.loads(params) if params is not None else None
self.message_type = message_type
if self.message_type is JSONRPCMessageType.Request:
self.request_id = None
self.expect_error_response = expect_error_response
self.response_verifier = response_verifier
self.notification_verifiers = notification_verifiers
def initialize_request_id(self):
"""For a request message, initialize its request ID"""
if self.message_type is not JSONRPCMessageType.Request:
raise RuntimeError('initialize_request_id can only be called on request messages')
elif self.request_id is not None:
raise RuntimeError('Request ID already initialized')
self.request_id = RPCTestMessage.request_id
RPCTestMessage.request_id += 1
def __str__(self):
message_dictionary = {
'jsonrpc': '2.0',
'method': self.method
}
if self.params is not None:
message_dictionary['params'] = self.params
if self.message_type is JSONRPCMessageType.Request:
if self.request_id is None:
self.initialize_request_id()
message_dictionary['id'] = self.request_id
return json.dumps(message_dictionary)
class JSONRPCTestCase:
def __init__(self, test_messages: List[RPCTestMessage]):
initialization_messages = [
DefaultRPCTestMessages.initialize(),
DefaultRPCTestMessages.version(),
DefaultRPCTestMessages.change_configuration(),
DefaultRPCTestMessages.list_capabilities()]
shutdown_messages = [DefaultRPCTestMessages.shutdown()]
self.messages = initialization_messages + test_messages + shutdown_messages
def run(self):
# Start the server
input_stream, output_stream, output_info = JSONRPCTestCase.start_service()
output = ""
# Send all messages to the server
for message in self.messages:
expected_write_calls = output_info[0] + 2 * ((len(message.notification_verifiers) if message.notification_verifiers is not None else 0) +
(1 if message.message_type is JSONRPCMessageType.Request else 0))
bytes_message = b'Content-Length: ' + str.encode(str(len(str(message)))) + b'\r\n\r\n' + str.encode(str(message))
output_info[1].acquire()
input_stream.write(bytes_message)
input_stream.flush()
if message.method == 'shutdown':
continue
output_info[1].wait_for(lambda: output_info[0] >= expected_write_calls, 10)
if output_info[0] < expected_write_calls:
raise RuntimeError(f'Timed out waiting for response or notification for method {message.method}')
# Process the output into responses and notifications
output = output_stream.getvalue().decode()
messages = re.split(r'Content-Length: .+\s+', output)
response_dict = {}
notifications = []
for message_str in messages:
if not message_str:
continue
message = json.loads(message_str.strip())
if 'id' in message:
message_id = message['id']
if message_id in response_dict:
raise RuntimeError(f'Server sent multiple responses with ID {message_id}')
response_dict[message_id] = message
else:
notifications.append(message)
# Verify that each request has a response
requests = [message for message in self.messages if message.message_type is JSONRPCMessageType.Request]
responses_to_verify = {response['id'] for response in response_dict.values()}
for request in requests:
if request.method == 'shutdown':
continue
response = response_dict.get(request.request_id)
if response is None:
raise RuntimeError(f'Request ID {request.request_id} (method {request.method}) has no response')
# Verify that the response is or is not an error, as expected
if request.expect_error_response:
if 'error' not in response:
raise RuntimeError(f'Expected error response to request method {request.method} but got \n{json.dumps(response)}')
else:
if 'result' not in response:
raise RuntimeError(f'Expected successful response to request method {request.method} but got \n{json.dumps(response)}')
# Run the response verifier if present
responses_to_verify.remove(response['id'])
if request.response_verifier is not None:
request.response_verifier(response)
if responses_to_verify:
raise RuntimeError('Server sent the following responses that had no corresponding request:\n{}'.format('\n'.join(
[json.dumps(response_dict[response_id]) for response_id in responses_to_verify])))
# Verify the notifications
notifications_to_verify = {index for index, _ in enumerate(notifications)}
for message in self.messages:
verifiers = message.notification_verifiers
if not verifiers:
continue
for filter_function, verification_function in verifiers:
filtered_notifications = [(index, notification) for index, notification in enumerate(notifications) if filter_function(notification)]
notification_count = len(filtered_notifications)
if notification_count == 0:
raise RuntimeError(f'Expected 1 notification for request with method {message.method} but got 0')
# If there was more than 1 notification matching the filter, take the first one that matches
index = None
notification = None
for filtered_notification in filtered_notifications:
index = filtered_notification[0]
notification = filtered_notification[1]
if index in notifications_to_verify:
break
notifications_to_verify.remove(index)
if verification_function is not None:
verification_function(notification)
if notifications_to_verify:
raise RuntimeError('Server sent the following unexpected notifications:\n{}'.format('\n'.join(
[json.dumps(notifications[index]) for index in notifications_to_verify])))
@staticmethod
def start_service():
# Set up the server's input and output
input_r, input_w = os.pipe()
server_input_stream = open(input_r, 'rb', buffering=0, closefd=False)
test_input_stream = open(input_w, 'wb', buffering=0, closefd=False)
server_output_stream = io.BytesIO()
server_output_stream.close = mock.Mock()
output_info = [0, threading.Condition()] # Number of times write called, Condition variable for monitoring info
# Mock the server output stream's write method so that the test knows how many messages have been written
old_write_method = server_output_stream.write
def mock_write(message):
output_info[1].acquire()
bytes_written = old_write_method(message)
output_info[0] += 1
output_info[1].notify()
output_info[1].release()
return bytes_written
server_output_stream.write = mock.Mock(side_effect=mock_write)
logger = logging.Logger('test')
logger.addHandler(logging.NullHandler())
server = ossdbtoolsservice_main._create_server(server_input_stream, server_output_stream, logger, constants.PG_PROVIDER_NAME)
server.start()
return test_input_stream, server_output_stream, output_info
class DefaultRPCTestMessages:
@staticmethod
def initialize():
return RPCTestMessage(
'initialize',
'{"processId": 4340, "capabilities": {}, "trace": "off"}',
JSONRPCMessageType.Request
)
@staticmethod
def version():
return RPCTestMessage('version', None, JSONRPCMessageType.Request)
@staticmethod
def change_configuration():
return RPCTestMessage(
'workspace/didChangeConfiguration',
'{"settings":{"pgsql":{"logDebugInfo":false,"enabled":true,"defaultDatabase":"postgres","format":{"keywordCase":null,"identifierCase":null,"stripComments":false,"reindent":true}}}}', # noqa
JSONRPCMessageType.Notification
)
@staticmethod
def list_capabilities():
return RPCTestMessage(
'capabilities/list',
'{"hostName":"carbon","hostVersion":"1.0"}',
JSONRPCMessageType.Request
)
@staticmethod
def connection_request(owner_uri, connection_options):
connection_request = RPCTestMessage(
'connection/connect',
'{"ownerUri":"%s","connection":{"options":%s}}' % (owner_uri, json.dumps(connection_options)),
JSONRPCMessageType.Request,
notification_verifiers=[(
lambda notification: notification['method'] == 'connection/complete' and notification['params']['ownerUri'] == owner_uri,
None
)]
)
language_flavor_notification = RPCTestMessage(
'connection/languageflavorchanged',
'{"uri":"%s","language":"sql","flavor":"PGSQL"}' % owner_uri,
JSONRPCMessageType.Notification,
notification_verifiers=[(
lambda notification: notification['method'] == 'textDocument/intelliSenseReady' and notification['params']['ownerUri'] == owner_uri,
None
)]
)
return (connection_request, language_flavor_notification)
@staticmethod
def shutdown():
return RPCTestMessage('shutdown', None, JSONRPCMessageType.Request)
| 49.07393
| 202
| 0.650571
|
724ba40e951370a4d28d95bbafcbd8a270f8ca3b
| 168
|
py
|
Python
|
powernad/Object/StatReport/RequestObject/CreateStatReportObject.py
|
devkingsejong/python---PowerNad
|
c308bba4cb31126ccd318e4574071f4057f5d23f
|
[
"CNRI-Python"
] | 34
|
2017-03-16T14:32:49.000Z
|
2022-03-18T09:23:05.000Z
|
powernad/Object/StatReport/RequestObject/CreateStatReportObject.py
|
devkingsejong/python---PowerNad
|
c308bba4cb31126ccd318e4574071f4057f5d23f
|
[
"CNRI-Python"
] | 16
|
2018-02-08T02:37:56.000Z
|
2022-03-15T13:45:34.000Z
|
powernad/Object/StatReport/RequestObject/CreateStatReportObject.py
|
devkingsejong/python---PowerNad
|
c308bba4cb31126ccd318e4574071f4057f5d23f
|
[
"CNRI-Python"
] | 19
|
2017-03-28T21:48:18.000Z
|
2021-11-30T05:13:43.000Z
|
class CreateStatReportObject:
def __init__(self, reportTp, statDt):
self.reportTp = reportTp
self.statDt = statDt
#self.customerId = 1109868
| 33.6
| 41
| 0.672619
|
123c6e757a258fa380ac64e450c24064b2bfa83f
| 3,371
|
py
|
Python
|
image/simclr/train_simclr.py
|
huxin711/ColossalAI-Examples
|
fa3560683dec891315d5356e76c10ff20e41266f
|
[
"Apache-2.0"
] | null | null | null |
image/simclr/train_simclr.py
|
huxin711/ColossalAI-Examples
|
fa3560683dec891315d5356e76c10ff20e41266f
|
[
"Apache-2.0"
] | null | null | null |
image/simclr/train_simclr.py
|
huxin711/ColossalAI-Examples
|
fa3560683dec891315d5356e76c10ff20e41266f
|
[
"Apache-2.0"
] | null | null | null |
import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer, hooks
from colossalai.utils import get_dataloader, MultiTimer
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.engine.schedule import NonPipelineSchedule
from torchvision.datasets import CIFAR10
from NT_Xentloss import NT_Xentloss
from myhooks import TotalBatchsizeHook
from models.simclr import SimCLR
from augmentation import SimCLRTransform
def build_dataset_train():
augment = SimCLRTransform()
train_dataset = CIFAR10(root=gpc.config.dataset.root,
transform=augment,
train=True,
download=True)
return get_dataloader(
dataset=train_dataset,
shuffle=True,
num_workers=1,
batch_size=gpc.config.BATCH_SIZE,
pin_memory=True,
)
def build_dataset_test():
augment = SimCLRTransform()
val_dataset = CIFAR10(root=gpc.config.dataset.root,
transform=augment,
train=False)
return get_dataloader(
dataset=val_dataset,
add_sampler=False,
num_workers=1,
batch_size=gpc.config.BATCH_SIZE,
pin_memory=True,
)
def main():
colossalai.launch_from_torch(config='./config.py')
# get logger
logger = get_dist_logger()
# build model
model = SimCLR(model='resnet18').cuda()
# build dataloader
train_dataloader = build_dataset_train()
test_dataloader = build_dataset_test()
# build loss
criterion = NT_Xentloss()
# build optimizer
optimizer = colossalai.nn.FusedSGD(model.parameters(), lr=gpc.config.LEARNING_RATE,
weight_decay=gpc.config.WEIGHT_DECAY, momentum=gpc.config.MOMENTUM)
# lr_scheduelr
lr_scheduler = CosineAnnealingWarmupLR(optimizer, warmup_steps=10, total_steps=gpc.config.NUM_EPOCHS)
engine, train_dataloader, test_dataloader, _ = colossalai.initialize(
model, optimizer, criterion, train_dataloader, test_dataloader
)
logger.info("initialized colossalai components", ranks=[0])
# build a timer to measure time
timer = MultiTimer()
def process_batch_data(batch_data):
(x1, x2), img_cls = batch_data
# return data and label
return dict(x1=x1, x2=x2), img_cls
schedule = NonPipelineSchedule(batch_data_process_func=process_batch_data)
# build trainer
trainer = Trainer(engine=engine, logger=logger, timer=timer, schedule=schedule)
# build hooks
hook_list = [
hooks.LossHook(),
hooks.LogMetricByEpochHook(logger),
hooks.LRSchedulerHook(lr_scheduler, by_epoch=True),
TotalBatchsizeHook(),
# comment if you do not need to use the hooks below
hooks.SaveCheckpointHook(interval=50, checkpoint_dir=f'./ckpt/{gpc.config.LOG_NAME}'),
hooks.TensorboardHook(log_dir=f'./tb_logs/{gpc.config.LOG_NAME}', ranks=[0]),
]
# start training
trainer.fit(
train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
epochs=gpc.config.NUM_EPOCHS,
hooks=hook_list,
display_progress=True,
test_interval=1
)
if __name__ == '__main__':
main()
| 29.831858
| 106
| 0.68051
|
fd4263c962ca32cfe70ddfad457532b60f8ae69b
| 38,969
|
py
|
Python
|
cloudify_cli/commands/plugins.py
|
tirkarthi/cloudify-cli
|
68365052649a6deea9896cccb05e66d0a6d737cb
|
[
"Apache-2.0"
] | null | null | null |
cloudify_cli/commands/plugins.py
|
tirkarthi/cloudify-cli
|
68365052649a6deea9896cccb05e66d0a6d737cb
|
[
"Apache-2.0"
] | null | null | null |
cloudify_cli/commands/plugins.py
|
tirkarthi/cloudify-cli
|
68365052649a6deea9896cccb05e66d0a6d737cb
|
[
"Apache-2.0"
] | null | null | null |
########
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import json
import os
import time
import click
import wagon
from cloudify.models_states import PluginInstallationState
from cloudify_cli import execution_events_fetcher
from cloudify_cli.logger import get_events_logger, CloudifyJSONEncoder, output
from cloudify_cli.exceptions import (
SuppressedCloudifyCliError, CloudifyCliError, CloudifyValidationError,
)
from cloudify_rest_client.constants import VISIBILITY_EXCEPT_PRIVATE
from cloudify_rest_client.exceptions import CloudifyClientError
from .. import env, utils
from ..logger import get_global_json_output
from ..table import print_data, print_single, print_details
from ..cli import helptexts, cfy
from ..utils import (prettify_client_error,
get_visibility,
validate_visibility)
from ..labels_utils import get_printable_resource_labels
PLUGINS_BUNDLE_COLUMNS = ['id', 'package_name', 'package_version',
'distribution', 'distribution_release']
PLUGIN_COLUMNS = PLUGINS_BUNDLE_COLUMNS + \
['installed on', 'uploaded_at', 'visibility', 'tenant_name',
'created_by', 'yaml_url_path']
PLUGINS_UPDATE_COLUMNS = ['id', 'state', 'blueprint_id', 'temp_blueprint_id',
'execution_id', 'deployments_to_update',
'visibility', 'created_at', 'forced']
GET_DATA_COLUMNS = ['file_server_path', 'supported_platform',
'supported_py_versions']
@cfy.group(name='plugins')
@cfy.options.common_options
def plugins():
"""Handle plugins on the manager
"""
pass
@plugins.command(name='validate',
short_help='Validate a plugin')
@cfy.argument('plugin-path')
@cfy.options.common_options
@cfy.pass_logger
def validate(plugin_path, logger):
"""Validate a plugin
This will try to validate the plugin's archive is not corrupted.
A valid plugin is a wagon (http://github.com/cloudify-cosomo/wagon)
in the tar.gz format.
`PLUGIN_PATH` is the path to wagon archive to validate.
"""
logger.info('Validating plugin {0}...'.format(plugin_path))
wagon.validate(plugin_path)
logger.info('Plugin validated successfully')
@plugins.command(name='delete',
short_help='Delete a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.force(help=helptexts.FORCE_DELETE_PLUGIN)
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(plugin_id, force, logger, client, tenant_name):
"""Delete a plugin from the manager
`PLUGIN_ID` is the id of the plugin to delete.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Deleting plugin {0}...'.format(plugin_id))
client.plugins.delete(plugin_id=plugin_id, force=force)
logger.info('Plugin deleted')
@plugins.command(name='upload',
short_help='Upload a plugin [manager only]')
@cfy.argument('plugin-path')
@cfy.options.plugin_yaml_path()
@cfy.options.plugin_icon_path()
@cfy.options.plugin_title()
@cfy.options.private_resource
@cfy.options.visibility()
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_context
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def upload(ctx,
plugin_path,
yaml_path,
icon_path,
title,
private_resource,
visibility,
logger,
client,
tenant_name):
"""Upload a plugin to the manager
`PLUGIN_PATH` is the path to wagon archive to upload.
"""
client.license.check()
# Test whether the path is a valid URL. If it is, no point in doing local
# validations - it will be validated on the server side anyway
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Creating plugin zip archive..')
wagon_path = utils.get_local_path(plugin_path, create_temp=True)
yaml_path = utils.get_local_path(yaml_path, create_temp=True)
zip_files = [wagon_path, yaml_path]
zip_descr = 'wagon + yaml'
if icon_path:
icon_path = utils.get_local_path(icon_path,
destination='icon.png',
create_temp=True)
zip_files.append(icon_path)
zip_descr += ' + icon'
zip_path = utils.zip_files(zip_files)
progress_handler = utils.generate_progress_handler(zip_path, '')
visibility = get_visibility(private_resource, visibility, logger)
logger.info('Uploading plugin archive (%s)..', zip_descr)
try:
plugin = client.plugins.upload(zip_path,
plugin_title=title,
visibility=visibility,
progress_callback=progress_handler)
logger.info("Plugin uploaded. Plugin's id is {0}".format(plugin.id))
finally:
for f in zip_files:
os.remove(f)
os.remove(zip_path)
@plugins.command(name='bundle-upload',
short_help='Upload a bundle of plugins [manager only]')
@cfy.options.plugins_bundle_path
@cfy.pass_client()
@cfy.pass_logger
@cfy.options.extended_view
def upload_caravan(client, logger, path):
client.license.check()
if not path:
logger.info("Starting upload of plugins bundle, "
"this may take few minutes to complete.")
path = 'http://repository.cloudifysource.org/' \
'cloudify/wagons/cloudify-plugins-bundle.tgz'
progress = utils.generate_progress_handler(path, '')
plugins_ = client.plugins.upload(path, progress_callback=progress)
logger.info("Bundle uploaded, {0} Plugins installed."
.format(len(plugins_)))
if len(plugins_) > 0:
print_data(PLUGINS_BUNDLE_COLUMNS, plugins_, 'Plugins:')
@plugins.command(name='download',
short_help='Download a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.output_path
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_logger
@cfy.pass_client()
def download(plugin_id, output_path, logger, client, tenant_name):
"""Download a plugin from the manager
`PLUGIN_ID` is the id of the plugin to download.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Downloading plugin {0}...'.format(plugin_id))
plugin_name = output_path if output_path else plugin_id
progress_handler = utils.generate_progress_handler(plugin_name, '')
target_file = client.plugins.download(plugin_id,
output_path,
progress_handler)
logger.info('Plugin downloaded as {0}'.format(target_file))
@plugins.command(name='download_yaml',
short_help='Download a plugin yaml [manager only]')
@cfy.argument('plugin-id')
@cfy.options.output_path
@cfy.options.common_options
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.pass_logger
@cfy.pass_client()
def download_yaml(plugin_id, output_path, logger, client, tenant_name):
"""Download a plugin yaml from the manager
`PLUGIN_ID` is the id of the plugin yaml to download.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Downloading plugin yaml {0}...'.format(plugin_id))
plugin_name = output_path if output_path else plugin_id
progress_handler = utils.generate_progress_handler(plugin_name, '')
target_file = client.plugins.download_yaml(plugin_id,
output_path,
progress_handler)
logger.info('Plugin yaml downloaded as {0}'.format(target_file))
def _format_installation_state(plugin):
"""Format the 'installation_state' into a human-readable 'installed on'"""
if not plugin.get('installation_state'):
return ''
agents = 0
managers = 0
errors = 0
for state in plugin['installation_state']:
if state['state'] == PluginInstallationState.ERROR:
errors += 1
elif state['state'] != PluginInstallationState.INSTALLED:
continue
if state.get('manager'):
managers += 1
elif state.get('agent'):
agents += 1
parts = []
if managers:
parts.append('{0} managers'.format(managers))
if agents:
parts.append('{0} agents'.format(agents))
if errors:
parts.append('{0} errors'.format(errors))
return ', '.join(parts)
@plugins.command(name='get',
short_help='Retrieve plugin information [manager only]')
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(plugin_id, logger, client, tenant_name, get_data):
"""Retrieve information for a specific plugin
`PLUGIN_ID` is the id of the plugin to get information on.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Retrieving plugin {0}...'.format(plugin_id))
plugin = client.plugins.get(plugin_id, _get_data=get_data)
columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
plugin['installed on'] = _format_installation_state(plugin)
if get_global_json_output():
# for json, also include installation_state because it's useful
print_single(columns + ['installation_state'], plugin, 'Plugin:', 50)
return
states = {}
for state in plugin.pop('installation_state', []):
if state.get('manager'):
label = 'Manager {0}'.format(state['manager'])
elif state.get('agent'):
label = 'Agent {0}'.format(state['agent'])
states[label] = state['state']
print_details({
col: plugin.get(col) for col in columns
}, 'Plugin:')
print_details(states, 'Plugin installation state:')
@plugins.command(name='list',
short_help='List plugins [manager only]')
@cfy.options.sort_by('uploaded_at')
@cfy.options.descending
@cfy.options.tenant_name_for_list(
required=False, resource_name_for_help='plugin')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
@cfy.options.extended_view
def list(sort_by,
descending,
tenant_name,
all_tenants,
search,
pagination_offset,
pagination_size,
logger,
client,
get_data):
"""List all plugins on the manager
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Listing all plugins...')
plugins_list = client.plugins.list(sort=sort_by,
is_descending=descending,
_all_tenants=all_tenants,
_search=search,
_get_data=get_data,
_offset=pagination_offset,
_size=pagination_size)
for plugin in plugins_list:
plugin['installed on'] = _format_installation_state(plugin)
columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS
if get_global_json_output():
columns += ['installation_state']
print_data(columns, plugins_list, 'Plugins:')
total = plugins_list.metadata.pagination.total
logger.info('Showing {0} of {1} plugins'.format(len(plugins_list),
total))
def _wait_for_plugin_to_be_installed(client, plugin_id, managers, agents,
timeout, logger):
logger.info(
'Waiting for plugin %s to be installed on the managers: [%s] '
'and agents: [%s]',
plugin_id, ', '.join(managers), ', '.join(agents)
)
wait_managers = set(managers)
wait_agents = set(agents)
errors = 0
deadline = time.time() + timeout
while time.time() < deadline:
for pstate in client.plugins.get(plugin_id)['installation_state']:
if pstate['state'] == PluginInstallationState.INSTALLED:
if pstate.get('manager') in wait_managers:
wait_managers.remove(pstate['manager'])
logger.info('Finished installing on manager %s',
pstate['manager'])
if pstate.get('agent') in wait_agents:
wait_agents.remove(pstate['agent'])
logger.info('Finished installing on agent %s',
pstate['agent'])
if pstate['state'] == PluginInstallationState.ERROR:
if pstate.get('manager') in wait_managers:
errors += 1
wait_managers.remove(pstate['manager'])
logger.info('Error installing on manager %s: %s',
pstate['manager'], pstate['error'])
if pstate.get('agent') in wait_agents:
errors += 1
wait_agents.remove(pstate['agent'])
logger.info('Error installing on agent %s: %s',
pstate['agent'], pstate['error'])
if not wait_managers and not wait_agents:
break
time.sleep(1)
else:
raise CloudifyCliError(
'Timed out waiting for plugin {0} to be installed on managers: '
'[{1}] and agents: [{2}]'
.format(plugin_id,
', '.join(managers),
', '.join(agents))
)
if errors:
raise CloudifyCliError('Encountered errors while installing plugins')
@plugins.command(name='install',
short_help='Install a plugin [manager only]')
@cfy.argument('plugin-id')
@cfy.options.common_options
@click.option('--manager-hostname', multiple=True,
help='The hostname of the manager to install the plugin on '
'(can be passed multiple times)')
@click.option('--agent-name', multiple=True,
help='The name of the agent to install the plugin on'
'(can be passed multiple times)')
@cfy.options.timeout(300)
@cfy.pass_client()
@cfy.pass_logger
def install(plugin_id, manager_hostname, agent_name, timeout, client, logger):
"""Install the plugin on the given managers and agents.
Force plugin installation before it needs to be used.
If manager hostnames and agent names are not provided, default to
installing on all managers.
This will wait for the plugins to be installed, up to timeout seconds.
"""
if not manager_hostname and not agent_name:
manager_hostname = [
manager.hostname for manager in client.manager.get_managers()
]
client.plugins.install(
plugin_id,
agents=agent_name,
managers=manager_hostname
)
_wait_for_plugin_to_be_installed(
client, plugin_id, manager_hostname, agent_name, timeout, logger)
@plugins.command(name='set-global',
short_help="Set the plugin's visibility to global")
@cfy.argument('plugin-id')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_global(plugin_id, logger, client):
"""Set the plugin's visibility to global
`PLUGIN_ID` is the id of the plugin to set global
"""
status_codes = [400, 403, 404]
with prettify_client_error(status_codes, logger):
client.plugins.set_global(plugin_id)
logger.info('Plugin `{0}` was set to global'.format(plugin_id))
logger.info("This command will be deprecated soon, please use the "
"'set-visibility' command instead")
@plugins.command(name='set-visibility',
short_help="Set the plugin's visibility")
@cfy.argument('plugin-id')
@cfy.options.visibility(required=True, valid_values=VISIBILITY_EXCEPT_PRIVATE)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_visibility(plugin_id, visibility, logger, client):
"""Set the plugin's visibility
`PLUGIN_ID` is the id of the plugin to update
"""
validate_visibility(visibility, valid_values=VISIBILITY_EXCEPT_PRIVATE)
status_codes = [400, 403, 404]
with prettify_client_error(status_codes, logger):
client.plugins.set_visibility(plugin_id, visibility)
logger.info('Plugin `{0}` was set to {1}'.format(plugin_id,
visibility))
@plugins.command(name='set-owner',
short_help="Change plugin's ownership")
@cfy.argument('plugin-id')
@cfy.options.new_username()
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=True)
@cfy.pass_logger
def set_owner(plugin_id, username, logger, client):
"""Set a new owner for the plugin."""
plugin = client.plugins.set_owner(plugin_id, username)
logger.info('Plugin `%s` is now owned by user `%s`.',
plugin_id, plugin.get('created_by'))
@plugins.command(name='update',
short_help='Update the plugins of all the deployments of '
'the blueprint [manager only]')
@cfy.argument('blueprint-id', required=False)
@cfy.options.all_blueprints
@cfy.options.all_tenants
@cfy.options.except_blueprints
@cfy.options.plugin_names
@cfy.options.plugins_to_latest
@cfy.options.plugins_all_to_latest
@cfy.options.plugins_to_minor
@cfy.options.plugins_all_to_minor
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
mutually_exclusive_with=['all_tenants'],
resource_name_for_help='plugin')
@cfy.assert_manager_active()
@cfy.options.include_logs
@cfy.options.json_output
@cfy.pass_logger
@cfy.pass_client()
@cfy.options.force(help=helptexts.FORCE_PLUGINS_UPDATE)
@cfy.options.auto_correct_types
@cfy.options.reevaluate_active_statuses(help=helptexts.
REEVALUATE_ACTIVE_STATUSES_PLUGINS)
def update(blueprint_id,
all_blueprints,
all_tenants,
except_blueprints,
plugin_names,
to_latest,
all_to_latest,
to_minor,
all_to_minor,
include_logs,
json_output,
logger,
client,
tenant_name,
force,
auto_correct_types,
reevaluate_active_statuses):
"""Update the plugins of all the deployments of the given blueprint
or any blueprint in case `--all-blueprints` flag was used instead of
providing a BLUEPRINT_ID. This will update the deployments one by one
until all succeeded.
"""
# Validate input arguments
if ((blueprint_id and all_blueprints) or
(not blueprint_id and not all_blueprints)):
raise CloudifyValidationError(
'ERROR: Invalid command syntax. Either provide '
'a BLUEPRINT_ID or use --all-blueprints flag.')
if except_blueprints and not all_blueprints:
raise CloudifyValidationError(
'ERROR: Invalid command syntax. Cannot list blueprints '
'exceptions unless used with --all-blueprints flag.')
all_to_minor = bool(all_to_minor)
if all_to_latest is None:
all_to_latest = not all_to_minor
if (all_to_latest and all_to_minor) or \
(not all_to_latest and not all_to_minor):
raise CloudifyValidationError(
'ERROR: Invalid command syntax. --all-to-latest and '
'--all-to-minor are mutually exclusive.')
if to_latest and all_to_latest:
raise CloudifyValidationError(
'ERROR: Invalid command syntax. --all-to-latest and '
'--to-latest are mutually exclusive. If you want to upgrade '
'only the specific plugins, use --plugin-name parameter instead.')
if to_minor and all_to_minor:
raise CloudifyValidationError(
'ERROR: Invalid command syntax. --all-to-minor and '
'--to-minor are mutually exclusive. If you want to upgrade '
'only the specific plugins, use --plugin-name parameter instead.')
utils.explicit_tenant_name_message(tenant_name, logger)
if blueprint_id:
_update_a_blueprint(blueprint_id, all_tenants, plugin_names,
to_latest, all_to_latest, to_minor, all_to_minor,
include_logs, json_output, logger,
client, force, auto_correct_types,
reevaluate_active_statuses)
elif all_blueprints:
update_results = {'successful': [], 'failed': []}
pagination_offset = 0
while True:
blueprints = client.blueprints.list(
sort='created_at',
_all_tenants=all_tenants,
_offset=pagination_offset,
)
for blueprint in blueprints:
if blueprint.id in except_blueprints:
continue
try:
_update_a_blueprint(blueprint.id, all_tenants,
plugin_names, to_latest, all_to_latest,
to_minor, all_to_minor,
include_logs, json_output, logger,
client, force, auto_correct_types,
reevaluate_active_statuses)
update_results['successful'].append(blueprint.id)
except (CloudifyClientError, SuppressedCloudifyCliError) as ex:
update_results['failed'].append(blueprint.id)
logger.warning('Error during %s blueprint update. %s',
blueprint.id, ex)
pagination_offset += blueprints.metadata.pagination.size
if len(blueprints) < blueprints.metadata.pagination.size or \
0 == blueprints.metadata.pagination.size:
break
if update_results['successful']:
logger.info('Successfully updated %d blueprints.',
len(update_results['successful']))
if update_results['failed']:
logger.error('Failed updating %d blueprints.',
len(update_results['failed']))
logger.error('Failed blueprints: %s.',
', '.join(update_results['failed']))
def _update_a_blueprint(blueprint_id,
all_tenants,
plugin_names,
to_latest,
all_to_latest,
to_minor,
all_to_minor,
include_logs,
json_output,
logger,
client,
force,
auto_correct_types,
reevaluate_active_statuses):
logger.info('Updating the plugins of the deployments of the blueprint '
'{}'.format(blueprint_id))
plugins_update = client.plugins_update.update_plugins(
blueprint_id, force=force, plugin_names=plugin_names,
to_latest=to_latest, all_to_latest=all_to_latest,
to_minor=to_minor, all_to_minor=all_to_minor,
auto_correct_types=auto_correct_types,
reevaluate_active_statuses=reevaluate_active_statuses,
all_tenants=all_tenants,
)
events_logger = get_events_logger(json_output)
execution = execution_events_fetcher.wait_for_execution(
client,
client.executions.get(plugins_update.execution_id),
events_handler=events_logger,
include_logs=include_logs,
timeout=None # don't timeout ever
)
if execution.error:
logger.info("Execution of workflow '{0}' for blueprint "
"'{1}' failed. [error={2}]"
.format(execution.workflow_id,
blueprint_id,
execution.error))
logger.info('Failed updating plugins for blueprint {0}. '
'Plugins update ID: {1}. Execution id: {2}'
.format(blueprint_id,
plugins_update.id,
execution.id))
raise SuppressedCloudifyCliError()
logger.info("Finished executing workflow '{0}'".format(
execution.workflow_id))
logger.info('Successfully updated plugins for blueprint {0}. '
'Plugins update ID: {1}. Execution id: {2}'
.format(blueprint_id,
plugins_update.id,
execution.id))
@plugins.command(
name='get-update',
short_help='Retrieve plugins update information [manager only]'
)
@cfy.argument('plugins-update-id')
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
resource_name_for_help='plugins update')
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
@cfy.options.extended_view
def manager_get_update(plugins_update_id, logger, client, tenant_name):
"""Retrieve information for a specific plugins update
`PLUGINS_UPDATE_ID` is the id of the plugins update to get information on.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Retrieving plugins update {0}...'.format(plugins_update_id))
plugins_update_dict = client.plugins_update.get(plugins_update_id)
print_single(
PLUGINS_UPDATE_COLUMNS, plugins_update_dict, 'Plugins update:')
@plugins.command(name='history', short_help='List plugins updates '
'[manager only]')
@cfy.options.blueprint_id()
@cfy.options.sort_by()
@cfy.options.descending
@cfy.options.tenant_name_for_list(
required=False, resource_name_for_help='plugins update')
@cfy.options.all_tenants
@cfy.options.search
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
@cfy.options.extended_view
def manager_history(blueprint_id,
sort_by,
descending,
all_tenants,
search,
pagination_offset,
pagination_size,
logger,
client,
tenant_name):
"""Show blueprint history by listing plugins updates
If `--blueprint-id` is provided, list plugins updates for that
blueprint. Otherwise, list plugins updates for all blueprints.
"""
utils.explicit_tenant_name_message(tenant_name, logger)
if blueprint_id:
logger.info('Listing plugins updates for blueprint {0}...'.format(
blueprint_id))
else:
logger.info('Listing all plugins updates...')
plugins_updates = client.plugins_update.list(
sort=sort_by,
is_descending=descending,
_all_tenants=all_tenants,
_search=search,
_offset=pagination_offset,
_size=pagination_size,
blueprint_id=blueprint_id
)
total = plugins_updates.metadata.pagination.total
print_data(
PLUGINS_UPDATE_COLUMNS, plugins_updates, 'Plugins updates:')
logger.info('Showing {0} of {1} plugins updates'.format(
len(plugins_updates), total))
@plugins.group(name='blueprint-labels',
short_help="Handle plugin's blueprint labels")
@cfy.options.common_options
def blueprint_labels():
if not env.is_initialized():
env.raise_uninitialized()
@blueprint_labels.command(name='list',
short_help="List blueprint-labels of a specific "
"plugin")
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list_blueprint_labels(plugin_id,
logger,
client,
tenant_name):
_list_metadata(plugin_id, 'blueprint_labels', tenant_name,
client.plugins, logger)
@blueprint_labels.command(name='add',
short_help="Add blueprint-labels to a specific "
"plugin")
@cfy.argument('labels-list',
callback=cfy.parse_and_validate_labels)
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def add_blueprint_labels(labels_list,
plugin_id,
logger,
client,
tenant_name):
"""LABELS_LIST: <key>:<value>,<key>:<value>.
Any comma and colon in <value> must be escaped with '\\'."""
_add_metadata(plugin_id, 'blueprint_labels', labels_list, tenant_name,
client.plugins, logger)
@blueprint_labels.command(name='delete',
short_help="Delete blueprint-labels from a specific "
"plugin")
@cfy.argument('label', callback=cfy.parse_and_validate_label_to_delete)
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete_blueprint_labels(label,
plugin_id,
logger,
client,
tenant_name):
"""
LABEL: A mixed list of labels and keys, i.e.
<key>:<value>,<key>,<key>:<value>. If <key> is provided,
all labels associated with this key will be deleted from the deployment.
Any comma and colon in <value> must be escaped with `\\`
"""
_delete_metadata(plugin_id, 'blueprint_labels', label, tenant_name,
client.plugins, logger)
@plugins.group(name='deployment-labels',
short_help="Handle plugin's (deployment) labels")
@cfy.options.common_options
def deployment_labels():
if not env.is_initialized():
env.raise_uninitialized()
@deployment_labels.command(name='list',
short_help="List (deployment) labels of a specific "
"plugin")
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list_deployment_labels(plugin_id,
logger,
client,
tenant_name):
_list_metadata(plugin_id, 'labels', tenant_name, client.plugins, logger)
@deployment_labels.command(name='add',
short_help="Add (deployment) labels to a specific "
"plugin")
@cfy.argument('labels-list',
callback=cfy.parse_and_validate_labels)
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def add_deployment_labels(labels_list,
plugin_id,
logger,
client,
tenant_name):
"""LABELS_LIST: <key>:<value>,<key>:<value>.
Any comma and colon in <value> must be escaped with '\\'."""
_add_metadata(plugin_id, 'labels', labels_list, tenant_name,
client.plugins, logger)
@deployment_labels.command(name='delete',
short_help="Delete (deployment) labels from "
"a specific plugin")
@cfy.argument('label', callback=cfy.parse_and_validate_label_to_delete)
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete_deployment_labels(label,
plugin_id,
logger,
client,
tenant_name):
"""
LABEL: A mixed list of labels and keys, i.e.
<key>:<value>,<key>,<key>:<value>. If <key> is provided,
all labels associated with this key will be deleted from the deployment.
Any comma and colon in <value> must be escaped with `\\`
"""
_delete_metadata(plugin_id, 'labels', label, tenant_name,
client.plugins, logger)
@plugins.group(name='resource-tags',
short_help="Handle plugin's resource tags")
@cfy.options.common_options
def resource_tags():
if not env.is_initialized():
env.raise_uninitialized()
@resource_tags.command(name='list',
short_help="List resource tags of a specific plugin")
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list_resource_tags(plugin_id, logger, client, tenant_name):
_list_metadata(plugin_id, 'resource_tags', tenant_name, client.plugins,
logger)
@resource_tags.command(name='add',
short_help="Add resource tags to a specific plugin")
@cfy.argument('key-values',
callback=cfy.parse_and_validate_labels)
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def add_resource_tags(key_values, plugin_id, logger, client, tenant_name):
"""KEY_VALUES: <key>:<value>,<key>:<value>.
Any comma and colon in <value> must be escaped with '\\'."""
_add_metadata(plugin_id, 'resource_tags', key_values, tenant_name,
client.plugins, logger)
@resource_tags.command(name='delete',
short_help="Delete resource tags from "
"a specific plugin")
@cfy.argument('key', callback=cfy.parse_and_validate_label_to_delete)
@cfy.argument('plugin-id')
@cfy.options.tenant_name(required=False, resource_name_for_help='plugin')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete_resource_tags(key, plugin_id, logger, client, tenant_name):
"""
KEY: A resource tag's key to be deleted.
"""
_delete_metadata(plugin_id, 'resource_tags', key, tenant_name,
client.plugins, logger)
def _list_metadata(plugin_id,
metadata_type,
tenant_name,
client,
logger):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Listing %s of plugin %s...', metadata_type, plugin_id)
metadata = client.get(plugin_id)[metadata_type]
if get_global_json_output():
output(json.dumps(metadata, cls=CloudifyJSONEncoder))
elif metadata_type.endswith('labels'):
print_data(['key', 'values'],
get_printable_resource_labels(metadata),
'{0} labels'.format('Plugin'),
max_width=50)
else:
print_data(['key', 'value'],
[{'key': k, 'value': v} for k, v in metadata.items()],
'{0} labels'.format('Plugin'),
max_width=50)
def _add_metadata(plugin_id,
metadata_type,
metadata_list,
tenant_name,
client,
logger):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Adding %s to plugin %s...', metadata_type, plugin_id)
metadata = client.get(plugin_id)[metadata_type]
for added_metadata in metadata_list:
for k, v in added_metadata.items():
if k in metadata:
if v not in metadata[k]:
metadata[k].append(v)
else:
metadata[k] = [v]
_update_metadata(plugin_id, metadata_type, client,
**{metadata_type: metadata})
logger.info('The %s of plugin %s were added: %s',
metadata_type, plugin_id, metadata_list)
def _delete_metadata(plugin_id,
metadata_type,
metadata_list,
tenant_name,
client,
logger):
utils.explicit_tenant_name_message(tenant_name, logger)
logger.info('Deleting %s from plugin %s...', metadata_type, plugin_id)
metadata = client.get(plugin_id)[metadata_type]
for deleted_metadata in metadata_list:
for k, v in deleted_metadata.items():
if k in metadata:
if v in metadata[k]:
metadata[k].remove(v)
elif v is None:
del metadata[k]
_update_metadata(plugin_id, metadata_type, client,
**{metadata_type: metadata})
if metadata_type.endswith('labels'):
logger.info('The %s of plugin %s were deleted: %s',
metadata_type, plugin_id, metadata_list)
else:
logger.info('The %s of plugin %s were deleted: %s',
metadata_type, plugin_id,
", ".join(k for m in metadata_list for k in m.keys()))
def _update_metadata(plugin_id,
metadata_type,
client,
**kwargs):
plugin = client.update(plugin_id, **kwargs)
return plugin[metadata_type]
| 38.204902
| 79
| 0.628166
|
31039fa08aa678dad3107b6ac0f8834aa2a63607
| 1,477
|
py
|
Python
|
lib/dataset/iNaturalist.py
|
zhangyongshun/BagofTricks-LT
|
aec4d9a552236c32231374b7b00fa5bf4208dae3
|
[
"MIT"
] | 115
|
2020-12-27T06:32:11.000Z
|
2022-03-31T13:27:16.000Z
|
lib/dataset/iNaturalist.py
|
mymuli/BagofTricks-LT
|
46e1ca38f1ff3efb15fe25d50754d56f911c2ff1
|
[
"MIT"
] | 9
|
2021-01-06T12:53:21.000Z
|
2022-01-31T04:39:32.000Z
|
lib/dataset/iNaturalist.py
|
mymuli/BagofTricks-LT
|
46e1ca38f1ff3efb15fe25d50754d56f911c2ff1
|
[
"MIT"
] | 21
|
2021-01-14T14:52:18.000Z
|
2022-03-23T13:26:49.000Z
|
from dataset.baseset import BaseSet
import random, cv2
import numpy as np
class iNaturalist(BaseSet):
def __init__(self, mode='train', cfg=None, transform=None):
super(iNaturalist, self).__init__(mode, cfg, transform)
random.seed(0)
self.class_dict = self._get_class_dict()
def __getitem__(self, index):
if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" \
and self.mode == 'train' \
and (not self.cfg.TRAIN.TWO_STAGE.DRS or (self.cfg.TRAIN.TWO_STAGE.DRS and self.epoch)):
assert self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE in ["balance", 'square', 'progressive']
if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "balance":
sample_class = random.randint(0, self.num_classes - 1)
elif self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "square":
sample_class = np.random.choice(np.arange(self.num_classes), p=self.square_p)
else:
sample_class = np.random.choice(np.arange(self.num_classes), p=self.progress_p)
sample_indexes = self.class_dict[sample_class]
index = random.choice(sample_indexes)
now_info = self.data[index]
img = self._get_image(now_info)
image = self.transform(img)
meta = dict()
image_label = now_info['category_id'] # 0-index
return image, image_label, meta
| 34.348837
| 105
| 0.616791
|
18b600010b16ef8995e9c69b5675903c264d76a9
| 71,753
|
py
|
Python
|
simulator/sim.py
|
predict-drone/drone-control
|
e406f117e62a6b4533b587aecefadb895deb88c8
|
[
"BSD-2-Clause"
] | 227
|
2021-01-20T05:34:32.000Z
|
2022-03-29T12:43:05.000Z
|
machine_learning/CoppeliaSim_gym_reinforcement_learning/simple demos/VREP_RemoteAPIs/sim.py
|
passYYYY/guyueclass
|
2054ccec2f5e6c002727a5561b494a1046484504
|
[
"Apache-2.0"
] | 5
|
2021-10-07T18:46:58.000Z
|
2021-10-07T19:44:41.000Z
|
machine_learning/CoppeliaSim_gym_reinforcement_learning/simple demos/VREP_RemoteAPIs/sim.py
|
passYYYY/guyueclass
|
2054ccec2f5e6c002727a5561b494a1046484504
|
[
"Apache-2.0"
] | 239
|
2021-01-28T02:59:53.000Z
|
2022-03-29T08:02:17.000Z
|
import platform
import struct
import sys
import os
import ctypes as ct
from simConst import *
#load library
libsimx = None
try:
file_extension = '.so'
if platform.system() =='cli':
file_extension = '.dll'
elif platform.system() =='Windows':
file_extension = '.dll'
elif platform.system() == 'Darwin':
file_extension = '.dylib'
else:
file_extension = '.so'
libfullpath = os.path.join(os.path.dirname(__file__), 'remoteApi' + file_extension)
libsimx = ct.CDLL(libfullpath)
except:
print ('----------------------------------------------------')
print ('The remoteApi library could not be loaded. Make sure')
print ('it is located in the same folder as "sim.py", or')
print ('appropriately adjust the file "sim.py"')
print ('----------------------------------------------------')
print ('')
#ctypes wrapper prototypes
c_GetJointPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetJointPosition", libsimx))
c_SetJointPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointPosition", libsimx))
c_GetJointMatrix = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetJointMatrix", libsimx))
c_SetSphericalJointMatrix = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetSphericalJointMatrix", libsimx))
c_SetJointTargetVelocity = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointTargetVelocity", libsimx))
c_SetJointTargetPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointTargetPosition", libsimx))
c_GetJointForce = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetJointForce", libsimx))
c_GetJointMaxForce = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetJointMaxForce", libsimx))
c_SetJointForce = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointMaxForce", libsimx))
c_SetJointMaxForce = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetJointMaxForce", libsimx))
c_ReadForceSensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.POINTER(ct.c_float), ct.POINTER(ct.c_float), ct.c_int32)(("simxReadForceSensor", libsimx))
c_BreakForceSensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxBreakForceSensor", libsimx))
c_ReadVisionSensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.POINTER(ct.POINTER(ct.c_float)), ct.POINTER(ct.POINTER(ct.c_int32)), ct.c_int32)(("simxReadVisionSensor", libsimx))
c_GetObjectHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectHandle", libsimx))
c_GetVisionSensorImage = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_byte)), ct.c_ubyte, ct.c_int32)(("simxGetVisionSensorImage", libsimx))
c_SetVisionSensorImage = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_byte), ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxSetVisionSensorImage", libsimx))
c_GetVisionSensorDepthBuffer= ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_float)), ct.c_int32)(("simxGetVisionSensorDepthBuffer", libsimx))
c_GetObjectChild = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectChild", libsimx))
c_GetObjectParent = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectParent", libsimx))
c_ReadProximitySensor = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.POINTER(ct.c_float), ct.POINTER(ct.c_int32), ct.POINTER(ct.c_float), ct.c_int32)(("simxReadProximitySensor", libsimx))
c_LoadModel = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_ubyte, ct.POINTER(ct.c_int32), ct.c_int32)(("simxLoadModel", libsimx))
c_LoadUI = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_ubyte, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_int32)), ct.c_int32)(("simxLoadUI", libsimx))
c_LoadScene = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_ubyte, ct.c_int32)(("simxLoadScene", libsimx))
c_StartSimulation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxStartSimulation", libsimx))
c_PauseSimulation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxPauseSimulation", libsimx))
c_StopSimulation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxStopSimulation", libsimx))
c_GetUIHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUIHandle", libsimx))
c_GetUISlider = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUISlider", libsimx))
c_SetUISlider = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetUISlider", libsimx))
c_GetUIEventButton = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUIEventButton", libsimx))
c_GetUIButtonProperty = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetUIButtonProperty", libsimx))
c_SetUIButtonProperty = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetUIButtonProperty", libsimx))
c_AddStatusbarMessage = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxAddStatusbarMessage", libsimx))
c_AuxiliaryConsoleOpen = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.c_int32), ct.POINTER(ct.c_float), ct.POINTER(ct.c_float), ct.POINTER(ct.c_int32), ct.c_int32)(("simxAuxiliaryConsoleOpen", libsimx))
c_AuxiliaryConsoleClose = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxAuxiliaryConsoleClose", libsimx))
c_AuxiliaryConsolePrint = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxAuxiliaryConsolePrint", libsimx))
c_AuxiliaryConsoleShow = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxAuxiliaryConsoleShow", libsimx))
c_GetObjectOrientation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetObjectOrientation", libsimx))
c_GetObjectQuaternion = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetObjectQuaternion", libsimx))
c_GetObjectPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetObjectPosition", libsimx))
c_SetObjectOrientation = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetObjectOrientation", libsimx))
c_SetObjectQuaternion = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetObjectQuaternion", libsimx))
c_SetObjectPosition = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetObjectPosition", libsimx))
c_SetObjectParent = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxSetObjectParent", libsimx))
c_SetUIButtonLabel = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_char), ct.c_int32)(("simxSetUIButtonLabel", libsimx))
c_GetLastErrors = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_char)), ct.c_int32)(("simxGetLastErrors", libsimx))
c_GetArrayParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetArrayParameter", libsimx))
c_SetArrayParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxSetArrayParameter", libsimx))
c_GetBooleanParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.c_int32)(("simxGetBooleanParameter", libsimx))
c_SetBooleanParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_ubyte, ct.c_int32)(("simxSetBooleanParameter", libsimx))
c_GetIntegerParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetIntegerParameter", libsimx))
c_SetIntegerParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetIntegerParameter", libsimx))
c_GetFloatingParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetFloatingParameter", libsimx))
c_SetFloatingParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetFloatingParameter", libsimx))
c_GetStringParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.POINTER(ct.c_char)), ct.c_int32)(("simxGetStringParameter", libsimx))
c_GetCollisionHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetCollisionHandle", libsimx))
c_GetDistanceHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetDistanceHandle", libsimx))
c_GetCollectionHandle = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetCollectionHandle", libsimx))
c_ReadCollision = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_ubyte), ct.c_int32)(("simxReadCollision", libsimx))
c_ReadDistance = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxReadDistance", libsimx))
c_RemoveObject = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxRemoveObject", libsimx))
c_RemoveModel = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxRemoveModel", libsimx))
c_RemoveUI = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxRemoveUI", libsimx))
c_CloseScene = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32)(("simxCloseScene", libsimx))
c_GetObjects = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_int32)), ct.c_int32)(("simxGetObjects", libsimx))
c_DisplayDialog = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_char), ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_float), ct.POINTER(ct.c_float), ct.POINTER(ct.c_int32), ct.POINTER(ct.c_int32), ct.c_int32)(("simxDisplayDialog", libsimx))
c_EndDialog = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32)(("simxEndDialog", libsimx))
c_GetDialogInput = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.POINTER(ct.c_char)), ct.c_int32)(("simxGetDialogInput", libsimx))
c_GetDialogResult = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetDialogResult", libsimx))
c_CopyPasteObjects = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32, ct.POINTER(ct.POINTER(ct.c_int32)), ct.POINTER(ct.c_int32), ct.c_int32)(("simxCopyPasteObjects", libsimx))
c_GetObjectSelection = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.POINTER(ct.c_int32)), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectSelection", libsimx))
c_SetObjectSelection = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32, ct.c_int32)(("simxSetObjectSelection", libsimx))
c_ClearFloatSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxClearFloatSignal", libsimx))
c_ClearIntegerSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxClearIntegerSignal", libsimx))
c_ClearStringSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxClearStringSignal", libsimx))
c_GetFloatSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_float), ct.c_int32)(("simxGetFloatSignal", libsimx))
c_GetIntegerSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetIntegerSignal", libsimx))
c_GetStringSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.POINTER(ct.c_ubyte)), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetStringSignal", libsimx))
c_SetFloatSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_float, ct.c_int32)(("simxSetFloatSignal", libsimx))
c_SetIntegerSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32, ct.c_int32)(("simxSetIntegerSignal", libsimx))
c_SetStringSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_ubyte), ct.c_int32, ct.c_int32)(("simxSetStringSignal", libsimx))
c_AppendStringSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_ubyte), ct.c_int32, ct.c_int32)(("simxAppendStringSignal", libsimx))
c_WriteStringStream = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_ubyte), ct.c_int32, ct.c_int32)(("simxWriteStringStream", libsimx))
c_GetObjectFloatParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.c_int32)(("simxGetObjectFloatParameter", libsimx))
c_SetObjectFloatParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_float, ct.c_int32)(("simxSetObjectFloatParameter", libsimx))
c_GetObjectIntParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetObjectIntParameter", libsimx))
c_SetObjectIntParameter = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetObjectIntParameter", libsimx))
c_GetModelProperty = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetModelProperty", libsimx))
c_SetModelProperty = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.c_int32)(("simxSetModelProperty", libsimx))
c_Start = ct.CFUNCTYPE(ct.c_int32,ct.POINTER(ct.c_char), ct.c_int32, ct.c_ubyte, ct.c_ubyte, ct.c_int32, ct.c_int32)(("simxStart", libsimx))
c_Finish = ct.CFUNCTYPE(None, ct.c_int32)(("simxFinish", libsimx))
c_GetPingTime = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_int32))(("simxGetPingTime", libsimx))
c_GetLastCmdTime = ct.CFUNCTYPE(ct.c_int32,ct.c_int32)(("simxGetLastCmdTime", libsimx))
c_SynchronousTrigger = ct.CFUNCTYPE(ct.c_int32,ct.c_int32)(("simxSynchronousTrigger", libsimx))
c_Synchronous = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_ubyte)(("simxSynchronous", libsimx))
c_PauseCommunication = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_ubyte)(("simxPauseCommunication", libsimx))
c_GetInMessageInfo = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32))(("simxGetInMessageInfo", libsimx))
c_GetOutMessageInfo = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32))(("simxGetOutMessageInfo", libsimx))
c_GetConnectionId = ct.CFUNCTYPE(ct.c_int32,ct.c_int32)(("simxGetConnectionId", libsimx))
c_CreateBuffer = ct.CFUNCTYPE(ct.POINTER(ct.c_ubyte), ct.c_int32)(("simxCreateBuffer", libsimx))
c_ReleaseBuffer = ct.CFUNCTYPE(None, ct.c_void_p)(("simxReleaseBuffer", libsimx))
c_TransferFile = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_char), ct.c_int32, ct.c_int32)(("simxTransferFile", libsimx))
c_EraseFile = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.c_int32)(("simxEraseFile", libsimx))
c_GetAndClearStringSignal = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.POINTER(ct.c_ubyte)), ct.POINTER(ct.c_int32), ct.c_int32)(("simxGetAndClearStringSignal", libsimx))
c_ReadStringStream = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.POINTER(ct.c_ubyte)), ct.POINTER(ct.c_int32), ct.c_int32)(("simxReadStringStream", libsimx))
c_CreateDummy = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_float, ct.POINTER(ct.c_ubyte), ct.POINTER(ct.c_int32), ct.c_int32)(("simxCreateDummy", libsimx))
c_Query = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.c_ubyte), ct.c_int32, ct.POINTER(ct.c_char), ct.POINTER(ct.POINTER(ct.c_ubyte)), ct.POINTER(ct.c_int32), ct.c_int32)(("simxQuery", libsimx))
c_GetObjectGroupData = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.c_int32, ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_int32)), ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_int32)), ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_float)), ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_char)), ct.c_int32)(("simxGetObjectGroupData", libsimx))
c_GetObjectVelocity = ct.CFUNCTYPE(ct.c_int32,ct.c_int32, ct.c_int32, ct.POINTER(ct.c_float), ct.POINTER(ct.c_float), ct.c_int32)(("simxGetObjectVelocity", libsimx))
c_CallScriptFunction = ct.CFUNCTYPE(ct.c_int32,ct.c_int32,ct.POINTER(ct.c_char),ct.c_int32,ct.POINTER(ct.c_char),ct.c_int32,ct.POINTER(ct.c_int32),ct.c_int32,ct.POINTER(ct.c_float),ct.c_int32,ct.POINTER(ct.c_char),ct.c_int32,ct.POINTER(ct.c_ubyte),ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_int32)),ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_float)),ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_char)),ct.POINTER(ct.c_int32), ct.POINTER(ct.POINTER(ct.c_ubyte)),ct.c_int32)(("simxCallScriptFunction", libsimx))
#API functions
def simxGetJointPosition(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
position = ct.c_float()
return c_GetJointPosition(clientID, jointHandle, ct.byref(position), operationMode), position.value
def simxSetJointPosition(clientID, jointHandle, position, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetJointPosition(clientID, jointHandle, position, operationMode)
def simxGetJointMatrix(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
matrix = (ct.c_float*12)()
ret = c_GetJointMatrix(clientID, jointHandle, matrix, operationMode)
arr = []
for i in range(12):
arr.append(matrix[i])
return ret, arr
def simxSetSphericalJointMatrix(clientID, jointHandle, matrix, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
matrix = (ct.c_float*12)(*matrix)
return c_SetSphericalJointMatrix(clientID, jointHandle, matrix, operationMode)
def simxSetJointTargetVelocity(clientID, jointHandle, targetVelocity, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetJointTargetVelocity(clientID, jointHandle, targetVelocity, operationMode)
def simxSetJointTargetPosition(clientID, jointHandle, targetPosition, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetJointTargetPosition(clientID, jointHandle, targetPosition, operationMode)
def simxJointGetForce(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
force = ct.c_float()
return c_GetJointForce(clientID, jointHandle, ct.byref(force), operationMode), force.value
def simxGetJointForce(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
force = ct.c_float()
return c_GetJointForce(clientID, jointHandle, ct.byref(force), operationMode), force.value
def simxGetJointMaxForce(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
force = ct.c_float()
return c_GetJointMaxForce(clientID, jointHandle, ct.byref(force), operationMode), force.value
def simxSetJointForce(clientID, jointHandle, force, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetJointMaxForce(clientID, jointHandle, force, operationMode)
def simxSetJointMaxForce(clientID, jointHandle, force, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetJointMaxForce(clientID, jointHandle, force, operationMode)
def simxReadForceSensor(clientID, forceSensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
state = ct.c_ubyte()
forceVector = (ct.c_float*3)()
torqueVector = (ct.c_float*3)()
ret = c_ReadForceSensor(clientID, forceSensorHandle, ct.byref(state), forceVector, torqueVector, operationMode)
arr1 = []
for i in range(3):
arr1.append(forceVector[i])
arr2 = []
for i in range(3):
arr2.append(torqueVector[i])
#if sys.version_info[0] == 3:
# state=state.value
#else:
# state=ord(state.value)
return ret, state.value, arr1, arr2
def simxBreakForceSensor(clientID, forceSensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_BreakForceSensor(clientID, forceSensorHandle, operationMode)
def simxReadVisionSensor(clientID, sensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
detectionState = ct.c_ubyte()
auxValues = ct.POINTER(ct.c_float)()
auxValuesCount = ct.POINTER(ct.c_int)()
ret = c_ReadVisionSensor(clientID, sensorHandle, ct.byref(detectionState), ct.byref(auxValues), ct.byref(auxValuesCount), operationMode)
auxValues2 = []
if ret == 0:
s = 0
for i in range(auxValuesCount[0]):
auxValues2.append(auxValues[s:s+auxValuesCount[i+1]])
s += auxValuesCount[i+1]
#free C buffers
c_ReleaseBuffer(auxValues)
c_ReleaseBuffer(auxValuesCount)
return ret, bool(detectionState.value!=0), auxValues2
def simxGetObjectHandle(clientID, objectName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(objectName) is str):
objectName=objectName.encode('utf-8')
return c_GetObjectHandle(clientID, objectName, ct.byref(handle), operationMode), handle.value
def simxGetVisionSensorImage(clientID, sensorHandle, options, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
resolution = (ct.c_int*2)()
c_image = ct.POINTER(ct.c_byte)()
bytesPerPixel = 3
if (options and 1) != 0:
bytesPerPixel = 1
ret = c_GetVisionSensorImage(clientID, sensorHandle, resolution, ct.byref(c_image), options, operationMode)
reso = []
image = []
if (ret == 0):
image = [None]*resolution[0]*resolution[1]*bytesPerPixel
for i in range(resolution[0] * resolution[1] * bytesPerPixel):
image[i] = c_image[i]
for i in range(2):
reso.append(resolution[i])
return ret, reso, image
def simxSetVisionSensorImage(clientID, sensorHandle, image, options, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
size = len(image)
image_bytes = (ct.c_byte*size)(*image)
return c_SetVisionSensorImage(clientID, sensorHandle, image_bytes, size, options, operationMode)
def simxGetVisionSensorDepthBuffer(clientID, sensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
c_buffer = ct.POINTER(ct.c_float)()
resolution = (ct.c_int*2)()
ret = c_GetVisionSensorDepthBuffer(clientID, sensorHandle, resolution, ct.byref(c_buffer), operationMode)
reso = []
buffer = []
if (ret == 0):
buffer = [None]*resolution[0]*resolution[1]
for i in range(resolution[0] * resolution[1]):
buffer[i] = c_buffer[i]
for i in range(2):
reso.append(resolution[i])
return ret, reso, buffer
def simxGetObjectChild(clientID, parentObjectHandle, childIndex, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
childObjectHandle = ct.c_int()
return c_GetObjectChild(clientID, parentObjectHandle, childIndex, ct.byref(childObjectHandle), operationMode), childObjectHandle.value
def simxGetObjectParent(clientID, childObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
parentObjectHandle = ct.c_int()
return c_GetObjectParent(clientID, childObjectHandle, ct.byref(parentObjectHandle), operationMode), parentObjectHandle.value
def simxReadProximitySensor(clientID, sensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
detectionState = ct.c_ubyte()
detectedObjectHandle = ct.c_int()
detectedPoint = (ct.c_float*3)()
detectedSurfaceNormalVector = (ct.c_float*3)()
ret = c_ReadProximitySensor(clientID, sensorHandle, ct.byref(detectionState), detectedPoint, ct.byref(detectedObjectHandle), detectedSurfaceNormalVector, operationMode)
arr1 = []
for i in range(3):
arr1.append(detectedPoint[i])
arr2 = []
for i in range(3):
arr2.append(detectedSurfaceNormalVector[i])
return ret, bool(detectionState.value!=0), arr1, detectedObjectHandle.value, arr2
def simxLoadModel(clientID, modelPathAndName, options, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
baseHandle = ct.c_int()
if (sys.version_info[0] == 3) and (type(modelPathAndName) is str):
modelPathAndName=modelPathAndName.encode('utf-8')
return c_LoadModel(clientID, modelPathAndName, options, ct.byref(baseHandle), operationMode), baseHandle.value
def simxLoadUI(clientID, uiPathAndName, options, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
count = ct.c_int()
uiHandles = ct.POINTER(ct.c_int)()
if (sys.version_info[0] == 3) and (type(uiPathAndName) is str):
uiPathAndName=uiPathAndName.encode('utf-8')
ret = c_LoadUI(clientID, uiPathAndName, options, ct.byref(count), ct.byref(uiHandles), operationMode)
handles = []
if ret == 0:
for i in range(count.value):
handles.append(uiHandles[i])
#free C buffers
c_ReleaseBuffer(uiHandles)
return ret, handles
def simxLoadScene(clientID, scenePathAndName, options, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(scenePathAndName) is str):
scenePathAndName=scenePathAndName.encode('utf-8')
return c_LoadScene(clientID, scenePathAndName, options, operationMode)
def simxStartSimulation(clientID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_StartSimulation(clientID, operationMode)
def simxPauseSimulation(clientID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_PauseSimulation(clientID, operationMode)
def simxStopSimulation(clientID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_StopSimulation(clientID, operationMode)
def simxGetUIHandle(clientID, uiName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(uiName) is str):
uiName=uiName.encode('utf-8')
return c_GetUIHandle(clientID, uiName, ct.byref(handle), operationMode), handle.value
def simxGetUISlider(clientID, uiHandle, uiButtonID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
position = ct.c_int()
return c_GetUISlider(clientID, uiHandle, uiButtonID, ct.byref(position), operationMode), position.value
def simxSetUISlider(clientID, uiHandle, uiButtonID, position, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetUISlider(clientID, uiHandle, uiButtonID, position, operationMode)
def simxGetUIEventButton(clientID, uiHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
uiEventButtonID = ct.c_int()
auxValues = (ct.c_int*2)()
ret = c_GetUIEventButton(clientID, uiHandle, ct.byref(uiEventButtonID), auxValues, operationMode)
arr = []
for i in range(2):
arr.append(auxValues[i])
return ret, uiEventButtonID.value, arr
def simxGetUIButtonProperty(clientID, uiHandle, uiButtonID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
prop = ct.c_int()
return c_GetUIButtonProperty(clientID, uiHandle, uiButtonID, ct.byref(prop), operationMode), prop.value
def simxSetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode)
def simxAddStatusbarMessage(clientID, message, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(message) is str):
message=message.encode('utf-8')
return c_AddStatusbarMessage(clientID, message, operationMode)
def simxAuxiliaryConsoleOpen(clientID, title, maxLines, mode, position, size, textColor, backgroundColor, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
consoleHandle = ct.c_int()
if (sys.version_info[0] == 3) and (type(title) is str):
title=title.encode('utf-8')
if position != None:
c_position = (ct.c_int*2)(*position)
else:
c_position = None
if size != None:
c_size = (ct.c_int*2)(*size)
else:
c_size = None
if textColor != None:
c_textColor = (ct.c_float*3)(*textColor)
else:
c_textColor = None
if backgroundColor != None:
c_backgroundColor = (ct.c_float*3)(*backgroundColor)
else:
c_backgroundColor = None
return c_AuxiliaryConsoleOpen(clientID, title, maxLines, mode, c_position, c_size, c_textColor, c_backgroundColor, ct.byref(consoleHandle), operationMode), consoleHandle.value
def simxAuxiliaryConsoleClose(clientID, consoleHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_AuxiliaryConsoleClose(clientID, consoleHandle, operationMode)
def simxAuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(txt) is str):
txt=txt.encode('utf-8')
return c_AuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode)
def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode)
def simxGetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
eulerAngles = (ct.c_float*3)()
ret = c_GetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode)
arr = []
for i in range(3):
arr.append(eulerAngles[i])
return ret, arr
def simxGetObjectQuaternion(clientID, objectHandle, relativeToObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
quaternion = (ct.c_float*4)()
ret = c_GetObjectQuaternion(clientID, objectHandle, relativeToObjectHandle, quaternion, operationMode)
arr = []
for i in range(4):
arr.append(quaternion[i])
return ret, arr
def simxGetObjectPosition(clientID, objectHandle, relativeToObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
position = (ct.c_float*3)()
ret = c_GetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode)
arr = []
for i in range(3):
arr.append(position[i])
return ret, arr
def simxSetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
angles = (ct.c_float*3)(*eulerAngles)
return c_SetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, angles, operationMode)
def simxSetObjectQuaternion(clientID, objectHandle, relativeToObjectHandle, quaternion, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
quat = (ct.c_float*4)(*quaternion)
return c_SetObjectQuaternion(clientID, objectHandle, relativeToObjectHandle, quat, operationMode)
def simxSetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
c_position = (ct.c_float*3)(*position)
return c_SetObjectPosition(clientID, objectHandle, relativeToObjectHandle, c_position, operationMode)
def simxSetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode)
def simxSetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if sys.version_info[0] == 3:
if type(upStateLabel) is str:
upStateLabel=upStateLabel.encode('utf-8')
if type(downStateLabel) is str:
downStateLabel=downStateLabel.encode('utf-8')
return c_SetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode)
def simxGetLastErrors(clientID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
errors =[]
errorCnt = ct.c_int()
errorStrings = ct.POINTER(ct.c_char)()
ret = c_GetLastErrors(clientID, ct.byref(errorCnt), ct.byref(errorStrings), operationMode)
if ret == 0:
s = 0
for i in range(errorCnt.value):
a = bytearray()
while errorStrings[s] != b'\0':
if sys.version_info[0] == 3:
a.append(int.from_bytes(errorStrings[s],'big'))
else:
a.append(errorStrings[s])
s += 1
s += 1 #skip null
if sys.version_info[0] == 3:
errors.append(str(a,'utf-8'))
else:
errors.append(str(a))
return ret, errors
def simxGetArrayParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
paramValues = (ct.c_float*3)()
ret = c_GetArrayParameter(clientID, paramIdentifier, paramValues, operationMode)
arr = []
for i in range(3):
arr.append(paramValues[i])
return ret, arr
def simxSetArrayParameter(clientID, paramIdentifier, paramValues, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
c_paramValues = (ct.c_float*3)(*paramValues)
return c_SetArrayParameter(clientID, paramIdentifier, c_paramValues, operationMode)
def simxGetBooleanParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
paramValue = ct.c_ubyte()
return c_GetBooleanParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode), bool(paramValue.value!=0)
def simxSetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetIntegerParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
paramValue = ct.c_int()
return c_GetIntegerParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode), paramValue.value
def simxSetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetFloatingParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
paramValue = ct.c_float()
return c_GetFloatingParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode), paramValue.value
def simxSetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetStringParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
paramValue = ct.POINTER(ct.c_char)()
ret = c_GetStringParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode)
a = bytearray()
if ret == 0:
i = 0
while paramValue[i] != b'\0':
if sys.version_info[0] == 3:
a.append(int.from_bytes(paramValue[i],'big'))
else:
a.append(paramValue[i])
i=i+1
if sys.version_info[0] == 3:
a=str(a,'utf-8')
else:
a=str(a)
return ret, a
def simxGetCollisionHandle(clientID, collisionObjectName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(collisionObjectName) is str):
collisionObjectName=collisionObjectName.encode('utf-8')
return c_GetCollisionHandle(clientID, collisionObjectName, ct.byref(handle), operationMode), handle.value
def simxGetCollectionHandle(clientID, collectionName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(collectionName) is str):
collectionName=collectionName.encode('utf-8')
return c_GetCollectionHandle(clientID, collectionName, ct.byref(handle), operationMode), handle.value
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
handle = ct.c_int()
if (sys.version_info[0] == 3) and (type(distanceObjectName) is str):
distanceObjectName=distanceObjectName.encode('utf-8')
return c_GetDistanceHandle(clientID, distanceObjectName, ct.byref(handle), operationMode), handle.value
def simxReadCollision(clientID, collisionObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
collisionState = ct.c_ubyte()
return c_ReadCollision(clientID, collisionObjectHandle, ct.byref(collisionState), operationMode), bool(collisionState.value!=0)
def simxReadDistance(clientID, distanceObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
minimumDistance = ct.c_float()
return c_ReadDistance(clientID, distanceObjectHandle, ct.byref(minimumDistance), operationMode), minimumDistance.value
def simxRemoveObject(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_RemoveObject(clientID, objectHandle, operationMode)
def simxRemoveModel(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_RemoveModel(clientID, objectHandle, operationMode)
def simxRemoveUI(clientID, uiHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_RemoveUI(clientID, uiHandle, operationMode)
def simxCloseScene(clientID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_CloseScene(clientID, operationMode)
def simxGetObjects(clientID, objectType, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
objectCount = ct.c_int()
objectHandles = ct.POINTER(ct.c_int)()
ret = c_GetObjects(clientID, objectType, ct.byref(objectCount), ct.byref(objectHandles), operationMode)
handles = []
if ret == 0:
for i in range(objectCount.value):
handles.append(objectHandles[i])
return ret, handles
def simxDisplayDialog(clientID, titleText, mainText, dialogType, initialText, titleColors, dialogColors, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if titleColors != None:
c_titleColors = (ct.c_float*6)(*titleColors)
else:
c_titleColors = None
if dialogColors != None:
c_dialogColors = (ct.c_float*6)(*dialogColors)
else:
c_dialogColors = None
c_dialogHandle = ct.c_int()
c_uiHandle = ct.c_int()
if sys.version_info[0] == 3:
if type(titleText) is str:
titleText=titleText.encode('utf-8')
if type(mainText) is str:
mainText=mainText.encode('utf-8')
if type(initialText) is str:
initialText=initialText.encode('utf-8')
return c_DisplayDialog(clientID, titleText, mainText, dialogType, initialText, c_titleColors, c_dialogColors, ct.byref(c_dialogHandle), ct.byref(c_uiHandle), operationMode), c_dialogHandle.value, c_uiHandle.value
def simxEndDialog(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_EndDialog(clientID, dialogHandle, operationMode)
def simxGetDialogInput(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
inputText = ct.POINTER(ct.c_char)()
ret = c_GetDialogInput(clientID, dialogHandle, ct.byref(inputText), operationMode)
a = bytearray()
if ret == 0:
i = 0
while inputText[i] != b'\0':
if sys.version_info[0] == 3:
a.append(int.from_bytes(inputText[i],'big'))
else:
a.append(inputText[i])
i = i+1
if sys.version_info[0] == 3:
a=str(a,'utf-8')
else:
a=str(a)
return ret, a
def simxGetDialogResult(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
result = ct.c_int()
return c_GetDialogResult(clientID, dialogHandle, ct.byref(result), operationMode), result.value
def simxCopyPasteObjects(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
c_objectHandles = (ct.c_int*len(objectHandles))(*objectHandles)
c_objectHandles = ct.cast(c_objectHandles,ct.POINTER(ct.c_int)) # IronPython needs this
newObjectCount = ct.c_int()
newObjectHandles = ct.POINTER(ct.c_int)()
ret = c_CopyPasteObjects(clientID, c_objectHandles, len(objectHandles), ct.byref(newObjectHandles), ct.byref(newObjectCount), operationMode)
newobj = []
if ret == 0:
for i in range(newObjectCount.value):
newobj.append(newObjectHandles[i])
return ret, newobj
def simxGetObjectSelection(clientID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
objectCount = ct.c_int()
objectHandles = ct.POINTER(ct.c_int)()
ret = c_GetObjectSelection(clientID, ct.byref(objectHandles), ct.byref(objectCount), operationMode)
newobj = []
if ret == 0:
for i in range(objectCount.value):
newobj.append(objectHandles[i])
return ret, newobj
def simxSetObjectSelection(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
c_objectHandles = (ct.c_int*len(objectHandles))(*objectHandles)
return c_SetObjectSelection(clientID, c_objectHandles, len(objectHandles), operationMode)
def simxClearFloatSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_ClearFloatSignal(clientID, signalName, operationMode)
def simxClearIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_ClearIntegerSignal(clientID, signalName, operationMode)
def simxClearStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_ClearStringSignal(clientID, signalName, operationMode)
def simxGetFloatSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
signalValue = ct.c_float()
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_GetFloatSignal(clientID, signalName, ct.byref(signalValue), operationMode), signalValue.value
def simxGetIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
signalValue = ct.c_int()
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_GetIntegerSignal(clientID, signalName, ct.byref(signalValue), operationMode), signalValue.value
def simxGetStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
signalLength = ct.c_int();
signalValue = ct.POINTER(ct.c_ubyte)()
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
ret = c_GetStringSignal(clientID, signalName, ct.byref(signalValue), ct.byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
if sys.version_info[0] != 3:
a=str(a)
return ret, a
def simxGetAndClearStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
signalLength = ct.c_int();
signalValue = ct.POINTER(ct.c_ubyte)()
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
ret = c_GetAndClearStringSignal(clientID, signalName, ct.byref(signalValue), ct.byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
if sys.version_info[0] != 3:
a=str(a)
return ret, a
def simxReadStringStream(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
signalLength = ct.c_int();
signalValue = ct.POINTER(ct.c_ubyte)()
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
ret = c_ReadStringStream(clientID, signalName, ct.byref(signalValue), ct.byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
if sys.version_info[0] != 3:
a=str(a)
return ret, a
def simxSetFloatSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_SetFloatSignal(clientID, signalName, signalValue, operationMode)
def simxSetIntegerSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_SetIntegerSignal(clientID, signalName, signalValue, operationMode)
def simxSetStringSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
sigV=signalValue
if sys.version_info[0] == 3:
if type(signalName) is str:
signalName=signalName.encode('utf-8')
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=signalValue.encode('utf-8')
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
else:
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=bytearray(signalValue)
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
sigV=ct.cast(sigV,ct.POINTER(ct.c_ubyte)) # IronPython needs this
return c_SetStringSignal(clientID, signalName, sigV, len(signalValue), operationMode)
def simxAppendStringSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
sigV=signalValue
if sys.version_info[0] == 3:
if type(signalName) is str:
signalName=signalName.encode('utf-8')
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=signalValue.encode('utf-8')
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
else:
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=bytearray(signalValue)
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
sigV=ct.cast(sigV,ct.POINTER(ct.c_ubyte)) # IronPython needs this
return c_AppendStringSignal(clientID, signalName, sigV, len(signalValue), operationMode)
def simxWriteStringStream(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
sigV=signalValue
if sys.version_info[0] == 3:
if type(signalName) is str:
signalName=signalName.encode('utf-8')
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=signalValue.encode('utf-8')
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
else:
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=bytearray(signalValue)
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
sigV=ct.cast(sigV,ct.POINTER(ct.c_ubyte)) # IronPython needs this
return c_WriteStringStream(clientID, signalName, sigV, len(signalValue), operationMode)
def simxGetObjectFloatParameter(clientID, objectHandle, parameterID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
parameterValue = ct.c_float()
return c_GetObjectFloatParameter(clientID, objectHandle, parameterID, ct.byref(parameterValue), operationMode), parameterValue.value
def simxSetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
def simxGetObjectIntParameter(clientID, objectHandle, parameterID, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
parameterValue = ct.c_int()
return c_GetObjectIntParameter(clientID, objectHandle, parameterID, ct.byref(parameterValue), operationMode), parameterValue.value
def simxSetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
def simxGetModelProperty(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
prop = ct.c_int()
return c_GetModelProperty(clientID, objectHandle, ct.byref(prop), operationMode), prop.value
def simxSetModelProperty(clientID, objectHandle, prop, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SetModelProperty(clientID, objectHandle, prop, operationMode)
def simxStart(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(connectionAddress) is str):
connectionAddress=connectionAddress.encode('utf-8')
return c_Start(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs)
def simxFinish(clientID):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_Finish(clientID)
def simxGetPingTime(clientID):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
pingTime = ct.c_int()
return c_GetPingTime(clientID, ct.byref(pingTime)), pingTime.value
def simxGetLastCmdTime(clientID):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_GetLastCmdTime(clientID)
def simxSynchronousTrigger(clientID):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_SynchronousTrigger(clientID)
def simxSynchronous(clientID, enable):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_Synchronous(clientID, enable)
def simxPauseCommunication(clientID, enable):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_PauseCommunication(clientID, enable)
def simxGetInMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
info = ct.c_int()
return c_GetInMessageInfo(clientID, infoType, ct.byref(info)), info.value
def simxGetOutMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
info = ct.c_int()
return c_GetOutMessageInfo(clientID, infoType, ct.byref(info)), info.value
def simxGetConnectionId(clientID):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_GetConnectionId(clientID)
def simxCreateBuffer(bufferSize):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_CreateBuffer(bufferSize)
def simxReleaseBuffer(buffer):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
return c_ReleaseBuffer(buffer)
def simxTransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(filePathAndName) is str):
filePathAndName=filePathAndName.encode('utf-8')
return c_TransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode)
def simxEraseFile(clientID, fileName_serverSide, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if (sys.version_info[0] == 3) and (type(fileName_serverSide) is str):
fileName_serverSide=fileName_serverSide.encode('utf-8')
return c_EraseFile(clientID, fileName_serverSide, operationMode)
def simxCreateDummy(clientID, size, color, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
handle = ct.c_int()
if color != None:
c_color = (ct.c_ubyte*12)(*color)
else:
c_color = None
return c_CreateDummy(clientID, size, c_color, ct.byref(handle), operationMode), handle.value
def simxQuery(clientID, signalName, signalValue, retSignalName, timeOutInMs):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
retSignalLength = ct.c_int();
retSignalValue = ct.POINTER(ct.c_ubyte)()
sigV=signalValue
if sys.version_info[0] == 3:
if type(signalName) is str:
signalName=signalName.encode('utf-8')
if type(retSignalName) is str:
retSignalName=retSignalName.encode('utf-8')
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=signalValue.encode('utf-8')
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
else:
if type(signalValue) is bytearray:
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
if type(signalValue) is str:
signalValue=bytearray(signalValue)
sigV = (ct.c_ubyte*len(signalValue))(*signalValue)
sigV=ct.cast(sigV,ct.POINTER(ct.c_ubyte)) # IronPython needs this
ret = c_Query(clientID, signalName, sigV, len(signalValue), retSignalName, ct.byref(retSignalValue), ct.byref(retSignalLength), timeOutInMs)
a = bytearray()
if ret == 0:
for i in range(retSignalLength.value):
a.append(retSignalValue[i])
if sys.version_info[0] != 3:
a=str(a)
return ret, a
def simxGetObjectGroupData(clientID, objectType, dataType, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
handles =[]
intData =[]
floatData =[]
stringData =[]
handlesC = ct.c_int()
handlesP = ct.POINTER(ct.c_int)()
intDataC = ct.c_int()
intDataP = ct.POINTER(ct.c_int)()
floatDataC = ct.c_int()
floatDataP = ct.POINTER(ct.c_float)()
stringDataC = ct.c_int()
stringDataP = ct.POINTER(ct.c_char)()
ret = c_GetObjectGroupData(clientID, objectType, dataType, ct.byref(handlesC), ct.byref(handlesP), ct.byref(intDataC), ct.byref(intDataP), ct.byref(floatDataC), ct.byref(floatDataP), ct.byref(stringDataC), ct.byref(stringDataP), operationMode)
if ret == 0:
for i in range(handlesC.value):
handles.append(handlesP[i])
for i in range(intDataC.value):
intData.append(intDataP[i])
for i in range(floatDataC.value):
floatData.append(floatDataP[i])
s = 0
for i in range(stringDataC.value):
a = bytearray()
while stringDataP[s] != b'\0':
if sys.version_info[0] == 3:
a.append(int.from_bytes(stringDataP[s],'big'))
else:
a.append(stringDataP[s])
s += 1
s += 1 #skip null
if sys.version_info[0] == 3:
a=str(a,'utf-8')
else:
a=str(a)
stringData.append(a)
return ret, handles, intData, floatData, stringData
def simxCallScriptFunction(clientID, scriptDescription, options, functionName, inputInts, inputFloats, inputStrings, inputBuffer, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
inputBufferV=inputBuffer
if sys.version_info[0] == 3:
if type(scriptDescription) is str:
scriptDescription=scriptDescription.encode('utf-8')
if type(functionName) is str:
functionName=functionName.encode('utf-8')
if type(inputBuffer) is bytearray:
inputBufferV = (ct.c_ubyte*len(inputBuffer))(*inputBuffer)
if type(inputBuffer) is str:
inputBuffer=inputBuffer.encode('utf-8')
inputBufferV = (ct.c_ubyte*len(inputBuffer))(*inputBuffer)
else:
if type(inputBuffer) is bytearray:
inputBufferV = (ct.c_ubyte*len(inputBuffer))(*inputBuffer)
if type(inputBuffer) is str:
inputBuffer=bytearray(inputBuffer)
inputBufferV = (ct.c_ubyte*len(inputBuffer))(*inputBuffer)
inputBufferV=ct.cast(inputBufferV,ct.POINTER(ct.c_ubyte)) # IronPython needs this
c_inInts = (ct.c_int*len(inputInts))(*inputInts)
c_inInts = ct.cast(c_inInts,ct.POINTER(ct.c_int)) # IronPython needs this
c_inFloats = (ct.c_float*len(inputFloats))(*inputFloats)
c_inFloats = ct.cast(c_inFloats,ct.POINTER(ct.c_float)) # IronPython needs this
concatStr=''.encode('utf-8')
for i in range(len(inputStrings)):
a=inputStrings[i]
a=a+'\0'
if type(a) is str:
a=a.encode('utf-8')
concatStr=concatStr+a
c_inStrings = (ct.c_char*len(concatStr))(*concatStr)
intDataOut =[]
floatDataOut =[]
stringDataOut =[]
bufferOut =bytearray()
intDataC = ct.c_int()
intDataP = ct.POINTER(ct.c_int)()
floatDataC = ct.c_int()
floatDataP = ct.POINTER(ct.c_float)()
stringDataC = ct.c_int()
stringDataP = ct.POINTER(ct.c_char)()
bufferS = ct.c_int()
bufferP = ct.POINTER(ct.c_ubyte)()
ret = c_CallScriptFunction(clientID,scriptDescription,options,functionName,len(inputInts),c_inInts,len(inputFloats),c_inFloats,len(inputStrings),c_inStrings,len(inputBuffer),inputBufferV,ct.byref(intDataC),ct.byref(intDataP),ct.byref(floatDataC),ct.byref(floatDataP),ct.byref(stringDataC),ct.byref(stringDataP),ct.byref(bufferS),ct.byref(bufferP),operationMode)
if ret == 0:
for i in range(intDataC.value):
intDataOut.append(intDataP[i])
for i in range(floatDataC.value):
floatDataOut.append(floatDataP[i])
s = 0
for i in range(stringDataC.value):
a = bytearray()
while stringDataP[s] != b'\0':
if sys.version_info[0] == 3:
a.append(int.from_bytes(stringDataP[s],'big'))
else:
a.append(stringDataP[s])
s += 1
s += 1 #skip null
if sys.version_info[0] == 3:
a=str(a,'utf-8')
else:
a=str(a)
stringDataOut.append(a)
for i in range(bufferS.value):
bufferOut.append(bufferP[i])
if sys.version_info[0] != 3:
bufferOut=str(bufferOut)
return ret, intDataOut, floatDataOut, stringDataOut, bufferOut
def simxGetObjectVelocity(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
linearVel = (ct.c_float*3)()
angularVel = (ct.c_float*3)()
ret = c_GetObjectVelocity(clientID, objectHandle, linearVel, angularVel, operationMode)
arr1 = []
for i in range(3):
arr1.append(linearVel[i])
arr2 = []
for i in range(3):
arr2.append(angularVel[i])
return ret, arr1, arr2
def simxPackInts(intList):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if sys.version_info[0] == 3:
s=bytes()
for i in range(len(intList)):
s=s+struct.pack('<i',intList[i])
s=bytearray(s)
else:
s=''
for i in range(len(intList)):
s+=struct.pack('<i',intList[i])
return s
def simxUnpackInts(intsPackedInString):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
b=[]
for i in range(int(len(intsPackedInString)/4)):
b.append(struct.unpack('<i',intsPackedInString[4*i:4*(i+1)])[0])
return b
def simxPackFloats(floatList):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
if sys.version_info[0] == 3:
s=bytes()
for i in range(len(floatList)):
s=s+struct.pack('<f',floatList[i])
s=bytearray(s)
else:
s=''
for i in range(len(floatList)):
s+=struct.pack('<f',floatList[i])
return s
def simxUnpackFloats(floatsPackedInString):
'''
Please have a look at the function description/documentation in the CoppeliaSim user manual
'''
b=[]
for i in range(int(len(floatsPackedInString)/4)):
b.append(struct.unpack('<f',floatsPackedInString[4*i:4*(i+1)])[0])
return b
| 48.027443
| 539
| 0.693156
|
c18a5df9f8cd5e6279144e34bfc82e143f349f98
| 3,750
|
py
|
Python
|
LUCI/LuciVisualize.py
|
lyalcorn/LUCI
|
ed9dde4286ca80694f53a3a50e1da2073a92ff76
|
[
"MIT"
] | null | null | null |
LUCI/LuciVisualize.py
|
lyalcorn/LUCI
|
ed9dde4286ca80694f53a3a50e1da2073a92ff76
|
[
"MIT"
] | null | null | null |
LUCI/LuciVisualize.py
|
lyalcorn/LUCI
|
ed9dde4286ca80694f53a3a50e1da2073a92ff76
|
[
"MIT"
] | null | null | null |
"""
Luci visualization tools
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import RectangleSelector, Slider
import seaborn as sns
def visualize(deep_image, spectrum_axis, cube_final):
"""
Function that allows you to visualize the deep frame, click on a pixel, and
then see the sepctrum. This is under development at the moment (4.8.22 -- Carter)
"""
fig,axes = plt.subplots(2,1,figsize=(15,15))
plt.style.use('fivethirtyeight')
shift_ct = 0
point1 = []
rectangles = []
def line_select_callback(eclick, erelease):
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
rect = plt.Rectangle((min(x1, x2), min(y1, y2)), np.abs(x1 - x2), np.abs(y1 - y2))
if len(rectangles) > 0:
rectangles[-1].remove()
rectangles.append(rect)
axes[0].add_patch(rect)
integrated_spectrum = np.zeros(cube_final.shape[2])
for i in range(int(y2 - y1)):
y_pix = int(y1 + i)
for j in range(int(x2 - x1)):
x_pix = int(x1 + j)
# Check if pixel is in the mask or not
integrated_spectrum += cube_final[x_pix, y_pix, :]
axes[1].cla()
axes[1].set_title('Spectrum of region %i<x<%i %i<y%i'%(int(x1), int(x2), int(y1), int(y2)))
plt.plot(1e7 / spectrum_axis, integrated_spectrum, linewidth=2)
axes[1].set_xlabel('Wavelength [nm]', fontweight='bold')
axes[1].set_ylabel(r'Intensity (Ergs/cm$^2$/s/$\AA$)', fontweight='bold')
#deep_image = fits.open('Luci_outputs/NGC628_deep.fits')[0].data
def onclick(event):
plt.subplot(212)
global shift_ct
global point1
if event.key == 'control':
shift_is_held = True
else:
shift_is_held = False
point1 = []
shift_ct = 0
if shift_is_held is True:
print('SHIFT %i %i'%(event.xdata, event.ydata))
#axes[0].plot(int(event.xdata), int(event.ydata), 'o', color='y')
if shift_ct != 1:
point1 = [int(event.xdata), int(event.ydata)]
shift_ct = 1
if len(point1) == 1:
dist = np.sqrt((int(event.xdata)-point1[0])**2+(int(event.ydata)-point1[1])**2)
circle = plt.Circle((point1[0], point1[1]), dist, color='b', fill=False)
point1 = []
else:
X_coordinate = int(event.xdata)
Y_coordinate = int(event.ydata)
axes[1].cla()
plt.title('Spectrum of point (%i,%i)'%(X_coordinate, Y_coordinate))
plt.plot(1e7/spectrum_axis,cube_final[X_coordinate, Y_coordinate], linewidth=2)
axes[1].set_xlabel('Wavelength [nm]', fontweight='bold')
axes[1].set_ylabel(r'Intensity (Ergs/cm$^2$/s/$\AA$)', fontweight='bold')
plt.show()
def update_min(min):
axes[0].clf()
axes[0].set_title('Scaled Deep Image')
scaled_deep_image = np.nan_to_num(np.log10(deep_image), 0)
axes[0].imshow(scaled_deep_image, vmin=float(min))
plt.show()
fig.canvas.mpl_connect('button_press_event', onclick)
plt.subplot(211)
axes[0].set_title('Scaled Deep Image')
scaled_deep_image = np.nan_to_num(np.log10(deep_image), 0)
plt.imshow(scaled_deep_image, origin='lower', vmin=np.percentile(scaled_deep_image, 5), vmax=np.percentile(scaled_deep_image, 99))
rs = RectangleSelector(axes[0], line_select_callback,
drawtype='box', useblit=False, button=[1],
minspanx=2, minspany=2, spancoords='pixels',
interactive=False)
plt.show()
| 39.473684
| 134
| 0.586133
|
76180365eb73d57f4ac271df111bc64be75e06c9
| 330
|
py
|
Python
|
datapyle/sqlaimports.py
|
quidditymaster/datapyle
|
c5f311353a507aafc7947f2657ebc0ee95a53d72
|
[
"MIT"
] | null | null | null |
datapyle/sqlaimports.py
|
quidditymaster/datapyle
|
c5f311353a507aafc7947f2657ebc0ee95a53d72
|
[
"MIT"
] | null | null | null |
datapyle/sqlaimports.py
|
quidditymaster/datapyle
|
c5f311353a507aafc7947f2657ebc0ee95a53d72
|
[
"MIT"
] | null | null | null |
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Column, Date, Integer, String, Float, DateTime
from sqlalchemy import PickleType, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship, backref
| 36.666667
| 69
| 0.854545
|
18ae1b0a6a8ee84702f427665d6f306fc185fbf0
| 894
|
py
|
Python
|
tests/test_env_config.py
|
gosuto-ai/badger-rewards
|
45a7cefce2035bc385bebf5f103780c7ff614304
|
[
"MIT"
] | 3
|
2022-01-05T20:33:35.000Z
|
2022-02-09T16:07:30.000Z
|
tests/test_env_config.py
|
gosuto-ai/badger-rewards
|
45a7cefce2035bc385bebf5f103780c7ff614304
|
[
"MIT"
] | 341
|
2021-08-04T13:01:21.000Z
|
2022-03-31T19:46:30.000Z
|
tests/test_env_config.py
|
gosuto-ai/badger-rewards
|
45a7cefce2035bc385bebf5f103780c7ff614304
|
[
"MIT"
] | 3
|
2021-09-07T12:54:27.000Z
|
2021-12-22T13:27:23.000Z
|
import logging
import os
import pytest
os.environ["KUBE"] = "False"
os.environ["AWS_ACCESS_KEY_ID"] = ""
os.environ["AWS_SECRET_ACCESS_KEY"] = ""
from config.env_config import EnvConfig
from helpers.enums import Environment
logger = logging.getLogger("test-env-config")
def test_valid_environment():
if "ENV" in os.environ:
os.environ.pop("ENV")
with pytest.raises(AssertionError):
EnvConfig()
os.environ["ENV"] = "test"
env_config = EnvConfig()
assert env_config.is_valid_config()
def test_get_environment():
enum = [Environment.Test, Environment.Staging, Environment.Production]
vals = ["test", "stg", "prod"]
for expected, val in zip(enum, vals):
if "ENV" in os.environ:
os.environ.pop("ENV")
os.environ["ENV"] = val
env_config = EnvConfig()
assert env_config.get_environment() == expected
| 24.833333
| 74
| 0.668904
|
80d8c25f8ed1d3c22b6d0b712f39f1bd187eee7a
| 312
|
py
|
Python
|
cross_circle_gym/envs/__init__.py
|
Ferch42/PyDSRL
|
bd9ea3e739c837db0db5052f7db23476fa21c472
|
[
"MIT"
] | null | null | null |
cross_circle_gym/envs/__init__.py
|
Ferch42/PyDSRL
|
bd9ea3e739c837db0db5052f7db23476fa21c472
|
[
"MIT"
] | null | null | null |
cross_circle_gym/envs/__init__.py
|
Ferch42/PyDSRL
|
bd9ea3e739c837db0db5052f7db23476fa21c472
|
[
"MIT"
] | null | null | null |
from cross_circle_gym.envs.cross_circle_neg_grid import CrossCircleNegGrid
from cross_circle_gym.envs.cross_circle_mixed_grid import CrossCircleMixedGrid
from cross_circle_gym.envs.cross_circle_neg_rand import CrossCircleNegRand
from cross_circle_gym.envs.cross_circle_mixed_rand import CrossCircleMixedRand
| 62.4
| 79
| 0.910256
|
ee1da9100b470a72b7d9dce8b74c82a4f4ee5ce8
| 711
|
py
|
Python
|
bw2io/errors.py
|
mfastudillo/brightway2-io
|
dc383ddb6003a46e78259aeb7f87b9d80965d689
|
[
"BSD-3-Clause"
] | null | null | null |
bw2io/errors.py
|
mfastudillo/brightway2-io
|
dc383ddb6003a46e78259aeb7f87b9d80965d689
|
[
"BSD-3-Clause"
] | null | null | null |
bw2io/errors.py
|
mfastudillo/brightway2-io
|
dc383ddb6003a46e78259aeb7f87b9d80965d689
|
[
"BSD-3-Clause"
] | null | null | null |
class InvalidPackage(Exception):
"""bw2package data doesn't validate"""
pass
class UnsafeData(Exception):
"""bw2package data comes from a class that isn't recognized by Brightway2"""
pass
class UnsupportedExchange(Exception):
"""This exchange uncertainty type can't be rescaled automatically"""
pass
class StrategyError(Exception):
"""The strategy could not be applied"""
pass
class NonuniqueCode(Exception):
"""Not all provided codes are unique"""
pass
class WrongDatabase(Exception):
"""Dataset does not belong to this database"""
pass
class MultiprocessingError(Exception):
"""Multiprocessing module error or incompatibility"""
pass
| 17.341463
| 80
| 0.706048
|
614d10aa70c9104be1792b8082a8ed0b81f4a352
| 5,561
|
py
|
Python
|
Genetic Algorithm /Genetic Algorithm.py
|
TonyHinjos/Machine-Learning-Algorithms-Toolkit
|
7f322e148ee5b9908e6ea02f7692838ec08501d3
|
[
"MIT"
] | 1
|
2015-11-05T23:57:57.000Z
|
2015-11-05T23:57:57.000Z
|
Genetic Algorithm Final/Genetic Algorithm.py
|
urandu/machine-learning-assignments
|
6c6f727a4b2dd00a1ac3ee755bcf36390781d2b6
|
[
"Apache-2.0"
] | null | null | null |
Genetic Algorithm Final/Genetic Algorithm.py
|
urandu/machine-learning-assignments
|
6c6f727a4b2dd00a1ac3ee755bcf36390781d2b6
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'steve_w'
import random
class GeneticAlgorithm(object):
def __init__(self, genetics):
self.genetics = genetics
pass
def run(self):
population = self.genetics.initial()
while True:
fits_pops = [(self.genetics.fitness(ch), ch) for ch in population]
if self.genetics.check_stop(fits_pops): break
population = self.next(fits_pops)
pass
return population
def next(self, fits):
parents_generator = self.genetics.parents(fits)
size = len(fits)
nexts = []
while len(nexts) < size:
parents = next(parents_generator)
cross = random.random() < self.genetics.probability_crossover()
children = self.genetics.crossover(parents) if cross else parents
for ch in children:
mutate = random.random() < self.genetics.probability_mutation()
nexts.append(self.genetics.mutation(ch) if mutate else ch)
pass
pass
return nexts[0:size]
pass
class GeneticFunctions(object):
def probability_crossover(self):
r"""returns rate of occur crossover(0.0-1.0)"""
return 1.0
def probability_mutation(self):
r"""returns rate of occur mutation(0.0-1.0)"""
return 0.0
def initial(self):
r"""returns list of initial population
"""
return []
def fitness(self, chromosome):
r"""returns domain fitness value of chromosome
"""
return len(chromosome)
def check_stop(self, fits_populations):
r"""stop run if returns True
- fits_populations: list of (fitness_value, chromosome)
"""
return False
def parents(self, fits_populations):
r"""generator of selected parents
"""
gen = iter(sorted(fits_populations))
while True:
f1, ch1 = next(gen)
f2, ch2 = next(gen)
yield (ch1, ch2)
pass
return
def crossover(self, parents):
r"""breed children
"""
return parents
def mutation(self, chromosome):
r"""mutate chromosome
"""
return chromosome
pass
if __name__ == "__main__":
"""
example: Mapped guess prepared Text
"""
class GuessText(GeneticFunctions):
def __init__(self, target_text,
limit=200, size=400,
prob_crossover=0.9, prob_mutation=0.2):
self.target = self.text2chromo(target_text)
self.counter = 0
self.limit = limit
self.size = size
self.prob_crossover = prob_crossover
self.prob_mutation = prob_mutation
pass
# GeneticFunctions interface impls
def probability_crossover(self):
return self.prob_crossover
def probability_mutation(self):
return self.prob_mutation
def initial(self):
return [self.random_chromo() for j in range(self.size)]
def fitness(self, chromo):
# larger is better, matched == 0
return -sum(abs(c - t) for c, t in zip(chromo, self.target))
def check_stop(self, fits_populations):
self.counter += 1
if self.counter % 10 == 0:
best_match = list(sorted(fits_populations))[-1][1]
fits = [f for f, ch in fits_populations]
best = max(fits)
worst = min(fits)
ave = sum(fits) / len(fits)
print(
"[G %3d] score=(%4d, %4d, %4d): %r" %
(self.counter, best, ave, worst,
self.chromo2text(best_match)))
pass
return self.counter >= self.limit
def parents(self, fits_populations):
while True:
father = self.tournament(fits_populations)
mother = self.tournament(fits_populations)
yield (father, mother)
pass
pass
def crossover(self, parents):
father, mother = parents
index1 = random.randint(1, len(self.target) - 2)
index2 = random.randint(1, len(self.target) - 2)
if index1 > index2: index1, index2 = index2, index1
child1 = father[:index1] + mother[index1:index2] + father[index2:]
child2 = mother[:index1] + father[index1:index2] + mother[index2:]
return (child1, child2)
def mutation(self, chromosome):
index = random.randint(0, len(self.target) - 1)
vary = random.randint(-5, 5)
mutated = list(chromosome)
mutated[index] += vary
return mutated
# internals
def tournament(self, fits_populations):
alicef, alice = self.select_random(fits_populations)
bobf, bob = self.select_random(fits_populations)
return alice if alicef > bobf else bob
def select_random(self, fits_populations):
return fits_populations[random.randint(0, len(fits_populations)-1)]
def text2chromo(self, text):
return [ord(ch) for ch in text]
def chromo2text(self, chromo):
return "".join(chr(max(1, min(ch, 255))) for ch in chromo)
def random_chromo(self):
return [random.randint(1, 255) for i in range(len(self.target))]
pass
GeneticAlgorithm(GuessText("Hello World!")).run()
pass
| 31.95977
| 79
| 0.560691
|
4c96d1e0c3b9271ce553a43e30941adc7ce3a747
| 4,982
|
py
|
Python
|
examples/converters.py
|
AryamanSrii/PyDiscord
|
3366d20e2725672ae7e6b29335119cac1aee76f9
|
[
"MIT"
] | null | null | null |
examples/converters.py
|
AryamanSrii/PyDiscord
|
3366d20e2725672ae7e6b29335119cac1aee76f9
|
[
"MIT"
] | null | null | null |
examples/converters.py
|
AryamanSrii/PyDiscord
|
3366d20e2725672ae7e6b29335119cac1aee76f9
|
[
"MIT"
] | null | null | null |
# This example requires the 'members' privileged intent to use the Member converter.
import typing
import pydiscord
from pydiscord.ext import commands
intents = pydiscord.Intents.default()
intents.members = True
bot = commands.Bot('!', intents=intents)
@bot.command()
async def userinfo(ctx: commands.Context, user: pydiscord.User):
# In the command signature above, you can see that the `user`
# parameter is typehinted to `pydiscord.User`. This means that
# during command invocation we will attempt to convert
# the value passed as `user` to a `pydiscord.User` instance.
# The documentation notes what can be converted, in the case of `pydiscord.User`
# you pass an ID, mention or username (discrim optional)
# E.g. 80088516616269824, @Danny or Danny#0007
# NOTE: typehinting acts as a converter within the `commands` framework only.
# In standard Python, it is use for documentation and IDE assistance purposes.
# If the conversion is successful, we will have a `pydiscord.User` instance
# and can do the following:
user_id = user.id
username = user.name
avatar = user.avatar.url
await ctx.send(f'User found: {user_id} -- {username}\n{avatar}')
@userinfo.error
async def userinfo_error(ctx: commands.Context, error: commands.CommandError):
# if the conversion above fails for any reason, it will raise `commands.BadArgument`
# so we handle this in this error handler:
if isinstance(error, commands.BadArgument):
return await ctx.send('Couldn\'t find that user.')
# Custom Converter here
class ChannelOrMemberConverter(commands.Converter):
async def convert(self, ctx: commands.Context, argument: str):
# In this example we have made a custom converter.
# This checks if an input is convertible to a
# `pydiscord.Member` or `pydiscord.TextChannel` instance from the
# input the user has given us using the pre-existing converters
# that the library provides.
member_converter = commands.MemberConverter()
try:
# Try and convert to a Member instance.
# If this fails, then an exception is raised.
# Otherwise, we just return the converted member value.
member = await member_converter.convert(ctx, argument)
except commands.MemberNotFound:
pass
else:
return member
# Do the same for TextChannel...
textchannel_converter = commands.TextChannelConverter()
try:
channel = await textchannel_converter.convert(ctx, argument)
except commands.ChannelNotFound:
pass
else:
return channel
# If the value could not be converted we can raise an error
# so our error handlers can deal with it in one place.
# The error has to be CommandError derived, so BadArgument works fine here.
raise commands.BadArgument(f'No Member or TextChannel could be converted from "{argument}"')
@bot.command()
async def notify(ctx: commands.Context, target: ChannelOrMemberConverter):
# This command signature utilises the custom converter written above
# What will happen during command invocation is that the `target` above will be passed to
# the `argument` parameter of the `ChannelOrMemberConverter.convert` method and
# the conversion will go through the process defined there.
await target.send(f'Hello, {target.name}!')
@bot.command()
async def ignore(ctx: commands.Context, target: typing.Union[pydiscord.Member, pydiscord.TextChannel]):
# This command signature utilises the `typing.Union` typehint.
# The `commands` framework attempts a conversion of each type in this Union *in order*.
# So, it will attempt to convert whatever is passed to `target` to a `pydiscord.Member` instance.
# If that fails, it will attempt to convert it to a `pydiscord.TextChannel` instance.
# See: https://discordpy.readthedocs.io/en/latest/ext/commands/commands.html#typing-union
# NOTE: If a Union typehint converter fails it will raise `commands.BadUnionArgument`
# instead of `commands.BadArgument`.
# To check the resulting type, `isinstance` is used
if isinstance(target, pydiscord.Member):
await ctx.send(f'Member found: {target.mention}, adding them to the ignore list.')
elif isinstance(target, pydiscord.TextChannel): # this could be an `else` but for completeness' sake.
await ctx.send(f'Channel found: {target.mention}, adding it to the ignore list.')
# Built-in type converters.
@bot.command()
async def multiply(ctx: commands.Context, number: int, maybe: bool):
# We want an `int` and a `bool` parameter here.
# `bool` is a slightly special case, as shown here:
# See: https://discordpy.readthedocs.io/en/latest/ext/commands/commands.html#bool
if maybe is True:
return await ctx.send(number * 2)
await ctx.send(number * 5)
bot.run('token')
| 42.220339
| 105
| 0.704135
|
37e05e28cad9d9ba1e0daa400251e30890c9b5e0
| 17,324
|
py
|
Python
|
twitter/models.py
|
jakeshi/python-twitter
|
243e7185c68117f71d27f36b08bbbb36cb3ce0da
|
[
"Apache-2.0"
] | null | null | null |
twitter/models.py
|
jakeshi/python-twitter
|
243e7185c68117f71d27f36b08bbbb36cb3ce0da
|
[
"Apache-2.0"
] | null | null | null |
twitter/models.py
|
jakeshi/python-twitter
|
243e7185c68117f71d27f36b08bbbb36cb3ce0da
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from calendar import timegm
try:
from rfc822 import parsedate
except ImportError:
from email.utils import parsedate
class TwitterModel(object):
""" Base class from which all twitter models will inherit. """
def __init__(self, **kwargs):
self.param_defaults = {}
def __str__(self):
""" Returns a string representation of TwitterModel. By default
this is the same as AsJsonString(). """
return self.AsJsonString()
def __eq__(self, other):
return other and self.AsDict() == other.AsDict()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if hasattr(self, 'id'):
return hash(self.id)
else:
raise TypeError('unhashable type: {} (no id attribute)'
.format(type(self)))
def AsJsonString(self):
""" Returns the TwitterModel as a JSON string based on key/value
pairs returned from the AsDict() method. """
return json.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
""" Create a dictionary representation of the object. Please see inline
comments on construction when dictionaries contain TwitterModels. """
data = {}
for (key, value) in self.param_defaults.items():
# If the value is a list, we need to create a list to hold the
# dicts created by an object supporting the AsDict() method,
# i.e., if it inherits from TwitterModel. If the item in the list
# doesn't support the AsDict() method, then we assign the value
# directly. An example being a list of Media objects contained
# within a Status object.
if isinstance(getattr(self, key, None), (list, tuple, set)):
data[key] = list()
for subobj in getattr(self, key, None):
if getattr(subobj, 'AsDict', None):
data[key].append(subobj.AsDict())
else:
data[key].append(subobj)
# Not a list, *but still a subclass of TwitterModel* and
# and we can assign the data[key] directly with the AsDict()
# method of the object. An example being a Status object contained
# within a User object.
elif getattr(getattr(self, key, None), 'AsDict', None):
data[key] = getattr(self, key).AsDict()
# If the value doesn't have an AsDict() method, i.e., it's not
# something that subclasses TwitterModel, then we can use direct
# assigment.
elif getattr(self, key, None):
data[key] = getattr(self, key, None)
return data
@classmethod
def NewFromJsonDict(cls, data, **kwargs):
""" Create a new instance based on a JSON dict. Any kwargs should be
supplied by the inherited, calling class.
Args:
data: A JSON dict, as converted from the JSON in the twitter API.
"""
json_data = data.copy()
if kwargs:
for key, val in kwargs.items():
json_data[key] = val
c = cls(**json_data)
c._json = data
return c
class Media(TwitterModel):
"""A class representing the Media component of a tweet. """
def __init__(self, **kwargs):
self.param_defaults = {
'display_url': None,
'expanded_url': None,
'ext_alt_text': None,
'id': None,
'media_url': None,
'media_url_https': None,
'sizes': None,
'type': None,
'url': None,
'video_info': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Media(ID={media_id}, Type={media_type}, DisplayURL='{url}')".format(
media_id=self.id,
media_type=self.type,
url=self.display_url)
class List(TwitterModel):
"""A class representing the List structure used by the twitter API. """
def __init__(self, **kwargs):
self.param_defaults = {
'description': None,
'following': None,
'full_name': None,
'id': None,
'member_count': None,
'mode': None,
'name': None,
'slug': None,
'subscriber_count': None,
'uri': None,
'user': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
if 'user' in kwargs:
self.user = User.NewFromJsonDict(kwargs.get('user'))
def __repr__(self):
return "List(ID={list_id}, FullName={full_name!r}, Slug={slug}, User={user})".format(
list_id=self.id,
full_name=self.full_name,
slug=self.slug,
user=self.user.screen_name)
class Category(TwitterModel):
"""A class representing the suggested user category structure. """
def __init__(self, **kwargs):
self.param_defaults = {
'name': None,
'size': None,
'slug': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Category(Name={name!r}, Slug={slug}, Size={size})".format(
name=self.name,
slug=self.slug,
size=self.size)
class DirectMessage(TwitterModel):
"""A class representing a Direct Message. """
def __init__(self, **kwargs):
self.param_defaults = {
'created_at': None,
'id': None,
'recipient': None,
'recipient_id': None,
'recipient_screen_name': None,
'sender': None,
'sender_id': None,
'sender_screen_name': None,
'text': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
if 'sender' in kwargs:
self.sender = User.NewFromJsonDict(kwargs.get('sender', None))
if 'recipient' in kwargs:
self.recipient = User.NewFromJsonDict(kwargs.get('recipient', None))
def __repr__(self):
if self.text and len(self.text) > 140:
text = "{text}[...]".format(text=self.text[:140])
else:
text = self.text
return "DirectMessage(ID={dm_id}, Sender={sender}, Created={time}, Text='{text!r}')".format(
dm_id=self.id,
sender=self.sender_screen_name,
time=self.created_at,
text=text)
class Trend(TwitterModel):
""" A class representing a trending topic. """
def __init__(self, **kwargs):
self.param_defaults = {
'events': None,
'name': None,
'promoted_content': None,
'query': None,
'timestamp': None,
'url': None,
'tweet_volume': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Trend(Name={0!r}, Time={1}, URL={2})".format(
self.name,
self.timestamp,
self.url)
@property
def volume(self):
return self.tweet_volume
class Hashtag(TwitterModel):
""" A class representing a twitter hashtag. """
def __init__(self, **kwargs):
self.param_defaults = {
'text': None
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Hashtag(Text={text!r})".format(
text=self.text)
class Url(TwitterModel):
""" A class representing an URL contained in a tweet. """
def __init__(self, **kwargs):
self.param_defaults = {
'expanded_url': None,
'url': None}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "URL(URL={url}, ExpandedURL={eurl})".format(
url=self.url,
eurl=self.expanded_url)
class UserStatus(TwitterModel):
""" A class representing the UserStatus structure. This is an abbreviated
form of the twitter.User object. """
_connections = {'following': False,
'followed_by': False,
'following_received': False,
'following_requested': False,
'blocking': False,
'muting': False}
def __init__(self, **kwargs):
self.param_defaults = {
'blocking': False,
'followed_by': False,
'following': False,
'following_received': False,
'following_requested': False,
'id': None,
'id_str': None,
'muting': False,
'name': None,
'screen_name': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
if 'connections' in kwargs:
for param in self._connections:
if param in kwargs['connections']:
setattr(self, param, True)
@property
def connections(self):
return {'following': self.following,
'followed_by': self.followed_by,
'following_received': self.following_received,
'following_requested': self.following_requested,
'blocking': self.blocking,
'muting': self.muting}
def __repr__(self):
connections = [param for param in self.connections if getattr(self, param)]
return "UserStatus(ID={uid}, ScreenName={sn}, Connections=[{conn}])".format(
uid=self.id,
sn=self.screen_name,
conn=", ".join(connections))
class User(TwitterModel):
"""A class representing the User structure. """
def __init__(self, **kwargs):
self.param_defaults = {
'contributors_enabled': None,
'created_at': None,
'default_profile': None,
'default_profile_image': None,
'description': None,
'email': None,
'favourites_count': None,
'followers_count': None,
'following': None,
'friends_count': None,
'geo_enabled': None,
'id': None,
'lang': None,
'listed_count': None,
'location': None,
'name': None,
'notifications': None,
'profile_background_color': None,
'profile_background_image_url': None,
'profile_background_tile': None,
'profile_banner_url': None,
'profile_image_url': None,
'profile_link_color': None,
'profile_sidebar_fill_color': None,
'profile_text_color': None,
'protected': None,
'screen_name': None,
'status': None,
'statuses_count': None,
'time_zone': None,
'url': None,
'utc_offset': None,
'verified': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "User(ID={uid}, ScreenName={sn})".format(
uid=self.id,
sn=self.screen_name)
@classmethod
def NewFromJsonDict(cls, data, **kwargs):
from twitter import Status
if data.get('status', None):
status = Status.NewFromJsonDict(data.get('status'))
return super(cls, cls).NewFromJsonDict(data=data, status=status)
else:
return super(cls, cls).NewFromJsonDict(data=data)
class Status(TwitterModel):
"""A class representing the Status structure used by the twitter API.
"""
def __init__(self, **kwargs):
self.param_defaults = {
'contributors': None,
'coordinates': None,
'created_at': None,
'current_user_retweet': None,
'favorite_count': None,
'favorited': None,
'full_text': None,
'geo': None,
'hashtags': None,
'id': None,
'id_str': None,
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None,
'in_reply_to_user_id': None,
'lang': None,
'location': None,
'media': None,
'place': None,
'possibly_sensitive': None,
'quoted_status': None,
'quoted_status_id': None,
'quoted_status_id_str': None,
'retweet_count': None,
'retweeted': None,
'retweeted_status': None,
'scopes': None,
'source': None,
'text': None,
'truncated': None,
'urls': None,
'user': None,
'user_mentions': None,
'withheld_copyright': None,
'withheld_in_countries': None,
'withheld_scope': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
if kwargs.get('full_text', None):
self.tweet_mode = 'extended'
else:
self.tweet_mode = 'compatibility'
@property
def created_at_in_seconds(self):
""" Get the time this status message was posted, in seconds since
the epoch (1 Jan 1970).
Returns:
int: The time this status message was posted, in seconds since
the epoch.
"""
return timegm(parsedate(self.created_at))
def __repr__(self):
""" A string representation of this twitter.Status instance.
The return value is the ID of status, username and datetime.
Returns:
string: A string representation of this twitter.Status instance with
the ID of status, username and datetime.
"""
if self.tweet_mode == 'extended':
text = self.full_text
else:
text = self.text
if self.user:
return "Status(ID={0}, ScreenName={1}, Created={2}, Text={3!r})".format(
self.id,
self.user.screen_name,
self.created_at,
text)
else:
return u"Status(ID={0}, Created={1}, Text={2!r})".format(
self.id,
self.created_at,
text)
@classmethod
def NewFromJsonDict(cls, data, **kwargs):
""" Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Status instance
"""
current_user_retweet = None
hashtags = None
media = None
quoted_status = None
retweeted_status = None
urls = None
user = None
user_mentions = None
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
if 'retweeted_status' in data:
retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
if 'current_user_retweet' in data:
current_user_retweet = data['current_user_retweet']['id']
if 'quoted_status' in data:
quoted_status = Status.NewFromJsonDict(data.get('quoted_status'))
if 'entities' in data:
if 'urls' in data['entities']:
urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
if 'user_mentions' in data['entities']:
user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
if 'hashtags' in data['entities']:
hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
if 'media' in data['entities']:
media = [Media.NewFromJsonDict(m) for m in data['entities']['media']]
# the new extended entities
if 'extended_entities' in data:
if 'media' in data['extended_entities']:
media = [Media.NewFromJsonDict(m) for m in data['extended_entities']['media']]
return super(cls, cls).NewFromJsonDict(data=data,
current_user_retweet=current_user_retweet,
hashtags=hashtags,
media=media,
quoted_status=quoted_status,
retweeted_status=retweeted_status,
urls=urls,
user=user,
user_mentions=user_mentions)
| 32.810606
| 100
| 0.540349
|
04b81299c29bbb22d42d79eef7153224cf5da3ae
| 1,063
|
py
|
Python
|
cvxpy/expressions/constants/callback_param.py
|
jasondark/cvxpy
|
56aaa01b0e9d98ae5a91a923708129a7b37a6f18
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2015-06-03T01:33:46.000Z
|
2021-11-15T01:48:49.000Z
|
cvxpy/expressions/constants/callback_param.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-10-22T07:46:38.000Z
|
2020-10-22T07:46:38.000Z
|
cvxpy/expressions/constants/callback_param.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-10-22T01:35:58.000Z
|
2022-01-19T10:48:51.000Z
|
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expressions.constants.parameter import Parameter
class CallbackParam(Parameter):
"""
A parameter whose value is obtained by evaluating a function.
"""
PARAM_COUNT = 0
def __init__(self, callback, shape=(), **kwargs):
self._callback = callback
super(CallbackParam, self).__init__(shape, **kwargs)
@property
def value(self):
"""Evaluate the callback to get the value.
"""
return self._validate_value(self._callback())
| 30.371429
| 72
| 0.725306
|
ba70d7253458c1d5b83d8fb22e5155503553cb3f
| 23,014
|
py
|
Python
|
src/MOSIM/mmi/services/MPostureBlendingService.py
|
dfki-asr/MMIPython-Core
|
2f4b51ffde606c45661d9dbd5153576f919bdb8b
|
[
"MIT"
] | null | null | null |
src/MOSIM/mmi/services/MPostureBlendingService.py
|
dfki-asr/MMIPython-Core
|
2f4b51ffde606c45661d9dbd5153576f919bdb8b
|
[
"MIT"
] | null | null | null |
src/MOSIM/mmi/services/MPostureBlendingService.py
|
dfki-asr/MMIPython-Core
|
2f4b51ffde606c45661d9dbd5153576f919bdb8b
|
[
"MIT"
] | null | null | null |
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import MOSIM.mmi.services.MMIServiceBase
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(MOSIM.mmi.services.MMIServiceBase.Iface):
def Blend(self, startPosture, targetPosture, weight, mask, properties):
"""
Parameters:
- startPosture
- targetPosture
- weight
- mask
- properties
"""
pass
def BlendMany(self, startPosture, targetPosture, weights, mask, properties):
"""
Parameters:
- startPosture
- targetPosture
- weights
- mask
- properties
"""
pass
class Client(MOSIM.mmi.services.MMIServiceBase.Client, Iface):
def __init__(self, iprot, oprot=None):
MOSIM.mmi.services.MMIServiceBase.Client.__init__(self, iprot, oprot)
def Blend(self, startPosture, targetPosture, weight, mask, properties):
"""
Parameters:
- startPosture
- targetPosture
- weight
- mask
- properties
"""
self.send_Blend(startPosture, targetPosture, weight, mask, properties)
return self.recv_Blend()
def send_Blend(self, startPosture, targetPosture, weight, mask, properties):
self._oprot.writeMessageBegin('Blend', TMessageType.CALL, self._seqid)
args = Blend_args()
args.startPosture = startPosture
args.targetPosture = targetPosture
args.weight = weight
args.mask = mask
args.properties = properties
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_Blend(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = Blend_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "Blend failed: unknown result")
def BlendMany(self, startPosture, targetPosture, weights, mask, properties):
"""
Parameters:
- startPosture
- targetPosture
- weights
- mask
- properties
"""
self.send_BlendMany(startPosture, targetPosture, weights, mask, properties)
return self.recv_BlendMany()
def send_BlendMany(self, startPosture, targetPosture, weights, mask, properties):
self._oprot.writeMessageBegin('BlendMany', TMessageType.CALL, self._seqid)
args = BlendMany_args()
args.startPosture = startPosture
args.targetPosture = targetPosture
args.weights = weights
args.mask = mask
args.properties = properties
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_BlendMany(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = BlendMany_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "BlendMany failed: unknown result")
class Processor(MOSIM.mmi.services.MMIServiceBase.Processor, Iface, TProcessor):
def __init__(self, handler):
MOSIM.mmi.services.MMIServiceBase.Processor.__init__(self, handler)
self._processMap["Blend"] = Processor.process_Blend
self._processMap["BlendMany"] = Processor.process_BlendMany
self._on_message_begin = None
def on_message_begin(self, func):
self._on_message_begin = func
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if self._on_message_begin:
self._on_message_begin(name, type, seqid)
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_Blend(self, seqid, iprot, oprot):
args = Blend_args()
args.read(iprot)
iprot.readMessageEnd()
result = Blend_result()
try:
result.success = self._handler.Blend(args.startPosture, args.targetPosture, args.weight, args.mask, args.properties)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("Blend", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_BlendMany(self, seqid, iprot, oprot):
args = BlendMany_args()
args.read(iprot)
iprot.readMessageEnd()
result = BlendMany_result()
try:
result.success = self._handler.BlendMany(args.startPosture, args.targetPosture, args.weights, args.mask, args.properties)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("BlendMany", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class Blend_args(object):
"""
Attributes:
- startPosture
- targetPosture
- weight
- mask
- properties
"""
def __init__(self, startPosture=None, targetPosture=None, weight=None, mask=None, properties=None,):
self.startPosture = startPosture
self.targetPosture = targetPosture
self.weight = weight
self.mask = mask
self.properties = properties
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.startPosture = MOSIM.mmi.avatar.ttypes.MAvatarPostureValues()
self.startPosture.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.targetPosture = MOSIM.mmi.avatar.ttypes.MAvatarPostureValues()
self.targetPosture.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.DOUBLE:
self.weight = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.mask = {}
(_ktype192, _vtype193, _size191) = iprot.readMapBegin()
for _i195 in range(_size191):
_key196 = iprot.readI32()
_val197 = iprot.readDouble()
self.mask[_key196] = _val197
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.properties = {}
(_ktype199, _vtype200, _size198) = iprot.readMapBegin()
for _i202 in range(_size198):
_key203 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val204 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.properties[_key203] = _val204
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Blend_args')
if self.startPosture is not None:
oprot.writeFieldBegin('startPosture', TType.STRUCT, 1)
self.startPosture.write(oprot)
oprot.writeFieldEnd()
if self.targetPosture is not None:
oprot.writeFieldBegin('targetPosture', TType.STRUCT, 2)
self.targetPosture.write(oprot)
oprot.writeFieldEnd()
if self.weight is not None:
oprot.writeFieldBegin('weight', TType.DOUBLE, 3)
oprot.writeDouble(self.weight)
oprot.writeFieldEnd()
if self.mask is not None:
oprot.writeFieldBegin('mask', TType.MAP, 4)
oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.mask))
for kiter205, viter206 in self.mask.items():
oprot.writeI32(kiter205)
oprot.writeDouble(viter206)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.properties is not None:
oprot.writeFieldBegin('properties', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
for kiter207, viter208 in self.properties.items():
oprot.writeString(kiter207.encode('utf-8') if sys.version_info[0] == 2 else kiter207)
oprot.writeString(viter208.encode('utf-8') if sys.version_info[0] == 2 else viter208)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(Blend_args)
Blend_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'startPosture', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 1
(2, TType.STRUCT, 'targetPosture', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 2
(3, TType.DOUBLE, 'weight', None, None, ), # 3
(4, TType.MAP, 'mask', (TType.I32, None, TType.DOUBLE, None, False), None, ), # 4
(5, TType.MAP, 'properties', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 5
)
class Blend_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = MOSIM.mmi.avatar.ttypes.MAvatarPostureValues()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Blend_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(Blend_result)
Blend_result.thrift_spec = (
(0, TType.STRUCT, 'success', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 0
)
class BlendMany_args(object):
"""
Attributes:
- startPosture
- targetPosture
- weights
- mask
- properties
"""
def __init__(self, startPosture=None, targetPosture=None, weights=None, mask=None, properties=None,):
self.startPosture = startPosture
self.targetPosture = targetPosture
self.weights = weights
self.mask = mask
self.properties = properties
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.startPosture = MOSIM.mmi.avatar.ttypes.MAvatarPostureValues()
self.startPosture.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.targetPosture = MOSIM.mmi.avatar.ttypes.MAvatarPostureValues()
self.targetPosture.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.weights = []
(_etype212, _size209) = iprot.readListBegin()
for _i213 in range(_size209):
_elem214 = iprot.readDouble()
self.weights.append(_elem214)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.mask = {}
(_ktype216, _vtype217, _size215) = iprot.readMapBegin()
for _i219 in range(_size215):
_key220 = iprot.readI32()
_val221 = iprot.readDouble()
self.mask[_key220] = _val221
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.properties = {}
(_ktype223, _vtype224, _size222) = iprot.readMapBegin()
for _i226 in range(_size222):
_key227 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val228 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.properties[_key227] = _val228
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('BlendMany_args')
if self.startPosture is not None:
oprot.writeFieldBegin('startPosture', TType.STRUCT, 1)
self.startPosture.write(oprot)
oprot.writeFieldEnd()
if self.targetPosture is not None:
oprot.writeFieldBegin('targetPosture', TType.STRUCT, 2)
self.targetPosture.write(oprot)
oprot.writeFieldEnd()
if self.weights is not None:
oprot.writeFieldBegin('weights', TType.LIST, 3)
oprot.writeListBegin(TType.DOUBLE, len(self.weights))
for iter229 in self.weights:
oprot.writeDouble(iter229)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.mask is not None:
oprot.writeFieldBegin('mask', TType.MAP, 4)
oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.mask))
for kiter230, viter231 in self.mask.items():
oprot.writeI32(kiter230)
oprot.writeDouble(viter231)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.properties is not None:
oprot.writeFieldBegin('properties', TType.MAP, 5)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
for kiter232, viter233 in self.properties.items():
oprot.writeString(kiter232.encode('utf-8') if sys.version_info[0] == 2 else kiter232)
oprot.writeString(viter233.encode('utf-8') if sys.version_info[0] == 2 else viter233)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(BlendMany_args)
BlendMany_args.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'startPosture', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 1
(2, TType.STRUCT, 'targetPosture', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 2
(3, TType.LIST, 'weights', (TType.DOUBLE, None, False), None, ), # 3
(4, TType.MAP, 'mask', (TType.I32, None, TType.DOUBLE, None, False), None, ), # 4
(5, TType.MAP, 'properties', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 5
)
class BlendMany_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype237, _size234) = iprot.readListBegin()
for _i238 in range(_size234):
_elem239 = MOSIM.mmi.avatar.ttypes.MAvatarPostureValues()
_elem239.read(iprot)
self.success.append(_elem239)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('BlendMany_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter240 in self.success:
iter240.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(BlendMany_result)
BlendMany_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], False), None, ), # 0
)
fix_spec(all_structs)
del all_structs
| 37.482085
| 134
| 0.586295
|
3dc2ee1285d661dd93e5989264d671b0c4a9f35f
| 255
|
py
|
Python
|
src/rastervision/ml_tasks/utils.py
|
nholeman/raster-vision
|
f3e1e26c555feed6fa018183c3fa04d7858d91bd
|
[
"Apache-2.0"
] | null | null | null |
src/rastervision/ml_tasks/utils.py
|
nholeman/raster-vision
|
f3e1e26c555feed6fa018183c3fa04d7858d91bd
|
[
"Apache-2.0"
] | null | null | null |
src/rastervision/ml_tasks/utils.py
|
nholeman/raster-vision
|
f3e1e26c555feed6fa018183c3fa04d7858d91bd
|
[
"Apache-2.0"
] | null | null | null |
def is_window_inside_aoi(window, aoi_polygons):
if not aoi_polygons:
return True
window_shapely = window.get_shapely()
for polygon in aoi_polygons:
if window_shapely.within(polygon):
return True
return False
| 21.25
| 47
| 0.678431
|
3ac013d649f723ed9218d806eb3d3c5ebf513811
| 1,772
|
py
|
Python
|
python/tvm/exec/rpc_tracker.py
|
Aimledge/tvm
|
f41c050fc681a9d9805e6c73e729df233e1acbac
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/exec/rpc_tracker.py
|
Aimledge/tvm
|
f41c050fc681a9d9805e6c73e729df233e1acbac
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/exec/rpc_tracker.py
|
Aimledge/tvm
|
f41c050fc681a9d9805e6c73e729df233e1acbac
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=redefined-outer-name, invalid-name
"""Tool to start RPC tracker"""
from __future__ import absolute_import
import logging
import argparse
import multiprocessing
import sys
from ..rpc.tracker import Tracker
def main(args):
"""Main funciton"""
tracker = Tracker(args.host, port=args.port, port_end=args.port_end,
silent=args.silent)
tracker.proc.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str, default="0.0.0.0",
help='the hostname of the tracker')
parser.add_argument('--port', type=int, default=9190,
help='The port of the PRC')
parser.add_argument('--port-end', type=int, default=9199,
help='The end search port of the PRC')
parser.add_argument('--no-fork', dest='fork', action='store_false',
help="Use spawn mode to avoid fork. This option \
is able to avoid potential fork problems with Metal, OpenCL \
and ROCM compilers.")
parser.add_argument('--silent', action='store_true',
help="Whether run in silent mode.")
parser.set_defaults(fork=True)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.fork is False:
if sys.version_info[0] < 3:
raise RuntimeError(
"Python3 is required for spawn mode."
)
multiprocessing.set_start_method('spawn')
else:
if not args.silent:
logging.info("If you are running ROCM/Metal, fork will cause "
"compiler internal error. Try to launch with arg ```--no-fork```")
main(args)
| 36.916667
| 91
| 0.602709
|
4875e923df4f8593693bf8abdfc480c55169a82f
| 2,467
|
py
|
Python
|
umamaheswar/bin/explode.py
|
oyearunpal/NagarGatPat
|
c9c0f29af47cef134b0b158cb4df503ce3417f0a
|
[
"MIT"
] | null | null | null |
umamaheswar/bin/explode.py
|
oyearunpal/NagarGatPat
|
c9c0f29af47cef134b0b158cb4df503ce3417f0a
|
[
"MIT"
] | null | null | null |
umamaheswar/bin/explode.py
|
oyearunpal/NagarGatPat
|
c9c0f29af47cef134b0b158cb4df503ce3417f0a
|
[
"MIT"
] | null | null | null |
#!/home/arun/umamaheswar/umamaheswar/bin/python
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
| 21.831858
| 75
| 0.539522
|
be714622fdfce8043719c8c1e9f2deeebc61f216
| 126
|
py
|
Python
|
files/database-rename.py
|
opus-codium/puppet-odoo
|
319cd1360a568e8272743ca67897fecc88147677
|
[
"Apache-2.0"
] | null | null | null |
files/database-rename.py
|
opus-codium/puppet-odoo
|
319cd1360a568e8272743ca67897fecc88147677
|
[
"Apache-2.0"
] | 9
|
2020-06-29T16:02:34.000Z
|
2021-12-02T17:46:58.000Z
|
files/database-rename.py
|
opus-codium/puppet-odoo
|
319cd1360a568e8272743ca67897fecc88147677
|
[
"Apache-2.0"
] | null | null | null |
import os
odoo.tools.config['list_db'] = True
odoo.service.db.exp_rename(os.environ['PT_oldname'], os.environ['PT_newname'])
| 25.2
| 78
| 0.753968
|
cd0bf489fd9fd6000bad400bb93384ac6ad7ed04
| 609
|
py
|
Python
|
courses/urls.py
|
Terahpatrick/Django-Course-Articles-Class-Based-view
|
79aa277fd2e8e01017c49f891a59313fb3799f4c
|
[
"MIT"
] | 1
|
2019-10-09T12:11:39.000Z
|
2019-10-09T12:11:39.000Z
|
courses/urls.py
|
Terahpatrick/Django-Course-Articles-Class-Based-view
|
79aa277fd2e8e01017c49f891a59313fb3799f4c
|
[
"MIT"
] | null | null | null |
courses/urls.py
|
Terahpatrick/Django-Course-Articles-Class-Based-view
|
79aa277fd2e8e01017c49f891a59313fb3799f4c
|
[
"MIT"
] | null | null | null |
from django.urls import path
# from .views import course_list_view
from .views import (
CourseListView,
CourseDetailView,
CourseCreateView,
CourseDeleteView,
CourseUpdateView
)
app_name = 'courses'
urlpatterns = [
path('', CourseListView.as_view(), name = "course_list"),
path('<int:id>/', CourseDetailView.as_view(), name = "course_detail"),
path('create/', CourseCreateView.as_view(), name = "course_create"),
path('<int:id>/delete/', CourseDeleteView.as_view(), name = "course_delete"),
path('<int:id>/update/', CourseUpdateView.as_view(), name = "course_update")
]
| 33.833333
| 81
| 0.694581
|
7012a9822ba66be45291895f80070eabb8640797
| 1,273
|
py
|
Python
|
scenarios/dualist_gps/plots/plot_all.py
|
domingoesteban/robolearn
|
0d20125425c352b80ef2eeed1c0b11ab6497b11a
|
[
"BSD-3-Clause"
] | 1
|
2020-01-13T09:44:22.000Z
|
2020-01-13T09:44:22.000Z
|
scenarios/dualist_gps/plots/plot_all.py
|
domingoesteban/robolearn
|
0d20125425c352b80ef2eeed1c0b11ab6497b11a
|
[
"BSD-3-Clause"
] | null | null | null |
scenarios/dualist_gps/plots/plot_all.py
|
domingoesteban/robolearn
|
0d20125425c352b80ef2eeed1c0b11ab6497b11a
|
[
"BSD-3-Clause"
] | 1
|
2021-12-22T00:41:20.000Z
|
2021-12-22T00:41:20.000Z
|
import os
from robolearn.old_utils.plots.policy_cost import plot_policy_cost
from robolearn.old_utils.plots.specific_cost import plot_specific_cost
from robolearn.old_utils.plots.duals import plot_duals
method = 'gps' # 'gps' or 'trajopt'
gps_directory_names = ['reacher_log']#, 'reacher_log2', 'reacher_log3']
gps_models_labels = ['gps1']#, 'gps2', 'gps3']
itr_to_load = None # list(range(8))
block = False
plot_cs = True
plot_policy_costs = True
plot_cost_types = False
specific_costs = None #[4] # None for all costs
dir_names = [os.path.dirname(os.path.realpath(__file__)) + '/../' + dir_name
for dir_name in gps_directory_names]
plot_policy_cost(dir_names, itr_to_load=itr_to_load, method=method,
gps_models_labels=gps_models_labels, block=block,
plot_cs=plot_cs, plot_policy_costs=plot_policy_costs,
plot_cost_types=plot_cost_types)
plot_specific_cost(dir_names, itr_to_load=itr_to_load, method=method,
gps_models_labels=gps_models_labels, block=block,
specific_costs=specific_costs)
plot_duals(dir_names, itr_to_load=itr_to_load, method=method,
gps_models_labels=gps_models_labels, block=block)
input('Showing plots. Press a key to close...')
| 39.78125
| 76
| 0.732914
|
2c23c4c812db17fd205fab1d8180ea575145ad6b
| 1,350
|
py
|
Python
|
slurk/models/log.py
|
TimDiekmann/slurk
|
6b597c9b75978ce4d191934b9e94185dd6fcb871
|
[
"BSD-3-Clause"
] | null | null | null |
slurk/models/log.py
|
TimDiekmann/slurk
|
6b597c9b75978ce4d191934b9e94185dd6fcb871
|
[
"BSD-3-Clause"
] | null | null | null |
slurk/models/log.py
|
TimDiekmann/slurk
|
6b597c9b75978ce4d191934b9e94185dd6fcb871
|
[
"BSD-3-Clause"
] | null | null | null |
from sqlalchemy import String, Integer, ForeignKey, JSON, Column
from sqlalchemy.orm import relationship
from .common import Common
class Log(Common):
__tablename__ = "Log"
event = Column(String, nullable=False)
user_id = Column(Integer, ForeignKey("User.id", ondelete="CASCADE"))
room_id = Column(Integer, ForeignKey("Room.id", ondelete="CASCADE"))
receiver_id = Column(Integer, ForeignKey("User.id", ondelete="CASCADE"))
data = Column(JSON, nullable=False)
user = relationship("User", foreign_keys=[user_id])
receiver = relationship("User", foreign_keys=[receiver_id])
def add(event, user=None, room=None, receiver=None, data=None):
from flask.globals import current_app
if not data:
data = {}
if event == "join":
current_app.logger.info(f"{user.name} joined {room.layout.title}")
if event == "leave":
current_app.logger.info(f"{user.name} left {room.layout.title})")
if event == "connect":
current_app.logger.info(f"{user.name} connected")
if event == "disconnect":
current_app.logger.info(f"{user.name} disconnected")
log = Log(event=event, user=user, room=room, data=data, receiver=receiver)
db = current_app.session
db.add(log)
db.commit()
return log
| 34.615385
| 82
| 0.643704
|
e89ca8043ca046e29a9e38f5900a400b330e7da1
| 185
|
py
|
Python
|
Coloring/setup.py
|
zarahz/MARL-and-Markets
|
3591a160e098e7251b9e7c7b59c6d0ab08ba0779
|
[
"MIT"
] | 1
|
2022-03-12T09:17:32.000Z
|
2022-03-12T09:17:32.000Z
|
Coloring/setup.py
|
zarahz/MARL-and-Markets
|
3591a160e098e7251b9e7c7b59c6d0ab08ba0779
|
[
"MIT"
] | null | null | null |
Coloring/setup.py
|
zarahz/MARL-and-Markets
|
3591a160e098e7251b9e7c7b59c6d0ab08ba0779
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='coloring',
version='0.0.1',
# for visualization also install matplotlib
# install_requires=['gym', 'numpy', 'torch']
)
| 23.125
| 50
| 0.637838
|
1d850243b46adee5d5a29a03f7cc1016f77c4760
| 2,416
|
py
|
Python
|
supervised_learning/test_attention.py
|
gonzalezJohnas/SpeechCommand-recognition
|
d5351abe45c571a075c24bd04d328e76293f9230
|
[
"MIT"
] | null | null | null |
supervised_learning/test_attention.py
|
gonzalezJohnas/SpeechCommand-recognition
|
d5351abe45c571a075c24bd04d328e76293f9230
|
[
"MIT"
] | 2
|
2021-04-10T18:12:44.000Z
|
2022-02-09T23:36:43.000Z
|
supervised_learning/test_attention.py
|
gonzalezJohnas/SpeechCommand-recognition
|
d5351abe45c571a075c24bd04d328e76293f9230
|
[
"MIT"
] | null | null | null |
import argparse
# Data Loading
from tensorflow.python.keras import Model
from tensorflow.keras.backend import squeeze
from global_utils import read_sample
import numpy as np
import matplotlib.pyplot as plt
from supervised_learning.config import *
import random
from scipy import signal
import scipy.io.wavfile as wavfile
random.seed(42)
def main(args):
print("Loading the model")
model = tf.keras.models.load_model(args.model, custom_objects={
'squeeze': squeeze}
)
model.summary()
sample_mfcc = read_sample(args.filename, mfcc=True)
x_test = np.expand_dims(sample_mfcc, axis=0)
x_test = np.expand_dims(x_test, -1)
attSpeechModel = Model(inputs=model.input,
outputs=[model.get_layer('output').output,
model.get_layer('attSoftmax').output])
predictions, attention_weights = attSpeechModel.predict(x_test)
prediction_max = tf.argmax(predictions, axis=1).numpy()
print("Label predicted {}".format(id2name[prediction_max[0]]))
print("Max predictions {}".format(np.max(predictions)))
imgHeight = 2 * 2
_, wav = wavfile.read(args.filename)
# plot the first 1024 samples
plt.figure(figsize=(17, imgHeight))
plt.plot(wav)
# label the axes
plt.ylabel("Amplitude")
plt.xlabel("Time")
# set the title
plt.title("Audio sample")
# display the plot
plt.figure(figsize=(17, imgHeight))
plt.title('Attention weights (log)')
plt.ylabel('Log of attention weight')
plt.xlabel('Mel-spectrogram index')
plt.plot(np.log(attention_weights[0]))
f, t, Sxx = signal.spectrogram(wav, SAMPLE_RATE)
plt.figure(figsize=(17, imgHeight))
plt.pcolormesh(t, f, Sxx)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.ylim(0, 1000.0)
plt.show()
return 1
if __name__ == "__main__":
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
help='Path of a previous trained model',
required=True,
default=None
)
parser.add_argument(
'--filename',
help='Wav file name to test',
required=True,
default=None
)
args = parser.parse_args()
main(args)
| 24.653061
| 74
| 0.643626
|
d0c9b64aacea4727ad7c9ec35e7c35aea3b78518
| 2,409
|
py
|
Python
|
src/transformers/utils/dummy_sentencepiece_objects.py
|
suliuzh/transformers
|
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
|
[
"Apache-2.0"
] | 96
|
2021-06-16T09:06:52.000Z
|
2022-03-26T09:56:32.000Z
|
src/transformers/utils/dummy_sentencepiece_objects.py
|
suliuzh/transformers
|
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
|
[
"Apache-2.0"
] | 16
|
2021-07-01T05:34:48.000Z
|
2022-03-28T09:40:15.000Z
|
src/transformers/utils/dummy_sentencepiece_objects.py
|
suliuzh/transformers
|
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
|
[
"Apache-2.0"
] | 24
|
2021-06-19T15:58:31.000Z
|
2022-03-14T09:17:19.000Z
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_sentencepiece
class AlbertTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class BertGenerationTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class CamembertTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class MarianTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class MBartTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class PegasusTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class ReformerTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class T5Tokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class XLMProphetNetTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class XLMRobertaTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
class XLNetTokenizer:
def __init__(self, *args, **kwargs):
requires_sentencepiece(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_sentencepiece(self)
| 23.617647
| 75
| 0.688252
|
f59f42fe47d5da2e4c3a5e15f275bfe093945c6e
| 5,634
|
py
|
Python
|
mbbl_envs/mbbl/util/kfac/estimator.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | 9
|
2022-01-16T11:27:00.000Z
|
2022-03-13T14:04:48.000Z
|
mbbl_envs/mbbl/util/kfac/estimator.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | null | null | null |
mbbl_envs/mbbl/util/kfac/estimator.py
|
hbutsuak95/iv_rl
|
0f72a8f077a238237027ea96b7d1160c35ac9959
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import itertools
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.util import nest
from mbbl.util.kfac import utils
class _DeviceContextGenerator(object):
"""Class for generating device contexts in a round-robin fashion."""
def __init__(self, devices):
self._cycle = None if devices is None else itertools.cycle(devices)
@contextlib.contextmanager
def __call__(self):
"""Returns a context manager specifying the default device."""
if self._cycle is None:
yield
else:
with tf_ops.device(next(self._cycle)):
yield
class FisherEstimator(object):
def __init__(self,
variables,
cov_ema_decay,
damping,
layer_collection,
estimation_mode="gradients",
colocate_gradients_with_ops=False,
cov_devices=None,
inv_devices=None):
self._variables = variables
self._damping = damping
self._estimation_mode = estimation_mode
self._layers = layer_collection
# self._layers.create_subgraph()
# self._layers.check_registration(variables)
self._gradient_fns = {
"gradients": self._get_grads_lists_gradients,
"empirical": self._get_grads_lists_empirical,
}
self._colocate_gradients_with_ops = colocate_gradients_with_ops
self._cov_device_context_generator = _DeviceContextGenerator(cov_devices)
if inv_devices == cov_devices:
self._inv_device_context_generator = self._cov_device_context_generator
else:
self._inv_device_context_generator = _DeviceContextGenerator(inv_devices)
setup = self._setup(cov_ema_decay)
self.cov_update_op, self.inv_update_op, self.inv_updates_dict = setup
self.init_cov_op = self.init_cov_op()
@property
def variables(self):
return self._variables
@property
def damping(self):
return self._damping
def _apply_transformation(self, vecs_and_vars, transform):
vecs = utils.SequenceDict((var, vec) for vec, var in vecs_and_vars)
trans_vecs = utils.SequenceDict()
for params, fb in self._layers.fisher_blocks.items():
trans_vecs[params] = transform(fb, vecs[params])
return [(trans_vecs[var], var) for _, var in vecs_and_vars]
def multiply_inverse(self, vecs_and_vars):
return self._apply_transformation(vecs_and_vars,
lambda fb, vec: fb.multiply_inverse(vec))
def multiply(self, vecs_and_vars):
return self._apply_transformation(vecs_and_vars,
lambda fb, vec: fb.multiply(vec))
def init_cov_op(self):
cov_updates = [
factor.make_covariance_update_op(1.0, "accumulate")
for factor in self._layers.get_factors()
]
return control_flow_ops.group(*cov_updates)
def rescale(self, sess, scale):
rescale_ops = [factor.rescale_covariance_op(scale) for factor in self._layers.get_factors()]
sess.run(control_flow_ops.group(*rescale_ops))
def reset(self, sess):
reset_ops = [factor.reset_covariance_op() for factor in self._layers.get_factors()]
sess.run(control_flow_ops.group(*reset_ops))
def _setup(self, cov_ema_decay):
fisher_blocks_list = self._layers.get_blocks()
tensors_to_compute_grads = [
fb.tensors_to_compute_grads() for fb in fisher_blocks_list
]
try:
grads_lists = self._gradient_fns[self._estimation_mode](
tensors_to_compute_grads)
except KeyError:
raise ValueError("Unrecognized value {} for estimation_mode.".format(
self._estimation_mode))
for grads_list, fb in zip(grads_lists, fisher_blocks_list):
with self._cov_device_context_generator():
fb.instantiate_factors(grads_list, self.damping)
cov_updates = [
factor.make_covariance_update_op(cov_ema_decay)
for factor in self._layers.get_factors()
]
inv_updates = {op.name: op for op in self._get_all_inverse_update_ops()}
return control_flow_ops.group(*cov_updates), control_flow_ops.group(
*inv_updates.values()), inv_updates
def _get_all_inverse_update_ops(self):
for factor in self._layers.get_factors():
with self._inv_device_context_generator():
for op in factor.make_inverse_update_ops():
yield op
def _get_grads_lists_gradients(self, tensors):
grads_flat = gradients_impl.gradients(
self._layers.total_sampled_loss(),
nest.flatten(tensors),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
def _get_grads_lists_empirical(self, tensors):
grads_flat = gradients_impl.gradients(
self._layers.total_loss(),
nest.flatten(tensors),
colocate_gradients_with_ops=self._colocate_gradients_with_ops)
grads_all = nest.pack_sequence_as(tensors, grads_flat)
return tuple((grad,) for grad in grads_all)
| 36.823529
| 100
| 0.663294
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.