Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 972 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 – 972 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 – 972 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
hexsha: 1ecb1b056b45fb646b5124396662365d5daf50c4 | size: 151 | ext: py | lang: Python
repo path: 0x11-python-network_1/6-post_email.py
repo: calypsobronte/holbertonschool-higher_level_programming @ c39c060d8473976fa475d22fffba5cb4329c9965
licenses: ["MIT"] | star/issue/fork counts and event datetimes: null
(the same path, repo, head hexsha, and license values repeat verbatim across the max_stars, max_issues, and max_forks field groups)
#!/usr/bin/python3
"""Sends a POST request to a URL with an email parameter and prints the body."""
import requests
from sys import argv

if __name__ == "__main__":
    print(requests.post(argv[1], data={'email': argv[2]}).text)
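# Usage sketch (the URL and address below are illustrative, not from the source):
#     ./6-post_email.py http://0.0.0.0:5000/post_email hello@example.com
# The script prints whatever response body the server returns.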
avg_line_length: 18.875 | max_line_length: 65 | alphanum_fraction: 0.675497
hexsha: 48f726dd79dfc27b4607c76077038bd79e95758c | size: 75,622 | ext: py | lang: Python
repo path: paddlenlp/transformers/visualbert/modeling.py
repo: chenkangyang/PaddleNLP @ 0d4a8a80c8c20d774db207fae214509be402d149
licenses: ["Apache-2.0"] | star/issue/fork counts and event datetimes: null
(the same path, repo, head hexsha, and license values repeat verbatim across the max_stars, max_issues, and max_forks field groups)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import OrderedDict
import paddle
import paddle.nn as nn
from .. import PretrainedModel, register_base_model
__all__ = [
"VisualBertModel",
"VisualBertForPreTraining",
"VisualBertForQuestionAnswering",
"VisualBertForVisualReasoning",
"VisualBertForMultipleChoice",
]
dtype_float = paddle.get_default_dtype()
class VisualBertEmbeddings(nn.Layer):
"""Construct the embeddings from word, position and token_type embeddings and visual embeddings."""
def __init__(
self,
vocab_size=30522,
hidden_size=768,
visual_embedding_dim=512,
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
layer_norm_eps=1e-12,
special_visual_initialize=True,
pad_token_id=1, ):
super(VisualBertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
vocab_size, hidden_size, padding_idx=pad_token_id)
self.position_embeddings = nn.Embedding(max_position_embeddings,
hidden_size)
self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, epsilon=layer_norm_eps)
self.dropout = nn.Dropout(hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids",
paddle.arange(max_position_embeddings).expand(
(1, -1)))
# For Visual Features
# Token type and position embedding for image features
self.visual_token_type_embeddings = nn.Embedding(type_vocab_size,
hidden_size)
self.visual_position_embeddings = nn.Embedding(max_position_embeddings,
hidden_size)
if special_visual_initialize:
assert isinstance(self.visual_token_type_embeddings.weight,
paddle.Tensor)
assert isinstance(self.visual_position_embeddings.weight,
paddle.Tensor)
self.visual_token_type_embeddings.weight.set_value(
self.token_type_embeddings.weight.clone())
self.visual_position_embeddings.weight.set_value(
self.position_embeddings.weight.clone())
self.visual_projection = nn.Linear(visual_embedding_dim, hidden_size)
def forward(
self,
input_ids=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
visual_embeds=None,
visual_token_type_ids=None,
image_text_alignment=None, ):
if input_ids is not None:
input_shape = input_ids.shape
else:
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if token_type_ids is None:
token_type_ids = paddle.zeros(input_shape, dtype=paddle.int64)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
# Absolute Position Embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
if visual_embeds is not None:
if visual_token_type_ids is None:
visual_token_type_ids = paddle.ones(
visual_embeds.shape[:-1], dtype=paddle.int64)
visual_embeds = self.visual_projection(visual_embeds)
visual_token_type_ids = visual_token_type_ids.astype(paddle.int64)
visual_token_type_embeddings = self.visual_token_type_embeddings(
visual_token_type_ids)
if image_text_alignment is not None:
# image_text_alignment = Batch x image_length x alignment_number.
# Each element denotes the position of the word corresponding to the image feature. -1 is the padding value.
dtype = token_type_embeddings.dtype
                image_text_alignment_mask = (
                    image_text_alignment != -1).astype(paddle.int64)
# Get rid of the -1.
image_text_alignment = image_text_alignment_mask * image_text_alignment
# Batch x image_length x alignment length x dim
visual_position_embeddings = self.position_embeddings(
image_text_alignment)
visual_position_embeddings *= image_text_alignment_mask.astype(
dtype=dtype).unsqueeze(-1)
visual_position_embeddings = visual_position_embeddings.sum(2)
                # We want to average along the alignment_number dimension.
image_text_alignment_mask = image_text_alignment_mask.astype(
dtype=dtype).sum(2)
if (image_text_alignment_mask == 0).sum() != 0:
image_text_alignment_mask[
image_text_alignment_mask ==
0] = 1 # Avoid divide by zero error
warnings.warn(
"Found 0 values in `image_text_alignment_mask`. Setting them to 1 to avoid divide-by-zero error."
)
visual_position_embeddings = visual_position_embeddings / image_text_alignment_mask.unsqueeze(
-1)
                visual_position_ids = paddle.zeros(
                    visual_embeds.shape[:-1], dtype=paddle.int64)
                # When fine-tuning the detector, the image_text_alignment is sometimes padded too long.
                if visual_position_embeddings.shape[1] != visual_embeds.shape[1]:
                    if visual_position_embeddings.shape[1] < visual_embeds.shape[1]:
                        raise ValueError(
                            f"Visual position embeddings length: {visual_position_embeddings.shape[1]} "
                            f"should be the same as `visual_embeds` length: {visual_embeds.shape[1]}")
                    visual_position_embeddings = visual_position_embeddings[:, :visual_embeds.shape[1], :]
                visual_position_embeddings = visual_position_embeddings + self.visual_position_embeddings(
                    visual_position_ids)
else:
visual_position_ids = paddle.zeros(
visual_embeds.shape[:-1], dtype=paddle.int64)
visual_position_embeddings = self.visual_position_embeddings(
visual_position_ids)
visual_embeddings = visual_embeds + visual_position_embeddings + visual_token_type_embeddings
embeddings = paddle.concat((embeddings, visual_embeddings), axis=1)
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
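# Shape sketch for VisualBertEmbeddings.forward (illustrative names/sizes, not from the source):
#   input_ids     [batch, text_len]             -> word embeddings  [batch, text_len, hidden]
#   visual_embeds [batch, img_len, visual_dim]  -> projected to     [batch, img_len, hidden]
#   returned embeddings: [batch, text_len + img_len, hidden]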
class VisualBertEncoder(nn.Layer):
def __init__(self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1):
super(VisualBertEncoder, self).__init__()
encoder_layer = nn.TransformerEncoderLayer(
hidden_size,
num_attention_heads,
intermediate_size,
dropout=hidden_dropout_prob,
activation=hidden_act,
attn_dropout=attention_probs_dropout_prob,
act_dropout=0)
self.layer = nn.TransformerEncoder(encoder_layer, num_hidden_layers)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=False, ):
"""
Returns:
            last_hidden_state: ``paddle.Tensor`` of shape `(batch_size, sequence_length, hidden_size)`
hidden_states: `optional`, returned when ``output_hidden_states=True`` is passed
attentions: `optional`, returned when ``output_attentions=True`` is passed
"""
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
assert isinstance(self.layer.layers, nn.LayerList)
        if output_attentions:
            raise NotImplementedError(
                "nn.TransformerEncoderLayer does not support `output_attentions`; "
                "subclass it to add support.")
        if head_mask:
            raise NotImplementedError(
                "nn.TransformerEncoderLayer does not support `head_mask`; "
                "subclass it to add support.")
for i, layer_module in enumerate(self.layer.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states, )
# layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask)
# layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) # retrun a tuple
hidden_states = layer_outputs
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1], )
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states, )
if not return_dict:
return tuple(v
for v in [
hidden_states,
all_hidden_states,
all_self_attentions,
] if v is not None)
return OrderedDict(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions)
class VisualBertPooler(nn.Layer):
def __init__(self, hidden_size=768):
super(VisualBertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class VisualBertLMPredictionHead(nn.Layer):
"""
Bert Model with a `language modeling` head on top for CLM fine-tuning.
"""
def __init__(self,
hidden_size,
vocab_size,
activation,
embedding_weights=None):
super(VisualBertLMPredictionHead, self).__init__()
self.transform = nn.Linear(hidden_size, hidden_size)
self.activation = getattr(nn.functional, activation)
self.layer_norm = nn.LayerNorm(hidden_size)
self.decoder_weight = self.create_parameter(
shape=[vocab_size, hidden_size],
dtype=self.transform.weight.dtype,
is_bias=False) if embedding_weights is None else embedding_weights
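        # When `embedding_weights` is provided, the decoder ties the output
        # projection to the word-embedding matrix instead of allocating a new
        # [vocab_size, hidden_size] parameter.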
self.decoder_bias = self.create_parameter(
shape=[vocab_size], dtype=self.decoder_weight.dtype, is_bias=True)
def forward(self, hidden_states, masked_positions=None):
if masked_positions is not None:
hidden_states = paddle.reshape(hidden_states,
[-1, hidden_states.shape[-1]])
hidden_states = paddle.tensor.gather(hidden_states,
masked_positions)
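            # Note: after the reshape above, `masked_positions` must index the
            # flattened [batch_size * seq_len] token axis, not per-sample positions.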
            # gathering only the masked tokens is typically faster than scoring every position
hidden_states = self.transform(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = paddle.tensor.matmul(
hidden_states, self.decoder_weight,
transpose_y=True) + self.decoder_bias
return hidden_states
class VisualBertPreTrainingHeads(nn.Layer):
"""
Perform language modeling task and next sentence classification task.
Args:
hidden_size (int):
See :class:`BertModel`.
vocab_size (int):
See :class:`BertModel`.
activation (str):
Activation function used in the language modeling task.
embedding_weights (Tensor, optional):
Decoding weights used to map hidden_states to logits of the masked token prediction.
Its data type should be float32 and its shape is [vocab_size, hidden_size].
Defaults to `None`, which means use the same weights of the embedding layer.
"""
def __init__(self,
hidden_size,
vocab_size,
activation,
embedding_weights=None):
super(VisualBertPreTrainingHeads, self).__init__()
self.predictions = VisualBertLMPredictionHead(
hidden_size, vocab_size, activation, embedding_weights)
self.seq_relationship = nn.Linear(hidden_size, 2)
def forward(self, sequence_output, pooled_output, masked_positions=None):
"""
Args:
sequence_output(Tensor):
Sequence of hidden-states at the last layer of the model.
                Its data type should be float32 and its shape is [batch_size, sequence_length, hidden_size].
pooled_output(Tensor):
The output of first token (`[CLS]`) in sequence.
We "pool" the model by simply taking the hidden state corresponding to the first token.
Its data type should be float32 and its shape is [batch_size, hidden_size].
masked_positions(Tensor, optional):
A tensor indicates positions to be masked in the position embedding.
Its data type should be int64 and its shape is [batch_size, mask_token_num].
`mask_token_num` is the number of masked tokens. It should be no bigger than `sequence_length`.
Defaults to `None`, which means we output hidden-states of all tokens in masked token prediction.
Returns:
tuple: Returns tuple (``prediction_scores``, ``seq_relationship_score``).
With the fields:
- `prediction_scores` (Tensor):
The scores of masked token prediction. Its data type should be float32.
If `masked_positions` is None, its shape is [batch_size, sequence_length, vocab_size].
Otherwise, its shape is [batch_size, mask_token_num, vocab_size].
- `seq_relationship_score` (Tensor):
The scores of next sentence prediction.
Its data type should be float32 and its shape is [batch_size, 2].
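        Example (illustrative shapes only, assuming batch_size=2, sequence_length=16,
        vocab_size=30522; `heads` names a VisualBertPreTrainingHeads instance):
            prediction_scores, seq_relationship_score = heads(sequence_output, pooled_output)
            # prediction_scores: [2, 16, 30522]; seq_relationship_score: [2, 2]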
"""
prediction_scores = self.predictions(sequence_output, masked_positions)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class VisualBertPreTrainedModel(PretrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
base_model_prefix = "visualbert"
model_config_file = "model_config.json"
# pretrained general configuration
pretrained_init_configuration = {
"visualbert-vqa": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 2048,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-vqa-pre": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 2048,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-vqa-coco-pre": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 2048,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-nlvr2": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 1024,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-nlvr2-pre": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 1024,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-nlvr2-coco-pre": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 1024,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-vcr": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 512,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-vcr-pre": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 512,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
},
"visualbert-vcr-coco-pre": {
"vocab_size": 30522,
"hidden_size": 768,
"visual_embedding_dim": 512,
"num_hidden_layers": 12,
"num_attention_heads": 12,
"intermediate_size": 3072,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02,
"layer_norm_eps": 1e-12,
"bypass_transformer": False,
"special_visual_initialize": True,
"pad_token_id": 1,
"bos_token_id": 0,
"eos_token_id": 2,
}
}
resource_files_names = {"model_state": "model_state.pdparams"}
pretrained_resource_files_map = {
"model_state": {
"visualbert-vqa":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-vqa/model_state.pdparams",
"visualbert-vqa-pre":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-vqa-pre/model_state.pdparams",
"visualbert-vqa-coco-pre":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-vqa-coco-pre/model_state.pdparams",
"visualbert-vcr":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-vcr/model_state.pdparams",
"visualbert-vcr-pre":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-vcr-pre/model_state.pdparams",
"visualbert-vcr-coco-pre":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-vcr-coco-pre/model_state.pdparams",
"visualbert-nlvr2":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-nlvr2/model_state.pdparams",
"visualbert-nlvr2-pre":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-nlvr2-pre/model_state.pdparams",
"visualbert-nlvr2-coco-pre":
"https://paddlenlp.bj.bcebos.com/models/transformers/visualbert/visualbert-nlvr2-coco-pre/model_state.pdparams",
}
}
def init_weights(self, layer):
""" Initialization hook """
if isinstance(layer, (nn.Linear, nn.Embedding)):
# In the dygraph mode, use the `set_value` to reset the parameter directly,
# and reset the `state_dict` to update parameter in static mode.
if isinstance(layer.weight, paddle.Tensor):
layer.weight.set_value(
paddle.tensor.normal(
mean=0.0,
std=self.initializer_range
if hasattr(self, "initializer_range") else
self.visual_bert.config["initializer_range"],
shape=layer.weight.shape))
        elif isinstance(layer, nn.LayerNorm):
            layer._epsilon = 1e-12
            # LayerNorm weight/bias keep their Paddle defaults (1.0 / 0.0).
@register_base_model
class VisualBertModel(VisualBertPreTrainedModel):
"""
The bare VisualBERT Model transformer outputting raw hidden-states without any specific head on top.
This model inherits from :class:`~paddlenlp.transformers.model_utils.PretrainedModel`.
Refer to the superclass documentation for the generic methods.
    This model is also a Paddle `paddle.nn.Layer <https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use it as a regular Paddle Layer
    and refer to the Paddle documentation for all matters related to general usage and behavior.
Args:
        vocab_size (:obj:`int`, `optional`, defaults to 30522):
            Vocabulary size of the VisualBERT model. Defines the number of different tokens that can be represented by
            the :obj:`inputs_ids` passed when calling :class:`~transformers.VisualBertModel`.
hidden_size (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
visual_embedding_dim (:obj:`int`, `optional`, defaults to 512):
Dimensionality of the visual embeddings to be passed to the model.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed when calling
:class:`~transformers.VisualBertModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
        bypass_transformer (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the model should bypass the transformer for the visual embeddings. If set to :obj:`True`,
            the model directly concatenates the visual embeddings from :class:`~transformers.VisualBertEmbeddings` with
            the text output from the transformer, and then passes the result to a self-attention layer.
        special_visual_initialize (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not the visual token type and position type embedding weights should be initialized the same as
            the textual token type and position type embeddings. When set to :obj:`True`, the weights of the textual
            token type and position type embeddings are copied to the respective visual embedding layers.
"""
def __init__(self,
vocab_size=30522,
hidden_size=768,
visual_embedding_dim=512,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
bypass_transformer=False,
special_visual_initialize=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
add_pooling_layer=True):
super(VisualBertModel, self).__init__()
self.pad_token_id = pad_token_id
self.initializer_range = initializer_range
self.embeddings = VisualBertEmbeddings(
vocab_size=vocab_size,
hidden_size=hidden_size,
visual_embedding_dim=visual_embedding_dim,
hidden_dropout_prob=hidden_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
layer_norm_eps=layer_norm_eps,
special_visual_initialize=special_visual_initialize,
pad_token_id=pad_token_id)
self.encoder = VisualBertEncoder(
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob)
self.pooler = VisualBertPooler(
hidden_size=hidden_size) if add_pooling_layer else None
self.bypass_transformer = bypass_transformer
if self.bypass_transformer:
self.additional_layer = nn.TransformerEncoderLayer(
hidden_size,
num_attention_heads,
intermediate_size,
dropout=hidden_dropout_prob,
activation=hidden_act,
attn_dropout=attention_probs_dropout_prob,
act_dropout=0)
self.apply(self.init_weights)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None, ):
r"""
The VisualBertModel forward method, overrides the `__call__()` special method.
Args:
input_ids (Tensor):
Indices of input sequence tokens in the vocabulary. They are
numerical representations of tokens that build the input sequence.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
attention_mask (Tensor, optional):
Mask used in multi-head attention to avoid performing attention on to some unwanted positions,
usually the paddings or the subsequent positions.
                Its data type can be int, float or bool.
                If its data type is int, the values should be either 0 or 1:
                - **1** for tokens that are **not masked**,
                - **0** for tokens that are **masked**.
                It is a tensor whose shape broadcasts to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.
                Defaults to `None`, which means no positions are masked out.
token_type_ids (Tensor, optional):
Segment token indices to indicate different portions of the inputs.
Selected in the range ``[0, type_vocab_size - 1]``.
If `type_vocab_size` is 2, which means the inputs have two portions.
Indices can either be 0 or 1:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
Defaults to `None`, which means we don't add segment embeddings.
position_ids(Tensor, optional):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
max_position_embeddings - 1]``.
Shape as `(batch_size, num_tokens)` and dtype as int64. Defaults to `None`.
head_mask (Tensor, optional):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (Tensor, optional):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
visual_embeds (Tensor, optional):
                The embedded representation of the visual inputs, generally derived using an object detector.
visual_attention_mask (Tensor, optional):
Mask to avoid performing attention on visual embeddings. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
visual_token_type_ids (Tensor, optional):
Segment token indices to indicate different portions of the visual embeds.
`What are token type IDs? <../glossary.html#token-type-ids>`_ The authors of VisualBERT set the
`visual_token_type_ids` to `1` for all tokens.
image_text_alignment (Tensor, optional):
                Image-text alignment, used to decide the position IDs of the visual embeddings.
output_attentions (bool, optional):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (bool, optional):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (bool, optional):
                Whether or not to return an `OrderedDict` instead of a plain tuple.
Returns:
tuple: Returns tuple (`last_hidden_state`, `pooled_output`, `hidden_states` (optional), `attentions` (optional)).
Or OrderedDict:
{
last_hidden_state=sequence_output,
pooled_output=pooled_output,
hidden_states=encoder_outputs['hidden_states'],
attentions=encoder_outputs['attentions'],
}
With the fields:
- `last_hidden_state` (Tensor):
Sequence of hidden-states at the last layer of the model.
                Its data type should be float32 and its shape is [batch_size, sequence_length, hidden_size].
- `pooled_output` (Tensor):
The output of first token (`[CLS]`) in sequence.
We "pool" the model by simply taking the hidden state corresponding to the first token.
Its data type should be float32 and its shape is [batch_size, hidden_size].
- `encoder_outputs` (List(Tensor)):
A list of Tensor containing hidden-states of the model at each hidden layer in the Transformer encoder.
The length of the list is `num_hidden_layers`.
Each Tensor has a data type of float32 and its shape is [batch_size, sequence_length, hidden_size].
Example:
.. code-block::
import paddle
from paddlenlp.transformers import VisualBertModel, BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Built-in pretrained models for VisualBertModel are as follows
# 'visualbert-vqa-coco-pre'
# 'visualbert-vqa-pre'
# 'visualbert-nlvr2-coco-pre'
# 'visualbert-nlvr2-pre'
# 'visualbert-vcr-coco-pre'
# 'visualbert-vcr-pre'
model = VisualBertModel.from_pretrained('visualbert-vqa-coco-pre')
inputs = tokenizer("Welcome to paddlenlp.", return_attention_mask=True)
inputs['input_ids'] = paddle.to_tensor([inputs['input_ids']])
inputs['token_type_ids'] = paddle.to_tensor([inputs['token_type_ids']])
inputs['attention_mask'] = paddle.to_tensor([inputs['attention_mask']])
# 2048-dim visual embeds for vqa-pre model
# 1024-dim visual embeds for nlvr2-pre model
# 512-dim visual embeds for vcr-pre model
visual_embeds = paddle.ones([100, 2048]).unsqueeze(0)
visual_token_type_ids = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
visual_attention_mask = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
return_dict = True
inputs.update({
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
"return_dict": return_dict
})
outputs = model(**inputs)
if not return_dict:
last_hidden_state = outputs[0]
pooled_output = outputs[1]
else:
last_hidden_state = outputs['last_hidden_state']
pooled_output = outputs['pooled_output']
"""
output_attentions = output_attentions if output_attentions is not None else False
output_hidden_states = (output_hidden_states
if output_hidden_states is not None else False)
return_dict = return_dict if return_dict is not None else False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.shape[:-1]
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
if visual_embeds is None:
            raise ValueError(
                "`visual_embeds` must be provided (got None) when using VisualBertModel."
            )
batch_size, seq_length = input_shape
# device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_input_shape = visual_embeds.shape[:-1]
if attention_mask is None:
attention_mask = paddle.ones(
input_shape) # (batch_size, text_seq_len)
if visual_attention_mask is None:
visual_attention_mask = paddle.ones(
visual_input_shape) # (batch_size, visual_seq_len)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, from_seq_length, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
combined_attention_mask = paddle.concat(
(attention_mask, visual_attention_mask),
axis=-1) # (batch_size, seq_len)
batch_size, combined_seq_length = combined_attention_mask.shape
extended_attention_mask = paddle.concat(
[
combined_attention_mask[b].broadcast_to(
[combined_seq_length, combined_seq_length]).unsqueeze(0)
.unsqueeze(0) for b in range(batch_size)
],
axis=0)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
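        # Additive mask convention (sketch): a kept position (mask == 1) maps to
        # 0.0 and a padded position (mask == 0) maps to -10000.0, which drives
        # its attention weight toward zero after softmax.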
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment, )
if self.bypass_transformer and visual_embeds is not None:
text_length = input_ids.shape[1]
text_embedding_output = embedding_output[:, :text_length, :]
visual_embedding_output = embedding_output[:, text_length:, :]
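            # Bypass path: only the text tokens go through the deep encoder; the
            # visual embeddings are fused afterwards in a single extra attention layer.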
            text_extended_attention_mask = extended_attention_mask[:, :, :text_length, :text_length]
encoded_outputs = self.encoder(
text_embedding_output,
attention_mask=text_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict, )
sequence_output = encoded_outputs[0]
concatenated_input = paddle.concat(
(sequence_output, visual_embedding_output), axis=1)
sequence_output = self.additional_layer(concatenated_input,
extended_attention_mask)
pooled_output = self.pooler(
sequence_output) if self.pooler is not None else None
else:
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict, )
if not return_dict:
sequence_output = encoder_outputs[0]
else:
sequence_output = encoder_outputs['last_hidden_state']
pooled_output = self.pooler(
sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return OrderedDict(
last_hidden_state=sequence_output,
pooled_output=pooled_output,
hidden_states=encoder_outputs['hidden_states'],
attentions=encoder_outputs['attentions'], )
class VisualBertForPreTraining(VisualBertPreTrainedModel):
"""
VisualBert Model with pretraining tasks on top.
Args:
visual_bert (:class:`VisualBertModel`):
An instance of :class `VisualBertModel`.
"""
def __init__(self, visual_bert):
super(VisualBertForPreTraining, self).__init__()
self.visual_bert = visual_bert
self.cls = VisualBertPreTrainingHeads(
self.visual_bert.config["hidden_size"],
self.visual_bert.config["vocab_size"],
self.visual_bert.config["hidden_act"],
embedding_weights=self.visual_bert.embeddings.word_embeddings.
weight)
self.apply(self.init_weights)
    def get_predictions_decoder_weight(self):
        return self.cls.predictions.decoder_weight

    def get_predictions_decoder_bias(self):
        return self.cls.predictions.decoder_bias
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
sentence_image_labels=None, ):
r"""
Args:
input_ids (Tensor):
Indices of input sequence tokens in the vocabulary.
attention_mask (Tensor, optional):
See :class:`VisualBertModel`.
            token_type_ids (Tensor, optional):
                See :class:`VisualBertModel`.
position_ids (Tensor, optional):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
head_mask (Tensor, optional):
See :class:`VisualBertModel`.
inputs_embeds (Tensor, optional):
See :class:`VisualBertModel`.
visual_embeds (Tensor, optional):
                The embedded representation of the visual inputs, generally derived using an object detector.
visual_attention_mask (Tensor, optional):
See :class:`VisualBertModel`.
visual_token_type_ids (Tensor, optional):
See :class:`VisualBertModel`.
image_text_alignment (Tensor, optional):
                Image-text alignment, used to decide the position IDs of the visual embeddings.
output_attentions (bool, optional):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (bool, optional):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (bool, optional):
                Whether or not to return an `OrderedDict` instead of a plain tuple.
labels (Tensor, optional):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
sentence_image_labels (Tensor, optional):
Labels for computing the sentence-image prediction (classification) loss. Input should be a sequence
pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a matching pair of sequence A for the given image,
- 1 indicates sequence B is a random sequence w.r.t A for the given image.
Returns:
tuple: Returns tuple (`loss`, `prediction_logits`, `seq_relationship_logits`, `hidden_states`(optional), `attentions`(optional)).
Or OrderedDict:
{
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs["hidden_states"],
attentions=outputs["attentions"],
}
Example::
.. code-block::
import paddle
from paddlenlp.transformers import VisualBertForPreTraining, BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Built-in pretrained models for VisualBertModel are as follows
# 'visualbert-vqa-coco-pre'
# 'visualbert-vqa-pre'
# 'visualbert-nlvr2-coco-pre'
# 'visualbert-nlvr2-pre'
# 'visualbert-vcr-coco-pre'
# 'visualbert-vcr-pre'
model = VisualBertForPreTraining.from_pretrained('visualbert-vqa-coco-pre')
inputs = tokenizer("Welcome to paddlenlp.", return_attention_mask=True)
inputs['input_ids'] = paddle.to_tensor([inputs['input_ids']])
inputs['token_type_ids'] = paddle.to_tensor([inputs['token_type_ids']])
inputs['attention_mask'] = paddle.to_tensor([inputs['attention_mask']])
# 2048-dim visual embeds for vqa-pre model
# 1024-dim visual embeds for nlvr2-pre model
# 512-dim visual embeds for vcr-pre model
visual_embeds = paddle.ones([100, 2048]).unsqueeze(0)
visual_token_type_ids = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
visual_attention_mask = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
return_dict = True
inputs.update({
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
"return_dict": return_dict
})
outputs = model(**inputs)
if not return_dict:
loss = outputs[0]
prediction_logits = outputs[1]
seq_relationship_logits = outputs[2]
hidden_states = outputs[3]
attentions = outputs[4]
else:
loss = outputs['loss']
prediction_logits = outputs['prediction_logits']
seq_relationship_logits = outputs['seq_relationship_logits']
hidden_states = outputs['hidden_states']
attentions = outputs['attentions']
"""
return_dict = return_dict if return_dict is not None else False
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict, )
if not return_dict:
sequence_output, pooled_output = outputs[:2]
else:
sequence_output, pooled_output = outputs[
'last_hidden_state'], outputs['pooled_output']
prediction_scores, seq_relationship_score = self.cls(sequence_output,
pooled_output)
total_loss = None
if labels is not None and sentence_image_labels is not None:
total_size = attention_mask.shape[-1] + visual_attention_mask.shape[
-1]
if labels.shape[-1] != total_size:
raise ValueError(
f"The labels provided should have same sequence length as total attention mask."
f"Found labels with sequence length {labels.shape[-1]}, expected {total_size}."
)
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.reshape(
[-1, self.visual_bert.config["vocab_size"]]),
labels.flatten())
sentence_image_loss = loss_fct(
seq_relationship_score.reshape([-1, 2]),
sentence_image_labels.flatten())
total_loss = masked_lm_loss + sentence_image_loss
if labels is not None and sentence_image_labels is None:
total_size = attention_mask.shape[-1] + visual_attention_mask.shape[
-1]
if labels.shape[-1] != total_size:
raise ValueError(
f"The labels provided should have same sequence length as total attention mask."
f"Found labels with sequence length {labels.shape[-1]}, expected {total_size}."
)
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
total_loss = loss_fct(
prediction_scores.reshape(
[-1, self.visual_bert.config["vocab_size"]]),
labels.flatten())
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return (
(total_loss, ) + output) if total_loss is not None else output
return OrderedDict(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs["hidden_states"],
attentions=outputs["attentions"], )
class VisualBertForQuestionAnswering(VisualBertPreTrainedModel):
"""
VisualBert Model with a classification/regression head on top (a dropout and a linear layer on top of the pooled
output) for VQA.
Args:
visual_bert (:class:`VisualBertModel`):
An instance of VisualBertModel.
num_classes (int, optional):
The number of classes. Default to `2`.
dropout (float, optional):
The dropout probability for output of VisualBERT.
If None, use the same value as `hidden_dropout_prob` of `VisualBertModel`
instance `visualbert`. Defaults to `None`.
"""
def __init__(self, visual_bert, num_classes=2, dropout=None):
super(VisualBertForQuestionAnswering, self).__init__()
self.num_classes = num_classes
self.visual_bert = visual_bert
self.dropout = nn.Dropout(dropout if dropout is not None else self.
visual_bert.config["hidden_dropout_prob"])
self.cls = nn.Linear(self.visual_bert.config["hidden_size"],
self.num_classes)
self.apply(self.init_weights)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None, ):
r"""
labels (Tensor, optional):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. A KLDivLoss is computed between the labels and the returned logits.
Returns:
Example::
.. code-block::
import paddle
from paddlenlp.transformers import BertTokenizer, VisualBertForQuestionAnswering
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = VisualBertForQuestionAnswering.from_pretrained("visualbert-vqa", num_classes=3129)
model.eval()
inputs = tokenizer("Who is eating the apple?", return_attention_mask=True)
inputs['input_ids'] = paddle.to_tensor([inputs['input_ids']])
inputs['token_type_ids'] = paddle.to_tensor([inputs['token_type_ids']])
inputs['attention_mask'] = paddle.to_tensor([inputs['attention_mask']])
visual_embeds = paddle.ones([100,2048]).unsqueeze(0)
visual_token_type_ids = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
visual_attention_mask = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
return_dict = True
inputs.update({
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
"return_dict": return_dict
})
                labels = paddle.nn.functional.one_hot(paddle.to_tensor(50), num_classes=3129).astype(paddle.float32) # Batch size 1, Num labels 3129
with paddle.no_grad():
outputs = model(**inputs, labels = labels)
if not return_dict:
loss = outputs[0]
logits = outputs[1]
else:
loss = outputs['loss']
logits = outputs['logits']
"""
return_dict = return_dict if return_dict is not None else False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.shape[:-1]
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
# Get the index of the last text token
if attention_mask is None:
attention_mask = paddle.ones(input_shape)
index_to_gather = attention_mask.sum(1) - 2 # as in original code
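        # attention_mask.sum(1) counts the non-padding tokens, so subtracting 2
        # points at the last text token before the final [SEP].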
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict)
if not return_dict:
sequence_output = outputs[0]
else:
sequence_output = outputs['last_hidden_state']
# (batch_size, seq_length, hidden_size)
# --> gather_index_list
# --> (batch_size, seq_length=len(gather_index_list), hidden_size)
index_to_gather = index_to_gather.astype(paddle.int64)
pooled_output = paddle.concat(
[
paddle.gather(
sequence_output[b], index_to_gather[b], axis=0).unsqueeze(0)
for b in range(input_shape[0])
],
axis=0)
pooled_output = self.dropout(pooled_output)
logits = self.cls(pooled_output)
reshaped_logits = paddle.reshape(logits, shape=[-1, self.num_classes])
loss = None
if labels is not None:
loss_fct = nn.KLDivLoss(reduction="batchmean")
log_softmax = nn.LogSoftmax(axis=-1)
reshaped_logits = log_softmax(reshaped_logits)
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits, ) + outputs[2:]
return ((loss, ) + output) if loss is not None else output
return OrderedDict(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs['hidden_states'],
attentions=outputs['attentions'], )
class VisualBertForVisualReasoning(VisualBertPreTrainedModel):
"""
VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled
output) for Visual Reasoning e.g. for NLVR task.
Args:
visual_bert (:class:`VisualBertModel`):
An instance of VisualBertModel.
num_classes (int, optional):
The number of classes. Default to `2`.
dropout (float, optional):
The dropout probability for output of VisualBERT.
If None, use the same value as `hidden_dropout_prob` of `VisualBertModel`
instance `visualbert`. Defaults to `None`.
"""
def __init__(self, visual_bert, num_classes=2, dropout=None):
super(VisualBertForVisualReasoning, self).__init__()
self.num_classes = num_classes
self.visual_bert = visual_bert
self.dropout = nn.Dropout(dropout if dropout is not None else self.
visual_bert.config["hidden_dropout_prob"])
self.cls = nn.Linear(self.visual_bert.config["hidden_size"],
self.num_classes)
self.apply(self.init_weights)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None, ):
r"""
        labels (Tensor, optional):
            Labels for computing the sequence classification loss. Indices should be in :obj:`[0, ...,
            num_classes - 1]`. A cross-entropy loss is computed between the labels and the returned logits.
Returns:
Example::
.. code-block::
import paddle
from paddlenlp.transformers import BertTokenizer, VisualBertForVisualReasoning
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = VisualBertForVisualReasoning.from_pretrained("visualbert-nlvr2", num_classes=2)
model.eval()
inputs = tokenizer("Who is eating the apple?", return_attention_mask=True)
inputs['input_ids'] = paddle.to_tensor([inputs['input_ids']])
inputs['token_type_ids'] = paddle.to_tensor([inputs['token_type_ids']])
inputs['attention_mask'] = paddle.to_tensor([inputs['attention_mask']])
visual_embeds = paddle.ones([100,1024]).unsqueeze(0)
visual_token_type_ids = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
visual_attention_mask = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
return_dict = True
inputs.update({
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
"return_dict": return_dict
})
labels = paddle.to_tensor(1).unsqueeze(0) # Batch size 1, Num choices 2
with paddle.no_grad():
outputs = model(**inputs, labels = labels)
if not return_dict:
loss = outputs[0]
logits = outputs[1]
else:
loss = outputs['loss']
logits = outputs['logits']
"""
return_dict = return_dict if return_dict is not None else False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.shape[:-1]
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
# Get the index of the last text token
if attention_mask is None:
attention_mask = paddle.ones(input_shape)
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict)
if not return_dict:
pooled_output = outputs[1]
else:
pooled_output = outputs['pooled_output']
pooled_output = self.dropout(pooled_output)
logits = self.cls(pooled_output)
reshaped_logits = paddle.reshape(logits, shape=[-1, self.num_classes])
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.flatten())
if not return_dict:
            output = (reshaped_logits, ) + outputs[2:]
return ((loss, ) + output) if loss is not None else output
return OrderedDict(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs["hidden_states"],
attentions=outputs["attentions"], )
class VisualBertForMultipleChoice(VisualBertPreTrainedModel):
"""
VisualBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for VCR tasks.
Args:
visual_bert (:class:`VisualBertModel`):
An instance of VisualBertModel.
num_classes (int, optional):
The number of classes. Default to `1`.
dropout (float, optional):
The dropout probability for output of VisualBERT.
If None, use the same value as `hidden_dropout_prob` of `VisualBertModel`
instance `visualbert`. Defaults to `None`.
"""
def __init__(self, visual_bert, num_classes=1, dropout=None):
super(VisualBertForMultipleChoice, self).__init__()
self.num_classes = num_classes
self.visual_bert = visual_bert
self.dropout = nn.Dropout(dropout if dropout is not None else self.
visual_bert.config["hidden_dropout_prob"])
self.cls = nn.Linear(self.visual_bert.config["hidden_size"],
self.num_classes)
self.apply(self.init_weights)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
visual_embeds=None,
visual_attention_mask=None,
visual_token_type_ids=None,
image_text_alignment=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None, ):
r"""
        labels (Tensor, optional):
            Labels for computing the multiple choice classification loss. Indices should be in :obj:`[0, ...,
            num_choices - 1]`. A cross-entropy loss is computed between the labels and the reshaped logits.
Returns:
Example::
.. code-block::
import paddle
from paddlenlp.transformers import BertTokenizer, VisualBertForMultipleChoice
text = "Welcome to use paddle paddle and paddlenlp!"
choice0 = "Use it."
choice1 = "Like it."
model = VisualBertForMultipleChoice.from_pretrained("visualbert-vcr", num_classes=1)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.eval()
inputs = tokenizer.batch_encode(batch_text_or_text_pairs=[[text, text], [choice0, choice1]], max_seq_len=128, pad_to_max_seq_len=True, return_attention_mask=True)
input_ids_list = []
token_type_ids_list = []
attention_mask_list = []
inputs_dict = {}
for item in inputs:
input_ids_list.append(paddle.to_tensor(item['input_ids']).unsqueeze(0))
token_type_ids_list.append(paddle.to_tensor(item['token_type_ids']).unsqueeze(0))
attention_mask_list.append(paddle.to_tensor(item['attention_mask']).unsqueeze(0))
inputs_dict = {
"input_ids": paddle.concat(input_ids_list, axis=0).unsqueeze(0),
"token_type_ids": paddle.concat(token_type_ids_list, axis=0).unsqueeze(0),
"attention_mask": paddle.concat(attention_mask_list, axis=0).unsqueeze(0)
}
visual_embeds = paddle.ones([100,512]).unsqueeze(0)
visual_embeds = visual_embeds.expand(shape=[1, 2, *visual_embeds.shape])
visual_token_type_ids = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
visual_attention_mask = paddle.ones(visual_embeds.shape[:-1], dtype=paddle.int64)
labels = paddle.to_tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
return_dict = True
inputs_dict.update({
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
"return_dict": return_dict
})
with paddle.no_grad():
outputs = model(**inputs_dict, labels=labels)
if not return_dict:
loss = outputs[0]
logits = outputs[1]
else:
loss = outputs['loss']
logits = outputs['logits']
"""
return_dict = return_dict if return_dict is not None else False
num_choices = input_ids.shape[
1] if input_ids is not None else inputs_embeds.shape[1]
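        # Inputs arrive as [batch_size, num_choices, ...]; the reshapes below
        # flatten the choice dimension so the encoder sees
        # [batch_size * num_choices, ...], and the logits are reshaped back to
        # [batch_size, num_choices] at the end of this method.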
input_ids = input_ids.reshape(
[-1, input_ids.shape[-1]]) if input_ids is not None else None
attention_mask = attention_mask.reshape(
[-1,
attention_mask.shape[-1]]) if attention_mask is not None else None
token_type_ids = token_type_ids.reshape(
[-1,
token_type_ids.shape[-1]]) if token_type_ids is not None else None
position_ids = position_ids.reshape(
[-1, position_ids.shape[-1]]) if position_ids is not None else None
inputs_embeds = (inputs_embeds.reshape(
[-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1]])
if inputs_embeds is not None else None)
visual_embeds = (visual_embeds.reshape(
[-1, visual_embeds.shape[-2], visual_embeds.shape[-1]])
if visual_embeds is not None else None)
visual_attention_mask = (visual_attention_mask.reshape(
[-1, visual_attention_mask.shape[-1]])
if visual_attention_mask is not None else None)
visual_token_type_ids = (visual_token_type_ids.reshape(
[-1, visual_token_type_ids.shape[-1]])
if visual_token_type_ids is not None else None)
outputs = self.visual_bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
image_text_alignment=image_text_alignment,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict, )
if not return_dict:
pooled_output = outputs[1]
else:
pooled_output = outputs['pooled_output']
pooled_output = self.dropout(pooled_output)
logits = self.cls(pooled_output)
reshaped_logits = logits.reshape([-1, num_choices])
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits, ) + outputs[2:]
return ((loss, ) + output) if loss is not None else output
return OrderedDict(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs["hidden_states"],
attentions=outputs["attentions"], )

# ==== app/models.py (jorgegene/sis-inf-project, MIT) ====
from datetime import datetime
from app import db, login
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text, func
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin, db.Model):
__tablename__ = 'users'
    # Fields
id = Column(Integer, primary_key=True)
username = Column(String(64), index=True, unique=True)
nombre = Column(String(64))
apellidos = Column(String(64))
nia = Column(String(6), index=True, unique=True)
email = Column(String(120), index=True, unique=True)
password_hash = Column(String(128))
tipo_usuario = Column(Integer)
validated = Column(Integer)
    # String representation of the user
def __repr__(self):
return '<ID: {}, NIA: {}, email: {}>'.format(self.id, self.nia, self.email)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
    # Interface
def addUser(self):
db.session.add(self)
db.session.commit()
def removeUser(self):
db.session.delete(self)
db.session.commit()
def updateUser(self):
        User.query.filter_by(id=self.id).update(dict(username=self.username, email=self.email, password_hash=self.password_hash, tipo_usuario=self.tipo_usuario, validated=self.validated))
db.session.commit()
def validate(self):
self.validated = True
self.tipo_usuario = 2
self.updateUser()
def invalidate(self):
self.validated = False
self.updateUser()
@classmethod
def getUserByUsername(cls, username):
return User.query.filter_by(username=username).first()
@classmethod
def getUserById(cls, id):
return User.query.filter_by(id=id).first()
@classmethod
def getUsersNotValidated(cls):
return User.query.filter_by(validated=0).all()
class Stat(db.Model):
__tablename__ = 'stats'
id = Column(Integer, primary_key=True)
id_usuario = Column(Integer, unique=True)
dato_estadistico_1 = Column(String(64))
dato_estadistico_2 = Column(String(64))
dato_estadistico_3 = Column(String(64))
    # String representation of the statistic
def __repr__(self):
        return '<id: {}, id_usuario: {}, dato_estadistico_1: {}, dato_estadistico_2: {}, dato_estadistico_3: {}>'.format(self.id, self.id_usuario, self.dato_estadistico_1, self.dato_estadistico_2, self.dato_estadistico_3)
    # Interface
def addStat(self):
db.session.add(self)
db.session.commit()
def removeStat(self):
db.session.delete(self)
db.session.commit()
def updateStat(self):
        Stat.query.filter_by(id=self.id).update(dict(dato_estadistico_1=self.dato_estadistico_1, dato_estadistico_2=self.dato_estadistico_2, dato_estadistico_3=self.dato_estadistico_3))
db.session.commit()
@classmethod
def getStatById(cls, id):
return Stat.query.filter_by(id=id).first()
@classmethod
    def getUsers(cls, id):
return Stat.query.filter_by(id_usuario=id).first()
@classmethod
def getCountByDE1(cls, filter):
return Stat.query.filter_by(dato_estadistico_1=filter).count()
@classmethod
def getCountByDE2(cls, filter):
return Stat.query.filter_by(dato_estadistico_2=filter).count()
@classmethod
def getCountByDE3(cls, filter):
return Stat.query.filter_by(dato_estadistico_3=filter).count()
#######################################################
#######################################################
class Pregunta(db.Model):
__tablename__ = 'preguntas'
id = Column(Integer, primary_key=True)
pregunta = Column(String(1024))
year = Column(Integer)
def addPregunta(self):
db.session.add(self)
db.session.commit()
class QuestionOption2(db.Model):
__tablename__ = 'question_options_2'
id = Column(Integer, primary_key=True)
id_pregunta = Column(Integer, ForeignKey('preguntas.id'))
opcion = Column(String(512))
correcta = Column(Integer)
    # String representation of QuestionOption2
    def __repr__(self):
        return '<id: {}, id_pregunta: {}, opcion: {}>'.format(self.id, self.id_pregunta, self.opcion)
@classmethod
def newOption(cls, pregunta_id, respuesta):
if respuesta != "":
resp = QuestionOption2(id_pregunta=pregunta_id,opcion=respuesta,correcta=0)
db.session.add(resp)
db.session.commit()
    # Interface
def addOpcionPregunta(self):
db.session.add(self)
db.session.commit()
def removeOpcionPregunta(self):
db.session.delete(self)
db.session.commit()
    def updateOpcionPregunta(self):
        QuestionOption2.query.filter_by(id=self.id).update(dict(id_pregunta=self.id_pregunta, opcion=self.opcion, correcta=self.correcta))
        db.session.commit()
    @classmethod
    def getOpcionPreguntaByPreguntaId(cls, id_pregunta):
        return QuestionOption2.query.filter_by(id_pregunta=id_pregunta).all()
#######################################################
#######################################################
class Poster(db.Model):
__tablename__ = 'posters'
id = Column(Integer, primary_key=True)
id_usuario = Column(Integer, ForeignKey('users.id'))
imagen = Column(String(1024))
titulo = Column(String(1024))
reto = Column(Text(8192))
info = Column(Text(16384))
pregunta = Column(String(1024))
respuesta_correcta = Column(Integer)
corregido = Column(Integer)
    # String representation of the poster
def __repr__(self):
return '<id: {}, id_usuario: {}, info: {}>'.format(self.id, self.id_usuario, self.info)
    # Interface
def addPoster(self):
db.session.add(self)
db.session.commit()
def removePoster(self):
db.session.delete(self)
db.session.commit()
def validate(self):
self.corregido = 1
self.updatePoster()
def denyPoster(self):
self.corregido = 2
self.updatePoster()
def updatePoster(self):
Poster.query.filter_by(id=self.id).update(dict(id_usuario=self.id_usuario, corregido=self.corregido, imagen=self.imagen, reto=self.reto, info=self.info, pregunta=self.pregunta, respuesta_correcta=self.respuesta_correcta))
db.session.commit()
@classmethod
def getPosterById(cls, id):
return Poster.query.filter_by(id=id).first()
@classmethod
def getPosterByUserId(cls, user_id):
return Poster.query.filter_by(id_usuario=user_id).all()
@classmethod
def getPosters(cls):
return Poster.query.all()
@classmethod
def getPostersNotChecked(cls):
return Poster.query.filter_by(corregido=0).all()
@classmethod
def getPostersChecked(cls):
return Poster.query.filter_by(corregido=1).all()
class QuestionOption(db.Model):
__tablename__ = 'question_options'
id = Column(Integer, primary_key=True)
id_poster = Column(Integer)
opcion = Column(String(512))
    # String representation of QuestionOption
    def __repr__(self):
        return '<id: {}, id_poster: {}, opcion: {}>'.format(self.id, self.id_poster, self.opcion)
    # Interface
def addOpcionPregunta(self):
db.session.add(self)
db.session.commit()
def removeOpcionPregunta(self):
db.session.delete(self)
db.session.commit()
def updateOpcionPregunta(self):
QuestionOption.query.filter_by(id=self.id).update(dict(id_poster=self.id_poster, opcion=self.opcion))
db.session.commit()
@classmethod
def getOpcionPreguntaByPosterId(cls, id_poster):
return QuestionOption.query.filter_by(id_poster=id_poster).all()
class UserResponse(db.Model):
__tablename__ = 'user_response'
id = Column(Integer, primary_key=True)
id_usuario = Column(Integer, ForeignKey('users.id'))
id_poster = Column(Integer, ForeignKey('posters.id'))
opcion = Column(String(100))
    # String representation of UserResponse
    def __repr__(self):
        return '<id: {}, id_usuario: {}, id_poster: {}, opcion: {}>'.format(self.id, self.id_usuario, self.id_poster, self.opcion)
    # Interface
def addUserResponse(self):
db.session.add(self)
db.session.commit()
def removeUserResponse(self):
db.session.delete(self)
db.session.commit()
def updateUserResponse(self):
        UserResponse.query.filter_by(id=self.id).update(dict(id_usuario=self.id_usuario, id_poster=self.id_poster, opcion=self.opcion))
db.session.commit()
@classmethod
def getUserResponseByUserId(cls, id_usuario):
return UserResponse.query.filter_by(id_usuario=id_usuario).all()
class UserLike(db.Model):
__tablename__ = 'user_likes'
id = Column(Integer, primary_key=True)
id_usuario = Column(Integer, ForeignKey('users.id'))
id_poster = Column(Integer)
    # Interface
def likePoster(self):
db.session.add(self)
db.session.commit()
def unlikePoster(self):
db.session.delete(self)
db.session.commit()
@classmethod
def getUserLikes(cls, id_usuario):
return UserLike.query.filter_by(id_usuario=id_usuario).all()
@classmethod
def getPosterLikes(cls, id_poster):
return UserLike.query.filter_by(id_poster=id_poster).count()
@classmethod
def gaveLike(cls, id_usuario, id_poster):
return UserLike.query.filter_by(id_usuario=id_usuario, id_poster=id_poster).first() is not None
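
# Illustrative sketch (assumes an active Flask application context and an
# initialized database; the values below are examples, not fixtures):
#     user = User(username='jdoe', nombre='John', apellidos='Doe',
#                 nia='123456', email='jdoe@example.com',
#                 tipo_usuario=1, validated=0)
#     user.set_password('secret')
#     user.addUser()
#     user.validate()  # persists validated=True and promotes tipo_usuario to 2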

# ==== tests/anova_test.py (SakuragiYoshimasa/pystats, MIT) ====
#coding: utf-8
from pystats.anova import one_way_anova_between_subject
from pystats.anova import one_way_anova_within_subject
from pystats.anova import two_way_anova_between_subject
from pystats.anova import two_way_anova_within_subject
from pystats.anova import twawis_mat
import pandas as pd
from numba import jit
import numpy as np
'''
sample_data = {
'type': ['A','A','A','A','B','B','B','B','C','C','C','C',],
'value': [3,4,4,3,7,8,9,6,5,4,6,7] }
print(one_way_anova_between_subject(pd.DataFrame(sample_data), levelCol='type', valCol='value'))
sample_data = {
'type': ['A','A','A','A','B','B','B','B','C','C','C','C',],
'subject': ['1', '2', '3', '4', '1', '2', '3', '4', '1', '2', '3', '4'],
'value': [10,9,4,7,5,4,2,3,9,5,3,5] }
print(one_way_anova_within_subject(pd.DataFrame(sample_data), levelCol='type', subjectCol='subject', valCol='value'))
'''
'''
sample_data = pd.DataFrame({
'levelA': ['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],
'levelB': ['1','1','1','1','1','2','2','2','2','2','3','3','3','3','3','1','1','1','1','1','2','2','2','2','2','3','3','3','3','3'],
'value': [6,4,5,3,2,10,8,10,8,9,11,12,12,10,10,5,4,2,2,2,7,6,5,4,3,12,8,5,6,4] })
print(two_way_anova_between_subject(sample_data, levelACol='levelA', levelBCol='levelB', valCol='value'))
'''
sample_data = {
'levelA': ['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],
'levelB': ['1','1','1','1','1','2','2','2','2','2','3','3','3','3','3','1','1','1','1','1','2','2','2','2','2','3','3','3','3','3'],
'subject':['1','2','3','4','5','1','2','3','4','5','1','2','3','4','5','1','2','3','4','5','1','2','3','4','5','1','2','3','4','5'],
'value': [6.0,4.0,5.0,3.0,2.0,10.0,8.0,10.0,8.0,9.0,11.0,12.0,12.0,10.0,10.0,5.0,4.0,2.0,2.0,2.0,7.0,6.0,5.0,4.0,3.0,12.0,8.0,5.0,6.0,4.0] }
#print(two_way_anova_within_subject(pd.DataFrame(sample_data), levelACol='levelA', levelBCol='levelB', subjectCol='subject', valCol='value'))
import time
start = time.time()
for i in range(300):
two_way_anova_within_subject(pd.DataFrame(sample_data), levelACol='levelA', levelBCol='levelB', subjectCol='subject', valCol='value')
elapsed_time = time.time() - start
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
sample_data = np.array([[6.0,4.0,5.0,3.0,2.0], [10.0,8.0,10.0,8.0,9.0], [11.0,12.0,12.0,10.0,10.0], [5.0,4.0,2.0,2.0,2.0], [7.0,6.0,5.0,4.0,3.0], [12.0,8.0,5.0,6.0,4.0]])
start = time.time()
for i in range(300):
twawis_mat(sample_data, 2, 3)
elapsed_time = time.time() - start
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
'''
import time
@jit
def jited():
start = time.time()
for i in range(100):
two_way_anova_within_subject_jit(pd.DataFrame(sample_data), levelACol='levelA', levelBCol='levelB', subjectCol='subject', valCol='value')
elapsed_time = time.time() - start
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
def nojited():
start = time.time()
for i in range(100):
two_way_anova_within_subject(pd.DataFrame(sample_data), levelACol='levelA', levelBCol='levelB', subjectCol='subject', valCol='value')
elapsed_time = time.time() - start
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
if __name__ == '__main__':
jited()
nojited()
'''

# ==== algorithms/3SumClosest/3SumClosest.py (zhyu/leetcode, MIT) ====
class Solution:
    # @return an integer
    def threeSumClosest(self, num, target):
        # Sort, then fix the smallest element and scan the rest with two pointers.
        num.sort()
        res = sum(num[:3])
        diff = abs(res - target)
        if diff == 0:
            return res
        n = len(num)
        for i in range(n - 2):
            j, k = i + 1, n - 1
            while j < k:
                s = num[i] + num[j] + num[k]
                if s > target:
                    n_diff = s - target
                    k -= 1  # sum too large: shrink from the right
                elif s < target:
                    n_diff = target - s
                    j += 1  # sum too small: grow from the left
                else:
                    return s  # exact match
                if n_diff < diff:
                    res, diff = s, n_diff
        return res
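
# Example (illustrative): Solution().threeSumClosest([-1, 2, 1, -4], 1) -> 2
# (the closest achievable sum to the target 1 is -1 + 1 + 2 = 2)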

# ==== pysnmp-with-texts/JUNIPER-NSM-TRAPS.py (agustinhenze/mibs.snmplabs.com, Apache-2.0) ====
#
# PySNMP MIB module JUNIPER-NSM-TRAPS (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-NSM-TRAPS
# Produced by pysmi-0.3.4 at Wed May 1 14:00:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
jnxNsm, = mibBuilder.importSymbols("JUNIPER-SMI", "jnxNsm")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, Counter64, ObjectIdentity, Integer32, IpAddress, Counter32, ModuleIdentity, MibIdentifier, Bits, iso, Gauge32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Counter64", "ObjectIdentity", "Integer32", "IpAddress", "Counter32", "ModuleIdentity", "MibIdentifier", "Bits", "iso", "Gauge32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType")
DateAndTime, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DateAndTime", "DisplayString", "TextualConvention", "TruthValue")
nsmTrapInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 6, 1))
if mibBuilder.loadTexts: nsmTrapInfo.setLastUpdated('200506301100Z')
if mibBuilder.loadTexts: nsmTrapInfo.setOrganization('Juniper Networks, Inc')
if mibBuilder.loadTexts: nsmTrapInfo.setContactInfo(' Juniper Technical Assistance Center Juniper Networks, Inc. 1194 N. Mathilda Avenue Sunnyvale, CA 94089 E-mail: support@juniper.net')
if mibBuilder.loadTexts: nsmTrapInfo.setDescription('NetScreen Manger trap definitions for NSM')
nsmTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 6, 0))
nsmDayID = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 1), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDayID.setStatus('current')
if mibBuilder.loadTexts: nsmDayID.setDescription('The day id of the log for the specified day (encoded yyyymmdd)')
nsmRecordID = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmRecordID.setStatus('current')
if mibBuilder.loadTexts: nsmRecordID.setDescription('The record id of the log for the specified day (nth log received)')
nsmTimeReceived = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 3), DateAndTime()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmTimeReceived.setStatus('current')
if mibBuilder.loadTexts: nsmTimeReceived.setDescription('Time the log was received by the Mgt-Svr')
nsmTimeGenerated = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 4), DateAndTime()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmTimeGenerated.setStatus('current')
if mibBuilder.loadTexts: nsmTimeGenerated.setDescription('Time the log was generated by the device')
nsmDeviceDomain = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 5), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDeviceDomain.setStatus('current')
if mibBuilder.loadTexts: nsmDeviceDomain.setDescription('The domain for the device that generated this log')
nsmDeviceDomainVer2 = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 6), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDeviceDomainVer2.setStatus('current')
if mibBuilder.loadTexts: nsmDeviceDomainVer2.setDescription('The version of the domain that contained the device that generated this log')
nsmDevice = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 7), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDevice.setStatus('current')
if mibBuilder.loadTexts: nsmDevice.setDescription('The device that generated this log')
nsmDeviceIp = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 8), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDeviceIp.setStatus('current')
if mibBuilder.loadTexts: nsmDeviceIp.setDescription('IP address of the device that generated this log')
nsmCategory = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("self", 0), ("config", 1), ("traffic", 2), ("alarm", 3), ("info", 4), ("predefined", 5), ("predefined1", 6), ("custom", 7), ("screen", 8), ("implicit", 9), ("profiler", 10)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmCategory.setStatus('current')
if mibBuilder.loadTexts: nsmCategory.setDescription('The type of log (e.g. Info, Misc, Alarm, etc.)')
nsmSubcategory = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 10), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmSubcategory.setStatus('current')
if mibBuilder.loadTexts: nsmSubcategory.setDescription('Subcategory values have different semantics depending on the value of the log category.')
nsmSrcZone = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 11), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmSrcZone.setStatus('current')
if mibBuilder.loadTexts: nsmSrcZone.setDescription("The name of the packet-centric 'inbound' zone")
nsmSrcIfName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 12), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmSrcIfName.setStatus('current')
if mibBuilder.loadTexts: nsmSrcIfName.setDescription("The name of the packet-centric 'inbound' interface")
nsmSrcAddr = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 13), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmSrcAddr.setStatus('current')
if mibBuilder.loadTexts: nsmSrcAddr.setDescription('The Source Address of the packet that generated the log')
nsmSrcPort = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmSrcPort.setStatus('current')
if mibBuilder.loadTexts: nsmSrcPort.setDescription('The Source Port of the packet that generated the log')
nsmNatSrcAddr = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 15), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmNatSrcAddr.setStatus('current')
if mibBuilder.loadTexts: nsmNatSrcAddr.setDescription("The NAT'ed Source Address of the packet that generated the log")
nsmNatSrcPort = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmNatSrcPort.setStatus('current')
if mibBuilder.loadTexts: nsmNatSrcPort.setDescription("The NAT'ed Source Port of the packet that generated the log")
nsmDstZone = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 17), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDstZone.setStatus('current')
if mibBuilder.loadTexts: nsmDstZone.setDescription("The name of the packet-centric 'outbound' zone")
nsmDstIfName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 18), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDstIfName.setStatus('current')
if mibBuilder.loadTexts: nsmDstIfName.setDescription("The name of the packet-centric 'outbound' interface")
nsmDstAddr = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 19), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDstAddr.setStatus('current')
if mibBuilder.loadTexts: nsmDstAddr.setDescription('The Destination Address of the packet that generated the log')
nsmDstPort = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmDstPort.setStatus('current')
if mibBuilder.loadTexts: nsmDstPort.setDescription('The Destination Port of the packet that generated the log')
nsmNatDstAddr = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 21), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmNatDstAddr.setStatus('current')
if mibBuilder.loadTexts: nsmNatDstAddr.setDescription("The NAT'ed Destination Address of the packet that generated the log")
nsmNatDstPort = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmNatDstPort.setStatus('current')
if mibBuilder.loadTexts: nsmNatDstPort.setDescription("The NAT'ed Destination Port of the packet that generated the log")
nsmProtocol = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 255))).clone(namedValues=NamedValues(("hopopt", 0), ("icmp", 1), ("igmp", 2), ("ggp", 3), ("ip", 4), ("st", 5), ("tcp", 6), ("cbt", 7), ("egp", 8), ("igp", 9), ("bbn-rcc-mon", 10), ("nvp-ii", 11), ("pup", 12), ("argus", 13), ("emcon", 14), ("xnet", 15), ("chaos", 16), ("udp", 17), ("mux", 18), ("dcn-meas", 19), ("hmp", 20), ("prm", 21), ("xns-idp", 22), ("trunk-1", 23), ("trunk-2", 24), ("leaf-1", 25), ("leaf-2", 26), ("rdp", 27), ("irtp", 28), ("iso-tp4", 29), ("netblt", 30), ("mfe-nsp", 31), ("merit-inp", 32), ("sep", 33), ("a3pc", 34), ("idpr-1", 35), ("xtp", 36), ("ddp", 37), ("idpr-cmt", 38), ("tp-plusplus", 39), ("il", 40), ("ipv6", 41), ("sdpr", 42), ("ipv6-route", 43), ("ipv6-frag", 44), ("idpr-2", 45), ("rsvp", 46), ("gre", 47), ("mhrp", 48), ("bn", 49), ("esp", 50), ("ah", 51), ("i-nslp", 52), ("swipe", 53), ("narp", 54), ("mobile", 55), ("tlsp", 56), ("skip", 57), ("ipv6-icmp", 58), ("ipv6-nonxt", 59), ("ipv6-opts", 60), ("ahip", 61), ("cftp", 62), ("alnp", 63), ("sat-expak", 64), ("kryptolan", 65), ("rvd", 66), ("ippc", 67), ("adfsp", 68), ("sat-mon", 69), ("visa", 70), ("ipcv", 71), ("cpnx", 72), ("cphb", 73), ("wsn", 74), ("pvp", 75), ("br-sat-mon", 76), ("sun-nd", 77), ("wb-mon", 78), ("wb-expak", 79), ("iso-ip", 80), ("vmpt", 81), ("secure-vmtp", 82), ("vines", 83), ("ttp", 84), ("nsfnet-igp", 85), ("dgp", 86), ("tcf", 87), ("eigrp", 88), ("ospfigp", 89), ("sprite-rcp", 90), ("larp", 91), ("mtp", 92), ("ax-25", 93), ("ipip", 94), ("micp", 95), ("scc-sp", 96), ("etherip", 97), ("encap", 98), ("apes", 99), ("gmtp", 100), ("ifmp", 101), ("pnni", 102), ("pim", 103), ("aris", 104), ("scps", 105), ("qnx", 106), ("a-n", 107), ("ipcomp", 108), ("snp", 109), ("compat-peer", 110), ("ipx-in-ip", 111), ("vrrp", 112), ("pgm", 113), ("hop-0", 114), ("l2tp", 115), ("ddx", 116), ("iatp", 117), ("stp", 118), ("srp", 119), ("uti", 120), ("smp", 121), ("ssm", 122), ("ptp", 123), ("isis", 124), ("fire", 125), ("crtp", 126), ("crudp", 127), ("sscopmce", 128), ("iplt", 129), ("sps", 130), ("pipe", 131), ("sctp", 132), ("fc", 133), ("rsvp-e2e-ignore", 134), ("reserved", 255)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmProtocol.setStatus('current')
if mibBuilder.loadTexts: nsmProtocol.setDescription('The protocol that the packet that generated this log was using')
nsmRuleDomain = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 24), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmRuleDomain.setStatus('current')
if mibBuilder.loadTexts: nsmRuleDomain.setDescription('The Domain that contained the rule that generated this log')
nsmRuleDomainVer = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmRuleDomainVer.setStatus('current')
if mibBuilder.loadTexts: nsmRuleDomainVer.setDescription('The version of the domain that contained the rule that generated this log')
nsmPolicy = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 26), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmPolicy.setStatus('current')
if mibBuilder.loadTexts: nsmPolicy.setDescription('The policy in a specific version of a domain that contained the rule that generated this log')
nsmRulebase = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("none", 0), ("main", 1), ("fw", 2), ("idp", 3), ("honeypot", 4), ("backdoor", 5), ("synpro", 6), ("vpn", 7), ("mpolicy", 8)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmRulebase.setStatus('current')
if mibBuilder.loadTexts: nsmRulebase.setDescription('The rulebase inside the policy in a specific version of a domain that generated this log')
nsmRuleNumber = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 28), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmRuleNumber.setStatus('current')
if mibBuilder.loadTexts: nsmRuleNumber.setDescription('The rule in the rulebase in the policy in the specific version of a domain that generated this log')
nsmAction = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("accept", 0), ("drop-packet", 1), ("drop", 2), ("close", 3), ("close-client", 4), ("close-server", 5), ("ignore", 6), ("dismiss", 7), ("not-set", 8)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmAction.setStatus('current')
if mibBuilder.loadTexts: nsmAction.setDescription('The action the device performed on that packet / connection that generated this log')
nsmSeverity = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("not-set", 0), ("unused1", 1), ("info", 2), ("warning", 3), ("minor", 4), ("major", 5), ("unused2", 6), ("critical", 7)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmSeverity.setStatus('current')
if mibBuilder.loadTexts: nsmSeverity.setDescription('The severity rating associated with this kind of log')
nsmIsAlert = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 31), TruthValue()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmIsAlert.setStatus('current')
if mibBuilder.loadTexts: nsmIsAlert.setDescription('A statement on if logs of this type are to alert the user')
nsmMisc = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 32), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmMisc.setStatus('current')
if mibBuilder.loadTexts: nsmMisc.setDescription('Miscellaneous information associated with the event (typically the repeat count of the event)')
nsmUser = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 33), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmUser.setStatus('current')
if mibBuilder.loadTexts: nsmUser.setDescription('The name of the user associated with the event')
nsmApp = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 34), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmApp.setStatus('current')
if mibBuilder.loadTexts: nsmApp.setDescription("The 'application' associated with this log")
nsmUri = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 35), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmUri.setStatus('current')
if mibBuilder.loadTexts: nsmUri.setDescription("The 'Universal Resource Indicator' associated with this log")
nsmElapsedSecs = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 36), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmElapsedSecs.setStatus('current')
if mibBuilder.loadTexts: nsmElapsedSecs.setDescription('For sessions, specifies how long the session lasted')
nsmBytesIn = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 37), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmBytesIn.setStatus('current')
if mibBuilder.loadTexts: nsmBytesIn.setDescription("For sessions, specifies the number of 'inbound' bytes")
nsmBytesOut = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 38), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmBytesOut.setStatus('current')
if mibBuilder.loadTexts: nsmBytesOut.setDescription("For sessions, specifies the number of 'outbound' bytes")
nsmBytesTotal = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 39), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmBytesTotal.setStatus('current')
if mibBuilder.loadTexts: nsmBytesTotal.setDescription('For sessions, specifies bytes-in + bytes-out')
nsmPacketsIn = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 40), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmPacketsIn.setStatus('current')
if mibBuilder.loadTexts: nsmPacketsIn.setDescription("For sessions, specifies the number of 'inbound' packets")
nsmPacketsOut = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 41), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmPacketsOut.setStatus('current')
if mibBuilder.loadTexts: nsmPacketsOut.setDescription("For sessions, specifies the number of 'outbound' packets")
nsmPacketsTotal = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 42), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmPacketsTotal.setStatus('current')
if mibBuilder.loadTexts: nsmPacketsTotal.setDescription('For sessions, specifies packets-in + packets-out')
nsmRepeatCount = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 43), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmRepeatCount.setStatus('current')
if mibBuilder.loadTexts: nsmRepeatCount.setDescription('The number of logs the device compressed into this one log')
nsmHasPacketData = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 44), TruthValue()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmHasPacketData.setStatus('current')
if mibBuilder.loadTexts: nsmHasPacketData.setDescription('Specifies if this log has associated packet data')
nsmVarDataEnum = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 45), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45))).clone(namedValues=NamedValues(("none", 0), ("test", 1), ("ftp", 2), ("sip", 3), ("msrpc-udp", 4), ("msrpc-tcp", 5), ("portmapper", 6), ("http", 7), ("smtp", 8), ("pop3", 9), ("imap", 10), ("telnet", 11), ("rtsp", 12), ("whois", 13), ("icmp", 14), ("arp", 15), ("tcp", 16), ("dns", 17), ("finger", 18), ("ptype", 19), ("ymsg-v2", 20), ("msn-v2", 21), ("smb", 22), ("gnutella-v2", 23), ("tbd-24", 24), ("scan", 25), ("tbd-26", 26), ("tbd-27", 27), ("tbd-28", 28), ("dhcp", 29), ("tbd-30", 30), ("idp", 31), ("bwmon", 32), ("irc", 33), ("ymsg", 34), ("ident", 35), ("nntp", 36), ("aim", 37), ("rusers", 38), ("tbd-39", 39), ("gopher", 40), ("tbd-41", 41), ("msn", 42), ("gnutella", 43), ("nfs", 44), ("attalarm", 45)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmVarDataEnum.setStatus('current')
if mibBuilder.loadTexts: nsmVarDataEnum.setDescription('The kind of variable data (if any) associated with this log')
nsmVarData = MibScalar((1, 3, 6, 1, 4, 1, 2636, 6, 1, 46), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: nsmVarData.setStatus('current')
if mibBuilder.loadTexts: nsmVarData.setDescription('Variable data')
nsmTrapNotification = NotificationType((1, 3, 6, 1, 4, 1, 2636, 6, 0, 1)).setObjects(("JUNIPER-NSM-TRAPS", "nsmDayID"), ("JUNIPER-NSM-TRAPS", "nsmRecordID"), ("JUNIPER-NSM-TRAPS", "nsmTimeReceived"), ("JUNIPER-NSM-TRAPS", "nsmTimeGenerated"), ("JUNIPER-NSM-TRAPS", "nsmDeviceDomain"), ("JUNIPER-NSM-TRAPS", "nsmDeviceDomainVer2"), ("JUNIPER-NSM-TRAPS", "nsmDevice"), ("JUNIPER-NSM-TRAPS", "nsmDeviceIp"), ("JUNIPER-NSM-TRAPS", "nsmCategory"), ("JUNIPER-NSM-TRAPS", "nsmSubcategory"), ("JUNIPER-NSM-TRAPS", "nsmSrcZone"), ("JUNIPER-NSM-TRAPS", "nsmSrcIfName"), ("JUNIPER-NSM-TRAPS", "nsmSrcAddr"), ("JUNIPER-NSM-TRAPS", "nsmSrcPort"), ("JUNIPER-NSM-TRAPS", "nsmNatSrcAddr"), ("JUNIPER-NSM-TRAPS", "nsmNatSrcPort"), ("JUNIPER-NSM-TRAPS", "nsmDstZone"), ("JUNIPER-NSM-TRAPS", "nsmDstIfName"), ("JUNIPER-NSM-TRAPS", "nsmDstAddr"), ("JUNIPER-NSM-TRAPS", "nsmDstPort"), ("JUNIPER-NSM-TRAPS", "nsmNatDstAddr"), ("JUNIPER-NSM-TRAPS", "nsmNatDstPort"), ("JUNIPER-NSM-TRAPS", "nsmProtocol"), ("JUNIPER-NSM-TRAPS", "nsmRuleDomain"), ("JUNIPER-NSM-TRAPS", "nsmRuleDomainVer"), ("JUNIPER-NSM-TRAPS", "nsmPolicy"), ("JUNIPER-NSM-TRAPS", "nsmRulebase"), ("JUNIPER-NSM-TRAPS", "nsmRuleNumber"), ("JUNIPER-NSM-TRAPS", "nsmAction"), ("JUNIPER-NSM-TRAPS", "nsmSeverity"), ("JUNIPER-NSM-TRAPS", "nsmIsAlert"), ("JUNIPER-NSM-TRAPS", "nsmMisc"), ("JUNIPER-NSM-TRAPS", "nsmUser"), ("JUNIPER-NSM-TRAPS", "nsmApp"), ("JUNIPER-NSM-TRAPS", "nsmUri"), ("JUNIPER-NSM-TRAPS", "nsmElapsedSecs"), ("JUNIPER-NSM-TRAPS", "nsmBytesIn"), ("JUNIPER-NSM-TRAPS", "nsmBytesOut"), ("JUNIPER-NSM-TRAPS", "nsmBytesTotal"), ("JUNIPER-NSM-TRAPS", "nsmPacketsIn"), ("JUNIPER-NSM-TRAPS", "nsmPacketsOut"), ("JUNIPER-NSM-TRAPS", "nsmPacketsTotal"), ("JUNIPER-NSM-TRAPS", "nsmRepeatCount"), ("JUNIPER-NSM-TRAPS", "nsmHasPacketData"), ("JUNIPER-NSM-TRAPS", "nsmVarDataEnum"))
if mibBuilder.loadTexts: nsmTrapNotification.setStatus('current')
if mibBuilder.loadTexts: nsmTrapNotification.setDescription('A trap describing one log data.')
mibBuilder.exportSymbols("JUNIPER-NSM-TRAPS", nsmSrcPort=nsmSrcPort, nsmBytesTotal=nsmBytesTotal, nsmRuleNumber=nsmRuleNumber, nsmElapsedSecs=nsmElapsedSecs, nsmBytesOut=nsmBytesOut, nsmSubcategory=nsmSubcategory, nsmNatDstPort=nsmNatDstPort, nsmMisc=nsmMisc, nsmTimeReceived=nsmTimeReceived, nsmAction=nsmAction, nsmDstPort=nsmDstPort, nsmRepeatCount=nsmRepeatCount, nsmCategory=nsmCategory, PYSNMP_MODULE_ID=nsmTrapInfo, nsmDeviceDomain=nsmDeviceDomain, nsmRecordID=nsmRecordID, nsmDeviceDomainVer2=nsmDeviceDomainVer2, nsmSrcAddr=nsmSrcAddr, nsmRuleDomain=nsmRuleDomain, nsmNatSrcPort=nsmNatSrcPort, nsmRuleDomainVer=nsmRuleDomainVer, nsmTrapInfo=nsmTrapInfo, nsmVarData=nsmVarData, nsmRulebase=nsmRulebase, nsmIsAlert=nsmIsAlert, nsmUri=nsmUri, nsmSrcZone=nsmSrcZone, nsmNatDstAddr=nsmNatDstAddr, nsmDstZone=nsmDstZone, nsmVarDataEnum=nsmVarDataEnum, nsmBytesIn=nsmBytesIn, nsmDevice=nsmDevice, nsmSeverity=nsmSeverity, nsmUser=nsmUser, nsmPacketsTotal=nsmPacketsTotal, nsmNatSrcAddr=nsmNatSrcAddr, nsmDstAddr=nsmDstAddr, nsmPacketsIn=nsmPacketsIn, nsmDstIfName=nsmDstIfName, nsmProtocol=nsmProtocol, nsmSrcIfName=nsmSrcIfName, nsmTrapNotification=nsmTrapNotification, nsmDayID=nsmDayID, nsmPacketsOut=nsmPacketsOut, nsmDeviceIp=nsmDeviceIp, nsmHasPacketData=nsmHasPacketData, nsmPolicy=nsmPolicy, nsmApp=nsmApp, nsmTrap=nsmTrap, nsmTimeGenerated=nsmTimeGenerated)
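
# A minimal loading sketch (assumes pysnmp is installed and this file sits on
# the MIB search path; `mibBuilder` above is injected by the loader, so this
# module is not meant to be imported directly):
#     from pysnmp.smi import builder, view
#     mib_builder = builder.MibBuilder()
#     mib_builder.loadModules('JUNIPER-NSM-TRAPS')
#     mib_view = view.MibViewController(mib_builder)
#     oid, label, suffix = mib_view.getNodeName(('nsmTrapNotification',),
#                                               'JUNIPER-NSM-TRAPS')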

# ==== chainer_/chainercv2/models/polynet.py (yick2232/imgclsmob, MIT) ====
"""
PolyNet for ImageNet-1K, implemented in Chainer.
Original paper: 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
https://arxiv.org/abs/1611.05725.
"""
__all__ = ['PolyNet', 'polynet']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import ConvBlock, conv1x1_block, conv3x3_block, Concurrent, ParametricSequential, ParametricConcurrent,\
SimpleSequential
class PolyConv(Chain):
"""
PolyNet specific convolution block. A block that is used inside poly-N (poly-2, poly-3, and so on) modules.
The Convolution layer is shared between all Inception blocks inside a poly-N module. BatchNorm layers are not
shared between Inception blocks and therefore the number of BatchNorm layers is equal to the number of Inception
blocks inside a poly-N module.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
ksize : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Stride of the convolution.
pad : int or tuple/list of 2 int
Padding value for convolution layer.
num_blocks : int
Number of blocks (BatchNorm layers).
"""
def __init__(self,
in_channels,
out_channels,
ksize,
stride,
pad,
num_blocks):
super(PolyConv, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(
in_channels=in_channels,
out_channels=out_channels,
ksize=ksize,
stride=stride,
pad=pad,
nobias=True)
for i in range(num_blocks):
setattr(self, "bn{}".format(i + 1), L.BatchNormalization(
size=out_channels,
eps=1e-5))
self.activ = F.relu
def __call__(self, x, index):
x = self.conv(x)
bn = getattr(self, "bn{}".format(index + 1))
x = bn(x)
x = self.activ(x)
return x
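def _polyconv_demo():
    """
    A minimal sketch, not part of the original model (assumes Chainer and NumPy
    are available): the convolution weights of a PolyConv are shared across
    calls, while `index` selects which BatchNorm layer is applied.
    """
    import numpy as np
    import chainer

    conv = PolyConv(in_channels=8, out_channels=8, ksize=3, stride=1, pad=1,
                    num_blocks=2)
    x = np.random.randn(1, 8, 16, 16).astype(np.float32)
    with chainer.using_config("train", False):
        y0 = conv(x, index=0)  # routed through bn1
        y1 = conv(x, index=1)  # same convolution kernel, routed through bn2
    return y0, y1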
def poly_conv1x1(in_channels,
out_channels,
num_blocks):
"""
1x1 version of the PolyNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
num_blocks : int
Number of blocks (BatchNorm layers).
"""
return PolyConv(
in_channels=in_channels,
out_channels=out_channels,
ksize=1,
stride=1,
pad=0,
num_blocks=num_blocks)
class MaxPoolBranch(Chain):
"""
PolyNet specific max pooling branch block.
"""
def __init__(self):
super(MaxPoolBranch, self).__init__()
with self.init_scope():
self.pool = partial(
F.max_pooling_2d,
ksize=3,
stride=2,
pad=0,
cover_all=False)
def __call__(self, x):
x = self.pool(x)
return x
class Conv1x1Branch(Chain):
"""
PolyNet specific convolutional 1x1 branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(Conv1x1Branch, self).__init__()
with self.init_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels)
def __call__(self, x):
x = self.conv(x)
return x
class Conv3x3Branch(Chain):
"""
PolyNet specific convolutional 3x3 branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(Conv3x3Branch, self).__init__()
with self.init_scope():
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=2,
pad=0)
def __call__(self, x):
x = self.conv(x)
return x
class ConvSeqBranch(Chain):
"""
PolyNet specific convolutional sequence branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of tuple of int
List of numbers of output channels.
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
List of convolution window sizes.
strides_list : list of tuple of int or tuple of tuple/list of 2 int
List of strides of the convolution.
padding_list : list of tuple of int or tuple of tuple/list of 2 int
List of padding values for convolution layers.
"""
def __init__(self,
in_channels,
out_channels_list,
kernel_size_list,
strides_list,
padding_list):
super(ConvSeqBranch, self).__init__()
assert (len(out_channels_list) == len(kernel_size_list))
assert (len(out_channels_list) == len(strides_list))
assert (len(out_channels_list) == len(padding_list))
with self.init_scope():
self.conv_list = SimpleSequential()
with self.conv_list.init_scope():
for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
out_channels_list, kernel_size_list, strides_list, padding_list)):
setattr(self.conv_list, "conv{}".format(i + 1), ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
ksize=kernel_size,
stride=strides,
pad=padding))
in_channels = out_channels
def __call__(self, x):
x = self.conv_list(x)
return x
class PolyConvSeqBranch(Chain):
"""
PolyNet specific convolutional sequence branch block with internal PolyNet specific convolution blocks.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of tuple of int
List of numbers of output channels.
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
List of convolution window sizes.
strides_list : list of tuple of int or tuple of tuple/list of 2 int
List of strides of the convolution.
padding_list : list of tuple of int or tuple of tuple/list of 2 int
List of padding values for convolution layers.
num_blocks : int
Number of blocks for PolyConv.
"""
def __init__(self,
in_channels,
out_channels_list,
kernel_size_list,
strides_list,
padding_list,
num_blocks):
super(PolyConvSeqBranch, self).__init__()
assert (len(out_channels_list) == len(kernel_size_list))
assert (len(out_channels_list) == len(strides_list))
assert (len(out_channels_list) == len(padding_list))
with self.init_scope():
self.conv_list = ParametricSequential()
with self.conv_list.init_scope():
for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
out_channels_list, kernel_size_list, strides_list, padding_list)):
setattr(self.conv_list, "conv{}".format(i + 1), PolyConv(
in_channels=in_channels,
out_channels=out_channels,
ksize=kernel_size,
stride=strides,
pad=padding,
num_blocks=num_blocks))
in_channels = out_channels
def __call__(self, x, index):
x = self.conv_list(x, index=index)
return x
class TwoWayABlock(Chain):
"""
PolyNet type Inception-A block.
"""
def __init__(self):
super(TwoWayABlock, self).__init__()
in_channels = 384
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(32, 48, 64),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 1),
padding_list=(0, 1, 1)))
setattr(self.branches, "branch2", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(32, 32),
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 1)))
setattr(self.branches, "branch3", Conv1x1Branch(
in_channels=in_channels,
out_channels=32))
self.conv = conv1x1_block(
in_channels=128,
out_channels=in_channels,
activation=None)
def __call__(self, x):
x = self.branches(x)
x = self.conv(x)
return x
class TwoWayBBlock(Chain):
"""
PolyNet type Inception-B block.
"""
def __init__(self):
super(TwoWayBBlock, self).__init__()
in_channels = 1152
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(128, 160, 192),
kernel_size_list=(1, (1, 7), (7, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 3), (3, 0))))
setattr(self.branches, "branch2", Conv1x1Branch(
in_channels=in_channels,
out_channels=192))
self.conv = conv1x1_block(
in_channels=384,
out_channels=in_channels,
activation=None)
def __call__(self, x):
x = self.branches(x)
x = self.conv(x)
return x
class TwoWayCBlock(Chain):
"""
PolyNet type Inception-C block.
"""
def __init__(self):
super(TwoWayCBlock, self).__init__()
in_channels = 2048
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 224, 256),
kernel_size_list=(1, (1, 3), (3, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 1), (1, 0))))
setattr(self.branches, "branch2", Conv1x1Branch(
in_channels=in_channels,
out_channels=192))
self.conv = conv1x1_block(
in_channels=448,
out_channels=in_channels,
activation=None)
def __call__(self, x):
x = self.branches(x)
x = self.conv(x)
return x
class PolyPreBBlock(Chain):
"""
PolyNet type PolyResidual-Pre-B block.
Parameters:
----------
num_blocks : int
Number of blocks (BatchNorm layers).
"""
def __init__(self,
num_blocks):
super(PolyPreBBlock, self).__init__()
in_channels = 1152
with self.init_scope():
self.branches = ParametricConcurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", PolyConvSeqBranch(
in_channels=in_channels,
out_channels_list=(128, 160, 192),
kernel_size_list=(1, (1, 7), (7, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 3), (3, 0)),
num_blocks=num_blocks))
setattr(self.branches, "branch2", poly_conv1x1(
in_channels=in_channels,
out_channels=192,
num_blocks=num_blocks))
def __call__(self, x, index):
x = self.branches(x, index=index)
return x
class PolyPreCBlock(Chain):
"""
PolyNet type PolyResidual-Pre-C block.
Parameters:
----------
num_blocks : int
Number of blocks (BatchNorm layers).
"""
def __init__(self,
num_blocks):
super(PolyPreCBlock, self).__init__()
in_channels = 2048
with self.init_scope():
self.branches = ParametricConcurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", PolyConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 224, 256),
kernel_size_list=(1, (1, 3), (3, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 1), (1, 0)),
num_blocks=num_blocks))
setattr(self.branches, "branch2", poly_conv1x1(
in_channels=in_channels,
out_channels=192,
num_blocks=num_blocks))
def __call__(self, x, index):
x = self.branches(x, index=index)
return x
def poly_res_b_block():
"""
PolyNet type PolyResidual-Res-B block.
"""
return conv1x1_block(
in_channels=384,
out_channels=1152,
stride=1,
activation=None)
def poly_res_c_block():
"""
PolyNet type PolyResidual-Res-C block.
"""
return conv1x1_block(
in_channels=448,
out_channels=2048,
stride=1,
activation=None)
class MultiResidual(Chain):
"""
    Base class for constructing N-way modules (2-way, 3-way, and so on); in this model it is instantiated as a 2-way module.
    Parameters:
    ----------
    scale : float
Scale value for each residual branch.
res_block : Chain class
Residual branch block.
num_blocks : int
Number of residual branches.
"""
def __init__(self,
scale,
res_block,
num_blocks):
super(MultiResidual, self).__init__()
assert (num_blocks >= 1)
self.scale = scale
self.num_blocks = num_blocks
with self.init_scope():
for i in range(num_blocks):
setattr(self, "res_block{}".format(i + 1), res_block())
self.activ = F.relu
def __call__(self, x):
out = x
for i in range(self.num_blocks):
res_block = getattr(self, "res_block{}".format(i + 1))
out = out + self.scale * res_block(x)
out = self.activ(out)
return out
class PolyResidual(Chain):
"""
    Base class for constructing N-way poly-modules; in this model it is instantiated as a 3-way poly-module.
    Parameters:
    ----------
    scale : float
Scale value for each residual branch.
res_block : Chain class
Residual branch block.
num_blocks : int
Number of residual branches.
pre_block : Chain class
Preliminary block.
"""
def __init__(self,
scale,
res_block,
num_blocks,
pre_block):
super(PolyResidual, self).__init__()
assert (num_blocks >= 1)
self.scale = scale
self.num_blocks = num_blocks
with self.init_scope():
self.pre_block = pre_block(num_blocks=num_blocks)
for i in range(num_blocks):
setattr(self, "res_block{}".format(i + 1), res_block())
self.activ = F.relu
def __call__(self, x):
out = x
for index in range(self.num_blocks):
x = self.pre_block(x, index)
res_block = getattr(self, "res_block{}".format(index + 1))
x = res_block(x)
out = out + self.scale * x
x = self.activ(x)
out = self.activ(out)
return out
class PolyBaseUnit(Chain):
"""
PolyNet unit base class.
Parameters:
----------
two_way_scale : float
Scale value for 2-way stage.
two_way_block : Chain class
Residual branch block for 2-way-stage.
poly_scale : float, default 0.0
        Scale value for the poly stage.
poly_res_block : Chain class, default None
Residual branch block for poly-stage.
poly_pre_block : Chain class, default None
Preliminary branch block for poly-stage.
"""
def __init__(self,
two_way_scale,
two_way_block,
poly_scale=0.0,
poly_res_block=None,
poly_pre_block=None):
super(PolyBaseUnit, self).__init__()
with self.init_scope():
if poly_res_block is not None:
assert (poly_scale != 0.0)
assert (poly_pre_block is not None)
self.poly = PolyResidual(
scale=poly_scale,
res_block=poly_res_block,
num_blocks=3,
pre_block=poly_pre_block)
else:
assert (poly_scale == 0.0)
assert (poly_pre_block is None)
self.poly = None
self.twoway = MultiResidual(
scale=two_way_scale,
res_block=two_way_block,
num_blocks=2)
def __call__(self, x):
if self.poly is not None:
x = self.poly(x)
x = self.twoway(x)
return x
class PolyAUnit(PolyBaseUnit):
"""
PolyNet type A unit.
Parameters:
----------
two_way_scale : float
Scale value for 2-way stage.
    poly_scale : float, default 0.0
        Scale value for the poly stage (unused; must be 0.0 for type A units).
"""
def __init__(self,
two_way_scale,
poly_scale=0.0):
super(PolyAUnit, self).__init__(
two_way_scale=two_way_scale,
two_way_block=TwoWayABlock)
assert (poly_scale == 0.0)
class PolyBUnit(PolyBaseUnit):
"""
PolyNet type B unit.
Parameters:
----------
two_way_scale : float
Scale value for 2-way stage.
poly_scale : float
        Scale value for the poly stage.
"""
def __init__(self,
two_way_scale,
poly_scale):
super(PolyBUnit, self).__init__(
two_way_scale=two_way_scale,
two_way_block=TwoWayBBlock,
poly_scale=poly_scale,
poly_res_block=poly_res_b_block,
poly_pre_block=PolyPreBBlock)
class PolyCUnit(PolyBaseUnit):
"""
PolyNet type C unit.
Parameters:
----------
two_way_scale : float
Scale value for 2-way stage.
poly_scale : float
        Scale value for the poly stage.
"""
def __init__(self,
two_way_scale,
poly_scale):
super(PolyCUnit, self).__init__(
two_way_scale=two_way_scale,
two_way_block=TwoWayCBlock,
poly_scale=poly_scale,
poly_res_block=poly_res_c_block,
poly_pre_block=PolyPreCBlock)
class ReductionAUnit(Chain):
"""
PolyNet type Reduction-A unit.
"""
def __init__(self):
super(ReductionAUnit, self).__init__()
in_channels = 384
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(256, 256, 384),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0)))
setattr(self.branches, "branch2", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(384,),
kernel_size_list=(3,),
strides_list=(2,),
padding_list=(0,)))
setattr(self.branches, "branch3", MaxPoolBranch())
def __call__(self, x):
x = self.branches(x)
return x
class ReductionBUnit(Chain):
"""
PolyNet type Reduction-B unit.
"""
def __init__(self):
super(ReductionBUnit, self).__init__()
in_channels = 1152
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(256, 256, 256),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0)))
setattr(self.branches, "branch2", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(256, 256),
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0)))
setattr(self.branches, "branch3", ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(256, 384),
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0)))
setattr(self.branches, "branch4", MaxPoolBranch())
def __call__(self, x):
x = self.branches(x)
return x
class PolyBlock3a(Chain):
"""
PolyNet type Mixed-3a block.
"""
def __init__(self):
super(PolyBlock3a, self).__init__()
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", MaxPoolBranch())
setattr(self.branches, "branch2", Conv3x3Branch(
in_channels=64,
out_channels=96))
def __call__(self, x):
x = self.branches(x)
return x
class PolyBlock4a(Chain):
"""
PolyNet type Mixed-4a block.
"""
def __init__(self):
super(PolyBlock4a, self).__init__()
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", ConvSeqBranch(
in_channels=160,
out_channels_list=(64, 96),
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 0)))
setattr(self.branches, "branch2", ConvSeqBranch(
in_channels=160,
out_channels_list=(64, 64, 64, 96),
kernel_size_list=(1, (7, 1), (1, 7), 3),
strides_list=(1, 1, 1, 1),
padding_list=(0, (3, 0), (0, 3), 0)))
def __call__(self, x):
x = self.branches(x)
return x
class PolyBlock5a(Chain):
"""
PolyNet type Mixed-5a block.
"""
def __init__(self):
super(PolyBlock5a, self).__init__()
with self.init_scope():
self.branches = Concurrent()
with self.branches.init_scope():
setattr(self.branches, "branch1", MaxPoolBranch())
setattr(self.branches, "branch2", Conv3x3Branch(
in_channels=192,
out_channels=192))
def __call__(self, x):
x = self.branches(x)
return x
class PolyInitBlock(Chain):
"""
PolyNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
"""
def __init__(self,
in_channels):
super(PolyInitBlock, self).__init__()
with self.init_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=32,
stride=2,
pad=0)
self.conv2 = conv3x3_block(
in_channels=32,
out_channels=32,
pad=0)
self.conv3 = conv3x3_block(
in_channels=32,
out_channels=64)
self.block1 = PolyBlock3a()
self.block2 = PolyBlock4a()
self.block3 = PolyBlock5a()
def __call__(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
return x
class PolyNet(Chain):
"""
PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
https://arxiv.org/abs/1611.05725.
Parameters:
----------
two_way_scales : list of list of floats
Two way scale values for each normal unit.
poly_scales : list of list of floats
Three way scale values for each normal unit.
dropout_rate : float, default 0.2
Fraction of the input units to drop. Must be a number between 0 and 1.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (331, 331)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
two_way_scales,
poly_scales,
dropout_rate=0.2,
in_channels=3,
in_size=(331, 331),
classes=1000):
super(PolyNet, self).__init__()
self.in_size = in_size
self.classes = classes
normal_units = [PolyAUnit, PolyBUnit, PolyCUnit]
reduction_units = [ReductionAUnit, ReductionBUnit]
with self.init_scope():
self.features = SimpleSequential()
with self.features.init_scope():
setattr(self.features, "init_block", PolyInitBlock(
in_channels=in_channels))
for i, (two_way_scales_per_stage, poly_scales_per_stage) in enumerate(zip(two_way_scales, poly_scales)):
stage = SimpleSequential()
with stage.init_scope():
for j, (two_way_scale, poly_scale) in enumerate(zip(
two_way_scales_per_stage, poly_scales_per_stage)):
if (j == 0) and (i != 0):
unit = reduction_units[i - 1]
setattr(stage, "unit{}".format(j + 1), unit())
else:
unit = normal_units[i]
setattr(stage, "unit{}".format(j + 1), unit(
two_way_scale=two_way_scale,
poly_scale=poly_scale))
setattr(self.features, "stage{}".format(i + 1), stage)
setattr(self.features, 'final_pool', partial(
F.average_pooling_2d,
ksize=9,
stride=1))
in_channels = 2048
self.output = SimpleSequential()
with self.output.init_scope():
setattr(self.output, 'flatten', partial(
F.reshape,
shape=(-1, in_channels)))
setattr(self.output, 'dropout', partial(
F.dropout,
ratio=dropout_rate))
setattr(self.output, 'fc', L.Linear(
in_size=in_channels,
out_size=classes))
def __call__(self, x):
x = self.features(x)
x = self.output(x)
return x
def get_polynet(model_name=None,
pretrained=False,
root=os.path.join("~", ".chainer", "models"),
**kwargs):
"""
Create PolyNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
two_way_scales = [
[1.000000, 0.992308, 0.984615, 0.976923, 0.969231, 0.961538, 0.953846, 0.946154, 0.938462, 0.930769],
[0.000000, 0.915385, 0.900000, 0.884615, 0.869231, 0.853846, 0.838462, 0.823077, 0.807692, 0.792308, 0.776923],
[0.000000, 0.761538, 0.746154, 0.730769, 0.715385, 0.700000]]
poly_scales = [
[0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
[0.000000, 0.923077, 0.907692, 0.892308, 0.876923, 0.861538, 0.846154, 0.830769, 0.815385, 0.800000, 0.784615],
[0.000000, 0.769231, 0.753846, 0.738462, 0.723077, 0.707692]]
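    # Note on the schedules above: every value is a multiple of 1/130, decaying
    # linearly from 1.0 down to 0.7 across the network. The leading 0.0 in
    # stages 2 and 3 marks the reduction-unit slot (reduction units take no
    # scale arguments), and the all-zero first poly_scales row reflects that
    # type-A units have no poly stage (PolyAUnit asserts poly_scale == 0.0).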
net = PolyNet(
two_way_scales=two_way_scales,
poly_scales=poly_scales,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net
def polynet(**kwargs):
"""
PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
https://arxiv.org/abs/1611.05725.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_polynet(model_name="polynet", **kwargs)
def _test():
import numpy as np
import chainer
chainer.global_config.train = False
pretrained = False
models = [
polynet,
]
for model in models:
net = model(pretrained=pretrained)
weight_count = net.count_params()
print("m={}, {}".format(model.__name__, weight_count))
assert (model != polynet or weight_count == 95366600)
x = np.zeros((1, 3, 331, 331), np.float32)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 31.747711
| 120
| 0.543002
|
4996c32e2369ffbf065965528818ba48c0a7a263
| 12,780
|
py
|
Python
|
mne/io/tests/test_raw.py
|
achilleas-k/mne-python
|
0078e1af13a92ab47498dd167bc5ec73be864427
|
[
"BSD-3-Clause"
] | null | null | null |
mne/io/tests/test_raw.py
|
achilleas-k/mne-python
|
0078e1af13a92ab47498dd167bc5ec73be864427
|
[
"BSD-3-Clause"
] | 4
|
2016-06-04T15:28:08.000Z
|
2016-12-22T14:23:13.000Z
|
mne/io/tests/test_raw.py
|
achilleas-k/mne-python
|
0078e1af13a92ab47498dd167bc5ec73be864427
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Generic tests that all raw classes should run."""
# # Authors: MNE Developers
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
from os import path as op
import math
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal,
assert_equal, assert_array_equal)
from mne import concatenate_raws, create_info, Annotations
from mne.annotations import _handle_meas_date
from mne.datasets import testing
from mne.io import read_raw_fif, RawArray, BaseRaw
from mne.utils import _TempDir, catch_logging, _raw_annot
from mne.io.meas_info import _get_valid_units
def test_orig_units():
"""Test the error handling for original units."""
# Should work fine
info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
BaseRaw(info, last_samps=[1], orig_units={'Cz': 'nV'})
# Should complain that channel Cz does not have a corresponding original
# unit.
with pytest.raises(ValueError, match='has no associated original unit.'):
info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
BaseRaw(info, last_samps=[1], orig_units={'not_Cz': 'nV'})
# Test that a non-dict orig_units argument raises a ValueError
with pytest.raises(ValueError, match='orig_units must be of type dict'):
info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
BaseRaw(info, last_samps=[1], orig_units=True)
def _test_raw_reader(reader, test_preloading=True, **kwargs):
"""Test reading, writing and slicing of raw classes.
Parameters
----------
reader : function
Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, reading with
        preloading, without preloading, and with memory mapping to a file are
        all tested against one another.
**kwargs :
Arguments for the reader. Note: Do not use preload as kwarg.
Use ``test_preloading`` instead.
Returns
-------
raw : instance of Raw
A preloaded Raw object.
"""
tempdir = _TempDir()
rng = np.random.RandomState(0)
if test_preloading:
raw = reader(preload=True, **kwargs)
# don't assume the first is preloaded
buffer_fname = op.join(tempdir, 'buffer')
picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
picks = np.append(picks, len(raw.ch_names) - 1) # test trigger channel
bnd = min(int(round(raw.buffer_size_sec *
raw.info['sfreq'])), raw.n_times)
slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
slice(3, 300), slice(None), slice(1, bnd)]
if raw.n_times >= 2 * bnd: # at least two complete blocks
slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
slice(0, bnd + 100)]
other_raws = [reader(preload=buffer_fname, **kwargs),
reader(preload=False, **kwargs)]
for sl_time in slices:
data1, times1 = raw[picks, sl_time]
for other_raw in other_raws:
data2, times2 = other_raw[picks, sl_time]
assert_allclose(data1, data2)
assert_allclose(times1, times2)
else:
raw = reader(**kwargs)
full_data = raw._data
assert raw.__class__.__name__ in repr(raw) # to test repr
assert raw.info.__class__.__name__ in repr(raw.info)
# gh-5604
assert _handle_meas_date(raw.info['meas_date']) >= 0
# test resetting raw
raw2 = reader(**raw._init_kwargs)
assert set(raw.info.keys()) == set(raw2.info.keys())
assert_array_equal(raw.times, raw2.times)
# Test saving and reading
out_fname = op.join(tempdir, 'test_raw.fif')
raw = concatenate_raws([raw])
raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
raw3 = read_raw_fif(out_fname)
assert set(raw.info.keys()) == set(raw3.info.keys())
assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
atol=1e-20) # atol is very small but > 0
assert_array_almost_equal(raw.times, raw3.times)
assert not math.isnan(raw3.info['highpass'])
assert not math.isnan(raw3.info['lowpass'])
assert not math.isnan(raw.info['highpass'])
assert not math.isnan(raw.info['lowpass'])
assert raw3.info['kit_system_id'] == raw.info['kit_system_id']
# Make sure concatenation works
first_samp = raw.first_samp
last_samp = raw.last_samp
concat_raw = concatenate_raws([raw.copy(), raw])
assert_equal(concat_raw.n_times, 2 * raw.n_times)
assert_equal(concat_raw.first_samp, first_samp)
assert_equal(concat_raw.last_samp - last_samp + first_samp, last_samp + 1)
idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]
if concat_raw.info['meas_date'] is None:
expected_bad_boundary_onset = ((last_samp - first_samp) /
raw.info['sfreq'])
else:
expected_bad_boundary_onset = raw._last_time
assert_array_almost_equal(concat_raw.annotations.onset[idx],
expected_bad_boundary_onset,
decimal=2)
if raw.info['meas_id'] is not None:
for key in ['secs', 'usecs', 'version']:
assert_equal(raw.info['meas_id'][key], raw3.info['meas_id'][key])
assert_array_equal(raw.info['meas_id']['machid'],
raw3.info['meas_id']['machid'])
assert isinstance(raw.annotations, Annotations)
# Make a "soft" test on units: They have to be valid SI units as in
# mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
valid_units = _get_valid_units()
valid_units_lower = [unit.lower() for unit in valid_units]
if raw._orig_units is not None:
assert isinstance(raw._orig_units, dict)
for ch_name, unit in raw._orig_units.items():
assert unit.lower() in valid_units_lower, ch_name
return raw
def _test_concat(reader, *args):
"""Test concatenation of raw classes that allow not preloading."""
data = None
for preload in (True, False):
raw1 = reader(*args, preload=preload)
raw2 = reader(*args, preload=preload)
raw1.append(raw2)
raw1.load_data()
if data is None:
data = raw1[:, :][0]
assert_allclose(data, raw1[:, :][0])
for first_preload in (True, False):
raw = reader(*args, preload=first_preload)
data = raw[:, :][0]
for preloads in ((True, True), (True, False), (False, False)):
for last_preload in (True, False):
t_crops = raw.times[np.argmin(np.abs(raw.times - 0.5)) +
[0, 1]]
raw1 = raw.copy().crop(0, t_crops[0])
if preloads[0]:
raw1.load_data()
raw2 = raw.copy().crop(t_crops[1], None)
if preloads[1]:
raw2.load_data()
raw1.append(raw2)
if last_preload:
raw1.load_data()
assert_allclose(data, raw1[:, :][0])
@testing.requires_testing_data
def test_time_as_index():
"""Test indexing of raw times."""
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
raw = read_raw_fif(raw_fname)
# Test original (non-rounding) indexing behavior
orig_inds = raw.time_as_index(raw.times)
assert(len(set(orig_inds)) != len(orig_inds))
# Test new (rounding) indexing behavior
new_inds = raw.time_as_index(raw.times, use_rounding=True)
assert_array_equal(new_inds, np.arange(len(raw.times)))
@pytest.mark.parametrize('offset, origin', [
pytest.param(0, None, id='times in s. relative to first_samp (default)'),
pytest.param(0, 2.0, id='times in s. relative to first_samp'),
pytest.param(1, 1.0, id='times in s. relative to meas_date'),
pytest.param(2, 0.0, id='absolute times in s. relative to 0')])
def test_time_as_index_ref(offset, origin):
"""Test indexing of raw times."""
meas_date = 1
info = create_info(ch_names=10, sfreq=10.)
raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
raw.info['meas_date'] = meas_date
relative_times = raw.times
inds = raw.time_as_index(relative_times + offset,
use_rounding=True,
origin=origin)
assert_array_equal(inds, np.arange(raw.n_times))
def test_meas_date_orig_time():
"""Test the relation between meas_time in orig_time."""
# meas_time is set and orig_time is set:
# clips the annotations based on raw.data and resets the annotation based
    # on raw.info['meas_date']
raw = _raw_annot(1, 1.5)
assert raw.annotations.orig_time == 1
assert raw.annotations.onset[0] == 1
# meas_time is set and orig_time is None:
    # Consider annot.orig_time to be raw.first_samp, clip and reset
    # annotations so that raw.annotations.orig_time == raw.info['meas_date']
raw = _raw_annot(1, None)
assert raw.annotations.orig_time == 1
assert raw.annotations.onset[0] == 1.5
# meas_time is None and orig_time is set:
    # Raise an error: it makes no sense to attach an annotations object whose
    # acquisition time is known to a raw object whose acquisition time is
    # unknown.
with pytest.raises(RuntimeError, match='Ambiguous operation'):
_raw_annot(None, 1.5)
# meas_time is None and orig_time is None:
    # Consider annot.orig_time to be raw.first_samp and clip
raw = _raw_annot(None, None)
assert raw.annotations.orig_time is None
assert raw.annotations.onset[0] == 0.5
assert raw.annotations.duration[0] == 0.2
def test_get_data_reject():
"""Test if reject_by_annotation is working correctly."""
fs = 256
ch_names = ["C3", "Cz", "C4"]
info = create_info(ch_names, sfreq=fs)
raw = RawArray(np.zeros((len(ch_names), 10 * fs)), info)
raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2],
description="bad"))
with catch_logging() as log:
data = raw.get_data(reject_by_annotation="omit", verbose=True)
msg = ('Omitting 1024 of 2560 (40.00%) samples, retaining 1536' +
' (60.00%) samples.')
assert log.getvalue().strip() == msg
assert data.shape == (len(ch_names), 1536)
with catch_logging() as log:
data = raw.get_data(reject_by_annotation="nan", verbose=True)
msg = ('Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536' +
' (60.00%) samples.')
assert log.getvalue().strip() == msg
assert data.shape == (len(ch_names), 2560) # shape doesn't change
assert np.isnan(data).sum() == 3072 # but NaNs are introduced instead
def test_5839():
"""Test concatenating raw objects with annotations."""
# Global Time 0 1 2 3 4
# .
# raw_A |---------XXXXXXXXXX
# annot |--------------AA
# latency . 0 0 1 1 2 2 3
# . 5 0 5 0 5 0
#
# raw_B . |---------YYYYYYYYYY
# annot . |--------------AA
# latency . 0 1
# . 5 0
# .
# output |---------XXXXXXXXXXYYYYYYYYYY
# annot |--------------AA---|----AA
# latency . 0 0 1 1 2 2 3
# . 5 0 5 0 5 0
#
EXPECTED_ONSET = [1.5, 2., 2., 2.5]
EXPECTED_DURATION = [0.2, 0., 0., 0.2]
EXPECTED_DESCRIPTION = ['dummy', 'BAD boundary', 'EDGE boundary', 'dummy']
def raw_factory(meas_date):
raw = RawArray(data=np.empty((10, 10)),
info=create_info(ch_names=10, sfreq=10., ),
first_samp=10)
raw.info['meas_date'] = meas_date
raw.set_annotations(annotations=Annotations(onset=[.5],
duration=[.2],
description='dummy',
orig_time=None))
return raw
raw_A, raw_B = [raw_factory((x, 0)) for x in [0, 2]]
raw_A.append(raw_B)
assert_array_equal(raw_A.annotations.onset, EXPECTED_ONSET)
assert_array_equal(raw_A.annotations.duration, EXPECTED_DURATION)
assert_array_equal(raw_A.annotations.description, EXPECTED_DESCRIPTION)
assert raw_A.annotations.orig_time == 0.0
| 40.188679
| 79
| 0.599139
|
4eb1f70c6816bfd7a4fd9e31e2280e858d14d1e7
| 208
|
py
|
Python
|
Models/Application.py
|
khatrivarun/tinder_for_jobs_py
|
208d42c7c283fc97f966ec7173d53c5c89651862
|
[
"MIT"
] | 4
|
2020-12-04T06:56:48.000Z
|
2021-12-15T12:48:08.000Z
|
Models/Application.py
|
khatrivarun/tinder_for_jobs_py
|
208d42c7c283fc97f966ec7173d53c5c89651862
|
[
"MIT"
] | null | null | null |
Models/Application.py
|
khatrivarun/tinder_for_jobs_py
|
208d42c7c283fc97f966ec7173d53c5c89651862
|
[
"MIT"
] | 1
|
2020-05-23T08:01:31.000Z
|
2020-05-23T08:01:31.000Z
|
class Application:
    """Plain data holder for a job application: ids, e-mail addresses and response."""

    def __init__(self):
        self.application_id = None
        self.job_id = None
        self.applicant_email_id = None
        self.company_email_id = None
        self.response = ''
| 26
| 38
| 0.620192
|
20a242c1a971b9d80b2c80232b1a0ab6de606116
| 7,950
|
py
|
Python
|
test/Polynomial_tests.py
|
coproc/PolyPieces
|
35f6262e378de92147e20d8605d8247bd70ee4bb
|
[
"MIT"
] | null | null | null |
test/Polynomial_tests.py
|
coproc/PolyPieces
|
35f6262e378de92147e20d8605d8247bd70ee4bb
|
[
"MIT"
] | null | null | null |
test/Polynomial_tests.py
|
coproc/PolyPieces
|
35f6262e378de92147e20d8605d8247bd70ee4bb
|
[
"MIT"
] | null | null | null |
import numbers
import unittest
from src.Polynomial import Polynomial as Poly
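# Each creation test case maps an input (a string expression, or a tuple of
# Poly.fromString arguments) to the expected internal structure: a
# (coeffs, varName) pair, where a coefficient may itself be a nested
# (coeffs, varName) pair for multivariate polynomials (see
# PolynomialTests._assertPolyStruct below).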
TEST_CASES_CREATION = [
('1', ([1], 'x')),
(('1',['y']), ([1], 'y')),
('x', ([0, 1], 'x')),
('y', ([0, 1], 'y')),
('1-x', ([1, -1], 'x')),
('1-y', ([1, -1], 'y')),
('1-xy', ([1, ([0, -1], 'x')], 'y')),
(('1-xy',['xy']), ([1, -1], 'xy')),
('(1-x)(1-y)', ([([1, -1], 'x'), ([-1, 1], 'x')], 'y')),
('(x+y+z)^2', ([([([0,0,1], 'x'), ([0,2], 'x'), 1], 'y'), ([([0,2], 'x'), 2], 'y'), 1], 'z'))
]
TEST_CASES_NULLARY = {
Poly.deg: [
(-1, []),
(-1, [0]),
( 0, [1]),
( 1, [-1,2]),
( 2, [0,0,-1])
],
Poly._normalize: [
# univariate polys
([], []),
([], [0]),
([1], [1,0]),
([0,-1], [0,-1,0,0]),
# multivariate polys: remove zero leading terms in all layers
(([0, ([0,2], 'x')], 'y'), '(x^2+2xy+y^2)-x^2-y^2'), # = 2xy
# multivariate polys: remove (layers with) variables with only constant terms
(([([0,1], 'x'), 1], 'z'), '(x+y+z) - y') # = x + z
],
Poly.allVarNames: [
({'x'}, []),
({'x', 'y'}, 'xy'),
({'y'}, '(x+y)-x'),
({'t', 'x', 'y', 'z'}, 'z*x + yz^2+t'),
],
Poly.der: [
# univariate polys
([], []),
([], [0]),
([], [1]),
([1], [0,1]),
([0,2], [0,0,1]),
# multivariate polys
(([0, ([0,2], 'y')], 'z'), '1+yz^2') # d(1+yz^2)/dz = 2yz
],
Poly.intIndef: [
# univariate polys
([], []),
([], [0]),
([0,1], [1]),
([0,0,1], [0,2]),
# multivariate polys
(([0, 1, ([0,1], 'y')], 'z'), '1+2yz') # int(1+2yz)dz = z+yz^2
]
}
TEST_CASES_UNARY = {
Poly.deg: [
(-1, ('0', 'x')),
(-1, ('0', 'y')),
(0, ('1', 'x')),
(0, ('1', 'y')),
(1, ('x', 'x')),
(0, ('x', 'y')),
(0, ('y', 'x'))
],
Poly.coeff: [
(0, ('0', 0)),
(0, ('0', 1)),
(1, ('1', 0)),
(0, ('1', 1)),
(0, ('x', 0)),
(1, ('x', 1)),
(0, ('x', 2)),
(0, ('0', 0, 'x')),
(0, ('0', 0, 'y')),
(0, ('0', 0, 't')),
(1, ('x', 1, 'x')),
(0, ('x', 1, 'y')),
(0, ('y', 1, 'x')),
(([0,1], 'y'), ('xy', 1, 'x')),
(([0,1], 'x'), ('xy', 1, 'y')),
],
Poly.eval: [
(0, ('x', 0)),
(1, ('x', 1)),
(-1, ('x', -1)),
(1, ('x^2', -1)),
(0, ('x^2+x', -1)),
([0,1], ('x+y', 0)),
([2,1], ('x+y', 2)),
],
Poly.subs: [
(0, ('x+y', {'x':0, 'y':0})),
(5, ('x+y', {'x':2, 'y':3})),
(([0,3], 'x'), ('x+y', {'y': Poly.fromString('2x')})), # 3x = (x+y)|(y=2x)
        (([0,3], 'y'), ('x+y', {'x': Poly.fromString('2y')})), # 3y = (x+y)|(x=2y)
],
Poly.der: [
([1], ('x', 'x')),
(([], 'y'), ('x', 'y')),
([], ('y', 'x'))
],
Poly.intIndef: [
([0,1], ('1', 'x')),
(([0,1], 'y'), ('1', 'y')),
(([0, ([0,1], 'x')], 'y'), ('y', 'x')),
(([0, ([0,1], 'x')], 'y'), ('x', 'y')),
(([0, ([0,0,1], 'x')], 'y'), ('2xy', 'x')), # int(2xy)dx = x^2y
(([0, 0, ([0,1], 'x')], 'y'), ('2xy', 'y')), # int(2xy)dy = xy^2
]
}
# test cases with polynomial and polynomial argument
TEST_CASES_BINARY = {
# when substituting a polynomial for the main variable, the result will also be a polynomial
Poly.eval: [
([], ('0', 'x')),
([], ('x', '0')),
([1], ('x', '1')),
(([0,1], 'x'), ('x', 'x')),
(([0,1], 'x'), ('y', 'x')),
(([0,1], 'y'), ('x', 'y')),
(([], 't'), ('x-t', 't')), # Polynomial(0) = (x-t)(x=t)
([-1, 3, -3, 1], ('x^3', 'x-1')),
],
Poly.iadd: [
([1,0,1], ('x-1', 'x^2-x+2')), # x^2+1 = (x-1) + (x^2-x+2)
(([0,1], 't'), ('0', 't')),
(([([0,1], 'x'), 1],'y'), ('x', 'y')),
(([([0,1], 'x'), 1],'y'), ('y', 'x')),
],
Poly.isub: [
([-3,2,-1], ('x-1', 'x^2-x+2')), # -x^2+2x-3 = (x-1) - (x^2-x+2)
(([([0,1], 'x'), -1],'y'), ('x', 'y')),
],
Poly.imul: [
([], ('x', '0')),
([], ('0', 'x')),
([-1,0,1], ('x-1', 'x+1')),
(([0, ([0,1], 'x')],'y'), ('x', 'y')),
],
}
def _createPoly(spec):
    if isinstance(spec, Poly): return spec
    if isinstance(spec, str): return Poly.fromString(spec)
    if isinstance(spec, tuple): return Poly(*spec) if isinstance(spec[0], list) else Poly.fromString(*spec)
    if isinstance(spec, list) or isinstance(spec, numbers.Number): return Poly(spec)
    raise TypeError('cannot create polynomial from type %s (%s)' % (type(spec), spec))
class PolynomialTests(unittest.TestCase):
def _assertPolyStruct(self, polyStruct, p, depthPath=None, rootStruct=None, rootPoly=None):
if depthPath is None: depthPath = []
if rootStruct is None: rootStruct = polyStruct
if rootPoly is None: rootPoly = p
whileMsg = (' while checking coeff %s of %s ~ %s' %
(' of coeff '.join([str(d) for d in depthPath]), rootStruct, rootPoly))\
if depthPath else ''
coeffsExp,varNameExp = polyStruct
self.assertEqual(varNameExp, p.varName,
'testing variable in %s ~ %s%s' % (polyStruct, p, whileMsg))
self.assertEqual(len(coeffsExp), len(p.coeffs),
'testing coeff count in %s ~ %s%s (%s)' % (polyStruct, p, whileMsg, p.coeffs))
for idx,(cExp,cPoly) in enumerate(zip(coeffsExp, p.coeffs)):
if isinstance(cExp, tuple) and isinstance(cPoly, Poly):
self._assertPolyStruct(cExp, cPoly, [idx]+depthPath, rootStruct, rootPoly)
else:
self.assertEqual(cExp, cPoly,
'testing coeff %d in %s ~ %s%s' % (idx, polyStruct, p, whileMsg))
def test_creation(self):
print('testing Polynomial creation: ', end='')
for expr, polyStruct in TEST_CASES_CREATION:
try:
p = _createPoly(expr)
self._assertPolyStruct(polyStruct, p)
print('.', end='')
except AssertionError:
print('F\n', end='')
raise
except:
print('E\n', end='')
raise
print()
def test_nullaryMethods(self):
for func,inOutData in TEST_CASES_NULLARY.items():
print('testing Polynomial.%s (nullary): ' % func.__name__, end='')
for resultExp,polyRepr in inOutData:
try:
p = _createPoly(polyRepr)
f_p = func(p)
# if there is no return value, method has changed polynomial
result = p if f_p is None else f_p
if isinstance(result, Poly):
if isinstance(resultExp, list): resultExp = (resultExp, 'x')
self._assertPolyStruct(resultExp, result)
else:
self.assertEqual(resultExp, result, 'testing %s = (%s).%s()' % (resultExp, p, func.__name__))
print('.', end='')
except AssertionError:
print('F\n', end='')
raise
except:
print('E\n', end='')
raise
print()
def test_unaryMethods(self):
for func,inOutData in TEST_CASES_UNARY.items():
print('testing Polynomial.%s (unary): ' % func.__name__, end='')
for resultExp,(polyRepr,*args) in inOutData:
try:
p = _createPoly(polyRepr)
f_p = func(p, *args)
# if there is no return value, method has changed polynomial
result = p if f_p is None else f_p
if isinstance(result, Poly):
if isinstance(resultExp, list): resultExp = (resultExp, 'x')
self._assertPolyStruct(resultExp, result)
else:
self.assertEqual(resultExp, result, 'testing %s = (%s).%s(%s)' % (resultExp, p, func.__name__, args))
print('.', end='')
except AssertionError:
print('F\n', end='')
raise
except:
print('E\n', end='')
raise
print()
def test_binaryMethods(self):
for func,inOutData in TEST_CASES_BINARY.items():
print('testing Polynomial.%s (binary): ' % func.__name__, end='')
for resultExp,(polyRepr,argPolyRepr) in inOutData:
try:
p = _createPoly(polyRepr)
pa = _createPoly(argPolyRepr)
f_p = func(p, pa)
# if there is no return value, method has changed polynomial
result = p if f_p is None else f_p
if isinstance(result, Poly):
if isinstance(resultExp, list): resultExp = (resultExp, 'x')
self._assertPolyStruct(resultExp, result)
else:
self.assertEqual(resultExp, result, 'testing %s = (%s).%s(%s)' % (resultExp, p, func.__name__, pa))
print('.', end='')
except AssertionError:
print('F\n', end='')
raise
except:
print('E\n', end='')
raise
print()
# def test_special(self):
# print('testing special: ', end='')
# p = _createPoly('y')
# result = p.der('x')
# print(result)
# self._assertPolyStruct(([],'x'), result)
# print()
| 28.597122
| 107
| 0.500126
|
75eedbb37841c98028724cac224a7d1d8b791851
| 163
|
py
|
Python
|
main.py
|
ChrisEngstrom/Py-PE-1
|
9302e12604ed8abbf0cdaad3c32d974b665d7914
|
[
"MIT"
] | null | null | null |
main.py
|
ChrisEngstrom/Py-PE-1
|
9302e12604ed8abbf0cdaad3c32d974b665d7914
|
[
"MIT"
] | null | null | null |
main.py
|
ChrisEngstrom/Py-PE-1
|
9302e12604ed8abbf0cdaad3c32d974b665d7914
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
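# Project Euler problem 1 (per the repo name Py-PE-1): sum all natural numbers
# below 1000 that are multiples of 3 or 5.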
sumOfMultiples = 0
for i in range(3, 1000):
if (i % 3 == 0 or
i % 5 == 0):
sumOfMultiples += i
print(sumOfMultiples)
| 14.818182
| 27
| 0.558282
|
8bb55b3a473e5b5a5894fda5e0c0ce9459a3913d
| 3,394
|
py
|
Python
|
huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/show_off_site_backup_policy_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/show_off_site_backup_policy_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/show_off_site_backup_policy_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowOffSiteBackupPolicyResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'policy_para': 'list[GetOffSiteBackupPolicy]'
}
attribute_map = {
'policy_para': 'policy_para'
}
def __init__(self, policy_para=None):
"""ShowOffSiteBackupPolicyResponse - a model defined in huaweicloud sdk"""
super(ShowOffSiteBackupPolicyResponse, self).__init__()
self._policy_para = None
self.discriminator = None
if policy_para is not None:
self.policy_para = policy_para
@property
def policy_para(self):
"""Gets the policy_para of this ShowOffSiteBackupPolicyResponse.
        Backup policy object, including the backup type, backup retention days, target region ID and target project ID.
:return: The policy_para of this ShowOffSiteBackupPolicyResponse.
:rtype: list[GetOffSiteBackupPolicy]
"""
return self._policy_para
@policy_para.setter
def policy_para(self, policy_para):
"""Sets the policy_para of this ShowOffSiteBackupPolicyResponse.
        Backup policy object, including the backup type, backup retention days, target region ID and target project ID.
:param policy_para: The policy_para of this ShowOffSiteBackupPolicyResponse.
:type: list[GetOffSiteBackupPolicy]
"""
self._policy_para = policy_para
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowOffSiteBackupPolicyResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.513043
| 84
| 0.591043
|
aca859e2f25f12a3ab7b33021b7732ed2ce39076
| 1,751
|
py
|
Python
|
script.mrknow.urlresolver/lib/urlresolver9/plugins/uploadaf.py
|
mrknow/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 105
|
2015-11-28T00:03:11.000Z
|
2021-05-05T20:47:42.000Z
|
script.mrknow.urlresolver/lib/urlresolver9/plugins/uploadaf.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 918
|
2015-11-28T14:12:40.000Z
|
2022-03-23T20:24:49.000Z
|
script.mrknow.urlresolver/lib/urlresolver9/plugins/uploadaf.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 111
|
2015-12-01T14:06:10.000Z
|
2020-08-01T10:44:39.000Z
|
"""
    upload.af urlresolver plugin
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import captcha_lib
from lib import helpers
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
MAX_TRIES = 3
class UploadAfResolver(UrlResolver):
name = "upload.af"
domains = ["upload.af"]
    pattern = r'(?://|\.)(upload\.af)/([0-9a-zA-Z/]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
tries = 0
while tries < MAX_TRIES:
data = helpers.get_hidden(html)
data.update(captcha_lib.do_captcha(html))
html = self.net.http_POST(web_url, form_data=data).content
match = re.search('href="([^"]+)[^>]*>Download<', html, re.DOTALL)
if match:
return match.group(1)
tries += 1
raise ResolverError('Unable to resolve upload.af link. Filelink not found.')
def get_url(self, host, media_id):
return 'https://upload.af/%s' % (media_id)
| 32.425926
| 84
| 0.68418
|
7e16e37c2ebc8e64fa127ae390f890e4d3ec397a
| 7,909
|
py
|
Python
|
docs/conf.py
|
alexhouse/python-fitbark
|
8a665c8974829079781fb6e65fbe8b4fe3d1e5da
|
[
"Apache-2.0"
] | 1
|
2019-03-12T04:48:55.000Z
|
2019-03-12T04:48:55.000Z
|
docs/conf.py
|
alexhouse/python-fitbark
|
8a665c8974829079781fb6e65fbe8b4fe3d1e5da
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
alexhouse/python-fitbark
|
8a665c8974829079781fb6e65fbe8b4fe3d1e5da
|
[
"Apache-2.0"
] | 1
|
2020-07-16T07:01:51.000Z
|
2020-07-16T07:01:51.000Z
|
# -*- coding: utf-8 -*-
#
# Python-FitBark documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 14 18:51:57 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
import fitbark
project = u'Python-FitBark'
copyright = fitbark.__copyright__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = fitbark.__version__
# The full version, including alpha/beta/rc tags.
release = fitbark.__release__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Python-FitBarkdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Python-FitBark.tex', u'Python-FitBark Documentation',
u'Issac Kelly, Percy Perez, Brad Pitcher', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python-fitbark', u'Python-FitBark Documentation',
[u'Alex House'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Python-FitBark', u'Python-FitBark Documentation',
u'Alex House', 'Python-FitBark', 'FitBark API Python Client Implementation',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.020243
| 80
| 0.717158
|
ae163ee39500634db7bbae341bb00f08a9d7af26
| 3,546
|
py
|
Python
|
Paper/paper-export.py
|
dropbox/DropboxBusinessScripts
|
4f4c32ddd488b29e7fd16a40966761e70a758239
|
[
"Apache-2.0"
] | 48
|
2015-11-09T20:05:14.000Z
|
2021-12-17T23:35:12.000Z
|
Paper/paper-export.py
|
dropbox/DropboxBusinessScripts
|
4f4c32ddd488b29e7fd16a40966761e70a758239
|
[
"Apache-2.0"
] | null | null | null |
Paper/paper-export.py
|
dropbox/DropboxBusinessScripts
|
4f4c32ddd488b29e7fd16a40966761e70a758239
|
[
"Apache-2.0"
] | 24
|
2015-11-18T16:15:18.000Z
|
2022-03-04T10:35:22.000Z
|
# File: paper-export.py
# Export Paper Docs Tool
# Version 1.0
# Author: Marcel Ribas - @macribas
# Date: 3/31/2021
# Python script to export Paper Docs from a Dropbox account. It can export in either HTML or Markdown.
# It only works in accounts that have Paper In the FileSystem (PiFS). Script checks the account for that condition.
# Does not work recursively, on purpose. You need to select the folders where your Paper docs are. Saves files in the working local folder.
# Once you are comfortable with running this, then you can modify it to work recursively.
# Your API key needs to have Full Dropbox access and files.content.read scope.
import dropbox
import os
# Dropbox
try:
dbx = dropbox.Dropbox('<YOUR_API_KEY_HERE>')
# check if account has PiFS
features = dbx.users_features_get_values([dropbox.users.UserFeature.paper_as_files])
pifs = features.values[0].get_paper_as_files().get_enabled()
except dropbox.exceptions.AuthError:
print("It was not possible to connect to your Dropbox account. Please try another token.")
print("You need the files.content.read scope")
quit()
if not pifs:
print("This account does not have Paper In The FileSystem (PiFS) enabled")
quit()
while True:
path = input("Enter the Dropbox path for your Paper docs (<RETURN> for the root folder): ")
if path.startswith('/') or not path:
        break
else:
print("Invalid folder name, please try again.")
while True:
go_on = input("This process might take a while, depending on the size of the folder you are traversing. Continue (Y or N)? ")
if go_on.upper() == 'Y':
        break
elif go_on.upper() == 'N':
quit()
print("Processing")
# Check if folder exists
try:
folder = dbx.files_list_folder(path)
cursor = folder.cursor
except dropbox.exceptions.DropboxException:
print("Could not find folder {0}".format(path))
quit()
# if file is paper doc, put it in list
paper_docs = [file.path_display for file in folder.entries
              if isinstance(file, dropbox.files.FileMetadata)
              and not file.is_downloadable
              and os.path.splitext(file.path_lower)[1] == ".paper"]
while folder.has_more:
print("Still working")
folder = dbx.files_list_folder_continue(cursor)
cursor = folder.cursor
    paper_docs += [file.path_display for file in folder.entries
                   if isinstance(file, dropbox.files.FileMetadata)
                   and not file.is_downloadable
                   and os.path.splitext(file.path_lower)[1] == ".paper"]
size = len(paper_docs)
if size == 0:
print("You don't have any Paper docs in this path. ")
quit()
else:
if path:
folder_name = path
else:
folder_name = "the root folder"
print("You have {0} Paper docs in {1}.".format(size,folder_name))
while True:
export = input("Do you want to export these to your computer? (Y/N) ")
if export.upper() == 'Y':
        break
elif export.upper() == 'N':
quit()
print("These Paper docs will be exported to the folder where you are running this script from.")
while True:
    fmt = input("Which format do you want to export as? (1) HTML or (2) Markdown? (3) to quit: ")
    if fmt == '1':
        export_as = ("html", ".html")
        break
    elif fmt == '2':
        export_as = ("markdown", ".md")
        break
    elif fmt == '3':
        quit()
    else:
        print("Invalid format")
for paper_doc in paper_docs:
folder, filename = os.path.split(paper_doc)
basename, ext = os.path.splitext(filename)
print("Exporting {0} as {1}".format(paper_doc, basename + export_as[1]))
with open(basename + export_as[1], "wb") as f:
metadata, res = dbx.files_export(path=paper_doc,export_format=export_as[0])
f.write(res.content)
print("Export completed!")
| 32.833333
| 189
| 0.723632
|
7dcf66207cbc23910b7a4c44d7d78ced7f232cef
| 1,217
|
py
|
Python
|
dataloader/mnist.py
|
lucamocerino/Binary-Neural-Networks-PyTorch-1.0
|
aa62f5449e4f64bc821aea4d9921572e8dca8037
|
[
"MIT"
] | 22
|
2020-09-15T12:59:49.000Z
|
2022-02-12T15:56:32.000Z
|
dataloader/mnist.py
|
lucamocerino/Binary-Neural-Networks-PyTorch-1.0
|
aa62f5449e4f64bc821aea4d9921572e8dca8037
|
[
"MIT"
] | 3
|
2021-08-07T15:50:13.000Z
|
2022-01-27T09:46:19.000Z
|
dataloader/mnist.py
|
lucamocerino/Binary-Neural-Networks-PyTorch-1.0
|
aa62f5449e4f64bc821aea4d9921572e8dca8037
|
[
"MIT"
] | 2
|
2021-07-19T06:34:55.000Z
|
2022-03-22T18:06:03.000Z
|
from torch.utils.data import DataLoader
from os.path import join
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Resize, Normalize, ToTensor
def load_train_data(batch_size=128, sampler=None):
cuda = True
loader_kwargs = {'num_workers': 0, 'pin_memory': True} if cuda else {}
train_loader = DataLoader(
MNIST(join('datasets', 'mnist'), train=True, download=True,
transform=Compose([
Resize((28, 28)),
ToTensor(),
                  Normalize((0.1307,), (0.3081,)),  # canonical MNIST mean/std
])),
batch_size=batch_size, shuffle=True, **loader_kwargs)
return train_loader
def load_test_data(batch_size=1000, sampler=None):
cuda = True
loader_kwargs = {'num_workers': 0, 'pin_memory': True} if cuda else {}
test_loader = DataLoader(
MNIST(join('datasets', 'mnist'), train=False, download=True,
transform=Compose([
Resize((28, 28)),
ToTensor(),
                  Normalize((0.1307,), (0.3081,)),  # canonical MNIST mean/std
])),
        batch_size=batch_size, shuffle=False, sampler=sampler, **loader_kwargs)
return test_loader
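# A minimal usage sketch (assumes MNIST can be downloaded into ./datasets/mnist):
#   train_loader = load_train_data(batch_size=64)
#   images, labels = next(iter(train_loader))  # images: (64, 1, 28, 28)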
| 31.205128
| 79
| 0.590797
|
4f5db26a240b58b54d7c586ddff346eac69f1c1b
| 1,192
|
py
|
Python
|
src/front_end/streamlit_helpers.py
|
Calychas/relocation-recommendation
|
b6a9c5d2df49e1656af76a8402c703be52546372
|
[
"MIT"
] | null | null | null |
src/front_end/streamlit_helpers.py
|
Calychas/relocation-recommendation
|
b6a9c5d2df49e1656af76a8402c703be52546372
|
[
"MIT"
] | 2
|
2021-11-05T11:52:10.000Z
|
2022-01-27T23:16:53.000Z
|
src/front_end/streamlit_helpers.py
|
Calychas/relocation-recommendation
|
b6a9c5d2df49e1656af76a8402c703be52546372
|
[
"MIT"
] | null | null | null |
import pydeck as pdk
selectbox_options = {
"price": "Mean price of offers",
"price_per_m": "Mean price per m2 of offers",
"area": "Mean area of offers",
"count": "Number of offers"
}
cities_selectbox_options = {
"wroclaw": "Wrocław",
"gdansk": "Gdańsk",
"warszawa": "Warszawa",
"krakow": "Kraków",
"poznan": "Poznań"
}
radio_buttons_options = [
(("x", "y", "z"), "Road Network + GTFS data + Functional data"),
(("y", "z"), "Functional data + GTFS data"),
(("x", "z"), "GTFS data + Road Network"),
(("x", "y"), "Functional data + Road Network"),
(("x",), "Only Road Network"),
(("y",), "Only GTFS data"),
(("z",), "Only Functional data")
]
VIEW_STATE_ZOOM = 10
LINE_WIDTH = 0.3
LINE_COLOR = [0, 0, 0]
OPACITY = 0.5
color_mapping = {
"aeroway": "#F280BF",
"amenity": "#FF8000",
"building": "#908b86",
"healthcare": "#CC0000",
"historic": "#A6A64D",
"landuse": "#808000",
"leisure": "#80E600",
"military": "#4B5320",
"natural": "#33CC33",
"office": "#b3aca7",
"shop": "#804DFF",
"sport": "#D94D4D",
"tourism": "#E6BF80",
"water": "#0080FF",
"waterway": "#80BFFF"
}
| 23.372549
| 68
| 0.547819
|
9632a1993e43759f0e5dd5fce8d6da2bff64347e
| 13,930
|
py
|
Python
|
profitability.py
|
slimcoin-project/slm-tools
|
4649327cbf80c747f1e193852f23aeeae64a72f8
|
[
"MIT"
] | null | null | null |
profitability.py
|
slimcoin-project/slm-tools
|
4649327cbf80c747f1e193852f23aeeae64a72f8
|
[
"MIT"
] | null | null | null |
profitability.py
|
slimcoin-project/slm-tools
|
4649327cbf80c747f1e193852f23aeeae64a72f8
|
[
"MIT"
] | null | null | null |
# this script uses parts of the original Slimcoin code (/src/simulations/pob_difficulty.py)
# (c) The Slimcoin Developers 2014-19
# MIT License
from random import random as rand_num
import math, argparse, datetime
#constants for block types
# do we also need to simulate PoS blocks? (their difficulty could probably be ignored)
# TODO: Reward calculation
# TODO: Effect of participation on short-term profitability - should only be relevant if it changes sharply.
POW = 0
POB = 1
POW_PROBABILITY = 0.8
POB_TARGET = 3
BURN_DECAY_RATE = 1.00000198 # original Slimcoin value.
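# Worked example: effective burnt coins shrink by BURN_DECAY_RATE per PoW
# block, so they halve after ln(2)/ln(1.00000198), i.e. roughly 350,000 PoW
# blocks.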
POWPOB_BLOCKS_PER_DAY = 96
GENESIS_BLOCK_DATE = datetime.date(2014, 5, 28) # day of SLM inception
# default range for burn values
DEFAULT_POB_RANGE = 10000
class CBlock:
total_coins_burned = 0
ebc = 0 # nEffectiveBurnCoins
blocks = []
def __init__(self, block_type, coins_burned=0):
# be careful that blocks MUST be generated ALWAYS in strict sequential order for this to work.
# self.coins_burned = 10000 * rand_num() # replaced, as we want to know what happens with different values.
self.blockheight = len(CBlock.blocks)
self.coins_burned = coins_burned
self.type = block_type
self.diff = calc_PoB_difficulty(self.coins_burned)
CBlock.total_coins_burned += self.coins_burned
self.total_coins_until_now = CBlock.total_coins_burned
# Effective Burn Coins: burnt coins only decay on PoW blocks.
# original: pindexPrev->nEffectiveBurnCoins / BURN_DECAY_RATE) + nBurnedCoins
if self.type == POW:
CBlock.ebc /= BURN_DECAY_RATE
CBlock.ebc += self.coins_burned
self.ebc = CBlock.ebc
def print_self(self):
print("Block: %d" % self.blockheight)
if self.type == POW:
print("Type: POW")
elif self.type == POB:
print("Type: POB")
else:
print("Type: Unknown")
print("Coins Burned: %f" % self.coins_burned)
print("Difficulty: %d" % self.diff)
print("nEffectiveBurnCoins: %f" % self.ebc)
print("Total Coins Burned: %f" % self.total_coins_until_now)
print("-" * 30)
## tool functions for CBlock class
def PoW_blocks_back(): # modified to take "blocks" value (should not be global).
# calculates how many blocks back from now were PoW.
blocks = CBlock.blocks
nPoW = 0
i = -1
if len(blocks) == 0:
return nPoW
while True:
if i == -1 * len(blocks) - 1 or blocks[i].type == POB:
break
nPoW += 1
i -= 1
return nPoW
def logistic_curve(x):
return (2 / (1 + math.e ** (-0.2 * x))) - 1
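# logistic_curve is antisymmetric and saturates at +/-1, e.g.:
# logistic_curve(0) == 0.0, logistic_curve(3) ~= 0.291, logistic_curve(-3) ~= -0.291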
def calc_PoB_difficulty(cur_blk_coins_burned):
#genesis block
blocks = CBlock.blocks
total_coins_burned = CBlock.total_coins_burned
if len(blocks) == 0:
return 10000
nPoW = PoW_blocks_back()
offset = POB_TARGET - nPoW
    #offset > 0, increase diff
#offset < 0, decrease diff
#offset == 0, do nothing
adjust = logistic_curve(offset)
# TODO: Find out if 0 is the really correct value when still no coins were burnt (near genesis block).
if total_coins_burned > 0:
burn_adjust = cur_blk_coins_burned / total_coins_burned
else:
burn_adjust = 0
last_diff = blocks[-1].diff
new_diff = last_diff * (1 + adjust - burn_adjust)
return new_diff
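# Example reading of the update rule: with exactly POB_TARGET (3) PoW blocks
# since the last PoB block, offset == 0 and adjust == logistic_curve(0) == 0,
# so difficulty only drops by this block's share of all coins burnt so far
# (burn_adjust). Fewer PoW blocks back gives a positive offset and therefore a
# higher difficulty.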
## small new functions
def reset_blocks():
CBlock.blocks = []
CBlock.ebc = 0
CBlock.total_coins_burned = 0
def get_days_since_inception():
today = datetime.date.today()
timedelta = today - GENESIS_BLOCK_DATE
return timedelta.days
def randomize_burns(avg_coins_burned, pob_range=DEFAULT_POB_RANGE):
# this checks the last 1000 burn transactions and only allows a high
# burn tx when the average of the last blocks was lower than avg_coins_burned
blockheight = len(CBlock.blocks)
begin_sequence = max(0, blockheight - 1000) # cannot become negative
last_1000_burns = sum([ block.coins_burned for block in CBlock.blocks[begin_sequence:blockheight] ])
rawburn = max(0, avg_coins_burned + (rand_num()-0.5) * pob_range)
if last_1000_burns / (blockheight - begin_sequence) > avg_coins_burned:
if rawburn > avg_coins_burned:
burn_value = 0
else:
burn_value = rawburn
else:
burn_value = rawburn
if burn_value > 0: # for debugging
print(blockheight, burn_value)
return burn_value
def print_intro(p):
print("Calculating probabilities for the following scenario:")
print("- Burnt amount:", p.burnt_amount)
print("- nEffectiveBurnCoins:", p.neffectiveburncoins)
if p.days_before and not p.blocks_before:
print("- Days before burn transaction:", p.days_before)
if p.days_after and not p.blocks_after:
print("- Days generated after burn transaction:", p.days_after)
if p.blocks_before:
print("- Blocks generated before burn transaction:", p.blocks_before)
if p.blocks_after:
print("- Blocks generated after burn transaction:", p.blocks_after)
if p.average_burn_rate:
print("- Average burn rate: %f coins per PoB/PoW block." % p. average_burn_rate)
if p.burn_event:
print("- Another burn event of %f coins" % p.burn_event)
if p.burn_event_blocks:
print(" at %f blocks in the future" % p.burn_event_blocks)
## main loops
def gen_fake_blocks(nBlocks, avg_coins_burned=0, pob_range=None, randomize=False, verbose=False, reset=False):
    # generate pseudo blocks randomly to fill the blocks list
# new variable avg_coins_burned, which is the value passed to each block.
if reset:
reset_blocks()
blocks = CBlock.blocks
# genesis block
# only added if this is the first function call
if len(blocks) == 0:
blocks.append(CBlock(POW))
for n in range(nBlocks):
multi = blocks[-1].diff / blocks[0].diff
if randomize:
if not pob_range:
pob_range = DEFAULT_POB_RANGE
coins_burned = randomize_burns(avg_coins_burned, pob_range)
else:
coins_burned = avg_coins_burned
if rand_num() < multi * POW_PROBABILITY or blocks[-1].type == POB: #make a PoW block
blocks.append(CBlock(POW, coins_burned))
else: #make a PoB block
blocks.append(CBlock(POB, coins_burned))
if verbose:
for block in blocks:
block.print_self()
def create_block_sequence(blocksbefore=0, ebc=0, blocksafter=0, ownburn=0, otherburn=0, otherburnblock=None, avg_coins_burned=None, randomize=True, reset=True, verbose=False, pob_range=None):
if reset:
reset_blocks()
# first, generate all blocks until "now".
# calculate average burn from ebc (nEffectiveBurnCoins) value.
# Note: Real proportion of PoB/PoW blocks seems to be around 0.77, not 0.8.
    avg_burn_before = 0  # defined up front so the avg_coins_burned fallback below cannot hit a NameError
    if blocksbefore > 0:
est_pow_blocks_before = blocksbefore * 0.77 # * POW_PROBABILITY # estimated value, real value comes after avg burn.
avg_decay = BURN_DECAY_RATE ** (est_pow_blocks_before / 2)
avg_burn_before = (ebc / blocksbefore) * avg_decay
gen_fake_blocks(blocksbefore, avg_coins_burned=avg_burn_before)
# uncomment following lines for debugging:
# print(avg_burn_before)
# print(est_pow_blocks_before)
# powbl = len([b for b in CBlock.blocks if b.type == POW])
# allbl = len(CBlock.blocks)
# print("Real proportion", powbl / allbl)
# print(allbl, blocksbefore)
# if avg_coins_burned is not given, then we use the value derived from nEffectiveBurnCoins we used for older blocks.
if not avg_coins_burned:
if avg_burn_before:
avg_coins_burned = avg_burn_before
else:
avg_coins_burned = 0
gen_fake_blocks(1, avg_coins_burned=ownburn + avg_coins_burned, randomize=randomize, pob_range=pob_range)
# blocks after: depend on otherburn/otherburnblock values
if otherburn:
if otherburnblock:
gen_fake_blocks(otherburnblock - 1, avg_coins_burned=avg_coins_burned)
blocksafter -= otherburnblock
gen_fake_blocks(1, avg_coins_burned=avg_coins_burned + otherburn)
gen_fake_blocks(blocksafter, avg_coins_burned=avg_coins_burned, randomize=randomize, pob_range=pob_range)
if verbose:
for block in CBlock.blocks:
block.print_self()
def calc_probabilities(ownburn_blockheight, ownburn, participation, verbose=False, printresult=True):
# loop from "now" on, past blocks are not needed in this loop.
expected_probabilities = []
pow_blocks_after_burn = 0
for block in CBlock.blocks[ownburn_blockheight:]:
if block.type == POB: # you don't get PoB rewards for PoW blocks.
total_ebc = block.ebc * participation
own_ebc = ownburn / (BURN_DECAY_RATE ** pow_blocks_after_burn)
probability = own_ebc / total_ebc
expected_probabilities.append(probability)
elif block.type == POW:
pow_blocks_after_burn += 1 #only pow blocks lead to decay.
if verbose:
block.print_self()
if block.type == POB:
print("Own Effective Burnt Coins: %f" % own_ebc)
print("Real Effective Burnt Coins with participation %f: %f" % (participation, total_ebc))
print("Block find probability: %f" % probability)
if printresult:
print("=" * 30)
initial_ebc = CBlock.blocks[ownburn_blockheight-1].ebc
probsum = sum(expected_probabilities)
pobblocks = len(expected_probabilities) # should give the correct number after the loop
print("Result for %f burnt coins at %f nEffectiveBurnCoins and %f mint participation" % (ownburn, initial_ebc, participation))
print("Expected found blocks (from %i PoB blocks)\n(sum of all probabilities): %f" % (pobblocks, probsum))
return expected_probabilities
def get_probability(blocksbefore=0, ebc=0, blocksafter=0, daysbefore=None, daysafter=None, ownburn=0, otherburn=0, otherburnblock=None, avg_coins_burned=None, randomize=True, reset=True, verbose=False, pob_range=None, participation=0.2, printresult=True):
if not blocksbefore:
blocksbefore = POWPOB_BLOCKS_PER_DAY * daysbefore
if not blocksafter:
blocksafter = POWPOB_BLOCKS_PER_DAY * daysafter
    create_block_sequence(blocksbefore, ebc, blocksafter, ownburn, otherburn, otherburnblock, avg_coins_burned, randomize, reset=reset, verbose=False, pob_range=pob_range)
return calc_probabilities(ownburn_blockheight=blocksbefore + 1, ownburn=ownburn, participation=participation, verbose=verbose, printresult=printresult)
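# Example call (hypothetical values): expected blocks for a 500-coin burn made at
# 120000 nEffectiveBurnCoins, approximating history with 700 days of blocks and
# simulating one year after the burn:
#
#   probs = get_probability(daysbefore=700, ebc=120000, daysafter=365,
#                           ownburn=500, participation=0.25)
#   print(sum(probs))  # expected number of PoB blocks found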
def cli():
helptext_daysbefore = 'Generate blocks for X days before the burn transaction. Default is since the time of the coin inception (%s)' % GENESIS_BLOCK_DATE.strftime("%Y-%m-%d")
days_since_inception = get_days_since_inception()
parser = argparse.ArgumentParser(description="Profitability calculator.")
parser.add_argument('burnt_amount', help='Amount of the burn transaction.', type=float)
parser.add_argument('neffectiveburncoins', help='Effective burn coins at the moment of the burn.', type=float)
parser.add_argument('-da', '--days-after', help='Generate blocks for X days after burn transaction. Default is one year (365 days).', type=int, default=365)
parser.add_argument('-db', '--days-before', help=helptext_daysbefore, type=int, default=days_since_inception)
# advanced arguments
parser.add_argument('-bb', '--blocks-before', help='Generate X PoW/PoB blocks before the burn transaction. (Note: PoS blocks are ignored.)', type=int)
parser.add_argument('-ba', '--blocks-after', help='Generate X PoW/PoB blocks after the burn transaction.', type=int)
parser.add_argument('-e', '--burn-event', help='Add one other significant burn transaction with amount X in the future. This allows to calculate the impact of a large burn transaction.', type=float)
parser.add_argument('-eb', '--burn-event-blocks', help='Blocks in the future the burn event will occur. By default, it is the next block after the own burn transaction.', type=int)
parser.add_argument('-p', '--participation', help='Burning participation. Part of the coins which are effectively participating in burning (values from 0 to 1, default: 0.25).', type=float, default=0.25)
parser.add_argument('-a', '--average-burn-rate', help='Average burning rate per PoW/PoB block. As a default, the average of the blocks preceding the burn transaction (derived from EffectiveBurnCoins) will be used.', type=float)
parser.add_argument('-r', '--randomize', help='Add some randomness to the average burn transactions.', action='store_true')
parser.add_argument('-g', '--range', help='Range for the randomness, in coins.', type=float)
parser.add_argument('-v', '--verbose', help='Verbose mode. Will show all blocks with data.', action='store_true')
parser.add_argument('-s', '--silent', help='Silent mode. Will only return probabilities (to use in scripts).', action='store_true')
return parser.parse_args()
if __name__ == "__main__":
p = cli()
    if not p.silent:
        printresult = True
        print_intro(p)
    else:
        printresult = False
get_probability(blocksbefore=p.blocks_before, ebc=p.neffectiveburncoins, blocksafter=p.blocks_after, daysbefore=p.days_before, daysafter=p.days_after, ownburn=p.burnt_amount, otherburn=p.burn_event, otherburnblock=p.burn_event_blocks, avg_coins_burned=p.average_burn_rate, randomize=p.randomize, verbose=p.verbose, pob_range=p.range, participation=p.participation, printresult=printresult)
| 38.694444
| 393
| 0.684853
|
7b18c6191087427e04f05d2371ff5c2872c67fce
| 310
|
py
|
Python
|
spider/settings.py
|
JamesZBL/zhilian_spider
|
613879e59b0e42f4a95d41cc91c369330bf8f9ac
|
[
"Apache-2.0"
] | 61
|
2018-02-13T09:12:07.000Z
|
2022-03-01T03:38:37.000Z
|
spider/settings.py
|
JamesZBL/zhilian_spider
|
613879e59b0e42f4a95d41cc91c369330bf8f9ac
|
[
"Apache-2.0"
] | 1
|
2018-05-21T08:54:35.000Z
|
2018-05-21T08:54:35.000Z
|
spider/settings.py
|
JamesZBL/zhilian_spider
|
613879e59b0e42f4a95d41cc91c369330bf8f9ac
|
[
"Apache-2.0"
] | 8
|
2018-05-10T05:59:39.000Z
|
2022-01-30T07:12:09.000Z
|
# -*- coding:utf-8 -*-
"""
@author:James
Created on:18-2-12 19:43
"""
# Search result page
URL_RESULT = 'http://sou.zhaopin.com/jobs/searchresult.ashx'
# Search request parameter names
# 1. Job keyword
KEY_KEYWORD = 'kw'
# 2. Location
KEY_AREA = 'jl'
# 3. Page number
KYE_PAGENUM = 'p'
# Keyword value
VALUE_KEYWORD = 'Java'
# Location value ('全国' means nationwide)
VALUE_AREA = '全国'
# Page count limit
PAGE_LIMIT = 1
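# Usage sketch (hypothetical; assumes the spider module that imports these
# settings performs the HTTP request, e.g. with the ``requests`` library):
#
#   import requests
#   params = {KEY_KEYWORD: VALUE_KEYWORD, KEY_AREA: VALUE_AREA, KYE_PAGENUM: 1}
#   response = requests.get(URL_RESULT, params=params)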
| 11.071429
| 60
| 0.632258
|
07066fa386a9c539d21e191421d029d7409e3270
| 1,037
|
py
|
Python
|
orm_sqlfan/libreria/migrations/0003_auto_20191125_0515.py
|
rulotr/djangorm_sqlfan
|
4bcadd1459e5a39584bb5ad8bafaaf3993324f6a
|
[
"MIT"
] | 2
|
2021-09-29T01:08:56.000Z
|
2022-02-14T03:34:37.000Z
|
orm_sqlfan/libreria/migrations/0003_auto_20191125_0515.py
|
rulotr/djangorm_sqlfan
|
4bcadd1459e5a39584bb5ad8bafaaf3993324f6a
|
[
"MIT"
] | 4
|
2020-02-12T02:52:19.000Z
|
2021-04-08T20:46:05.000Z
|
orm_sqlfan/libreria/migrations/0003_auto_20191125_0515.py
|
rulotr/djangorm_sqlfan
|
4bcadd1459e5a39584bb5ad8bafaaf3993324f6a
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-11-25 05:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('libreria', '0002_libro'),
]
operations = [
migrations.AlterField(
model_name='libro',
name='desc_corta',
field=models.CharField(default='Sin reseña', max_length=2000),
),
migrations.AlterField(
model_name='libro',
name='fecha_publicacion',
field=models.DateField(null=True),
),
migrations.AlterField(
model_name='libro',
name='imagen',
field=models.URLField(max_length=85, null=True),
),
migrations.AlterField(
model_name='libro',
name='paginas',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='libro',
name='titulo',
field=models.CharField(blank=True, max_length=70),
),
]
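# Applied with Django's migration runner (hypothetical project commands):
#
#   python manage.py migrate libreria 0003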
| 26.589744
| 74
| 0.550627
|
45256f1b6aa8ee4b8a135f61bbf647d92e22babc
| 4,822
|
py
|
Python
|
UserManagement/forms.py
|
SkillSmart/ConferenceManagementSystem
|
43af08260f321d1d506755da5c1b6ce1cf95fc42
|
[
"MIT"
] | null | null | null |
UserManagement/forms.py
|
SkillSmart/ConferenceManagementSystem
|
43af08260f321d1d506755da5c1b6ce1cf95fc42
|
[
"MIT"
] | null | null | null |
UserManagement/forms.py
|
SkillSmart/ConferenceManagementSystem
|
43af08260f321d1d506755da5c1b6ce1cf95fc42
|
[
"MIT"
] | null | null | null |
from django.forms import ModelForm, Form
from django.contrib.auth.models import User
from django.forms.models import formset_factory, modelform_factory, inlineformset_factory
# Crispy Imports
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Field
# Profile Models
from Authentication.forms import UserModelForm
from .models import ExpertProfile, TeamProfile, StudentProfile, Program
# Expert Attribute Models
from .models import MediationExperience, NegotiationExperience
# Student Attribute Models
from .models import Internship, Course, Award
# # Setting up the forms
class UserProfileForm(ModelForm):
class Meta:
model = User
fields = ['first_name', 'last_name', 'email']
class ExpertModelForm(ModelForm):
class Meta:
model = ExpertProfile
exclude = ['user']
class TeamModelForm(ModelForm):
class Meta:
model = TeamProfile
exclude = ['member_a', 'member_b', 'coach_a', 'coach_b']
fields = []
class StudentModelForm(ModelForm):
class Meta:
model = StudentProfile
exclude = ['attendent', 'mediation_courses', 'negotiation_courses']
# Attribute Forms Student Attendents
class InternshipForm(ModelForm):
class Meta:
model = Internship
exclude = []
class CourseForm(ModelForm):
class Meta:
model = Course
exclude=[]
class AwardForm(ModelForm):
class Meta:
model = Award
exclude = []
#------------ EXPERT PROFILE FORMS ------------------------
class MediationExperienceForm(ModelForm):
class Meta:
model = MediationExperience
fields = ['profession', 'duration', 'cases', 'description', 'priorClients', 'placesWorked']
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_tag = False  # form_tag belongs on the helper, not the form
class NegotiationExperienceForm(ModelForm):
class Meta:
model = NegotiationExperience
fields = ['profession', 'duration', 'cases', 'description', 'priorClients', 'placesWorked']
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_tag = False  # set on the helper so crispy omits the <form> tag
        self.helper.layout = Layout()
#EXPERT _ FORMSETS AND INLINE FORMSETS
ProgramFormSet = inlineformset_factory(ExpertProfile, Program, exclude=[], extra=2)
class ProgramFormSet_helper(FormHelper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.form_tag = False
self.layout = Layout(
Div(
Div(
'subject',
'startDate',
'endDate',
'institution',
Field('priorClients', rows=3),
Field('placesWorked', rows=3),
css_class='col-sm-4'
),
Div(
'title',
'description',
css_class='col-sm-8',
),
css_class='row experienceItem'
)
)
self.render_required_fields = False
NegotiationExperienceFormset = inlineformset_factory(ExpertProfile, NegotiationExperience, extra=2, exclude=[])
class NegotiationExperienceFormset_helper(FormHelper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.form_tag = False
self.layout = Layout(
Div(
Div(
'duration',
'cases',
Field('priorClients', rows=3),
Field('placesWorked', rows=3),
css_class='col-sm-4'
),
Div(
'profession',
'description',
css_class='col-sm-8',
),
css_class='row experienceItem'
)
)
self.render_required_fields = False
MediationExperienceFormset = inlineformset_factory(ExpertProfile, MediationExperience, extra=2, exclude=[])
class MediationExperienceFormset_helper(FormHelper):
def __init__(self, *args, **kwargs):
super(MediationExperienceFormset_helper, self).__init__(*args, **kwargs)
self.form_tag = False
self.layout = Layout(
Div(
Div(
'duration',
'cases',
Field('priorClients', rows=3),
Field('placesWorked', rows=3),
css_class='col-sm-4'
),
Div(
'profession',
'description',
css_class='col-sm-8',
),
css_class='row experienceItem'
)
)
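# Usage sketch (hypothetical view code; assumes an ``expert`` ExpertProfile
# instance and a template that renders the formset through its helper):
#
#   formset = MediationExperienceFormset(request.POST or None, instance=expert)
#   helper = MediationExperienceFormset_helper()
#   if formset.is_valid():
#       formset.save()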
| 31.51634
| 111
| 0.568851
|
9f23c368a5e61e56857f038c087404146c627c26
| 25,784
|
py
|
Python
|
src/interface/Python/paramonte/_TabularFileContents.py
|
cdslaborg/paramonte
|
08b5ee74e1dc9045fca8fd7d94e3bbc979a3c425
|
[
"MIT"
] | 158
|
2020-01-13T06:40:27.000Z
|
2022-03-28T03:12:03.000Z
|
src/interface/Python/paramonte/_TabularFileContents.py
|
cdslaborg/paramonte
|
08b5ee74e1dc9045fca8fd7d94e3bbc979a3c425
|
[
"MIT"
] | 12
|
2020-10-31T22:46:10.000Z
|
2022-03-17T19:57:06.000Z
|
src/interface/Python/paramonte/_TabularFileContents.py
|
cdslaborg/paramonte
|
08b5ee74e1dc9045fca8fd7d94e3bbc979a3c425
|
[
"MIT"
] | 18
|
2020-07-04T23:45:21.000Z
|
2021-09-14T06:52:07.000Z
|
####################################################################################################################################
####################################################################################################################################
####
#### MIT License
####
#### ParaMonte: plain powerful parallel Monte Carlo library.
####
#### Copyright (C) 2012-present, The Computational Data Science Lab
####
#### This file is part of the ParaMonte library.
####
#### Permission is hereby granted, free of charge, to any person obtaining a
#### copy of this software and associated documentation files (the "Software"),
#### to deal in the Software without restriction, including without limitation
#### the rights to use, copy, modify, merge, publish, distribute, sublicense,
#### and/or sell copies of the Software, and to permit persons to whom the
#### Software is furnished to do so, subject to the following conditions:
####
#### The above copyright notice and this permission notice shall be
#### included in all copies or substantial portions of the Software.
####
#### THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#### EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#### MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#### IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#### DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
#### OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
#### OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
####
#### ACKNOWLEDGMENT
####
#### ParaMonte is an honor-ware and its currency is acknowledgment and citations.
#### As per the ParaMonte library license agreement terms, if you use any parts of
#### this library for any purposes, kindly acknowledge the use of ParaMonte in your
#### work (education/research/industry/development/...) by citing the ParaMonte
#### library as described on this page:
####
#### https://github.com/cdslaborg/paramonte/blob/main/ACKNOWLEDGMENT.md
####
####################################################################################################################################
####################################################################################################################################
import numpy as np
import _paramonte as pm
import _CorCovMat as ccm
from _AutoCorr import AutoCorr
from _dfutils import getMaxLogFunc
from _OutputFileContents import OutputFileContents
from paramonte.vis.LineScatterPlot import LineScatterPlot
from paramonte.vis.DensityPlot import DensityPlot
from paramonte.vis.GridPlot import GridPlot
Struct = pm.Struct
newline = pm.newline
####################################################################################################################################
#### TabularFileContents class
####################################################################################################################################
class TabularFileContents(OutputFileContents):
"""
This is the **TabularFileContents** class for generating instances
of the ParaMonte tabular output contents. This class is NOT meant to
be directly accessed by the ParaMonte library users. It is internally
used by the ParaMonte library to parse the tabular contents of the
output files generated by the ParaMonte sampler routines. For example,
the ParaDRAM sampler class makes calls to this class via its
``readSample()`` or ``readChain()`` or ``readMarkovChain()``
or ``readProgress()`` methods to return a list of objects
of class ``TabularFileContents``.
**Parameters**
file
The full path to the file containing the sample/chain.
fileType
A string containing the type of the file to be parsed.
Current options include but are not limited to:
``sample``, ``chain``, ``markovChain``, ``progress``
delimiter
The delimiter used in the sample/chain file, which
must be provided by the user.
methodName
A string representing the name of the ParaMonte sampler used
to call the constructor of the ``TabularFileContents`` class.
parseContents
If set to ``True``, the contents of the file will be parsed and
stored in a component of the object named ``contents``.
The default value is ``True``.
reportEnabled
A logical input parameter indicating whether the ParaMonte
automatic guidelines to the standard output should be provided
or not. The default value is ``True``.
**Attributes**
file
The full path to the file containing the sample/chain.
delimiter
The delimiter used in the sample/chain file, which
must be provided by the user.
ndim
The number of dimensions of the domain of the objective
function from which the sample has been drawn.
count
The number of points (states) in the sample/chain file.
This is essentially, the number of rows in the file
minus one (representing the header line).
plot
A structure containing the graphics tools for the
visualization of the contents of the file.
df
If the input file contents is structured in a format that
could be read as a dataframe, then the contents of the file
will be stored in the form of a pandas-library DataFrame
in this property (hence called ``df``).
contents
If the input file contents is structured in the form of columns,
then a property named ``contents`` is also added to the object.
Each component of contents will named via the header of the file
and will contain data from the corresponding column of the file.
**Returns**
tabularFileContents
An object of class ``TabularFileContents``.
----------------------------------------------------------------------
"""
def __init__( self
, file
, fileType
, delimiter
, methodName
, parseContents = True
, reportEnabled = True
):
super().__init__(file, methodName, reportEnabled)
markovChainRequested = fileType=="markovChain"
self._isProgressFile = "progress"==fileType
self._sampleLogFuncColName = "" if self._isProgressFile else "SampleLogFunc"
#if "sample"==fileType:
# fileSuffix = "sample"
#elif fileType=="chain" or markovChainRequested:
# fileSuffix = "chain"
#elif self._isProgressFile:
# fileSuffix = "progress"
#else:
# pm.abort( msg = "Internal error occurred. The input fileType is not recognized.\n"
# + "Please report this error at:\n\n"
# + " " + pm.website.github.issues.url
# , methodName = self._methodName
# , marginTop = 1
# , marginBot = 1
# )
if fileType!="sample" and fileType!="chain" and not (self._isProgressFile or markovChainRequested):
pm.abort( msg = "Internal error occurred. The input fileType is not recognized.\n"
+ "Please report this error at:\n\n"
+ " " + pm.website.github.issues.url
, methodName = self._methodName
, marginTop = 1
, marginBot = 1
)
############################################################################################################################
#### data
############################################################################################################################
self.delimiter = delimiter
import pandas as pd
self.df = pd.read_csv ( self.file
, delimiter = self.delimiter
, header = 0
)
if self._isProgressFile:
self._offset = -1
else:
self._offset = list(self.df.columns).index(self._sampleLogFuncColName) + 1 # index of the first variable
self.ndim = len(self.df.columns) - self._offset
self.count = len(self.df.iloc[:,1])
self.ncol = len(self.df.iloc[1,:])
if markovChainRequested:
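            # Expand the compact chain into the full Markov chain: the column just
            # before SampleLogFunc holds each row's weight, and the cumulative sum
            # of the weights gives each row's end position in the expanded chain,
            # so row i is repeated weight[i] times.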
CumSumWeight = np.cumsum(self.df.iloc[:,self._offset-2].values, dtype=np.int32)
if CumSumWeight[-1] != self.count: # it is indeed a compact chain
#dfMarkov = pd.DataFrame( columns=list(self.df.columns), index=list(range(CumSumWeight[-1])) )
dfMarkov = np.zeros( (CumSumWeight[-1] , self.ndim+self._offset) )
istart = 0
for i in range(self.count):
iend = CumSumWeight[i]
#dfMarkov.iloc[istart:iend,:] = self.df.iloc[i].values
dfMarkov[istart:iend,:] = self.df.iloc[i].values
istart = iend
columns = self.df.columns
self.df = pd.DataFrame(dfMarkov)
self.count = len(self.df.iloc[:,1])
self.df.columns = columns
self._progress.note()
if not self._isProgressFile:
self._progress.note( msg = "ndim = " + str(self.ndim) + ", count = " + str(self.count), end = newline, pre = True )
# set dynamic properties
if parseContents:
self._progress.note( msg = "parsing file contents... ", end = newline, pre = True )
self.contents = Struct()
for icol, colName in enumerate(self.df.columns):
setattr ( self.contents, colName, self.df[colName] )
############################################################################################################################
#### statistics
############################################################################################################################
if not self._isProgressFile:
self.stats = Struct()
#### add chain cormat
self._progress.note( msg = "computing the sample correlation matrix... ", end = newline, pre = True )
self.stats.cormat = ccm.CorMat ( dataFrame = self.df
, columns = range(self._offset,self._offset+self.ndim)
, methodName = self._methodName
, reportEnabled = self._reportEnabled
, method = "pearson"
)
self.stats.cormat()
#### add chain covmat
self._progress.note( msg = "computing the sample covariance matrix... ", end = newline, pre = True )
self.stats.covmat = ccm.CovMat ( dataFrame = self.df
, columns = range(self._offset,self._offset+self.ndim)
, methodName = self._methodName
, reportEnabled = self._reportEnabled
)
self.stats.covmat()
#### add chain autocorrelation
self._progress.note( msg = "computing the sample autocorrelations... ", end = newline, pre = True )
self.stats.autocorr = AutoCorr ( dataFrame = self.df
, columns = range(self._offset-1,self._offset+self.ndim)
, methodName = self._methodName
, reportEnabled = self._reportEnabled
)
self.stats.autocorr()
#### add chain maxLogFunc
self.stats.maxLogFunc = getMaxLogFunc(dataFrame = self.df)
############################################################################################################################
#### graphics
############################################################################################################################
self._plotTypeList = [ "line"
, "scatter"
, "lineScatter"
]
if not self._isProgressFile: self._plotTypeList += [ "line3"
, "scatter3"
, "lineScatter3"
, "jointplot"
, "histplot"
, "kdeplot1"
, "kdeplot2"
, "contour3"
, "contourf"
, "contour"
, "grid"
]
self._progress.note( msg = "adding the graphics tools... ", end = newline, pre = True )
self.plot = Struct()
self._resetPlot(resetType="hard")
self.plot.reset = self._resetPlot
#self.plot.helpme = self.helpme
################################################################################################################################
#### _resetPlot
################################################################################################################################
def _resetPlot ( self
, resetType = "soft"
, plotNames = "all"
):
"""
Reset the properties of the plot to the original default settings.
Use this method when you change many attributes of the plot and
you want to clean up and go back to the default settings.
**Parameters**
resetType (optional)
An optional string with possible value of ``"hard"``.
If provided, the plot object will be regenerated from scratch.
This includes reading the original data frame again and resetting
everything. If not provided, then only the plot settings will be
            reset without resetting the dataFrame.
plotNames (optional)
An optional string value or list of string values representing
the names of plots to reset. If no value is provided,
then all plots will be reset.
**Returns**
None
**Example**
.. code-block:: python
reset("hard") # regenerate all plots from scratch
reset("hard","line3") # regenerate line3 plot from scratch
reset("hard",["line","line3"]) # regenerate line and line3 plots
"""
requestedPlotTypeList = []
if isinstance(plotNames, str):
plotTypeLower = plotNames.lower()
if plotTypeLower=="all":
requestedPlotTypeList = self._plotTypeList
elif plotNames in self._plotTypeList:
requestedPlotTypeList = [plotNames]
else:
self._reportWrongPlotName(plotNames)
        elif isinstance(plotNames, list):
            for plotName in plotNames:
                if plotName not in self._plotTypeList: self._reportWrongPlotName(plotName)
            requestedPlotTypeList = plotNames  # all names validated; reset exactly these plots
else:
self._reportWrongPlotName("a none-string none-list object.")
resetTypeIsHard = None
if isinstance(resetType, str):
resetTypeIsHard = resetType.lower()=="hard"
else:
resetTypeIsHard = None
pm.abort( msg = "The input argument resetType must be a string representing" + newline
+ "the type of the reset to be performed on the plots." + newline
+ "A list of possible plots includes: \"hard\", \"soft\"" + newline
+ "Here is the help for the ``reset()`` method: " + newline
+ newline
+ self._resetPlot.__doc__
, marginTop = 1
, marginBot = 1
, methodName = self._methodName
)
############################################################################################################################
#### reset plots
############################################################################################################################
for requestedPlotType in requestedPlotTypeList:
plotObject = None
requestedPlotTypeLower = requestedPlotType.lower()
is3d = "3" in requestedPlotTypeLower
isLine = "line" in requestedPlotTypeLower
isScatter = "scatter" in requestedPlotTypeLower
isJointplot = "jointplot" in requestedPlotTypeLower
isHistplot = "histplot" in requestedPlotTypeLower
isKdeplot1 = "kdeplot1" in requestedPlotTypeLower
isKdeplot2 = "kdeplot2" in requestedPlotTypeLower
isContourf = "contourf" in requestedPlotTypeLower
isContour3 = "contour3" in requestedPlotTypeLower
isContour = "contour" in requestedPlotTypeLower and not (isContourf or isContour3)
isGridPlot = "grid" in requestedPlotTypeLower
isLineScatterPlot = isLine or isScatter
isDensityPlot = isJointplot or isHistplot or isKdeplot1 or isKdeplot2 or isContourf or isContour3 or isContour
if not resetTypeIsHard:
plotComponent = getattr(self, "plot")
plotObject = getattr(plotComponent, requestedPlotType)
plotObject._reset()
########################################################################################################################
#### reset line / scatter
########################################################################################################################
if isLineScatterPlot:
if resetTypeIsHard:
plotObject = LineScatterPlot( plotType = requestedPlotType
, dataFrame = self.df
, methodName = self._methodName
, reportEnabled = self._reportEnabled
, resetPlot = self._resetPlot
)
plotObject.ycolumns = self.df.columns[self._offset] # :]
plotObject.ccolumns = self._sampleLogFuncColName
plotObject.colorbar.kws.extend = "neither"
plotObject.colorbar.kws.orientation = "vertical"
plotObject.colorbar.kws.spacing = "uniform"
if is3d:
plotObject.zcolumns = self._sampleLogFuncColName
if self.ndim>1:
plotObject.xcolumns = self.df.columns[self._offset]
plotObject.ycolumns = self.df.columns[self._offset+1]
if isLine:
if isScatter:
plotObject.lineCollection.enabled = False
plotObject.plot.enabled = True
plotObject.plot.kws.alpha = 0.2
plotObject.plot.kws.color = "grey"
plotObject.plot.kws.linewidth = 0.75
else:
plotObject.lineCollection.enabled = True
plotObject.plot.enabled = False
########################################################################################################################
#### reset density plots: kdeplot / histplot / jointplot / contour / contourf / contour3
########################################################################################################################
if isDensityPlot:
if resetTypeIsHard:
plotObject = DensityPlot( plotType = requestedPlotType
, dataFrame = self.df
, methodName = self._methodName
, reportEnabled = self._reportEnabled
, resetPlot = self._resetPlot
)
plotObject.xcolumns = self.df.columns[self._offset]
if not (isHistplot or isKdeplot1):
if self.ndim==1:
plotObject.xcolumns = self.df.columns[self._offset-1]
plotObject.ycolumns = self.df.columns[self._offset]
else:
plotObject.ycolumns = self.df.columns[self._offset+1]
########################################################################################################################
#### reset GridPlot
########################################################################################################################
if isGridPlot:
if resetTypeIsHard:
plotObject = GridPlot ( plotType = requestedPlotType
, dataFrame = self.df
, methodName = self._methodName
, reportEnabled = self._reportEnabled
, resetPlot = self._resetPlot
)
endColindex = np.min( [self._offset+3, self._offset+self.ndim] )
plotObject.columns = self.df.columns[self._offset-1:endColindex]
plotObject.ccolumn = self._sampleLogFuncColName
########################################################################################################################
#### reset target component
########################################################################################################################
if (isLineScatterPlot or isDensityPlot) and not (plotObject._type.is3d or self._isProgressFile):
xtarget = 0 # dummy
if isDensityPlot: xtarget = self.df[plotObject.xcolumns].values.flatten()[self.stats.maxLogFunc.idrow]
if plotObject._type.is1d: plotObject.target.value = [ xtarget, 0 ]
if plotObject._type.is2d:
ytarget = self.df[plotObject.ycolumns].values.flatten()[self.stats.maxLogFunc.idrow]
plotObject.target.value = [ xtarget, ytarget ]
if isDensityPlot and plotObject._type.is1d: plotObject.target.axhline.enabled = False
if isLine or isScatter: plotObject.target.axvline.enabled = False
plotObject.target.label = "maxLogFunc"
########################################################################################################################
if plotObject is not None: setattr(self.plot, requestedPlotType, plotObject)
################################################################################################################################
#### _reportWrongPlotName
################################################################################################################################
def _reportWrongPlotName( self
, plotNames
):
pm.abort( msg = "The input argument ``plotNames`` must be a string representing" + newline
+ "the name of a plot belonging to the TabularFileContents class or," + newline
+ "a list of such plot names. You have entered: " + plotNames + newline
+ "Possible plots are: " + newline
+ newline
+ newline.join(self._plotTypeList) + newline
+ newline
+ "Here is the help for the ``reset()`` method: " + newline
+ newline
+ self._resetPlot.__doc__
, marginTop = 1
, marginBot = 1
, methodName = self._methodName
)
################################################################################################################################
| 46.794918
| 132
| 0.446517
|
124f38c891599afc6d672d8572a17e4cebf5ad92
| 493
|
py
|
Python
|
application/modules/__init__.py
|
BaggerFast/Simple_votings
|
843769fa6fd2c04feb542e6b301b7b4810260d4e
|
[
"MIT"
] | null | null | null |
application/modules/__init__.py
|
BaggerFast/Simple_votings
|
843769fa6fd2c04feb542e6b301b7b4810260d4e
|
[
"MIT"
] | null | null | null |
application/modules/__init__.py
|
BaggerFast/Simple_votings
|
843769fa6fd2c04feb542e6b301b7b4810260d4e
|
[
"MIT"
] | null | null | null |
from application.modules.index import MainView
from application.modules.vote import VotePage
from application.modules.login import LoginView
from application.modules.registration import RegistrationView
from application.modules.votings_list import VoteListView
from application.modules.edit_vote import CreateEdiVoteView
from application.modules.own_vote_list import OwnVoteListView
from application.modules.remove_vote import RemoveVotePage
from application.modules.user_list import UserList
| 49.3
| 61
| 0.890467
|
008f658db1cc654952e788b75771df293c0f8582
| 5,404
|
py
|
Python
|
tests/parser_original_cases.py
|
wapwallace/jpath_finder
|
5aa058507bec290aed179256cfb66364f6f4a490
|
[
"Apache-2.0"
] | null | null | null |
tests/parser_original_cases.py
|
wapwallace/jpath_finder
|
5aa058507bec290aed179256cfb66364f6f4a490
|
[
"Apache-2.0"
] | null | null | null |
tests/parser_original_cases.py
|
wapwallace/jpath_finder
|
5aa058507bec290aed179256cfb66364f6f4a490
|
[
"Apache-2.0"
] | 1
|
2022-03-19T02:07:15.000Z
|
2022-03-19T02:07:15.000Z
|
# json_path, result_expected
SORTED_CASES = (
("$.objects.`sorted`", [["alpha", "beta", "gamma"]]),
("$.objects.`sorted`[1]", ["beta"]),
("$.objects_2.`sorted`", [["cat", "cow", "horse"]]),
("$.objects_2.`sorted`[0]", ["cat"]),
)
LEN_CASES = (
("$.objects.`len`", [3]),
("$.objects_2.`len`", [3]),
("$.objects[0].`len`", [5]),
)
FILTER_CASES = (
(
"$.objects_4[?(@.cow>5)]",
[{"cow": 8, "cat": 2}, {"cow": 7, "cat": 2}, {"cow": 8, "cat": 3}],
),
("$.objects_4[?(@.cow>5 & @.cat=2)]", [{"cow": 8, "cat": 2}, {"cow": 7, "cat": 2}]),
("$.objects_5[?(@.confidence>=0.5)].prediction", ["Bad"]),
(
"$.objects_4[?(@.cow==8|@.cat==3)]",
[{"cow": 8, "cat": 2}, {"cow": 5, "cat": 3}, {"cow": 8, "cat": 3}],
),
(
"$.objects_4[?(@.cow=8 & (@.cat=2 | @.cat=3))]",
[{"cow": 8, "cat": 2}, {"cow": 8, "cat": 3}],
),
("$.objects_4[?(@.dog=1|@.cat=3&@.cow=8)]", [{"cow": 8, "cat": 3}]),
("$.objects_4[?(@.cow>5&@.cat=2)]", [{"cow": 8, "cat": 2}, {"cow": 7, "cat": 2}]),
("$.items[?(@.quotas[*].limit<=21)].id", ["1000", "1001"]),
(
"$.items[?(@.quotas[*].metric='SSD' & @.quotas[*].usage>0) | "
"(@.quotas[*].metric='CPU' & @.quotas[*].usage>0) | "
"(@..quotas[*].metric='DISKS' & @.quotas[*].usage>0)].id",
["1000", "1002"],
),
(
"$.items[?(@.quotas[?((@.metric='SSD' & @.usage>0) | (@.metric='CPU' "
"& @.usage>0) | (@.metric='DISKS' & @.usage>0))])].quotas[?(@.usage>4)].limit",
[40960.0, 20480.0],
),
)
ARITHMETIC_CASES = (
("3 * 3", "3*3", "Operator(3,3,*)", [9]),
(
"$.objects_4[0].cow * 10",
"$.objects_4.[0].cow*10",
"Operator(Child(Child(Child(Root(), Fields('objects_4')), "
"Index(0)), Fields('cow')),10,*)",
[80],
),
(
"10 * $.objects_4[0].cow",
"$.objects_4.[0].cow*10",
"Operator(Child(Child(Child(Root(), Fields('objects_4')), "
"Index(0)), Fields('cow')),10,*)",
[80],
),
(
"$.objects_5[0].prediction[0] * 3",
"$.objects_5.[0].prediction.[0]*3",
"Operator(Child(Child(Child(Child(Root(), Fields('objects_5')), "
"Index(0)), Fields('prediction')), Index(0)),3,*)",
["GGG"],
),
("'foo' * 3", "foo*3", "Operator('foo',3,*)", ["foofoofoo"]),
(
"($.objects_4[2].cow * 10 * $.objects_4[4].cow) + 2",
"$.objects_4.[2].cow*$.objects_4.[4].cow*10+2",
"Operator(Operator(Child(Child(Child(Root(), Fields('objects_4')), "
"Index(2)), Fields('cow')),Operator(Child(Child(Child(Root(), "
"Fields('objects_4')), Index(4)), Fields('cow')),10,*),*),2,+)",
[162],
),
(
"($.objects_4[4].cat * 10 * $.objects_4[4].cat) + 2",
"$.objects_4.[4].cat*$.objects_4.[4].cat*10+2",
"Operator(Operator(Child(Child(Child(Root(), Fields('objects_4')), "
"Index(4)), Fields('cat')),Operator(Child(Child(Child(Root(), "
"Fields('objects_4')), Index(4)), Fields('cat')),10,*),*),2,+)",
[92],
),
("'foo' + 'bar'", "foo+bar", "Operator('foo','bar',+)", ["foobar"]),
(
"$.objects_3[0].cow + '_' + $.objects_3[1].cat",
"$.objects_3.[0].cow+_+$.objects_3.[1].cat",
"Operator(Operator(Child(Child(Child(Root(), Fields('objects_3')), "
"Index(0)), Fields('cow')),'_',+),Child(Child(Child(Root(), "
"Fields('objects_3')), Index(1)), Fields('cat')),+)",
["moo_neigh"],
),
(
"$.objects_3[0].cow + $.objects_3[1].cat",
"$.objects_3.[0].cow+$.objects_3.[1].cat",
"Operator(Child(Child(Child(Root(), Fields('objects_3')), "
"Index(0)), Fields('cow')),Child(Child(Child(Root(), "
"Fields('objects_3')), Index(1)), Fields('cat')),+)",
["mooneigh"],
),
(
"$.objects_4[*].cow * 2",
"$.objects_4.[*].cow*2",
"Operator(Child(Child(Child(Root(), Fields('objects_4')), AllIndex()), Fields('cow')),2,*)",
[16, 14, 4, 10, 16],
),
(
"$.objects_6[*].cow * $.objects_6[*].cow",
"$.objects_6.[*].cow*$.objects_6.[*].cow",
"Operator(Child(Child(Child(Root(), Fields('objects_6')), "
"AllIndex()), Fields('cow')),Child(Child(Child(Root(), "
"Fields('objects_6')), AllIndex()), Fields('cow')),*)",
[4, 1, 9],
),
(
"$.objects_4[*].cow * $.objects_6[*].cow",
"$.objects_4.[*].cow*$.objects_6.[*].cow",
"Operator(Child(Child(Child(Root(), Fields('objects_4')), "
"AllIndex()), Fields('cow')),Child(Child(Child(Root(), "
"Fields('objects_6')), AllIndex()), Fields('cow')),*)",
[16, 8, 24, 14, 7, 21, 4, 2, 6, 10, 5, 15, 16, 8, 24],
),
(
"$.payload.metrics[?(@.name='cpu.frequency')].value * 100",
"$.payload.metrics.[?(@.name=cpu.frequency)].value*100",
"Operator(Child(Child(Child(Child(Root(), Fields('payload')), "
"Fields('metrics')), Filter(Expression(target=Child(This(), "
"Fields('name')),op='=',value='cpu.frequency'))), Fields('value')),100,*)",
[160000],
),
(
"$.payload.metrics[*].id",
"$.payload.metrics.[*].id",
"Child(Child(Child(Child(Root(), Fields('payload')), "
"Fields('metrics')), AllIndex()), Fields('id'))",
[1, 2],
),
)
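# Usage sketch (hypothetical; assumes a jpath_finder ``parse`` entry point and a
# ``DATA`` document containing the ``objects*`` keys referenced above):
#
#   for json_path, expected in SORTED_CASES + LEN_CASES + FILTER_CASES:
#       assert parse(json_path).find(DATA) == expected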
| 38.6
| 100
| 0.463175
|
a6ad18cf2645b49fa1d3bb3bf809578ad9969e85
| 8,672
|
py
|
Python
|
pyleecan/Classes/NodeMat.py
|
helene-t/pyleecan
|
8362de9b0e32b346051b38192e07f3a6974ea9aa
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Classes/NodeMat.py
|
helene-t/pyleecan
|
8362de9b0e32b346051b38192e07f3a6974ea9aa
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Classes/NodeMat.py
|
helene-t/pyleecan
|
8362de9b0e32b346051b38192e07f3a6974ea9aa
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""File generated according to Generator/ClassesRef/Mesh/NodeMat.csv
WARNING! All changes made in this file will be lost!
"""
from os import linesep
from logging import getLogger
from ._check import set_array, check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from .Node import Node
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Mesh.NodeMat.get_coord import get_coord
except ImportError as error:
get_coord = error
try:
from ..Methods.Mesh.NodeMat.get_tag import get_tag
except ImportError as error:
get_tag = error
try:
from ..Methods.Mesh.NodeMat.get_group import get_group
except ImportError as error:
get_group = error
try:
from ..Methods.Mesh.NodeMat.add_node import add_node
except ImportError as error:
add_node = error
try:
from ..Methods.Mesh.NodeMat.is_exist import is_exist
except ImportError as error:
is_exist = error
from numpy import array, array_equal
from ._check import InitUnKnowClassError
class NodeMat(Node):
"""Class to define nodes coordinates and getter."""
VERSION = 1
# Check ImportError to remove unnecessary dependencies in unused method
# cf Methods.Mesh.NodeMat.get_coord
if isinstance(get_coord, ImportError):
get_coord = property(
fget=lambda x: raise_(
ImportError("Can't use NodeMat method get_coord: " + str(get_coord))
)
)
else:
get_coord = get_coord
# cf Methods.Mesh.NodeMat.get_tag
if isinstance(get_tag, ImportError):
get_tag = property(
fget=lambda x: raise_(
ImportError("Can't use NodeMat method get_tag: " + str(get_tag))
)
)
else:
get_tag = get_tag
# cf Methods.Mesh.NodeMat.get_group
if isinstance(get_group, ImportError):
get_group = property(
fget=lambda x: raise_(
ImportError("Can't use NodeMat method get_group: " + str(get_group))
)
)
else:
get_group = get_group
# cf Methods.Mesh.NodeMat.add_node
if isinstance(add_node, ImportError):
add_node = property(
fget=lambda x: raise_(
ImportError("Can't use NodeMat method add_node: " + str(add_node))
)
)
else:
add_node = add_node
# cf Methods.Mesh.NodeMat.is_exist
if isinstance(is_exist, ImportError):
is_exist = property(
fget=lambda x: raise_(
ImportError("Can't use NodeMat method is_exist: " + str(is_exist))
)
)
else:
is_exist = is_exist
    # save method is available in all objects
    save = save
    # get_logger method is available in all objects
    get_logger = get_logger
def __init__(
self, coordinate=None, nb_node=0, tag=None, delta=1e-10, init_dict=None
):
"""Constructor of the class. Can be use in two ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for Matrix, None will initialise the property with an empty Matrix
for pyleecan type, None will call the default constructor
- __init__ (init_dict = d) d must be a dictionnary wiht every properties as keys
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "coordinate" in list(init_dict.keys()):
coordinate = init_dict["coordinate"]
if "nb_node" in list(init_dict.keys()):
nb_node = init_dict["nb_node"]
if "tag" in list(init_dict.keys()):
tag = init_dict["tag"]
if "delta" in list(init_dict.keys()):
delta = init_dict["delta"]
# Initialisation by argument
# coordinate can be None, a ndarray or a list
set_array(self, "coordinate", coordinate)
self.nb_node = nb_node
# tag can be None, a ndarray or a list
set_array(self, "tag", tag)
self.delta = delta
# Call Node init
super(NodeMat, self).__init__()
# The class is frozen (in Node init), for now it's impossible to
# add new properties
def __str__(self):
"""Convert this objet in a readeable string (for print)"""
NodeMat_str = ""
# Get the properties inherited from Node
NodeMat_str += super(NodeMat, self).__str__()
NodeMat_str += (
"coordinate = "
+ linesep
+ str(self.coordinate).replace(linesep, linesep + "\t")
+ linesep
+ linesep
)
NodeMat_str += "nb_node = " + str(self.nb_node) + linesep
NodeMat_str += (
"tag = "
+ linesep
+ str(self.tag).replace(linesep, linesep + "\t")
+ linesep
+ linesep
)
NodeMat_str += "delta = " + str(self.delta) + linesep
return NodeMat_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from Node
if not super(NodeMat, self).__eq__(other):
return False
if not array_equal(other.coordinate, self.coordinate):
return False
if other.nb_node != self.nb_node:
return False
if not array_equal(other.tag, self.tag):
return False
if other.delta != self.delta:
return False
return True
def as_dict(self):
"""Convert this objet in a json seriable dict (can be use in __init__)
"""
# Get the properties inherited from Node
NodeMat_dict = super(NodeMat, self).as_dict()
if self.coordinate is None:
NodeMat_dict["coordinate"] = None
else:
NodeMat_dict["coordinate"] = self.coordinate.tolist()
NodeMat_dict["nb_node"] = self.nb_node
if self.tag is None:
NodeMat_dict["tag"] = None
else:
NodeMat_dict["tag"] = self.tag.tolist()
NodeMat_dict["delta"] = self.delta
        # The class name is added to the dict for deserialisation purposes
# Overwrite the mother class name
NodeMat_dict["__class__"] = "NodeMat"
return NodeMat_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
self.coordinate = None
self.nb_node = None
self.tag = None
self.delta = None
# Set to None the properties inherited from Node
super(NodeMat, self)._set_None()
def _get_coordinate(self):
"""getter of coordinate"""
return self._coordinate
def _set_coordinate(self, value):
"""setter of coordinate"""
if type(value) is list:
try:
value = array(value)
            except Exception:
pass
check_var("coordinate", value, "ndarray")
self._coordinate = value
# Nodes coordinates
# Type : ndarray
coordinate = property(
fget=_get_coordinate, fset=_set_coordinate, doc=u"""Nodes coordinates"""
)
def _get_nb_node(self):
"""getter of nb_node"""
return self._nb_node
def _set_nb_node(self, value):
"""setter of nb_node"""
check_var("nb_node", value, "int")
self._nb_node = value
# Total number of nodes
# Type : int
nb_node = property(
fget=_get_nb_node, fset=_set_nb_node, doc=u"""Total number of nodes"""
)
def _get_tag(self):
"""getter of tag"""
return self._tag
def _set_tag(self, value):
"""setter of tag"""
if type(value) is list:
try:
value = array(value)
            except Exception:
pass
check_var("tag", value, "ndarray")
self._tag = value
# Node tags
# Type : ndarray
tag = property(fget=_get_tag, fset=_set_tag, doc=u"""Node tags""")
def _get_delta(self):
"""getter of delta"""
return self._delta
def _set_delta(self, value):
"""setter of delta"""
check_var("delta", value, "float")
self._delta = value
    # Sensitivity for node searching
    # Type : float
    delta = property(
        fget=_get_delta, fset=_set_delta, doc=u"""Sensitivity for node searching"""
    )
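# Usage sketch (illustrative values, matching the constructor signature above):
#
#   nodes = NodeMat(coordinate=[[0.0, 0.0], [1.0, 0.0]], nb_node=2, tag=[0, 1])
#   print(nodes.nb_node)  # -> 2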
| 31.194245
| 88
| 0.598708
|
99e06670ba7b512518d5baa104472d026816177f
| 396
|
py
|
Python
|
cs4050_ws.py
|
jadppf/ai_ws
|
b58f8a08d29af7b88ff1a51c74c4f1061189f9b1
|
[
"MIT"
] | null | null | null |
cs4050_ws.py
|
jadppf/ai_ws
|
b58f8a08d29af7b88ff1a51c74c4f1061189f9b1
|
[
"MIT"
] | null | null | null |
cs4050_ws.py
|
jadppf/ai_ws
|
b58f8a08d29af7b88ff1a51c74c4f1061189f9b1
|
[
"MIT"
] | null | null | null |
from time import time
def FiB(n, F):
    # Memoized Fibonacci: F caches computed values, -1 marks "not yet computed".
    if F[n] != -1:
        return F[n]
    F[n] = FiB(n-1, F) + FiB(n-2, F)
    return F[n]
def main():
    mark = time() * 1000
    F = [-1] * 1000    # memo table, large enough for FiB(100)
    F[0], F[1] = 1, 1  # base cases
    result = FiB(100, F)
    print(result)
    now = time() * 1000
    diff = now - mark
    print('time diff ->', diff)
main()
| 17.217391
| 36
| 0.474747
|
68a138299bf7eb7fc03b31a219aeb82b0556787e
| 8,796
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_load_balancer_load_balancing_rules_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_load_balancer_load_balancing_rules_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_load_balancer_load_balancing_rules_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerLoadBalancingRulesOperations:
"""LoadBalancerLoadBalancingRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerLoadBalancingRuleListResult"]:
"""Gets all the load balancing rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerLoadBalancingRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.LoadBalancerLoadBalancingRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerLoadBalancingRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerLoadBalancingRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
load_balancing_rule_name: str,
**kwargs
) -> "_models.LoadBalancingRule":
"""Gets the specified load balancer load balancing rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param load_balancing_rule_name: The name of the load balancing rule.
:type load_balancing_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancingRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.LoadBalancingRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancingRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'loadBalancingRuleName': self._serialize.url("load_balancing_rule_name", load_balancing_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancingRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}'} # type: ignore
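# Usage sketch (hypothetical wiring; assumes an authenticated
# NetworkManagementClient instance named ``client``):
#
#   async for rule in client.load_balancer_load_balancing_rules.list("my-rg", "my-lb"):
#       print(rule.name)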
| 49.139665
| 216
| 0.676103
|
67eab4144c3e4f3917f805a6b2ab867a2b29e32d
| 3,750
|
py
|
Python
|
tfx/components/pusher/component.py
|
romeokienzler/tfx
|
6449173532bc35b78dbfb93aa89a688a7278ef59
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/pusher/component.py
|
romeokienzler/tfx
|
6449173532bc35b78dbfb93aa89a688a7278ef59
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/pusher/component.py
|
romeokienzler/tfx
|
6449173532bc35b78dbfb93aa89a688a7278ef59
|
[
"Apache-2.0"
] | 1
|
2020-06-05T08:31:32.000Z
|
2020-06-05T08:31:32.000Z
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Type
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import base_executor
from tfx.components.pusher import executor
from tfx.proto import pusher_pb2
from tfx.types import channel_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class Pusher(base_component.BaseComponent):
"""Official TFX Pusher component.
  The `Pusher` component can be used to push a validated SavedModel from the
  output of `Trainer` to TensorFlow Serving (tf.serving). If the model is not
  blessed by `ModelValidator`, no push will happen.
  """
SPEC_CLASS = PusherSpec
EXECUTOR_CLASS = executor.Executor
def __init__(
self,
model_export: types.Channel,
model_blessing: types.Channel,
push_destination: Optional[pusher_pb2.PushDestination] = None,
custom_config: Optional[Dict[Text, Any]] = None,
executor_class: Optional[Type[base_executor.BaseExecutor]] = None,
model_push: Optional[types.Channel] = None,
name: Optional[Text] = None):
"""Construct a Pusher component.
Args:
model_export: A Channel of 'ModelExportPath' type, usually produced by
Trainer component.
model_blessing: A Channel of 'ModelBlessingPath' type, usually produced by
ModelValidator component.
push_destination: A pusher_pb2.PushDestination instance, providing
info for tensorflow serving to load models. Optional if executor_class
doesn't require push_destination.
custom_config: A dict which contains the deployment job parameters to be
passed to Google Cloud ML Engine. For the full set of parameters
supported by Google Cloud ML Engine, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
executor_class: Optional custom python executor class.
model_push: Optional output 'ModelPushPath' channel with result of push.
name: Optional unique name. Necessary if multiple Pusher components are
declared in the same pipeline.
"""
model_push = model_push or types.Channel(
type=standard_artifacts.PushedModel,
artifacts=[standard_artifacts.PushedModel()])
if push_destination is None and not executor_class:
raise ValueError('push_destination is required unless a custom '
'executor_class is supplied that does not require '
'it.')
spec = PusherSpec(
model_export=channel_utils.as_channel(model_export),
model_blessing=channel_utils.as_channel(model_blessing),
push_destination=push_destination,
custom_config=custom_config,
model_push=model_push)
super(Pusher, self).__init__(spec=spec,
custom_executor_class=executor_class,
name=name)
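# Usage sketch (hypothetical pipeline wiring; the exact channel names depend on
# the upstream Trainer / ModelValidator components):
#
#   pusher = Pusher(
#       model_export=trainer.outputs['output'],
#       model_blessing=model_validator.outputs['blessing'],
#       push_destination=pusher_pb2.PushDestination(
#           filesystem=pusher_pb2.PushDestination.Filesystem(
#               base_directory=serving_model_dir)))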
| 43.103448
| 80
| 0.731467
|
c7bfc622f24e120b68be6c2a046ab23ce94e08cb
| 1,269
|
py
|
Python
|
Part-1-Answers/4-Manipulating_Strings/4-Short_Long_Names.py
|
Spigot-Dev/Grok-Intro_To_Programming_Python1-2
|
69c64019c0424f6cc8eb326b4456a510baab7ea7
|
[
"MIT"
] | 2
|
2021-11-20T11:28:22.000Z
|
2022-02-07T21:56:46.000Z
|
Part-1-Answers/4-Manipulating_Strings/4-Short_Long_Names.py
|
Spigot-Dev/Grok-Intro_To_Programming_Python1-2
|
69c64019c0424f6cc8eb326b4456a510baab7ea7
|
[
"MIT"
] | null | null | null |
Part-1-Answers/4-Manipulating_Strings/4-Short_Long_Names.py
|
Spigot-Dev/Grok-Intro_To_Programming_Python1-2
|
69c64019c0424f6cc8eb326b4456a510baab7ea7
|
[
"MIT"
] | 4
|
2021-11-20T11:28:25.000Z
|
2022-03-12T04:10:54.000Z
|
#17/02/21
#What does this code do?
# This code uses concepts explored previously: it takes an input, measures the input's length with len(), and then decides which message to print based on that length.
name = input("Enter your name: ")
length = len(name)
if length <= 3:
    print("Hi", name, ", you have a short name.")
elif length >= 4 and length <= 8:
    print("Hi", name, ", nice to meet you.")
elif length > 8:
    print("Hi", name, ", you have a long name.")
#What is happening here?
# This code is a bit confusing, so let's break it down line by line.
# In the first line, we take an input and assign it to the "name" variable. This means we can refer to it later.
# Then, we assign the result of len(name) to the variable "length". This just makes it easier to reuse later, so we're not calling len(name) every time we need it in a condition.
# We then go into our chain of conditional statements. The first statement, IF, runs only if the length is less than or equal to 3.
# If your input doesn't match the first condition, we move on to our first ELIF statement. This only runs if your name is between 4 and 8 characters long.
# If your name is longer than 8 characters, then the final statement, another ELIF, runs.
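# Example run (illustrative): entering "Bob" (3 characters) prints
# "Hi Bob, you have a short name." because 3 <= 3 satisfies the first condition.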
| 55.173913
| 203
| 0.712372
|
20a45e0e36f3f52007decb91ebc2d0bbb16af071
| 16,303
|
py
|
Python
|
ProjectFiles/bin/Release/2.80/scripts/addons_contrib/io_scene_cod/__init__.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2019-07-08T15:51:14.000Z
|
2019-07-08T15:51:14.000Z
|
ProjectFiles/bin/Release/2.80/scripts/addons_contrib/io_scene_cod/__init__.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
ProjectFiles/bin/Release/2.80/scripts/addons_contrib/io_scene_cod/__init__.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Blender-CoD: Blender Add-On for Call of Duty modding
Version: alpha 3
Copyright (c) 2011 CoDEmanX, Flybynyt -- blender-cod@online.de
http://code.google.com/p/blender-cod/
TODO
- UI for xmodel and xanim import (planned for alpha 4/5)
"""
bl_info = {
"name": "Blender-CoD - Add-On for Call of Duty modding (alpha 3)",
"author": "CoDEmanX, Flybynyt",
"version": (0, 3, 5),
"blender": (2, 62, 0),
"location": "File > Import | File > Export",
"description": "Export models to *.XMODEL_EXPORT and animations to *.XANIM_EXPORT",
"warning": "Alpha version, please report any bugs!",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Call_of_Duty_IO",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"support": "TESTING",
"category": "Import-Export"
}
# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
import imp
if "import_xmodel" in locals():
imp.reload(import_xmodel)
if "export_xmodel" in locals():
imp.reload(export_xmodel)
if "import_xanim" in locals():
imp.reload(import_xanim)
if "export_xanim" in locals():
imp.reload(export_xanim)
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty
import bpy_extras.io_utils
from bpy_extras.io_utils import ExportHelper, ImportHelper
import time
# Planned for alpha 4/5
class ImportXmodel(bpy.types.Operator, ImportHelper):
"""Load a CoD XMODEL_EXPORT File"""
bl_idname = "import_scene.xmodel"
bl_label = "Import XMODEL_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XMODEL_EXPORT"
filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
#use_meshes = BoolProperty(name="Meshes", description="Import meshes", default=True)
#use_armature = BoolProperty(name="Armature", description="Import Armature", default=True)
#use_bind_armature = BoolProperty(name="Bind Meshes to Armature", description="Parent imported meshes to armature", default=True)
#use_split_objects = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
#use_split_groups = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
#use_image_search = BoolProperty(name="Image Search", description="Search subdirs for any associated images (Warning, may be slow)", default=True)
def execute(self, context):
from . import import_xmodel
start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
result = import_xmodel.load(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Import finished in %.4f sec." % (time.perf_counter() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
"""
def draw(self, context):
layout = self.layout
col = layout.column()
col.prop(self, "use_meshes")
col.prop(self, "use_armature")
row = layout.row()
row.active = self.use_meshes and self.use_armature
row.prop(self, "use_bind_armature")
"""
@classmethod
def poll(self, context):
return (context.scene is not None)
class ImportXanim(bpy.types.Operator, ImportHelper):
"""Load a CoD XANIM_EXPORT File"""
bl_idname = "import_scene.xanim"
bl_label = "Import XANIM_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XANIM_EXPORT"
filter_glob = StringProperty(default="*.XANIM_EXPORT;*.NT_EXPORT", options={'HIDDEN'})
def execute(self, context):
# print("Selected: " + context.active_object.name)
from . import import_xanim
return import_xanim.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
class ExportXmodel(bpy.types.Operator, ExportHelper):
"""Save a CoD XMODEL_EXPORT File"""
bl_idname = "export_scene.xmodel"
bl_label = 'Export XMODEL_EXPORT'
bl_options = {'PRESET'}
filename_ext = ".XMODEL_EXPORT"
filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_version = EnumProperty(
name="Format Version",
description="XMODEL_EXPORT format version for export",
items=(('5', "Version 5", "vCoD, CoD:UO"),
('6', "Version 6", "CoD2, CoD4, CoD5, CoD7")),
default='6',
)
use_selection = BoolProperty(
name="Selection only",
description="Export selected meshes only (object or weight paint mode)",
default=False
)
use_vertex_colors = BoolProperty(
name="Vertex colors",
description="Export vertex colors (if disabled, white color will be used)",
default=True
)
use_vertex_colors_alpha = BoolProperty(
name="As alpha",
description="Turn RGB vertex colors into grayscale (average value) and use it as alpha transparency. White is 1 (opaque), black 0 (invisible)",
default=False
)
use_apply_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply all mesh modifiers except Armature (preview resolution)",
default=True
)
use_armature = BoolProperty(
name="Armature",
description="Export bones (if disabled, only a 'tag_origin' bone will be written)",
default=True
)
use_vertex_cleanup = BoolProperty(
name="Clean up vertices",
description="Try this if you have problems converting to xmodel. Skips vertices which aren't used by any face and updates references.",
default=False
)
use_armature_pose = BoolProperty(
name="Pose animation to models",
description="Export meshes with Armature modifier applied as a series of XMODEL_EXPORT files",
default=False
)
use_frame_start = IntProperty(
name="Start",
description="First frame to export",
default=1,
min=0
)
use_frame_end = IntProperty(
name="End",
description="Last frame to export",
default=250,
min=0
)
use_weight_min = BoolProperty(
name="Minimum bone weight",
description="Try this if you get 'too small weight' errors when converting",
default=False,
)
use_weight_min_threshold = FloatProperty(
name="Threshold",
description="Smallest allowed weight (minimum value)",
default=0.010097,
min=0.0,
max=1.0,
precision=6
)
def execute(self, context):
from . import export_xmodel
start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
result = export_xmodel.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Export finished in %.4f sec." % (time.perf_counter() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
# Extend ExportHelper invoke function to support dynamic default values
def invoke(self, context, event):
#self.use_frame_start = context.scene.frame_start
self.use_frame_start = context.scene.frame_current
#self.use_frame_end = context.scene.frame_end
self.use_frame_end = context.scene.frame_current
return super().invoke(context, event)
def draw(self, context):
layout = self.layout
row = layout.row(align=True)
row.prop(self, "use_version", expand=True)
# Calculate number of selected mesh objects
if context.mode in {'OBJECT', 'PAINT_WEIGHT'}:
meshes_selected = len([m for m in bpy.data.objects if m.type == 'MESH' and m.select])
else:
meshes_selected = 0
col = layout.column(align=True)
col.prop(self, "use_selection", "Selection only (%i meshes)" % meshes_selected)
col.enabled = bool(meshes_selected)
col = layout.column(align=True)
col.prop(self, "use_apply_modifiers")
col = layout.column(align=True)
col.enabled = not self.use_armature_pose
if self.use_armature and self.use_armature_pose:
col.prop(self, "use_armature", "Armature (disabled)")
else:
col.prop(self, "use_armature")
if self.use_version == '6':
row = layout.row(align=True)
row.prop(self, "use_vertex_colors")
sub = row.split()
sub.active = self.use_vertex_colors
sub.prop(self, "use_vertex_colors_alpha")
col = layout.column(align=True)
col.label("Advanced:")
col = layout.column(align=True)
col.prop(self, "use_vertex_cleanup")
box = layout.box()
col = box.column(align=True)
col.prop(self, "use_armature_pose")
sub = box.column()
sub.active = self.use_armature_pose
sub.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
row = sub.row(align=True)
row.prop(self, "use_frame_start")
row.prop(self, "use_frame_end")
box = layout.box()
col = box.column(align=True)
col.prop(self, "use_weight_min")
sub = box.column()
sub.enabled = self.use_weight_min
sub.prop(self, "use_weight_min_threshold")
@classmethod
def poll(self, context):
return (context.scene is not None)
class ExportXanim(bpy.types.Operator, ExportHelper):
"""Save a XMODEL_XANIM File"""
bl_idname = "export_scene.xanim"
bl_label = 'Export XANIM_EXPORT'
bl_options = {'PRESET'}
filename_ext = ".XANIM_EXPORT"
filter_glob = StringProperty(default="*.XANIM_EXPORT", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_selection = BoolProperty(
name="Selection only",
description="Export selected bones only (pose mode)",
default=False
)
use_framerate = IntProperty(
name="Framerate",
description="Set frames per second for export, 30 fps is commonly used.",
default=24,
min=1,
max=100
)
use_frame_start = IntProperty(
name="Start",
description="First frame to export",
default=1,
min=0
)
use_frame_end = IntProperty(
name="End",
description="Last frame to export",
default=250,
min=0
)
use_notetrack = BoolProperty(
name="Notetrack",
description="Export timeline markers as notetrack nodes",
default=True
)
use_notetrack_format = EnumProperty(
name="Notetrack format",
description="Notetrack format to use. Always set 'CoD 7' for Black Ops, even if not using notetrack!",
items=(('5', "CoD 5", "Separate NT_EXPORT notetrack file for 'World at War'"),
('7', "CoD 7", "Separate NT_EXPORT notetrack file for 'Black Ops'"),
('1', "all other", "Inline notetrack data for all CoD versions except WaW and BO")),
default='1',
)
def execute(self, context):
from . import export_xanim
start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
result = export_xanim.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Export finished in %.4f sec." % (time.perf_counter() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
# Extend ExportHelper invoke function to support dynamic default values
def invoke(self, context, event):
self.use_frame_start = context.scene.frame_start
self.use_frame_end = context.scene.frame_end
self.use_framerate = round(context.scene.render.fps / context.scene.render.fps_base)
return super().invoke(context, event)
def draw(self, context):
layout = self.layout
bones_selected = 0
armature = None
# Take the first armature
for ob in bpy.data.objects:
if ob.type == 'ARMATURE' and len(ob.data.bones) > 0:
armature = ob.data
# Calculate number of selected bones if in pose-mode
if context.mode == 'POSE':
bones_selected = len([b for b in armature.bones if b.select])
# Prepare info string
armature_info = "%s (%i bones)" % (ob.name, len(armature.bones))
break
else:
armature_info = "Not found!"
if armature:
icon = 'NONE'
else:
icon = 'ERROR'
col = layout.column(align=True)
col.label("Armature: %s" % armature_info, icon)
col = layout.column(align=True)
col.prop(self, "use_selection", "Selection only (%i bones)" % bones_selected)
col.enabled = bool(bones_selected)
layout.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
row = layout.row(align=True)
row.prop(self, "use_frame_start")
row.prop(self, "use_frame_end")
col = layout.column(align=True)
col.prop(self, "use_framerate")
# Calculate number of markers in export range
frame_min = min(self.use_frame_start, self.use_frame_end)
frame_max = max(self.use_frame_start, self.use_frame_end)
num_markers = len([m for m in context.scene.timeline_markers if frame_max >= m.frame >= frame_min])
col = layout.column(align=True)
col.prop(self, "use_notetrack", text="Notetrack (%i nodes)" % num_markers)
col = layout.column(align=True)
col.prop(self, "use_notetrack_format", expand=True)
@classmethod
def poll(self, context):
return (context.scene is not None)
def menu_func_xmodel_import(self, context):
self.layout.operator(ImportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
"""
def menu_func_xanim_import(self, context):
self.layout.operator(ImportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
"""
def menu_func_xmodel_export(self, context):
self.layout.operator(ExportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
def menu_func_xanim_export(self, context):
self.layout.operator(ExportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
def register():
bpy.utils.register_module(__name__)
bpy.types.TOPBAR_MT_file_import.append(menu_func_xmodel_import)
#bpy.types.TOPBAR_MT_file_import.append(menu_func_xanim_import)
bpy.types.TOPBAR_MT_file_export.append(menu_func_xmodel_export)
bpy.types.TOPBAR_MT_file_export.append(menu_func_xanim_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.TOPBAR_MT_file_import.remove(menu_func_xmodel_import)
#bpy.types.TOPBAR_MT_file_import.remove(menu_func_xanim_import)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_xmodel_export)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_xanim_export)
if __name__ == "__main__":
register()
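# Illustrative sketch (an assumption, not part of the add-on): once registered,
# the operators can also be invoked from Blender's Python console through their
# bl_idname, e.g.:
#
# bpy.ops.export_scene.xmodel(filepath="//model.XMODEL_EXPORT", use_version='6')
# bpy.ops.export_scene.xanim(filepath="//anim.XANIM_EXPORT", use_framerate=30)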
| 34.178197
| 151
| 0.64755
|
22606731e7b89795764088872f3e8f825432344b
| 2,989
|
py
|
Python
|
rest_api/application.py
|
Krak91/haystack
|
7d769d8bf14a72801c695728c87f5f4eee0e9b87
|
[
"Apache-2.0"
] | null | null | null |
rest_api/application.py
|
Krak91/haystack
|
7d769d8bf14a72801c695728c87f5f4eee0e9b87
|
[
"Apache-2.0"
] | null | null | null |
rest_api/application.py
|
Krak91/haystack
|
7d769d8bf14a72801c695728c87f5f4eee0e9b87
|
[
"Apache-2.0"
] | 1
|
2022-01-05T15:24:36.000Z
|
2022-01-05T15:24:36.000Z
|
import logging
logging.basicConfig(format="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p")
logger = logging.getLogger(__name__)
logging.getLogger("elasticsearch").setLevel(logging.WARNING)
logging.getLogger("haystack").setLevel(logging.INFO)
try:
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.routing import APIRoute
from fastapi.openapi.utils import get_openapi
from starlette.middleware.cors import CORSMiddleware
from rest_api.controller.errors.http_error import http_error_handler
from rest_api.config import ROOT_PATH
from rest_api.controller.router import router as api_router
except (ImportError, ModuleNotFoundError) as ie:
from haystack.utils.import_utils import _optional_component_not_installed
_optional_component_not_installed("rest_api", "rest", ie)
def get_application() -> FastAPI:
application = FastAPI(title="Haystack-API", debug=True, version="1.0.0", root_path=ROOT_PATH)
# This middleware allows all cross-domain requests to the API from a browser. For production
# deployments, it could be made more restrictive.
application.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],
)
application.add_exception_handler(HTTPException, http_error_handler)
application.include_router(api_router)
return application
def get_openapi_specs() -> dict:
"""
Used to autogenerate the OpenAPI specs file used in the documentation.
See `docs/_src/api/openapi/generate_openapi_specs.py`
"""
app = get_application()
return get_openapi(
title=app.title if app.title else None,
version=app.version if app.version else None,
openapi_version=app.openapi_version if app.openapi_version else None,
description=app.description if app.description else None,
routes=app.routes if app.routes else None,
)
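# Illustrative sketch (an assumption, not part of this module): dumping the
# generated specs to a JSON file, as a documentation build step might.
#
# import json
# with open("openapi.json", "w") as f:
#     json.dump(get_openapi_specs(), f, indent=2)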
def use_route_names_as_operation_ids(app: FastAPI) -> None:
"""
Simplify operation IDs so that generated API clients have simpler function
names (see https://fastapi.tiangolo.com/advanced/path-operation-advanced-configuration/#using-the-path-operation-function-name-as-the-operationid).
The operation IDs will be the same as the route names (i.e. the python method names of the endpoints)
Should be called only after all routes have been added.
"""
for route in app.routes:
if isinstance(route, APIRoute):
route.operation_id = route.name
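# (Illustrative effect, assuming a route function named `query`: the generated
# OpenAPI operationId becomes "query", so generated clients expose a query() method.)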
app = get_application()
use_route_names_as_operation_ids(app)
logger.info("Open http://127.0.0.1:8000/docs to see Swagger API Documentation.")
logger.info(
"""
Or just try it out directly: curl --request POST --url 'http://127.0.0.1:8000/query' -H "Content-Type: application/json" --data '{"query": "Who is the father of Arya Stark?"}'
"""
)
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| 38.320513
| 180
| 0.731683
|
ada8c30fab7932491efa2cf0cc39e2e470d9dad2
| 29,941
|
py
|
Python
|
tensorflow/python/framework/importer.py
|
JanX2/tensorflow
|
dd788dbbfa544c1ea4768940ac4300c22bb7e88e
|
[
"Apache-2.0"
] | 3
|
2017-12-04T07:45:22.000Z
|
2018-04-20T06:53:17.000Z
|
tensorflow/python/framework/importer.py
|
JanX2/tensorflow
|
dd788dbbfa544c1ea4768940ac4300c22bb7e88e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/framework/importer.py
|
JanX2/tensorflow
|
dd788dbbfa544c1ea4768940ac4300c22bb7e88e
|
[
"Apache-2.0"
] | 1
|
2020-05-14T06:13:24.000Z
|
2020-05-14T06:13:24.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility function for importing TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated_args
# TODO(josh11b): SWIG the code from node_def_util instead of duplicating
# the logic here.
def _GetNodeAttr(node_def, attr_name):
if attr_name not in node_def.attr:
raise ValueError('Expected one attr with name %r in %s.'
% (attr_name, str(node_def)))
return node_def.attr[attr_name]
def _ArgToTypesNoRef(node_def, arg_def):
if arg_def.number_attr:
repeats = _GetNodeAttr(node_def, arg_def.number_attr).i
if arg_def.type_attr:
dtype = _GetNodeAttr(node_def, arg_def.type_attr).type
else:
assert arg_def.type != types_pb2.DT_INVALID
dtype = arg_def.type
return [dtype] * repeats
elif arg_def.type_attr:
return [_GetNodeAttr(node_def, arg_def.type_attr).type]
elif arg_def.type_list_attr:
return _GetNodeAttr(node_def, arg_def.type_list_attr).list.type
else:
assert arg_def.type != types_pb2.DT_INVALID
return [arg_def.type]
def _SingleArgToTypes(node_def, arg_def):
types = _ArgToTypesNoRef(node_def, arg_def)
if arg_def.is_ref:
return [dtypes.as_dtype(dt)._as_ref.as_datatype_enum for dt in types] # pylint: disable=protected-access
return types
def _ArgsToTypes(node_def, arg_list):
types = []
for arg_def in arg_list:
types.extend(_SingleArgToTypes(node_def, arg_def))
return types
def _InputTypes(node_def, op_dict):
op_def = op_dict[node_def.op]
return _ArgsToTypes(node_def, op_def.input_arg)
def _OutputTypes(node_def, op_dict):
op_def = op_dict[node_def.op]
return _ArgsToTypes(node_def, op_def.output_arg)
def _IsControlInput(input_name):
# Expected format: '^operation_name' (control input).
return input_name.startswith('^')
def _ParseTensorName(tensor_name):
"""Parses a tensor name into an operation name and output index.
This function will canonicalize tensor names as follows:
* "foo:0" -> ("foo", 0)
* "foo:7" -> ("foo", 7)
* "foo" -> ("foo", 0)
* "foo:bar:baz" -> ValueError
Args:
tensor_name: The name of a tensor.
Returns:
A tuple containing the operation name, and the output index.
Raises:
ValueError: If `tensor_name` cannot be interpreted as the name of a tensor.
"""
components = tensor_name.split(':')
if len(components) == 2:
# Expected format: 'operation_name:output_index'.
try:
output_index = int(components[1])
except ValueError:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
return components[0], output_index
elif len(components) == 1:
# Expected format: 'operation_name' (implicit 0th output).
return components[0], 0
else:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
def _CanonicalInputName(input_name):
input_name = compat.as_str(input_name)
if _IsControlInput(input_name):
return input_name
input_op_name, output_index = _ParseTensorName(input_name)
return '%s:%d' % (input_op_name, output_index)
def _InvalidNodeMessage(node, message):
return 'graph_def is invalid at node %r: %s.' % (node.name, message)
@contextlib.contextmanager
def _MaybeDevice(device):
"""Applies the given device only if device is not None or empty."""
if device:
with ops.device(device):
yield
else:
yield
def _ProcessGraphDefParam(graph_def):
"""Type-checks and possibly canonicalizes `graph_def`."""
if not isinstance(graph_def, graph_pb2.GraphDef):
# `graph_def` could be a dynamically-created message, so try a duck-typed
# approach
try:
old_graph_def = graph_def
graph_def = graph_pb2.GraphDef()
graph_def.MergeFrom(old_graph_def)
except TypeError:
raise TypeError('graph_def must be a GraphDef proto.')
return graph_def
def _ProcessInputMapParam(input_map):
"""Type-checks and possibly canonicalizes `input_map`."""
if input_map is None:
input_map = {}
else:
if not (isinstance(input_map, dict)
and all(isinstance(k, compat.bytes_or_text_types)
for k in input_map.keys())):
raise TypeError('input_map must be a dictionary mapping strings to '
'Tensor objects.')
return input_map
def _ProcessReturnElementsParam(return_elements):
"""Type-checks and possibly canonicalizes `return_elements`."""
if return_elements is None: return None
if not all(isinstance(x, compat.bytes_or_text_types)
for x in return_elements):
raise TypeError('return_elements must be a list of strings.')
return tuple(compat.as_str(x) for x in return_elements)
def _FindAttrInOpDef(attr_name, op_def):
for attr_def in op_def.attr:
if attr_name == attr_def.name:
return attr_def
return None
def _ConvertInputMapValues(name, input_map):
"""Ensures all input map values are tensors.
This should be called from inside the import name scope.
Args:
name: the `name` argument passed to import_graph_def
input_map: the `input_map` argument passed to import_graph_def.
Returns:
A possibly-updated version of `input_map`.
Raises:
ValueError: if input map values cannot be converted due to empty name scope.
"""
if not all(isinstance(v, ops.Tensor) for v in input_map.values()):
if name == '': # pylint: disable=g-explicit-bool-comparison
raise ValueError(
'tf.import_graph_def() requires a non-empty `name` if `input_map` '
'contains non-Tensor values. Try calling tf.convert_to_tensor() on '
'`input_map` values before calling tf.import_graph_def().')
with ops.name_scope('_inputs'):
input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}
return input_map
def _PopulateTFImportGraphDefOptions(options, prefix, input_map,
return_elements):
"""Populates the TF_ImportGraphDefOptions `options`."""
c_api.TF_ImportGraphDefOptionsSetPrefix(options, prefix)
for input_src, input_dst in input_map.items():
input_src = compat.as_str(input_src)
if input_src.startswith('^'):
src_name = compat.as_bytes(input_src[1:])
dst_op = input_dst._as_tf_output().oper # pylint: disable=protected-access
c_api.TF_ImportGraphDefOptionsRemapControlDependency(options, src_name,
dst_op)
else:
src_name, src_idx = _ParseTensorName(input_src)
src_name = compat.as_str(src_name)
dst_output = input_dst._as_tf_output() # pylint: disable=protected-access
c_api.TF_ImportGraphDefOptionsAddInputMapping(options, src_name,
src_idx, dst_output)
for name in return_elements or []:
if ':' in name:
op_name, index = _ParseTensorName(name)
op_name = compat.as_str(op_name)
c_api.TF_ImportGraphDefOptionsAddReturnOutput(options, op_name, index)
else:
c_api.TF_ImportGraphDefOptionsAddReturnOperation(options,
compat.as_str(name))
# TODO(skyewm): control dependencies
def _ProcessNewOps(graph):
"""Processes the newly-added TF_Operations in `graph`."""
# Maps from a node to the names of the ops it's colocated with, if colocation
# is specified in the attributes.
colocation_pairs = {}
for c_op in c_api_util.new_tf_operations(graph):
# pylint: disable=protected-access
new_op = graph._create_op_from_tf_operation(c_op, compute_device=False)
# pylint: enable=protected-access
colocation_names = _GetColocationNames(new_op)
if colocation_names:
colocation_pairs[new_op] = colocation_names
# Don't apply this op's device function, since colocation constraints
# override device functions. Note that this op's device may still be set
# by the loop below.
else:
with _MaybeDevice(new_op.device):
graph._apply_device_functions(new_op) # pylint: disable=protected-access
# The following loop populates the device field of ops that are colocated
# with another op. This is implied by the colocation attribute, but we
# propagate the device field for completeness.
for op, coloc_op_list in colocation_pairs.items():
coloc_device = None
# Find any device in the list of colocated ops that have a device, if it
# exists. We assume that if multiple ops have devices, they refer to the
# same device. Otherwise, a runtime error will occur since the colocation
# property cannot be guaranteed.
#
# One possible improvement is to try to check for compatibility of all
# devices in this list at import time here, which would require
# implementing a compatibility function for device specs in python.
for coloc_op_name in coloc_op_list:
try:
coloc_op = graph._get_operation_by_name_unsafe(coloc_op_name) # pylint: disable=protected-access
except KeyError:
raise ValueError('Specified colocation to an op that '
'does not exist during import: %s in %s' % (
coloc_op_name, op.name))
if coloc_op.device:
coloc_device = pydev.DeviceSpec.from_string(coloc_op.device)
break
if coloc_device:
op._set_device(coloc_device) # pylint: disable=protected-access
def _GetColocationNames(op):
"""Returns names of the ops that `op` should be colocated with."""
colocation_names = []
try:
class_values = op.get_attr('_class')
except ValueError:
# No _class attr
return
for val in class_values:
val = compat.as_str(val)
if val.startswith('loc:@'):
colocation_node_name = val[len('loc:@'):]
if colocation_node_name != op.name:
colocation_names.append(colocation_node_name)
return colocation_names
def _GatherReturnElements(requested_return_elements, graph, results):
"""Returns the requested return elements from results.
Args:
requested_return_elements: list of strings of operation and tensor names
graph: Graph
results: wrapped TF_ImportGraphDefResults
Returns:
list of `Operation` and/or `Tensor` objects
"""
return_outputs = c_api.TF_ImportGraphDefResultsReturnOutputs(results)
return_opers = c_api.TF_ImportGraphDefResultsReturnOperations(results)
combined_return_elements = []
outputs_idx = 0
opers_idx = 0
for name in requested_return_elements:
if ':' in name:
combined_return_elements.append(
graph._get_tensor_by_tf_output(return_outputs[outputs_idx])) # pylint: disable=protected-access
outputs_idx += 1
else:
combined_return_elements.append(
graph._get_operation_by_tf_operation(return_opers[opers_idx])) # pylint: disable=protected-access
opers_idx += 1
return combined_return_elements
@deprecated_args(None, 'Please file an issue at '
'https://github.com/tensorflow/tensorflow/issues if you depend'
' on this feature.',
'op_dict')
def import_graph_def(graph_def, input_map=None, return_elements=None,
name=None, op_dict=None, producer_op_list=None):
"""Imports the graph from `graph_def` into the current default `Graph`.
This function provides a way to import a serialized TensorFlow
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and extract individual objects in the `GraphDef` as
@{tf.Tensor} and @{tf.Operation} objects. Once extracted,
these objects are placed into the current default `Graph`. See
@{tf.Graph.as_graph_def} for a way to create a `GraphDef`
proto.
Args:
graph_def: A `GraphDef` proto containing operations to be imported into
the default graph.
input_map: A dictionary mapping input names (as strings) in `graph_def`
to `Tensor` objects. The values of the named input tensors in the
imported graph will be re-mapped to the respective `Tensor` values.
return_elements: A list of strings containing operation names in
`graph_def` that will be returned as `Operation` objects; and/or
tensor names in `graph_def` that will be returned as `Tensor` objects.
name: (Optional.) A prefix that will be prepended to the names in
`graph_def`. Note that this does not apply to imported function names.
Defaults to `"import"`.
op_dict: (Optional.) Deprecated, do not use.
producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped)
list of `OpDef`s used by the producer of the graph. If provided,
unrecognized attrs for ops in `graph_def` that have their default value
according to `producer_op_list` will be removed. This will allow some more
`GraphDef`s produced by later binaries to be accepted by earlier binaries.
Returns:
A list of `Operation` and/or `Tensor` objects from the imported graph,
corresponding to the names in `return_elements`.
Raises:
TypeError: If `graph_def` is not a `GraphDef` proto,
`input_map` is not a dictionary mapping strings to `Tensor` objects,
or `return_elements` is not a list of strings.
ValueError: If `input_map`, or `return_elements` contains names that
do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.
it refers to an unknown tensor).
"""
graph_def = _ProcessGraphDefParam(graph_def)
input_map = _ProcessInputMapParam(input_map)
return_elements = _ProcessReturnElementsParam(return_elements)
op_dict = op_def_registry.get_registered_ops()
if producer_op_list is None:
producer_op_dict = None
else:
producer_op_dict = {op.name: op for op in producer_op_list.op}
graph = ops.get_default_graph()
if graph._c_graph: # pylint: disable=protected-access
with ops.name_scope(name, 'import', input_map.values()) as scope:
# Save unique prefix generated by name_scope
if scope:
assert scope.endswith('/')
prefix = scope[:-1]
else:
prefix = ''
# Generate any input map tensors inside name scope
input_map = _ConvertInputMapValues(name, input_map)
scoped_options = c_api_util.ScopedTFImportGraphDefOptions()
options = scoped_options.options
_PopulateTFImportGraphDefOptions(options, prefix, input_map,
return_elements)
with c_api_util.tf_buffer(graph_def.SerializeToString()) as serialized:
try:
with errors.raise_exception_on_not_ok_status() as status:
results = c_api.TF_GraphImportGraphDefWithResults(
graph._c_graph, serialized, options, status) # pylint: disable=protected-access
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
_ProcessNewOps(graph)
# TODO(skyewm): error if unused input map key
if return_elements is None:
return None
else:
return _GatherReturnElements(return_elements, graph, results)
else:
g = graph
# Use a canonical representation for all tensor names.
input_map = {_CanonicalInputName(k): v for k, v in input_map.items()}
used_input_keys = set()
name_to_op = {}
# Add any functions defined in `graph_def` to `g`
if graph_def.library and graph_def.library.function:
# Copy op_dict so we don't clobber the original
op_dict = copy.copy(op_dict)
# pylint: disable=protected-access
# Note that we do not prepend `name` to the function name. The reasoning
# is that function names are similar to op definition names, which
# currently do not have a scoped name or namespace scheme.
functions = function._from_library(graph_def.library)
for f in functions:
f.add_to_graph(g)
op_dict[f.name] = f.definition.signature
# pylint: enable=protected-access
# LINT.IfChange
with ops.name_scope(name, 'import', input_map.values()) as scope:
# TODO(ashankar): Should this just copy over or should it do some
# more nuanced merging? For example, the graph may already have some
# marked "bad versions" and we don't want to lose those because of
# what's in graph_def.versions? The C++ ImportGraphDef does something
# more nuanced.
g.graph_def_versions.CopyFrom(graph_def.versions)
input_map = _ConvertInputMapValues(name, input_map)
# NOTE(mrry): We do this in two passes, because there may be a cycle in
# `graph_def`.
# 1. Add operations without their inputs.
for node in graph_def.node:
# Check to see if this op's name matches a previously seen op
if node.name in name_to_op:
raise ValueError('Duplicate name \'%s\' in GraphDef.' % node.name)
# Set any default attr values that aren't present.
if node.op not in op_dict:
raise ValueError('No op named %s in defined operations.' % node.op)
op_def = op_dict[node.op]
for attr_def in op_def.attr:
key = attr_def.name
if attr_def.HasField('default_value'):
value = node.attr[key]
if value is None or value.WhichOneof('value') is None:
node.attr[key].CopyFrom(attr_def.default_value)
if producer_op_dict:
# Remove any default attr values that aren't in op_def.
if node.op in producer_op_dict:
producer_op_def = producer_op_dict[node.op]
# We make a copy of node.attr to iterate through since we
# may modify node.attr inside the loop.
for key in list(node.attr):
if _FindAttrInOpDef(key, op_def) is None:
# No attr_def in consumer, look in producer.
attr_def = _FindAttrInOpDef(key, producer_op_def)
if (attr_def and attr_def.HasField('default_value') and
node.attr[key] == attr_def.default_value):
# Unknown attr had default value in producer, delete it
# so it can be understood by consumer.
del node.attr[key]
output_types = _OutputTypes(node, op_dict)
name_to_op[node.name] = g.create_op(
node.op, [], output_types, name=node.name, attrs=node.attr,
compute_shapes=False, compute_device=False,
op_def=op_def)
# Maps from a node to the ops it is colocated with, if colocation
# is specified in the attributes.
colocation_pairs = collections.defaultdict(list)
# 2. Add inputs to the operations.
for node in graph_def.node:
op = name_to_op[node.name]
input_types = _InputTypes(node, op_dict)
apply_device_function = True
# Rewrite the colocation attributes in the graph, since the
# names of new ops may have changed.
for key, value in op.node_def.attr.items():
if key == '_class':
class_values = value.list
new_class_values = []
for class_value in class_values.s:
if class_value.startswith(b'loc:@'):
op_to_bind_to = class_value[5:].decode()
# Find the op by its original name.
if op_to_bind_to not in name_to_op:
raise ValueError('Specified colocation to an op that '
'does not exist during import: %s in %s' % (
op_to_bind_to, node.name))
original_op = name_to_op[op_to_bind_to]
new_class_values.append(compat.as_bytes(
'loc:@' + original_op.name))
if op_to_bind_to != node.name:
# Keep track of this mapping for a later phase.
colocation_pairs[op].append(original_op)
# Don't apply this op's device function,
# the colocation constraint will ensure
# the proper device gets assigned at runtime.
apply_device_function = False
else:
new_class_values.append(class_value)
value.list.CopyFrom(attr_value_pb2.AttrValue.ListValue(
s=new_class_values))
# NOTE(mrry): We cannot use zip here because control inputs do not
# appear in the list of input_types.
for i, input_name in enumerate(
[_CanonicalInputName(x) for x in node.input]):
if _IsControlInput(input_name):
# (a) Input is a control input that should be taken from an op
# in "graph_def".
try:
source_op = name_to_op[input_name[1:]]
except KeyError:
raise ValueError(
_InvalidNodeMessage(
node,
'Control input %r not found in graph_def.'
% (input_name,)))
# pylint: disable=protected-access
op._add_control_input(source_op)
# pylint: enable=protected-access
else:
try:
input_type = input_types[i]
except IndexError:
raise ValueError(_InvalidNodeMessage(
node, 'More inputs specified (%r) than the op expects.'
% (input_name,)))
if input_name in input_map:
# (b) Input should be replaced by a tensor from the caller.
source_tensor = input_map[input_name]
used_input_keys.add(input_name)
else:
# (c) Input should be taken from an op in `graph_def`.
operation_name, output_index = _ParseTensorName(input_name)
try:
source_op = name_to_op[operation_name]
source_tensor = list(source_op.values())[output_index]
except (KeyError, IndexError):
raise ValueError(
_InvalidNodeMessage(
node,
'Input tensor %r not found in graph_def.'
% (input_name,)))
try:
# pylint: disable=protected-access
op._add_input(source_tensor, dtype=input_type)
# pylint: enable=protected-access
except TypeError as te:
raise ValueError(_InvalidNodeMessage(
node, 'Input tensor %r %s' % (input_name, te)))
# pylint: disable=protected-access
if op._input_dtypes != input_types:
raise ValueError(
_InvalidNodeMessage(
node,
'Input types mismatch (expected %r but got %r)'
% (', '.join(dtypes.as_dtype(x).name for x in input_types),
', '.join(x.name for x in op._input_dtypes))))
# pylint: enable=protected-access
if not g._is_function(op.type): # pylint: disable=protected-access
# Execute shape inference for this op.
# NOTE(mrry): If the graph contains a cycle, the full shape
# information may not be available for this op's inputs.
ops.set_shapes_for_outputs(op)
# For nodes with _output_shapes set, set the output shapes.
if '_output_shapes' in op.node_def.attr:
for i, output in enumerate(op.outputs):
dims = op.node_def.attr['_output_shapes'].list.shape[i]
output_shape = tensor_shape.TensorShape(
None if dims.unknown_rank else
[dim.size if dim.size >= 0 else None for dim in dims.dim])
try:
output.set_shape(output_shape)
except ValueError as e:
# If the output shape is incompatible with what is inferred
# by the graph for a very specific whitelist of ops, then we
# ignore this output shape. This can happen if there is a
# bug in the shape function for some operation, and the
# serialized graph def has the incorrect shape set when
# running on a newer binary with the fixed shape function.
# This is an escape hatch that allows us to correct shape
# functions that are not critical to correct execution but
# would cause graphs to fail if imported after correcting.
#
# This can be removed after 2017/03/08.
if op.type in ['RandomShuffleQueue', 'PaddingFIFOQueue',
'FIFOQueue', 'PriorityQueue', 'QueueSize',
'Stack', 'Barrier', 'BarrierReadySize',
'BarrierIncompleteSize', 'HashTable',
'MutableHashTable',
'MutableHashTableOfTensors', 'Mutex',
'CuckooTable', 'IndexTable',
'WholeFileReader', 'TextLineReader',
'FixedLengthRecordReader',
'TFRecordReader', 'IdentityReader',
'LMDBReader',
'RefSwitch', 'RefEnter', 'RefNextIteration',
'RefMerge', 'RefIdentity']:
pass
elif op.type in [
'ConditionalAccumulator', 'SparseConditionalAccumulator',
'Table'
]:
# This can be removed after 2017/04/24.
pass
else:
raise e
del op.node_def.attr['_output_shapes']
# NOTE(mrry): We do this after configuring the inputs, because
# the result of the device functions may depend on the inputs.
if apply_device_function:
with _MaybeDevice(node.device):
g._apply_device_functions(op) # pylint: disable=protected-access
# The following loop populates the device field of ops that are
# colocated with another op. This is implied by the colocation
# attribute, but we propagate the device field for completeness.
for op, coloc_op_list in colocation_pairs.items():
coloc_device = None
# Find any device in the list of colocated ops that have a
# device, if it exists. We assume that if multiple ops
# have devices, they refer to the same device. Otherwise, a
# runtime error will occur since the colocation property
# cannot be guaranteed.
#
# One possible improvement is to try to check for compatibility
# of all devices in this list at import time here, which would
# require implementing a compatibility function for device specs
# in python.
for coloc_op in coloc_op_list:
if coloc_op.device:
coloc_device = pydev.DeviceSpec.from_string(coloc_op.device)
break
if coloc_device:
op._set_device(coloc_device) # pylint: disable=protected-access
# Treat input mappings that don't appear in the graph as an error,
# because they are likely to be due to a typo.
def _IsImportedNodeOutput(tensor_name):
operation_name, output_index = _ParseTensorName(tensor_name)
try:
return output_index < len(name_to_op[operation_name].outputs)
except KeyError:
return False
absent_input_keys = [
k for k in frozenset(input_map.keys()).difference(used_input_keys)
if not _IsImportedNodeOutput(k)]
if absent_input_keys:
raise ValueError(
'Attempted to map inputs that were not found in graph_def: [%s]'
% ', '.join(absent_input_keys))
if return_elements is None:
return None
else:
ret = []
for name in return_elements:
name = compat.as_str(name)
if ':' in name:
try:
operation_name, output_index = _ParseTensorName(name)
ret.append(name_to_op[operation_name].outputs[output_index])
except (ValueError, KeyError, IndexError):
raise ValueError(
'Requested return_element %r not found in graph_def.' % name)
else:
try:
ret.append(name_to_op[name])
except KeyError:
raise ValueError(
'Requested return_element %r not found in graph_def.' % name)
return ret
# LINT.ThenChange(//tensorflow/core/graph/graph_constructor.cc)
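# Illustrative usage sketch (an assumption, not part of this module): importing
# a serialized GraphDef and retrieving a tensor by name. The tensor names
# 'input:0' and 'output:0' are placeholders for whatever the graph defines.
#
# with ops.Graph().as_default():
#     output, = import_graph_def(graph_def,
#                                input_map={'input:0': replacement_tensor},
#                                return_elements=['output:0'])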
| 40.736054
| 109
| 0.654187
|
45ed163a1aa6637c65bd6c74b1895b8db9b1e8da
| 47,044
|
py
|
Python
|
bot3.py
|
banban/CreativeTradeInBot
|
56938229446983e91d851a3cc8e6ef2a01ace43b
|
[
"MIT"
] | null | null | null |
bot3.py
|
banban/CreativeTradeInBot
|
56938229446983e91d851a3cc8e6ef2a01ace43b
|
[
"MIT"
] | null | null | null |
bot3.py
|
banban/CreativeTradeInBot
|
56938229446983e91d851a3cc8e6ef2a01ace43b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# pylint: disable=C0116,W0613
# This program is dedicated to the public domain under the CC0 license.
"""
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using nested ConversationHandlers.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import sys
import os
import logging
import pymongo
from pymongo.message import query
from bson.objectid import ObjectId
import constants as C
from datetime import datetime
import json
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from typing import Tuple, Dict, Any
from telegram import (InlineKeyboardMarkup, InlineKeyboardButton, Update, chat)
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
CallbackQueryHandler,
CallbackContext,
)
from telegram.utils import helpers
#from telegram.utils.helpers import escape_markdown, helpers
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.WARNING
)
logger = logging.getLogger(__name__)
# State definitions for top level conversation
SELECTING_ACTION, TRADING, EDITING, DOWNLOADING = map(chr, range(4))
# State definitions for item
SELECTING_FEATURE, SELECTING_CATEGORY, TYPING, SAVING = map(chr, range(4, 8))
# Meta states
STOPPING, SHOWING, REPLYING, CALLING, SEARCHING, TRACKING, PAGE_MASSAGES, PAGE_ITEMS, PREV_PAGE, NEXT_PAGE = map(chr, range(8, 18))
# Shortcut for ConversationHandler.END
END = ConversationHandler.END
# Different constants for editing item
(
FEATURE,
NAME,
VALUE,
DESCRIPTION,
CATEGORY,
#CATEGORY_VALUE,
IMAGE,
DOCUMENT,
VOICE
) = map(chr, range(18, 26))
class Bot:
# static helpers
@staticmethod
def facts_to_str(user_data: Dict[str, str]) -> str:
"""Helper function for formatting the gathered user info."""
excludeKeys = {PREV_PAGE, NEXT_PAGE, PAGE_MASSAGES, PAGE_ITEMS, TRADING, REPLYING, CALLING, VOICE, '_id', 'chat_id', 'Images', 'Files'}
#translation_table = dict.fromkeys(map(ord, '!$*-`_()[].'), "\\")
#value.translate(translation_table)
facts = [f'*{key}*: `{helpers.escape_markdown(str(value), version=2)}`' for key, value in user_data.items() if key not in excludeKeys]
result = "\n".join(facts).join(['\n', '\n'])
#print(result)
return result
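# Illustrative example (an assumption): facts_to_str({'Name': 'Bike (red)'})
# renders "*Name*: `Bike \(red\)`" -- MarkdownV2 escaping applied to the value.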
@staticmethod
def facts_to_save(user_data: Dict[str, Any]) -> Dict[str, Any]:
"""Helper function for saving in database."""
excludeKeys = {PREV_PAGE, NEXT_PAGE, PAGE_MASSAGES, PAGE_ITEMS, TRADING, REPLYING, CALLING, VOICE, '_id'}
return {x: user_data[x] for x in user_data if x not in excludeKeys}
@staticmethod
def get_value_from_string(data):
#int(''.join(c for c in s if c.isdigit()))
#import re
#re.findall("\d+\.\d+", "Current Level: 13.4 db.")
value = 0.0
if data is None:
return value
try:
value = float(str(data).replace(',', '').replace('$', '').strip(' '))
except:
pass
return value
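# Illustrative checks (assumptions): get_value_from_string("$1,250.50") -> 1250.5,
# and any unparsable input (e.g. "N/A") falls back to 0.0.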
# instance members
def __init__(self):
self.myclient = pymongo.MongoClient(C.MONGODB_CONNECTION_URL)
self.botDB = self.myclient["botDB"]
authenticator = IAMAuthenticator(C.IBM_KEY)
self.speech_to_text = SpeechToTextV1(authenticator=authenticator)
self.speech_to_text.set_service_url(C.IBM_SERVICE_URL)
#InsecureRequestWarning: Unverified HTTPS request is being made to host 'api.au-syd.speech-to-text.watson.cloud.ibm.com'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/1.2
#self.speech_to_text.set_disable_ssl_verification(True)
def remove_page_messages(self, update: Update, context: CallbackContext):
#print("remove_page_messages")
try:
_chat_id = update.callback_query.message.chat_id
except:
_chat_id = update.message.chat_id
#print("remove_page_messages: 2")
chat_data = context.chat_data
if PAGE_MASSAGES in chat_data and _chat_id:
#print("remove_page_messages: 3")
for _id in chat_data[PAGE_MASSAGES]:
try:
context.bot.delete_message(chat_id=_chat_id, message_id=_id)
except:
pass
chat_data[PAGE_MASSAGES] = []
#print("remove_page_messages: 4")
# Top level conversation callbacks
def start(self, update: Update, context: CallbackContext) -> str:
"""Select an action: Adding parent/child or show data."""
#print("start")
#print("update.callback_query:"+ str(bool(update.callback_query)))
#print("update.message:"+ str(bool(update.message)))
self.remove_page_messages(update, context)
buttons = [
[
InlineKeyboardButton(text='✏️Update Item', callback_data=str(EDITING)),
InlineKeyboardButton(text='ℹ️Show Item', callback_data=str(SHOWING)),
],
[
InlineKeyboardButton(text=f"🔎Search & Trade", callback_data=str(SEARCHING)),
InlineKeyboardButton(text=f"⛓Trades History", callback_data=str(TRACKING)),
],
[
InlineKeyboardButton(text='⏹Exit Conversation', callback_data=str(END)),
],
]
keyboard = InlineKeyboardMarkup(buttons)
_text="To abort session, type /stop or click [Stop]."
# If we're calling, don't need to send a new message
if bool(update.callback_query):
#print("start:CALLING")
if (_text != update.callback_query.message.text):
update.callback_query.answer()
update.callback_query.edit_message_text(text=_text, reply_markup=keyboard)
elif bool(update.message):
#print("start:REPLYING")
update.message.reply_text(
"Hi {}, Let's trade-in!".format(update.message.from_user.first_name)
)
update.message.reply_text(text=_text, reply_markup=keyboard)
#print("start:redirecting")
return SELECTING_ACTION
def get_user_info(self, chat_id, context: CallbackContext):
_chat = context.bot.get_chat(chat_id=chat_id)
result = ""
if (_chat.username is not None and _chat.username != "None"):
result = _chat.username
elif (_chat.first_name is not None and _chat.first_name != "None"):
result = f"{_chat.first_name} {_chat.last_name}"
else:
result = str(chat_id)
return result
def history(self, update: Update, context: CallbackContext) -> str:
#print("history of otcoming trades")
self.remove_page_messages(update, context)
try:
chat_id = update.callback_query.message.chat_id
except:
chat_id = update.message.chat_id
count = self.botDB["transactions"].count_documents( { "from_chat_id" : chat_id } )
trans = self.botDB["transactions"].find( { "from_chat_id" : chat_id } )
chat_data = context.chat_data
user_data = context.user_data
#print(f"count:{count}, offset:{offset}")
buttons = []
# if (offset > 0 and offset >= PAGE_SIZE):
# buttons.append(InlineKeyboardButton(text='⬅️Prev page', callback_data=str(PREV_PAGE)))
# if (offset + PAGE_SIZE < count):
# buttons.append(InlineKeyboardButton(text='➡️Next page', callback_data=str(NEXT_PAGE)))
buttons.append(InlineKeyboardButton(text='🔙Back to main menu', callback_data=str(END)))
keyboard = InlineKeyboardMarkup([buttons])
if bool(update.callback_query):
_text = (f"{count} historical trade-in(s) found")
if (_text != update.callback_query.message.text):
update.callback_query.answer()
update.callback_query.edit_message_text(text=_text, reply_markup=keyboard)
transNo = 0
for tran in trans:
item = self.botDB["items"].find_one({"_id": ObjectId(tran['item_id'])})
_text = ""
_image_ids = ""
transNo +=1
facts={'From': self.get_user_info(tran['from_chat_id'], context), 'To': self.get_user_info(tran['to_chat_id'], context), 'Trans No': str(transNo), 'Trade Date': str(tran['trans_date'])[0:16]} #"%d/%m/%y %H:%M"
#facts["To owner"] = str(tran['to_chat_id'])
for key, value in item.items():
facts[key] = str(value)
_text = Bot.facts_to_str(facts)
item_message = context.bot.send_message(chat_id=update.callback_query.message.chat_id
,text=_text
#, reply_markup=InlineKeyboardMarkup([[
# InlineKeyboardButton(text='ℹ️SHOWING', callback_data=str(SAVING))
# ]])
#, reply_to_message_id=update.callback_query.message.message_id
, parse_mode='MarkdownV2') #ParseMode.MARKDOWN_V2
chat_data[PAGE_MASSAGES].append(item_message.message_id)
#context.bot.send_media_group(chat_id=get_chat_id(update, context), media=list)
if (_image_ids != ""):
for image_id in _image_ids.split('|'):
if (image_id != ""):
try:
image_message = context.bot.send_photo(chat_id=update.callback_query.message.chat_id
, reply_to_message_id=item_message.message_id
, photo=image_id
, caption="Attached image")
chat_data[PAGE_MASSAGES].append(image_message.message_id)
except:
pass
return TRACKING #SEARCHING is ok as well
#not implemented yet
def search_text_filter(self, update: Update, context: CallbackContext) -> str:
chat_data = context.chat_data
user_data = context.user_data
return SEARCHING
def search(self, update: Update, context: CallbackContext) -> str:
#print("search")
self.remove_page_messages(update, context)
chat_data = context.chat_data
user_data = context.user_data
PAGE_SIZE = 5
try:
chat_id = update.callback_query.message.chat_id
except:
chat_id = update.message.chat_id
trans = self.botDB["transactions"].find( { "from_chat_id" : chat_id } , projection = { "item_id" : True } )
exclude_preowned_items = [None]
for tran in trans:
exclude_preowned_items.append(tran['item_id'])
condition = {"$and": [
{ "chat_id" : { "$nin": [None, chat_id] } }, #exclude currently owned item
{ "_id" : { "$nin": exclude_preowned_items } } #exclude preowned items
]
}
#DEBUG condition = { "_id" : { "$ne": None } }
count = self.botDB["items"].count_documents(condition)
if PREV_PAGE not in user_data:
user_data[PREV_PAGE] = 0
if PAGE_ITEMS not in user_data:
user_data[PAGE_ITEMS] = {}
offset = int(user_data[PREV_PAGE])
if (update.callback_query.data == PREV_PAGE):
offset = offset - PAGE_SIZE
elif (update.callback_query.data == NEXT_PAGE):
offset = offset + PAGE_SIZE
user_data[PREV_PAGE] = offset
#print(f"count:{count}, offset:{offset}")
buttons = []
if (offset > 0 and offset >= PAGE_SIZE):
buttons.append(InlineKeyboardButton(text='⬅️Prev page', callback_data=str(PREV_PAGE)))
if (offset + PAGE_SIZE < count):
buttons.append(InlineKeyboardButton(text='➡️Next page', callback_data=str(NEXT_PAGE)))
buttons.append(InlineKeyboardButton(text='🔙Back to main menu', callback_data=str(END)))
keyboard = InlineKeyboardMarkup([buttons])
items = self.botDB["items"].find(condition).skip(offset).limit(PAGE_SIZE)
if bool(update.callback_query):
pagesTotal = int(count/PAGE_SIZE)
if (count % PAGE_SIZE)>0:
pagesTotal +=1
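# (Illustrative note: the two lines above are ceiling division -- e.g.
# count=12 with PAGE_SIZE=5 gives pagesTotal=3.)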
_text = (f"Page {int(offset/PAGE_SIZE) + 1} of {pagesTotal}. {count} item(s) found in total.") #You can type to search by text
if (_text != update.callback_query.message.text):
update.callback_query.answer()
update.callback_query.edit_message_text(text=_text, reply_markup=keyboard)
itemNo = int(offset)  # offset already counts items (it is passed to skip() above), not pages
for item in items:
_text = ""
_image_ids = ""
itemNo +=1
facts={'Item No': str(itemNo)}
for key, value in item.items():
facts[key] = str(value)
if (key =='Images'):
_image_ids = str(value)
if (key =='chat_id'):
facts['Owner'] = self.get_user_info(value, context)
_text = Bot.facts_to_str(facts)
item_message = context.bot.send_message(chat_id=update.callback_query.message.chat_id
,text=_text
, reply_markup=InlineKeyboardMarkup([[
InlineKeyboardButton(text='🛒Trade Item No: '+str(itemNo), callback_data=str(SAVING))
]])
#, reply_to_message_id=update.callback_query.message.message_id
, parse_mode='MarkdownV2') #ParseMode.MARKDOWN_V2
chat_data[PAGE_MASSAGES].append(item_message.message_id)
user_data[PAGE_ITEMS][item_message.message_id] = item['_id']
#context.bot.send_media_group(chat_id=get_chat_id(update, context), media=list)
if (_image_ids != ""):
for image_id in _image_ids.split('|'):
if (image_id != ""):
try:
image_message = context.bot.send_photo(chat_id=update.callback_query.message.chat_id
, reply_to_message_id=item_message.message_id
, photo=image_id
, caption="Attached image")
chat_data[PAGE_MASSAGES].append(image_message.message_id)
except:
pass
return SEARCHING
def item_details(self, update: Update, context: CallbackContext) -> str:
"""Pretty print gathered data."""
#print("item_details")
item = self.botDB["items"].find_one({"chat_id": update.callback_query.message.chat_id})
chat_data = context.chat_data
if (item is not None and update.callback_query.data == DOWNLOADING):
_ids = str(item['Files']).strip('|').split('|')
for file_id in _ids:
if (file_id != ""):
try:
message = context.bot.sendDocument(chat_id=update.callback_query.message.chat_id,
document=file_id,
caption = 'Attached file')
chat_data[PAGE_MASSAGES].append(message.message_id)
except:
pass
return SHOWING
buttons = [InlineKeyboardButton(text='🔙Back', callback_data=str(END))]
if (item is None):
    _text='*No information yet*\. Please set up your item first'
else:
    _text="Here are your item details:"
self.remove_page_messages(update, context)
facts={}
for key, value in item.items():
facts[key] = str(value)
if (key == 'Files'):
buttons.insert(0, InlineKeyboardButton(text='💾Download', callback_data=str(DOWNLOADING)))
elif (key =='Images'):
_ids = str(value).strip('|').split('|')
for image_id in _ids:
if (image_id != ""):
try:
message = context.bot.send_photo(chat_id=update.callback_query.message.chat_id
, reply_to_message_id=update.callback_query.message.message_id
, photo=image_id
, caption="Attached image")
chat_data[PAGE_MASSAGES].append(message.message_id)
except:
pass
_text += Bot.facts_to_str(facts)
url = helpers.create_deep_linked_url(bot_username=context.bot.name.strip('@'), payload=f"{item['_id']}") #, group=False
#https://t.me/{context.bot.name.strip('@')}?start={item['_id']}
_text += f"\nUse [▶️this link]({url}) to promote your item"
keyboard = InlineKeyboardMarkup([buttons])
if bool(update.callback_query):
if (_text != update.callback_query.message.text):
update.callback_query.answer()
update.callback_query.edit_message_text(text=_text
, reply_markup=keyboard
, parse_mode='MarkdownV2') #ParseMode.MARKDOWN_V2
elif bool(update.message):
update.message.reply_text(text=_text
, reply_markup=keyboard
, parse_mode='MarkdownV2') #ParseMode.MARKDOWN_V2
#user_data[CALLING] = True
return SHOWING
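# Sketch of the URL shape produced by helpers.create_deep_linked_url above —
# Telegram deep links follow https://t.me/<bot_username>?start=<payload>
# (the bot name and payload below are made-up values):
def _deep_link(bot_username, payload):
    return f"https://t.me/{bot_username}?start={payload}"

assert _deep_link("CreativeTradeInBot", "60e91064f508f554a10a3847") == \
    "https://t.me/CreativeTradeInBot?start=60e91064f508f554a10a3847"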
def item_edit(self, update: Update, context: CallbackContext):
#print('item_edit')
edit_item_keyboard = InlineKeyboardMarkup([
[
InlineKeyboardButton(text='❕Name', callback_data=str(NAME)),
InlineKeyboardButton(text='❕Value', callback_data=str(VALUE)),
],
[
InlineKeyboardButton(text='Description', callback_data=str(DESCRIPTION)),
InlineKeyboardButton(text='Other Category', callback_data=str(CATEGORY)),
],
[
InlineKeyboardButton(text='💾Save', callback_data=str(SAVING)),
InlineKeyboardButton(text='🔙Back', callback_data=str(END)),
]
])
chat_data = context.chat_data
user_data = context.user_data
#print ('user_data.get(CALLING):' + str(user_data.get(CALLING)))
_text="Did not get chages yet :("
if bool(update.callback_query):
_text = "Update your item details.\nYou can also attach photos and files"
if (_text != update.callback_query.message.text):
update.callback_query.answer()
update.callback_query.edit_message_text(text=_text, reply_markup=edit_item_keyboard)
elif bool(update.message):
if VOICE in user_data:
_text = "Oh, did you say this? "+ user_data[VOICE]
update.message.reply_text(text=_text, reply_markup=edit_item_keyboard, parse_mode='MarkdownV2')
del user_data[VOICE]
else:
if ('Files' in user_data and bool(update.message.document)):
_text = "\nThe file will be visible for *you only* until new owner\!"
reply_to_file = context.bot.send_message(chat_id=update.message.chat_id
, reply_to_message_id=update.message.message_id
, text=_text, parse_mode='MarkdownV2')
chat_data[PAGE_MASSAGES].append(reply_to_file.message_id)
elif ('Images' in user_data and bool(update.message.photo)):
_text = "The photo will be visible *for all*\!"
reply_to_file = context.bot.send_message(chat_id=update.message.chat_id
, reply_to_message_id=update.message.message_id
, text=_text, parse_mode='MarkdownV2')
chat_data[PAGE_MASSAGES].append(reply_to_file.message_id)
else:
_text = ("Got it\! Keep changing and click *💾Save* to finish update, or *Back* to cancel and return"
f"{Bot.facts_to_str(user_data)}")
update.message.reply_text(text=_text, reply_markup=edit_item_keyboard, parse_mode='MarkdownV2')
return SELECTING_FEATURE
def regular_choice(self, update: Update, context: CallbackContext) -> int:
"""Ask the user for info about the selected predefined choice."""
if (update.callback_query.data == NAME):
text = "Name"
elif (update.callback_query.data == VALUE):
text = "Value"
elif (update.callback_query.data == DESCRIPTION):
text = "Description"
else: #Category
text = "Unknown"
#print('regular_choice:'+ text)
user_data = context.user_data
user_data[CATEGORY] = text
update.callback_query.answer()
update.callback_query.edit_message_text(f'Item {text.lower()}? Please type the value:')
return TYPING
def custom_choice(self, update: Update, context: CallbackContext) -> int:
#print('custom_choice:'+update.callback_query.id)
"""Ask the user for a description of a custom category."""
update.callback_query.answer()
update.callback_query.edit_message_text(text="Describe the category, for example *Colour* or *Size*", parse_mode='MarkdownV2')
return SELECTING_CATEGORY
def custom_text(self, update: Update, context: CallbackContext) -> int:
"""Ask the user for info about the selected predefined choice."""
text = update.message.text
#print('custom_text:'+text)
user_data = context.user_data
user_data[CATEGORY] = text
update.message.reply_text(f'Item {text.lower()}? Please type the value:')
return TYPING
def received_information(self, update: Update, context: CallbackContext) -> int:
"""Store info provided by user and ask for the next category."""
user_data = context.user_data
text = update.message.text
#print('received_information:'+ text)
if CATEGORY in user_data:
category = user_data[CATEGORY]
user_data[category] = text
del user_data[CATEGORY]
#go to the start again
return self.item_edit(update, context)
def edit_commit(self, update: Update, context: CallbackContext) -> int:
"""Display the gathered info and end the conversation."""
#print("edit_commit")
user_data = context.user_data
if CATEGORY in user_data:
del user_data[CATEGORY]
facts = Bot.facts_to_save(user_data)
facts['chat_id'] = update.callback_query.message.chat_id
items_coll = self.botDB["items"]
item = items_coll.find_one({"chat_id": facts['chat_id']})
if (item is None):
inserted_item = items_coll.insert_one(facts)
if (bool(inserted_item)):
_id = str(inserted_item.inserted_id)
update.callback_query.answer()
update.callback_query.edit_message_text(text=f"The item {_id} is inserted")
elif (bool(item)):
_id = item['_id']
edited_item = items_coll.find_one_and_update(filter={"_id": ObjectId(_id), "chat_id": facts['chat_id']},
update={"$set": facts}, upsert=True, return_document=True)
if (bool(edited_item)):
update.callback_query.answer()
update.callback_query.edit_message_text(text=f"👏Welldone! The item is updated and ready to trade")
user_data.clear()
self.remove_page_messages(update, context)
return self.start(update, context)
#return END
def received_photo(self, update: Update, context: CallbackContext):
"""
Telegram delivers a photo in multiple resolutions; the last PhotoSize in the list is the highest one
"""
user_data = context.user_data
chat_data = context.chat_data
if 'Images' in user_data:
user_data['Images'] += "|"
else:
user_data['Images'] = ""
if update.message.photo[-1].file_id not in user_data['Images']: #photo[-1] is the highest-resolution PhotoSize
    user_data['Images'] += (update.message.photo[-1].file_id).strip('|')
chat_data[PAGE_MASSAGES].append(update.message.message_id)
return self.item_edit(update, context)
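# Illustration of the PhotoSize ordering assumed above: Telegram lists sizes
# smallest-to-largest, so index -1 is the best resolution (plain tuples stand in
# for telegram.PhotoSize objects; the ids and widths are made up):
_sizes = [("small_id", 90), ("mid_id", 320), ("big_id", 800)]
assert _sizes[-1][0] == "big_id"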
def received_voice(self, update: Update, context: CallbackContext):
"""
bot sends audio to IBM Speech to Text:
https://cloud.ibm.com/catalog/services/speech-to-text
https://cloud.ibm.com/apidocs/speech-to-text?code=python#recognize
"""
# print(update.message.voice)
#voice_content = update.message.voice.get_file()
# context.bot.get_file(update.message.voice.file_id)
voice_content = update.message.voice.get_file()
chat_data = context.chat_data
chat_data[PAGE_MASSAGES].append(update.message.message_id)
user_data = context.user_data
#try:
response = self.speech_to_text.recognize(
    audio=voice_content.download_as_bytearray(),
    #https://cloud.ibm.com/apidocs/speech-to-text-icp?code=python
    content_type='audio/ogg;codecs=opus', #Telegram voice notes are OGG/Opus; 'audio/l16' would also require a rate parameter
    word_alternatives_threshold=0.9,
    keywords=['name', 'value', 'description', 'category', 'save', 'back'],
    keywords_threshold=0.5
)
#get_result() returns a plain dict, so the HTTP status lives on the DetailedResponse itself
speech_recognition_results = response.get_result()
if response.get_status_code() != 200 or not speech_recognition_results:
    user_data[VOICE] = "Sorry, I could not hear you well :("
else:
    user_data[VOICE] = str(json.dumps(speech_recognition_results, indent=2))
    print("VOICE: " + user_data[VOICE])
#except:
# print("Unexpected voice error:", sys.exc_info()[0])
# pass
return self.item_edit(update, context)
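# Sketch of extracting a plain transcript from a Watson recognize() result —
# the dict layout (results -> alternatives -> transcript) follows the documented
# response shape; the sample values below are made up:
_sample_result = {
    "results": [
        {"alternatives": [{"transcript": "name laptop ", "confidence": 0.92}]}
    ]
}
_transcript = " ".join(
    r["alternatives"][0]["transcript"].strip() for r in _sample_result["results"]
)
assert _transcript == "name laptop"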
def trade_command(self, update: Update, context: CallbackContext):
print('trade_command')
user_data = context.user_data
chat_data = context.chat_data
validation_error=""
item_id = None
if (len(context.args) <= 0):
validation_error = "❌Please provide correct item id for direct trade"
else:
item_id = context.args[0]
print('trade_command item_id:'+str(item_id))
try:
if bool(update.callback_query):
chat_id = update.callback_query.message.chat_id
elif bool(update.message):
chat_id = update.message.chat_id
except:
chat_id = None
print('trade_command chat_id:'+str(chat_id))
item = None
if (item_id != None):
try:
item = self.botDB["items"].find_one(filter={"_id": ObjectId(item_id)})
except:
item = None
if (item is None):
validation_error = "❌Sorry, this item id is incorrect or not avaialable anymore!"
elif (item['chat_id'] == chat_id):
validation_error = "❌Sorry, this item is already owned by you"
if (validation_error !=""):
#print ("trade_command validation error:"+validation_error)
reply_message = context.bot.send_message(chat_id=chat_id ,text=validation_error)
chat_data[PAGE_MASSAGES].append(reply_message.message_id)
return SEARCHING
if PAGE_ITEMS not in user_data:
user_data[PAGE_ITEMS] = {}
self.remove_page_messages(update, context)
facts={}
_image_ids =""
for key, value in item.items():
facts[key] = str(value)
if (key == 'Images'):
_image_ids = str(value).strip('|')
_text = Bot.facts_to_str(facts)
if (_text != ""):
_text = "Here is item details:"+ _text
keyboard = InlineKeyboardMarkup([
[
InlineKeyboardButton(text='🛒Trade', callback_data=str(SAVING)),
InlineKeyboardButton(text='🔙Back', callback_data=str(END)),
]
])
#print('trade_command _text:'+_text)
message_id = 0
if (bool(update.callback_query) and update.callback_query.message is not None and update.callback_query.message.text != ""):
print("trade_command response1")
if (_text != update.callback_query.message.text):
update.callback_query.answer()
update.callback_query.edit_message_text(text=_text, reply_markup=keyboard, parse_mode='MarkdownV2')
message_id = update.callback_query.message.message_id
elif bool(update.message):
print("trade_command response2")
update.message.reply_text(text=_text, reply_markup=keyboard, parse_mode='MarkdownV2')
message_id = update.message.message_id
else:
print("trade_command response3")
reply_message = context.bot.send_message(chat_id=chat_id, text=_text, reply_markup=keyboard, parse_mode='MarkdownV2')
chat_data[PAGE_MASSAGES].append(reply_message.message_id)
message_id = reply_message.message_id
if (message_id > 0 and _image_ids !=""):
for image_id in _image_ids.split('|'):
if (image_id != ""):
image_message = context.bot.send_photo(chat_id=chat_id
, reply_to_message_id=message_id
                                                    , photo=image_id #was _image_ids[0], which is the first character of the joined string, not a file id
, caption="Attached image")
chat_data[PAGE_MASSAGES].append(image_message.message_id)
user_data[PAGE_ITEMS][message_id] = item_id
print("trade_command redirect:"+ item_id)
return SEARCHING #self.trade_commit(update, context)
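# Minimal sketch of validating a 24-hex-char item id before querying Mongo —
# bson ships with pymongo, which this module already uses (the sample id is made up):
def _parse_item_id(raw):
    from bson import ObjectId
    from bson.errors import InvalidId
    try:
        return ObjectId(raw)
    except (InvalidId, TypeError):
        return None

assert _parse_item_id("60e91064f508f554a10a3847") is not None
assert _parse_item_id("not-an-id") is None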
def trade_commit(self, update: Update, context: CallbackContext) -> int:
"""Display the gathered info and end the conversation."""
print("trade_commit")
user_data = context.user_data
chat_data = context.chat_data
self.remove_page_messages(update, context)
chat_id1 = None
item1 = None
item2 = None
message_id = None
validation_error=""
if PAGE_ITEMS not in user_data:
user_data[PAGE_ITEMS] = {}
try:
if bool(update.callback_query):
chat_id1 = update.callback_query.message.chat_id
message_id = update.callback_query.message.message_id
elif bool(update.message):
chat_id1 = update.message.chat_id
message_id = update.message.message_id
except:
chat_id1 = None
print('trade_commit chat_id:'+str(chat_id1))
items_coll = self.botDB["items"]
if (chat_id1 != None):
try:
item1 = items_coll.find_one({"chat_id": chat_id1})
except:
pass
if (message_id != None):
try:
if message_id in user_data[PAGE_ITEMS]:
item2 = items_coll.find_one(filter={"_id": ObjectId(user_data[PAGE_ITEMS][message_id])})
del user_data[PAGE_ITEMS][message_id]
except:
item2=None
if (item1 is None):
validation_error = "❌Sorry, you have no item to trade-in with others yet, please update your item first!"
elif (item2 is None):
validation_error = "❌Sorry, that item is not avaialable anymore!"
elif (item1['_id'] == item2['_id']):
validation_error = "❌Sorry, the item for trade is the same"
elif (item1['chat_id'] == item2['chat_id']):
validation_error = "❌Sorry, both items have the same owner"
elif (Bot.get_value_from_string(item1['Value']) < Bot.get_value_from_string(item2['Value'])):
validation_error = "❌Sorry, your item has lower value."
else:
#check if owner2 traded item1 before
trade_count = self.botDB["transactions"].count_documents(filter={"item_id": item1['_id'], "from_chat_id": item2['chat_id']})
if (trade_count>0):
validation_error = f"❌Sorry, Your item {item1['Name']} was already preowned by that person and can't be trade-in again"
else:
#check if owner1 traded item2 before
trade_count = self.botDB["transactions"].count_documents(filter={"item_id": item2['_id'], "from_chat_id": item1['chat_id']})
if (trade_count>0):
validation_error = f"❌Sorry, that {item2['Name']} was already preowned by you and can't be trade-in again"
if (validation_error !=""):
reply_message = context.bot.send_message(chat_id=item1['chat_id'] ,text=validation_error)
chat_data[PAGE_MASSAGES].append(reply_message.message_id)
return SEARCHING
trans_coll = self.botDB["transactions"]
trans1 = dict() #Dict[str, Any]
trans1['trans_date'] = datetime.now()
trans1['item_id'] = item1['_id']
trans1['from_chat_id'] = item1['chat_id']
trans1['to_chat_id'] = item2['chat_id']
trans2 = dict()
trans2['trans_date'] = trans1['trans_date']
trans2['item_id'] = item2['_id']
trans2['from_chat_id'] = item2['chat_id']
trans2['to_chat_id'] = item1['chat_id']
#print("start multi-document transaction")
#https://pymongo.readthedocs.io/en/stable/api/pymongo/client_session.html
with self.myclient.start_session() as session:
with session.start_transaction():
reply1 = trans_coll.insert_many([trans1, trans2], session=session)
if (reply1.acknowledged==True and len(reply1.inserted_ids)==2):
items_coll.find_one_and_update(filter={'_id': ObjectId(item1['_id']), 'chat_id': item1['chat_id']},
update={'$set': {'chat_id': item2['chat_id'] } },
projection = { "_id" : True },
session=session) #, upsert=True, return_document = True
items_coll.find_one_and_update(filter={'_id': ObjectId(item2['_id']), 'chat_id': item2['chat_id']},
update={'$set': {'chat_id': item1['chat_id'] } },
projection = { "_id" : True },
session=session) #, upsert=True, return_document = True
session.commit_transaction()
#print("transaction commited")
try: #try to notify owners, do not use answer/reply, parent message does not already exist
_text = "👏The trade-in is done! Check your new item details by running /start command"
reply_message = context.bot.send_message(chat_id=item1['chat_id'], text=_text)
chat_data[PAGE_MASSAGES].append(reply_message.message_id)
context.bot.send_message(chat_id=item2['chat_id'] ,text=_text)
except:
pass
#print("end transaction")
user_data.clear()
#return self.start(update, context)
return END
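# The ownership swap at the heart of trade_commit(), in miniature — a pure-Python
# stand-in for the two find_one_and_update calls inside the Mongo transaction
# (the ids and chat ids below are made up):
_item1 = {"_id": 1, "chat_id": 111}
_item2 = {"_id": 2, "chat_id": 222}
_item1["chat_id"], _item2["chat_id"] = _item2["chat_id"], _item1["chat_id"]
assert (_item1["chat_id"], _item2["chat_id"]) == (222, 111)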
def received_document(self, update: Update, context: CallbackContext):
"""
Telegram delivers one document per message, so this handler runs once per attached file
"""
user_data = context.user_data
chat_data = context.chat_data
fileType = 'Files'
if update.message.document.file_name.lower().split('.')[-1] in ['jpg','jpeg','png','bmp','tiff']:
fileType = 'Images'
if fileType not in user_data:
user_data[fileType] = ""
else:
user_data[fileType] += "|"
if update.message.document.file_id not in user_data[fileType]:
user_data[fileType] += (update.message.document.file_id).strip('|')
chat_data[PAGE_MASSAGES].append(update.message.message_id)
return self.item_edit(update, context)
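# Sketch of the extension-based routing used in received_document() above
# (the file names are made up):
def _bucket(file_name):
    ext = file_name.lower().rsplit('.', 1)[-1]
    return 'Images' if ext in ('jpg', 'jpeg', 'png', 'bmp', 'tiff') else 'Files'

assert _bucket("photo.JPG") == 'Images'
assert _bucket("manual.pdf") == 'Files'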
def end(self, update: Update, context: CallbackContext) -> int:
"""End conversation from InlineKeyboardButton."""
_chat_id = 0
try:
_chat_id = update.callback_query.message.chat_id
except:
_chat_id = update.message.chat_id
context.user_data.clear()
self.remove_page_messages(update, context)
if (_chat_id> 0):
context.bot.send_message(chat_id = _chat_id
,text='🏁Thank you. See you next time!')
return END
#stop and stop_nested are similar but operates on different levels
def stop(self, update: Update, context: CallbackContext) -> int:
"""End Conversation by command."""
context.user_data.clear()
self.remove_page_messages(update, context)
update.message.reply_text('🏁Okay, bye')
return END
def error(self, update: Update, context: CallbackContext):
logger.error(f"Update: {update}; caused error: {context.error}")
def handle_message(self, update: Update, context: CallbackContext):
# context.bot.edit_message_text(chat_id=update.message.chat.id,
# text="Here are the values of stringList", message_id=update.message.message_id,
# reply_markup=makeKeyboard(), parse_mode='HTML')
user_message = str(update.message.text).lower()
if user_message.strip('!') in ("hello", "hi"):
response = f"🤟G'Day {update.message.from_user.first_name}!"
elif user_message.strip('?') in ("who are you", "who is this", "what is this"):
# creative trade-in bot
response = (f"🤖I am {context.bot.name}. "
            "This marketplace lets you trade in items of identical (or higher) value. "
            "Type /start to begin a new session, or /help for more options")
else:
response = "😏hmm, looks like you need some /help"
update.message.reply_text(response)
def help_command(self, update: Update, context: CallbackContext):
#chat_id = update.message.chat_id
message_id = update.message.message_id
reply=update.message.reply_text(
"Type one of the following commands:"
"\n/start - to initiate guided session"
"\n/stop - to stop conversation"
"\n/trade item_id - to trade directly with advertised item"
"\nThere are some rules behind the scene:"
"\n-Bot represents many owners, but each owner can have only one item at the same time."
"\n-Owners can advertise their items externally. Bot will process provided deep links from external redirects."
"\n-Comprehensive search by pages. Owners can see details and photos of other items and choose which one to trade-in with."
"\n-Private content (files) is available only after transfer item ownership to new owner."
"\n-The winner is the owner with maximum number of trades in history OR acquiring highest value item."
"\n-👍Good luck in your trade-in process!"
)
chat_data = context.chat_data
if PAGE_MASSAGES not in chat_data:
chat_data[PAGE_MASSAGES] = []
chat_data[PAGE_MASSAGES].append(reply.message_id)
def run(self) -> None:
"""Run the bot."""
# Create the Updater and pass it your bot's token.
updater = Updater(C.TELEGRAM_TOKEN)
# Get the dispatcher to register handlers
dispatcher = updater.dispatcher
# Set up top level ConversationHandler (selecting action)
conv_handler = ConversationHandler(
entry_points=[
#deep-link start from a promoted link like: https://t.me/CreativeTradeInBot?start=60e91064f508f554a10a3847
CommandHandler('start', self.trade_command, Filters.regex('[a-z0-9]{24}'), pass_args=True),
#normal start
CommandHandler('start', self.start),
],
states={
SELECTING_ACTION: [
CallbackQueryHandler(self.item_edit, pattern='^' + str(EDITING) + '$'),
CallbackQueryHandler(self.item_details, pattern='^' + str(SHOWING) + '$'),
                CallbackQueryHandler(self.search, pattern='^(' + str(SEARCHING) + '|' + str(PREV_PAGE) + '|' + str(NEXT_PAGE) + ')$'), #group the alternation so ^ and $ bind to every branch
CallbackQueryHandler(self.history, pattern='^' + str(TRACKING) + '$'),
CallbackQueryHandler(self.end, pattern='^' + str(END) + '$'), #Back button
],
#these states are used by Back buttons
EDITING: [CallbackQueryHandler(self.start, pattern='^' + str(SELECTING_FEATURE) + '$')],
SHOWING: [
CallbackQueryHandler(self.item_details, pattern='^' + str(DOWNLOADING) + '$'),
CallbackQueryHandler(self.start, pattern='^' + str(END) + '$'), #Back button
],
SEARCHING:[
                CallbackQueryHandler(self.search, pattern='^(' + str(SEARCHING) + '|' + str(PREV_PAGE) + '|' + str(NEXT_PAGE) + ')$'),
CallbackQueryHandler(self.start, pattern='^' + str(END) + '$'), #Back button
CallbackQueryHandler(self.trade_commit, pattern='^' + str(SAVING) + '$'),
MessageHandler(Filters.text & ~Filters.command, self.search_text_filter),
],
TRACKING:[
CallbackQueryHandler(self.start, pattern='^' + str(END) + '$'), #Back button
#CallbackQueryHandler(self.ext_item_details, pattern='^' + str(SHOWING) + '$'),
],
TRADING: [
CallbackQueryHandler(self.start, pattern='^' + str(END) + '$'), #Back button
],
SELECTING_FEATURE: [
                CallbackQueryHandler(self.regular_choice, pattern='^(' + str(NAME) + '|' + str(VALUE) + '|' + str(DESCRIPTION) + ')$'),
CallbackQueryHandler(self.custom_choice, pattern='^' + str(CATEGORY) + '$'),
CallbackQueryHandler(self.edit_commit, pattern='^' + str(SAVING) + '$'),
CallbackQueryHandler(self.start, pattern='^' + str(END) + '$'), #Back button
MessageHandler(Filters.photo, self.received_photo),
MessageHandler(Filters.document, self.received_document),
MessageHandler(Filters.voice, self.received_voice),
#MessageHandler(Filters.text & ~Filters.command, self.regular_choice),
],
SELECTING_CATEGORY: [
MessageHandler(Filters.text & ~Filters.command, self.custom_text),
],
TYPING: [
MessageHandler(Filters.text & ~Filters.command, self.received_information),
],
#SAVING:[CallbackQueryHandler(self.edit_commit, pattern='^' + str(END) + '$'),],
STOPPING: [CommandHandler('start', self.start)],
},
fallbacks=[
CommandHandler('stop', self.stop),
],
#all entry points and state handlers must be 'CallbackQueryHandler', since no other handlers have a message context
#per_message=True # tracked for every message.
#per_message=False
)
dispatcher.add_handler(conv_handler)
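# Why the callback patterns above wrap their alternation in parentheses — a tiny
# standalone check (the '1'/'2'/'3' states are illustrative values, not the real
# conversation constants): without the group, '^A|B$' matches anything starting
# with A OR ending with B, rather than exactly A or exactly B.
import re
assert re.match('^(1|2|3)$', '2')
assert not re.match('^(1|2|3)$', '12')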
#direct trading
direct_trade_handler = ConversationHandler(
entry_points=[CommandHandler('trade', self.trade_command, Filters.regex('[a-z0-9]{24}'), pass_args=True)],
states={
SEARCHING:[
CallbackQueryHandler(self.trade_commit, pattern='^' + str(SAVING) + '$'),
CallbackQueryHandler(self.start, pattern='^' + str(END) + '$'), #Back button
],
STOPPING: [CommandHandler('start', self.start)],
},
fallbacks=[
CommandHandler('stop', self.stop),
],
)
dispatcher.add_handler(direct_trade_handler)
#help handler
dispatcher.add_handler(CommandHandler('stop', self.stop))
dispatcher.add_handler(CommandHandler("help", self.help_command))
#general conversation
dispatcher.add_handler(MessageHandler(Filters.text, self.handle_message))
dispatcher.add_error_handler(self.error)
# Run bot
if C.HEROKU_APP_NAME == "": #pooling mode
logger.info(
"Can't detect 'HEROKU_APP_NAME' env. Running bot in pooling mode.")
updater.start_polling(1)
else: #webhook mode
PORT = int(os.environ.get('PORT', C.HEROKU_PORT))
updater.start_webhook(
listen="0.0.0.0",
port=PORT,
url_path=C.TELEGRAM_TOKEN,
webhook_url=f"https://{C.HEROKU_APP_NAME}.herokuapp.com/{C.TELEGRAM_TOKEN}"
)
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
bot = Bot()
bot.run()
| 47.138277 | 226 | 0.579351 |

| 9be5c1b26accf5f7319af5a092874e4d1c9bb5c0 | 537 | py | Python | lhdetect2_web/users/views.py | cscenter/lhdetect2-web | aaef586b976b4a7efc9db042ed9a04773de6fcfe | ["MIT"] | null | null | null | lhdetect2_web/users/views.py | cscenter/lhdetect2-web | aaef586b976b4a7efc9db042ed9a04773de6fcfe | ["MIT"] | null | null | null | lhdetect2_web/users/views.py | cscenter/lhdetect2-web | aaef586b976b4a7efc9db042ed9a04773de6fcfe | ["MIT"] | null | null | null |
from django.contrib.auth import login, get_user_model
from django.shortcuts import render, redirect
from users.forms import CustomUserCreationForm
def signup(request):
if request.method == 'POST':
form = CustomUserCreationForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect('index')
else:
form = CustomUserCreationForm()
context = {
'form': form
}
return render(request, 'users/signup.html', context)
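# A typical CustomUserCreationForm for the view above — a sketch only; the real
# form lives in users/forms.py and may differ:
from django.contrib.auth.forms import UserCreationForm

class CustomUserCreationFormSketch(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        model = get_user_model()
        fields = ('username', 'email')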
| 24.409091 | 56 | 0.64432 |

| 3aa2088c22687b8be13dea3c9bd73ee0b954e3a0 | 1,236 | py | Python | examples/manifold/plot_swissroll.py | MarcinKonowalczyk/scikit-learn | 8b18d4cbfc3a10ce85decec292d30470c69f40d7 | ["BSD-3-Clause"] | 2 | 2020-11-07T02:13:22.000Z | 2022-01-18T14:42:59.000Z | examples/manifold/plot_swissroll.py | MarcinKonowalczyk/scikit-learn | 8b18d4cbfc3a10ce85decec292d30470c69f40d7 | ["BSD-3-Clause"] | 1 | 2022-01-12T13:11:21.000Z | 2022-01-12T13:11:21.000Z | examples/manifold/plot_swissroll.py | MarcinKonowalczyk/scikit-learn | 8b18d4cbfc3a10ce85decec292d30470c69f40d7 | ["BSD-3-Clause"] | 1 | 2021-11-03T09:49:02.000Z | 2021-11-03T09:49:02.000Z |
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
# ----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12, n_components=2)
print("Done. Reconstruction error: %g" % err)
# ----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection="3d")
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis("tight")
plt.xticks([]), plt.yticks([])
plt.title("Projected data")
plt.show()
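# Quick sanity check on the shapes produced above (both follow directly from
# n_samples=1500 and n_components=2 already used in this example):
assert X.shape == (1500, 3)
assert X_r.shape == (1500, 2)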
| 25.75 | 79 | 0.602751 |

| 6bfa668d615fe87ab162cc36b90502d074b64875 | 19,702 | py | Python | detectron2/modeling/meta_arch/Multimodal_data_fusion/SE_Rexnext_Encoder.py | dongdongdong1217/Detectron2-FC | 92356ebbf52b4e39c94537af26abcf46419c8c2f | ["Apache-2.0"] | 4 | 2022-01-02T07:06:58.000Z | 2022-01-08T05:04:43.000Z | detectron2/modeling/meta_arch/Multimodal_data_fusion/SE_Rexnext_Encoder.py | dongdongdong1217/Detectron2-FC | 92356ebbf52b4e39c94537af26abcf46419c8c2f | ["Apache-2.0"] | null | null | null | detectron2/modeling/meta_arch/Multimodal_data_fusion/SE_Rexnext_Encoder.py | dongdongdong1217/Detectron2-FC | 92356ebbf52b4e39c94537af26abcf46419c8c2f | ["Apache-2.0"] | 1 | 2022-01-02T11:46:23.000Z | 2022-01-02T11:46:23.000Z |
'''
New for ResNeXt:
1. Wider bottleneck
2. Add group for conv2
'''
import torch.nn as nn
import math
import torch
import numpy as np
from ..build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.Image_classification import SE_Resnext
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, num_group=32):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes*2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes*2)
self.conv2 = nn.Conv2d(planes*2, planes*2, kernel_size=3, stride=stride,
padding=1, bias=False, groups=num_group)
self.bn2 = nn.BatchNorm2d(planes*2)
self.conv3 = nn.Conv2d(planes*2, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
if planes == 64:
self.globalAvgPool = nn.AvgPool2d(56, stride=1)
elif planes == 128:
self.globalAvgPool = nn.AvgPool2d(28, stride=1)
elif planes == 256:
self.globalAvgPool = nn.AvgPool2d(14, stride=1)
elif planes == 512:
self.globalAvgPool = nn.AvgPool2d(7, stride=1)
self.fc1 = nn.Linear(in_features=planes * 4, out_features=round(planes / 4))
self.fc2 = nn.Linear(in_features=round(planes / 4), out_features=planes * 4)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
original_out = out
out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmoid(out)
out = out.view(out.size(0), out.size(1), 1, 1)
out = out * original_out
out += residual
out = self.relu(out)
return out
class SE_ResNeXt(nn.Module):
def __init__(self, block, layers, num_group=32):
self.inplanes = 64
super(SE_ResNeXt, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], num_group)
self.layer2 = self._make_layer(block, 128, layers[1], num_group, stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], num_group, stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], num_group, stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, 512)
# #added by the author: 1x1 conv projecting 2048 channels down to d_model=512
self.layer5 = nn.Conv2d(2048,512,(1,1),1,0)
#self.layer6 = nn.Conv2d(256,512,(8,8),8,0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, num_group, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, num_group=num_group))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, num_group=num_group))
return nn.Sequential(*layers)
def forward(self, batch_images_tensor):
#----------------- forward pass -------------#
x = self.conv1(batch_images_tensor)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
#print("x_shape:",x.shape)
#x = self.layer6(x)
#print("x_shape:",x.shape)
#x = self.avgpool(x)
#x = x.view(x.size(0), -1)
#image data occupies only one row (single-token variant)
#x = self.fc(x)
#return x.unsqueeze(1)
#image data occupies multiple rows (49 tokens after the reshape below)
x = x.reshape(len(batch_images_tensor),49,512)
#print("x_shape:",x.shape)
return x
##############################################################
# Transformer Parameters
d_model = 512 # Embedding Size
d_ff = 2048 # FeedForward dimension
d_k = d_v = 64 # dimension of K(=Q), V
n_layers = 6 # number of Encoder of Decoder Layer
n_heads = 8 # number of heads in Multi-Head Attention
#positional encoding function
def get_sinusoid_encoding_table(n_position, d_model):
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_model)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_model)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table)
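# Shape check for the sinusoid table (d_model = 512 is defined above; 10 is an
# arbitrary sequence length chosen for illustration):
_table = get_sinusoid_encoding_table(10, d_model)
assert _table.shape == (10, d_model)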
#Pad Mask
def get_attn_pad_mask(seq_q, seq_k):
'''
seq_q: [batch_size, seq_len]
seq_k: [batch_size, seq_len]
seq_len could be src_len or it could be tgt_len
seq_len in seq_q and seq_len in seq_k maybe not equal
'''
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
# eq(zero) is PAD token
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # [batch_size, 1, len_k], True marks PAD positions to be masked
return pad_attn_mask.expand(batch_size, len_q, len_k) # [batch_size, len_q, len_k]
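# Tiny pad-mask example: token id 0 is PAD, so only the key at index 2 is masked
# (the token ids are made up):
_q = torch.tensor([[5, 7, 0]])
_m = get_attn_pad_mask(_q, _q)
assert _m.shape == (1, 3, 3) and bool(_m[0, 0, 2]) and not bool(_m[0, 0, 0])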
#Subsequence Mask
def get_attn_subsequence_mask(seq):
'''
seq: [batch_size, tgt_len]
'''
attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
subsequence_mask = np.triu(np.ones(attn_shape), k=1) # Upper triangular matrix
subsequence_mask = torch.from_numpy(subsequence_mask).byte().cuda().long()
return subsequence_mask
#ScaledDotProductAttention
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, Q, K, V, attn_mask):
'''
Q: [batch_size, n_heads, len_q, d_k]
K: [batch_size, n_heads, len_k, d_k]
V: [batch_size, n_heads, len_v(=len_k), d_v]
attn_mask: [batch_size, n_heads, seq_len, seq_len]
'''
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k) # scores : [batch_size, n_heads, len_q, len_k]
scores.masked_fill_(attn_mask, -1e9) # Fills elements of self tensor with value where mask is True.
attn = nn.Softmax(dim=-1)(scores)
context = torch.matmul(attn, V) # [batch_size, n_heads, len_q, d_v]
return context, attn
#MultiHeadAttention
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__()
self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)
self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)
self.fc = nn.Linear(n_heads * d_v, d_model, bias=False)
self.norm = nn.LayerNorm(d_model)
def forward(self, input_Q, input_K, input_V, attn_mask):
'''
input_Q: [batch_size, len_q, d_model]
input_K: [batch_size, len_k, d_model]
input_V: [batch_size, len_v(=len_k), d_model]
attn_mask: [batch_size, seq_len, seq_len]
'''
residual, batch_size = input_Q, input_Q.size(0)
# (B, S, D) -proj-> (B, S, D_new) -split-> (B, S, H, W) -trans-> (B, H, S, W)
Q = self.W_Q(input_Q).view(batch_size, -1, n_heads, d_k).transpose(1,2) # Q: [batch_size, n_heads, len_q, d_k]
K = self.W_K(input_K).view(batch_size, -1, n_heads, d_k).transpose(1,2) # K: [batch_size, n_heads, len_k, d_k]
V = self.W_V(input_V).view(batch_size, -1, n_heads, d_v).transpose(1,2) # V: [batch_size, n_heads, len_v(=len_k), d_v]
attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len]
# context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
context, attn = ScaledDotProductAttention()(Q, K, V, attn_mask)
context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
output = self.fc(context) # [batch_size, len_q, d_model]
return self.norm(output + residual), attn
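# Shape walkthrough for the multi-head block with the hyperparameters above —
# batch_size=2 and seq_len=5 are made-up values; this runs on CPU:
_mha = MultiHeadAttention()
_x = torch.zeros(2, 5, d_model)
_mask = torch.zeros(2, 5, 5, dtype=torch.bool)  # nothing masked
_out, _attn = _mha(_x, _x, _x, _mask)
assert _out.shape == (2, 5, d_model) and _attn.shape == (2, n_heads, 5, 5)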
# #MultiHeadAttention
# class MultiHeadAttention_fuse(nn.Module):
# def __init__(self):
# super(MultiHeadAttention_fuse, self).__init__()
# self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
# self.W_K = nn.Linear(512, d_k * n_heads, bias=False)
# self.W_V = nn.Linear(512, d_v * n_heads, bias=False)
# self.fc = nn.Linear(n_heads * d_v, d_model, bias=False)
# self.norm = nn.LayerNorm(d_model)
# def forward(self, input_Q, input_K, input_V, attn_mask):
# '''
# input_Q: [batch_size, len_q, d_model]
# input_K: [batch_size, len_k, d_model]
# input_V: [batch_size, len_v(=len_k), d_model]
# attn_mask: [batch_size, seq_len, seq_len]
# '''
# residual, batch_size = input_Q, input_Q.size(0)
# add_mask = torch.zeros((batch_size,1,1)).bool().cuda()
# attn_mask = torch.cat((attn_mask,add_mask),dim=2)
# # (B, S, D) -proj-> (B, S, D_new) -split-> (B, S, H, W) -trans-> (B, H, S, W)
# Q = self.W_Q(input_Q).view(batch_size, -1, n_heads, d_k).transpose(1,2) # Q: [batch_size, n_heads, len_q, d_k]
# K = self.W_K(input_K).view(batch_size, -1, n_heads, d_k).transpose(1,2) # K: [batch_size, n_heads, len_k, d_k]
# V = self.W_V(input_V).view(batch_size, -1, n_heads, d_v).transpose(1,2) # V: [batch_size, n_heads, len_v(=len_k), d_v]
# attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len]
# # context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
# context, attn = ScaledDotProductAttention()(Q, K, V, attn_mask)
# context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
# output = self.fc(context) # [batch_size, len_q, d_model]
# return self.norm(output + residual), attn
#FeedForward Layer
class PoswiseFeedForwardNet(nn.Module):
def __init__(self):
super(PoswiseFeedForwardNet, self).__init__()
self.fc = nn.Sequential(
nn.Linear(d_model, d_ff, bias=False),
nn.ReLU(),
nn.Linear(d_ff, d_model, bias=False)
)
self.norm = nn.LayerNorm(d_model)
def forward(self, inputs):
'''
inputs: [batch_size, seq_len, d_model]
'''
residual = inputs
output = self.fc(inputs)
return self.norm(output + residual) # [batch_size, seq_len, d_model]
#Encoder Layer
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer, self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.pos_ffn = PoswiseFeedForwardNet()
def forward(self, enc_inputs, enc_self_attn_mask):
'''
enc_inputs: [batch_size, src_len, d_model]
enc_self_attn_mask: [batch_size, src_len, src_len]
'''
# enc_outputs: [batch_size, src_len, d_model], attn: [batch_size, n_heads, src_len, src_len]
enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask) # enc_inputs to same Q,K,V
enc_outputs = self.pos_ffn(enc_outputs) # enc_outputs: [batch_size, src_len, d_model]
return enc_outputs, attn
#Encoder; src_vocab_size is the size of the input vocabulary
class Encoder(nn.Module):
def __init__(self,src_vocab_size):
super(Encoder, self).__init__()
        self.src_emb = nn.Embedding(src_vocab_size, d_model) #token embeddings
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_vocab_size, d_model),freeze=True) #fixed sinusoidal positional embeddings
self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
def forward(self, enc_inputs,image_output):
'''
enc_inputs: [batch_size, src_len]
'''
word_emb = self.src_emb(enc_inputs) # [batch_size, src_len, d_model]
pos_emb = self.pos_emb(enc_inputs) # [batch_size, src_len, d_model]
enc_outputs = word_emb + pos_emb
        enc_outputs = torch.cat((enc_outputs,image_output),dim=1) #fuse the process-data tokens with the image tokens
enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs) # [batch_size, src_len, src_len]
batch_size = enc_inputs.size(0)
add_mask = torch.zeros((batch_size,35,49)).bool().cuda()
enc_self_attn_mask = torch.cat((enc_self_attn_mask,add_mask),dim=2)
add_mask = torch.zeros((batch_size,49,84)).bool().cuda()
enc_self_attn_mask = torch.cat((enc_self_attn_mask,add_mask),dim=1)
enc_self_attns = []
for layer in self.layers:
# enc_outputs: [batch_size, src_len, d_model], enc_self_attn: [batch_size, n_heads, src_len, src_len]
enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)
enc_self_attns.append(enc_self_attn)
return enc_outputs, enc_self_attns
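# Mask bookkeeping in Encoder.forward above: the text branch is padded to 35 tokens
# and the SE-ResNeXt branch contributes 49 visual tokens (a 7x7 map of 512-d
# features), so the fused sequence length is 35 + 49 = 84; the appended mask blocks
# are all-False, i.e. the visual tokens are never masked out.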
#Decoder Layer
class DecoderLayer(nn.Module):
def __init__(self):
super(DecoderLayer, self).__init__()
self.dec_self_attn = MultiHeadAttention()
self.dec_enc_attn = MultiHeadAttention()
self.pos_ffn = PoswiseFeedForwardNet()
def forward(self, dec_inputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask):
'''
dec_inputs: [batch_size, tgt_len, d_model]
enc_outputs: [batch_size, src_len, d_model]
dec_self_attn_mask: [batch_size, tgt_len, tgt_len]
dec_enc_attn_mask: [batch_size, tgt_len, src_len]
'''
# dec_outputs: [batch_size, tgt_len, d_model], dec_self_attn: [batch_size, n_heads, tgt_len, tgt_len]
dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, dec_inputs, dec_self_attn_mask)
# dec_outputs: [batch_size, tgt_len, d_model], dec_enc_attn: [batch_size, h_heads, tgt_len, src_len]
dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, enc_outputs, dec_enc_attn_mask)
dec_outputs = self.pos_ffn(dec_outputs) # [batch_size, tgt_len, d_model]
return dec_outputs, dec_self_attn, dec_enc_attn
#Decoder
class Decoder(nn.Module):
def __init__(self,tgt_vocab_size):
super(Decoder, self).__init__()
self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model)
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_vocab_size, d_model),freeze=True)
self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])
def forward(self, dec_inputs, enc_inputs, enc_outputs):
'''
dec_inputs: [batch_size, tgt_len]
enc_intpus: [batch_size, src_len]
enc_outputs: [batsh_size, src_len, d_model]
'''
word_emb = self.tgt_emb(dec_inputs) # [batch_size, tgt_len, d_model]
pos_emb = self.pos_emb(dec_inputs) # [batch_size, tgt_len, d_model]
dec_outputs = word_emb + pos_emb
dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs) # [batch_size, tgt_len, tgt_len]
dec_self_attn_subsequent_mask = get_attn_subsequence_mask(dec_inputs) # [batch_size, tgt_len]
dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0) # [batch_size, tgt_len, tgt_len]
dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs) # [batc_size, tgt_len, src_len]
batch_size = enc_inputs.size(0)
add_mask = torch.zeros((batch_size,1,49)).bool().cuda()
dec_enc_attn_mask = torch.cat((dec_enc_attn_mask,add_mask),dim=2)
dec_self_attns, dec_enc_attns = [], []
for layer in self.layers:
# dec_outputs: [batch_size, tgt_len, d_model], dec_self_attn: [batch_size, n_heads, tgt_len, tgt_len], dec_enc_attn: [batch_size, h_heads, tgt_len, src_len]
dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask)
dec_self_attns.append(dec_self_attn)
dec_enc_attns.append(dec_enc_attn)
return dec_outputs, dec_self_attns, dec_enc_attns
@META_ARCH_REGISTRY.register()
class Se_resnext_Encoder(nn.Module):
def __init__(self,cfg):
super(Se_resnext_Encoder, self).__init__()
self.image_network = SE_ResNeXt(Bottleneck, [3, 4, 23, 3]).cuda() #resnext101
self.encoder = Encoder(cfg.Arguments1)
self.projection = nn.Linear(d_model,cfg.Arguments2, bias=False)
self.loss_fun = nn.CrossEntropyLoss(ignore_index=0)
def forward(self,data):
'''
enc_inputs: [batch_size, src_len]
dec_inputs: [batch_size, tgt_len]
'''
        #------------------ fetch and preprocess image data (each record in `data` carries image, label, width, height) ---#
batchsize = len(data)
batch_images = []
for i in range(0,batchsize,1):
batch_images.append(data[i]["image"])
batch_images=[image.tolist() for image in batch_images]
batch_images_tensor = torch.tensor(batch_images,dtype=torch.float).cuda()
        #------------------ fetch and preprocess process data (each record in `data` carries image_name, x, y) ------------#
batch_x = []
batch_y = []
for i in range(0,batchsize,1):
batch_x.append(data[i]["x"])
batch_y.append([int(float(data[i]["y"]))])
batch_x=[x.tolist() for x in batch_x]
enc_inputs = torch.tensor(batch_x,dtype=torch.float).cuda().long()
dec_inputs = torch.tensor(batch_y ,dtype=torch.float).cuda().long()
        #---------------------------- forward inference ------------------------------------#
        #----------------- run SE-ResNeXt ------------#
image_output = self.image_network(batch_images_tensor)
        #---------------- run the Transformer encoder ------------#
enc_outputs, enc_self_attns = self.encoder(enc_inputs,image_output)
enc_outputs = enc_outputs.mean(dim=1)
        #---------------------------- project and compute the loss ---------------------------------#
dec_logits = self.projection(enc_outputs) # dec_logits: [batch_size, tgt_len, tgt_vocab_size]
outputs, enc_self_attns = dec_logits.view(-1, dec_logits.size(-1)), enc_self_attns
if self.training:
loss = self.loss_fun(outputs, dec_inputs.view(-1))
return loss
else:
return outputs
if __name__=="__main__":
model = Se_resnext_Encoder()
output = model()
print("output:",output.shape)
| 43.301099 | 168 | 0.627957 |

| 14d0dfcf4cdaf6e7c845218dcb47bf7b38ac58f9 | 205,697 | py | Python | msgraph-cli-extensions/beta/financials_beta/azext_financials_beta/vendored_sdks/financials/models/_models.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | ["MIT"] | null | null | null | msgraph-cli-extensions/beta/financials_beta/azext_financials_beta/vendored_sdks/financials/models/_models.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | ["MIT"] | null | null | null | msgraph-cli-extensions/beta/financials_beta/azext_financials_beta/vendored_sdks/financials/models/_models.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class CollectionOfAccount(msrest.serialization.Model):
"""Collection of account.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphAccount]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphAccount]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfAccount, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
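# Usage sketch for these generated collection models — the kwargs-based __init__
# is shared by every class in this module (the values below are made up):
_page = CollectionOfAccount(value=[], odata_next_link=None)
assert _page.value == [] and _page.odata_next_link is None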
class CollectionOfAgedAccountsPayable(msrest.serialization.Model):
"""Collection of agedAccountsPayable.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphAgedAccountsPayable]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphAgedAccountsPayable]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfAgedAccountsPayable, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfAgedAccountsReceivable(msrest.serialization.Model):
"""Collection of agedAccountsReceivable.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphAgedAccountsReceivable]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphAgedAccountsReceivable]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfAgedAccountsReceivable, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCompany(msrest.serialization.Model):
"""Collection of company.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCompany]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCompany]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCompany, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCompanyInformation(msrest.serialization.Model):
"""Collection of companyInformation.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCompanyInformation]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCompanyInformation]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCompanyInformation, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCountryRegion(msrest.serialization.Model):
"""Collection of countryRegion.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCountryRegion]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCountryRegion]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCountryRegion, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCurrency(msrest.serialization.Model):
"""Collection of currency.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCurrency]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCurrency]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCurrency, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCustomer(msrest.serialization.Model):
"""Collection of customer.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCustomer]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCustomer]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCustomer, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCustomerPayment(msrest.serialization.Model):
"""Collection of customerPayment.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCustomerPayment]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCustomerPayment]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCustomerPayment, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCustomerPayment0(msrest.serialization.Model):
"""Collection of customerPayment.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCustomerPayment]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCustomerPayment]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCustomerPayment0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfCustomerPaymentJournal(msrest.serialization.Model):
"""Collection of customerPaymentJournal.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphCustomerPaymentJournal]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphCustomerPaymentJournal]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfCustomerPaymentJournal, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfDimension(msrest.serialization.Model):
"""Collection of dimension.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphDimension]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphDimension]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfDimension, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfDimensionValue(msrest.serialization.Model):
"""Collection of dimensionValue.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphDimensionValue]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphDimensionValue]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfDimensionValue, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfDimensionValue0(msrest.serialization.Model):
"""Collection of dimensionValue.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphDimensionValue]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphDimensionValue]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfDimensionValue0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfEmployee(msrest.serialization.Model):
"""Collection of employee.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphEmployee]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphEmployee]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfEmployee, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfGeneralLedgerEntry(msrest.serialization.Model):
"""Collection of generalLedgerEntry.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphGeneralLedgerEntry]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphGeneralLedgerEntry]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfGeneralLedgerEntry, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfItem(msrest.serialization.Model):
"""Collection of item.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphItem]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphItem]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfItem, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfItemCategory(msrest.serialization.Model):
"""Collection of itemCategory.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphItemCategory]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphItemCategory]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfItemCategory, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfJournal(msrest.serialization.Model):
"""Collection of journal.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphJournal]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphJournal]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfJournal, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfJournalLine(msrest.serialization.Model):
"""Collection of journalLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphJournalLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphJournalLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfJournalLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfJournalLine0(msrest.serialization.Model):
"""Collection of journalLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphJournalLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphJournalLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfJournalLine0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPaymentMethod(msrest.serialization.Model):
"""Collection of paymentMethod.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPaymentMethod]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPaymentMethod]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPaymentMethod, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPaymentTerm(msrest.serialization.Model):
"""Collection of paymentTerm.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPaymentTerm]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPaymentTerm]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPaymentTerm, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
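# Note: the numbered variants that follow (CollectionOfPicture0 through
# CollectionOfPicture20) appear to be per-operation duplicates emitted by the code
# generator for each API path that returns a picture collection; their shape is
# identical to CollectionOfPicture above.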
class CollectionOfPicture0(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture1(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture1, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture10(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture10, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture11(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture11, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture12(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture12, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture13(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture13, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture14(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture14, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture15(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture15, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture16(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture16, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture17(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture17, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture18(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture18, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture19(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture19, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture2(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture2, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture20(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture20, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture3(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture3, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture4(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture4, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture5(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture5, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture6(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture6, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture7(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture7, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture8(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture8, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPicture9(msrest.serialization.Model):
"""Collection of picture.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPicture]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPicture]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPicture9, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPurchaseInvoice(msrest.serialization.Model):
"""Collection of purchaseInvoice.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPurchaseInvoice]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPurchaseInvoice]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPurchaseInvoice, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPurchaseInvoiceLine(msrest.serialization.Model):
"""Collection of purchaseInvoiceLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPurchaseInvoiceLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPurchaseInvoiceLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPurchaseInvoiceLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfPurchaseInvoiceLine0(msrest.serialization.Model):
"""Collection of purchaseInvoiceLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphPurchaseInvoiceLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphPurchaseInvoiceLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfPurchaseInvoiceLine0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesCreditMemo(msrest.serialization.Model):
"""Collection of salesCreditMemo.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesCreditMemo]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesCreditMemo]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesCreditMemo, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesCreditMemoLine(msrest.serialization.Model):
"""Collection of salesCreditMemoLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesCreditMemoLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesCreditMemoLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesCreditMemoLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesCreditMemoLine0(msrest.serialization.Model):
"""Collection of salesCreditMemoLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesCreditMemoLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesCreditMemoLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesCreditMemoLine0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesInvoice(msrest.serialization.Model):
"""Collection of salesInvoice.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesInvoice]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesInvoice]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesInvoice, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesInvoiceLine(msrest.serialization.Model):
"""Collection of salesInvoiceLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesInvoiceLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesInvoiceLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesInvoiceLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesInvoiceLine0(msrest.serialization.Model):
"""Collection of salesInvoiceLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesInvoiceLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesInvoiceLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesInvoiceLine0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesOrder(msrest.serialization.Model):
"""Collection of salesOrder.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesOrder]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesOrder]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesOrder, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesOrderLine(msrest.serialization.Model):
"""Collection of salesOrderLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesOrderLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesOrderLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesOrderLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesOrderLine0(msrest.serialization.Model):
"""Collection of salesOrderLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesOrderLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesOrderLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesOrderLine0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesQuote(msrest.serialization.Model):
"""Collection of salesQuote.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesQuote]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesQuote]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesQuote, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesQuoteLine(msrest.serialization.Model):
"""Collection of salesQuoteLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesQuoteLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesQuoteLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesQuoteLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSalesQuoteLine0(msrest.serialization.Model):
"""Collection of salesQuoteLine.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphSalesQuoteLine]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphSalesQuoteLine]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfSalesQuoteLine0, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfShipmentMethod(msrest.serialization.Model):
"""Collection of shipmentMethod.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphShipmentMethod]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphShipmentMethod]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfShipmentMethod, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfTaxArea(msrest.serialization.Model):
"""Collection of taxArea.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphTaxArea]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphTaxArea]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfTaxArea, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfTaxGroup(msrest.serialization.Model):
"""Collection of taxGroup.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphTaxGroup]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphTaxGroup]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfTaxGroup, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfUnitOfMeasure(msrest.serialization.Model):
"""Collection of unitOfMeasure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphUnitOfMeasure]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphUnitOfMeasure]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfUnitOfMeasure, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfVendor(msrest.serialization.Model):
"""Collection of vendor.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param value:
:type value: list[~financials.models.MicrosoftGraphVendor]
:param odata_next_link:
:type odata_next_link: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': '[MicrosoftGraphVendor]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CollectionOfVendor, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class MicrosoftGraphEntity(msrest.serialization.Model):
"""entity.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param id: Read-only.
:type id: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphEntity, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.id = kwargs.get('id', None)
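# Illustrative sketch (not part of the generated code): every model here maps
# 'additional_properties' to the empty-string key '' in its _attribute_map, which
# msrest treats as a catch-all bucket for JSON properties that have no declared
# attribute. The id and the extra property values below are made-up examples.
def _example_entity_with_extra_properties():
    # Returns a MicrosoftGraphEntity carrying undeclared JSON properties.
    return MicrosoftGraphEntity(
        id="00000000-0000-0000-0000-000000000000",
        additional_properties={"@odata.etag": 'W/"abc"', "customField": 42},
    )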
class MicrosoftGraphAccount(MicrosoftGraphEntity):
"""account.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param blocked:
:type blocked: bool
:param category:
:type category: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param sub_category:
:type sub_category: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'blocked': {'key': 'blocked', 'type': 'bool'},
'category': {'key': 'category', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'sub_category': {'key': 'subCategory', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphAccount, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.blocked = kwargs.get('blocked', None)
self.category = kwargs.get('category', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.sub_category = kwargs.get('sub_category', None)
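# Hedged sketch (not part of the generated code): msrest models expose a
# .serialize() method that applies the _attribute_map above, renaming the
# snake_case attributes to their camelCase wire form
# (display_name -> displayName, sub_category -> subCategory) and dropping
# attributes left as None.
def _example_serialize_account():
    account = MicrosoftGraphAccount(
        display_name="Checking", category="Assets", blocked=False, number="1000"
    )
    return account.serialize()
    # -> {'blocked': False, 'category': 'Assets',
    #     'displayName': 'Checking', 'number': '1000'}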
class MicrosoftGraphAgedAccountsPayable(MicrosoftGraphEntity):
"""agedAccountsPayable.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param aged_as_of_date:
:type aged_as_of_date: ~datetime.date
:param balance_due:
:type balance_due: float
:param currency_code:
:type currency_code: str
:param current_amount:
:type current_amount: float
:param name:
:type name: str
:param period1_amount:
:type period1_amount: float
:param period2_amount:
:type period2_amount: float
:param period3_amount:
:type period3_amount: float
:param period_length_filter:
:type period_length_filter: str
:param vendor_number:
:type vendor_number: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'aged_as_of_date': {'key': 'agedAsOfDate', 'type': 'date'},
'balance_due': {'key': 'balanceDue', 'type': 'float'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'current_amount': {'key': 'currentAmount', 'type': 'float'},
'name': {'key': 'name', 'type': 'str'},
'period1_amount': {'key': 'period1Amount', 'type': 'float'},
'period2_amount': {'key': 'period2Amount', 'type': 'float'},
'period3_amount': {'key': 'period3Amount', 'type': 'float'},
'period_length_filter': {'key': 'periodLengthFilter', 'type': 'str'},
'vendor_number': {'key': 'vendorNumber', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphAgedAccountsPayable, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.aged_as_of_date = kwargs.get('aged_as_of_date', None)
self.balance_due = kwargs.get('balance_due', None)
self.currency_code = kwargs.get('currency_code', None)
self.current_amount = kwargs.get('current_amount', None)
self.name = kwargs.get('name', None)
self.period1_amount = kwargs.get('period1_amount', None)
self.period2_amount = kwargs.get('period2_amount', None)
self.period3_amount = kwargs.get('period3_amount', None)
self.period_length_filter = kwargs.get('period_length_filter', None)
self.vendor_number = kwargs.get('vendor_number', None)
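# Hedged sketch (not part of the generated code): period1/2/3 amounts are aging
# buckets whose width is described by period_length_filter, so summing them with
# current_amount should approximate balance_due. That is an assumption about the
# report's semantics, not something the generated model enforces.
def _example_aging_buckets_total(aged):
    buckets = [aged.current_amount, aged.period1_amount,
               aged.period2_amount, aged.period3_amount]
    return sum(b for b in buckets if b is not None)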
class MicrosoftGraphAgedAccountsReceivable(MicrosoftGraphEntity):
"""agedAccountsReceivable.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param aged_as_of_date:
:type aged_as_of_date: ~datetime.date
:param balance_due:
:type balance_due: float
:param currency_code:
:type currency_code: str
:param current_amount:
:type current_amount: float
:param customer_number:
:type customer_number: str
:param name:
:type name: str
:param period1_amount:
:type period1_amount: float
:param period2_amount:
:type period2_amount: float
:param period3_amount:
:type period3_amount: float
:param period_length_filter:
:type period_length_filter: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'aged_as_of_date': {'key': 'agedAsOfDate', 'type': 'date'},
'balance_due': {'key': 'balanceDue', 'type': 'float'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'current_amount': {'key': 'currentAmount', 'type': 'float'},
'customer_number': {'key': 'customerNumber', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'period1_amount': {'key': 'period1Amount', 'type': 'float'},
'period2_amount': {'key': 'period2Amount', 'type': 'float'},
'period3_amount': {'key': 'period3Amount', 'type': 'float'},
'period_length_filter': {'key': 'periodLengthFilter', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphAgedAccountsReceivable, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.aged_as_of_date = kwargs.get('aged_as_of_date', None)
self.balance_due = kwargs.get('balance_due', None)
self.currency_code = kwargs.get('currency_code', None)
self.current_amount = kwargs.get('current_amount', None)
self.customer_number = kwargs.get('customer_number', None)
self.name = kwargs.get('name', None)
self.period1_amount = kwargs.get('period1_amount', None)
self.period2_amount = kwargs.get('period2_amount', None)
self.period3_amount = kwargs.get('period3_amount', None)
self.period_length_filter = kwargs.get('period_length_filter', None)
class MicrosoftGraphCompany(MicrosoftGraphEntity):
"""company.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param business_profile_id:
:type business_profile_id: str
:param display_name:
:type display_name: str
:param name:
:type name: str
:param system_version:
:type system_version: str
:param accounts:
:type accounts: list[~financials.models.MicrosoftGraphAccount]
:param aged_accounts_payable:
:type aged_accounts_payable: list[~financials.models.MicrosoftGraphAgedAccountsPayable]
:param aged_accounts_receivable:
:type aged_accounts_receivable: list[~financials.models.MicrosoftGraphAgedAccountsReceivable]
:param company_information:
:type company_information: list[~financials.models.MicrosoftGraphCompanyInformation]
:param countries_regions:
:type countries_regions: list[~financials.models.MicrosoftGraphCountryRegion]
:param currencies:
:type currencies: list[~financials.models.MicrosoftGraphCurrency]
:param customer_payment_journals:
:type customer_payment_journals: list[~financials.models.MicrosoftGraphCustomerPaymentJournal]
:param customer_payments:
:type customer_payments: list[~financials.models.MicrosoftGraphCustomerPayment]
:param customers:
:type customers: list[~financials.models.MicrosoftGraphCustomer]
:param dimensions:
:type dimensions: list[~financials.models.MicrosoftGraphDimension]
:param dimension_values:
:type dimension_values: list[~financials.models.MicrosoftGraphDimensionValue]
:param employees:
:type employees: list[~financials.models.MicrosoftGraphEmployee]
:param general_ledger_entries:
:type general_ledger_entries: list[~financials.models.MicrosoftGraphGeneralLedgerEntry]
:param item_categories:
:type item_categories: list[~financials.models.MicrosoftGraphItemCategory]
:param items:
:type items: list[~financials.models.MicrosoftGraphItem]
:param journal_lines:
:type journal_lines: list[~financials.models.MicrosoftGraphJournalLine]
:param journals:
:type journals: list[~financials.models.MicrosoftGraphJournal]
:param payment_methods:
:type payment_methods: list[~financials.models.MicrosoftGraphPaymentMethod]
:param payment_terms:
:type payment_terms: list[~financials.models.MicrosoftGraphPaymentTerm]
:param picture:
:type picture: list[~financials.models.MicrosoftGraphPicture]
:param purchase_invoice_lines:
:type purchase_invoice_lines: list[~financials.models.MicrosoftGraphPurchaseInvoiceLine]
:param purchase_invoices:
:type purchase_invoices: list[~financials.models.MicrosoftGraphPurchaseInvoice]
:param sales_credit_memo_lines:
:type sales_credit_memo_lines: list[~financials.models.MicrosoftGraphSalesCreditMemoLine]
:param sales_credit_memos:
:type sales_credit_memos: list[~financials.models.MicrosoftGraphSalesCreditMemo]
:param sales_invoice_lines:
:type sales_invoice_lines: list[~financials.models.MicrosoftGraphSalesInvoiceLine]
:param sales_invoices:
:type sales_invoices: list[~financials.models.MicrosoftGraphSalesInvoice]
:param sales_order_lines:
:type sales_order_lines: list[~financials.models.MicrosoftGraphSalesOrderLine]
:param sales_orders:
:type sales_orders: list[~financials.models.MicrosoftGraphSalesOrder]
:param sales_quote_lines:
:type sales_quote_lines: list[~financials.models.MicrosoftGraphSalesQuoteLine]
:param sales_quotes:
:type sales_quotes: list[~financials.models.MicrosoftGraphSalesQuote]
:param shipment_methods:
:type shipment_methods: list[~financials.models.MicrosoftGraphShipmentMethod]
:param tax_areas:
:type tax_areas: list[~financials.models.MicrosoftGraphTaxArea]
:param tax_groups:
:type tax_groups: list[~financials.models.MicrosoftGraphTaxGroup]
:param units_of_measure:
:type units_of_measure: list[~financials.models.MicrosoftGraphUnitOfMeasure]
:param vendors:
:type vendors: list[~financials.models.MicrosoftGraphVendor]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'business_profile_id': {'key': 'businessProfileId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'system_version': {'key': 'systemVersion', 'type': 'str'},
'accounts': {'key': 'accounts', 'type': '[MicrosoftGraphAccount]'},
'aged_accounts_payable': {'key': 'agedAccountsPayable', 'type': '[MicrosoftGraphAgedAccountsPayable]'},
'aged_accounts_receivable': {'key': 'agedAccountsReceivable', 'type': '[MicrosoftGraphAgedAccountsReceivable]'},
'company_information': {'key': 'companyInformation', 'type': '[MicrosoftGraphCompanyInformation]'},
'countries_regions': {'key': 'countriesRegions', 'type': '[MicrosoftGraphCountryRegion]'},
'currencies': {'key': 'currencies', 'type': '[MicrosoftGraphCurrency]'},
'customer_payment_journals': {'key': 'customerPaymentJournals', 'type': '[MicrosoftGraphCustomerPaymentJournal]'},
'customer_payments': {'key': 'customerPayments', 'type': '[MicrosoftGraphCustomerPayment]'},
'customers': {'key': 'customers', 'type': '[MicrosoftGraphCustomer]'},
'dimensions': {'key': 'dimensions', 'type': '[MicrosoftGraphDimension]'},
'dimension_values': {'key': 'dimensionValues', 'type': '[MicrosoftGraphDimensionValue]'},
'employees': {'key': 'employees', 'type': '[MicrosoftGraphEmployee]'},
'general_ledger_entries': {'key': 'generalLedgerEntries', 'type': '[MicrosoftGraphGeneralLedgerEntry]'},
'item_categories': {'key': 'itemCategories', 'type': '[MicrosoftGraphItemCategory]'},
'items': {'key': 'items', 'type': '[MicrosoftGraphItem]'},
'journal_lines': {'key': 'journalLines', 'type': '[MicrosoftGraphJournalLine]'},
'journals': {'key': 'journals', 'type': '[MicrosoftGraphJournal]'},
'payment_methods': {'key': 'paymentMethods', 'type': '[MicrosoftGraphPaymentMethod]'},
'payment_terms': {'key': 'paymentTerms', 'type': '[MicrosoftGraphPaymentTerm]'},
'picture': {'key': 'picture', 'type': '[MicrosoftGraphPicture]'},
'purchase_invoice_lines': {'key': 'purchaseInvoiceLines', 'type': '[MicrosoftGraphPurchaseInvoiceLine]'},
'purchase_invoices': {'key': 'purchaseInvoices', 'type': '[MicrosoftGraphPurchaseInvoice]'},
'sales_credit_memo_lines': {'key': 'salesCreditMemoLines', 'type': '[MicrosoftGraphSalesCreditMemoLine]'},
'sales_credit_memos': {'key': 'salesCreditMemos', 'type': '[MicrosoftGraphSalesCreditMemo]'},
'sales_invoice_lines': {'key': 'salesInvoiceLines', 'type': '[MicrosoftGraphSalesInvoiceLine]'},
'sales_invoices': {'key': 'salesInvoices', 'type': '[MicrosoftGraphSalesInvoice]'},
'sales_order_lines': {'key': 'salesOrderLines', 'type': '[MicrosoftGraphSalesOrderLine]'},
'sales_orders': {'key': 'salesOrders', 'type': '[MicrosoftGraphSalesOrder]'},
'sales_quote_lines': {'key': 'salesQuoteLines', 'type': '[MicrosoftGraphSalesQuoteLine]'},
'sales_quotes': {'key': 'salesQuotes', 'type': '[MicrosoftGraphSalesQuote]'},
'shipment_methods': {'key': 'shipmentMethods', 'type': '[MicrosoftGraphShipmentMethod]'},
'tax_areas': {'key': 'taxAreas', 'type': '[MicrosoftGraphTaxArea]'},
'tax_groups': {'key': 'taxGroups', 'type': '[MicrosoftGraphTaxGroup]'},
'units_of_measure': {'key': 'unitsOfMeasure', 'type': '[MicrosoftGraphUnitOfMeasure]'},
'vendors': {'key': 'vendors', 'type': '[MicrosoftGraphVendor]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphCompany, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.business_profile_id = kwargs.get('business_profile_id', None)
self.display_name = kwargs.get('display_name', None)
self.name = kwargs.get('name', None)
self.system_version = kwargs.get('system_version', None)
self.accounts = kwargs.get('accounts', None)
self.aged_accounts_payable = kwargs.get('aged_accounts_payable', None)
self.aged_accounts_receivable = kwargs.get('aged_accounts_receivable', None)
self.company_information = kwargs.get('company_information', None)
self.countries_regions = kwargs.get('countries_regions', None)
self.currencies = kwargs.get('currencies', None)
self.customer_payment_journals = kwargs.get('customer_payment_journals', None)
self.customer_payments = kwargs.get('customer_payments', None)
self.customers = kwargs.get('customers', None)
self.dimensions = kwargs.get('dimensions', None)
self.dimension_values = kwargs.get('dimension_values', None)
self.employees = kwargs.get('employees', None)
self.general_ledger_entries = kwargs.get('general_ledger_entries', None)
self.item_categories = kwargs.get('item_categories', None)
self.items = kwargs.get('items', None)
self.journal_lines = kwargs.get('journal_lines', None)
self.journals = kwargs.get('journals', None)
self.payment_methods = kwargs.get('payment_methods', None)
self.payment_terms = kwargs.get('payment_terms', None)
self.picture = kwargs.get('picture', None)
self.purchase_invoice_lines = kwargs.get('purchase_invoice_lines', None)
self.purchase_invoices = kwargs.get('purchase_invoices', None)
self.sales_credit_memo_lines = kwargs.get('sales_credit_memo_lines', None)
self.sales_credit_memos = kwargs.get('sales_credit_memos', None)
self.sales_invoice_lines = kwargs.get('sales_invoice_lines', None)
self.sales_invoices = kwargs.get('sales_invoices', None)
self.sales_order_lines = kwargs.get('sales_order_lines', None)
self.sales_orders = kwargs.get('sales_orders', None)
self.sales_quote_lines = kwargs.get('sales_quote_lines', None)
self.sales_quotes = kwargs.get('sales_quotes', None)
self.shipment_methods = kwargs.get('shipment_methods', None)
self.tax_areas = kwargs.get('tax_areas', None)
self.tax_groups = kwargs.get('tax_groups', None)
self.units_of_measure = kwargs.get('units_of_measure', None)
self.vendors = kwargs.get('vendors', None)
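# Illustrative sketch (hand-written, not generated code): building a minimal
# MicrosoftGraphCompany and serializing it with the msrest Model helpers.
# All field values are invented for demonstration. MicrosoftGraphCurrency is
# defined later in this module; the reference resolves at call time.
def _example_build_company():
    company = MicrosoftGraphCompany(
        display_name='Contoso Ltd.',
        name='contoso',
        currencies=[MicrosoftGraphCurrency(code='USD', symbol='$')],
    )
    # serialize() maps snake_case attributes back to the camelCase wire keys
    # declared in _attribute_map, e.g. display_name -> displayName.
    return company.serialize()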
class MicrosoftGraphCompanyInformation(MicrosoftGraphEntity):
"""companyInformation.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param address: postalAddressType.
:type address: ~financials.models.MicrosoftGraphPostalAddressType
:param currency_code:
:type currency_code: str
:param current_fiscal_year_start_date:
:type current_fiscal_year_start_date: ~datetime.date
:param display_name:
:type display_name: str
:param email:
:type email: str
:param fax_number:
:type fax_number: str
:param industry:
:type industry: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param phone_number:
:type phone_number: str
:param picture:
:type picture: bytes
:param tax_registration_number:
:type tax_registration_number: str
:param website:
:type website: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'address': {'key': 'address', 'type': 'MicrosoftGraphPostalAddressType'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'current_fiscal_year_start_date': {'key': 'currentFiscalYearStartDate', 'type': 'date'},
'display_name': {'key': 'displayName', 'type': 'str'},
'email': {'key': 'email', 'type': 'str'},
'fax_number': {'key': 'faxNumber', 'type': 'str'},
'industry': {'key': 'industry', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'picture': {'key': 'picture', 'type': 'base64'},
'tax_registration_number': {'key': 'taxRegistrationNumber', 'type': 'str'},
'website': {'key': 'website', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphCompanyInformation, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.address = kwargs.get('address', None)
self.currency_code = kwargs.get('currency_code', None)
self.current_fiscal_year_start_date = kwargs.get('current_fiscal_year_start_date', None)
self.display_name = kwargs.get('display_name', None)
self.email = kwargs.get('email', None)
self.fax_number = kwargs.get('fax_number', None)
self.industry = kwargs.get('industry', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.phone_number = kwargs.get('phone_number', None)
self.picture = kwargs.get('picture', None)
self.tax_registration_number = kwargs.get('tax_registration_number', None)
self.website = kwargs.get('website', None)
class MicrosoftGraphCountryRegion(MicrosoftGraphEntity):
"""countryRegion.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param address_format:
:type address_format: str
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'address_format': {'key': 'addressFormat', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphCountryRegion, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.address_format = kwargs.get('address_format', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
class MicrosoftGraphCurrency(MicrosoftGraphEntity):
"""currency.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param amount_decimal_places:
:type amount_decimal_places: str
:param amount_rounding_precision:
:type amount_rounding_precision: float
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param symbol:
:type symbol: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'amount_decimal_places': {'key': 'amountDecimalPlaces', 'type': 'str'},
'amount_rounding_precision': {'key': 'amountRoundingPrecision', 'type': 'float'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'symbol': {'key': 'symbol', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphCurrency, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.amount_decimal_places = kwargs.get('amount_decimal_places', None)
self.amount_rounding_precision = kwargs.get('amount_rounding_precision', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.symbol = kwargs.get('symbol', None)
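# Illustrative sketch: a deserialize/serialize round trip. Model.deserialize()
# reads a dict keyed by the RestAPI (camelCase) names from _attribute_map;
# the payload below is an invented sample, not real tenant data.
def _example_currency_round_trip():
    wire = {'code': 'EUR', 'displayName': 'Euro', 'symbol': u'\u20ac',
            'amountRoundingPrecision': 0.01}
    currency = MicrosoftGraphCurrency.deserialize(wire)
    assert currency.display_name == 'Euro'
    # Serializing again restores the camelCase wire keys.
    return currency.serialize()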
class MicrosoftGraphCustomer(MicrosoftGraphEntity):
"""customer.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param address: postalAddressType.
:type address: ~financials.models.MicrosoftGraphPostalAddressType
:param blocked:
:type blocked: str
:param currency_code:
:type currency_code: str
:param currency_id:
:type currency_id: str
:param display_name:
:type display_name: str
:param email:
:type email: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param payment_method_id:
:type payment_method_id: str
:param payment_terms_id:
:type payment_terms_id: str
:param phone_number:
:type phone_number: str
:param shipment_method_id:
:type shipment_method_id: str
:param tax_area_display_name:
:type tax_area_display_name: str
:param tax_area_id:
:type tax_area_id: str
:param tax_liable:
:type tax_liable: bool
:param tax_registration_number:
:type tax_registration_number: str
:param type:
:type type: str
:param website:
:type website: str
:param currency: currency.
:type currency: ~financials.models.MicrosoftGraphCurrency
:param payment_method: paymentMethod.
:type payment_method: ~financials.models.MicrosoftGraphPaymentMethod
:param payment_term: paymentTerm.
:type payment_term: ~financials.models.MicrosoftGraphPaymentTerm
:param picture:
:type picture: list[~financials.models.MicrosoftGraphPicture]
:param shipment_method: shipmentMethod.
:type shipment_method: ~financials.models.MicrosoftGraphShipmentMethod
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'address': {'key': 'address', 'type': 'MicrosoftGraphPostalAddressType'},
'blocked': {'key': 'blocked', 'type': 'str'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'currency_id': {'key': 'currencyId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'email': {'key': 'email', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'payment_method_id': {'key': 'paymentMethodId', 'type': 'str'},
'payment_terms_id': {'key': 'paymentTermsId', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'shipment_method_id': {'key': 'shipmentMethodId', 'type': 'str'},
'tax_area_display_name': {'key': 'taxAreaDisplayName', 'type': 'str'},
'tax_area_id': {'key': 'taxAreaId', 'type': 'str'},
'tax_liable': {'key': 'taxLiable', 'type': 'bool'},
'tax_registration_number': {'key': 'taxRegistrationNumber', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'website': {'key': 'website', 'type': 'str'},
'currency': {'key': 'currency', 'type': 'MicrosoftGraphCurrency'},
'payment_method': {'key': 'paymentMethod', 'type': 'MicrosoftGraphPaymentMethod'},
'payment_term': {'key': 'paymentTerm', 'type': 'MicrosoftGraphPaymentTerm'},
'picture': {'key': 'picture', 'type': '[MicrosoftGraphPicture]'},
'shipment_method': {'key': 'shipmentMethod', 'type': 'MicrosoftGraphShipmentMethod'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphCustomer, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.address = kwargs.get('address', None)
self.blocked = kwargs.get('blocked', None)
self.currency_code = kwargs.get('currency_code', None)
self.currency_id = kwargs.get('currency_id', None)
self.display_name = kwargs.get('display_name', None)
self.email = kwargs.get('email', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.payment_method_id = kwargs.get('payment_method_id', None)
self.payment_terms_id = kwargs.get('payment_terms_id', None)
self.phone_number = kwargs.get('phone_number', None)
self.shipment_method_id = kwargs.get('shipment_method_id', None)
self.tax_area_display_name = kwargs.get('tax_area_display_name', None)
self.tax_area_id = kwargs.get('tax_area_id', None)
self.tax_liable = kwargs.get('tax_liable', None)
self.tax_registration_number = kwargs.get('tax_registration_number', None)
self.type = kwargs.get('type', None)
self.website = kwargs.get('website', None)
self.currency = kwargs.get('currency', None)
self.payment_method = kwargs.get('payment_method', None)
self.payment_term = kwargs.get('payment_term', None)
self.picture = kwargs.get('picture', None)
self.shipment_method = kwargs.get('shipment_method', None)
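# Illustrative sketch: customer is the most heavily linked entity here,
# carrying both foreign-key style *_id fields and expanded navigation models.
# The values are assumptions for demonstration only; the nested classes are
# defined elsewhere in this module and resolve at call time.
def _example_build_customer():
    return MicrosoftGraphCustomer(
        display_name='Adventure Works',
        type='Company',
        address=MicrosoftGraphPostalAddressType(city='Seattle', state='WA'),
        currency=MicrosoftGraphCurrency(code='USD'),
        payment_method=MicrosoftGraphPaymentMethod(code='BANK'),
        tax_liable=True,
    )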
class MicrosoftGraphCustomerPayment(MicrosoftGraphEntity):
"""customerPayment.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param amount:
:type amount: float
:param applies_to_invoice_id:
:type applies_to_invoice_id: str
:param applies_to_invoice_number:
:type applies_to_invoice_number: str
:param comment:
:type comment: str
:param contact_id:
:type contact_id: str
:param customer_id:
:type customer_id: str
:param customer_number:
:type customer_number: str
:param description:
:type description: str
:param document_number:
:type document_number: str
:param external_document_number:
:type external_document_number: str
:param journal_display_name:
:type journal_display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param line_number:
:type line_number: int
:param posting_date:
:type posting_date: ~datetime.date
:param customer: customer.
:type customer: ~financials.models.MicrosoftGraphCustomer
"""
_validation = {
'line_number': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'amount': {'key': 'amount', 'type': 'float'},
'applies_to_invoice_id': {'key': 'appliesToInvoiceId', 'type': 'str'},
'applies_to_invoice_number': {'key': 'appliesToInvoiceNumber', 'type': 'str'},
'comment': {'key': 'comment', 'type': 'str'},
'contact_id': {'key': 'contactId', 'type': 'str'},
'customer_id': {'key': 'customerId', 'type': 'str'},
'customer_number': {'key': 'customerNumber', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'document_number': {'key': 'documentNumber', 'type': 'str'},
'external_document_number': {'key': 'externalDocumentNumber', 'type': 'str'},
'journal_display_name': {'key': 'journalDisplayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'line_number': {'key': 'lineNumber', 'type': 'int'},
'posting_date': {'key': 'postingDate', 'type': 'date'},
'customer': {'key': 'customer', 'type': 'MicrosoftGraphCustomer'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphCustomerPayment, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.amount = kwargs.get('amount', None)
self.applies_to_invoice_id = kwargs.get('applies_to_invoice_id', None)
self.applies_to_invoice_number = kwargs.get('applies_to_invoice_number', None)
self.comment = kwargs.get('comment', None)
self.contact_id = kwargs.get('contact_id', None)
self.customer_id = kwargs.get('customer_id', None)
self.customer_number = kwargs.get('customer_number', None)
self.description = kwargs.get('description', None)
self.document_number = kwargs.get('document_number', None)
self.external_document_number = kwargs.get('external_document_number', None)
self.journal_display_name = kwargs.get('journal_display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.line_number = kwargs.get('line_number', None)
self.posting_date = kwargs.get('posting_date', None)
self.customer = kwargs.get('customer', None)
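# Illustrative sketch: line_number is constrained to the signed 32-bit range
# by the _validation table above. Model.validate() (inherited from
# msrest.serialization.Model) returns a list of ValidationError entries for
# out-of-range values; this is a sketch of that client-side check, not part
# of the generated surface.
def _example_payment_validation():
    payment = MicrosoftGraphCustomerPayment(amount=250.0, line_number=2 ** 40)
    errors = payment.validate()
    # Expect one error reporting that line_number exceeds the maximum.
    return errors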
class MicrosoftGraphCustomerPaymentJournal(MicrosoftGraphEntity):
"""customerPaymentJournal.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param balancing_account_id:
:type balancing_account_id: str
:param balancing_account_number:
:type balancing_account_number: str
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
:param customer_payments:
:type customer_payments: list[~financials.models.MicrosoftGraphCustomerPayment]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'balancing_account_id': {'key': 'balancingAccountId', 'type': 'str'},
'balancing_account_number': {'key': 'balancingAccountNumber', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
'customer_payments': {'key': 'customerPayments', 'type': '[MicrosoftGraphCustomerPayment]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphCustomerPaymentJournal, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.balancing_account_id = kwargs.get('balancing_account_id', None)
self.balancing_account_number = kwargs.get('balancing_account_number', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.account = kwargs.get('account', None)
self.customer_payments = kwargs.get('customer_payments', None)
class MicrosoftGraphDimension(MicrosoftGraphEntity):
"""dimension.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param dimension_values:
:type dimension_values: list[~financials.models.MicrosoftGraphDimensionValue]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'dimension_values': {'key': 'dimensionValues', 'type': '[MicrosoftGraphDimensionValue]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphDimension, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.dimension_values = kwargs.get('dimension_values', None)
class MicrosoftGraphDimensionValue(MicrosoftGraphEntity):
"""dimensionValue.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphDimensionValue, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
class MicrosoftGraphEmployee(MicrosoftGraphEntity):
"""employee.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param address: postalAddressType.
:type address: ~financials.models.MicrosoftGraphPostalAddressType
:param birth_date:
:type birth_date: ~datetime.date
:param display_name:
:type display_name: str
:param email:
:type email: str
:param employment_date:
:type employment_date: ~datetime.date
:param given_name:
:type given_name: str
:param job_title:
:type job_title: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param middle_name:
:type middle_name: str
:param mobile_phone:
:type mobile_phone: str
:param number:
:type number: str
:param personal_email:
:type personal_email: str
:param phone_number:
:type phone_number: str
:param statistics_group_code:
:type statistics_group_code: str
:param status:
:type status: str
:param surname:
:type surname: str
:param termination_date:
:type termination_date: ~datetime.date
:param picture:
:type picture: list[~financials.models.MicrosoftGraphPicture]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'address': {'key': 'address', 'type': 'MicrosoftGraphPostalAddressType'},
'birth_date': {'key': 'birthDate', 'type': 'date'},
'display_name': {'key': 'displayName', 'type': 'str'},
'email': {'key': 'email', 'type': 'str'},
'employment_date': {'key': 'employmentDate', 'type': 'date'},
'given_name': {'key': 'givenName', 'type': 'str'},
'job_title': {'key': 'jobTitle', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'middle_name': {'key': 'middleName', 'type': 'str'},
'mobile_phone': {'key': 'mobilePhone', 'type': 'str'},
'number': {'key': 'number', 'type': 'str'},
'personal_email': {'key': 'personalEmail', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'statistics_group_code': {'key': 'statisticsGroupCode', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'surname': {'key': 'surname', 'type': 'str'},
'termination_date': {'key': 'terminationDate', 'type': 'date'},
'picture': {'key': 'picture', 'type': '[MicrosoftGraphPicture]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphEmployee, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.address = kwargs.get('address', None)
self.birth_date = kwargs.get('birth_date', None)
self.display_name = kwargs.get('display_name', None)
self.email = kwargs.get('email', None)
self.employment_date = kwargs.get('employment_date', None)
self.given_name = kwargs.get('given_name', None)
self.job_title = kwargs.get('job_title', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.middle_name = kwargs.get('middle_name', None)
self.mobile_phone = kwargs.get('mobile_phone', None)
self.number = kwargs.get('number', None)
self.personal_email = kwargs.get('personal_email', None)
self.phone_number = kwargs.get('phone_number', None)
self.statistics_group_code = kwargs.get('statistics_group_code', None)
self.status = kwargs.get('status', None)
self.surname = kwargs.get('surname', None)
self.termination_date = kwargs.get('termination_date', None)
self.picture = kwargs.get('picture', None)
class MicrosoftGraphFinancials(msrest.serialization.Model):
"""financials.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param companies:
:type companies: list[~financials.models.MicrosoftGraphCompany]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'companies': {'key': 'companies', 'type': '[MicrosoftGraphCompany]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphFinancials, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.companies = kwargs.get('companies', None)
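# Illustrative sketch: financials is the root container (a plain Model, not a
# MicrosoftGraphEntity, so it has no id) whose only declared content is the
# companies collection. Sample values are invented.
def _example_build_financials():
    return MicrosoftGraphFinancials(
        companies=[MicrosoftGraphCompany(display_name='Contoso Ltd.')],
    )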
class MicrosoftGraphGeneralLedgerEntry(MicrosoftGraphEntity):
"""generalLedgerEntry.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param account_id:
:type account_id: str
:param account_number:
:type account_number: str
:param credit_amount:
:type credit_amount: float
:param debit_amount:
:type debit_amount: float
:param description:
:type description: str
:param document_number:
:type document_number: str
:param document_type:
:type document_type: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param posting_date:
:type posting_date: ~datetime.date
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'account_id': {'key': 'accountId', 'type': 'str'},
'account_number': {'key': 'accountNumber', 'type': 'str'},
'credit_amount': {'key': 'creditAmount', 'type': 'float'},
'debit_amount': {'key': 'debitAmount', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
'document_number': {'key': 'documentNumber', 'type': 'str'},
'document_type': {'key': 'documentType', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'posting_date': {'key': 'postingDate', 'type': 'date'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphGeneralLedgerEntry, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.account_id = kwargs.get('account_id', None)
self.account_number = kwargs.get('account_number', None)
self.credit_amount = kwargs.get('credit_amount', None)
self.debit_amount = kwargs.get('debit_amount', None)
self.description = kwargs.get('description', None)
self.document_number = kwargs.get('document_number', None)
self.document_type = kwargs.get('document_type', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.posting_date = kwargs.get('posting_date', None)
self.account = kwargs.get('account', None)
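# Illustrative sketch: general ledger entries carry separate debit and credit
# columns; in double-entry terms a balanced posting pairs entries whose
# totals match. The account numbers and figures below are invented.
def _example_balanced_posting():
    debit = MicrosoftGraphGeneralLedgerEntry(account_number='1000',
                                             debit_amount=99.95)
    credit = MicrosoftGraphGeneralLedgerEntry(account_number='2000',
                                              credit_amount=99.95)
    return [debit, credit]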
class MicrosoftGraphItem(MicrosoftGraphEntity):
"""item.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param base_unit_of_measure_id:
:type base_unit_of_measure_id: str
:param blocked:
:type blocked: bool
:param display_name:
:type display_name: str
:param gtin:
:type gtin: str
:param inventory:
:type inventory: float
:param item_category_code:
:type item_category_code: str
:param item_category_id:
:type item_category_id: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param price_includes_tax:
:type price_includes_tax: bool
:param tax_group_code:
:type tax_group_code: str
:param tax_group_id:
:type tax_group_id: str
:param type:
:type type: str
:param unit_cost:
:type unit_cost: float
:param unit_price:
:type unit_price: float
:param item_category: itemCategory.
:type item_category: ~financials.models.MicrosoftGraphItemCategory
:param picture:
:type picture: list[~financials.models.MicrosoftGraphPicture]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'base_unit_of_measure_id': {'key': 'baseUnitOfMeasureId', 'type': 'str'},
'blocked': {'key': 'blocked', 'type': 'bool'},
'display_name': {'key': 'displayName', 'type': 'str'},
'gtin': {'key': 'gtin', 'type': 'str'},
'inventory': {'key': 'inventory', 'type': 'float'},
'item_category_code': {'key': 'itemCategoryCode', 'type': 'str'},
'item_category_id': {'key': 'itemCategoryId', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'price_includes_tax': {'key': 'priceIncludesTax', 'type': 'bool'},
'tax_group_code': {'key': 'taxGroupCode', 'type': 'str'},
'tax_group_id': {'key': 'taxGroupId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit_cost': {'key': 'unitCost', 'type': 'float'},
'unit_price': {'key': 'unitPrice', 'type': 'float'},
'item_category': {'key': 'itemCategory', 'type': 'MicrosoftGraphItemCategory'},
'picture': {'key': 'picture', 'type': '[MicrosoftGraphPicture]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphItem, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.base_unit_of_measure_id = kwargs.get('base_unit_of_measure_id', None)
self.blocked = kwargs.get('blocked', None)
self.display_name = kwargs.get('display_name', None)
self.gtin = kwargs.get('gtin', None)
self.inventory = kwargs.get('inventory', None)
self.item_category_code = kwargs.get('item_category_code', None)
self.item_category_id = kwargs.get('item_category_id', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.price_includes_tax = kwargs.get('price_includes_tax', None)
self.tax_group_code = kwargs.get('tax_group_code', None)
self.tax_group_id = kwargs.get('tax_group_id', None)
self.type = kwargs.get('type', None)
self.unit_cost = kwargs.get('unit_cost', None)
self.unit_price = kwargs.get('unit_price', None)
self.item_category = kwargs.get('item_category', None)
self.picture = kwargs.get('picture', None)
class MicrosoftGraphItemCategory(MicrosoftGraphEntity):
"""itemCategory.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphItemCategory, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
class MicrosoftGraphJournal(MicrosoftGraphEntity):
"""journal.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param balancing_account_id:
:type balancing_account_id: str
:param balancing_account_number:
:type balancing_account_number: str
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
:param journal_lines:
:type journal_lines: list[~financials.models.MicrosoftGraphJournalLine]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'balancing_account_id': {'key': 'balancingAccountId', 'type': 'str'},
'balancing_account_number': {'key': 'balancingAccountNumber', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
'journal_lines': {'key': 'journalLines', 'type': '[MicrosoftGraphJournalLine]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphJournal, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.balancing_account_id = kwargs.get('balancing_account_id', None)
self.balancing_account_number = kwargs.get('balancing_account_number', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.account = kwargs.get('account', None)
self.journal_lines = kwargs.get('journal_lines', None)
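# Illustrative sketch: a journal aggregates journalLine children alongside a
# balancing account. The code, date, and amount below are invented; datetime
# is imported locally so the sketch stays self-contained.
def _example_build_journal():
    import datetime
    line = MicrosoftGraphJournalLine(
        line_number=10000,
        amount=-99.95,
        posting_date=datetime.date(2021, 3, 31),
        description='Office supplies',
    )
    return MicrosoftGraphJournal(code='DEFAULT', journal_lines=[line])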
class MicrosoftGraphJournalLine(MicrosoftGraphEntity):
"""journalLine.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param account_id:
:type account_id: str
:param account_number:
:type account_number: str
:param amount:
:type amount: float
:param comment:
:type comment: str
:param description:
:type description: str
:param document_number:
:type document_number: str
:param external_document_number:
:type external_document_number: str
:param journal_display_name:
:type journal_display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param line_number:
:type line_number: int
:param posting_date:
:type posting_date: ~datetime.date
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
"""
_validation = {
'line_number': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'account_id': {'key': 'accountId', 'type': 'str'},
'account_number': {'key': 'accountNumber', 'type': 'str'},
'amount': {'key': 'amount', 'type': 'float'},
'comment': {'key': 'comment', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'document_number': {'key': 'documentNumber', 'type': 'str'},
'external_document_number': {'key': 'externalDocumentNumber', 'type': 'str'},
'journal_display_name': {'key': 'journalDisplayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'line_number': {'key': 'lineNumber', 'type': 'int'},
'posting_date': {'key': 'postingDate', 'type': 'date'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphJournalLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.account_id = kwargs.get('account_id', None)
self.account_number = kwargs.get('account_number', None)
self.amount = kwargs.get('amount', None)
self.comment = kwargs.get('comment', None)
self.description = kwargs.get('description', None)
self.document_number = kwargs.get('document_number', None)
self.external_document_number = kwargs.get('external_document_number', None)
self.journal_display_name = kwargs.get('journal_display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.line_number = kwargs.get('line_number', None)
self.posting_date = kwargs.get('posting_date', None)
self.account = kwargs.get('account', None)
class MicrosoftGraphPaymentMethod(MicrosoftGraphEntity):
"""paymentMethod.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphPaymentMethod, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
class MicrosoftGraphPaymentTerm(MicrosoftGraphEntity):
"""paymentTerm.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param calculate_discount_on_credit_memos:
:type calculate_discount_on_credit_memos: bool
:param code:
:type code: str
:param discount_date_calculation:
:type discount_date_calculation: str
:param discount_percent:
:type discount_percent: float
:param display_name:
:type display_name: str
:param due_date_calculation:
:type due_date_calculation: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'calculate_discount_on_credit_memos': {'key': 'calculateDiscountOnCreditMemos', 'type': 'bool'},
'code': {'key': 'code', 'type': 'str'},
'discount_date_calculation': {'key': 'discountDateCalculation', 'type': 'str'},
'discount_percent': {'key': 'discountPercent', 'type': 'float'},
'display_name': {'key': 'displayName', 'type': 'str'},
'due_date_calculation': {'key': 'dueDateCalculation', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphPaymentTerm, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.calculate_discount_on_credit_memos = kwargs.get('calculate_discount_on_credit_memos', None)
self.code = kwargs.get('code', None)
self.discount_date_calculation = kwargs.get('discount_date_calculation', None)
self.discount_percent = kwargs.get('discount_percent', None)
self.display_name = kwargs.get('display_name', None)
self.due_date_calculation = kwargs.get('due_date_calculation', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
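# Illustrative sketch of how the discount fields are commonly interpreted:
# discount_percent is a percentage applied when payment lands inside the
# discount window implied by discount_date_calculation. That interpretation
# is an assumption made for demonstration, not behaviour defined by the
# generated model itself.
def _example_discounted_amount(term, amount):
    if term.discount_percent:
        return amount * (1 - term.discount_percent / 100.0)
    return amount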
class MicrosoftGraphPicture(MicrosoftGraphEntity):
"""picture.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param content:
:type content: bytes
:param content_type:
:type content_type: str
:param height:
:type height: int
:param width:
:type width: int
"""
_validation = {
'height': {'maximum': 2147483647, 'minimum': -2147483648},
'width': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'content': {'key': 'content', 'type': 'base64'},
'content_type': {'key': 'contentType', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'width': {'key': 'width', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphPicture, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.content = kwargs.get('content', None)
self.content_type = kwargs.get('content_type', None)
self.height = kwargs.get('height', None)
self.width = kwargs.get('width', None)
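# Illustrative sketch: content is declared with type 'base64' in
# _attribute_map, so raw bytes assigned here are base64-encoded by
# serialize(). The pixel data below is an invented placeholder.
def _example_picture_serialization():
    pic = MicrosoftGraphPicture(
        content=b'\x89PNG\r\n\x1a\n',
        content_type='image/png',
        width=1,
        height=1,
    )
    return pic.serialize()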
class MicrosoftGraphPostalAddressType(msrest.serialization.Model):
"""postalAddressType.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param city:
:type city: str
:param country_letter_code:
:type country_letter_code: str
:param postal_code:
:type postal_code: str
:param state:
:type state: str
:param street:
:type street: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'city': {'key': 'city', 'type': 'str'},
'country_letter_code': {'key': 'countryLetterCode', 'type': 'str'},
'postal_code': {'key': 'postalCode', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'street': {'key': 'street', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphPostalAddressType, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.city = kwargs.get('city', None)
self.country_letter_code = kwargs.get('country_letter_code', None)
self.postal_code = kwargs.get('postal_code', None)
self.state = kwargs.get('state', None)
self.street = kwargs.get('street', None)
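# Illustrative sketch: additional_properties is the catch-all mapped to the
# empty key '' above, so unknown wire fields survive a round trip. How the
# extras are flattened back to the top level on serialize() follows msrest's
# handling of the '' key; the extra field name is invented.
def _example_address_round_trip():
    data = {'city': 'Oslo', 'postalCode': '0150', 'futureField': 'kept'}
    address = MicrosoftGraphPostalAddressType.deserialize(data)
    # The unmatched 'futureField' lands in additional_properties.
    return address.additional_properties, address.serialize()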
class MicrosoftGraphPurchaseInvoice(MicrosoftGraphEntity):
"""purchaseInvoice.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param buy_from_address: postalAddressType.
:type buy_from_address: ~financials.models.MicrosoftGraphPostalAddressType
:param currency_code:
:type currency_code: str
:param currency_id:
:type currency_id: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param due_date:
:type due_date: ~datetime.date
:param invoice_date:
:type invoice_date: ~datetime.date
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param pay_to_address: postalAddressType.
:type pay_to_address: ~financials.models.MicrosoftGraphPostalAddressType
:param pay_to_contact:
:type pay_to_contact: str
:param pay_to_name:
:type pay_to_name: str
:param pay_to_vendor_id:
:type pay_to_vendor_id: str
:param pay_to_vendor_number:
:type pay_to_vendor_number: str
:param prices_include_tax:
:type prices_include_tax: bool
:param ship_to_address: postalAddressType.
:type ship_to_address: ~financials.models.MicrosoftGraphPostalAddressType
:param ship_to_contact:
:type ship_to_contact: str
:param ship_to_name:
:type ship_to_name: str
:param status:
:type status: str
:param total_amount_excluding_tax:
:type total_amount_excluding_tax: float
:param total_amount_including_tax:
:type total_amount_including_tax: float
:param total_tax_amount:
:type total_tax_amount: float
:param vendor_id:
:type vendor_id: str
:param vendor_invoice_number:
:type vendor_invoice_number: str
:param vendor_name:
:type vendor_name: str
:param vendor_number:
:type vendor_number: str
:param currency: currency.
:type currency: ~financials.models.MicrosoftGraphCurrency
:param purchase_invoice_lines:
:type purchase_invoice_lines: list[~financials.models.MicrosoftGraphPurchaseInvoiceLine]
:param vendor: vendor.
:type vendor: ~financials.models.MicrosoftGraphVendor
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'buy_from_address': {'key': 'buyFromAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'currency_id': {'key': 'currencyId', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'due_date': {'key': 'dueDate', 'type': 'date'},
'invoice_date': {'key': 'invoiceDate', 'type': 'date'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'pay_to_address': {'key': 'payToAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'pay_to_contact': {'key': 'payToContact', 'type': 'str'},
'pay_to_name': {'key': 'payToName', 'type': 'str'},
'pay_to_vendor_id': {'key': 'payToVendorId', 'type': 'str'},
'pay_to_vendor_number': {'key': 'payToVendorNumber', 'type': 'str'},
'prices_include_tax': {'key': 'pricesIncludeTax', 'type': 'bool'},
'ship_to_address': {'key': 'shipToAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'ship_to_contact': {'key': 'shipToContact', 'type': 'str'},
'ship_to_name': {'key': 'shipToName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'total_amount_excluding_tax': {'key': 'totalAmountExcludingTax', 'type': 'float'},
'total_amount_including_tax': {'key': 'totalAmountIncludingTax', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'vendor_id': {'key': 'vendorId', 'type': 'str'},
'vendor_invoice_number': {'key': 'vendorInvoiceNumber', 'type': 'str'},
'vendor_name': {'key': 'vendorName', 'type': 'str'},
'vendor_number': {'key': 'vendorNumber', 'type': 'str'},
'currency': {'key': 'currency', 'type': 'MicrosoftGraphCurrency'},
'purchase_invoice_lines': {'key': 'purchaseInvoiceLines', 'type': '[MicrosoftGraphPurchaseInvoiceLine]'},
'vendor': {'key': 'vendor', 'type': 'MicrosoftGraphVendor'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphPurchaseInvoice, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.buy_from_address = kwargs.get('buy_from_address', None)
self.currency_code = kwargs.get('currency_code', None)
self.currency_id = kwargs.get('currency_id', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.due_date = kwargs.get('due_date', None)
self.invoice_date = kwargs.get('invoice_date', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.pay_to_address = kwargs.get('pay_to_address', None)
self.pay_to_contact = kwargs.get('pay_to_contact', None)
self.pay_to_name = kwargs.get('pay_to_name', None)
self.pay_to_vendor_id = kwargs.get('pay_to_vendor_id', None)
self.pay_to_vendor_number = kwargs.get('pay_to_vendor_number', None)
self.prices_include_tax = kwargs.get('prices_include_tax', None)
self.ship_to_address = kwargs.get('ship_to_address', None)
self.ship_to_contact = kwargs.get('ship_to_contact', None)
self.ship_to_name = kwargs.get('ship_to_name', None)
self.status = kwargs.get('status', None)
self.total_amount_excluding_tax = kwargs.get('total_amount_excluding_tax', None)
self.total_amount_including_tax = kwargs.get('total_amount_including_tax', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.vendor_id = kwargs.get('vendor_id', None)
self.vendor_invoice_number = kwargs.get('vendor_invoice_number', None)
self.vendor_name = kwargs.get('vendor_name', None)
self.vendor_number = kwargs.get('vendor_number', None)
self.currency = kwargs.get('currency', None)
self.purchase_invoice_lines = kwargs.get('purchase_invoice_lines', None)
self.vendor = kwargs.get('vendor', None)
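# Illustrative sketch: a purchase invoice ties dates, totals, and vendor
# references together. as_dict() (attribute-named keys) is contrasted with
# serialize() (camelCase wire keys); all values are invented samples.
def _example_build_purchase_invoice():
    import datetime
    invoice = MicrosoftGraphPurchaseInvoice(
        number='PI-1001',
        invoice_date=datetime.date(2021, 4, 1),
        due_date=datetime.date(2021, 4, 30),
        total_amount_excluding_tax=100.0,
        total_tax_amount=25.0,
        total_amount_including_tax=125.0,
        vendor_name='Fabrikam',
        vendor_number='V0042',
    )
    return invoice.as_dict(), invoice.serialize()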
class MicrosoftGraphPurchaseInvoiceLine(MicrosoftGraphEntity):
"""purchaseInvoiceLine.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param account_id:
:type account_id: str
:param amount_excluding_tax:
:type amount_excluding_tax: float
:param amount_including_tax:
:type amount_including_tax: float
:param description:
:type description: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param discount_percent:
:type discount_percent: float
:param document_id:
:type document_id: str
:param expected_receipt_date:
:type expected_receipt_date: ~datetime.date
:param invoice_discount_allocation:
:type invoice_discount_allocation: float
:param item_id:
:type item_id: str
:param line_type:
:type line_type: str
:param net_amount:
:type net_amount: float
:param net_amount_including_tax:
:type net_amount_including_tax: float
:param net_tax_amount:
:type net_tax_amount: float
:param quantity:
:type quantity: float
:param sequence:
:type sequence: int
:param tax_code:
:type tax_code: str
:param tax_percent:
:type tax_percent: float
:param total_tax_amount:
:type total_tax_amount: float
:param unit_cost:
:type unit_cost: float
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
:param item: item.
:type item: ~financials.models.MicrosoftGraphItem
"""
_validation = {
'sequence': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'account_id': {'key': 'accountId', 'type': 'str'},
'amount_excluding_tax': {'key': 'amountExcludingTax', 'type': 'float'},
'amount_including_tax': {'key': 'amountIncludingTax', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'discount_percent': {'key': 'discountPercent', 'type': 'float'},
'document_id': {'key': 'documentId', 'type': 'str'},
'expected_receipt_date': {'key': 'expectedReceiptDate', 'type': 'date'},
'invoice_discount_allocation': {'key': 'invoiceDiscountAllocation', 'type': 'float'},
'item_id': {'key': 'itemId', 'type': 'str'},
'line_type': {'key': 'lineType', 'type': 'str'},
'net_amount': {'key': 'netAmount', 'type': 'float'},
'net_amount_including_tax': {'key': 'netAmountIncludingTax', 'type': 'float'},
'net_tax_amount': {'key': 'netTaxAmount', 'type': 'float'},
'quantity': {'key': 'quantity', 'type': 'float'},
'sequence': {'key': 'sequence', 'type': 'int'},
'tax_code': {'key': 'taxCode', 'type': 'str'},
'tax_percent': {'key': 'taxPercent', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'unit_cost': {'key': 'unitCost', 'type': 'float'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
'item': {'key': 'item', 'type': 'MicrosoftGraphItem'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphPurchaseInvoiceLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.account_id = kwargs.get('account_id', None)
self.amount_excluding_tax = kwargs.get('amount_excluding_tax', None)
self.amount_including_tax = kwargs.get('amount_including_tax', None)
self.description = kwargs.get('description', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.discount_percent = kwargs.get('discount_percent', None)
self.document_id = kwargs.get('document_id', None)
self.expected_receipt_date = kwargs.get('expected_receipt_date', None)
self.invoice_discount_allocation = kwargs.get('invoice_discount_allocation', None)
self.item_id = kwargs.get('item_id', None)
self.line_type = kwargs.get('line_type', None)
self.net_amount = kwargs.get('net_amount', None)
self.net_amount_including_tax = kwargs.get('net_amount_including_tax', None)
self.net_tax_amount = kwargs.get('net_tax_amount', None)
self.quantity = kwargs.get('quantity', None)
self.sequence = kwargs.get('sequence', None)
self.tax_code = kwargs.get('tax_code', None)
self.tax_percent = kwargs.get('tax_percent', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.unit_cost = kwargs.get('unit_cost', None)
self.account = kwargs.get('account', None)
self.item = kwargs.get('item', None)
class MicrosoftGraphSalesCreditMemo(MicrosoftGraphEntity):
"""salesCreditMemo.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param billing_postal_address: postalAddressType.
:type billing_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param bill_to_customer_id:
:type bill_to_customer_id: str
:param bill_to_customer_number:
:type bill_to_customer_number: str
:param bill_to_name:
:type bill_to_name: str
:param credit_memo_date:
:type credit_memo_date: ~datetime.date
:param currency_code:
:type currency_code: str
:param currency_id:
:type currency_id: str
:param customer_id:
:type customer_id: str
:param customer_name:
:type customer_name: str
:param customer_number:
:type customer_number: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param due_date:
:type due_date: ~datetime.date
:param email:
:type email: str
:param external_document_number:
:type external_document_number: str
:param invoice_id:
:type invoice_id: str
:param invoice_number:
:type invoice_number: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param payment_terms_id:
:type payment_terms_id: str
:param phone_number:
:type phone_number: str
:param prices_include_tax:
:type prices_include_tax: bool
:param salesperson:
:type salesperson: str
:param selling_postal_address: postalAddressType.
:type selling_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param status:
:type status: str
:param total_amount_excluding_tax:
:type total_amount_excluding_tax: float
:param total_amount_including_tax:
:type total_amount_including_tax: float
:param total_tax_amount:
:type total_tax_amount: float
:param currency: currency.
:type currency: ~financials.models.MicrosoftGraphCurrency
:param customer: customer.
:type customer: ~financials.models.MicrosoftGraphCustomer
:param payment_term: paymentTerm.
:type payment_term: ~financials.models.MicrosoftGraphPaymentTerm
:param sales_credit_memo_lines:
:type sales_credit_memo_lines: list[~financials.models.MicrosoftGraphSalesCreditMemoLine]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'billing_postal_address': {'key': 'billingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'bill_to_customer_id': {'key': 'billToCustomerId', 'type': 'str'},
'bill_to_customer_number': {'key': 'billToCustomerNumber', 'type': 'str'},
'bill_to_name': {'key': 'billToName', 'type': 'str'},
'credit_memo_date': {'key': 'creditMemoDate', 'type': 'date'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'currency_id': {'key': 'currencyId', 'type': 'str'},
'customer_id': {'key': 'customerId', 'type': 'str'},
'customer_name': {'key': 'customerName', 'type': 'str'},
'customer_number': {'key': 'customerNumber', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'due_date': {'key': 'dueDate', 'type': 'date'},
'email': {'key': 'email', 'type': 'str'},
'external_document_number': {'key': 'externalDocumentNumber', 'type': 'str'},
'invoice_id': {'key': 'invoiceId', 'type': 'str'},
'invoice_number': {'key': 'invoiceNumber', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'payment_terms_id': {'key': 'paymentTermsId', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'prices_include_tax': {'key': 'pricesIncludeTax', 'type': 'bool'},
'salesperson': {'key': 'salesperson', 'type': 'str'},
'selling_postal_address': {'key': 'sellingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'status': {'key': 'status', 'type': 'str'},
'total_amount_excluding_tax': {'key': 'totalAmountExcludingTax', 'type': 'float'},
'total_amount_including_tax': {'key': 'totalAmountIncludingTax', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'currency': {'key': 'currency', 'type': 'MicrosoftGraphCurrency'},
'customer': {'key': 'customer', 'type': 'MicrosoftGraphCustomer'},
'payment_term': {'key': 'paymentTerm', 'type': 'MicrosoftGraphPaymentTerm'},
'sales_credit_memo_lines': {'key': 'salesCreditMemoLines', 'type': '[MicrosoftGraphSalesCreditMemoLine]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesCreditMemo, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.billing_postal_address = kwargs.get('billing_postal_address', None)
self.bill_to_customer_id = kwargs.get('bill_to_customer_id', None)
self.bill_to_customer_number = kwargs.get('bill_to_customer_number', None)
self.bill_to_name = kwargs.get('bill_to_name', None)
self.credit_memo_date = kwargs.get('credit_memo_date', None)
self.currency_code = kwargs.get('currency_code', None)
self.currency_id = kwargs.get('currency_id', None)
self.customer_id = kwargs.get('customer_id', None)
self.customer_name = kwargs.get('customer_name', None)
self.customer_number = kwargs.get('customer_number', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.due_date = kwargs.get('due_date', None)
self.email = kwargs.get('email', None)
self.external_document_number = kwargs.get('external_document_number', None)
self.invoice_id = kwargs.get('invoice_id', None)
self.invoice_number = kwargs.get('invoice_number', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.payment_terms_id = kwargs.get('payment_terms_id', None)
self.phone_number = kwargs.get('phone_number', None)
self.prices_include_tax = kwargs.get('prices_include_tax', None)
self.salesperson = kwargs.get('salesperson', None)
self.selling_postal_address = kwargs.get('selling_postal_address', None)
self.status = kwargs.get('status', None)
self.total_amount_excluding_tax = kwargs.get('total_amount_excluding_tax', None)
self.total_amount_including_tax = kwargs.get('total_amount_including_tax', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.currency = kwargs.get('currency', None)
self.customer = kwargs.get('customer', None)
self.payment_term = kwargs.get('payment_term', None)
self.sales_credit_memo_lines = kwargs.get('sales_credit_memo_lines', None)
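# --- illustrative usage, not generated code ---------------------------------
# Minimal sketch of the keyword-only construction pattern shared by every
# model in this module; all field values below are made-up placeholders.
def _example_sales_credit_memo():
    import datetime

    memo = MicrosoftGraphSalesCreditMemo(
        customer_name='Contoso Ltd.',
        credit_memo_date=datetime.date(2021, 8, 1),
        total_amount_including_tax=119.0,
    )
    # Any field not passed as a keyword argument falls back to None.
    assert memo.discount_amount is None
    return memo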
class MicrosoftGraphSalesCreditMemoLine(MicrosoftGraphEntity):
"""salesCreditMemoLine.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param account_id:
:type account_id: str
:param amount_excluding_tax:
:type amount_excluding_tax: float
:param amount_including_tax:
:type amount_including_tax: float
:param description:
:type description: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param discount_percent:
:type discount_percent: float
:param document_id:
:type document_id: str
:param invoice_discount_allocation:
:type invoice_discount_allocation: float
:param item_id:
:type item_id: str
:param line_type:
:type line_type: str
:param net_amount:
:type net_amount: float
:param net_amount_including_tax:
:type net_amount_including_tax: float
:param net_tax_amount:
:type net_tax_amount: float
:param quantity:
:type quantity: float
:param sequence:
:type sequence: int
:param shipment_date:
:type shipment_date: ~datetime.date
:param tax_code:
:type tax_code: str
:param tax_percent:
:type tax_percent: float
:param total_tax_amount:
:type total_tax_amount: float
:param unit_of_measure_id:
:type unit_of_measure_id: str
:param unit_price:
:type unit_price: float
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
:param item: item.
:type item: ~financials.models.MicrosoftGraphItem
"""
_validation = {
'sequence': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'account_id': {'key': 'accountId', 'type': 'str'},
'amount_excluding_tax': {'key': 'amountExcludingTax', 'type': 'float'},
'amount_including_tax': {'key': 'amountIncludingTax', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'discount_percent': {'key': 'discountPercent', 'type': 'float'},
'document_id': {'key': 'documentId', 'type': 'str'},
'invoice_discount_allocation': {'key': 'invoiceDiscountAllocation', 'type': 'float'},
'item_id': {'key': 'itemId', 'type': 'str'},
'line_type': {'key': 'lineType', 'type': 'str'},
'net_amount': {'key': 'netAmount', 'type': 'float'},
'net_amount_including_tax': {'key': 'netAmountIncludingTax', 'type': 'float'},
'net_tax_amount': {'key': 'netTaxAmount', 'type': 'float'},
'quantity': {'key': 'quantity', 'type': 'float'},
'sequence': {'key': 'sequence', 'type': 'int'},
'shipment_date': {'key': 'shipmentDate', 'type': 'date'},
'tax_code': {'key': 'taxCode', 'type': 'str'},
'tax_percent': {'key': 'taxPercent', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'unit_of_measure_id': {'key': 'unitOfMeasureId', 'type': 'str'},
'unit_price': {'key': 'unitPrice', 'type': 'float'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
'item': {'key': 'item', 'type': 'MicrosoftGraphItem'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesCreditMemoLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.account_id = kwargs.get('account_id', None)
self.amount_excluding_tax = kwargs.get('amount_excluding_tax', None)
self.amount_including_tax = kwargs.get('amount_including_tax', None)
self.description = kwargs.get('description', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.discount_percent = kwargs.get('discount_percent', None)
self.document_id = kwargs.get('document_id', None)
self.invoice_discount_allocation = kwargs.get('invoice_discount_allocation', None)
self.item_id = kwargs.get('item_id', None)
self.line_type = kwargs.get('line_type', None)
self.net_amount = kwargs.get('net_amount', None)
self.net_amount_including_tax = kwargs.get('net_amount_including_tax', None)
self.net_tax_amount = kwargs.get('net_tax_amount', None)
self.quantity = kwargs.get('quantity', None)
self.sequence = kwargs.get('sequence', None)
self.shipment_date = kwargs.get('shipment_date', None)
self.tax_code = kwargs.get('tax_code', None)
self.tax_percent = kwargs.get('tax_percent', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.unit_of_measure_id = kwargs.get('unit_of_measure_id', None)
self.unit_price = kwargs.get('unit_price', None)
self.account = kwargs.get('account', None)
self.item = kwargs.get('item', None)
class MicrosoftGraphSalesInvoice(MicrosoftGraphEntity):
"""salesInvoice.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param billing_postal_address: postalAddressType.
:type billing_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param bill_to_customer_id:
:type bill_to_customer_id: str
:param bill_to_customer_number:
:type bill_to_customer_number: str
:param bill_to_name:
:type bill_to_name: str
:param currency_code:
:type currency_code: str
:param currency_id:
:type currency_id: str
:param customer_id:
:type customer_id: str
:param customer_name:
:type customer_name: str
:param customer_number:
:type customer_number: str
:param customer_purchase_order_reference:
:type customer_purchase_order_reference: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param due_date:
:type due_date: ~datetime.date
:param email:
:type email: str
:param external_document_number:
:type external_document_number: str
:param invoice_date:
:type invoice_date: ~datetime.date
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param order_id:
:type order_id: str
:param order_number:
:type order_number: str
:param payment_terms_id:
:type payment_terms_id: str
:param phone_number:
:type phone_number: str
:param prices_include_tax:
:type prices_include_tax: bool
:param salesperson:
:type salesperson: str
:param selling_postal_address: postalAddressType.
:type selling_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param shipment_method_id:
:type shipment_method_id: str
:param shipping_postal_address: postalAddressType.
:type shipping_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param ship_to_contact:
:type ship_to_contact: str
:param ship_to_name:
:type ship_to_name: str
:param status:
:type status: str
:param total_amount_excluding_tax:
:type total_amount_excluding_tax: float
:param total_amount_including_tax:
:type total_amount_including_tax: float
:param total_tax_amount:
:type total_tax_amount: float
:param currency: currency.
:type currency: ~financials.models.MicrosoftGraphCurrency
:param customer: customer.
:type customer: ~financials.models.MicrosoftGraphCustomer
:param payment_term: paymentTerm.
:type payment_term: ~financials.models.MicrosoftGraphPaymentTerm
:param sales_invoice_lines:
:type sales_invoice_lines: list[~financials.models.MicrosoftGraphSalesInvoiceLine]
:param shipment_method: shipmentMethod.
:type shipment_method: ~financials.models.MicrosoftGraphShipmentMethod
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'billing_postal_address': {'key': 'billingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'bill_to_customer_id': {'key': 'billToCustomerId', 'type': 'str'},
'bill_to_customer_number': {'key': 'billToCustomerNumber', 'type': 'str'},
'bill_to_name': {'key': 'billToName', 'type': 'str'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'currency_id': {'key': 'currencyId', 'type': 'str'},
'customer_id': {'key': 'customerId', 'type': 'str'},
'customer_name': {'key': 'customerName', 'type': 'str'},
'customer_number': {'key': 'customerNumber', 'type': 'str'},
'customer_purchase_order_reference': {'key': 'customerPurchaseOrderReference', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'due_date': {'key': 'dueDate', 'type': 'date'},
'email': {'key': 'email', 'type': 'str'},
'external_document_number': {'key': 'externalDocumentNumber', 'type': 'str'},
'invoice_date': {'key': 'invoiceDate', 'type': 'date'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'order_id': {'key': 'orderId', 'type': 'str'},
'order_number': {'key': 'orderNumber', 'type': 'str'},
'payment_terms_id': {'key': 'paymentTermsId', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'prices_include_tax': {'key': 'pricesIncludeTax', 'type': 'bool'},
'salesperson': {'key': 'salesperson', 'type': 'str'},
'selling_postal_address': {'key': 'sellingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'shipment_method_id': {'key': 'shipmentMethodId', 'type': 'str'},
'shipping_postal_address': {'key': 'shippingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'ship_to_contact': {'key': 'shipToContact', 'type': 'str'},
'ship_to_name': {'key': 'shipToName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'total_amount_excluding_tax': {'key': 'totalAmountExcludingTax', 'type': 'float'},
'total_amount_including_tax': {'key': 'totalAmountIncludingTax', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'currency': {'key': 'currency', 'type': 'MicrosoftGraphCurrency'},
'customer': {'key': 'customer', 'type': 'MicrosoftGraphCustomer'},
'payment_term': {'key': 'paymentTerm', 'type': 'MicrosoftGraphPaymentTerm'},
'sales_invoice_lines': {'key': 'salesInvoiceLines', 'type': '[MicrosoftGraphSalesInvoiceLine]'},
'shipment_method': {'key': 'shipmentMethod', 'type': 'MicrosoftGraphShipmentMethod'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesInvoice, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.billing_postal_address = kwargs.get('billing_postal_address', None)
self.bill_to_customer_id = kwargs.get('bill_to_customer_id', None)
self.bill_to_customer_number = kwargs.get('bill_to_customer_number', None)
self.bill_to_name = kwargs.get('bill_to_name', None)
self.currency_code = kwargs.get('currency_code', None)
self.currency_id = kwargs.get('currency_id', None)
self.customer_id = kwargs.get('customer_id', None)
self.customer_name = kwargs.get('customer_name', None)
self.customer_number = kwargs.get('customer_number', None)
self.customer_purchase_order_reference = kwargs.get('customer_purchase_order_reference', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.due_date = kwargs.get('due_date', None)
self.email = kwargs.get('email', None)
self.external_document_number = kwargs.get('external_document_number', None)
self.invoice_date = kwargs.get('invoice_date', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.order_id = kwargs.get('order_id', None)
self.order_number = kwargs.get('order_number', None)
self.payment_terms_id = kwargs.get('payment_terms_id', None)
self.phone_number = kwargs.get('phone_number', None)
self.prices_include_tax = kwargs.get('prices_include_tax', None)
self.salesperson = kwargs.get('salesperson', None)
self.selling_postal_address = kwargs.get('selling_postal_address', None)
self.shipment_method_id = kwargs.get('shipment_method_id', None)
self.shipping_postal_address = kwargs.get('shipping_postal_address', None)
self.ship_to_contact = kwargs.get('ship_to_contact', None)
self.ship_to_name = kwargs.get('ship_to_name', None)
self.status = kwargs.get('status', None)
self.total_amount_excluding_tax = kwargs.get('total_amount_excluding_tax', None)
self.total_amount_including_tax = kwargs.get('total_amount_including_tax', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.currency = kwargs.get('currency', None)
self.customer = kwargs.get('customer', None)
self.payment_term = kwargs.get('payment_term', None)
self.sales_invoice_lines = kwargs.get('sales_invoice_lines', None)
self.shipment_method = kwargs.get('shipment_method', None)
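# --- illustrative usage, not generated code ---------------------------------
# Minimal sketch of the mapping declared in _attribute_map: attributes are
# snake_case in Python and camelCase on the wire. It assumes the serialize()
# and deserialize() helpers inherited from msrest.serialization.Model.
def _example_invoice_roundtrip():
    invoice = MicrosoftGraphSalesInvoice(customer_name='Contoso Ltd.')
    wire = invoice.serialize()  # e.g. {'customerName': 'Contoso Ltd.'}
    restored = MicrosoftGraphSalesInvoice.deserialize(wire)
    assert restored.customer_name == 'Contoso Ltd.'
    return wire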
class MicrosoftGraphSalesInvoiceLine(MicrosoftGraphEntity):
"""salesInvoiceLine.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param account_id:
:type account_id: str
:param amount_excluding_tax:
:type amount_excluding_tax: float
:param amount_including_tax:
:type amount_including_tax: float
:param description:
:type description: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param discount_percent:
:type discount_percent: float
:param document_id:
:type document_id: str
:param invoice_discount_allocation:
:type invoice_discount_allocation: float
:param item_id:
:type item_id: str
:param line_type:
:type line_type: str
:param net_amount:
:type net_amount: float
:param net_amount_including_tax:
:type net_amount_including_tax: float
:param net_tax_amount:
:type net_tax_amount: float
:param quantity:
:type quantity: float
:param sequence:
:type sequence: int
:param shipment_date:
:type shipment_date: ~datetime.date
:param tax_code:
:type tax_code: str
:param tax_percent:
:type tax_percent: float
:param total_tax_amount:
:type total_tax_amount: float
:param unit_of_measure_id:
:type unit_of_measure_id: str
:param unit_price:
:type unit_price: float
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
:param item: item.
:type item: ~financials.models.MicrosoftGraphItem
"""
_validation = {
'sequence': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'account_id': {'key': 'accountId', 'type': 'str'},
'amount_excluding_tax': {'key': 'amountExcludingTax', 'type': 'float'},
'amount_including_tax': {'key': 'amountIncludingTax', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'discount_percent': {'key': 'discountPercent', 'type': 'float'},
'document_id': {'key': 'documentId', 'type': 'str'},
'invoice_discount_allocation': {'key': 'invoiceDiscountAllocation', 'type': 'float'},
'item_id': {'key': 'itemId', 'type': 'str'},
'line_type': {'key': 'lineType', 'type': 'str'},
'net_amount': {'key': 'netAmount', 'type': 'float'},
'net_amount_including_tax': {'key': 'netAmountIncludingTax', 'type': 'float'},
'net_tax_amount': {'key': 'netTaxAmount', 'type': 'float'},
'quantity': {'key': 'quantity', 'type': 'float'},
'sequence': {'key': 'sequence', 'type': 'int'},
'shipment_date': {'key': 'shipmentDate', 'type': 'date'},
'tax_code': {'key': 'taxCode', 'type': 'str'},
'tax_percent': {'key': 'taxPercent', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'unit_of_measure_id': {'key': 'unitOfMeasureId', 'type': 'str'},
'unit_price': {'key': 'unitPrice', 'type': 'float'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
'item': {'key': 'item', 'type': 'MicrosoftGraphItem'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesInvoiceLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.account_id = kwargs.get('account_id', None)
self.amount_excluding_tax = kwargs.get('amount_excluding_tax', None)
self.amount_including_tax = kwargs.get('amount_including_tax', None)
self.description = kwargs.get('description', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.discount_percent = kwargs.get('discount_percent', None)
self.document_id = kwargs.get('document_id', None)
self.invoice_discount_allocation = kwargs.get('invoice_discount_allocation', None)
self.item_id = kwargs.get('item_id', None)
self.line_type = kwargs.get('line_type', None)
self.net_amount = kwargs.get('net_amount', None)
self.net_amount_including_tax = kwargs.get('net_amount_including_tax', None)
self.net_tax_amount = kwargs.get('net_tax_amount', None)
self.quantity = kwargs.get('quantity', None)
self.sequence = kwargs.get('sequence', None)
self.shipment_date = kwargs.get('shipment_date', None)
self.tax_code = kwargs.get('tax_code', None)
self.tax_percent = kwargs.get('tax_percent', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.unit_of_measure_id = kwargs.get('unit_of_measure_id', None)
self.unit_price = kwargs.get('unit_price', None)
self.account = kwargs.get('account', None)
self.item = kwargs.get('item', None)
class MicrosoftGraphSalesOrder(MicrosoftGraphEntity):
"""salesOrder.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param billing_postal_address: postalAddressType.
:type billing_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param bill_to_customer_id:
:type bill_to_customer_id: str
:param bill_to_customer_number:
:type bill_to_customer_number: str
:param bill_to_name:
:type bill_to_name: str
:param currency_code:
:type currency_code: str
:param currency_id:
:type currency_id: str
:param customer_id:
:type customer_id: str
:param customer_name:
:type customer_name: str
:param customer_number:
:type customer_number: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param email:
:type email: str
:param external_document_number:
:type external_document_number: str
:param fully_shipped:
:type fully_shipped: bool
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param order_date:
:type order_date: ~datetime.date
:param partial_shipping:
:type partial_shipping: bool
:param payment_terms_id:
:type payment_terms_id: str
:param phone_number:
:type phone_number: str
:param prices_include_tax:
:type prices_include_tax: bool
:param requested_delivery_date:
:type requested_delivery_date: ~datetime.date
:param salesperson:
:type salesperson: str
:param selling_postal_address: postalAddressType.
:type selling_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param shipping_postal_address: postalAddressType.
:type shipping_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param ship_to_contact:
:type ship_to_contact: str
:param ship_to_name:
:type ship_to_name: str
:param status:
:type status: str
:param total_amount_excluding_tax:
:type total_amount_excluding_tax: float
:param total_amount_including_tax:
:type total_amount_including_tax: float
:param total_tax_amount:
:type total_tax_amount: float
:param currency: currency.
:type currency: ~financials.models.MicrosoftGraphCurrency
:param customer: customer.
:type customer: ~financials.models.MicrosoftGraphCustomer
:param payment_term: paymentTerm.
:type payment_term: ~financials.models.MicrosoftGraphPaymentTerm
:param sales_order_lines:
:type sales_order_lines: list[~financials.models.MicrosoftGraphSalesOrderLine]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'billing_postal_address': {'key': 'billingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'bill_to_customer_id': {'key': 'billToCustomerId', 'type': 'str'},
'bill_to_customer_number': {'key': 'billToCustomerNumber', 'type': 'str'},
'bill_to_name': {'key': 'billToName', 'type': 'str'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'currency_id': {'key': 'currencyId', 'type': 'str'},
'customer_id': {'key': 'customerId', 'type': 'str'},
'customer_name': {'key': 'customerName', 'type': 'str'},
'customer_number': {'key': 'customerNumber', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'email': {'key': 'email', 'type': 'str'},
'external_document_number': {'key': 'externalDocumentNumber', 'type': 'str'},
'fully_shipped': {'key': 'fullyShipped', 'type': 'bool'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'order_date': {'key': 'orderDate', 'type': 'date'},
'partial_shipping': {'key': 'partialShipping', 'type': 'bool'},
'payment_terms_id': {'key': 'paymentTermsId', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'prices_include_tax': {'key': 'pricesIncludeTax', 'type': 'bool'},
'requested_delivery_date': {'key': 'requestedDeliveryDate', 'type': 'date'},
'salesperson': {'key': 'salesperson', 'type': 'str'},
'selling_postal_address': {'key': 'sellingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'shipping_postal_address': {'key': 'shippingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'ship_to_contact': {'key': 'shipToContact', 'type': 'str'},
'ship_to_name': {'key': 'shipToName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'total_amount_excluding_tax': {'key': 'totalAmountExcludingTax', 'type': 'float'},
'total_amount_including_tax': {'key': 'totalAmountIncludingTax', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'currency': {'key': 'currency', 'type': 'MicrosoftGraphCurrency'},
'customer': {'key': 'customer', 'type': 'MicrosoftGraphCustomer'},
'payment_term': {'key': 'paymentTerm', 'type': 'MicrosoftGraphPaymentTerm'},
'sales_order_lines': {'key': 'salesOrderLines', 'type': '[MicrosoftGraphSalesOrderLine]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesOrder, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.billing_postal_address = kwargs.get('billing_postal_address', None)
self.bill_to_customer_id = kwargs.get('bill_to_customer_id', None)
self.bill_to_customer_number = kwargs.get('bill_to_customer_number', None)
self.bill_to_name = kwargs.get('bill_to_name', None)
self.currency_code = kwargs.get('currency_code', None)
self.currency_id = kwargs.get('currency_id', None)
self.customer_id = kwargs.get('customer_id', None)
self.customer_name = kwargs.get('customer_name', None)
self.customer_number = kwargs.get('customer_number', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.email = kwargs.get('email', None)
self.external_document_number = kwargs.get('external_document_number', None)
self.fully_shipped = kwargs.get('fully_shipped', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.order_date = kwargs.get('order_date', None)
self.partial_shipping = kwargs.get('partial_shipping', None)
self.payment_terms_id = kwargs.get('payment_terms_id', None)
self.phone_number = kwargs.get('phone_number', None)
self.prices_include_tax = kwargs.get('prices_include_tax', None)
self.requested_delivery_date = kwargs.get('requested_delivery_date', None)
self.salesperson = kwargs.get('salesperson', None)
self.selling_postal_address = kwargs.get('selling_postal_address', None)
self.shipping_postal_address = kwargs.get('shipping_postal_address', None)
self.ship_to_contact = kwargs.get('ship_to_contact', None)
self.ship_to_name = kwargs.get('ship_to_name', None)
self.status = kwargs.get('status', None)
self.total_amount_excluding_tax = kwargs.get('total_amount_excluding_tax', None)
self.total_amount_including_tax = kwargs.get('total_amount_including_tax', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.currency = kwargs.get('currency', None)
self.customer = kwargs.get('customer', None)
self.payment_term = kwargs.get('payment_term', None)
self.sales_order_lines = kwargs.get('sales_order_lines', None)
class MicrosoftGraphSalesOrderLine(MicrosoftGraphEntity):
"""salesOrderLine.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param account_id:
:type account_id: str
:param amount_excluding_tax:
:type amount_excluding_tax: float
:param amount_including_tax:
:type amount_including_tax: float
:param description:
:type description: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param discount_percent:
:type discount_percent: float
:param document_id:
:type document_id: str
:param invoice_discount_allocation:
:type invoice_discount_allocation: float
:param invoiced_quantity:
:type invoiced_quantity: float
:param invoice_quantity:
:type invoice_quantity: float
:param item_id:
:type item_id: str
:param line_type:
:type line_type: str
:param net_amount:
:type net_amount: float
:param net_amount_including_tax:
:type net_amount_including_tax: float
:param net_tax_amount:
:type net_tax_amount: float
:param quantity:
:type quantity: float
:param sequence:
:type sequence: int
:param shipment_date:
:type shipment_date: ~datetime.date
:param shipped_quantity:
:type shipped_quantity: float
:param ship_quantity:
:type ship_quantity: float
:param tax_code:
:type tax_code: str
:param tax_percent:
:type tax_percent: float
:param total_tax_amount:
:type total_tax_amount: float
:param unit_of_measure_id:
:type unit_of_measure_id: str
:param unit_price:
:type unit_price: float
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
:param item: item.
:type item: ~financials.models.MicrosoftGraphItem
"""
_validation = {
'sequence': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'account_id': {'key': 'accountId', 'type': 'str'},
'amount_excluding_tax': {'key': 'amountExcludingTax', 'type': 'float'},
'amount_including_tax': {'key': 'amountIncludingTax', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'discount_percent': {'key': 'discountPercent', 'type': 'float'},
'document_id': {'key': 'documentId', 'type': 'str'},
'invoice_discount_allocation': {'key': 'invoiceDiscountAllocation', 'type': 'float'},
'invoiced_quantity': {'key': 'invoicedQuantity', 'type': 'float'},
'invoice_quantity': {'key': 'invoiceQuantity', 'type': 'float'},
'item_id': {'key': 'itemId', 'type': 'str'},
'line_type': {'key': 'lineType', 'type': 'str'},
'net_amount': {'key': 'netAmount', 'type': 'float'},
'net_amount_including_tax': {'key': 'netAmountIncludingTax', 'type': 'float'},
'net_tax_amount': {'key': 'netTaxAmount', 'type': 'float'},
'quantity': {'key': 'quantity', 'type': 'float'},
'sequence': {'key': 'sequence', 'type': 'int'},
'shipment_date': {'key': 'shipmentDate', 'type': 'date'},
'shipped_quantity': {'key': 'shippedQuantity', 'type': 'float'},
'ship_quantity': {'key': 'shipQuantity', 'type': 'float'},
'tax_code': {'key': 'taxCode', 'type': 'str'},
'tax_percent': {'key': 'taxPercent', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'unit_of_measure_id': {'key': 'unitOfMeasureId', 'type': 'str'},
'unit_price': {'key': 'unitPrice', 'type': 'float'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
'item': {'key': 'item', 'type': 'MicrosoftGraphItem'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesOrderLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.account_id = kwargs.get('account_id', None)
self.amount_excluding_tax = kwargs.get('amount_excluding_tax', None)
self.amount_including_tax = kwargs.get('amount_including_tax', None)
self.description = kwargs.get('description', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.discount_percent = kwargs.get('discount_percent', None)
self.document_id = kwargs.get('document_id', None)
self.invoice_discount_allocation = kwargs.get('invoice_discount_allocation', None)
self.invoiced_quantity = kwargs.get('invoiced_quantity', None)
self.invoice_quantity = kwargs.get('invoice_quantity', None)
self.item_id = kwargs.get('item_id', None)
self.line_type = kwargs.get('line_type', None)
self.net_amount = kwargs.get('net_amount', None)
self.net_amount_including_tax = kwargs.get('net_amount_including_tax', None)
self.net_tax_amount = kwargs.get('net_tax_amount', None)
self.quantity = kwargs.get('quantity', None)
self.sequence = kwargs.get('sequence', None)
self.shipment_date = kwargs.get('shipment_date', None)
self.shipped_quantity = kwargs.get('shipped_quantity', None)
self.ship_quantity = kwargs.get('ship_quantity', None)
self.tax_code = kwargs.get('tax_code', None)
self.tax_percent = kwargs.get('tax_percent', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.unit_of_measure_id = kwargs.get('unit_of_measure_id', None)
self.unit_price = kwargs.get('unit_price', None)
self.account = kwargs.get('account', None)
self.item = kwargs.get('item', None)
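# --- illustrative usage, not generated code ---------------------------------
# Minimal sketch of the _validation bounds above: 'sequence' is constrained to
# the signed 32-bit integer range. It assumes the validate() helper inherited
# from msrest.serialization.Model, which returns a list of violations.
def _example_sequence_validation():
    line = MicrosoftGraphSalesOrderLine(sequence=2147483648)  # INT32_MAX + 1
    return line.validate()  # expected to be non-empty for the value above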
class MicrosoftGraphSalesQuote(MicrosoftGraphEntity):
"""salesQuote.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param accepted_date:
:type accepted_date: ~datetime.date
:param billing_postal_address: postalAddressType.
:type billing_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param bill_to_customer_id:
:type bill_to_customer_id: str
:param bill_to_customer_number:
:type bill_to_customer_number: str
:param bill_to_name:
:type bill_to_name: str
:param currency_code:
:type currency_code: str
:param currency_id:
:type currency_id: str
:param customer_id:
:type customer_id: str
:param customer_name:
:type customer_name: str
:param customer_number:
:type customer_number: str
:param discount_amount:
:type discount_amount: float
:param document_date:
:type document_date: ~datetime.date
:param due_date:
:type due_date: ~datetime.date
:param email:
:type email: str
:param external_document_number:
:type external_document_number: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param payment_terms_id:
:type payment_terms_id: str
:param phone_number:
:type phone_number: str
:param salesperson:
:type salesperson: str
:param selling_postal_address: postalAddressType.
:type selling_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param sent_date:
:type sent_date: ~datetime.datetime
:param shipment_method_id:
:type shipment_method_id: str
:param shipping_postal_address: postalAddressType.
:type shipping_postal_address: ~financials.models.MicrosoftGraphPostalAddressType
:param ship_to_contact:
:type ship_to_contact: str
:param ship_to_name:
:type ship_to_name: str
:param status:
:type status: str
:param total_amount_excluding_tax:
:type total_amount_excluding_tax: float
:param total_amount_including_tax:
:type total_amount_including_tax: float
:param total_tax_amount:
:type total_tax_amount: float
:param valid_until_date:
:type valid_until_date: ~datetime.date
:param currency: currency.
:type currency: ~financials.models.MicrosoftGraphCurrency
:param customer: customer.
:type customer: ~financials.models.MicrosoftGraphCustomer
:param payment_term: paymentTerm.
:type payment_term: ~financials.models.MicrosoftGraphPaymentTerm
:param sales_quote_lines:
:type sales_quote_lines: list[~financials.models.MicrosoftGraphSalesQuoteLine]
:param shipment_method: shipmentMethod.
:type shipment_method: ~financials.models.MicrosoftGraphShipmentMethod
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'accepted_date': {'key': 'acceptedDate', 'type': 'date'},
'billing_postal_address': {'key': 'billingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'bill_to_customer_id': {'key': 'billToCustomerId', 'type': 'str'},
'bill_to_customer_number': {'key': 'billToCustomerNumber', 'type': 'str'},
'bill_to_name': {'key': 'billToName', 'type': 'str'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'currency_id': {'key': 'currencyId', 'type': 'str'},
'customer_id': {'key': 'customerId', 'type': 'str'},
'customer_name': {'key': 'customerName', 'type': 'str'},
'customer_number': {'key': 'customerNumber', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'document_date': {'key': 'documentDate', 'type': 'date'},
'due_date': {'key': 'dueDate', 'type': 'date'},
'email': {'key': 'email', 'type': 'str'},
'external_document_number': {'key': 'externalDocumentNumber', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'payment_terms_id': {'key': 'paymentTermsId', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'salesperson': {'key': 'salesperson', 'type': 'str'},
'selling_postal_address': {'key': 'sellingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'sent_date': {'key': 'sentDate', 'type': 'iso-8601'},
'shipment_method_id': {'key': 'shipmentMethodId', 'type': 'str'},
'shipping_postal_address': {'key': 'shippingPostalAddress', 'type': 'MicrosoftGraphPostalAddressType'},
'ship_to_contact': {'key': 'shipToContact', 'type': 'str'},
'ship_to_name': {'key': 'shipToName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'total_amount_excluding_tax': {'key': 'totalAmountExcludingTax', 'type': 'float'},
'total_amount_including_tax': {'key': 'totalAmountIncludingTax', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'valid_until_date': {'key': 'validUntilDate', 'type': 'date'},
'currency': {'key': 'currency', 'type': 'MicrosoftGraphCurrency'},
'customer': {'key': 'customer', 'type': 'MicrosoftGraphCustomer'},
'payment_term': {'key': 'paymentTerm', 'type': 'MicrosoftGraphPaymentTerm'},
'sales_quote_lines': {'key': 'salesQuoteLines', 'type': '[MicrosoftGraphSalesQuoteLine]'},
'shipment_method': {'key': 'shipmentMethod', 'type': 'MicrosoftGraphShipmentMethod'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesQuote, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.accepted_date = kwargs.get('accepted_date', None)
self.billing_postal_address = kwargs.get('billing_postal_address', None)
self.bill_to_customer_id = kwargs.get('bill_to_customer_id', None)
self.bill_to_customer_number = kwargs.get('bill_to_customer_number', None)
self.bill_to_name = kwargs.get('bill_to_name', None)
self.currency_code = kwargs.get('currency_code', None)
self.currency_id = kwargs.get('currency_id', None)
self.customer_id = kwargs.get('customer_id', None)
self.customer_name = kwargs.get('customer_name', None)
self.customer_number = kwargs.get('customer_number', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.document_date = kwargs.get('document_date', None)
self.due_date = kwargs.get('due_date', None)
self.email = kwargs.get('email', None)
self.external_document_number = kwargs.get('external_document_number', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.payment_terms_id = kwargs.get('payment_terms_id', None)
self.phone_number = kwargs.get('phone_number', None)
self.salesperson = kwargs.get('salesperson', None)
self.selling_postal_address = kwargs.get('selling_postal_address', None)
self.sent_date = kwargs.get('sent_date', None)
self.shipment_method_id = kwargs.get('shipment_method_id', None)
self.shipping_postal_address = kwargs.get('shipping_postal_address', None)
self.ship_to_contact = kwargs.get('ship_to_contact', None)
self.ship_to_name = kwargs.get('ship_to_name', None)
self.status = kwargs.get('status', None)
self.total_amount_excluding_tax = kwargs.get('total_amount_excluding_tax', None)
self.total_amount_including_tax = kwargs.get('total_amount_including_tax', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.valid_until_date = kwargs.get('valid_until_date', None)
self.currency = kwargs.get('currency', None)
self.customer = kwargs.get('customer', None)
self.payment_term = kwargs.get('payment_term', None)
self.sales_quote_lines = kwargs.get('sales_quote_lines', None)
self.shipment_method = kwargs.get('shipment_method', None)
class MicrosoftGraphSalesQuoteLine(MicrosoftGraphEntity):
"""salesQuoteLine.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param account_id:
:type account_id: str
:param amount_excluding_tax:
:type amount_excluding_tax: float
:param amount_including_tax:
:type amount_including_tax: float
:param description:
:type description: str
:param discount_amount:
:type discount_amount: float
:param discount_applied_before_tax:
:type discount_applied_before_tax: bool
:param discount_percent:
:type discount_percent: float
:param document_id:
:type document_id: str
:param item_id:
:type item_id: str
:param line_type:
:type line_type: str
:param net_amount:
:type net_amount: float
:param net_amount_including_tax:
:type net_amount_including_tax: float
:param net_tax_amount:
:type net_tax_amount: float
:param quantity:
:type quantity: float
:param sequence:
:type sequence: int
:param tax_code:
:type tax_code: str
:param tax_percent:
:type tax_percent: float
:param total_tax_amount:
:type total_tax_amount: float
:param unit_of_measure_id:
:type unit_of_measure_id: str
:param unit_price:
:type unit_price: float
:param account: account.
:type account: ~financials.models.MicrosoftGraphAccount
:param item: item.
:type item: ~financials.models.MicrosoftGraphItem
"""
_validation = {
'sequence': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'account_id': {'key': 'accountId', 'type': 'str'},
'amount_excluding_tax': {'key': 'amountExcludingTax', 'type': 'float'},
'amount_including_tax': {'key': 'amountIncludingTax', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
'discount_amount': {'key': 'discountAmount', 'type': 'float'},
'discount_applied_before_tax': {'key': 'discountAppliedBeforeTax', 'type': 'bool'},
'discount_percent': {'key': 'discountPercent', 'type': 'float'},
'document_id': {'key': 'documentId', 'type': 'str'},
'item_id': {'key': 'itemId', 'type': 'str'},
'line_type': {'key': 'lineType', 'type': 'str'},
'net_amount': {'key': 'netAmount', 'type': 'float'},
'net_amount_including_tax': {'key': 'netAmountIncludingTax', 'type': 'float'},
'net_tax_amount': {'key': 'netTaxAmount', 'type': 'float'},
'quantity': {'key': 'quantity', 'type': 'float'},
'sequence': {'key': 'sequence', 'type': 'int'},
'tax_code': {'key': 'taxCode', 'type': 'str'},
'tax_percent': {'key': 'taxPercent', 'type': 'float'},
'total_tax_amount': {'key': 'totalTaxAmount', 'type': 'float'},
'unit_of_measure_id': {'key': 'unitOfMeasureId', 'type': 'str'},
'unit_price': {'key': 'unitPrice', 'type': 'float'},
'account': {'key': 'account', 'type': 'MicrosoftGraphAccount'},
'item': {'key': 'item', 'type': 'MicrosoftGraphItem'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphSalesQuoteLine, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.account_id = kwargs.get('account_id', None)
self.amount_excluding_tax = kwargs.get('amount_excluding_tax', None)
self.amount_including_tax = kwargs.get('amount_including_tax', None)
self.description = kwargs.get('description', None)
self.discount_amount = kwargs.get('discount_amount', None)
self.discount_applied_before_tax = kwargs.get('discount_applied_before_tax', None)
self.discount_percent = kwargs.get('discount_percent', None)
self.document_id = kwargs.get('document_id', None)
self.item_id = kwargs.get('item_id', None)
self.line_type = kwargs.get('line_type', None)
self.net_amount = kwargs.get('net_amount', None)
self.net_amount_including_tax = kwargs.get('net_amount_including_tax', None)
self.net_tax_amount = kwargs.get('net_tax_amount', None)
self.quantity = kwargs.get('quantity', None)
self.sequence = kwargs.get('sequence', None)
self.tax_code = kwargs.get('tax_code', None)
self.tax_percent = kwargs.get('tax_percent', None)
self.total_tax_amount = kwargs.get('total_tax_amount', None)
self.unit_of_measure_id = kwargs.get('unit_of_measure_id', None)
self.unit_price = kwargs.get('unit_price', None)
self.account = kwargs.get('account', None)
self.item = kwargs.get('item', None)
class MicrosoftGraphShipmentMethod(MicrosoftGraphEntity):
"""shipmentMethod.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphShipmentMethod, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
class MicrosoftGraphTaxArea(MicrosoftGraphEntity):
"""taxArea.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param tax_type:
:type tax_type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'tax_type': {'key': 'taxType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphTaxArea, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.tax_type = kwargs.get('tax_type', None)
class MicrosoftGraphTaxGroup(MicrosoftGraphEntity):
"""taxGroup.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param tax_type:
:type tax_type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'tax_type': {'key': 'taxType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphTaxGroup, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.tax_type = kwargs.get('tax_type', None)
class MicrosoftGraphUnitOfMeasure(MicrosoftGraphEntity):
"""unitOfMeasure.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code:
:type code: str
:param display_name:
:type display_name: str
:param international_standard_code:
:type international_standard_code: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'international_standard_code': {'key': 'internationalStandardCode', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphUnitOfMeasure, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs.get('code', None)
self.display_name = kwargs.get('display_name', None)
self.international_standard_code = kwargs.get('international_standard_code', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
class MicrosoftGraphVendor(MicrosoftGraphEntity):
"""vendor.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param address: postalAddressType.
:type address: ~financials.models.MicrosoftGraphPostalAddressType
:param balance:
:type balance: float
:param blocked:
:type blocked: str
:param currency_code:
:type currency_code: str
:param currency_id:
:type currency_id: str
:param display_name:
:type display_name: str
:param email:
:type email: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:param number:
:type number: str
:param payment_method_id:
:type payment_method_id: str
:param payment_terms_id:
:type payment_terms_id: str
:param phone_number:
:type phone_number: str
:param tax_liable:
:type tax_liable: bool
:param tax_registration_number:
:type tax_registration_number: str
:param website:
:type website: str
:param currency: currency.
:type currency: ~financials.models.MicrosoftGraphCurrency
:param payment_method: paymentMethod.
:type payment_method: ~financials.models.MicrosoftGraphPaymentMethod
:param payment_term: paymentTerm.
:type payment_term: ~financials.models.MicrosoftGraphPaymentTerm
:param picture:
:type picture: list[~financials.models.MicrosoftGraphPicture]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'address': {'key': 'address', 'type': 'MicrosoftGraphPostalAddressType'},
'balance': {'key': 'balance', 'type': 'float'},
'blocked': {'key': 'blocked', 'type': 'str'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'currency_id': {'key': 'currencyId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'email': {'key': 'email', 'type': 'str'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'number': {'key': 'number', 'type': 'str'},
'payment_method_id': {'key': 'paymentMethodId', 'type': 'str'},
'payment_terms_id': {'key': 'paymentTermsId', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'tax_liable': {'key': 'taxLiable', 'type': 'bool'},
'tax_registration_number': {'key': 'taxRegistrationNumber', 'type': 'str'},
'website': {'key': 'website', 'type': 'str'},
'currency': {'key': 'currency', 'type': 'MicrosoftGraphCurrency'},
'payment_method': {'key': 'paymentMethod', 'type': 'MicrosoftGraphPaymentMethod'},
'payment_term': {'key': 'paymentTerm', 'type': 'MicrosoftGraphPaymentTerm'},
'picture': {'key': 'picture', 'type': '[MicrosoftGraphPicture]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphVendor, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.address = kwargs.get('address', None)
self.balance = kwargs.get('balance', None)
self.blocked = kwargs.get('blocked', None)
self.currency_code = kwargs.get('currency_code', None)
self.currency_id = kwargs.get('currency_id', None)
self.display_name = kwargs.get('display_name', None)
self.email = kwargs.get('email', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.number = kwargs.get('number', None)
self.payment_method_id = kwargs.get('payment_method_id', None)
self.payment_terms_id = kwargs.get('payment_terms_id', None)
self.phone_number = kwargs.get('phone_number', None)
self.tax_liable = kwargs.get('tax_liable', None)
self.tax_registration_number = kwargs.get('tax_registration_number', None)
self.website = kwargs.get('website', None)
self.currency = kwargs.get('currency', None)
self.payment_method = kwargs.get('payment_method', None)
self.payment_term = kwargs.get('payment_term', None)
self.picture = kwargs.get('picture', None)
class OdataError(msrest.serialization.Model):
"""OdataError.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param error: Required.
:type error: ~financials.models.OdataErrorMain
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'error': {'key': 'error', 'type': 'OdataErrorMain'},
}
def __init__(
self,
**kwargs
):
super(OdataError, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.error = kwargs['error']
class OdataErrorDetail(msrest.serialization.Model):
"""OdataErrorDetail.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code: Required.
:type code: str
:param message: Required.
:type message: str
:param target:
:type target: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OdataErrorDetail, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = kwargs.get('target', None)
class OdataErrorMain(msrest.serialization.Model):
"""OdataErrorMain.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param code: Required.
:type code: str
:param message: Required.
:type message: str
:param target:
:type target: str
:param details:
:type details: list[~financials.models.OdataErrorDetail]
:param innererror: The structure of this object is service-specific.
:type innererror: dict[str, object]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[OdataErrorDetail]'},
'innererror': {'key': 'innererror', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(OdataErrorMain, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
self.innererror = kwargs.get('innererror', None)
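# --- illustrative usage, not generated code ---------------------------------
# Minimal sketch of the required-parameter behavior above: OdataError reads
# kwargs['error'] directly, so constructing it without 'error' raises
# KeyError, while optional fields fall back to None via kwargs.get().
def _example_odata_error():
    detail = OdataErrorDetail(code='BadRequest', message='Invalid payload')
    main = OdataErrorMain(code='BadRequest', message='Request failed',
                          details=[detail])
    return OdataError(error=main)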
# --- bottomline/blweb/migrations/0014_auto_20210801_1041.py (repo: mcm219/BottomLine, license: MIT) ---
# Generated by Django 3.2.2 on 2021-08-01 14:41
from django.db import migrations, models
import django.db.models.deletion
import localflavor.us.models
class Migration(migrations.Migration):
dependencies = [
('blweb', '0013_vehiclemodel_price'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street', models.CharField(help_text='The street address e.g. 123 Main St.', max_length=200)),
('zip_code', localflavor.us.models.USZipCodeField(help_text='The address Zip Code', max_length=10)),
('state', localflavor.us.models.USStateField(help_text='The address state e.g. PA', max_length=2)),
],
),
migrations.AddField(
model_name='profile',
name='dealer_make',
field=models.ForeignKey(blank=True, default=None, help_text='The vehicle make associated with this dealer', null=True, on_delete=django.db.models.deletion.CASCADE, to='blweb.vehiclemake'),
),
migrations.AddField(
model_name='profile',
name='address',
field=models.ForeignKey(blank=True, default=None, help_text='The US mailing address associated with this account (optional)', null=True, on_delete=django.db.models.deletion.CASCADE, to='blweb.address'),
),
]
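# Applying this migration follows the stock Django flow; the command below is
# a sketch using only standard manage.py arguments (nothing project-specific):
#
#   python manage.py migrate blweb 0014
#
# This creates the blweb_address table and adds the nullable dealer_make and
# address foreign keys to the profile table.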
# --- show_hit_progress.py (repo: lwj5/simple-amt, license: MIT) ---
import argparse
from collections import Counter
import simpleamt
if __name__ == "__main__":
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
if args.hit_ids_file is None:
parser.error("Must specify hit_ids_file")
with open(args.hit_ids_file, "r") as f:
hit_ids = [line.strip() for line in f]
counter = Counter()
for idx, hit_id in enumerate(hit_ids):
print("Checking HIT {} ({}/{})".format(hit_id, idx + 1, len(hit_ids)))
try:
hit = mtc.get_hit(HITId=hit_id)["HIT"]
except Exception as e:
print("Can't find hit id: %s" % (hit_id))
print(e)
continue
total = int(hit["MaxAssignments"])
completed = 0
paginator = mtc.get_paginator("list_assignments_for_hit")
for a_page in paginator.paginate(
HITId=hit_id, PaginationConfig={"PageSize": 100}
):
for a in a_page["Assignments"]:
if a["AssignmentStatus"] in [
"Submitted",
"Approved",
"Rejected",
]:
completed += 1
print("Completed {}/{}".format(completed, total))
counter.update([(completed, total)])
for (completed, total), count in counter.most_common():
print("%d / %d: %d" % (completed, total, count))
# --- sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/models.py (repo: vincenttran-msft/azure-sdk-for-python, license: MIT) ---
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2017_04_01.models import *
from .v2021_11_01.models import *
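# Illustrative import (hedged; not part of the original file). The wildcard
# re-exports above let callers use model classes without pinning an API
# version, e.g. (class name assumed, not verified against this SDK release):
#   from azure.mgmt.eventhub.models import Eventhub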
| 43.777778
| 76
| 0.474619
|
5473b109f4d8004bc0b2a03199414788d9e81138
| 2,598
|
py
|
Python
|
epimargin/smoothing.py
|
dilawar/epimargin
|
0261117cb29dd233600d8f0410d79c93ba261fd3
|
[
"MIT"
] | 6
|
2021-06-19T14:01:10.000Z
|
2021-12-31T23:49:34.000Z
|
epimargin/smoothing.py
|
dilawar/epimargin
|
0261117cb29dd233600d8f0410d79c93ba261fd3
|
[
"MIT"
] | 34
|
2020-04-26T05:50:39.000Z
|
2020-06-17T14:02:32.000Z
|
epimargin/smoothing.py
|
dilawar/epimargin
|
0261117cb29dd233600d8f0410d79c93ba261fd3
|
[
"MIT"
] | 5
|
2021-05-26T16:35:15.000Z
|
2021-09-10T02:20:34.000Z
|
from typing import Optional, Sequence
import numpy as np
from scipy.signal import convolve, filtfilt, iirnotch
from statsmodels.nonparametric.smoothers_lowess import lowess as sm_lowess
# supported kernels for convolution smoothing
kernels = {
"hanning" : np.hanning,
"hamming" : np.hamming,
"bartlett" : np.bartlett,
"blackman" : np.blackman,
"uniform" : np.ones
}
def notched_smoothing(window: int = 7):
""" Removes weekly and twice-weekly periodicity before convolving a time-reversed padded signal with a uniform moving average window"""
fs, f0, Q = 1, 1/7, 1
b1, a1 = iirnotch(f0, Q, fs)
b2, a2 = iirnotch(2*f0, 2*Q, fs)
# Frequency response
b = convolve(b1, b2)
a = convolve(a1, a2)
kernel = np.ones(window)/window
def smooth(data: Sequence[float]):
notched = filtfilt(b, a, data)
return convolve(np.concatenate([notched, notched[:-window-1:-1]]), kernel, mode="same")[:-window]
return smooth
def notch_filter():
""" implements a notch filter with notches at 1/week and 2/week; assuming input signal sampling rate is 1/week"""
fs, f0, Q = 1, 1/7, 1
b1, a1 = iirnotch(f0, Q, fs)
b2, a2 = iirnotch(2*f0, 2*Q, fs)
# Frequency response
b = convolve(b1, b2)
a = convolve(a1, a2)
def filter_(data: Sequence[float]):
return filtfilt(b, a, data)
return filter_
def convolution(key: str = "hamming", window: int = 7):
""" entry point for all convolution operations """
kernel = kernels[key](window)
def smooth(data: Sequence[float]):
# pad the data with time reversal windows of signal at ends since all kernels here are apodizing
padded = np.r_[data[window-1:0:-1], data, data[-2:-window-1:-1]]
return np.convolve(kernel/kernel.sum(), padded, mode="valid")[:-window+1]
return smooth
def box_filter_local(window: int = 5, local_smoothing: Optional[int] = 3):
""" implement a box filter smoother with additional LOWESS-like smoothing for data points at the end of the timeseries"""
def smooth(data: Sequence[float]):
smoothed = np.convolve(data, np.ones(window)/window, mode='same')
if local_smoothing and len(data) > (local_smoothing + 1):
for i in range(local_smoothing-1, 0, -1):
smoothed[-i] = np.mean(data[-i-local_smoothing+1: -i+1 if i > 1 else None])
return smoothed
return smooth
def lowess(**kwargs):
""" wrapper over statsmodels lowess implementation to return a callable """
return lambda data: sm_lowess(data, list(range(len(data))), **kwargs)
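# Illustrative usage (a minimal sketch, not part of the original module):
# smoothing a noisy synthetic series with the Hamming-window convolution
# smoother defined above; the series, seed, and window length are invented.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    series = np.sin(np.linspace(0, 6, 60)) + rng.normal(0, 0.2, 60)
    smooth = convolution("hamming", window=7)
    print(smooth(series)[:5])  # smoothed output has the same length as the input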
| 40.59375
| 139
| 0.661663
|
499aa282bc2ac7e50326970dd562407916ebda4d
| 7,928
|
py
|
Python
|
ck/repo/module/model.r/module.py
|
santosh653/ck
|
f09b836df48598aff4db241b52c37899a73eb569
|
[
"BSD-3-Clause"
] | 480
|
2015-02-04T16:07:43.000Z
|
2021-08-17T13:47:53.000Z
|
ck/repo/module/model.r/module.py
|
santosh653/ck
|
f09b836df48598aff4db241b52c37899a73eb569
|
[
"BSD-3-Clause"
] | 166
|
2015-02-05T16:03:52.000Z
|
2021-08-16T14:21:58.000Z
|
ck/repo/module/model.r/module.py
|
santosh653/ck
|
f09b836df48598aff4db241b52c37899a73eb569
|
[
"BSD-3-Clause"
] | 62
|
2015-02-05T17:21:14.000Z
|
2021-09-05T10:24:53.000Z
|
#
# Collective Knowledge (Unified modeling using R)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# build model
def build(i):
"""
Input: {
model_name - model name
(model_file) - model output file, otherwise generated as tmp file
features_table - features table (in experiment module format)
features_keys - features flat keys
characteristics_table - characteristics table (in experiment module format)
characteristics_keys - characteristics flat keys
(keep_temp_files) - if 'yes', keep temp files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
model_input_file - temp input file (csv) - can be deleted if not keep_temp_files
model_file - output model file
}
"""
import tempfile
import os
o=i.get('out','')
mn=i['model_name']
mf=i.get('model_file','')
mf1=mf+'.r.obj'
mf4=mf+'.r.ft.txt'
ftable=i['features_table']
fkeys=i['features_keys']
ctable=i['characteristics_table']
ckeys=i['characteristics_keys']
lftable=len(ftable)
lctable=len(ctable)
# Enumerate features
s=''
fk=1
for fx in sorted(fkeys): # Needed to be sorted
s+='V'+str(fk)+') '+fx
s+='\n'
fk+=1
if s!='':
r=ck.save_text_file({'text_file':mf4, 'string':s})
if r['return']>0: return r
if o=='con':
ck.out('*******************************************************')
ck.out('Feature key conversion:')
ck.out('')
ck.out(s)
if lftable!=lctable:
return {'return':1, 'error':'length of feature table ('+str(lftable)+') is not the same as length of characteristics table ('+str(lctable)+')'}
# if len(ckeys)>1:
# return {'return':1, 'error':'currently we support only modeling for 1 characteristic'}
ktf=i.get('keep_temp_files','')
# First convert to CSV for R ***********************************
# Prepare common table from features and characteristics
dim=[]
for q in range(0, lftable):
vv=[]
for v in ftable[q]:
vv.append(v)
for v in ctable[q]:
vv.append(v)
dim.append(vv)
# Prepare common keys
keys=[]
for q in fkeys:
keys.append(q)
for q in ckeys:
keys.append(q)
# Prepare temporary CSV file
if ktf=='yes' and mf!='':
fn1=mf+'.build.in.csv'
else:
fd1, fn1=tempfile.mkstemp(suffix='.tmp', prefix='ck-')
os.close(fd1)
os.remove(fn1)
if ktf=='yes' and o=='con':
ck.out('')
ck.out(' Temporary CSV file = '+fn1)
ck.out('')
ii={'action':'convert_table_to_csv',
'module_uoa':cfg['module_deps']['experiment'],
'table':dim,
'keys':keys,
'file_name':fn1,
'csv_no_header':'yes',
'csv_separator':';',
'csv_decimal_mark':'.'
}
r=ck.access(ii)
if r['return']>0: return r
# Prepare (temporary) out model file
fn2=mf
if fn2=='' or i.get('web','')=='yes':
fd2, fn2=tempfile.mkstemp(suffix='.tmp', prefix='ck-')
os.close(fd2)
os.remove(fn2)
else:
fn2=mf1
if os.path.isfile(fn2): os.remove(fn2)
# Calling R
p=work['path']
model_code=cfg['model_code_build'].replace('$#model_name#$',mn)
pmc=os.path.join(p, model_code)
cmd='R --vanilla --args '+fn1+' '+fn2+' < '+pmc
os.system(cmd)
if ktf=='yes' and o=='con':
ck.out('')
ck.out(' Executed command:')
ck.out(cmd)
ck.out('')
if ktf!='yes' and os.path.isfile(fn1): os.remove(fn1)
if not os.path.isfile(fn2):
if ktf=='yes' and o=='con':
ck.out('')
ck.out(' Temporary input CSV file = '+fn1)
ck.out('')
return {'return':1, 'error':'model was not created'}
return {'return':0, 'model_input_file':fn1, 'model_file':fn2}
##############################################################################
# validate model
def validate(i):
"""
Input: {
model_name - model name:
earth
lm
nnet
party
randomforest
rpart
svm
model_file - file with model (object) code
features_table - features table (in experiment module format)
features_keys - features flat keys
(keep_temp_files) - if 'yes', keep temp files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
prediction_table - experiment table with predictions
}
"""
import tempfile
import os
import csv
import sys
mn=i['model_name']
mf=i['model_file']
mf1=i['model_file']+'.r.obj'
ftable=i['features_table']
fkeys=i['features_keys']
ktf=i.get('keep_temp_files','')
lftable=len(ftable)
# First convert to CSV for R ***********************************
# Prepare temporary CSV file
if ktf=='yes':
fn1=mf+'.validate.in.csv'
else:
fd1, fn1=tempfile.mkstemp(suffix='.tmp', prefix='ck-')
os.close(fd1)
os.remove(fn1)
ii={'action':'convert_table_to_csv',
'module_uoa':cfg['module_deps']['experiment'],
'table':ftable,
'keys':fkeys,
'file_name':fn1,
'csv_no_header':'yes',
'csv_separator':';',
'csv_decimal_mark':'.'
}
r=ck.access(ii)
if r['return']>0: return r
# Prepare temporary out file
if ktf=='yes':
fn2=mf+'.validate.out.csv'
if os.path.isfile(fn2): os.remove(fn2)
else:
fd2, fn2=tempfile.mkstemp(suffix='.tmp', prefix='ck-')
os.close(fd2)
os.remove(fn2)
# Calling R
p=work['path']
model_code=cfg['model_code_predict'].replace('$#model_name#$',mn)
pmc=os.path.join(p, model_code)
cmd='R --vanilla --args '+mf1+' '+fn1+' '+fn2+' < '+pmc
print (cmd)
os.system(cmd)
if ktf!='yes' and os.path.isfile(fn1): os.remove(fn1)
if not os.path.isfile(fn2):
return {'return':1, 'error':'output prediction file was not created'}
# Parse CSV and convert to experiment format
# Read predictions
pr=[]
f=open(fn2, 'r')
c=csv.DictReader(f, delimiter=',')
for a in c:
k=list(a.keys())
if len(k)>1:
pr.append([a[k[1]]])
f.close()
if ktf!='yes': os.remove(fn2)
return {'return':0, 'prediction_table':pr}
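##############################################################################
# Illustrative usage (hedged; not part of the original module). Inside a CK
# installation this module is normally reached through ck.access; every value
# in the dictionary below is an invented placeholder.
#
# r = ck.access({'action': 'build',
#                'module_uoa': 'model.r',
#                'model_name': 'lm',
#                'model_file': 'tmp-model',
#                'features_table': [[1.0], [2.0], [3.0]],
#                'features_keys': ['##features#x'],
#                'characteristics_table': [[2.1], [3.9], [6.2]],
#                'characteristics_keys': ['##characteristics#y']})
# if r['return'] > 0: ck.err(r)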
| 26.783784
| 150
| 0.489279
|
85aa8921074f7531d10fa6077c12e33c5d237543
| 5,445
|
py
|
Python
|
anchore_manager/cli/utils.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_manager/cli/utils.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_manager/cli/utils.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import sys
import copy
import json
import time
import yaml
import urllib
import logging
import dateutil.parser
from prettytable import PrettyTable, PLAIN_COLUMNS
from collections import OrderedDict
#from textwrap import fill
import anchore_engine.db.entities.common
from anchore_engine.subsys import logger
_logger = logging.getLogger(__name__)
def setup_config(cli_opts):
ret = {
'jsonmode':False,
'debug':False,
'configdir': '/config'
}
settings = {}
# load environment if present
try:
for e in ['ANCHORE_CLI_JSON', 'ANCHORE_CLI_DEBUG', 'ANCHORE_CONFIG_DIR']:
if e in os.environ:
settings[e] = os.environ[e]
except Exception as err:
raise err
# load cmdline options
try:
if cli_opts['json']:
settings['ANCHORE_CLI_JSON'] = "y"
if cli_opts['debug']:
settings['ANCHORE_CLI_DEBUG'] = "y"
if cli_opts['configdir']:
settings['ANCHORE_CONFIG_DIR'] = cli_opts['configdir']
except Exception as err:
raise err
try:
if 'ANCHORE_CLI_JSON' in settings:
if settings['ANCHORE_CLI_JSON'].lower() == 'y':
ret['jsonmode'] = True
if 'ANCHORE_CLI_DEBUG' in settings:
if settings['ANCHORE_CLI_DEBUG'].lower() == 'y':
ret['debug'] = True
if 'ANCHORE_CONFIG_DIR' in settings:
ret['configdir'] = settings['ANCHORE_CONFIG_DIR']
except Exception as err:
raise err
return(ret)
def format_error_output(config, op, params, payload):
try:
errdata = json.loads(str(payload))
except:
errdata = {'message': str(payload)}
if config['jsonmode']:
ret = json.dumps(errdata, indent=4, sort_keys=True)
return(ret)
obuf = ""
try:
outdict = OrderedDict()
if 'message' in errdata:
outdict['Error'] = str(errdata['message'])
if 'httpcode' in errdata:
outdict['HTTP Code'] = str(errdata['httpcode'])
if 'detail' in errdata and errdata['detail']:
outdict['Detail'] = str(errdata['detail'])
for k in outdict.keys():
obuf = obuf + k + ": " + outdict[k] + "\n"
except Exception as err:
obuf = str(payload)
ret = obuf
return(ret)
def doexit(ecode):
try:
sys.stdout.close()
except:
pass
try:
sys.stderr.close()
except:
pass
sys.exit(ecode)
def make_db_params(db_connect=None, db_use_ssl=False, db_timeout=30, db_connect_timeout=120, db_pool_size=30, db_pool_max_overflow=100):
db_connect_args = {
'timeout': db_timeout,
'ssl': db_use_ssl,
'connect_timeout': db_connect_timeout,
}
ret = {
'db_connect': db_connect,
'db_connect_args': db_connect_args,
'db_pool_size': db_pool_size,
'db_pool_max_overflow': db_pool_max_overflow,
}
return(ret)
def connect_database(config, db_params, db_retries=1):
logger.info("DB params: {}".format(json.dumps(db_params)))
rc = anchore_engine.db.entities.common.do_connect(db_params)
logger.info("DB connection configured: {}".format(str(rc)))
db_connected = False
last_db_connect_err = ""
for i in range(0, int(db_retries)):
logger.info("DB attempting to connect...")
try:
rc = anchore_engine.db.entities.common.test_connection()
logger.info("DB connected: {}".format(str(rc)))
db_connected = True
break
except Exception as err:
last_db_connect_err = str(err)
if db_retries > 1:
logger.warn("DB connection failed, retrying - exception: {}".format(str(last_db_connect_err)))
time.sleep(5)
if not db_connected:
raise Exception("DB connection failed - exception: " + str(last_db_connect_err))
def init_database(upgrade_module=None, localconfig=None, do_db_compatibility_check=False):
code_versions = db_versions = None
if upgrade_module:
try:
if do_db_compatibility_check and "do_db_compatibility_check" in dir(upgrade_module):
logger.info("DB compatibility check: running...")
upgrade_module.do_db_compatibility_check()
logger.info("DB compatibility check success")
else:
logger.info("DB compatibility check: skipping...")
except Exception as err:
raise err
try:
code_versions, db_versions = upgrade_module.get_versions()
if code_versions and not db_versions:
logger.info("DB not initialized: initializing tables...")
upgrade_module.do_create_tables()
upgrade_module.do_db_bootstrap(localconfig=localconfig)
upgrade_module.do_version_update(db_versions, code_versions)
code_versions, db_versions = upgrade_module.get_versions()
except Exception as err:
raise err
try:
if localconfig and "do_db_post_actions" in dir(upgrade_module):
logger.info("DB post actions: running...")
upgrade_module.do_db_post_actions(localconfig=localconfig)
except Exception as err:
raise err
return(code_versions, db_versions)
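# Illustrative wiring (a hedged sketch, not part of the original module): how
# these helpers might be combined from a CLI entry point. The option dict
# mirrors setup_config()'s expected keys; the DB URL is a placeholder.
#
# config = setup_config({'json': False, 'debug': True, 'configdir': '/config'})
# db_params = make_db_params(db_connect='postgresql://user:pw@localhost/anchore')
# connect_database(config, db_params, db_retries=3)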
| 30.589888
| 136
| 0.612489
|
7668594c353183d7d86fa02f82c3baeb777acfb2
| 1,106
|
py
|
Python
|
madtornado/ancient/module/syncMemcached.py
|
SystemLight/madtornado
|
707ee156c33facd7050449f9fccef44f1c302b19
|
[
"MIT"
] | 8
|
2020-04-12T13:53:13.000Z
|
2022-01-11T15:31:31.000Z
|
madtornado/ancient/module/syncMemcached.py
|
SystemLight/Mad_tornado3
|
707ee156c33facd7050449f9fccef44f1c302b19
|
[
"MIT"
] | null | null | null |
madtornado/ancient/module/syncMemcached.py
|
SystemLight/Mad_tornado3
|
707ee156c33facd7050449f9fccef44f1c302b19
|
[
"MIT"
] | null | null | null |
from ..conf import parser
import memcache
import json
option = parser.options("cache")
print("[syncMemcached] is imported.")
class Component:
def __init__(self):
self.memcachedClient = None
self.over_time = option["over_time"]
self.server_list = json.loads(option["server_list"])
def __enter__(self):
self.on()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.off()
def __getitem__(self, item):
return self.memcachedClient.get(item)
def __setitem__(self, key, value):
return self.memcachedClient.set(key, value, self.over_time)
def __delitem__(self, key):
return self.memcachedClient.delete(key)
def on(self) -> None:
self.memcachedClient = memcache.Client(self.server_list, debug=False)
def off(self) -> None:
if self.memcachedClient:
self.memcachedClient.disconnect_all()
self.memcachedClient = None
def spe_set(self, key: str, value: str, over_time: int) -> int:
return self.memcachedClient.set(key, value, over_time)
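# Illustrative usage (hedged; needs a memcached instance reachable via the
# configured server_list). Component doubles as a context manager, so a short
# cache session could look like this, with placeholder key/value:
#
# with Component() as cache:
#     cache['greeting'] = 'hello'   # stored with the configured over_time
#     print(cache['greeting'])
#     del cache['greeting']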
| 25.136364
| 77
| 0.65642
|
e9859a7024632033cf3b04e15f2e07da44c6d801
| 12,971
|
py
|
Python
|
model.py
|
dastratakos/CS-221-Final-Project
|
18849d547c939c5470bb80fcc76842a7e1d42577
|
[
"Apache-2.0"
] | 1
|
2021-05-07T10:03:46.000Z
|
2021-05-07T10:03:46.000Z
|
model.py
|
dastratakos/Optimized-Task-Scheduling
|
18849d547c939c5470bb80fcc76842a7e1d42577
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
dastratakos/Optimized-Task-Scheduling
|
18849d547c939c5470bb80fcc76842a7e1d42577
|
[
"Apache-2.0"
] | null | null | null |
'''
file: model.py
authors: Kento Perera, Timothy Sah, and Dean Stratakos
date: December 1, 2019
----------
This file contains our implementations of Value Iteration
and Q-learning for our CS 221 Project.
We borrowed code structure from blackjack/submission.py
'''
import util, math, random, csv, timeit
from collections import defaultdict
from util import ValueIteration
from itertools import combinations
'''
class: RacquetsMDP
----------
Defines the MDP for the racquet stringing problem.
'''
class RacquetsMDP(util.MDP):
'''
function: __init__
----------
Constructor for the RacquetsMDP class.
numRacquets: number of racquets that can be string in a day
file: a string that is the name of the CSV data file
numDays: number of days to consider
data: tuple of racquet job request data separated by day
'''
def __init__(self, numRacquets, file, numDays):
self.numRacquets = numRacquets
self.data = self.readFile(file)
self.numDays = min(numDays, len(self.data))
'''
function: readFile
----------
Parses an input CSV file and stores the contents in self.data
file: a string that is the name of the CSV data file
returns data, a list of lists; data[i] is a list representing
the racquet requests for day i + 1 as a list of tuples; data[i][j] is a
tuple representing the (j + 1)th racquet request on day i + 1
'''
def readFile(self, file):
f = open(file, 'r') # to read the file
fileReader = csv.reader(f)
data = []
day = []
currDate = 0
for lineNum, row in enumerate(fileReader):
daysUntilDue = (1 * (row[2] == 'Exp')) + (3 * (row[2] == 'Std'))
reqType = row[2] # to build request string
if row[1] == 'True': reqType += 'SMT' # to build request string
else: reqType += 'Reg' # to build request string
if lineNum == 0:
continue
elif lineNum == 1:
day.append((reqType, daysUntilDue))
currDate = row[3]
else:
if row[3] == currDate:
day.append((reqType, daysUntilDue))
else:
data.append(day)
day = []
day.append((reqType, daysUntilDue))
currDate = row[3]
data.append(day)
print(data)
return data
'''
function: startState
----------
The start state contains a tuple of the racquets at the start of Day 1
and an integer indicating that the state is at the start of Day 1.
'''
def startState(self):
return ((tuple(self.data[0]), 1))
'''
function: actions
----------
Return a list of lists representing actions possible from |state|.
One action is a list of racquets that represent picking any
self.numRacquets (or fewer) racquets to string for the current day.
'''
def actions(self, state):
if state == (): return [0]
if len(state[0]) < self.numRacquets: # all racquets can be strung for that day
return [state[0]] # return list of all racquets
# otherwise, there are more racquets to string that can be strung for that day
return set(combinations(state[0], self.numRacquets))
'''
function: succAndProbReward
----------
Given a |state| and |action|, returns a list of (newState, prob, reward) tuples
corresponding to the states reachable from |state| when taking |action|.
If |state| is an end state, returns an empty list [].
'''
def succAndProbReward(self, state, action, bounded=True, bound=7):
# end state when we have processed the last day
if state[1] == self.numDays + 1: return []
racquets = list(state[0])
# remove racquets based on the action and compute reward of stringing those racquets
for racquet in action:
racquets.remove(racquet)
# decrement days until due for remaining racquets
for i in range(len(racquets)):
racquet = racquets[i]
racquets[i] = (racquet[0], racquet[1] - 1)
# add new racquets for next day
if state[1] <= len(self.data) - 1:
for racquet in self.data[state[1]]:
# sets upper bound if too many requests built up
if bounded and len(racquets) >= self.numRacquets + bound:
break
racquets.append(racquet)
racquets.sort(key = lambda x: x[0] + str(x[1]))
# compute reward in $
# $20 penalty if racquet will be overdue
# $10 penalty if racquet will be overdue in following day
# if requests are same type, then break the tie by assigning slightly higher reward for stringing the older one
reward = 0
for racquet in action:
if racquet[0] == 'SpdReg':
reward += 40
elif racquet[0] == 'ExpReg':
reward += (30 + (1 - racquet[1]) * .01)
elif racquet[0] == 'StdReg':
reward += (20 + (3 - racquet[1]) * .01)
elif racquet[0] == 'SpdSMT':
reward += 18
elif racquet[0] == 'ExpSMT':
reward += (18 + (1 - racquet[1]) * .01)
elif racquet[0] == 'StdSMT':
reward += (18 + (3 - racquet[1])*.01)
# penalize unstrung racquets if they are overdue today or tomorrow
for racquet in racquets:
if (racquet[1] < 0): reward += (20 * racquet[1])
if (racquet[1] - 1 < 0): reward += (10 * (racquet[1] - 1))
return [((tuple(racquets), state[1] + 1), 1, reward)]
'''
function: discount
----------
Sets the discount factor.
'''
def discount(self):
return 1.0
'''
function: identityFeatureExtractor
----------
Returns a single-element list containing a binary (indicator) feature
for the existence of the (racquets, action) pair. Provides no generalization.
'''
def identityFeatureExtractor(state, action):
featureKey = (tuple(state[0]), action)
featureValue = 1
return featureKey, featureValue
'''
class: QLearningAlgorithm
----------
Defines the Q-learning algorithm. More information in util.RLAlgorithm.
'''
class QLearningAlgorithm(util.RLAlgorithm):
'''
function: __init__
----------
Constructor for QLearningAlgorithm.
actions: a function that takes a state and returns a list of actions.
discount: a number between 0 and 1, which determines the discount factor
featureExtractor: a function that takes a state and action and returns a list of (feature name, feature value) pairs.
explorationProb: the epsilon value indicating how frequently the policy
returns a random action
'''
# def __init__(self, actions, discount, featureExtractor=identityFeatureExtractor, explorationProb=0.2):
def __init__(self, actions, discount, featureExtractor=identityFeatureExtractor, explorationProb=0.2):
self.actions = actions
self.discount = discount
self.featureExtractor = featureExtractor
self.explorationProb = explorationProb
self.weights = defaultdict(float)
self.numIters = 0
self.qStarActions = defaultdict(list)
'''
function: getQ
----------
Returns the Q function associated with the weights and features
'''
def getQ(self, state, action):
score = 0.0
f, v = self.featureExtractor(state, action)
score += self.weights[tuple(f)] * v
return score
'''
function: getAction
----------
This algorithm will produce an action given a state.
Here we use the epsilon-greedy algorithm: with probability
|explorationProb|, take a random action.
'''
def getAction(self, state):
self.numIters += 1
if random.random() < self.explorationProb:
return random.choice(list(self.actions(state)))
else:
return max((self.getQ(state, action), action) for action in self.actions(state))[1]
'''
function: getStepSize
----------
Returns the step size to update the weights.
'''
def getStepSize(self):
return 1.0 / math.sqrt(self.numIters)
'''
function: incorporateFeedback
----------
This function is called by util.py with (s, a, r, s'), which is used to update |weights|.
'''
def incorporateFeedback(self, state, action, reward, newState):
target = reward
if newState is not None:
qOpt = [self.getQ(newState, action) for action in self.actions(newState)]
target += self.discount * max(qOpt)
prediction = self.getQ(state, action)
name, value = self.featureExtractor(state, action)
self.weights[name] -= self.getStepSize() * (prediction - target) * value
'''
function: updateExplorationProb
----------
This function is called by util.py with the current trial number and the total trials,
which is used to update the exploration probability (epsilon).
'''
def updateExplorationProb(self, trialNum, totalTrials):
# return # Uncomment this for constant exploration probability
self.explorationProb -= self.explorationProb/(totalTrials) * trialNum # Uncomment this for epsilon-decreasing exploration probability
# self.explorationProb -= self.explorationProb/(totalTrials * 5) * trialNum # Another version of epsilon-decreasing exploration probability
'''
function: testValueIteration
----------
Test function for Value Iteration.
'''
def testValueIteration(mdp):
valueIter = ValueIteration() # implemented in util.py
valueIter.solve(mdp, .001)
states = sorted(valueIter.pi, key=lambda x: len(x)) # sorted by state space
print('valueIter.pi:')
for elem in sorted(valueIter.pi):
print(elem, '\t:\t', valueIter.pi[elem])
return valueIter
'''
function: testQLearning
----------
Test function for Q-Learning.
'''
def testQLearning(mdp, printPolicy=False):
qLearn = QLearningAlgorithm(mdp.actions, mdp.discount())
rewards = util.simulate(mdp, qLearn, 500)
print('-'*30, 'Data collection for bar graphs', '-'*30)
print(' Dataset: data/training_data_TEST2.csv')
print(' Epsilon: Decreasing, 0.2')
print(' Episodes: 500')
print(' ---Average reward: ', sum(rewards)/len(rewards))
print(' ---Max reward (converged value): ', max(rewards))
# for i in range(0,300,25):
# print('Average reward, episodes %d - %d: %d' %(i, i+25, sum(rewards[i:i+25]) / 25))
qLearn.explorationProb = 0
if printPolicy:
print('qLearn.qStarActions:')
for elem in sorted(qLearn.qStarActions):
print(elem, '\t:\t', qLearn.qStarActions[elem])
return qLearn
'''
function: compareResults
----------
Compares the results of Value Iteration and Q-Learning.
'''
def compareResults(valueIter, qLearn):
diff = 0.0
for state in valueIter.pi:
if qLearn.qStarActions[state] != [] and valueIter.pi[state] != qLearn.qStarActions[state][0]:
diff += 1
elif qLearn.qStarActions[state] != [] and valueIter.pi[state] == qLearn.qStarActions[state][0]:
print('Same policy mapping \n\t STATE---', state, '\n\t\t--- to action ---', valueIter.pi[state])
print('Number of different policy instructions: ', diff)
print('Length of pi_valueIter: ', len(valueIter.pi))
print('Length of pi_QStar: ', len(qLearn.qStarActions))
print('Difference over length of pi_valueIter:', diff / len(valueIter.pi))
print('Difference over length of pi_QStar:', diff / len(qLearn.qStarActions))
'''
function: main
----------
Initializes an MDP and runs appropriate algorithms.
'''
def main():
start = timeit.default_timer()
valueIteration = False
qLearning = True
# mdp = RacquetsMDP(4, 'test_data_save.csv', 6)
# mdp = RacquetsMDP(15, 'data/training_data.csv', 10)
# mdp = RacquetsMDP(13, 'data/training_data_small.csv', 6)
# mdp = RacquetsMDP(13, 'data/training_data_big.csv', 6)
# mdp = RacquetsMDP(13, 'data/training_data_2019-12-12_1842.csv', 8)
# mdp = RacquetsMDP(13, 'data/training_data_TEST2.csv', 6)
mdp = RacquetsMDP(13, 'data/training_data_TEST2.csv', 6)
if valueIteration:
valueIter = testValueIteration(mdp)
if qLearning:
qLearn = testQLearning(mdp)
if valueIteration and qLearning:
compareResults(valueIter, qLearn)
stop = timeit.default_timer()
print('\nTime:', stop - start, 'sec')
if __name__ == '__main__':
main()
| 37.488439
| 147
| 0.606507
|
5669ffb60a370e57ca3397e642b47ac397e6e11b
| 2,410
|
py
|
Python
|
layers.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
layers.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
layers.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
"""
@file: layers.py.py
@time: 2020-12-02 19:32:42
"""
from tensorflow.keras.layers import Layer
from tensorflow.keras import initializers, regularizers, constraints
from tensorflow.keras import backend as K
class Attention(Layer):
"""
Custom Keras attention layer
Reference: https://www.kaggle.com/qqgeogor/keras-lstm-attention-glove840b-lb-0-043
"""
def __init__(self, step_dim, W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None, bias=True, **kwargs):
self.supports_masking = True
self.bias = bias
self.step_dim = step_dim
self.features_dim = None
super(Attention, self).__init__(**kwargs)
self.param_W = {
'initializer': initializers.get('glorot_uniform'),
'name': '{}_W'.format(self.name),
'regularizer': regularizers.get(W_regularizer),
'constraint': constraints.get(W_constraint)
}
self.W = None
self.param_b = {
'initializer': 'zero',
'name': '{}_b'.format(self.name),
'regularizer': regularizers.get(b_regularizer),
'constraint': constraints.get(b_constraint)
}
self.b = None
def build(self, input_shape):
assert len(input_shape) == 3
self.features_dim = input_shape[-1]
self.W = self.add_weight(shape=(input_shape[-1],),
**self.param_W)
if self.bias:
self.b = self.add_weight(shape=(input_shape[1],),
**self.param_b)
self.built = True
def compute_mask(self, input, input_mask=None):
return None
def call(self, x, mask=None):
step_dim = self.step_dim
features_dim = self.features_dim
eij = K.reshape(
K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))),
(-1, step_dim))
if self.bias:
eij += self.b
eij = K.tanh(eij)
a = K.exp(eij)
if mask is not None:
a *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], self.features_dim
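# Illustrative usage (a minimal sketch, not part of the original file): wiring
# the Attention layer into a small Keras text classifier. The vocabulary size,
# sequence length, and layer widths below are all invented.
if __name__ == "__main__":
    from tensorflow.keras import Input, Model
    from tensorflow.keras.layers import Embedding, Bidirectional, LSTM, Dense

    maxlen = 100
    inp = Input(shape=(maxlen,))
    x = Embedding(input_dim=20000, output_dim=128)(inp)
    x = Bidirectional(LSTM(64, return_sequences=True))(x)  # (batch, maxlen, 128)
    x = Attention(step_dim=maxlen)(x)                      # (batch, 128)
    out = Dense(1, activation='sigmoid')(x)
    Model(inp, out).summary()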
| 29.753086
| 90
| 0.576763
|
81664dc77f4f6bb2cf9d0b73b35ac744a3dc4481
| 5,610
|
py
|
Python
|
test/Java/JAR.py
|
bdbaddog/scons-gh-migrate
|
c76589c83ec00650a2d07dce79fc6dc5ca6465fb
|
[
"MIT"
] | 1
|
2015-11-04T22:22:10.000Z
|
2015-11-04T22:22:10.000Z
|
test/Java/JAR.py
|
bdbaddog/scons-gh-migrate
|
c76589c83ec00650a2d07dce79fc6dc5ca6465fb
|
[
"MIT"
] | null | null | null |
test/Java/JAR.py
|
bdbaddog/scons-gh-migrate
|
c76589c83ec00650a2d07dce79fc6dc5ca6465fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myjar.py', r"""
import sys
args = sys.argv[1:]
while args:
a = args[0]
if a == 'cf':
out = args[1]
args = args[1:]
else:
break
args = args[1:]
outfile = open(out, 'wb')
for file in args:
infile = open(file, 'rb')
for l in infile.readlines():
if l[:7] != '/*jar*/':
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(tools = ['jar'],
JAR = r'%(_python_)s myjar.py')
env.Jar(target = 'test1.jar', source = 'test1.class')
""" % locals())
test.write('test1.class', """\
test1.class
/*jar*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match('test1.jar', "test1.class\nline 3\n")
if os.path.normcase('.class') == os.path.normcase('.CLASS'):
test.write('SConstruct', """
env = Environment(tools = ['jar'],
JAR = r'%(_python_)s myjar.py')
env.Jar(target = 'test2.jar', source = 'test2.CLASS')
""" % locals())
test.write('test2.CLASS', """\
test2.CLASS
/*jar*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match('test2.jar', "test2.CLASS\nline 3\n")
test.write('myjar2.py', r"""
import sys
f=open(sys.argv[2], 'wb')
f.write(" ".join(sys.argv[1:]))
f.write("\n")
f.close()
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(tools = ['jar'],
JAR = r'%(_python_)s myjar2.py',
JARFLAGS='cvf')
env.Jar(target = 'classes.jar', source = [ 'testdir/bar.class',
'foo.mf' ],
TESTDIR='testdir',
JARCHDIR='$TESTDIR')
""" % locals())
test.subdir('testdir')
test.write([ 'testdir', 'bar.class' ], 'foo')
test.write('foo.mf',
"""Manifest-Version : 1.0
blah
blah
blah
""")
test.run(arguments='classes.jar')
test.must_match('classes.jar',
'cvfm classes.jar foo.mf -C testdir bar.class\n')
where_javac, java_version = test.java_where_javac()
where_jar = test.java_where_jar()
test.write("wrapper.py", """\
import os
import sys
open('%s', 'ab').write("wrapper.py %%s\\n" %% " ".join(sys.argv[1:]))
os.system(" ".join(sys.argv[1:]))
""" % test.workpath('wrapper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
foo = Environment(tools = ['javac', 'jar'],
JAVAC = r'%(where_javac)s',
JAR = r'%(where_jar)s')
jar = foo.Dictionary('JAR')
bar = foo.Clone(JAR = r'%(_python_)s wrapper.py ' + jar)
foo.Java(target = 'classes', source = 'com/sub/foo')
bar.Java(target = 'classes', source = 'com/sub/bar')
foo.Jar(target = 'foo', source = 'classes/com/sub/foo')
bar.Jar(target = 'bar', source = 'classes/com/sub/bar')
""" % locals())
test.subdir('com',
['com', 'sub'],
['com', 'sub', 'foo'],
['com', 'sub', 'bar'])
test.write(['com', 'sub', 'foo', 'Example1.java'], """\
package com.sub.foo;
public class Example1
{
public static void main(String[] args)
{
}
}
""")
test.write(['com', 'sub', 'foo', 'Example2.java'], """\
package com.sub.foo;
public class Example2
{
public static void main(String[] args)
{
}
}
""")
test.write(['com', 'sub', 'foo', 'Example3.java'], """\
package com.sub.foo;
public class Example3
{
public static void main(String[] args)
{
}
}
""")
test.write(['com', 'sub', 'bar', 'Example4.java'], """\
package com.sub.bar;
public class Example4
{
public static void main(String[] args)
{
}
}
""")
test.write(['com', 'sub', 'bar', 'Example5.java'], """\
package com.sub.bar;
public class Example5
{
public static void main(String[] args)
{
}
}
""")
test.write(['com', 'sub', 'bar', 'Example6.java'], """\
package com.sub.bar;
public class Example6
{
public static void main(String[] args)
{
}
}
""")
test.run(arguments = '.')
expected_wrapper_out = "wrapper.py %(where_jar)s cf bar.jar classes/com/sub/bar\n"
expected_wrapper_out = expected_wrapper_out.replace('/', os.sep)
test.must_match('wrapper.out',
expected_wrapper_out % locals())
test.must_exist('foo.jar')
test.must_exist('bar.jar')
test.up_to_date(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 22.086614
| 82
| 0.612834
|
2573e05a44a918e37ca1ae9fbe409ed7f18a2379
| 9,217
|
py
|
Python
|
app.py
|
jmewes/epages-beyond-payment-app
|
d610483fd016c38444bc634e4535c59a2f99f7f3
|
[
"MIT"
] | null | null | null |
app.py
|
jmewes/epages-beyond-payment-app
|
d610483fd016c38444bc634e4535c59a2f99f7f3
|
[
"MIT"
] | 3
|
2022-03-07T17:37:31.000Z
|
2022-03-08T08:39:38.000Z
|
app.py
|
jmewes/epages-beyond-payment-app
|
d610483fd016c38444bc634e4535c59a2f99f7f3
|
[
"MIT"
] | 1
|
2018-12-13T09:29:59.000Z
|
2018-12-13T09:29:59.000Z
|
# -*- coding: utf-8 -*-
'''
Author: Oliver Zscheyge
Description:
Web app that generates beautiful order documents for ePages Beyond shops.
'''
import os
import logging
import random
from urllib.parse import urlencode, urlparse, unquote
from flask import Flask, render_template, request, Response, abort, escape, jsonify
from app_installations import AppInstallations, PostgresAppInstallations
from shops import Shop, PostgresShops, get_shop_id
from payment_method_definitions import create_payment_method
import payments
from signers import sign
app = Flask(__name__)
APP_INSTALLATIONS = None
SHOPS = None
CLIENT_SECRET = ''
DEFAULT_HOSTNAME = ''
LOGGER = logging.getLogger('app')
AUTO_INSTALLED_PAYMENT_METHOD_DEFINITIONS = [
'beautiful-test-payment-embedded',
'beautiful-test-payment-embedded-selection',
'beautiful-test-payment-capture-on-demand'
]
@app.route('/')
def root():
if DEFAULT_HOSTNAME != '':
return render_template('index.html', installed=True, hostname=DEFAULT_HOSTNAME)
return render_template('index.html', installed=False)
@app.route('/<hostname>')
def root_hostname(hostname):
return render_template('index.html', installed=True, hostname=hostname)
@app.route('/callback')
def callback():
args = request.args
return_url = args.get('return_url')
access_token_url = args.get('access_token_url')
api_url = args.get('api_url')
code = args.get('code')
signature = unquote(args.get('signature'))
APP_INSTALLATIONS.retrieve_token_from_auth_code(api_url, code, access_token_url, signature)
hostname = urlparse(api_url).hostname
installation = get_installation(hostname)
created_payment_methods = _auto_create_payment_methods(installation)
_get_and_store_shop_id(installation)
return render_template('callback_result.html',
return_url=return_url,
created_payment_methods=created_payment_methods)
def _auto_create_payment_methods(installation):
created_payment_methods = []
for pmd_name in AUTO_INSTALLED_PAYMENT_METHOD_DEFINITIONS:
status = create_payment_method(installation, pmd_name)
created_payment_methods.append({
'status_code': status,
'payment_method_definition_name': pmd_name
})
print('Created payment method for %s in shop %s with status %i' % (pmd_name, installation.hostname, status))
return created_payment_methods
def _get_and_store_shop_id(installation):
shop_id = get_shop_id(installation)
shop = Shop(shop_id, installation.hostname)
SHOPS.create_or_update_shop(shop)
@app.route('/merchants/<shop_id>')
def merchant_account_status(shop_id):
print('Serving always ready merchant account status')
return jsonify({
'ready' : True,
'details' : {
'primaryEmail' : 'example@b.c'
}
})
@app.route('/payments', methods=['POST'])
def create_payment():
print('Creating payment with paymentNote')
return jsonify({
'paymentNote': 'Please transfer the money using the reference %s to the account %s' % (generate_id(), generate_id()),
})
@app.route('/embedded-payments', methods=['POST'])
def create_embedded_payment():
app_hostname = urlparse(request.url_root).hostname
payload = request.get_json(force=True)
shop = payload.get('shop', {}).get('name', '')
shop_id = payload.get('shopId', '')
payment_id = payload.get('paymentId', '')
signature = sign('%s:%s' % (shop_id, payment_id), CLIENT_SECRET)
params = {
'paymentId': payment_id,
'signature': signature,
'shop': shop,
'shopId': shop_id
}
embeddedApprovalUri = 'https://%s/embedded-payment-approval?%s' % (app_hostname, urlencode(params))
print('Created embedded payment for shop %s' % shop)
return jsonify({
'embeddedApprovalUri': embeddedApprovalUri
})
@app.route('/embedded-payment-approval')
def embedded_payment_approval():
args = request.args
payment_id = unquote(args.get('paymentId', ''))
signature = unquote(args.get('signature', ''))
shop = unquote(args.get('shop', ''))
shop_id = unquote(args.get('shopId', ''))
approve_uri = '/payments/%s/approve' % payment_id
cancel_uri = '/payments/%s/cancel' % payment_id
if _validate_signature(signature, shop_id, payment_id):
return render_template('embedded_payment_approval.html',
state='PENDING',
signature=signature,
shop=shop,
shop_id=shop_id,
approve_uri=approve_uri,
cancel_uri=cancel_uri)
return render_template('embedded_payment_approval.html',
state='ERROR')
def _validate_signature(signature_to_validate, shop_id, payment_id):
expected_signature = sign('%s:%s' % (shop_id, payment_id), CLIENT_SECRET)
match = signature_to_validate == expected_signature
print('Validated signatures: %s | %s | match: %s' % (signature_to_validate, expected_signature, str(match)))
return match
@app.route('/payments/<payment_id>/approve', methods=['POST'])
def approve_payment(payment_id):
''' Currently only needed for embedded payments
'''
print('Approving payment %s' % payment_id)
shop_id = request.form.get('shop_id', '')
signature = request.form.get('signature', '')
shop = SHOPS.get_shop(shop_id)
installation = get_installation(shop.hostname)
if _validate_signature(signature, shop_id, payment_id):
return_uri = payments.approve_payment(installation, payment_id)
return render_template('embedded_payment_approval.html',
state='APPROVED',
return_uri=return_uri)
return render_template('embedded_payment_approval.html',
state='ERROR')
@app.route('/payments/<payment_id>/cancel', methods=['POST'])
def cancel_payment(payment_id):
''' Currently only needed for embedded payments
'''
print('Canceling payment %s' % payment_id)
shop_id = request.form.get('shop_id', '')
signature = request.form.get('signature', '')
shop = SHOPS.get_shop(shop_id)
installation = get_installation(shop.hostname)
if _validate_signature(signature, shop_id, payment_id):
return_uri = payments.cancel_payment(installation, payment_id)
return render_template('embedded_payment_approval.html',
state='CANCELED',
return_uri=return_uri)
return render_template('embedded_payment_approval.html',
state='ERROR')
@app.route('/payments/<payment_id>/capture', methods=['POST'])
def capture_payment(payment_id):
print('Capturing payment %s' % payment_id)
return jsonify({
'paymentStatus' : 'CAPTURED',
})
def generate_id():
return ''.join(random.choice('ABCDEFGHJKLMNPQRSTUVWXYZ23456789') for _ in range(8))
@app.before_request
def limit_open_proxy_requests():
'''Security measure to prevent:
http://serverfault.com/questions/530867/baidu-in-nginx-access-log
http://security.stackexchange.com/questions/41078/url-from-another-domain-in-my-access-log
http://serverfault.com/questions/115827/why-does-apache-log-requests-to-get-http-www-google-com-with-code-200
http://stackoverflow.com/questions/22251038/how-to-limit-flask-dev-server-to-only-one-visiting-ip-address
'''
if not is_allowed_request():
print('Someone is messing with us:')
print(request.url_root)
print(request)
abort(403)
def is_allowed_request():
url = request.url_root
return '.herokuapp.com' in url or \
'.ngrok.io' in url or \
'localhost:8080' in url or \
'127.0.0' in url or \
'0.0.0.0:80' in url
def get_installation(hostname):
installation = APP_INSTALLATIONS.get_installation(hostname)
if not installation:
raise ShopNotKnown(hostname)
return installation
@app.errorhandler(404)
def page_not_found(e):
return '<h1>404 File Not Found! :(</h1>', 404
class ShopNotKnown(Exception):
def __init__(self, hostname):
super().__init__()
self.hostname = hostname
@app.errorhandler(ShopNotKnown)
def shop_not_known(e):
return render_template('index.html', installed=False, error_message='App not installed for the requested shop with hostname %s' % e.hostname)
@app.errorhandler(Exception)
def all_exception_handler(error):
LOGGER.exception(error)
return 'Error', 500
def init():
global APP_INSTALLATIONS
global SHOPS
global DEFAULT_HOSTNAME
global CLIENT_SECRET
CLIENT_ID = os.environ.get('CLIENT_ID', '')
CLIENT_SECRET = os.environ.get('CLIENT_SECRET', '')
print('Initialize PostgresAppInstallations')
APP_INSTALLATIONS = PostgresAppInstallations(os.environ.get('DATABASE_URL'), CLIENT_ID, CLIENT_SECRET)
APP_INSTALLATIONS.create_schema()
print('Initialize PostgresShops')
SHOPS = PostgresShops(os.environ.get('DATABASE_URL'))
SHOPS.create_schema()
init()
if __name__ == '__main__':
app.run()
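# Illustrative request (hedged; the field names mirror create_embedded_payment
# above, and every value is a placeholder):
#
# curl -X POST https://<app-host>/embedded-payments \
#      -H 'Content-Type: application/json' \
#      -d '{"shop": {"name": "demo-shop"}, "shopId": "shop-1", "paymentId": "pay-1"}'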
| 34.781132
| 145
| 0.683303
|
afa9aade0f670cafe2342de0687a2bf1e4f85a79
| 486
|
py
|
Python
|
src/paper/migrations/0077_paper_edited_file_extract.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 18
|
2021-05-20T13:20:16.000Z
|
2022-02-11T02:40:18.000Z
|
src/paper/migrations/0077_paper_edited_file_extract.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 109
|
2021-05-21T20:14:23.000Z
|
2022-03-31T20:56:10.000Z
|
src/paper/migrations/0077_paper_edited_file_extract.py
|
ResearchHub/ResearchHub-Backend-Open
|
d36dca33afae2d442690694bb2ab17180d84bcd3
|
[
"MIT"
] | 4
|
2021-05-17T13:47:53.000Z
|
2022-02-12T10:48:21.000Z
|
# Generated by Django 2.2 on 2021-03-03 00:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paper', '0076_paper_pdf_file_extract'),
]
operations = [
migrations.AddField(
model_name='paper',
name='edited_file_extract',
field=models.FileField(blank=True, default=None, max_length=512, null=True, upload_to='uploads/papers/%Y/%m/%d/edited_extract'),
),
]
| 25.578947
| 140
| 0.63786
|
4df318add8e40fb947867a3d0f312a6646e69e57
| 3,464
|
py
|
Python
|
couchdb/database.py
|
mqzry/pycouchdb
|
1592b1b501e6e2dea3009a83e9c937da70b49af8
|
[
"MIT"
] | null | null | null |
couchdb/database.py
|
mqzry/pycouchdb
|
1592b1b501e6e2dea3009a83e9c937da70b49af8
|
[
"MIT"
] | 10
|
2016-08-02T17:24:05.000Z
|
2016-09-06T17:06:06.000Z
|
couchdb/database.py
|
mqzry/pycouchdb
|
1592b1b501e6e2dea3009a83e9c937da70b49af8
|
[
"MIT"
] | null | null | null |
from .document import Document
from requests import codes
import logging
class Database:
def __init__(self, server, name):
self.name = name
self.server = server
self.session = server.session
self.session.add_part_to_prefix(name)
def exists(self):
return self.head()
def head(self):
"""Check existence of a database.
:param self.name: name of database.
:return: whether this database exists.
:rtype: boolean
"""
r = self.session.head()
return r.status_code == codes.ok
def get(self):
r = self.session.get()
if r.status_code == codes.ok:
return r.json()
else:
return None
def put(self):
r = self.session.put()
if r.status_code == codes.created:
return True
else:
info = r.json()
logging.info('Tried to create {0} but {1} happend because {2}'
.format(self.name, info['error'], info['reason']))
return False
def delete(self):
r = self.session.delete(self.name)
if r.status_code == codes.ok:
return True
elif r.status_code == codes.bad_request:
logging.info('Failed attempt to delete database {0}. The request url {1} is not valid.'.format(self.name, r.url)
+ 'Probably a invalid database name or forgotten document id by accident.')
elif r.status_code == codes.not_found:
logging.info('Failed attempt to delete database {0}. It does not exist.'.format(self.name))
elif r.status_code == codes.unauthorized:
logging.info('Failed attempt to delete database {0}. CouchDB Server Administrator privileges required.'.format(self.name))
return False
def post(self, doc, full_commit=None, batch=None):
query_params = {}
if batch is not None:
query_params['batch'] = batch
request_headers = {}
if full_commit is not None:
request_headers['X-Couch-Full-Commit'] = full_commit
r = self.session.post(json=doc, params=query_params, headers=request_headers)
body = r.json()
if r.status_code == codes.ok:
return Document(self, body['id'], body['rev'])
elif r.status_code == codes.created:
return Document(self, body['id'], body['rev'])
elif r.status_code == codes.bad_request:
return False
elif r.status_code == codes.unauthorized:
return False
elif r.status_code == codes.not_found:
return False
elif r.status_code == codes.conflict:
return False
# Note: the three methods below previously referenced self.db_url, self.user
# and self.password, none of which are ever set on this class; they now go
# through self.session (whose prefix already includes the database name),
# matching the other methods above. That the session accepts a relative path
# segment is inferred from delete() above.
def query(self, method, params=None):
response = self.session.get(method, params=params)
return response.url
def put_bulk(self, docs):
response = self.session.post('_bulk_docs', json={'docs': docs})
return response.json()
def get_bulk(self, ids):
str_ids = [str(doc_id) for doc_id in ids]
response = self.session.post('_all_docs', json={'keys': str_ids})
return response.json()
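# Illustrative usage (hedged; assumes a Server wrapper from this package and a
# running CouchDB instance; all names below are placeholders):
#
# srv = Server('http://localhost:5984')
# db = Database(srv, 'books')
# if not db.exists():
#     db.put()
# doc = db.post({'title': 'Moby-Dick'})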
| 35.346939
| 134
| 0.568129
|
6556abfa0393649f9a79992dffd48f6c9c0ed3bb
| 28,513
|
py
|
Python
|
tests/dedupe_test.py
|
tvtongerloo/django-DefectDojo
|
77f241afd7773a95a73621fcc60971263885337a
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T17:25:57.000Z
|
2021-01-19T17:25:57.000Z
|
tests/dedupe_test.py
|
tvtongerloo/django-DefectDojo
|
77f241afd7773a95a73621fcc60971263885337a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dedupe_test.py
|
tvtongerloo/django-DefectDojo
|
77f241afd7773a95a73621fcc60971263885337a
|
[
"BSD-3-Clause"
] | null | null | null |
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import unittest
import sys
import os
from base_test_class import BaseTestCase, on_exception_html_source_logger
from product_test import ProductTest
import time
import logging
logger = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
class DedupeTest(BaseTestCase):
# --------------------------------------------------------------------------------------------------------
# Initialization
# --------------------------------------------------------------------------------------------------------
def setUp(self):
super().setUp()
self.relative_path = os.path.dirname(os.path.realpath(__file__))
def check_nb_duplicates(self, expected_number_of_duplicates):
logger.debug("checking duplicates...")
driver = self.driver
retries = 0
for i in range(0, 18):
time.sleep(5) # wait bit for celery dedupe task which can be slow on travis
self.goto_all_findings_list(driver)
dupe_count = 0
# iterate over the rows of the findings table and concatenates all columns into td.text
trs = driver.find_elements_by_xpath('//*[@id="open_findings"]/tbody/tr')
for row in trs:
concatRow = ' '.join([td.text for td in row.find_elements_by_xpath(".//td")])
# print(concatRow)
if '(DUPE)' in concatRow and 'Duplicate' in concatRow:
dupe_count += 1
if (dupe_count != expected_number_of_duplicates):
logger.debug("duplicate count mismatch, let's wait a bit for the celery dedupe task to finish and try again (5s)")
else:
break
if (dupe_count != expected_number_of_duplicates):
findings_table = driver.find_element_by_id('open_findings')
print(findings_table.get_attribute('innerHTML'))
self.assertEqual(dupe_count, expected_number_of_duplicates)
@on_exception_html_source_logger
def test_enable_deduplication(self):
logger.debug("enabling deduplication...")
driver = self.driver
driver.get(self.base_url + 'system_settings')
if not driver.find_element_by_id('id_enable_deduplication').is_selected():
driver.find_element_by_xpath('//*[@id="id_enable_deduplication"]').click()
# save settings
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# check if it's enabled after reload
driver.get(self.base_url + 'system_settings')
self.assertTrue(driver.find_element_by_id('id_enable_deduplication').is_selected())
@on_exception_html_source_logger
def test_delete_findings(self):
logger.debug("removing previous findings...")
driver = self.driver
driver.get(self.base_url + "finding?page=1")
if self.element_exists_by_id("no_findings"):
text = driver.find_element_by_id("no_findings").text
if 'No findings found.' in text:
return
driver.find_element_by_id("select_all").click()
driver.find_element_by_css_selector("i.fa.fa-trash").click()
try:
WebDriverWait(driver, 1).until(EC.alert_is_present(),
'Timed out waiting for PA creation ' +
'confirmation popup to appear.')
driver.switch_to.alert.accept()
except TimeoutException:
self.fail('Confirmation dialogue not shown, cannot delete previous findings')
text = None
if self.element_exists_by_id("no_findings"):
text = driver.find_element_by_id("no_findings").text
self.assertTrue('No findings found.' in text)
# check that user was redirect back to url where it came from based on return_url
self.assertTrue(driver.current_url.endswith('page=1'))
# --------------------------------------------------------------------------------------------------------
# Same scanner deduplication - Deduplication on engagement
# Test deduplication for Bandit SAST scanner
# --------------------------------------------------------------------------------------------------------
@on_exception_html_source_logger # noqa: E301
def test_add_path_test_suite(self):
logger.debug("Same scanner deduplication - Deduplication on engagement - static. Creating tests...")
# Create engagement
driver = self.driver
self.goto_product_overview(driver)
driver.find_element_by_class_name("pull-left").click()
driver.find_element_by_link_text("Add New Engagement").click()
driver.find_element_by_id("id_name").send_keys("Dedupe Path Test")
driver.find_element_by_xpath('//*[@id="id_deduplication_on_engagement"]').click()
driver.find_element_by_name("_Add Tests").click()
self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
# Add the tests
# Test 1
driver.find_element_by_id("id_title").send_keys("Path Test 1")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Bandit Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_name("_Add Another Test").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
# Test 2
driver.find_element_by_id("id_title").send_keys("Path Test 2")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Bandit Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
@on_exception_html_source_logger
def test_import_path_tests(self):
logger.debug("importing reports...")
# First test
driver = self.driver
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Path Test").click()
driver.find_element_by_partial_link_text("Path Test 1").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
# active and verified:
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_path_1.json")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
# Second test
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Path Test").click()
driver.find_element_by_partial_link_text("Path Test 2").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_path_2.json")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
@on_exception_html_source_logger
def test_check_path_status(self):
# comparing tests/dedupe_scans/dedupe_path_1.json and tests/dedupe_scans/dedupe_path_2.json
# Counts the findings that have on the same line "(DUPE)" (in the title) and "Duplicate" (marked as duplicate by DD)
# We have imported 3 findings twice, but one only is a duplicate because for the 2 others, we have changed either the line number or the file_path
self.check_nb_duplicates(1)
# --------------------------------------------------------------------------------------------------------
# Same scanner deduplication - Deduplication on engagement
# Test deduplication for Immuniweb dynamic scanner
# --------------------------------------------------------------------------------------------------------
@on_exception_html_source_logger
def test_add_endpoint_test_suite(self):
logger.debug("Same scanner deduplication - Deduplication on engagement - dynamic. Creating tests...")
# Create engagement
driver = self.driver
self.goto_product_overview(driver)
driver.find_element_by_class_name("pull-left").click()
driver.find_element_by_link_text("Add New Engagement").click()
driver.find_element_by_id("id_name").send_keys("Dedupe Endpoint Test")
driver.find_element_by_xpath('//*[@id="id_deduplication_on_engagement"]').click()
driver.find_element_by_name("_Add Tests").click()
self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
# Add the tests
# Test 1
driver.find_element_by_id("id_title").send_keys("Endpoint Test 1")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Immuniweb Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_name("_Add Another Test").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
# Test 2
driver.find_element_by_id("id_title").send_keys("Endpoint Test 2")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Immuniweb Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
@on_exception_html_source_logger
def test_import_endpoint_tests(self):
logger.debug("Importing reports...")
# First test : Immuniweb Scan (dynamic)
driver = self.driver
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Endpoint Test").click()
driver.find_element_by_partial_link_text("Endpoint Test 1").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
# active and verified
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
# Second test : Immuniweb Scan (dynamic)
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Endpoint Test").click()
driver.find_element_by_partial_link_text("Endpoint Test 2").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
# active and verified
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_2.xml")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
@on_exception_html_source_logger
def test_check_endpoint_status(self):
# comparing dedupe_endpoint_1.xml and dedupe_endpoint_2.xml
        # Count the findings that carry both "(DUPE)" in the title and the "Duplicate" status set by DefectDojo
        # We have imported 3 findings twice, but only one is a duplicate because for the other 2 we changed either (the URL) or (the name and cwe)
self.check_nb_duplicates(1)
@on_exception_html_source_logger
def test_add_same_eng_test_suite(self):
logger.debug("Test different scanners - same engagement - dynamic; Adding tests on the same engagement...")
# Create engagement
driver = self.driver
self.goto_product_overview(driver)
driver.find_element_by_class_name("pull-left").click()
driver.find_element_by_link_text("Add New Engagement").click()
driver.find_element_by_id("id_name").send_keys("Dedupe Same Eng Test")
driver.find_element_by_xpath('//*[@id="id_deduplication_on_engagement"]').click()
driver.find_element_by_name("_Add Tests").click()
self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
# Add the tests
# Test 1
driver.find_element_by_id("id_title").send_keys("Same Eng Test 1")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Immuniweb Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_name("_Add Another Test").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
# Test 2
driver.find_element_by_id("id_title").send_keys("Same Eng Test 2")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Generic Findings Import")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
@on_exception_html_source_logger
def test_import_same_eng_tests(self):
logger.debug("Importing reports")
# First test : Immuniweb Scan (dynamic)
driver = self.driver
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Same Eng Test").click()
driver.find_element_by_partial_link_text("Same Eng Test 1").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
# Second test : Generic Findings Import with Url (dynamic)
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Same Eng Test").click()
driver.find_element_by_partial_link_text("Same Eng Test 2").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_cross_1.csv")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
@on_exception_html_source_logger
def test_check_same_eng_status(self):
        # comparing dedupe_endpoint_1.xml and dedupe_cross_1.csv (the two files imported just above)
        # Count the findings that carry both "(DUPE)" in the title and the "Duplicate" status set by DefectDojo
        # We have imported 3 findings twice, but only one is a duplicate because for the other 2 we changed either (the URL) or (the name and cwe)
self.check_nb_duplicates(1)
# --------------------------------------------------------------------------------------------------------
# Same scanner deduplication - Deduplication on engagement
# Test deduplication for Checkmarx SAST Scan with custom hash_code computation
# Upon import, Checkmarx Scan aggregates on : categories, cwe, name, sinkFilename
# That test shows that the custom hash_code (excluding line number, see settings.py)
# makes it possible to detect the duplicate even if the line number has changed (which will occur in a normal software lifecycle)
# --------------------------------------------------------------------------------------------------------
def test_add_path_test_suite_checkmarx_scan(self):
logger.debug("Same scanner deduplication - Deduplication on engagement. Test dedupe on checkmarx aggregated with custom hash_code computation")
# Create engagement
driver = self.driver
self.goto_product_overview(driver)
driver.find_element_by_class_name("pull-left").click()
driver.find_element_by_link_text("Add New Engagement").click()
driver.find_element_by_id("id_name").send_keys("Dedupe on hash_code only")
driver.find_element_by_xpath('//*[@id="id_deduplication_on_engagement"]').click()
driver.find_element_by_name("_Add Tests").click()
self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
# Add the tests
# Test 1
driver.find_element_by_id("id_title").send_keys("Path Test 1")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Checkmarx Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_name("_Add Another Test").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
# Test 2
driver.find_element_by_id("id_title").send_keys("Path Test 2")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Checkmarx Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
def test_import_path_tests_checkmarx_scan(self):
# First test
driver = self.driver
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe on hash_code only").click()
driver.find_element_by_partial_link_text("Path Test 1").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
# os.path.realpath makes the path canonical
driver.find_element_by_id('id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 2 findings were processed'))
# Second test
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe on hash_code only").click()
driver.find_element_by_partial_link_text("Path Test 2").click()
driver.find_element_by_id("dropdownMenu1").click()
driver.find_element_by_link_text("Re-Upload Scan").click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings_line_changed.xml"))
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 2 findings were processed'))
def test_check_path_status_checkmarx_scan(self):
# After aggregation, it's only two findings. Both are duplicates even though the line number has changed
# because we ignore the line number when computing the hash_code for this scanner
# (so that findings keep being found as duplicate even if the code changes slightly)
self.check_nb_duplicates(2)
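    # Illustrative sketch (not run by the suite): a line-number-agnostic
    # hash_code in the spirit of the custom Checkmarx configuration described
    # above, built from the fields the banner comment names (categories, cwe,
    # name, sinkFilename). The exact fields and hashing are assumptions; the
    # real per-scanner hash_code fields are configured in DefectDojo's settings.py.
    @staticmethod
    def _sketch_line_agnostic_hash_code(categories, cwe, name, sink_filename):
        import hashlib
        # the line number is deliberately excluded so the hash survives code shifts
        fields = [categories or '', str(cwe or 0), name or '', sink_filename or '']
        return hashlib.sha256('|'.join(fields).encode('utf-8')).hexdigest()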
# --------------------------------------------------------------------------------------------------------
# Cross scanners deduplication - product-wide deduplication
# Test deduplication for Generic Findings Import with URL (dynamic) vs Immuniweb dynamic scanner
# --------------------------------------------------------------------------------------------------------
def test_add_cross_test_suite(self):
logger.debug("Cross scanners deduplication dynamic; generic finding vs immuniweb. Creating tests...")
# Create generic engagement
driver = self.driver
self.goto_product_overview(driver)
driver.find_element_by_class_name("pull-left").click()
driver.find_element_by_link_text("Add New Engagement").click()
driver.find_element_by_id("id_name").send_keys("Dedupe Generic Test")
# driver.find_element_by_xpath('//*[@id="id_deduplication_on_engagement"]').click()
driver.find_element_by_name("_Add Tests").click()
self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
# Test
driver.find_element_by_id("id_title").send_keys("Generic Test")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Generic Findings Import")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
# Create immuniweb engagement
self.goto_product_overview(driver)
driver.find_element_by_class_name("pull-left").click()
driver.find_element_by_link_text("Add New Engagement").click()
driver.find_element_by_id("id_name").send_keys("Dedupe Immuniweb Test")
# driver.find_element_by_xpath('//*[@id="id_deduplication_on_engagement"]').click()
driver.find_element_by_name("_Add Tests").click()
self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
# Test
driver.find_element_by_id("id_title").send_keys("Immuniweb Test")
Select(driver.find_element_by_id("id_test_type")).select_by_visible_text("Immuniweb Scan")
Select(driver.find_element_by_id("id_environment")).select_by_visible_text("Development")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(self.is_success_message_present(text='Test added successfully'))
def test_import_cross_test(self):
logger.debug("Importing findings...")
# First test : Immuniweb Scan (dynamic)
driver = self.driver
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Immuniweb Test").click()
driver.find_element_by_partial_link_text("Immuniweb Test").click()
driver.find_element_by_css_selector("b.fa.fa-ellipsis-v").click()
driver.find_element_by_link_text("Re-Upload Scan Results").click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
# Second test : generic scan with url (dynamic)
self.goto_active_engagements_overview(driver)
driver.find_element_by_partial_link_text("Dedupe Generic Test").click()
driver.find_element_by_partial_link_text("Generic Test").click()
driver.find_element_by_css_selector("b.fa.fa-ellipsis-v").click()
driver.find_element_by_link_text("Re-Upload Scan Results").click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[3]/div/div').click()
driver.find_element_by_xpath('//*[@id="base-content"]/form/div[4]/div/div').click()
driver.find_element_by_id('id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_cross_1.csv")
driver.find_elements_by_css_selector("button.btn.btn-primary")[1].click()
self.assertTrue(self.is_success_message_present(text='a total of 3 findings were processed'))
def test_check_cross_status(self):
self.check_nb_duplicates(1)
def add_dedupe_tests_to_suite(suite):
suite.addTest(BaseTestCase('test_login'))
suite.addTest(ProductTest('test_create_product'))
suite.addTest(DedupeTest('test_enable_deduplication'))
# Test same scanners - same engagement - static - dedupe
suite.addTest(DedupeTest('test_delete_findings'))
suite.addTest(DedupeTest('test_add_path_test_suite'))
suite.addTest(DedupeTest('test_import_path_tests'))
suite.addTest(DedupeTest('test_check_path_status'))
# Test same scanners - same engagement - dynamic - dedupe
suite.addTest(DedupeTest('test_delete_findings'))
suite.addTest(DedupeTest('test_add_endpoint_test_suite'))
suite.addTest(DedupeTest('test_import_endpoint_tests'))
suite.addTest(DedupeTest('test_check_endpoint_status'))
# Test different scanners - same engagement - dynamic - dedupe
suite.addTest(DedupeTest('test_delete_findings'))
suite.addTest(DedupeTest('test_add_same_eng_test_suite'))
suite.addTest(DedupeTest('test_import_same_eng_tests'))
suite.addTest(DedupeTest('test_check_same_eng_status'))
# Test same scanners - same engagement - static - dedupe with custom hash_code
suite.addTest(DedupeTest('test_delete_findings'))
suite.addTest(DedupeTest('test_add_path_test_suite_checkmarx_scan'))
suite.addTest(DedupeTest('test_import_path_tests_checkmarx_scan'))
suite.addTest(DedupeTest('test_check_path_status_checkmarx_scan'))
# Test different scanners - different engagement - dynamic - dedupe
suite.addTest(DedupeTest('test_delete_findings'))
suite.addTest(DedupeTest('test_add_cross_test_suite'))
suite.addTest(DedupeTest('test_import_cross_test'))
suite.addTest(DedupeTest('test_check_cross_status'))
# Clean up
suite.addTest(ProductTest('test_delete_product'))
return suite
def suite():
suite = unittest.TestSuite()
add_dedupe_tests_to_suite(suite)
suite.addTest(DedupeTest('enable_jira'))
suite.addTest(DedupeTest('enable_github'))
    # block mode is no longer needed, so it is good to actually run in non-block mode so that celery does the dedupe
# suite.addTest(DedupeTest('enable_block_execution'))
add_dedupe_tests_to_suite(suite)
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(descriptions=True, failfast=True, verbosity=2)
ret = not runner.run(suite()).wasSuccessful()
BaseTestCase.tearDownDriver()
sys.exit(ret)
| 56.127953
| 154
| 0.692316
|
00ed71b8fb74f17634f10c9ea4f9d985d8316e81
| 1,991
|
py
|
Python
|
plugins/fortinet_fortigate/unit_test/test_get_policies.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | null | null | null |
plugins/fortinet_fortigate/unit_test/test_get_policies.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | null | null | null |
plugins/fortinet_fortigate/unit_test/test_get_policies.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | null | null | null |
import sys
import os
from unittest import TestCase
from icon_fortinet_fortigate.actions.get_policies import GetPolicies
from icon_fortinet_fortigate.actions.get_policies.schema import Input, Output
from unit_test.util import Util
from unittest.mock import patch
from parameterized import parameterized
from insightconnect_plugin_runtime.exceptions import PluginException
sys.path.append(os.path.abspath("../"))
@patch("requests.Session.request", side_effect=Util.mocked_requests)
class TestGetPolicies(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.action = Util.default_connector(GetPolicies())
@parameterized.expand(
[
[
"get_policies",
"Test Policy",
{
"policies": [
{
"policyid": 1,
"q_origin_key": 1,
"name": "Test Policy",
"uuid": "6193559a-6862-51ea-44ce-e27594b8536a",
"srcintf": [{"name": "port1", "q_origin_key": "port1"}],
"dstintf": [{"name": "port1", "q_origin_key": "port1"}],
"srcaddr": [{"name": "Test Group", "q_origin_key": "Test Group"}],
"dstaddr": [{"name": "Test Group", "q_origin_key": "Test Group"}],
"action": "accept",
"service": [{"name": "ALL", "q_origin_key": "ALL"}],
}
],
},
],
[
"get_policies_invalid_name",
"Invalid Policy",
{
"policies": [],
},
],
]
)
def test_get_policies(self, mock_request, name, name_filter, expected):
actual = self.action.run({Input.NAME_FILTER: name_filter})
self.assertEqual(actual, expected)
| 36.2
| 94
| 0.503265
|
55cb5c5a725eba316b3c81a2220c2aae35ced440
| 1,067
|
py
|
Python
|
ding/entry/tests/test_serial_entry_sqil.py
|
youngzhou1999/DI-engine
|
7e9e4e88a02c0fcf1a6399272b09f1dd7595e845
|
[
"Apache-2.0"
] | null | null | null |
ding/entry/tests/test_serial_entry_sqil.py
|
youngzhou1999/DI-engine
|
7e9e4e88a02c0fcf1a6399272b09f1dd7595e845
|
[
"Apache-2.0"
] | null | null | null |
ding/entry/tests/test_serial_entry_sqil.py
|
youngzhou1999/DI-engine
|
7e9e4e88a02c0fcf1a6399272b09f1dd7595e845
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import torch
from copy import deepcopy
from ding.entry import serial_pipeline
from ding.entry.serial_entry_sqil import serial_pipeline_sqil
from dizoo.classic_control.cartpole.config.cartpole_sql_config import cartpole_sql_config, cartpole_sql_create_config
from dizoo.classic_control.cartpole.config.cartpole_sqil_config import cartpole_sqil_config, cartpole_sqil_create_config
@pytest.mark.unittest
def test_sqil():
expert_policy_state_dict_path = './expert_policy.pth'
config = [deepcopy(cartpole_sql_config), deepcopy(cartpole_sql_create_config)]
expert_policy = serial_pipeline(config, seed=0)
torch.save(expert_policy.collect_mode.state_dict(), expert_policy_state_dict_path)
config = [deepcopy(cartpole_sqil_config), deepcopy(cartpole_sqil_create_config)]
config[0].policy.collect.demonstration_info_path = expert_policy_state_dict_path
config[0].policy.learn.update_per_collect = 1
try:
serial_pipeline_sqil(config, seed=0, max_iterations=1)
except Exception:
assert False, "pipeline fail"
| 44.458333
| 120
| 0.818182
|
72fb59ce9d38d8974e58cfff028b0d4334b87320
| 10,709
|
py
|
Python
|
cms/utils/conf.py
|
rspeed/django-cms-contrib
|
c5fbbea191646ab922b5ff6f89a1de6baa648e7f
|
[
"BSD-3-Clause"
] | null | null | null |
cms/utils/conf.py
|
rspeed/django-cms-contrib
|
c5fbbea191646ab922b5ff6f89a1de6baa648e7f
|
[
"BSD-3-Clause"
] | null | null | null |
cms/utils/conf.py
|
rspeed/django-cms-contrib
|
c5fbbea191646ab922b5ff6f89a1de6baa648e7f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from functools import update_wrapper
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves.urllib.parse import urljoin
from cms import constants
__all__ = ['get_cms_setting']
class VERIFIED: pass # need a unique identifier for CMS_LANGUAGES
def default(name):
    def decorator(wrapped):
        def wrapper():
            if hasattr(settings, name):
                return getattr(settings, name)
            return wrapped()
        update_wrapper(wrapper, wrapped)
        # return the wrapper so that a project-level setting actually overrides the default
        return wrapper
    return decorator
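# Example: get_media_root below is decorated with @default('CMS_MEDIA_ROOT'),
# so a project that defines CMS_MEDIA_ROOT in its settings gets that value
# verbatim, and only projects without it fall back to the computed
# MEDIA_ROOT + MEDIA_PATH default.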
DEFAULTS = {
'TEMPLATE_INHERITANCE': True,
'DEFAULT_X_FRAME_OPTIONS': constants.X_FRAME_OPTIONS_INHERIT,
'TOOLBAR_SIMPLE_STRUCTURE_MODE': True,
'PLACEHOLDER_CONF': {},
'PERMISSION': False,
# Whether to use raw ID lookups for users when PERMISSION is True
'RAW_ID_USERS': False,
'PUBLIC_FOR': 'all',
'APPHOOKS': [],
'TOOLBARS': [],
'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
'MEDIA_PATH': 'cms/',
'PAGE_MEDIA_PATH': 'cms_page_media/',
'TITLE_CHARACTER': '+',
'PAGE_CACHE': True,
'PLACEHOLDER_CACHE': True,
'PLUGIN_CACHE': True,
'CACHE_PREFIX': 'cms-',
'PLUGIN_PROCESSORS': [],
'PLUGIN_CONTEXT_PROCESSORS': [],
'UNIHANDECODE_VERSION': None,
'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
'MAX_PAGE_PUBLISH_REVERSIONS': 10,
'MAX_PAGE_HISTORY_REVERSIONS': 15,
'TOOLBAR_ANONYMOUS_ON': True,
'TOOLBAR_URL__EDIT_ON': 'edit',
'TOOLBAR_URL__EDIT_OFF': 'edit_off',
'TOOLBAR_URL__BUILD': 'structure',
'TOOLBAR_URL__DISABLE': 'toolbar_off',
'ADMIN_NAMESPACE': 'admin',
'APP_NAME': None,
'TOOLBAR_HIDE': False,
'INTERNAL_IPS': [],
'REQUEST_IP_RESOLVER': 'cms.utils.request_ip_resolvers.default_request_ip_resolver',
'PAGE_WIZARD_DEFAULT_TEMPLATE': constants.TEMPLATE_INHERITANCE_MAGIC,
'PAGE_WIZARD_CONTENT_PLUGIN': 'TextPlugin',
'PAGE_WIZARD_CONTENT_PLUGIN_BODY': 'body',
'PAGE_WIZARD_CONTENT_PLACEHOLDER': None, # Use first placeholder it finds.
}
def get_cache_durations():
"""
Returns the setting: CMS_CACHE_DURATIONS or the defaults.
"""
return getattr(settings, 'CMS_CACHE_DURATIONS', {
'menus': 60 * 60,
'content': 60,
'permissions': 60 * 60,
})
@default('CMS_MEDIA_ROOT')
def get_media_root():
return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))
@default('CMS_MEDIA_URL')
def get_media_url():
return urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))
@default('CMS_TOOLBAR_URL__EDIT_ON')
def get_toolbar_url__edit_on():
return get_cms_setting('TOOLBAR_URL__EDIT_ON')
@default('CMS_TOOLBAR_URL__EDIT_OFF')
def get_toolbar_url__edit_off():
return get_cms_setting('TOOLBAR_URL__EDIT_OFF')
@default('CMS_TOOLBAR_URL__BUILD')
def get_toolbar_url__structure():
return get_cms_setting('TOOLBAR_URL__BUILD')
@default('CMS_TOOLBAR_URL__DISABLE')
def get_toolbar_url__disable():
return get_cms_setting('TOOLBAR_URL__DISABLE')
def get_templates():
from cms.utils.django_load import load_from_file
if getattr(settings, 'CMS_TEMPLATES_DIR', False):
tpldir = getattr(settings, 'CMS_TEMPLATES_DIR', False)
        # CMS_TEMPLATES_DIR can either be a string pointing to the templates directory
        # or a dictionary holding 'site: template dir' entries
if isinstance(tpldir, dict):
tpldir = tpldir[settings.SITE_ID]
        # We must extract the relative path of CMS_TEMPLATES_DIR to the nearest
        # valid templates directory. Here we mimic what the filesystem and
        # app_directories template loaders do
        prefix = ''
        # Relative to TEMPLATES['DIRS'] for the filesystem loader
path = [template['DIRS'][0] for template in settings.TEMPLATES]
for basedir in path:
if tpldir.find(basedir) == 0:
prefix = tpldir.replace(basedir + os.sep, '')
break
# Relative to 'templates' directory that app_directory scans
if not prefix:
components = tpldir.split(os.sep)
try:
prefix = os.path.join(*components[components.index('templates') + 1:])
except ValueError:
# If templates is not found we use the directory name as prefix
# and hope for the best
prefix = os.path.basename(tpldir)
config_path = os.path.join(tpldir, '__init__.py')
# Try to load templates list and names from the template module
# If module file is not present skip configuration and just dump the filenames as templates
if os.path.isfile(config_path):
template_module = load_from_file(config_path)
templates = [(os.path.join(prefix, data[0].strip()), data[1]) for data in template_module.TEMPLATES.items()]
else:
templates = list((os.path.join(prefix, tpl), tpl) for tpl in os.listdir(tpldir))
else:
templates = list(getattr(settings, 'CMS_TEMPLATES', []))
if get_cms_setting('TEMPLATE_INHERITANCE'):
templates.append((constants.TEMPLATE_INHERITANCE_MAGIC, _('Inherit the template of the nearest ancestor')))
return templates
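# Worked example for get_templates: with TEMPLATES[0]['DIRS'] = ['/srv/proj/templates']
# and CMS_TEMPLATES_DIR = '/srv/proj/templates/cms_custom' (and no __init__.py in that
# directory), the computed prefix is 'cms_custom', so a file 'page.html' is exposed as
# the choice ('cms_custom/page.html', 'page.html').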
def _ensure_languages_settings(languages):
valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated', 'redirect_on_fallback', 'public']
required_language_keys = ['code', 'name']
simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']
if not isinstance(languages, dict):
raise ImproperlyConfigured(
"CMS_LANGUAGES must be a dictionary with site IDs and 'default'"
" as keys. Please check the format.")
defaults = languages.pop('default', {})
default_fallbacks = defaults.get('fallbacks')
needs_fallbacks = []
for key in defaults:
if key not in valid_language_keys:
raise ImproperlyConfigured("CMS_LANGUAGES has an invalid property in the default properties: %s" % key)
for key in simple_defaults:
if key not in defaults:
defaults[key] = True
for site, language_list in languages.items():
if site != hash(site):
raise ImproperlyConfigured(
"CMS_LANGUAGES can only be filled with integers (site IDs) and 'default'"
" for default values. %s is not a valid key." % site)
for language_object in language_list:
for required_key in required_language_keys:
if required_key not in language_object:
                    raise ImproperlyConfigured("CMS_LANGUAGES has a language which is missing the required key %r "
                                               "in site %r" % (required_key, site))
language_code = language_object['code']
for key in language_object:
if key not in valid_language_keys:
raise ImproperlyConfigured(
"CMS_LANGUAGES has invalid key %r in language %r in site %r" % (key, language_code, site)
)
if 'fallbacks' not in language_object:
if default_fallbacks:
language_object['fallbacks'] = default_fallbacks
else:
needs_fallbacks.append((site, language_object))
for key in simple_defaults:
if key not in language_object:
language_object[key] = defaults[key]
site_fallbacks = {}
for site, language_object in needs_fallbacks:
if site not in site_fallbacks:
site_fallbacks[site] = [lang['code'] for lang in languages[site] if lang['public']]
language_object['fallbacks'] = [lang_code for lang_code in site_fallbacks[site] if
lang_code != language_object['code']]
languages['default'] = defaults
languages[VERIFIED] = True # this will be busted by @override_settings and cause a re-check
return languages
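# Worked example for _ensure_languages_settings: the minimal input
#   {1: [{'code': 'en', 'name': 'English'}, {'code': 'de', 'name': 'German'}]}
# gains public / redirect_on_fallback / hide_untranslated = True on both
# languages, plus fallbacks ['de'] for 'en' and ['en'] for 'de', and a
# 'default' entry holding the three boolean defaults.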
def get_languages():
if settings.SITE_ID != hash(settings.SITE_ID):
raise ImproperlyConfigured(
"SITE_ID must be an integer"
)
if not settings.USE_I18N:
return _ensure_languages_settings(
{settings.SITE_ID: [{'code': settings.LANGUAGE_CODE, 'name': settings.LANGUAGE_CODE}]})
if settings.LANGUAGE_CODE not in dict(settings.LANGUAGES):
raise ImproperlyConfigured(
'LANGUAGE_CODE "%s" must have a matching entry in LANGUAGES' % settings.LANGUAGE_CODE
)
languages = getattr(settings, 'CMS_LANGUAGES', {
settings.SITE_ID: [{'code': code, 'name': _(name)} for code, name in settings.LANGUAGES]
})
if VERIFIED in languages:
return languages
return _ensure_languages_settings(languages)
def get_unihandecode_host():
host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
if not host:
return host
if host.endswith('/'):
return host
else:
return host + '/'
COMPLEX = {
'CACHE_DURATIONS': get_cache_durations,
'MEDIA_ROOT': get_media_root,
'MEDIA_URL': get_media_url,
# complex because not prefixed by CMS_
'TEMPLATES': get_templates,
'LANGUAGES': get_languages,
'UNIHANDECODE_HOST': get_unihandecode_host,
'CMS_TOOLBAR_URL__EDIT_ON': get_toolbar_url__edit_on,
'CMS_TOOLBAR_URL__EDIT_OFF': get_toolbar_url__edit_off,
'CMS_TOOLBAR_URL__BUILD': get_toolbar_url__structure,
'CMS_TOOLBAR_URL__DISABLE': get_toolbar_url__disable,
}
DEPRECATED_CMS_SETTINGS = {
# Old CMS_WIZARD_* settings to be removed in v3.5.0
'PAGE_WIZARD_DEFAULT_TEMPLATE': 'WIZARD_DEFAULT_TEMPLATE',
'PAGE_WIZARD_CONTENT_PLUGIN': 'WIZARD_CONTENT_PLUGIN',
'PAGE_WIZARD_CONTENT_PLUGIN_BODY': 'WIZARD_CONTENT_PLUGIN_BODY',
'PAGE_WIZARD_CONTENT_PLACEHOLDER': 'WIZARD_CONTENT_PLACEHOLDER',
}
def get_cms_setting(name):
if name in COMPLEX:
return COMPLEX[name]()
elif name in DEPRECATED_CMS_SETTINGS:
new_setting = 'CMS_%s' % name
old_setting = 'CMS_%s' % DEPRECATED_CMS_SETTINGS[name]
return getattr(settings, new_setting, getattr(settings, old_setting, DEFAULTS[name]))
return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])
def get_site_id(site):
from django.contrib.sites.models import Site
if isinstance(site, Site):
return site.id
try:
return int(site)
except (TypeError, ValueError):
pass
return settings.SITE_ID
| 36.301695
| 120
| 0.66953
|
ce1be3cf2f9052b4b6f7c4642d8d92cf96422f23
| 22,579
|
py
|
Python
|
bs_seeker2-align.py
|
huboqiang/BSseeker2
|
385f88cf78b3efced75798c00f0e7185ac064047
|
[
"MIT"
] | null | null | null |
bs_seeker2-align.py
|
huboqiang/BSseeker2
|
385f88cf78b3efced75798c00f0e7185ac064047
|
[
"MIT"
] | null | null | null |
bs_seeker2-align.py
|
huboqiang/BSseeker2
|
385f88cf78b3efced75798c00f0e7185ac064047
|
[
"MIT"
] | 1
|
2021-11-01T03:21:47.000Z
|
2021-11-01T03:21:47.000Z
|
#!/usr/bin/env python
from optparse import OptionParser, OptionGroup
import re
import tempfile
from bs_align import output
from bs_align.bs_pair_end import *
from bs_align.bs_single_end import *
from bs_align.bs_rrbs import *
import os
#import re
#from bs_utils.utils import *
if __name__ == '__main__':
parser = OptionParser(usage="Usage: %prog {-i <single> | -1 <mate1> -2 <mate2>} -g <genome.fa> [options]")
# option group 1
opt_group = OptionGroup(parser, "For single end reads")
opt_group.add_option("-i", "--input", type="string", dest="infilename",help="Input read file (FORMAT: sequences, qseq, fasta, fastq). Ex: read.fa or read.fa.gz", metavar="INFILE")
parser.add_option_group(opt_group)
# option group 2
opt_group = OptionGroup(parser, "For pair end reads")
opt_group.add_option("-1", "--input_1", type="string", dest="infilename_1",help="Input read file, mate 1 (FORMAT: sequences, qseq, fasta, fastq)", metavar="FILE")
opt_group.add_option("-2", "--input_2", type="string", dest="infilename_2",help="Input read file, mate 2 (FORMAT: sequences, qseq, fasta, fastq)", metavar="FILE")
opt_group.add_option("-I", "--minins",type = "int",dest = "min_insert_size", help="The minimum insert size for valid paired-end alignments [Default: %default]", default = 0)
opt_group.add_option("-X", "--maxins",type = "int",dest = "max_insert_size", help="The maximum insert size for valid paired-end alignments [Default: %default]", default = 500)
parser.add_option_group(opt_group)
# option group 3
opt_group = OptionGroup(parser, "Reduced Representation Bisulfite Sequencing Options")
opt_group.add_option("-r", "--rrbs", action="store_true", dest="rrbs", default = False, help = 'Map reads to the Reduced Representation genome')
opt_group.add_option("-c", "--cut-site", type="string",dest="cut_format", help="Cutting sites of restriction enzyme. Ex: MspI(C-CGG), Mael:(C-TAG), double-enzyme MspI&Mael:(C-CGG,C-TAG). [Default: %default]", metavar="pattern", default = "C-CGG")
opt_group.add_option("-L", "--low", type = "int", dest="rrbs_low_bound", help="Lower bound of fragment length (excluding C-CGG ends) [Default: %default]", default = 20)
opt_group.add_option("-U", "--up", type = "int", dest="rrbs_up_bound", help="Upper bound of fragment length (excluding C-CGG ends) [Default: %default]", default = 500)
parser.add_option_group(opt_group)
# option group 4
opt_group = OptionGroup(parser, "General options")
opt_group.add_option("-t", "--tag", type="string", dest="taginfo",help="[Y]es for undirectional lib, [N]o for directional [Default: %default]", metavar="TAG", default = 'N')
opt_group.add_option("-s","--start_base",type = "int",dest = "cutnumber1", help="The first cycle of the read to be mapped [Default: %default]", default = 1)
opt_group.add_option("-e","--end_base",type = "int",dest = "cutnumber2", help="The last cycle of the read to be mapped [Default: %default]", default = 200)
    opt_group.add_option("-a", "--adapter", type="string", dest="adapter_file",help="Input text file of your adaptor sequences (to be trimmed from the 3' end of the reads). "
                                                                                    "Input one seq for dir. lib., two seqs for undir. lib. One line per sequence. "
                                                                                    "Only the first 10bp will be used", metavar="FILE", default = '')
opt_group.add_option("--am",type = "int",dest = "adapter_mismatch", help="Number of mismatches allowed in adapter [Default: %default]", default = 0)
opt_group.add_option("-g", "--genome", type="string", dest="genome",help="Name of the reference genome (should be the same as \"-f\" in bs_seeker2-build.py ) [ex. chr21_hg18.fa]")
opt_group.add_option("-m", "--mismatches",type = "float", dest="no_mismatches",help="Number(>=1)/Percentage([0, 1)) of mismatches in one read. Ex: 4 (allow 4 mismatches) or 0.04 (allow 4% mismatches) [Default: %default]", default = 4)
opt_group.add_option("--aligner", dest="aligner",help="Aligner program for short reads mapping: " + ', '.join(supported_aligners) + " [Default: %default]", metavar="ALIGNER", default = BOWTIE)
opt_group.add_option("-p", "--path", dest="aligner_path", help="Path to the aligner program. Detected: " +' '*70+ '\t'.join(('%s: %s '+' '*70) % (al, aligner_path[al]) for al in sorted(supported_aligners)),
metavar="PATH"
)
opt_group.add_option("-d", "--db", type="string", dest="dbpath",help="Path to the reference genome library (generated in preprocessing genome) [Default: %default]" , metavar="DBPATH", default = reference_genome_path)
opt_group.add_option("-l", "--split_line",type = "int", dest="no_split",help="Number of lines per split (the read file will be split into small files for mapping. The result will be merged. [Default: %default]", default = 4000000, metavar="INT")
opt_group.add_option("-o", "--output", type="string", dest="outfilename",help="The name of output file [INFILE.bs(se|pe|rrbs)]", metavar="OUTFILE")
opt_group.add_option("-f", "--output-format", type="string", dest="output_format",help="Output format: "+', '.join(output.formats)+" [Default: %default]", metavar="FORMAT", default = output.BAM)
opt_group.add_option("--no-header", action="store_true", dest="no_SAM_header",help="Suppress SAM header lines [Default: %default]", default = False)
try:
opt_group.add_option("--temp_dir", type="string", dest="temp_dir",help="The path to your temporary directory [Detected: %default]", metavar="PATH", default = os.environ["TMPDIR"])
except:
opt_group.add_option("--temp_dir", type="string", dest="temp_dir",help="The path to your temporary directory [Detected: %default]", metavar="PATH", default = tempfile.gettempdir())
    opt_group.add_option("--XS",type = "string", dest="XS_filter",help="Filter definition for tag XS, format X,Y. X=0.8 and Y=5 indicate that for one read, if #(mCH sites)/#(all CH sites)>0.8 and #(mCH sites)>5, then tag XS=1; otherwise tag XS=0. [Default: %default]", default = "0.5,5") # added by weilong
opt_group.add_option("-M", "--multiple-hit", metavar="FileName", type="string", dest="Output_multiple_hit", default = None, help = 'File to store reads with multiple-hits')
opt_group.add_option("-u", "--unmapped", metavar="FileName", type="string", dest="Output_unmapped_hit", default = None, help = 'File to store unmapped reads')
opt_group.add_option("-v", "--version", action="store_true", dest="version",help="show version of BS-Seeker2", metavar="version", default = False)
parser.add_option_group(opt_group)
# option group 5
opt_group = OptionGroup(parser, "Aligner Options",
"You may specify any additional options for the aligner. You just have to prefix them with " +
', '.join('%s for %s' % (aligner_options_prefixes[aligner], aligner) for aligner in supported_aligners)+
', and BS-Seeker2 will pass them on. For example: --bt-p 4 will increase the number of threads for bowtie to 4, '
'--bt--tryhard will instruct bowtie to try as hard as possible to find valid alignments when they exist, and so on. ')
parser.add_option_group(opt_group)
#----------------------------------------------------------------
# separate aligner options from BS Seeker options
aligner_options = {}
bs_seeker_options = []
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
m = re.match(r'^%s' % '|'.join('(%s)'% aligner_options_prefixes[al] for al in supported_aligners), arg)
if m:
a_opt = arg.replace(m.group(0),'-',1)
aligner_options[a_opt] = []
while i + 1 < len(sys.argv) and sys.argv[i+1][0] != '-':
aligner_options[a_opt].append(sys.argv[i+1])
i += 1
if len(aligner_options[a_opt]) == 0: # if it is a key-only option
aligner_options[a_opt] = True
else:
bs_seeker_options.append(arg)
i += 1
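    # Example (taken from the option-group text above): the command-line tail
    #   --bt-p 4 --bt--tryhard
    # ends up as aligner_options = {'-p': ['4'], '--tryhard': True}, while every
    # non-prefixed argument is passed on to the BS-Seeker option parser below.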
(options, args) = parser.parse_args(args = bs_seeker_options)
# if no options were given by the user, print help and exit
if len(sys.argv) == 1:
parser.print_help()
exit(0)
if options.version :
show_version()
exit (-1)
else :
show_version()
# check parameters
# input read files
if options.infilename and (options.infilename_1 or options.infilename_2):
error('-i and [-1|-2] options are exclusive. You should use only one of them.')
if not (options.infilename or (options.infilename_1 and options.infilename_2)):
error('You should set either -i or -1 and -2 options.')
# Calculate the length of read
if options.infilename :
read_file = options.infilename
elif options.infilename_1 :
read_file = options.infilename_1
else :
error('You should at least specify -i or -1 options.')
try :
if read_file.endswith(".gz") : # support input file ending with ".gz"
read_inf = gzip.open(read_file, "rb")
else :
read_inf=open(read_file,"r")
except IOError :
print "[Error] Cannot open input file : %s" % read_file
exit(-1)
oneline = read_inf.readline()
oneline = read_inf.readline() # get the second line
read_len = min(len(oneline), (options.cutnumber2-options.cutnumber1))
read_inf.close()
    # mismatches allowed: bowtie 1, built-in parameter '-m'; bowtie 2, post-filter parameter
    # the number of mismatches should be no greater than the read length
no_mismatches = float(options.no_mismatches)
if (no_mismatches < 1) :
int_no_mismatches=int(no_mismatches * read_len)
else :
int_no_mismatches=int(no_mismatches)
str_no_mismatches=str(options.no_mismatches) # pass to specific mode
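    # Worked example: with read_len = 100, "-m 4" gives int_no_mismatches = 4
    # (an absolute count), while "-m 0.04" gives int(0.04 * 100) = 4
    # (a fraction of the read length).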
# -t, directional / un-directional library
asktag=str(options.taginfo).upper()
if asktag not in 'YN':
error('-t option should be either Y or N, not %s' % asktag)
# -a
if options.aligner not in supported_aligners:
error('-a option should be: %s' % ' ,'.join(supported_aligners)+'.')
# path for aligner
aligner_exec = os.path.expanduser( os.path.join(options.aligner_path or aligner_path[options.aligner], options.aligner) )
# -g
if options.genome is None:
error('-g is a required option')
genome = os.path.split(options.genome)[1]
genome_subdir = genome
# try to guess the location of the reference genome for RRBS
if options.rrbs:
if options.rrbs_low_bound and options.rrbs_up_bound:
if options.cut_format == "C-CGG" :
genome_subdir += '_rrbs_%d_%d' % (options.rrbs_low_bound, options.rrbs_up_bound)
else :
genome_subdir += '_rrbs_%s_%d_%d' % ( re.sub(",","-",re.sub("-", "", options.cut_format)), options.rrbs_low_bound, options.rrbs_up_bound)
else:
possible_refs = filter(lambda dir: dir.startswith(genome+'_rrbs_'), os.listdir(options.dbpath))
if len(possible_refs) == 1:
genome_subdir = possible_refs[0]
else:
error('Cannot localize unambiguously the reference genome for RRBS. '
'Please, specify the options \"--low\" and \"--up\" that you used at the index-building step.\n'
'Possible choices are:\n' + '\n'.join([pr.split('_rrbs_')[-1].replace('_',', ') for pr in possible_refs]))
db_path = os.path.expanduser(os.path.join(options.dbpath, genome_subdir + '_' + options.aligner))
if not os.path.isdir(db_path):
error('Index DIR \"' + genome_subdir + '..\" cannot be found in ' + options.dbpath +'.\n\tPlease run the bs_seeker2-build.py '
'to create it with the correct parameters for -g, -r, --low, --up and --aligner.')
# default aligner options
aligner_options_defaults = {
BOWTIE : { '-e' : 40*int_no_mismatches,
'--nomaqround' : True,
'--norc' : True,
#'-k' : 2,
# -k=2; report two best hits, and filter by error rates
'--quiet' : True,
'--best' : True,
# '--suppress' : '2,5,6',
'--sam' : True,
'--sam-nohead' : True,
'-p' : 2
},
BOWTIE2 : {
#'-M' : 5,
'--norc' : True,
'--quiet' : True,
'-p' : 2,
'--sam-nohead' : True,
# run bowtie2 in local mode by default
'--local' : '--end-to-end' not in aligner_options,
#'--mm' : True,
#'-k' : 2
},
SOAP : { '-v' : int_no_mismatches,
'-p' : 2,
'-r' : 2,
'-M' : 4
},
RMAP : { '-M' : 2
# to do # control for only mapping on + strand
}
}
if '--end-to-end' not in aligner_options:
aligner_options_defaults[BOWTIE2].update({'-D' : 50})
#aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-R': 3, '-N': 0, '-L': 15, '-i' : 'S,1,0.50'})
else:
aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-L': 15, '--score-min': 'L,-0.6,-0.6' })
aligner_options = dict(aligner_options_defaults[options.aligner], **aligner_options)
aligner_options_string = lambda : ' %s ' % (' '.join(opt_key +
(' ' + ' '.join(map(str,opt_val)) # join all values if the value is an array
if type(opt_val) is list else
('' if type(opt_val) is bool and opt_val # output an empty string if it is a key-only option
else ' ' +str(opt_val)) # output the value if it is a single value
)
for opt_key, opt_val in aligner_options.iteritems() if opt_val not in [None, False]))
# tmp_path = (options.outfilename or options.infilename or options.infilename_1) +'-'+ options.aligner+ '-TMP'
# clear_dir(tmp_path)
options.output_format = options.output_format.lower()
if options.output_format not in output.formats:
error('Output format should be one of: ' + ', '.join(output.formats))
if options.outfilename:
outfilename = options.outfilename
logfilename = outfilename
elif options.infilename is not None:
logfilename = options.infilename+'_'+ ('rr' if options.rrbs else '') + 'bsse'
outfilename = logfilename + '.' + options.output_format
else:
logfilename = options.infilename_1+'_'+ ('rr' if options.rrbs else '') + 'bspe'
outfilename = logfilename + '.' + options.output_format
outfilename = os.path.expanduser(outfilename)
logfilename = os.path.expanduser(logfilename)
outfile = output.outfile(outfilename, options.output_format, deserialize(os.path.join(db_path, 'refname')), ' '.join(sys.argv), options.no_SAM_header)
open_log(logfilename+'.bs_seeker2_log')
aligner_title = options.aligner
if options.aligner == BOWTIE2 :
if '--end-to-end' in aligner_options :
aligner_title = aligner_title + "-e2e"
else:
aligner_title = aligner_title + "-local"
if options.aligner == BOWTIE :
logm("Mode: Bowtie")
elif options.aligner == BOWTIE2 :
if '--end-to-end' not in aligner_options :
logm("Mode: Bowtie2, local alignment")
else :
logm("Mode: Bowtie2, end-to-end alignment")
tmp_path = tempfile.mkdtemp(prefix='bs_seeker2_%s_-%s-TMP-' % (os.path.split(outfilename)[1], aligner_title ), dir = options.temp_dir)
(XS_x, XS_y) = options.XS_filter.split(",")
XS_pct = float(XS_x)
XS_count = int(XS_y)
logm('Filter for tag XS: #(mCH)/#(all CH)>%.2f%% and #(mCH)>%d' % (XS_pct*100, XS_count))
logm('Temporary directory: %s' % tmp_path)
logm('Reduced Representation Bisulfite Sequencing: %s' % str(options.rrbs))
if options.infilename is not None:
logm('Single end')
aligner_command = aligner_exec + aligner_options_string() + \
{ BOWTIE : ' -k 2 %(reference_genome)s -f %(input_file)s %(output_file)s',
BOWTIE2 : ' -k 2 -x %(reference_genome)s -f -U %(input_file)s -S %(output_file)s',
SOAP : ' -D %(reference_genome)s.fa.index -o %(output_file)s -a %(input_file)s',
RMAP : ' -c %(reference_genome)s.fa -o %(output_file)s %(input_file)s'
}[options.aligner]
logm ('Aligner command: %s' % aligner_command)
# single end reads
if options.rrbs: # RRBS scan
bs_rrbs(options.infilename,
asktag,
options.adapter_file,
options.cutnumber1,
options.cutnumber2,
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit,
options.cut_format
)
else: # Normal single end scan
bs_single_end( options.infilename,
asktag,
options.adapter_file,
options.cutnumber1,
options.cutnumber2,
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit
)
else:
logm('Pair end')
# pair end specific default options
aligner_options = dict({BOWTIE: {'--fr' : True,
'-X' : options.max_insert_size,
'-I' : options.min_insert_size if options.min_insert_size > 0 else None,
'-a' : True # "-k 2" in bowtie would not report the best two
},
BOWTIE2 : {
'--fr' : True,
'-X' : options.max_insert_size,
'-I' : options.min_insert_size if options.min_insert_size > 0 else None,
'--no-discordant' : True,
'--no-mixed' : True,
'-k' : 2
},
SOAP: {
'-x' : options.max_insert_size,
'-m' : options.min_insert_size if options.min_insert_size > 0 else 100
}}[options.aligner],
# integrating 'rmappe' is different from others
**aligner_options)
aligner_command = aligner_exec + aligner_options_string() + \
{ BOWTIE : ' %(reference_genome)s -f -1 %(input_file_1)s -2 %(input_file_2)s %(output_file)s',
BOWTIE2 : ' -x %(reference_genome)s -f -1 %(input_file_1)s -2 %(input_file_2)s -S %(output_file)s',
SOAP : ' -D %(reference_genome)s.fa.index -o %(output_file)s -a %(input_file_1)s -b %(input_file_2)s -2 %(output_file)s.unpaired' #,
# RMAP : # rmappe, also paste two inputs into one file.
}[options.aligner]
logm('Aligner command: %s' % aligner_command)
if '--end-to-end' not in aligner_options:
aligner_options_defaults[BOWTIE2].update({'-D' : 50})
else:
aligner_options_defaults[BOWTIE2].update({'-D' : 50, '-L': 15, '--score-min': 'L,-0.6,-0.6' })
bs_pair_end(options.infilename_1,
options.infilename_2,
asktag,
options.adapter_file,
options.cutnumber1,
options.cutnumber2,
options.no_split,
str_no_mismatches,
aligner_command,
db_path,
tmp_path,
outfile,
XS_pct,
XS_count,
options.adapter_mismatch,
options.Output_multiple_hit,
options.Output_unmapped_hit
)
outfile.close()
| 56.166667
| 304
| 0.538642
|
05bdc090c8242089b86e8fc4740f72cec85f402d
| 4,629
|
py
|
Python
|
tests/test_connector.py
|
zzx4998/Python-Honeypot
|
78d4258bdaaacf8f0b54349f8d00c560eb6de704
|
[
"Apache-2.0"
] | 1
|
2021-05-02T12:16:21.000Z
|
2021-05-02T12:16:21.000Z
|
tests/test_connector.py
|
zzx4998/Python-Honeypot
|
78d4258bdaaacf8f0b54349f8d00c560eb6de704
|
[
"Apache-2.0"
] | 3
|
2021-07-28T01:25:31.000Z
|
2022-03-29T22:10:45.000Z
|
tests/test_connector.py
|
zzx4998/Python-Honeypot
|
78d4258bdaaacf8f0b54349f8d00c560eb6de704
|
[
"Apache-2.0"
] | 1
|
2021-06-24T17:22:08.000Z
|
2021-06-24T17:22:08.000Z
|
import unittest
import time
from datetime import datetime
from multiprocessing import Queue
from database.connector import (credential_events,
data_events,
honeypot_events,
insert_to_credential_events_collection,
insert_to_events_data_collection,
insert_to_honeypot_events_queue,
insert_to_network_events_queue,
network_events,
push_events_queues_to_database)
from database.datatypes import (CredentialEvent,
EventData,
HoneypotEvent,
NetworkEvent)
class TestConnector(unittest.TestCase):
def test_push_event_queues_to_db(self):
"""
Test pushing Honeypot and network events from queues
to database.
"""
honeypot_event = HoneypotEvent(
ip_dest="11.22.33.44",
port_dest=80,
ip_src="12.23.34.45",
port_src=1010,
protocol='TCP',
module_name="http/basic_auth_weak_password",
machine_name="stockholm_server_1"
)
network_event = NetworkEvent(
ip_dest="13.14.15.16",
port_dest=8090,
ip_src="22.33.44.55",
port_src=1100,
protocol='UDP',
machine_name="stockholm_server_1"
)
honeypot_events_queue = Queue()
network_events_queue = Queue()
# Insert events to queues
insert_to_honeypot_events_queue(honeypot_event, honeypot_events_queue)
insert_to_network_events_queue(network_event, network_events_queue)
push_events_queues_to_database(honeypot_events_queue, network_events_queue)
        # wait for the queues to be flushed to the database before querying
        time.sleep(5)
        # Find the records in the DB
        honeypot_record = honeypot_events.find_one(honeypot_event.__dict__)
        network_record = network_events.find_one(network_event.__dict__)
# Compare the record found in the DB with the one pushed
self.assertEqual(honeypot_record["ip_src"], honeypot_event.ip_src)
self.assertEqual(honeypot_record["ip_dest"], honeypot_event.ip_dest)
self.assertEqual(network_record["ip_src"], network_event.ip_src)
self.assertEqual(network_record["ip_dest"], network_event.ip_dest)
# Delete test events from the database
honeypot_events.delete_one(honeypot_event.__dict__)
network_events.delete_one(network_event.__dict__)
def test_insert_to_credential_events(self):
"""
Test the data insertion to the credential_events collection
"""
credential_event = CredentialEvent(
ip="88.99.11.22",
username="admin",
password="password",
module_name="http/basic_auth_weak_password",
date=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
)
insert_to_credential_events_collection(credential_event)
# Find the record in the DB
credential_record = credential_events.find_one(
credential_event.__dict__
)
# Compare the record found in the DB with the one pushed
self.assertEqual(
credential_record["ip"],
credential_event.ip
)
self.assertEqual(
credential_record["username"],
credential_event.username
)
self.assertEqual(
credential_record["password"],
credential_event.password
)
# Delete test events from the database
# credential_events.delete_one(credential_event.__dict__)
def test_insert_events_data(self):
"""
Test the data insertion to the events_data collection
"""
event_data = EventData(
ip="55.66.77.88",
module_name="ics/veeder_root_guardian_ast",
date=datetime.now(),
data="Test Data"
)
insert_to_events_data_collection(event_data)
# Find the record in the DB
event_record_data = data_events.find_one(event_data.__dict__)
# Compare the record found in the DB with the one pushed
self.assertEqual(event_record_data["ip"], event_data.ip)
self.assertEqual(
event_record_data["data"],
event_data.data
)
data_events.delete_one(event_data.__dict__)
if __name__ == '__main__':
unittest.main()
| 32.829787
| 83
| 0.605962
|
88b42ded4a5a32c4b7c77580ae7b5b4384062a5e
| 3,350
|
py
|
Python
|
bin/tumor_vis1.py
|
rheiland/pc4fury
|
41ef56afcfdfc7931fd1b82450f36fd33dfc7697
|
[
"BSD-3-Clause"
] | null | null | null |
bin/tumor_vis1.py
|
rheiland/pc4fury
|
41ef56afcfdfc7931fd1b82450f36fd33dfc7697
|
[
"BSD-3-Clause"
] | null | null | null |
bin/tumor_vis1.py
|
rheiland/pc4fury
|
41ef56afcfdfc7931fd1b82450f36fd33dfc7697
|
[
"BSD-3-Clause"
] | null | null | null |
from pyMCDS_cells import pyMCDS_cells
import numpy as np
from fury import window, actor, ui
#mcds = pyMCDS_cells('output00000001.xml','data')
#mcds = pyMCDS_cells('output00000001.xml','.') # 23123 cells
mcds = pyMCDS_cells('output00000001.xml','../tmpdir') # 23123 cells
#mcds = pyMCDS_cells('output00000006.xml','../tmpdir') # 23123 cells
print('time=',mcds.get_time())
print(mcds.data['discrete_cells'].keys())
#Out[7]: dict_keys(['ID', 'position_x', 'position_y', 'position_z', 'total_volume', 'cell_type', 'cycle_model', 'current_phase', 'elapsed_time_in_phase', 'nuclear_volume', 'cytoplasmic_volume', 'fluid_fraction', 'calcified_fraction', 'orientation_x', 'orientation_y', 'orientation_z', 'polarity', 'migration_speed', 'motility_vector_x', 'motility_vector_y', 'motility_vector_z', 'migration_bias', 'motility_bias_direction_x', 'motility_bias_direction_y', 'motility_bias_direction_z', 'persistence_time', 'motility_reserved', 'oncoprotein', 'elastic_coefficient', 'kill_rate', 'attachment_lifetime', 'attachment_rate'])
# http://www.mathcancer.org/blog/paraview-for-physicell-part-1/
# The following lines assign an integer to represent
# a color, defined in a Color Map.
# sval = 0 # immune cells are yellow?
# if val[5,idx] == 1: # [5]=cell_type
# sval = 1 # lime green
# if (val[6,idx] == 6) or (val[6,idx] == 7):
# sval = 0
# if val[7,idx] == 100: # [7]=current_phase
# sval = 3 # apoptotic: red
# if val[7,idx] > 100 and val[7,idx] < 104:
# sval = 2 # necrotic: brownish
ncells = len(mcds.data['discrete_cells']['ID'])
print('num cells = ',ncells)
#xyz = np.empty((ncells,3))
xyz = np.zeros((ncells,3))
xyz[:,0] = mcds.data['discrete_cells']['position_x']
xyz[:,1] = mcds.data['discrete_cells']['position_y']
xyz[:,2] = mcds.data['discrete_cells']['position_z']
#xyz = np.random.rand((ncells,3))
#xyz[:,]
# sphere V = 4/3 * pi * r^3
# r3 = V * 0.75 / pi
# r = np.cbrt(r3)
cell_radii = mcds.data['discrete_cells']['total_volume'] * 0.75 / np.pi
cell_radii = np.cbrt(cell_radii)
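# Worked example of the inversion above: a typical cell volume of 2494 um^3
# gives r = cbrt(2494 * 0.75 / pi) ~ 8.4 um.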
cell_type = mcds.data['discrete_cells']['cell_type']
print('cell_type min, max= ',cell_type.min(),cell_type.max())
#print(cell_type)
#cd8 = np.where(cell_type == 3.0)
#print('# cd8, macrophage, neutrophil = ',len(cd8[0]), len(macrophage[0]), len(neutrophil[0]) )
# Loop over all output files and store times and counts of cell types
#num_cd8 = np.zeros(n)
#num_neut = np.zeros(n)
rgb = np.zeros((ncells,3))
rgb[:,0] = 1
rgb[:,1] = 1
rgb[:,2] = 0
cell_phase = mcds.data['discrete_cells']['current_phase']
print('cell_phase min, max= ',cell_phase.min(),cell_phase.max()) # e.g., 14.0 100.0
for idx in range(ncells):
if cell_phase[idx] == 100.0:
rgb[idx,1] = 0
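# A fuller mapping, following the ParaView color-map comment above, could look
# like this (sketch only; the thresholds come from that comment, the RGB values
# are assumptions):
def phase_to_color(cell_type_val, phase_val):
    if phase_val == 100.0:
        return (1.0, 0.0, 0.0)        # apoptotic: red
    if 100.0 < phase_val < 104.0:
        return (0.55, 0.27, 0.07)     # necrotic: brownish
    if cell_type_val == 1.0:
        return (0.2, 0.8, 0.2)        # lime green
    return (1.0, 1.0, 0.0)            # default: yellow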
#-----------------------------
scene = window.Scene()
# https://fury.gl/latest/reference/fury.actor.html?highlight=sphere#fury.actor.sphere
colors = (1,0,0)
#sphere_actor = actor.sphere(centers=xyz, colors=colors, radii=1.0)
#sphere_actor = actor.sphere(centers=xyz, colors=colors, radii=cell_radii)
sphere_actor = actor.sphere(centers=xyz, colors=rgb, radii=cell_radii)
scene.add(sphere_actor)
showm = window.ShowManager(scene,
size=(800, 800), reset_camera=True,
order_transparent=False)
showm.initialize()
showm.start()
## window.record(showm.scene, size=(900, 768), out_path="viz_timer.png")
| 40.853659
| 618
| 0.682388
|
3773d4de7f8e30dc59d6dfe6149aadc64f5e778b
| 300
|
py
|
Python
|
tests/context.py
|
cxz/klaws
|
7f072e46a5f7d263b7e059d21591f776c4c7991b
|
[
"MIT"
] | null | null | null |
tests/context.py
|
cxz/klaws
|
7f072e46a5f7d263b7e059d21591f776c4c7991b
|
[
"MIT"
] | null | null | null |
tests/context.py
|
cxz/klaws
|
7f072e46a5f7d263b7e059d21591f776c4c7991b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
#import sample
# httpretty doesn't work with a proxy
for v in ['HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy']:
if v in os.environ:
del os.environ[v]
| 23.076923
| 82
| 0.666667
|
f6c63c358cbf2da400ebc025d9f82d75f7ea05fe
| 9,660
|
py
|
Python
|
modules/text/language_model/bert-base-chinese/module.py
|
eepgxxy/PaddleHub
|
b2b6278f8a1f704994ef36474570bab0e0dbe6c0
|
[
"Apache-2.0"
] | 2
|
2020-12-02T07:29:14.000Z
|
2021-11-12T11:13:31.000Z
|
modules/text/language_model/bert-base-chinese/module.py
|
dwuping/PaddleHub
|
9a3b23295947e22149cc85c17cb4cf23c03f9e06
|
[
"Apache-2.0"
] | null | null | null |
modules/text/language_model/bert-base-chinese/module.py
|
dwuping/PaddleHub
|
9a3b23295947e22149cc85c17cb4cf23c03f9e06
|
[
"Apache-2.0"
] | 1
|
2020-12-03T07:14:34.000Z
|
2020-12-03T07:14:34.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Union, Tuple
import os
from paddle.dataset.common import DATA_HOME
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlehub import BertTokenizer
from paddlehub.module.modeling_bert import BertForSequenceClassification, BertModel
from paddlehub.module.module import moduleinfo, serving
from paddlehub.utils.log import logger
from paddlehub.utils.utils import download
@moduleinfo(
name="bert-base-chinese",
version="2.0.0",
summary=
"bert_chinese_L-12_H-768_A-12, 12-layer, 768-hidden, 12-heads, 110M parameters. The module is executed as paddle.dygraph.",
author="paddlepaddle",
author_email="",
type="nlp/semantic_model")
class Bert(nn.Layer):
"""
Bert model
"""
def __init__(
self,
task=None,
load_checkpoint=None,
label_map=None,
):
super(Bert, self).__init__()
# TODO(zhangxuefei): add token_classification task
if task == 'sequence_classification':
self.model = BertForSequenceClassification.from_pretrained(
pretrained_model_name_or_path='bert-base-chinese')
self.criterion = paddle.nn.loss.CrossEntropyLoss()
self.metric = paddle.metric.Accuracy(name='acc_accumulation')
elif task is None:
self.model = BertModel.from_pretrained(pretrained_model_name_or_path='bert-base-chinese')
else:
raise RuntimeError("Unknown task %s, task should be sequence_classification" % task)
self.task = task
self.label_map = label_map
if load_checkpoint is not None and os.path.isfile(load_checkpoint):
state_dict = paddle.load(load_checkpoint)
self.set_state_dict(state_dict)
logger.info('Loaded parameters from %s' % os.path.abspath(load_checkpoint))
def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, labels=None):
result = self.model(input_ids, token_type_ids, position_ids, attention_mask)
if self.task is not None:
logits = result
probs = F.softmax(logits, axis=1)
if labels is not None:
loss = self.criterion(logits, labels)
correct = self.metric.compute(probs, labels)
acc = self.metric.update(correct)
return probs, loss, acc
return probs
else:
sequence_output, pooled_output = result
return sequence_output, pooled_output
def get_vocab_path(self):
"""
        Gets the path of the module vocabulary file.
"""
save_path = os.path.join(DATA_HOME, 'bert-base-chinese', 'bert-base-chinese-vocab.txt')
if not os.path.exists(save_path) or not os.path.isfile(save_path):
url = "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt"
download(url, os.path.join(DATA_HOME, 'bert-base-chinese'))
return save_path
def get_tokenizer(self, tokenize_chinese_chars=True):
"""
Gets the tokenizer that is customized for this module.
Args:
            tokenize_chinese_chars (:obj:`bool`, defaults to :obj:`True`):
                Whether to tokenize Chinese characters or not.
Returns:
tokenizer (:obj:BertTokenizer) : The tokenizer which was customized for this module.
"""
return BertTokenizer(tokenize_chinese_chars=tokenize_chinese_chars, vocab_file=self.get_vocab_path())
def training_step(self, batch: List[paddle.Tensor], batch_idx: int):
"""
One step for training, which should be called as forward computation.
Args:
batch(:obj:List[paddle.Tensor]): The one batch data, which contains the model needed,
such as input_ids, sent_ids, pos_ids, input_mask and labels.
batch_idx(int): The index of batch.
Returns:
results(:obj: Dict) : The model outputs, such as loss and metrics.
"""
predictions, avg_loss, acc = self(input_ids=batch[0], token_type_ids=batch[1], labels=batch[2])
return {'loss': avg_loss, 'metrics': {'acc': acc}}
def validation_step(self, batch: List[paddle.Tensor], batch_idx: int):
"""
One step for validation, which should be called as forward computation.
Args:
batch(:obj:List[paddle.Tensor]): The one batch data, which contains the model needed,
such as input_ids, sent_ids, pos_ids, input_mask and labels.
batch_idx(int): The index of batch.
Returns:
results(:obj: Dict) : The model outputs, such as metrics.
"""
predictions, avg_loss, acc = self(input_ids=batch[0], token_type_ids=batch[1], labels=batch[2])
return {'metrics': {'acc': acc}}
def predict(self, data, max_seq_len=128, batch_size=1, use_gpu=False):
"""
Predicts the data labels.
Args:
            data (obj:`List(str)`): The processed data, each element of which is a raw text string.
            max_seq_len (:obj:`int`, `optional`, defaults to :obj:`None`):
                If set to a number, limits the total sequence length returned so that it is no longer than this value.
            batch_size(obj:`int`, defaults to 1): The number of samples in each batch.
use_gpu(obj:`bool`, defaults to `False`): Whether to use gpu to run or not.
Returns:
results(obj:`list`): All the predictions labels.
"""
# TODO(zhangxuefei): add task token_classification task predict.
if self.task not in ['sequence_classification']:
raise RuntimeError("The predict method is for sequence_classification task, but got task %s." % self.task)
paddle.set_device('gpu') if use_gpu else paddle.set_device('cpu')
tokenizer = self.get_tokenizer()
examples = []
for text in data:
if len(text) == 1:
encoded_inputs = tokenizer.encode(text[0], text_pair=None, max_seq_len=max_seq_len)
elif len(text) == 2:
encoded_inputs = tokenizer.encode(text[0], text_pair=text[1], max_seq_len=max_seq_len)
else:
raise RuntimeError(
                    'The input text must have one or two sequences, but got %d. Please check your inputs.' % len(text))
examples.append((encoded_inputs['input_ids'], encoded_inputs['segment_ids']))
def _batchify_fn(batch):
input_ids = [entry[0] for entry in batch]
segment_ids = [entry[1] for entry in batch]
return input_ids, segment_ids
        # Separates data into batches.
batches = []
one_batch = []
for example in examples:
one_batch.append(example)
if len(one_batch) == batch_size:
batches.append(one_batch)
one_batch = []
if one_batch:
            # The last batch, whose size is less than the configured batch_size.
batches.append(one_batch)
results = []
self.eval()
for batch in batches:
input_ids, segment_ids = _batchify_fn(batch)
input_ids = paddle.to_tensor(input_ids)
segment_ids = paddle.to_tensor(segment_ids)
# TODO(zhangxuefei): add task token_classification postprocess after prediction.
if self.task == 'sequence_classification':
probs = self(input_ids, segment_ids)
idx = paddle.argmax(probs, axis=1).numpy()
idx = idx.tolist()
labels = [self.label_map[i] for i in idx]
results.extend(labels)
return results
@serving
def get_embedding(self, texts, use_gpu=False):
if self.task is not None:
raise RuntimeError("The get_embedding method is only valid when task is None, but got task %s" % self.task)
paddle.set_device('gpu') if use_gpu else paddle.set_device('cpu')
tokenizer = self.get_tokenizer()
results = []
for text in texts:
if len(text) == 1:
encoded_inputs = tokenizer.encode(text[0], text_pair=None, pad_to_max_seq_len=False)
elif len(text) == 2:
encoded_inputs = tokenizer.encode(text[0], text_pair=text[1], pad_to_max_seq_len=False)
else:
raise RuntimeError(
                    'The input text must have one or two sequences, but got %d. Please check your inputs.' % len(text))
input_ids = paddle.to_tensor(encoded_inputs['input_ids']).unsqueeze(0)
segment_ids = paddle.to_tensor(encoded_inputs['segment_ids']).unsqueeze(0)
sequence_output, pooled_output = self(input_ids, segment_ids)
sequence_output = sequence_output.squeeze(0)
pooled_output = pooled_output.squeeze(0)
results.append((sequence_output.numpy().tolist(), pooled_output.numpy().tolist()))
return results
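# Minimal usage sketch (assumes paddlehub is installed and this module is
# registered under the name given in @moduleinfo above):
# import paddlehub as hub
# module = hub.Module(name='bert-base-chinese')
# embeddings = module.get_embedding(texts=[['今天天气不错']], use_gpu=False)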
| 43.513514
| 127
| 0.636232
|
1e747e7073596f5734524cf96a58ab1cec0aa7e5
| 3,420
|
py
|
Python
|
tensorforce/core/baselines/network_baseline.py
|
bettermanlu/tensorforce
|
b045da9f048897842bcecebfa30bcb94703b07fd
|
[
"Apache-2.0"
] | 1
|
2019-12-21T03:31:33.000Z
|
2019-12-21T03:31:33.000Z
|
tensorforce/core/baselines/network_baseline.py
|
jesuscast/tensorforce-clone
|
524976f9cdbeebb01eb88c77ae842dbe4c4a1f36
|
[
"Apache-2.0"
] | null | null | null |
tensorforce/core/baselines/network_baseline.py
|
jesuscast/tensorforce-clone
|
524976f9cdbeebb01eb88c77ae842dbe4c4a1f36
|
[
"Apache-2.0"
] | 1
|
2019-12-21T03:31:39.000Z
|
2019-12-21T03:31:39.000Z
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce.core.networks import Linear, Network
from tensorforce.core.baselines import Baseline
class NetworkBaseline(Baseline):
"""
Baseline based on a TensorForce network, used when parameters are shared between
the value function and the baseline.
"""
def __init__(self, network_spec, scope='network-baseline', summary_labels=()):
"""
Network baseline.
Args:
network_spec: Network specification dict
"""
self.network = Network.from_spec(
spec=network_spec,
kwargs=dict(summary_labels=summary_labels)
)
assert len(self.network.internal_inputs()) == 0
self.linear = Linear(size=1, bias=0.0, scope='prediction')
super(NetworkBaseline, self).__init__(scope, summary_labels)
def tf_predict(self, states, update):
embedding = self.network.apply(x=states, internals=(), update=update)
prediction = self.linear.apply(x=embedding)
return tf.squeeze(input=prediction, axis=1)
def tf_regularization_loss(self):
"""
Creates the TensorFlow operations for the baseline regularization loss.
Returns:
Regularization loss tensor
"""
regularization_loss = super(NetworkBaseline, self).tf_regularization_loss()
if regularization_loss is None:
losses = list()
else:
losses = [regularization_loss]
regularization_loss = self.network.regularization_loss()
if regularization_loss is not None:
losses.append(regularization_loss)
regularization_loss = self.linear.regularization_loss()
if regularization_loss is not None:
losses.append(regularization_loss)
if len(losses) > 0:
return tf.add_n(inputs=losses)
else:
return None
def get_variables(self, include_non_trainable=False):
baseline_variables = super(NetworkBaseline, self).get_variables(include_non_trainable=include_non_trainable)
network_variables = self.network.get_variables(include_non_trainable=include_non_trainable)
layer_variables = self.linear.get_variables(include_non_trainable=include_non_trainable)
return baseline_variables + network_variables + layer_variables
def get_summaries(self):
baseline_summaries = super(NetworkBaseline, self).get_summaries()
network_summaries = self.network.get_summaries()
layer_summaries = self.linear.get_summaries()
return baseline_summaries + network_summaries + layer_summaries
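# Construction sketch (hypothetical network_spec; the list-of-dicts layer
# format follows the TensorForce network specification):
# baseline = NetworkBaseline(
#     network_spec=[
#         dict(type='dense', size=64),
#         dict(type='dense', size=64),
#     ],
# )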
| 36.382979
| 116
| 0.690351
|
1e9bc646a2ddcdc4d8ab4c0887b52dc9337b83e2
| 4,121
|
py
|
Python
|
old/visualizer/mzn_visualize.py
|
SirChri/square-grid-tiling-shapes
|
a8c3865b4c321d0ebcbfedbe03743a2162e4dca6
|
[
"Apache-2.0"
] | null | null | null |
old/visualizer/mzn_visualize.py
|
SirChri/square-grid-tiling-shapes
|
a8c3865b4c321d0ebcbfedbe03743a2162e4dca6
|
[
"Apache-2.0"
] | null | null | null |
old/visualizer/mzn_visualize.py
|
SirChri/square-grid-tiling-shapes
|
a8c3865b4c321d0ebcbfedbe03743a2162e4dca6
|
[
"Apache-2.0"
] | null | null | null |
import sys, json, re, subprocess, os, shutil
solver = sys.argv[1]
mainfile = sys.argv[2]
inputfile = sys.argv[3]
out_filepath = sys.argv[4] if len(sys.argv) > 4 else None
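# Example invocation (hypothetical file names; the solver must be one
# MiniZinc knows about, e.g. gecode or chuffed):
#   python mzn_visualize.py gecode model.mzn instance.dzn output.html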
file_path = os.path.abspath(os.path.dirname(__file__))
if not out_filepath:
out_filepath = os.path.join(file_path, 'output.html')
basedir = os.path.dirname(os.path.abspath(out_filepath))
os.makedirs(basedir, exist_ok=True)
shutil.copyfile(os.path.join(file_path, 'static.css'), basedir+"/static.css")
bashCommand = 'minizinc "{}" "{}" -O2 --solver {} --time-limit 300000 -p12 -f --output-mode json -s --soln-separator "" --search-complete-msg "OPTIMUM"'.format(mainfile, inputfile, solver)
process = subprocess.Popen(bashCommand, shell=True, stdout=subprocess.PIPE)
output, error = process.communicate()
output = output.decode('utf-8')
jsondata = re.sub(r'^\%.*\n?|^\"\"|^OPTIMUM', '', output, flags=re.MULTILINE).strip()
statsdata = {}
if error:
print("an error has eccurred: {}".format(error))
exit(1)
optimum_found = False
for line in output.splitlines():
    if line.strip() == 'OPTIMUM':
optimum_found = True
    if line.startswith("%%%mzn-stat: "):
line = line.replace("%%%mzn-stat: ", "")
key = line.split("=")[0]
val = line.split("=")[1]
statsdata[key] = val
#output
vis_output = {}
#put some data into the output
vis_output["opt"] = optimum_found
## Input file read
textarea_input = ""
with open(inputfile, "r") as text_file:
textarea_input += '<textarea disabled="disabled" class="input-textarea">'
for x in text_file:
        if x.strip().replace(" ", "").startswith("n="):
            vis_output["n"] = re.findall(r'\d+', x)[0]
        elif x.strip().replace(" ", "").startswith("l="):
            vis_output["l"] = re.findall(r'\d+', x)[0]
        elif x.strip().replace(" ", "").startswith("r="):
            vis_output["r"] = re.findall(r'\d+', x)[0]
        elif x.strip().replace(" ", "").startswith("s="):
            vis_output["s"] = re.findall(r'\d+', x)[0]
        elif x.strip().replace(" ", "").startswith("f="):
            vis_output["f"] = re.findall(r'\d+', x)[0]
textarea_input += x
textarea_input += '</textarea>'
## Data handling
n = 0
div_items = ""
if jsondata != "=====UNKNOWN=====" and jsondata != "" and jsondata != "=====ERROR=====":
data = json.loads(jsondata)
vis_output["cost"] = data["obj"]
else:
print(vis_output)
exit(1)
## Time
vis_output["time"] = 0
stats = "<ul>"
for key in statsdata:
if key == "flatTime" or key == "solveTime":
vis_output["time"] += float(statsdata[key])
value = statsdata[key]
stats += "<li><b>"+key+"</b>: "+str(value)+"</li>"
stats += "<li>Optimum found: "+str(optimum_found)+"</li>"
stats += "</ul>"
for arr in data["Board"]:
n = len(arr)
for j in arr:
val = j["e"]
general_cls = val.lower()[0]
div_items += '<div class="'+general_cls+' '+val.lower()+'"></div>'
out_content = """
<html>
<head>
<title>Output</title>
<link rel="stylesheet" href="static.css">
<style>
.grid {
display: grid;
grid-template-rows: repeat("""+str(n)+""", 1fr);
grid-template-columns: repeat("""+str(n)+""", 1fr);
gap: 0px;
/*margin-top: 60px;*/
height: 95%;
}
</style>
</head>
<body>
<div class="body">
<div class="split left">
<div class="program-instance">
<h2>Program instance</h2>
"""+textarea_input+"""
</div>
<div class="sol-statistics">
<h2>Solution statistics</h2>
"""+stats+"""
</div>
</div>
<div class="split right">
<div class="grid">
"""+div_items+"""
</div>
</div>
</div>
</body>
</html>
"""
with open(out_filepath, "w") as text_file:
text_file.write(out_content)
print(vis_output)
| 29.862319
| 188
| 0.542101
|
75c2ce18b0c40005f4b2277b2dcbfaed18a55175
| 4,596
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_non_resource_rule.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | 1
|
2020-03-25T01:24:27.000Z
|
2020-03-25T01:24:27.000Z
|
kubernetes_asyncio/client/models/v1_non_resource_rule.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_non_resource_rule.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1NonResourceRule(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'non_resource_ur_ls': 'list[str]',
'verbs': 'list[str]'
}
attribute_map = {
'non_resource_ur_ls': 'nonResourceURLs',
'verbs': 'verbs'
}
def __init__(self, non_resource_ur_ls=None, verbs=None): # noqa: E501
"""V1NonResourceRule - a model defined in Swagger""" # noqa: E501
self._non_resource_ur_ls = None
self._verbs = None
self.discriminator = None
if non_resource_ur_ls is not None:
self.non_resource_ur_ls = non_resource_ur_ls
self.verbs = verbs
@property
def non_resource_ur_ls(self):
"""Gets the non_resource_ur_ls of this V1NonResourceRule. # noqa: E501
NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \"*\" means all. # noqa: E501
:return: The non_resource_ur_ls of this V1NonResourceRule. # noqa: E501
:rtype: list[str]
"""
return self._non_resource_ur_ls
@non_resource_ur_ls.setter
def non_resource_ur_ls(self, non_resource_ur_ls):
"""Sets the non_resource_ur_ls of this V1NonResourceRule.
NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \"*\" means all. # noqa: E501
:param non_resource_ur_ls: The non_resource_ur_ls of this V1NonResourceRule. # noqa: E501
:type: list[str]
"""
self._non_resource_ur_ls = non_resource_ur_ls
@property
def verbs(self):
"""Gets the verbs of this V1NonResourceRule. # noqa: E501
Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all. # noqa: E501
:return: The verbs of this V1NonResourceRule. # noqa: E501
:rtype: list[str]
"""
return self._verbs
@verbs.setter
def verbs(self, verbs):
"""Sets the verbs of this V1NonResourceRule.
Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all. # noqa: E501
:param verbs: The verbs of this V1NonResourceRule. # noqa: E501
:type: list[str]
"""
if verbs is None:
raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
self._verbs = verbs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NonResourceRule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
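if __name__ == '__main__':
    # Minimal smoke test (illustrative values only; not part of the
    # generated API surface):
    rule = V1NonResourceRule(non_resource_ur_ls=['/healthz'], verbs=['get'])
    print(rule.to_dict())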
| 31.916667
| 178
| 0.596606
|
d8fee0180da20afe120b4fe30d12d9d3122cfcd8
| 982
|
py
|
Python
|
pylava/lint/pylava_radon.py
|
exKAZUu/pylava
|
1a42d0522fc2bb04f2849ac5544e9b1da8f7e298
|
[
"MIT"
] | 76
|
2018-08-17T11:08:09.000Z
|
2018-10-01T16:36:50.000Z
|
pylava/lint/pylava_radon.py
|
exKAZUu/pylava
|
1a42d0522fc2bb04f2849ac5544e9b1da8f7e298
|
[
"MIT"
] | 5
|
2018-12-05T09:36:40.000Z
|
2020-10-01T19:43:26.000Z
|
pylava/lint/pylava_radon.py
|
exKAZUu/pylava
|
1a42d0522fc2bb04f2849ac5544e9b1da8f7e298
|
[
"MIT"
] | 7
|
2018-12-05T05:17:53.000Z
|
2021-09-27T09:05:06.000Z
|
from radon.visitors import ComplexityVisitor
from radon.complexity import add_inner_blocks
from pylava.lint import Linter as Abstract
class Linter(Abstract):
"""Radon runner."""
@staticmethod
def run(path, code=None, params=None, ignore=None, select=None, **meta):
"""Check code with Radon.
:return list: List of errors.
"""
complexity = params.get('complexity', 10)
no_assert = params.get('no_assert', False)
show_closures = params.get('show_closures', False)
visitor = ComplexityVisitor.from_code(code, no_assert=no_assert)
blocks = visitor.blocks
if show_closures:
blocks = add_inner_blocks(blocks)
        return [
            {'lnum': block.lineno, 'col': block.col_offset, 'type': 'R', 'number': 'R701',
             'text': 'R701: %s is too complex %d' % (block.name, block.complexity)}
            for block in blocks if block.complexity > complexity
        ]
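if __name__ == '__main__':
    # Smoke test (a sketch; 'example.py' is a hypothetical path, the source
    # is passed inline so nothing is read from disk):
    sample = "def f(x):\n    if x:\n        return 1\n    return 0\n"
    print(Linter.run('example.py', code=sample, params={'complexity': 1}))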
| 30.6875
| 90
| 0.632383
|
ef80afb27f29b9a8709627f30bbeef41d653912c
| 15,785
|
py
|
Python
|
pytorch_lightning/utilities/model_summary.py
|
AI-App/PyTorch-Lightning
|
ca9b25db80f08a3b9a3c448048949ec1adb845ba
|
[
"Apache-2.0"
] | 15,666
|
2020-01-14T07:16:15.000Z
|
2022-03-31T23:22:26.000Z
|
pytorch_lightning/utilities/model_summary.py
|
AI-App/PyTorch-Lightning
|
ca9b25db80f08a3b9a3c448048949ec1adb845ba
|
[
"Apache-2.0"
] | 9,140
|
2020-01-14T03:10:42.000Z
|
2022-03-31T19:57:09.000Z
|
pytorch_lightning/utilities/model_summary.py
|
AI-App/PyTorch-Lightning
|
ca9b25db80f08a3b9a3c448048949ec1adb845ba
|
[
"Apache-2.0"
] | 2,340
|
2020-01-14T06:45:32.000Z
|
2022-03-31T22:57:07.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.hooks import RemovableHandle
import pytorch_lightning as pl
from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8
from pytorch_lightning.utilities.warnings import WarningCache
log = logging.getLogger(__name__)
warning_cache = WarningCache()
PARAMETER_NUM_UNITS = [" ", "K", "M", "B", "T"]
UNKNOWN_SIZE = "?"
class LayerSummary:
"""Summary class for a single layer in a :class:`~pytorch_lightning.core.lightning.LightningModule`. It
collects the following information:
- Type of the layer (e.g. Linear, BatchNorm1d, ...)
- Input shape
- Output shape
- Number of parameters
The input and output shapes are only known after the example input array was
passed through the model.
Example::
>>> model = torch.nn.Conv2d(3, 8, 3)
>>> summary = LayerSummary(model)
>>> summary.num_parameters
224
>>> summary.layer_type
'Conv2d'
>>> output = model(torch.rand(1, 3, 5, 5))
>>> summary.in_size
[1, 3, 5, 5]
>>> summary.out_size
[1, 8, 3, 3]
Args:
module: A module to summarize
"""
def __init__(self, module: nn.Module) -> None:
super().__init__()
self._module = module
self._hook_handle = self._register_hook()
self._in_size: Optional[Union[str, List]] = None
self._out_size: Optional[Union[str, List]] = None
def __del__(self) -> None:
self.detach_hook()
def _register_hook(self) -> Optional[RemovableHandle]:
"""Registers a hook on the module that computes the input- and output size(s) on the first forward pass. If
        the hook is called, it will remove itself from the module, meaning that recursive models will only
record their input- and output shapes once. Registering hooks on :class:`~torch.jit.ScriptModule` is not
supported.
Return:
A handle for the installed hook, or ``None`` if registering the hook is not possible.
"""
def hook(_: nn.Module, inp: Any, out: Any) -> None:
if len(inp) == 1:
inp = inp[0]
self._in_size = parse_batch_shape(inp)
self._out_size = parse_batch_shape(out)
assert self._hook_handle is not None
self._hook_handle.remove()
handle = None
if not isinstance(self._module, torch.jit.ScriptModule):
handle = self._module.register_forward_hook(hook)
return handle
def detach_hook(self) -> None:
"""Removes the forward hook if it was not already removed in the forward pass.
Will be called after the summary is created.
"""
if self._hook_handle is not None:
self._hook_handle.remove()
@property
def in_size(self) -> Union[str, List]:
return self._in_size or UNKNOWN_SIZE
@property
def out_size(self) -> Union[str, List]:
return self._out_size or UNKNOWN_SIZE
@property
def layer_type(self) -> str:
"""Returns the class name of the module."""
return str(self._module.__class__.__name__)
@property
def num_parameters(self) -> int:
"""Returns the number of parameters in this module."""
return sum(np.prod(p.shape) if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters())
class ModelSummary:
"""Generates a summary of all layers in a :class:`~pytorch_lightning.core.lightning.LightningModule`.
Args:
model: The model to summarize (also referred to as the root module).
max_depth: Maximum depth of modules to show. Use -1 to show all modules or 0 to show no
summary. Defaults to 1.
The string representation of this summary prints a table with columns containing
the name, type and number of parameters for each layer.
The root module may also have an attribute ``example_input_array`` as shown in the example below.
If present, the root module will be called with it as input to determine the
intermediate input- and output shapes of all layers. Supported are tensors and
    nested lists and tuples of tensors. All other types of inputs will be skipped and shown as `?`
in the summary table. The summary will also display `?` for layers not used in the forward pass.
Example::
>>> import pytorch_lightning as pl
>>> class LitModel(pl.LightningModule):
...
... def __init__(self):
... super().__init__()
... self.net = nn.Sequential(nn.Linear(256, 512), nn.BatchNorm1d(512))
... self.example_input_array = torch.zeros(10, 256) # optional
...
... def forward(self, x):
... return self.net(x)
...
>>> model = LitModel()
>>> ModelSummary(model, max_depth=1) # doctest: +NORMALIZE_WHITESPACE
| Name | Type | Params | In sizes | Out sizes
------------------------------------------------------------
0 | net | Sequential | 132 K | [10, 256] | [10, 512]
------------------------------------------------------------
132 K Trainable params
0 Non-trainable params
132 K Total params
0.530 Total estimated model params size (MB)
>>> ModelSummary(model, max_depth=-1) # doctest: +NORMALIZE_WHITESPACE
| Name | Type | Params | In sizes | Out sizes
--------------------------------------------------------------
0 | net | Sequential | 132 K | [10, 256] | [10, 512]
1 | net.0 | Linear | 131 K | [10, 256] | [10, 512]
2 | net.1 | BatchNorm1d | 1.0 K | [10, 512] | [10, 512]
--------------------------------------------------------------
132 K Trainable params
0 Non-trainable params
132 K Total params
0.530 Total estimated model params size (MB)
"""
def __init__(self, model: "pl.LightningModule", max_depth: int = 1) -> None:
self._model = model
if not isinstance(max_depth, int) or max_depth < -1:
raise ValueError(f"`max_depth` can be -1, 0 or > 0, got {max_depth}.")
self._max_depth = max_depth
self._layer_summary = self.summarize()
# 1 byte -> 8 bits
# TODO: how do we compute precision_megabytes in case of mixed precision?
precision = self._model.precision if isinstance(self._model.precision, int) else 32
self._precision_megabytes = (precision / 8.0) * 1e-6
@property
def named_modules(self) -> List[Tuple[str, nn.Module]]:
mods: List[Tuple[str, nn.Module]]
if self._max_depth == 0:
mods = []
elif self._max_depth == 1:
# the children are the top-level modules
mods = list(self._model.named_children())
else:
mods = self._model.named_modules()
mods = list(mods)[1:] # do not include root module (LightningModule)
return mods
@property
def layer_names(self) -> List[str]:
return list(self._layer_summary.keys())
@property
def layer_types(self) -> List[str]:
return [layer.layer_type for layer in self._layer_summary.values()]
@property
def in_sizes(self) -> List:
return [layer.in_size for layer in self._layer_summary.values()]
@property
def out_sizes(self) -> List:
return [layer.out_size for layer in self._layer_summary.values()]
@property
def param_nums(self) -> List[int]:
return [layer.num_parameters for layer in self._layer_summary.values()]
@property
def total_parameters(self) -> int:
return sum(p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters())
@property
def trainable_parameters(self) -> int:
return sum(
p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters() if p.requires_grad
)
@property
def model_size(self) -> float:
# todo: seems it does not work with quantized models - it returns 0.0
return self.total_parameters * self._precision_megabytes
def summarize(self) -> Dict[str, LayerSummary]:
summary = OrderedDict((name, LayerSummary(module)) for name, module in self.named_modules)
if self._model.example_input_array is not None:
self._forward_example_input()
for layer in summary.values():
layer.detach_hook()
if self._max_depth >= 1:
# remove summary entries with depth > max_depth
for k in [k for k in summary if k.count(".") >= self._max_depth]:
del summary[k]
return summary
def _forward_example_input(self) -> None:
"""Run the example input through each layer to get input- and output sizes."""
model = self._model
trainer = self._model.trainer
input_ = model.example_input_array
input_ = model._apply_batch_transfer_handler(input_)
mode = model.training
model.eval()
if trainer is not None:
forward_context = trainer.precision_plugin.forward_context()
else:
forward_context = contextlib.nullcontext()
with torch.no_grad(), forward_context:
# let the model hooks collect the input- and output shapes
if isinstance(input_, (list, tuple)):
model(*input_)
elif isinstance(input_, dict):
model(**input_)
else:
model(input_)
model.train(mode) # restore mode of module
def _get_summary_data(self) -> List[Tuple[str, List[str]]]:
"""Makes a summary listing with:
Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model Size
"""
arrays = [
(" ", list(map(str, range(len(self._layer_summary))))),
("Name", self.layer_names),
("Type", self.layer_types),
("Params", list(map(get_human_readable_count, self.param_nums))),
]
if self._model.example_input_array is not None:
arrays.append(("In sizes", [str(x) for x in self.in_sizes]))
arrays.append(("Out sizes", [str(x) for x in self.out_sizes]))
return arrays
def __str__(self) -> str:
arrays = self._get_summary_data()
total_parameters = self.total_parameters
trainable_parameters = self.trainable_parameters
model_size = self.model_size
return _format_summary_table(total_parameters, trainable_parameters, model_size, *arrays)
def __repr__(self) -> str:
return str(self)
def parse_batch_shape(batch: Any) -> Union[str, List]:
if hasattr(batch, "shape"):
return list(batch.shape)
if isinstance(batch, (list, tuple)):
shape = [parse_batch_shape(el) for el in batch]
return shape
return UNKNOWN_SIZE
def _format_summary_table(
total_parameters: int,
trainable_parameters: int,
model_size: float,
*cols: Tuple[str, List[str]],
) -> str:
"""Takes in a number of arrays, each specifying a column in the summary table, and combines them all into one
big string defining the summary table that are nicely formatted."""
n_rows = len(cols[0][1])
n_cols = 1 + len(cols)
# Get formatting width of each column
col_widths = []
for c in cols:
col_width = max(len(str(a)) for a in c[1]) if n_rows else 0
col_width = max(col_width, len(c[0])) # minimum length is header length
col_widths.append(col_width)
# Formatting
s = "{:<{}}"
total_width = sum(col_widths) + 3 * n_cols
header = [s.format(c[0], l) for c, l in zip(cols, col_widths)]
# Summary = header + divider + Rest of table
summary = " | ".join(header) + "\n" + "-" * total_width
for i in range(n_rows):
line = []
for c, l in zip(cols, col_widths):
line.append(s.format(str(c[1][i]), l))
summary += "\n" + " | ".join(line)
summary += "\n" + "-" * total_width
summary += "\n" + s.format(get_human_readable_count(trainable_parameters), 10)
summary += "Trainable params"
summary += "\n" + s.format(get_human_readable_count(total_parameters - trainable_parameters), 10)
summary += "Non-trainable params"
summary += "\n" + s.format(get_human_readable_count(total_parameters), 10)
summary += "Total params"
summary += "\n" + s.format(get_formatted_model_size(model_size), 10)
summary += "Total estimated model params size (MB)"
return summary
def get_formatted_model_size(total_model_size: float) -> str:
return f"{total_model_size:,.3f}"
def get_human_readable_count(number: int) -> str:
"""Abbreviates an integer number with K, M, B, T for thousands, millions, billions and trillions, respectively.
Examples:
>>> get_human_readable_count(123)
'123 '
>>> get_human_readable_count(1234) # (one thousand)
'1.2 K'
>>> get_human_readable_count(2e6) # (two million)
'2.0 M'
>>> get_human_readable_count(3e9) # (three billion)
'3.0 B'
>>> get_human_readable_count(4e14) # (four hundred trillion)
'400 T'
>>> get_human_readable_count(5e15) # (more than trillion)
'5,000 T'
Args:
number: a positive integer number
Return:
A string formatted according to the pattern described above.
"""
assert number >= 0
labels = PARAMETER_NUM_UNITS
num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)
num_groups = int(np.ceil(num_digits / 3))
num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions
shift = -3 * (num_groups - 1)
number = number * (10 ** shift)
index = num_groups - 1
if index < 1 or number >= 100:
return f"{int(number):,d} {labels[index]}"
return f"{number:,.1f} {labels[index]}"
def _is_lazy_weight_tensor(p: Tensor) -> bool:
if _TORCH_GREATER_EQUAL_1_8:
from torch.nn.parameter import UninitializedParameter
if isinstance(p, UninitializedParameter):
warning_cache.warn(
"A layer with UninitializedParameter was found. "
"Thus, the total number of parameters detected may be inaccurate."
)
return True
return False
def summarize(lightning_module: "pl.LightningModule", max_depth: int = 1) -> ModelSummary:
"""Summarize the LightningModule specified by `lightning_module`.
Args:
lightning_module: `LightningModule` to summarize.
max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the
layer summary off. Default: 1.
Return:
The model summary object
"""
return ModelSummary(lightning_module, max_depth=max_depth)
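# Usage sketch (LitModel as defined in the ModelSummary docstring above):
# model = LitModel()
# print(summarize(model, max_depth=-1))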
| 36.370968
| 115
| 0.621603
|
2f5df0c790ee2784450581ef84c4ca7332b2988a
| 1,204
|
py
|
Python
|
test/test_ezsignfolder_request.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_ezsignfolder_request.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_ezsignfolder_request.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition (Full)
    This API exposes all the functionalities for the eZmax and eZsign applications.  # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.field_e_ezsignfolder_sendreminderfrequency import FieldEEzsignfolderSendreminderfrequency
from eZmaxApi.model.field_pki_ezsigntsarequirement_id import FieldPkiEzsigntsarequirementID
globals()['FieldEEzsignfolderSendreminderfrequency'] = FieldEEzsignfolderSendreminderfrequency
globals()['FieldPkiEzsigntsarequirementID'] = FieldPkiEzsigntsarequirementID
from eZmaxApi.model.ezsignfolder_request import EzsignfolderRequest
class TestEzsignfolderRequest(unittest.TestCase):
"""EzsignfolderRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsignfolderRequest(self):
"""Test EzsignfolderRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsignfolderRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 29.365854
| 109
| 0.76495
|
c896dace3efae624ad31366bf4a3bade77421bbf
| 121
|
py
|
Python
|
1007_Difference.py
|
DiegoC386/EJERCICIOS-URI
|
b2e12b6420ea16a9060726b988ea1b35cbf312c2
|
[
"MIT"
] | null | null | null |
1007_Difference.py
|
DiegoC386/EJERCICIOS-URI
|
b2e12b6420ea16a9060726b988ea1b35cbf312c2
|
[
"MIT"
] | null | null | null |
1007_Difference.py
|
DiegoC386/EJERCICIOS-URI
|
b2e12b6420ea16a9060726b988ea1b35cbf312c2
|
[
"MIT"
] | null | null | null |
A=int(input())
B=int(input())
C=int(input())
D=int(input())
DIFERENCA=((A*B)-(C*D))
print("DIFERENCA = " +str(DIFERENCA))
| 20.166667
| 37
| 0.619835
|
26e113a5c2af03b8922fc30fe3c6f745cff39476
| 3,376
|
py
|
Python
|
lib/googlecloudsdk/command_lib/compute/reservations/util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/compute/reservations/util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/googlecloudsdk/command_lib/compute/reservations/util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-25T18:17:57.000Z
|
2020-07-25T18:17:57.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions to consturct compute reservations message."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
def MakeReservationMessageFromArgs(messages, args, allocation_ref):
accelerators = MakeGuestAccelerators(messages,
getattr(args, 'accelerator', None))
local_ssds = MakeLocalSsds(messages, getattr(args, 'local_ssd', None))
specific_reservation = MakeSpecificSKUReservationMessage(
messages, args.vm_count, accelerators, local_ssds, args.machine_type,
args.min_cpu_platform)
return MakeReservationMessage(
messages, allocation_ref.Name(), specific_reservation,
args.require_specific_reservation, allocation_ref.zone)
def MakeGuestAccelerators(messages, accelerator_configs):
"""Constructs the repeated accelerator message objects."""
if accelerator_configs is None:
return []
accelerators = []
for a in accelerator_configs:
m = messages.AcceleratorConfig(
acceleratorCount=a['count'], acceleratorType=a['type'])
accelerators.append(m)
return accelerators
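# Expected shape of accelerator_configs above (a sketch; the values are
# illustrative, not a complete list of valid accelerator types):
# accelerator_configs = [
#     {'count': 2, 'type': 'nvidia-tesla-v100'},
# ]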
def MakeLocalSsds(messages, ssd_configs):
"""Constructs the repeated local_ssd message objects."""
if ssd_configs is None:
return []
local_ssds = []
disk_msg = (
messages
.AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk)
interface_msg = disk_msg.InterfaceValueValuesEnum
for s in ssd_configs:
if s['interface'].upper() == 'NVME':
interface = interface_msg.NVME
else:
interface = interface_msg.SCSI
m = disk_msg(diskSizeGb=s['size'], interface=interface)
local_ssds.append(m)
return local_ssds
def MakeSpecificSKUReservationMessage(messages, vm_count, accelerators,
local_ssds, machine_type,
min_cpu_platform):
"""Constructs a single specific sku reservation message object."""
prop_msgs = (
messages.AllocationSpecificSKUAllocationReservedInstanceProperties)
return messages.AllocationSpecificSKUReservation(
count=vm_count,
instanceProperties=prop_msgs(
guestAccelerators=accelerators,
localSsds=local_ssds,
machineType=machine_type,
minCpuPlatform=min_cpu_platform))
def MakeReservationMessage(messages, reservation_name, specific_reservation,
require_specific_reservation, reservation_zone):
"""Constructs a single allocation message object."""
return messages.Reservation(
name=reservation_name,
specificReservation=specific_reservation,
specificReservationRequired=require_specific_reservation,
zone=reservation_zone)
| 35.914894
| 78
| 0.732227
|
de2bc6cc6daf7c9ce1854dd74615ad760ad6ab3e
| 17,765
|
py
|
Python
|
lib/cogs/mapdatabase.py
|
JLpython-py/Among-Us-Bot
|
edaedd5e820ed23b6d300070086084890ecf1ed5
|
[
"MIT"
] | 4
|
2021-02-07T05:02:02.000Z
|
2021-06-19T04:36:59.000Z
|
lib/cogs/mapdatabase.py
|
JLpython-py/AmongUs-MapBot
|
edaedd5e820ed23b6d300070086084890ecf1ed5
|
[
"MIT"
] | 15
|
2020-12-13T00:59:08.000Z
|
2021-01-24T20:01:25.000Z
|
lib/cogs/mapdatabase.py
|
JLpython-py/Among-Us-Bot
|
edaedd5e820ed23b6d300070086084890ecf1ed5
|
[
"MIT"
] | 1
|
2020-12-12T14:50:20.000Z
|
2020-12-12T14:50:20.000Z
|
#! python3
# mapdatabase.py
"""
==============================================================================
MIT License
Copyright (c) 2020 Jacob Lee
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================
"""
import asyncio
import os
import re
import sqlite3
import discord
from discord.ext import commands
class MapDatabase(commands.Cog):
""" Allows member to explore available information in Among Us
"""
def __init__(self, bot):
self.bot = bot
        self.airship_parser = DatabaseParser(self.bot.airship, self.bot)
self.mira_hq_parser = DatabaseParser(self.bot.mirahq, self.bot)
self.polus_parser = DatabaseParser(self.bot.polus, self.bot)
self.the_skeld_parser = DatabaseParser(self.bot.theskeld, self.bot)
def commands_locked(self, ctx):
""" Checks if commands are locked for member
"""
voicechannelcontrol = self.bot.get_cog("VoiceChannelControl")
return voicechannelcontrol.check_commands(ctx)
@commands.group(
name="Airship", case_insensitive=True, pass_context=True,
aliases=["A"]
)
async def airship(self, ctx):
""" Command group to parse Airship database
"""
if self.commands_locked(ctx):
raise commands.CheckFailure
if ctx.invoked_subcommand is None:
await ctx.send("Invalid Airship command passed")
@airship.group(
name="retrieve", case_insensitive=True, pass_context=True,
aliases=["r"]
)
async def airship_retrieve(self, ctx, category, option):
""" Retrieves option for category in Airship database
"""
        await self.airship_parser.retrieve(ctx, category, option)
@airship.group(
name="browse", case_insensitive=True, pass_context=True,
aliases=["search", "b", "s"]
)
async def airship_browse(self, ctx, category):
""" Allows member to browse data stored in Airship database
"""
        await self.airship_parser.browse(ctx, category)
@airship.group(
name="listopts", case_insensitive=True, pass_context=True,
aliases=["ls"]
)
async def airship_listopts(self, ctx, category):
""" Returns a list of options in Airship database for category
"""
        await self.airship_parser.listopts(ctx, category)
@commands.group(
name="MIRAHQ", case_insensitive=True, pass_context=True,
aliases=["MIRA", "MH"]
)
async def mira_hq(self, ctx):
""" Command group to parse MIRA HQ database
"""
if self.commands_locked(ctx):
raise commands.CheckFailure
if ctx.invoked_subcommand is None:
await ctx.send("Invalid MIRA HQ command passed")
@mira_hq.group(
name="retrieve", case_insensitive=True, pass_context=True,
aliases=["r"]
)
async def mirahq_retrieve(self, ctx, category, option):
""" Retrieves option for category in MIRA HQ database
"""
await self.mira_hq_parser.retrieve(ctx, category, option)
@mira_hq.group(
name="browse", case_insensitive=True, pass_context=True,
aliases=["search", "b", "s"]
)
async def mirahq_browse(self, ctx, category):
""" Allows member to browse data stored in Airship database
"""
await self.mira_hq_parser.browse(ctx, category)
@mira_hq.group(
name="listopts", case_insensitive=True, pass_context=True,
aliases=["ls"]
)
async def mirahq_listopts(self, ctx, category):
""" Returns a list of options for category in MIRA HQ database
"""
await self.mira_hq_parser.listopts(ctx, category)
@commands.group(
name="Polus", case_insensitive=True, pass_context=True,
aliases=["P"]
)
async def polus(self, ctx):
""" Command group to parse Polus database
"""
if self.commands_locked(ctx):
raise commands.CheckFailure
if ctx.invoked_subcommand is None:
await ctx.send("Invalid Polus command passed")
@polus.group(
name="retrieve", case_insensitive=True, pass_context=True,
aliases=["r"]
)
async def polus_retrieve(self, ctx, category, option):
""" Retrieves option for category in Polus database
"""
await self.polus_parser.retrieve(ctx, category, option)
@polus.group(
name="browse", case_insensitive=True, pass_context=True,
aliases=["search", "b", "s"]
)
async def polus_browse(self, ctx, category):
""" Allows member to browse data stored in Polus database
"""
await self.polus_parser.browse(ctx, category)
@polus.group(
name="listopts", case_insensitive=True, pass_context=True,
aliases=["ls"]
)
async def polus_listopts(self, ctx, category):
""" Returns a list of options for category in Polus database
"""
await self.polus_parser.listopts(ctx, category)
@commands.group(
name="TheSkeld", case_insensitive=True, pass_context=True,
aliases=["Skeld", "TS"]
)
async def the_skeld(self, ctx):
""" Command group to parse The Skeld database
"""
if self.commands_locked(ctx):
raise commands.CheckFailure
if ctx.invoked_subcommand is None:
await ctx.send("Invalid The Skeld command passed")
@the_skeld.group(
name="retrieve", case_insensitive=True, pass_context=True,
aliases=["r"]
)
async def theskeld_retrieve(self, ctx, category, option):
""" Retrieves option for category in The Skeld database
"""
await self.the_skeld_parser.retrieve(ctx, category, option)
@the_skeld.group(
name="browse", case_insensitive=True, pass_context=True,
aliases=["search", "b", "s"]
)
async def theskeld_browse(self, ctx, category):
""" Allows member to browse data stored in The Skeld database
"""
await self.the_skeld_parser.browse(ctx, category)
@the_skeld.group(
name="listopts", case_insensitive=True, pass_context=True,
aliases=["ls"]
)
async def theskeld_listopts(self, ctx, category):
""" Returns a list of options for category in The Skeld database
"""
await self.the_skeld_parser.listopts(ctx, category)
@airship.error
@mira_hq.error
@polus.error
@the_skeld.error
async def locked_commands_error(self, ctx, error):
""" Handles failed check in command group
Occurrence:
- Member is in a voice channel which has been claimed
- Member in control of voice channel locks MapDatabase commands
- Member attempted to invoke a MapDatabase command
"""
if isinstance(error, commands.CheckFailure):
message = await ctx.channel.send(
"`MapDatabase` commands are locked."
)
await ctx.message.delete()
await asyncio.sleep(10)
await message.delete()
class DatabaseParser:
""" Parses SQLite databases in data/db
"""
def __init__(self, database, bot):
self.database = database
self.bot = bot
async def retrieve(self, ctx, category, option):
""" Retrieve a specific option from a category of options
"""
# Create query to retrieve data from database
category, option = category.lower(), option.title()
query = f"""
SELECT *
FROM {category}
WHERE name=?
"""
# Try to retrieve data, columns from database
try:
content = self.database.read_query(query, option)
columns = self.database.read_columns()
except sqlite3.OperationalError:
await ctx.channel.send(f"`{category}` is not valid.")
return
        # Bail out early if no row matched the requested option
        if not content:
            await ctx.channel.send("No results found.")
            return
        # Zip columns and content into a dictionary
        data = dict(zip(columns, content[0]))
# Construct embed to send data
embed = discord.Embed(
title=f"{category.title()}: {option}", color=0xff0000
)
# Add each column of data as embed field
for item in data:
embed.add_field(name=item.title(), value=data[item])
# Set footer to command group name
embed.set_footer(text=ctx.command.full_parent_name)
# Set embed image corresponding image in data/
image_name = f"{data['name']}.png"
image_path = os.path.join(
"data", ctx.command.full_parent_name.lower(), category, image_name
)
image = discord.File(image_path, image_name)
embed.set_image(url=f"attachment://{image_name}")
# Send constructed embed
await ctx.channel.send(file=image, embed=embed)
async def browse(self, ctx, category):
""" Allow member to scroll through options for category of map
"""
# Create query to retrieve data from database
category = category.lower()
query = f"""
SELECT *
FROM {category}
"""
# Try to retrieve data, columns from database
try:
content = self.database.read_query(query)
columns = self.database.read_columns()
except sqlite3.OperationalError:
await ctx.channel.send(f"`{category}` is not valid.")
return
# Create nested dictionary of columns zipped with rows
data = {
d["name"]: d for d in [dict(zip(columns, c)) for c in content]
}
data = dict(sorted(data.items()))
# Generate a specified embed for member to scroll data with
embed, image = scrolling_embed(
ctx.command.full_parent_name, category, data
)
# Send generated embed with appropriate reactions
message = await send_with_reactions(ctx, embed, image)
# Continuously check for and handle member input
while True:
# Handle member reactions
try:
payload = await self.bot.wait_for(
"raw_reaction_add", timeout=30.0,
check=lambda p: p.member.id == ctx.author.id
)
if payload.emoji.name in [
u'\u23ee', u'\u23ea', u'\u25c0', u'\u25b6', u'\u23e9',
u'\u23ed'
]:
await self.scroll(payload, data)
elif payload.emoji.name == u'\u2714':
await self.retrieve_from_search(payload, data)
elif payload.emoji.name == u'\u274c':
await self.delete_search(payload)
# Break loop when member times out
except asyncio.TimeoutError:
break
# Clear all reactions from and delete message
await message.clear_reactions()
await message.delete()
async def listopts(self, ctx, category):
""" List all options for a category of map
"""
# Create query to retrieve data from database
category = category.lower()
query = f"""
SELECT *
FROM {category}
"""
# Try to retrieve data from database
try:
content = self.database.read_query(query)
except sqlite3.OperationalError:
await ctx.channel.send(f"`{category}` is not valid.")
return
# Construct embed to send data
embed = discord.Embed(
title=category.title(), color=0xff0000,
description='\n'.join(sorted([f"-{r[0]}" for r in content]))
)
# Set embed footer to command group name
embed.set_footer(text=ctx.command.full_parent_name)
# Send constructed embed
await ctx.channel.send(embed=embed)
async def retrieve_from_search(self, payload, data):
""" Retrieve data for current option of embed
"""
# Process information in payload
channel = self.bot.get_channel(payload.channel_id)
message = await channel.fetch_message(payload.message_id)
embed = message.embeds[0]
# Create regular expressions to parse embed
footer_regex = re.compile(
r"(Airship|MIRAHQ|Polus|TheSkeld)",
re.IGNORECASE
)
title_regex = re.compile(r"^(.*): (.*)")
# Get name of map from embed footer
mapname = footer_regex.search(embed.footer.text).group(1)
# Get category and option from embed title
category = title_regex.search(embed.title).group(1)
option = title_regex.search(embed.title).group(2)
# Construct embed to send data
embed = discord.Embed(
title=f"{category}: {option}", color=0xff0000
)
# Add each column of data as embed field
for item in data[option]:
embed.add_field(
name=item.title(), value=data[option][item]
)
# Set footer to name of map (command group name)
embed.set_footer(text=mapname)
# Set image to the corresponding image in data/
image_name = f"{option}.png"
image_path = os.path.join(
'data', mapname.lower(), category.lower(), image_name
)
image = discord.File(image_path, image_name)
embed.set_image(url=f"attachment://{image_name}")
# Send constructed embed
await channel.send(file=image, embed=embed)
# Delete original message
await message.delete()
async def scroll(self, payload, data):
""" Scroll embed from search command based on the emoji used
"""
# Process information in payload
channel = self.bot.get_channel(payload.channel_id)
message = await channel.fetch_message(payload.message_id)
embed = message.embeds[0]
# Create regular expressions to parse embed
footer_regex = re.compile(
r"(Airship|MIRAHQ|Polus|TheSkeld)",
re.IGNORECASE
)
title_regex = re.compile(
r"^(.*): (.*)"
)
# Get name of map from embed footer
mapname = footer_regex.search(embed.footer.text).group(1)
# Get category and option from title
category = title_regex.search(embed.title).group(1)
option = title_regex.search(embed.title).group(2)
# Get current index
index = list(data).index(option)
# Get new index based on emoji used
scroll = {
u'\u23ee': 0, u'\u23ea': index - 5, u'\u25c0': index - 1,
u'\u25b6': index + 1, u'\u23e9': index + 5, u'\u23ed': -1}
index = scroll.get(payload.emoji.name) % len(data)
# Re-generate embed with new data
embed = scrolling_embed(
mapname, category, data, index=index
)[0]
# Edit original message with new embed
await message.edit(embed=embed)
await message.remove_reaction(payload.emoji, payload.member)
async def delete_search(self, payload):
""" Delete embed from search command
"""
# Process information in payload
channel = self.bot.get_channel(payload.channel_id)
message = await channel.fetch_message(payload.message_id)
# Delete message
await message.delete()
def scrolling_embed(mapname, category, data, *, index=0):
""" Creates an embed to allow member to browser data with
"""
# Get current option data
option_data = list(data.values())[index]
# Construct embed to send data
embed = discord.Embed(
title=f"{category.title()}: {option_data['name']}",
color=0xff0000
)
# Set embed footer to name of map and current page
embed.set_footer(
text=f"{mapname}: Page {index+1}/{len(data)}"
)
# Add each column of data as embed field
for item in option_data:
embed.add_field(
name=item.title(),
value=option_data[item]
)
# Set embed image to map logo
image_name = f"{mapname.lower()}.png"
image_path = os.path.join('data', image_name)
image = discord.File(image_path, image_name)
embed.set_image(url=f"attachment://{image_name}")
# Return constructed embed and image
return embed, image
async def send_with_reactions(ctx, embed, image):
""" Sends message with reactions for member to browse data with
"""
# Send embed and image
message = await ctx.channel.send(
embed=embed, file=image
)
# Add scroll control reactions
reactions = [
u'\u23ee', u'\u23ea', u'\u25c0', u'\u25b6', u'\u23e9', u'\u23ed',
u'\u2714', u'\u274c'
]
for rxn in reactions:
await message.add_reaction(rxn)
# Return message for future reference
return message
def setup(bot):
""" Adds MapDatabase cog
"""
bot.add_cog(MapDatabase(bot))
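# Loading sketch (assumes a discord.py bot; the extension path is
# hypothetical and depends on the project layout):
# bot.load_extension("lib.cogs.mapdatabase")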
avg_line_length: 36.107724 | max_line_length: 78 | alphanum_fraction: 0.62139

hexsha: 4efb5f6d769af4effcdbd8bb3d963ec85f195258 | size: 2018 | ext: py | lang: Python
path: lib/ExtraFM.py | repo: KanHatakeyama/anneal_project2 | head_hexsha: e9b5e776f4ac1d202b952c9b6fb4ffaee833441d | licenses: ["MIT"]
stars: null | issues: null | forks: null
from scipy.sparse import csr_matrix
from pyfm import pylibfm
import numpy as np
from numba import jit
from ScaleRegressor import ScaleRegressor
default_model = pylibfm.FM(task="regression", num_iter=30, initial_learning_rate=10**-3,
num_factors=10,
verbose=False
)
class ExtraFM:
def __init__(self, model=None):
if model is None:
model = default_model
self.model = model
def fit(self, X, y):
sparse_X = csr_matrix(X.astype("double"))
self.model.fit(sparse_X, y)
# self.qubo=calc_qubo(self.model.v,self.model.v[0].shape[0],self.model.v.shape[0])+np.diag(self.model.w)
# calc offset
self.b = self.model.predict(csr_matrix(
np.zeros(X[0].shape[0]).astype("double")))
        # clip values for the logit transform in predict()
        # (max(y) / min(y) could be used instead)
        self.y_max = 0
        self.y_min = 0
    def predict(self, X):
        # predict class probabilities, then map them to logits
        y = self.original_predict(X, reg_mode=False)
        y = -np.log((1 - y) / y)
        # replace +/-inf (probabilities of exactly 1 or 0) with the clip values
        inf_ids = np.where(y == np.inf)
        y[inf_ids] = self.y_max
        inf_ids = np.where(y == -np.inf)
        y[inf_ids] = self.y_min
return y
def original_predict(self, X, reg_mode=True):
if reg_mode:
self.model.fm_fast.task = 0
else:
self.model.fm_fast.task = 1
sparse_X = csr_matrix(X.astype("double"))
return self.model.predict(sparse_X)
@jit
def calc_qubo(v, dim1, dim2):
    # Build the pairwise QUBO coefficients from the FM factor matrix v:
    # qubo[j, i] = sum_k v[k][j] * v[k][i] for j < i, i.e. the coefficient
    # of the second-order FM interaction term x_j * x_i.
    qubo = np.zeros((dim1, dim1))
    for k in range(dim2):
        for i in range(dim1):
            for j in range(i):
                qubo[j, i] += v[k][j] * v[k][i]
    return qubo
class FMRegressor(ScaleRegressor):
def fit(self, X, y):
X = csr_matrix(X).astype(np.double)
return super().fit(X, y)
def predict(self, X):
X = csr_matrix(X).astype(np.double)
return super().predict(X)
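# Minimal usage sketch (not part of the original file; the data shapes and the
# __main__ guard are illustrative assumptions): fit ExtraFM on a toy binary
# dataset and read back logits via predict(). Requires pyfm to be installed.
if __name__ == "__main__":
    X_demo = np.random.randint(0, 2, size=(100, 8))
    y_demo = np.random.randint(0, 2, size=100).astype("double")
    fm = ExtraFM()
    fm.fit(X_demo, y_demo)       # trains the underlying pylibfm model
    logits = fm.predict(X_demo)  # class probabilities mapped to logits
    print(logits[:5])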
avg_line_length: 26.552632 | max_line_length: 112 | alphanum_fraction: 0.558474

hexsha: 9fa58e88f8a32d124b5ccb2ed5b367496fa2361c | size: 1169 | ext: py | lang: Python
path: py12/py12-packages/parts/servo.py | repo: hiro345g/raspi_magazine_201612_toku1 | head_hexsha: 5947a1c864f9844bb8da955e1b8331553546bba6 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
import wiringpi
class Servo:
""" SG90サーボモーター用クラス """
PWM_CLOCK = 375
def __init__(self, pin):
""" 初期化メソッド """
self.pin = pin # GPIOピン
self._degree = 0 # 角度の初期値
# wiringpiの初期化
# wiringpi.wiringPiSetupGpio() は __init__.py で実行済み
wiringpi.pinMode(self.pin, wiringpi.PWM_OUTPUT)
wiringpi.pwmSetMode(0)
wiringpi.pwmSetRange(1024)
wiringpi.pwmSetClock(self.PWM_CLOCK)
def _set_position(self, degree):
""" 角度を度数で受け取って指定位置まで回転
0より小さい値は0、180より大きい値は180へ修正
"""
d = degree
if degree < 0:
d = 0
elif 180 < degree:
d = 180
value = int(26 + (48 * d) / 90)
wiringpi.pwmWrite(self.pin, value)
def min(self):
self.value = -1
def max(self):
self.value = 1
def mid(self):
self.value = 0
@property
def value(self):
return self._degree
@value.setter
def value(self, value):
self._degree = (value + 1) * 90
self._set_position(self._degree)
@value.deleter
def value(self):
del self._degree
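# Minimal usage sketch (not from the original file): sweep the servo through
# its range. GPIO pin 18 is an assumed hardware-PWM-capable pin; this only
# runs on a Raspberry Pi with wiringpi installed and set up.
if __name__ == "__main__":
    import time
    servo = Servo(18)
    servo.min()  # value -1 ->   0 degrees
    time.sleep(1)
    servo.mid()  # value  0 ->  90 degrees
    time.sleep(1)
    servo.max()  # value +1 -> 180 degrees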
avg_line_length: 22.056604 | max_line_length: 58 | alphanum_fraction: 0.551754

hexsha: a51ba3445e1519c5012f4e9d09682f4328a89043 | size: 81 | ext: py | lang: Python
path: utils_bot/typing.py | repo: cleoold/sendo-erika | head_hexsha: 61dcc6d7e01a59e3f454a90e3b3094eb571ff312 | licenses: ["MIT"]
stars: 14 (first 2019-05-28T19:44:38.000Z, last 2022-01-07T19:59:18.000Z) | issues: 12 (first 2019-06-29T17:11:05.000Z, last 2021-07-23T07:00:27.000Z) | forks: 5 (first 2019-12-12T13:50:16.000Z, last 2021-01-22T04:10:51.000Z)
from typing import *
T = TypeVar('T')
def depreciated(f: T) -> T:
return f
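# Usage sketch (not in the original file): `depreciated` is an identity
# decorator used purely as a marker, so decorated callables behave unchanged.
# The function below is a hypothetical example.
@depreciated
def _answer() -> int:
    return 42
assert _answer() == 42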
avg_line_length: 11.571429 | max_line_length: 27 | alphanum_fraction: 0.604938

hexsha: 366d9f7725b435ea671eca1ce264918227124466 | size: 8118 | ext: py | lang: Python
path: pennylane/ops/qubit/observables.py | repo: ral9000/pennylane | head_hexsha: 0afbd155d044730af546c6d90cef9d01f931632d | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This submodule contains the discrete-variable quantum observables,
excepting the Pauli gates and Hadamard gate in ``non_parametric_ops.py``.
"""
from scipy.sparse import coo_matrix
import numpy as np
import pennylane as qml
from pennylane.operation import AllWires, AnyWires, Observable
from pennylane.wires import Wires
from .matrix_ops import QubitUnitary
class Hermitian(Observable):
r"""Hermitian(A, wires)
An arbitrary Hermitian observable.
For a Hermitian matrix :math:`A`, the expectation command returns the value
.. math::
\braket{A} = \braketT{\psi}{\cdots \otimes I\otimes A\otimes I\cdots}{\psi}
where :math:`A` acts on the requested wires.
If acting on :math:`N` wires, then the matrix :math:`A` must be of size
:math:`2^N\times 2^N`.
**Details:**
* Number of wires: Any
* Number of parameters: 1
* Gradient recipe: None
Args:
A (array): square hermitian matrix
wires (Sequence[int] or int): the wire(s) the operation acts on
"""
num_wires = AnyWires
num_params = 1
par_domain = "A"
grad_method = "F"
_eigs = {}
def label(self, decimals=None, base_label=None):
return super().label(decimals=decimals, base_label=base_label or "𝓗")
@classmethod
def _matrix(cls, *params):
A = np.asarray(params[0])
if A.shape[0] != A.shape[1]:
raise ValueError("Observable must be a square matrix.")
if not np.allclose(A, A.conj().T):
raise ValueError("Observable must be Hermitian.")
return A
@property
def eigendecomposition(self):
"""Return the eigendecomposition of the matrix specified by the Hermitian observable.
This method uses pre-stored eigenvalues for standard observables where
possible and stores the corresponding eigenvectors from the eigendecomposition.
It transforms the input operator according to the wires specified.
Returns:
dict[str, array]: dictionary containing the eigenvalues and the eigenvectors of the Hermitian observable
"""
Hmat = self.matrix
Hkey = tuple(Hmat.flatten().tolist())
if Hkey not in Hermitian._eigs:
w, U = np.linalg.eigh(Hmat)
Hermitian._eigs[Hkey] = {"eigvec": U, "eigval": w}
return Hermitian._eigs[Hkey]
@property
def eigvals(self):
"""Return the eigenvalues of the specified Hermitian observable.
This method uses pre-stored eigenvalues for standard observables where
possible and stores the corresponding eigenvectors from the eigendecomposition.
Returns:
array: array containing the eigenvalues of the Hermitian observable
"""
return self.eigendecomposition["eigval"]
def diagonalizing_gates(self):
"""Return the gate set that diagonalizes a circuit according to the
specified Hermitian observable.
This method uses pre-stored eigenvalues for standard observables where
possible and stores the corresponding eigenvectors from the eigendecomposition.
Returns:
list: list containing the gates diagonalizing the Hermitian observable
"""
return [QubitUnitary(self.eigendecomposition["eigvec"].conj().T, wires=list(self.wires))]
class SparseHamiltonian(Observable):
r"""SparseHamiltonian(H)
A Hamiltonian represented directly as a sparse matrix in coordinate list (COO) format.
.. warning::
``SparseHamiltonian`` observables can only be used to return expectation values.
Variances and samples are not supported.
.. note::
Note that the ``SparseHamiltonian`` observable should not be used with a subset of wires.
**Details:**
* Number of wires: All
* Number of parameters: 1
* Gradient recipe: None
Args:
H (coo_matrix): a sparse matrix in SciPy coordinate list (COO) format with
dimension :math:`(2^n, 2^n)`, where :math:`n` is the number of wires
"""
num_wires = AllWires
num_params = 1
par_domain = None
grad_method = None
def label(self, decimals=None, base_label=None):
return super().label(decimals=decimals, base_label=base_label or "𝓗")
@classmethod
def _matrix(cls, *params):
A = params[0]
if not isinstance(A, coo_matrix):
raise TypeError("Observable must be a scipy sparse coo_matrix.")
return A
def diagonalizing_gates(self):
return []
class Projector(Observable):
r"""Projector(basis_state, wires)
Observable corresponding to the computational basis state projector :math:`P=\ket{i}\bra{i}`.
The expectation of this observable returns the value
.. math::
|\langle \psi | i \rangle |^2
corresponding to the probability of measuring the quantum state in the :math:`i` -th eigenstate of the specified :math:`n` qubits.
For example, the projector :math:`\ket{11}\bra{11}` , or in integer notation :math:`\ket{3}\bra{3}`, is created by ``basis_state=np.array([1, 1])``.
**Details:**
* Number of wires: Any
* Number of parameters: 1
* Gradient recipe: None
Args:
basis_state (tensor-like): binary input of shape ``(n, )``
wires (Iterable): wires that the projector acts on
"""
num_wires = AnyWires
num_params = 1
par_domain = "A"
def __init__(self, basis_state, wires, do_queue=True):
wires = Wires(wires)
shape = qml.math.shape(basis_state)
if len(shape) != 1:
raise ValueError(f"Basis state must be one-dimensional; got shape {shape}.")
n_basis_state = shape[0]
if n_basis_state != len(wires):
raise ValueError(
f"Basis state must be of length {len(wires)}; got length {n_basis_state}."
)
basis_state = list(qml.math.toarray(basis_state))
if not set(basis_state).issubset({0, 1}):
raise ValueError(f"Basis state must only consist of 0s and 1s; got {basis_state}")
super().__init__(basis_state, wires=wires, do_queue=do_queue)
def label(self, decimals=None, base_label=None):
r"""A customizable string representation of the operator.
Args:
decimals=None (int): If ``None``, no parameters are included. Else,
specifies how to round the parameters.
base_label=None (str): overwrite the non-parameter component of the label
Returns:
str: label to use in drawings
**Example:**
>>> qml.Projector([0, 1,0], wires=(0,1,2)).label()
'|010⟩⟨010|'
"""
if base_label is not None:
return base_label
basis_string = "".join(str(int(i)) for i in self.parameters[0])
return f"|{basis_string}⟩⟨{basis_string}|"
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the specific projector operator.
Returns:
array: eigenvalues of the projector observable in the computational basis
"""
w = np.zeros(2 ** len(params[0]))
idx = int("".join(str(i) for i in params[0]), 2)
w[idx] = 1
return w
def diagonalizing_gates(self):
"""Return the gate set that diagonalizes a circuit according to the
specified Projector observable.
Returns:
list: list containing the gates diagonalizing the projector observable
"""
return []
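# Minimal usage sketch (not part of the original module; the device choice and
# circuits are illustrative): measure a Hermitian observable and a basis-state
# Projector on the standard default.qubit simulator.
if __name__ == "__main__":
    dev = qml.device("default.qubit", wires=2)
    @qml.qnode(dev)
    def z_expectation():
        qml.Hadamard(wires=0)
        return qml.expval(Hermitian(np.array([[1, 0], [0, -1]]), wires=0))
    @qml.qnode(dev)
    def projector_probability():
        qml.Hadamard(wires=0)
        return qml.expval(Projector([0, 0], wires=[0, 1]))
    print(z_expectation())          # <Z> of |+> is ~0.0
    print(projector_probability())  # |<psi|00>|^2 is ~0.5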
avg_line_length: 32.342629 | max_line_length: 152 | alphanum_fraction: 0.65324

hexsha: 849eb395ee3a9a83c72f052127d5f2e7a3112951 | size: 515 | ext: py | lang: Python
path: vtkdatawidgets/_version.py | repo: batearedcollie/jupyter-vtk-datawidgets | head_hexsha: cf1013d71eaba49f32a6b6c280314bafdc56cb12 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Simula Research Laboratory.
# Distributed under the terms of the Modified BSD License.
version_info = (0, 1, 0, 'dev')
__version__ = ".".join(map(str, version_info))
# The version of the attribute spec that this package
# implements. This is the value used in
# _model_module_version/_view_module_version.
#
# Update this value when attributes are added/removed from
# the widget models, or if the serialized format changes.
EXTENSION_SPEC_VERSION = '1.0.0'
avg_line_length: 30.294118 | max_line_length: 58 | alphanum_fraction: 0.75534

hexsha: b842f14385fc0d76697e1b4b6fe7367de7aa8b0c | size: 11462 | ext: py | lang: Python
path: pybind/nos/v7_1_0/ssh_sa/ssh/client/__init__.py | repo: shivharis/pybind | head_hexsha: 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: 1 (first 2021-11-05T22:15:42.000Z, last 2021-11-05T22:15:42.000Z)
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class client(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-sec-services - based on the path /ssh-sa/ssh/client. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__cipher','__mac','__key_exchange',)
_yang_name = 'client'
_rest_name = 'client'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__key_exchange = YANGDynClass(base=unicode, is_leaf=True, yang_name="key-exchange", rest_name="key-exchange", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Key-exchange algorithm', u'cli-full-command': None, u'callpoint': u'ssh_client_kex_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
self.__mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC algorithm(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_mac_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
self.__cipher = YANGDynClass(base=unicode, is_leaf=True, yang_name="cipher", rest_name="cipher", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Cipher(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_cipher_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'ssh-sa', u'ssh', u'client']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'ssh', u'client']
def _get_cipher(self):
"""
Getter method for cipher, mapped from YANG variable /ssh_sa/ssh/client/cipher (string)
"""
return self.__cipher
def _set_cipher(self, v, load=False):
"""
Setter method for cipher, mapped from YANG variable /ssh_sa/ssh/client/cipher (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_cipher is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cipher() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="cipher", rest_name="cipher", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Cipher(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_cipher_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cipher must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="cipher", rest_name="cipher", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Cipher(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_cipher_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)""",
})
self.__cipher = t
if hasattr(self, '_set'):
self._set()
def _unset_cipher(self):
self.__cipher = YANGDynClass(base=unicode, is_leaf=True, yang_name="cipher", rest_name="cipher", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Cipher(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_cipher_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
def _get_mac(self):
"""
Getter method for mac, mapped from YANG variable /ssh_sa/ssh/client/mac (string)
"""
return self.__mac
def _set_mac(self, v, load=False):
"""
Setter method for mac, mapped from YANG variable /ssh_sa/ssh/client/mac (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC algorithm(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_mac_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC algorithm(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_mac_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)""",
})
self.__mac = t
if hasattr(self, '_set'):
self._set()
def _unset_mac(self):
self.__mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC algorithm(s)', u'cli-full-command': None, u'callpoint': u'ssh_client_mac_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
def _get_key_exchange(self):
"""
Getter method for key_exchange, mapped from YANG variable /ssh_sa/ssh/client/key_exchange (string)
"""
return self.__key_exchange
def _set_key_exchange(self, v, load=False):
"""
Setter method for key_exchange, mapped from YANG variable /ssh_sa/ssh/client/key_exchange (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_key_exchange is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_key_exchange() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="key-exchange", rest_name="key-exchange", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Key-exchange algorithm', u'cli-full-command': None, u'callpoint': u'ssh_client_kex_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """key_exchange must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="key-exchange", rest_name="key-exchange", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Key-exchange algorithm', u'cli-full-command': None, u'callpoint': u'ssh_client_kex_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)""",
})
self.__key_exchange = t
if hasattr(self, '_set'):
self._set()
def _unset_key_exchange(self):
self.__key_exchange = YANGDynClass(base=unicode, is_leaf=True, yang_name="key-exchange", rest_name="key-exchange", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Key-exchange algorithm', u'cli-full-command': None, u'callpoint': u'ssh_client_kex_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='string', is_config=True)
cipher = __builtin__.property(_get_cipher, _set_cipher)
mac = __builtin__.property(_get_mac, _set_mac)
key_exchange = __builtin__.property(_get_key_exchange, _set_key_exchange)
_pyangbind_elements = {'cipher': cipher, 'mac': mac, 'key_exchange': key_exchange, }
avg_line_length: 59.388601 | max_line_length: 493 | alphanum_fraction: 0.719159

hexsha: a203e0cb1c20538785456563c9637f3bf7cda63e | size: 505 | ext: py | lang: Python
path: summer_coding/22_generate_parentheses.py | repo: Taewan-P/LeetCode_Repository | head_hexsha: 0e6a91c892fd406a2629b416f37dc6a989875fde | licenses: ["MIT"]
stars: null | issues: null | forks: null
from typing import List
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
        result = []
        def generate(l, r, s):
if l < r:
return
if r == n:
result.append(s)
return
if l < n:
generate(l+1, r, s + "(")
if r < n:
generate(l, r+1, s + ")")
        generate(0, 0, "")
return result
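# Usage sketch (not part of the original solution): the recursion emits '('
# while l < n and ')' only while r < l (the `l < r` guard prunes invalid
# prefixes), so exactly the balanced strings are produced.
if __name__ == "__main__":
    print(Solution().generateParenthesis(2))  # ['(())', '()()']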
avg_line_length: 24.047619 | max_line_length: 55 | alphanum_fraction: 0.310891

hexsha: 84e12d396addcde6767815b707c4d0f59d77d081 | size: 464 | ext: py | lang: Python
path: MC-MPC/scripts/tests.py | repo: Anna-Kuosmanen/DAGChainer | head_hexsha: 2e095e30dd9b158563c05d088443d6b548eeb870 | licenses: ["MIT"]
stars: 9 (first 2018-04-18T12:48:49.000Z, last 2022-03-23T20:53:30.000Z) | issues: null | forks: 3 (first 2019-04-10T13:02:47.000Z, last 2022-03-23T20:54:02.000Z)
import solvers
TEST_GRAPH_NAME = "test_graph"
def test_sum(k, n, m):
    solvers.generate_k_path_graph(k, n, m, TEST_GRAPH_NAME)
decomposed_sum = solvers.solve_with_decomposition(TEST_GRAPH_NAME)
normal_sum = solvers.solve_without_decomposition(TEST_GRAPH_NAME)
print decomposed_sum
print normal_sum
    if decomposed_sum != normal_sum:
print "Test failed"
return 0
print "Test passed."
return 1
#for i in range(1, 10):
test_sum(2, 3, 10)
avg_line_length: 21.090909 | max_line_length: 68 | alphanum_fraction: 0.747845

hexsha: 99057610d96ed8ad1d3db6e7923d7615f1e5c3d2 | size: 3919 | ext: py | lang: Python
path: oscar/lib/python2.7/site-packages/pip/vcs/bazaar.py | repo: sainjusajan/django-oscar | head_hexsha: 466e8edc807be689b0a28c9e525c8323cc48b8e1 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
from __future__ import absolute_import
import logging
import os
import tempfile
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
schemes = (
'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
'bzr+lp',
)
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(['lp'])
urllib_parse.non_hierarchical.extend(['lp'])
def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
self.run_command(['export', location], cwd=temp_dir,
show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
self.run_command(['switch', url], cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh://; re-add it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = self.run_command(['info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = self.run_command(
['revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo:
return None
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
current_rev = self.get_revision(location)
return '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
def check_version(self, dest, rev_options):
"""Always assume the versions don't match"""
return False
vcs.register(Bazaar)
avg_line_length: 33.495726 | max_line_length: 78 | alphanum_fraction: 0.560857

hexsha: 9857e91815e3dd666ec94d4dd1d0f69f368b494e | size: 3056 | ext: py | lang: Python
path: tinys3/tests/test_list_request.py | repo: CloudKilat/tinys3 | head_hexsha: 32c023ead540c0764250d181e8176ff41540e1c8 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (first 2018-08-17T12:55:43.000Z, last 2018-08-17T12:55:43.000Z)
# -*- coding: utf-8 -*-
import datetime
import unittest
from flexmock import flexmock
from tinys3.request_factory import ListRequest
from tinys3 import Connection
class TestNonUploadRequests(unittest.TestCase):
def setUp(self):
self.conn = Connection("TEST_ACCESS_KEY", "TEST_SECRET_KEY", tls=True)
self.r = ListRequest(self.conn, 'prefix', 'bucket')
self.adapter = flexmock()
flexmock(self.r).should_receive('adapter').and_return(self.adapter)
files = ["""
<Contents>
<Key>prefix/file1</Key>
<LastModified>2013-10-31T15:38:32.000Z</LastModified>
<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
""", """
<Contents>
<Key>prefix/file2</Key>
<LastModified>2014-06-16T15:58:56.000Z</LastModified>
<ETag>"31ed785816f1162fca532cbc80b27266"</ETag>
<Size>581708</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
"""]
parsed_files = [{
'etag': 'd41d8cd98f00b204e9800998ecf8427e',
'key': 'prefix/file1',
'last_modified': datetime.datetime(2013, 10, 31, 15, 38, 32),
'size': 0,
'storage_class': 'STANDARD',
}, {
'etag': '31ed785816f1162fca532cbc80b27266',
'key': 'prefix/file2',
'last_modified': datetime.datetime(2014, 6, 16, 15, 58, 56),
'size': 581708,
'storage_class': 'STANDARD',
}]
def setup_adapter(self, marker, files, truncated):
self.adapter.should_receive('get').with_args(
'https://s3.amazonaws.com/bucket/',
auth=self.conn.auth,
params={'prefix': 'prefix', 'marker': marker},
).and_return(flexmock(
raise_for_status=lambda: None,
content="""
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix>prefix</Prefix>
<Marker>{0}</Marker>
<MaxKeys>1000</MaxKeys>
<IsTruncated>{1}</IsTruncated>
{2}
</ListBucketResult>
""".format(
marker,
'true' if truncated else 'false',
files,
).strip().encode('utf-8'),
)).once()
def test_simple_list_request(self):
"""
Test the generation of a list request
"""
self.setup_adapter('', '\n'.join(self.files), False)
self.assertEquals(list(self.r.run()), self.parsed_files)
def test_chained_list_requests(self):
"""
Test the generation of a more complex list request
"""
self.setup_adapter('', self.files[0], True)
self.setup_adapter('prefix/file1', self.files[1], False)
self.assertEquals(list(self.r.run()), self.parsed_files)
avg_line_length: 34.727273 | max_line_length: 82 | alphanum_fraction: 0.561518

hexsha: 9f3196947c3e13cafe0adca1331aa619377c2f6c | size: 11218 | ext: py | lang: Python
path: util/job_launching/get_stats.py | repo: kiliakis/gpgpu-sim_simulations | head_hexsha: cbd8fa40d5716d18639f81a344d16cc9a3729df1 | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
from optparse import OptionParser
import re
import os
import subprocess
import sys
import common
import math
import yaml
import csv
# this_directory = os.path.dirname(os.path.realpath(__file__)) + "/"
#*********************************************************--
# main script start
#*********************************************************--
this_directory = os.path.dirname(os.path.realpath(__file__)) + "/"
help_str = "There are 3 ways to use this file:\n" +\
           " 1) Specify a sim_name: \"-N <the-name>\"\n" +\
           "    The job launching logfiles will be searched for the most recent\n" +\
           "    sim_name job launch and those specific output files will be parsed.\n" +\
           " 2) Specify a logfile: \"-l <the-file>\"\n" +\
           "    The jobs in that specific logfile will be parsed.\n" +\
           " 3) Specify configs/apps ymls: \"-c <configs-yml> -a <apps-yml>\"\n" +\
           "    The \"configs\" and \"benchmark\" lists in the ymls are parsed and\n" +\
           "    the most recent runs of those bench+config pairs are searched.\n" +\
           "If no options are specified, it defaults to the -l option using the latest logfile."
parser = OptionParser(usage=help_str)
parser.add_option("-l", "--logfile", dest="logfile",
help="The logfile the status is based on. " +
"By default, we will base it on the latest simulations launched.\n" +
"specify \"all\" to use all the simulation logfiles in the directory",
default="")
parser.add_option("-r", "--run_dir", dest="run_dir",
help="The directory where the benchmark/config directories exist.", default="")
parser.add_option("-N", "--sim_name", dest="sim_name",
help="If you are launchign run_simulations.py with the \"-N\" option" +
" then you can run ./job_status.py with \"-N\" and it will" +
" give you the status of the latest run with that name." +
" if you want older runs from this name, then just point it directly at the" +
" logfile with \"-l\"", default="")
parser.add_option("-c", "--configs_yml", dest="configs_yml", default="",
help="If this option is specified, then sim_name and logfile are ignored." +
"Instead, the output files that will be parsed will")
parser.add_option("-a", "--apps_yml", dest="apps_yml", default="",
help="If this option is specified, then sim_name and logfile are ignored." +
"Instead, the output files that will be parsed will")
# parser.add_option("-b", "--simulator_build", dest="simulator_build", default="",
# help="If you only want data from a particular build of the simulator, specify this flag.")
parser.add_option("-s", "--stats_yml", dest="stats_yml", default="",
help="The yaml file that defines the stats you want to collect." +
" by default it uses stats/example_stats.yml")
# parser.add_option("-f", "--file", dest="file", default="stdout",
# help="Print to the stdout or save to file.")
parser.add_option("-f", "--file", dest="file", action="store_true",
help="Print to the stdout or save to file.")
(options, args) = parser.parse_args()
options.logfile = options.logfile.strip()
options.run_dir = options.run_dir.strip()
options.sim_name = options.sim_name.strip()
if not options.file:
options.file = "stdout"
else:
temp = options.logfile.split('sim_log.')[1].split('.txt')[0]
options.file = '{}results/csvfiles/stats-{}.csv'.format(this_directory, temp)
# options.file = options.file.strip()
cuda_version = common.get_cuda_version()
options.run_dir = common.dir_option_test(options.run_dir, this_directory + ("../../sim_run_%s/" % cuda_version),
this_directory)
if not os.path.isdir(options.run_dir):
exit(options.run_dir +
" does not exist - specify the run directory where the benchmark/config dirs exist")
options.stats_yml = common.file_option_test(options.stats_yml, os.path.join(this_directory, "stats", "example_stats.yml"),
this_directory)
options.configs_yml = common.file_option_test(
options.configs_yml, "", this_directory)
options.apps_yml = common.file_option_test(options.apps_yml, "", this_directory)
stat_map = {}
configs = set()
apps_and_args = set()
specific_jobIds = {}
stats_to_pull = {}
stats_yaml = yaml.load(open(options.stats_yml))
stats = {}
for stat in stats_yaml['collect']:
stats_to_pull[stat] = re.compile(stats_yaml['collect'][stat])
if options.configs_yml != "" and options.apps_yml != "":
for app in common.parse_app_yml(options.apps_yml):
a, b, exe_name, args_list = app
for args in args_list:
apps_and_args.add(os.path.join(exe_name, re.sub(
r"[^a-z^A-Z^0-9]", "_", args.strip())))
for config, params, gpuconf_file in common.parse_config_yml(options.configs_yml):
configs.add(config)
else:
# This code gets the logfiles to pull the stats from if you are using the "-l" or "-N" option
parsed_logfiles = []
logfiles_directory = this_directory + "../job_launching/logfiles/"
if options.logfile == "":
if not os.path.exists(logfiles_directory):
exit("No logfile specified and the default logfile directory cannot be found")
all_logfiles = [os.path.join(logfiles_directory, f)
for f in os.listdir(logfiles_directory) if(re.match(r'sim_log.*', f))]
if len(all_logfiles) == 0:
exit("ERROR - No Logfiles in " + logfiles_directory)
if options.sim_name != "":
named_sim = []
for logf in all_logfiles:
match_str = r".*\/sim_log\.{0}\..*".format(options.sim_name)
if re.match(match_str, logf):
named_sim.append(logf)
if len(named_sim) == 0:
exit("Could not find logfiles for job with the name \"{0}\"".format(
options.sim_name))
all_logfiles = named_sim
parsed_logfiles.append(max(all_logfiles, key=os.path.getmtime))
elif options.logfile == "all":
parsed_logfiles = [os.path.join(logfiles_directory, f)
for f in os.listdir(logfiles_directory) if(re.match(r'sim_log.*\.latest', f))]
else:
parsed_logfiles.append(common.file_option_test(
options.logfile, "", this_directory))
# print "Using logfiles " + str(parsed_logfiles)
for logfile in parsed_logfiles:
if not os.path.isfile(logfile):
exit("Cannot open Logfile " + logfile)
with open(logfile) as f:
for line in f:
time, jobId, app, args, config, jobname = line.split()
configs.add(config)
app_and_args = os.path.join(app, args)
apps_and_args.add(app_and_args)
specific_jobIds[config + app_and_args] = jobId
for app_and_args in apps_and_args:
for config in configs:
# now get the right output file
output_dir = os.path.join(options.run_dir, app_and_args, config)
if not os.path.isdir(output_dir):
print("WARNING the outputdir " + output_dir + " does not exist")
continue
if config + app_and_args in specific_jobIds:
jobId = specific_jobIds[config + app_and_args]
outfile = os.path.join(output_dir, app_and_args.replace(
"/", "-") + "." + "o" + jobId)
else:
all_outfiles = [os.path.join(output_dir, f)
for f in os.listdir(output_dir) if(re.match(r'.*\.o[0-9]+', f))]
outfile = max(all_outfiles, key=os.path.getmtime)
stat_found = set()
if not os.path.isfile(outfile):
print "WARNING - " + outfile + " does not exist"
continue
# Do a quick 100-line pass to get the GPGPU-Sim Version number
MAX_LINES = 100
count = 0
for line in open(outfile).readlines():
count += 1
if count >= MAX_LINES:
break
build_match = re.match(".*\[build\s+(.*)\].*", line)
if build_match:
stat_map[app_and_args + config +
"GPGPU-Sim-build"] = build_match.group(1)
break
# Only go up for 10000 lines looking for stuff
MAX_LINES = 100000
count = 0
for line in reversed(open(outfile).readlines()):
count += 1
if count >= MAX_LINES:
break
# pull out some stats
for stat_name, token in stats_to_pull.iteritems():
if stat_name in stat_found:
continue
existance_test = token.search(line.rstrip())
if existance_test != None:
stat_found.add(stat_name)
number = existance_test.group(1).strip()
# if app_and_args + config + stat_name not in stat_map:
# stat_map[app_and_args + config + stat_name] = []
# stat_map[app_and_args + config + stat_name].append(number)
stat_map[app_and_args + config + stat_name] = number
if len(stat_found) == len(stats_to_pull):
break
rows = [['app_and_args', 'config', 'metric', 'num_kernels', 'valuelist']]
for appargs in apps_and_args:
for config in configs:
for stat_name in stats_to_pull:
row = [appargs, config, stat_name]
if appargs + config + stat_name in stat_map:
row.append(1)
row.append(stat_map[appargs + config + stat_name])
else:
row += ['0', 'nan']
rows.append(row)
if options.file != 'stdout':
with open(options.file, 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(rows)
else:
for r in rows:
print '{:<25.25}\t{:<20.20}\t{:<20.20}\t{:<4}\t{:<50.50}'.format(*r)
# else:
# print header
# print rows
# After collection, spew out the tables
# DIVISION = "-" * 100
# csv_str = ""
# # Just adding this in here since it is a special case and is not parsed like
# # everything else, because you need to read from the beginning not the end
# stats_to_pull["GPGPU-Sim-build"] = ""
# for stat_name in stats_to_pull:
# csv_str += DIVISION + "\n"
# csv_str += stat_name + ","
# for config in configs:
# csv_str += config + ","
# csv_str += "\n"
# for appargs in apps_and_args:
# csv_str += appargs + ","
# for config in configs:
# if appargs + config + stat_name in stat_map:
# csv_str += stat_map[appargs + config + stat_name] + ","
# else:
# csv_str += "NA,"
# csv_str += "\n"
# csv_str += "\n"
# print csv_str
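# Example invocations (comments only; the file names are hypothetical):
#   ./get_stats.py -N mytest -s stats/example_stats.yml
#       parse the most recent logfile launched with "-N mytest"
#   ./get_stats.py -l logfiles/sim_log.mytest.1.txt -f
#       parse one specific logfile and write a TSV under results/csvfiles/
#       (the -f path is derived from the "sim_log.<name>.txt" logfile name)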
avg_line_length: 42.492424 | max_line_length: 122 | alphanum_fraction: 0.580496

hexsha: 9daf9ffd93c109545d68a60cc75ca7bfad124397 | size: 1083 | ext: py | lang: Python
path: numba/ocl/tests/oclpy/test_idiv.py | repo: SPIRV/NUMBA | head_hexsha: 6b93f44c923e7bf8cd9f95cc5188bba3aea4e75d | licenses: ["BSD-2-Clause", "MIT"]
stars: 4 (first 2017-06-30T14:22:30.000Z, last 2021-01-11T16:47:23.000Z) | issues: 1 (first 2017-12-21T23:31:59.000Z, last 2017-12-29T16:56:05.000Z) | forks: null
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import ocl, float32, float64, int32
from numba.ocl.testing import unittest
class TestOclIDiv(unittest.TestCase):
def test_inplace_div(self):
@ocl.jit('(float32[:,:], int32, int32)')
def div(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float32)
grid = ocl.to_device(x)
div(grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
def test_inplace_div_double(self):
@ocl.jit('(float64[:, :], int32, int32)')
def div_double(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float64)
grid = ocl.to_device(x)
div_double(grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
if __name__ == '__main__':
unittest.main()
avg_line_length: 27.075 | max_line_length: 64 | alphanum_fraction: 0.551247

hexsha: 812e887bc2d4396d07bf34fb1f219c44d9c81004 | size: 5544 | ext: py | lang: Python
path: src/transformers/models/cpm/tokenization_cpm.py | licenses: ["Apache-2.0"]
max_stars_repo: dctelus/transformers (head 6786cbc4b14ebff0ac59c768cadd109391db9a08) | stars: 8028 (first 2018-11-05T15:19:44.000Z, last 2019-07-16T09:14:59.000Z)
max_issues_repo: arron1227/transformers (head b18dfd95e1f60ae65a959a7b255fc06522170d1b) | issues: 731 (first 2018-11-05T21:35:52.000Z, last 2019-07-16T09:51:26.000Z)
max_forks_repo: arron1227/transformers (head b18dfd95e1f60ae65a959a7b255fc06522170d1b) | forks: 2106 (first 2018-11-05T15:29:15.000Z, last 2019-07-16T08:51:57.000Z)
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from ...utils import logging
from ..xlnet.tokenization_xlnet import XLNetTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class CpmTokenizer(XLNetTokenizer):
"""Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
def __init__(self, *args, **kwargs):
"""
Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary necessary to instantiate a tokenizer.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether to lowercase the input when tokenizing.
remove_space (`bool`, *optional*, defaults to `True`):
Whether to strip the text when tokenizing (removing excess spaces before and after the string).
keep_accents (`bool`, *optional*, defaults to `False`):
Whether to keep accents when tokenizing.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
this token instead.
sep_token (`str`, *optional*, defaults to `"<sep>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
for sequence classification or for a text and a question for question answering. It is also used as the
last token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"<cls>"`):
The classifier token which is used when doing sequence classification (classification of the whole
sequence instead of per-token classification). It is the first token of the sequence when built with
special tokens.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
Additional special tokens used by the tokenizer.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
super().__init__(*args, **kwargs)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation."
)
self.jieba = jieba
self.translator = str.maketrans(" \n", "\u2582\u2583")
def _tokenize(self, text, *args, **kwargs):
text = [x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)]
text = " ".join(text)
return super()._tokenize(text, *args, **kwargs)
def _decode(self, *args, **kwargs):
text = super()._decode(*args, **kwargs)
text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
return text
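# Minimal usage sketch (not part of the original module): load the tokenizer
# for the checkpoint listed in PRETRAINED_VOCAB_FILES_MAP above. Requires the
# jieba and sentencepiece packages plus network access for the vocab file.
if __name__ == "__main__":
    tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    ids = tokenizer.encode("今天天气真好")  # "The weather is great today"
    print(tokenizer.decode(ids))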
avg_line_length: 47.793103 | max_line_length: 119 | alphanum_fraction: 0.633478

hexsha: 833016b1eca4fdc2d889cef89e39d1fc4fa992a1 | size: 25740 | ext: py | lang: Python
path: language-modelling/transformer_xl_wt2/pytorch/train.py | repo: ischlag/Fast-Weight-Memory-public | head_hexsha: 64c077f02ec320ec535cb66db3600453e1ef445f | licenses: ["MIT"]
stars: 17 (first 2021-01-17T02:13:15.000Z, last 2021-12-12T13:55:33.000Z) | issues: null | forks: 5 (first 2021-03-08T16:10:38.000Z, last 2022-01-04T14:42:16.000Z)
# coding: utf-8
import argparse
import time
import math
import os, sys
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from data_utils import get_lm_corpus
from mem_transformer import MemTransformerLM
from utils.exp_utils import create_exp_dir
from utils.data_parallel import BalancedDataParallel
parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
parser.add_argument('--data', type=str, default='../data/wikitext-103',
help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='wt103',
choices=['wt103', 'lm1b', 'enwik8', 'text8'],
help='dataset name')
parser.add_argument('--n_layer', type=int, default=12,
help='number of total layers')
parser.add_argument('--n_head', type=int, default=10,
help='number of heads')
parser.add_argument('--d_head', type=int, default=50,
help='head dimension')
parser.add_argument('--d_embed', type=int, default=-1,
help='embedding dimension')
parser.add_argument('--d_model', type=int, default=500,
help='model dimension')
parser.add_argument('--d_inner', type=int, default=1000,
help='inner dimension in FF')
parser.add_argument('--dropout', type=float, default=0.0,
help='global dropout rate')
parser.add_argument('--dropoute', type=float, default=0.1,
help='Discrete input embedding dropout rate.')
parser.add_argument('--dropouto', type=float, default=0.1,
help='Output dropout rate.')
parser.add_argument('--dropouti', type=float, default=0.1,
help='Input embedding dropout rate.')
parser.add_argument('--dropatt', type=float, default=0.0,
help='attention probability dropout rate')
parser.add_argument('--init', default='normal', type=str,
help='parameter initializer to use.')
parser.add_argument('--emb_init', default='normal', type=str,
help='parameter initializer to use.')
parser.add_argument('--init_range', type=float, default=0.1,
help='parameters initialized by U(-init_range, init_range)')
parser.add_argument('--emb_init_range', type=float, default=0.01,
help='parameters initialized by U(-init_range, init_range)')
parser.add_argument('--init_std', type=float, default=0.02,
help='parameters initialized by N(0, init_std)')
parser.add_argument('--proj_init_std', type=float, default=0.01,
help='parameters initialized by N(0, init_std)')
parser.add_argument('--optim', default='adam', type=str,
choices=['adam', 'sgd', 'adagrad'],
help='optimizer to use.')
parser.add_argument('--lr', type=float, default=0.00025,
help='initial learning rate (0.00025|5 for adam|sgd)')
parser.add_argument('--mom', type=float, default=0.0,
help='momentum for sgd')
parser.add_argument('--scheduler', default='cosine', type=str,
choices=['cosine', 'inv_sqrt', 'dev_perf', 'constant'],
help='lr scheduler to use.')
parser.add_argument('--warmup_step', type=int, default=0,
help='upper epoch limit')
parser.add_argument('--decay_rate', type=float, default=0.5,
help='decay factor when ReduceLROnPlateau is used')
parser.add_argument('--lr_min', type=float, default=0.0,
help='minimum learning rate during annealing')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--clip_nonemb', action='store_true',
help='only clip the gradient of non-embedding params')
parser.add_argument('--max_step', type=int, default=100000,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=60,
help='batch size')
parser.add_argument('--batch_chunk', type=int, default=1,
help='split batch into chunks to save memory')
parser.add_argument('--tgt_len', type=int, default=70,
help='number of tokens to predict')
parser.add_argument('--eval_tgt_len', type=int, default=50,
help='number of tokens to predict for evaluation')
parser.add_argument('--ext_len', type=int, default=0,
help='length of the extended context')
parser.add_argument('--mem_len', type=int, default=0,
help='length of the retained previous heads')
parser.add_argument('--not_tied', action='store_true',
help='do not tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--adaptive', action='store_true',
help='use adaptive softmax')
parser.add_argument('--div_val', type=int, default=1,
help='divident value for adapative input and softmax')
parser.add_argument('--pre_lnorm', action='store_true',
help='apply LayerNorm to the input instead of the output')
parser.add_argument('--varlen', action='store_true',
help='use variable length')
parser.add_argument('--multi_gpu', action='store_true',
help='use multiple GPU')
parser.add_argument('--log-interval', type=int, default=200,
help='report interval')
parser.add_argument('--eval-interval', type=int, default=500,
help='evaluation interval')
parser.add_argument('--work-dir', default='LM-TFM', type=str,
help='experiment directory.')
parser.add_argument('--restart', action='store_true',
help='restart training from the saved checkpoint')
parser.add_argument('--restart_dir', type=str, default='',
help='restart dir')
parser.add_argument('--debug', action='store_true',
help='run in debug mode (do not create exp dir)')
parser.add_argument('--same_length', action='store_true',
help='use the same attn length for all tokens')
parser.add_argument('--attn_type', type=int, default=0,
                    help='attention type. 0 for ours, 1 for Shaw et al, '
                         '2 for Vaswani et al, 3 for Al Rfou et al.')
parser.add_argument('--clamp_len', type=int, default=-1,
help='use the same pos embeddings after clamp_len')
parser.add_argument('--eta_min', type=float, default=0.0,
help='min learning rate for cosine scheduler')
parser.add_argument('--gpu0_bsz', type=int, default=-1,
help='batch size on gpu 0')
parser.add_argument('--max_eval_steps', type=int, default=-1,
help='max eval steps')
parser.add_argument('--sample_softmax', type=int, default=-1,
help='number of samples in sampled softmax')
parser.add_argument('--patience', type=int, default=0,
help='patience')
parser.add_argument('--finetune_v2', action='store_true',
help='finetune v2')
parser.add_argument('--finetune_v3', action='store_true',
help='finetune v3')
parser.add_argument('--fp16', action='store_true',
help='Run in pseudo-fp16 mode (fp16 storage fp32 math).')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can '
'improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument'
' supersedes --static-loss-scale.')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
help='weight decay applied to all weights')
args = parser.parse_args()
print(args)
args.tied = not args.not_tied
if args.d_embed < 0:
args.d_embed = args.d_model
assert args.ext_len >= 0, 'extended context length must be non-negative'
assert args.batch_size % args.batch_chunk == 0
args.work_dir = '{}-{}-{}'.format(args.work_dir, args.dataset, args.seed)
#args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S'))
logging = create_exp_dir(args.work_dir, debug=args.debug)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print('WARNING: You have a CUDA device, so you should probably run with --cuda')
else:
torch.cuda.manual_seed_all(args.seed)
# Validate `--fp16` option
if args.fp16:
if not args.cuda:
print('WARNING: --fp16 requires --cuda, ignoring --fp16 option')
args.fp16 = False
else:
try:
from apex.fp16_utils import FP16_Optimizer
        except ImportError:
print('WARNING: apex not installed, ignoring --fp16 option')
args.fp16 = False
device = torch.device('cuda' if args.cuda else 'cpu')
###############################################################################
# Load data
###############################################################################
corpus = get_lm_corpus(args.data, args.dataset)
ntokens = len(corpus.vocab)
args.n_token = ntokens
eval_batch_size = 10
tr_iter = corpus.get_iterator('train', args.batch_size, args.tgt_len,
device=device, ext_len=args.ext_len)
va_iter = corpus.get_iterator('valid', eval_batch_size, args.eval_tgt_len,
device=device, ext_len=args.ext_len)
te_iter = corpus.get_iterator('test', eval_batch_size, args.eval_tgt_len,
device=device, ext_len=args.ext_len)
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if args.adaptive:
assert args.dataset in ['wt103', 'lm1b']
if args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
elif args.dataset == 'lm1b':
cutoffs = [60000, 100000, 640000]
tie_projs += [False] * len(cutoffs)
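# E.g. for wt103 the branch above leaves cutoffs == [20000, 40000, 200000] and
# tie_projs == [False, True, True, True]: the head cluster's projection stays
# untied while the three tail-cluster projections are tied.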
###############################################################################
# Build the model
###############################################################################
def init_weight(weight):
if args.init == 'uniform':
nn.init.uniform_(weight, -args.init_range, args.init_range)
#nn.init.xavier_uniform(weight)#, -args.init_range, args.init_range)
elif args.init == 'normal':
nn.init.normal_(weight, 0.0, args.init_std)
#nn.init.xavier_normal(weight)#, -args.init_range, args.init_range)
def init_bias(bias):
nn.init.constant_(bias, 0.0)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias)
elif classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, args.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
init_weight(m.weight)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
init_weight(m.cluster_weight)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
init_bias(m.cluster_bias)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, args.proj_init_std)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
nn.init.normal_(m.weight, 1.0, args.init_std)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
init_bias(m.r_bias)
# CHANGE 9
elif classname.find('RelPartialLearnableDecoderLayer') != -1:
for layer in m.pos_ff.CoreNet.modules():
if hasattr(layer, 'weight'):
init_weight(layer.weight)
if hasattr(layer, 'bias'):
init_bias(layer.bias)
def update_dropout(m):
classname = m.__class__.__name__
if classname.find('Dropout') != -1:
if hasattr(m, 'p'):
m.p = args.dropout
def update_dropatt(m):
if hasattr(m, 'dropatt'):
m.dropatt.p = args.dropatt
def init_all_weights(model):
for name, module in model.named_modules():
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight.data)
print(module.weight.shape)
            if module.bias is not None:
                module.bias.data.zero_()
else:
print(type(module))
if args.restart:
with open(os.path.join(args.restart_dir, 'model.pt'), 'rb') as f:
model = torch.load(f)
if not args.fp16:
model = model.float()
model.apply(update_dropout)
model.apply(update_dropatt)
else:
model = MemTransformerLM(ntokens, args.n_layer, args.n_head, args.d_model,
args.d_head, args.d_inner, args.dropout, args.dropatt,
tie_weight=args.tied, d_embed=args.d_embed, div_val=args.div_val,
tie_projs=tie_projs, pre_lnorm=args.pre_lnorm, tgt_len=args.tgt_len,
ext_len=args.ext_len, mem_len=args.mem_len, cutoffs=cutoffs,
same_length=args.same_length, attn_type=args.attn_type,
clamp_len=args.clamp_len, sample_softmax=args.sample_softmax,
dropoute=args.dropoute, dropouti=args.dropouti, dropouto=args.dropouto)
model.apply(weights_init)
model.word_emb.apply(weights_init) # ensure embedding init is not overridden by out_layer in case of weight sharing
#init_all_weights(model)
args.n_all_param = sum([p.nelement() for p in model.parameters()])
args.n_nonemb_param = sum([p.nelement() for p in model.layers.parameters()])
if args.fp16:
model = model.half()
if args.multi_gpu:
model = model.to(device)
if args.gpu0_bsz >= 0:
para_model = BalancedDataParallel(args.gpu0_bsz // args.batch_chunk,
model, dim=1).to(device)
else:
para_model = nn.DataParallel(model, dim=1).to(device)
else:
para_model = model.to(device)
#### optimizer
if args.optim.lower() == 'sgd':
if args.sample_softmax > 0:
dense_params, sparse_params = [], []
for param in model.parameters():
if param.size() == model.word_emb.weight.size():
sparse_params.append(param)
else:
dense_params.append(param)
optimizer_sparse = optim.SGD(sparse_params, lr=args.lr * 2)
optimizer = optim.SGD(dense_params, lr=args.lr, momentum=args.mom)
else:
optimizer = optim.SGD(model.parameters(), lr=args.lr,
momentum=args.mom)
elif args.optim.lower() == 'adam':
if args.sample_softmax > 0:
dense_params, sparse_params = [], []
for param in model.parameters():
if param.size() == model.word_emb.weight.size():
sparse_params.append(param)
else:
dense_params.append(param)
# CHANGE 10
optimizer_sparse = optim.SparseAdam(sparse_params, lr=args.lr, weight_decay=args.wdecay)
optimizer = optim.Adam(dense_params, lr=args.lr, weight_decay=args.wdecay)
else:
# CHANGE 10
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optim.lower() == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=args.lr)
#### scheduler
if args.scheduler == 'cosine':
# here we do not set eta_min to lr_min to be backward compatible
# because in previous versions eta_min is default to 0
# rather than the default value of lr_min 1e-6
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
args.max_step, eta_min=args.eta_min) # should use eta_min arg
if args.sample_softmax > 0:
scheduler_sparse = optim.lr_scheduler.CosineAnnealingLR(optimizer_sparse,
args.max_step, eta_min=args.eta_min) # should use eta_min arg
elif args.scheduler == 'inv_sqrt':
# originally used for Transformer (in Attention is all you need)
def lr_lambda(step):
# return a multiplier instead of a learning rate
if step == 0 and args.warmup_step == 0:
return 1.
else:
return 1. / (step ** 0.5) if step > args.warmup_step \
else step / (args.warmup_step ** 1.5)
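    # Worked example for the lambda above (illustrative, assuming warmup_step=4000):
    # at step 2000 the multiplier is 2000 / 4000**1.5 ~ 0.0079, it peaks near
    # 4000**-0.5 ~ 0.0158 at the end of warmup, then decays as step**-0.5,
    # i.e. the schedule from "Attention Is All You Need".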
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
elif args.scheduler == 'dev_perf':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min)
if args.sample_softmax > 0:
scheduler_sparse = optim.lr_scheduler.ReduceLROnPlateau(optimizer_sparse,
factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min)
elif args.scheduler == 'constant':
pass
if args.cuda and args.fp16:
# If args.dynamic_loss_scale is False, static_loss_scale will be used.
# If args.dynamic_loss_scale is True, it will take precedence over static_loss_scale.
optimizer = FP16_Optimizer(optimizer,
static_loss_scale = args.static_loss_scale,
dynamic_loss_scale = args.dynamic_loss_scale,
dynamic_loss_args = {'init_scale': 2 ** 16})
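    # Rough sketch of what the wrapper does: optimizer.backward(loss) (used in
    # train() below) scales the loss before backprop, gradients are unscaled onto
    # fp32 master weights before the step, and with dynamic scaling the scale
    # shrinks on overflow and grows back over time. See the apex docs for details.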
if args.restart:
if os.path.exists(os.path.join(args.restart_dir, 'optimizer.pt')):
with open(os.path.join(args.restart_dir, 'optimizer.pt'), 'rb') as f:
opt_state_dict = torch.load(f)
optimizer.load_state_dict(opt_state_dict)
else:
print('Optimizer was not saved. Start from scratch.')
logging('=' * 100)
for k, v in args.__dict__.items():
logging(' - {} : {}'.format(k, v))
logging('=' * 100)
logging('#params = {}'.format(args.n_all_param))
logging('#non emb params = {}'.format(args.n_nonemb_param))
###############################################################################
# Training code
###############################################################################
def evaluate(eval_iter):
# Turn on evaluation mode which disables dropout.
model.eval()
# If the model does not use memory at all, make the ext_len longer.
# Otherwise, make the mem_len longer and keep the ext_len the same.
if args.mem_len == 0:
model.reset_length(args.eval_tgt_len,
args.ext_len+args.tgt_len-args.eval_tgt_len, args.mem_len)
else:
model.reset_length(args.eval_tgt_len,
args.ext_len, args.mem_len+args.tgt_len-args.eval_tgt_len)
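    # Worked example with illustrative values: for tgt_len=150, mem_len=150 and
    # eval_tgt_len=128, the else-branch sets mem_len to 150 + 150 - 128 = 172, so
    # the total context per position stays roughly the same while the evaluated
    # segment shrinks to 128 tokens.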
# Evaluation
total_len, total_loss = 0, 0.
with torch.no_grad():
mems = tuple()
for i, (data, target, seq_len) in enumerate(eval_iter):
if args.max_eval_steps > 0 and i >= args.max_eval_steps:
break
ret = model(data, target, *mems)
loss, mems = ret[0], ret[1:]
loss = loss.mean()
total_loss += seq_len * loss.float().item()
total_len += seq_len
# Switch back to the training mode
model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
model.train()
return total_loss / total_len
def train():
# Turn on training mode which enables dropout.
global train_step, train_loss, best_val_loss, eval_start_time, log_start_time
model.train()
if args.batch_chunk > 1:
mems = [tuple() for _ in range(args.batch_chunk)]
else:
mems = tuple()
train_iter = tr_iter.get_varlen_iter() if args.varlen else tr_iter
for batch, (data, target, seq_len) in enumerate(train_iter):
model.zero_grad()
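        # When batch_chunk > 1, the batch is split along dim 1 (the batch
        # dimension, matching the dim=1 DataParallel setup above) and gradients
        # are accumulated over the chunks before a single optimizer step,
        # trading throughput for lower peak memory.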
if args.batch_chunk > 1:
data_chunks = torch.chunk(data, args.batch_chunk, 1)
target_chunks = torch.chunk(target, args.batch_chunk, 1)
for i in range(args.batch_chunk):
data_i = data_chunks[i].contiguous()
target_i = target_chunks[i].contiguous()
ret = para_model(data_i, target_i, *mems[i])
loss, mems[i] = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss) / args.batch_chunk
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
else:
ret = para_model(data, target, *mems)
loss, mems = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss)
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
if args.fp16:
optimizer.clip_master_grads(args.clip)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
if args.sample_softmax > 0:
optimizer_sparse.step()
# step-wise learning rate annealing
train_step += 1
if args.scheduler in ['cosine', 'constant', 'dev_perf']:
# linear warmup stage
if train_step < args.warmup_step:
curr_lr = args.lr * train_step / args.warmup_step
optimizer.param_groups[0]['lr'] = curr_lr
if args.sample_softmax > 0:
optimizer_sparse.param_groups[0]['lr'] = curr_lr * 2
else:
if args.scheduler == 'cosine':
scheduler.step(train_step)
if args.sample_softmax > 0:
scheduler_sparse.step(train_step)
elif args.scheduler == 'inv_sqrt':
scheduler.step(train_step)
if train_step % args.log_interval == 0:
cur_loss = train_loss / args.log_interval
elapsed = time.time() - log_start_time
log_str = '| epoch {:3d} step {:>8d} | {:>6d} batches | lr {:.3g} ' \
'| ms/batch {:5.2f} | loss {:5.2f}'.format(
epoch, train_step, batch+1, optimizer.param_groups[0]['lr'],
elapsed * 1000 / args.log_interval, cur_loss)
if args.dataset in ['enwik8', 'text8']:
log_str += ' | bpc {:9.5f}'.format(cur_loss / math.log(2))
else:
log_str += ' | ppl {:9.3f}'.format(math.exp(cur_loss))
logging(log_str)
train_loss = 0
log_start_time = time.time()
if train_step % args.eval_interval == 0:
val_loss = evaluate(va_iter)
logging('-' * 100)
log_str = '| Eval {:3d} at step {:>8d} | time: {:5.2f}s ' \
'| valid loss {:5.2f}'.format(
train_step // args.eval_interval, train_step,
(time.time() - eval_start_time), val_loss)
if args.dataset in ['enwik8', 'text8']:
log_str += ' | bpc {:9.5f}'.format(val_loss / math.log(2))
else:
log_str += ' | valid ppl {:9.3f}'.format(math.exp(val_loss))
logging(log_str)
logging('-' * 100)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
if not args.debug:
with open(os.path.join(args.work_dir, 'model.pt'), 'wb') as f:
torch.save(model, f)
with open(os.path.join(args.work_dir, 'optimizer.pt'), 'wb') as f:
torch.save(optimizer.state_dict(), f)
best_val_loss = val_loss
# dev-performance based learning rate annealing
if args.scheduler == 'dev_perf':
scheduler.step(val_loss)
if args.sample_softmax > 0:
scheduler_sparse.step(val_loss)
eval_start_time = time.time()
if train_step == args.max_step:
break
# Loop over epochs.
train_step = 0
train_loss = 0
best_val_loss = None
log_start_time = time.time()
eval_start_time = time.time()
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in itertools.count(start=1):
train()
if train_step == args.max_step:
logging('-' * 100)
logging('End of training')
break
except KeyboardInterrupt:
logging('-' * 100)
logging('Exiting from training early')
# Load the best saved model.
with open(os.path.join(args.work_dir, 'model.pt'), 'rb') as f:
model = torch.load(f)
para_model = model.to(device)
# Run on test data.
test_loss = evaluate(te_iter)
logging('=' * 100)
if args.dataset in ['enwik8', 'text8']:
logging('| End of training | test loss {:5.2f} | test bpc {:9.5f}'.format(
test_loss, test_loss / math.log(2)))
else:
logging('| End of training | test loss {:5.2f} | test ppl {:9.3f}'.format(
test_loss, math.exp(test_loss)))
logging('=' * 100)
# === File: CursoEmVideo-Python3-Mundo2/desafio050.py (martinsnathalia/Python, MIT) ===
# Write a program that reads six integers and shows the sum of only those that are even. If the value entered is odd, ignore it.
soma = 0
cont = 0
for c in range(1, 7):
    num = int(input('Enter number {}: '.format(c)))
    if num % 2 == 0:
        soma += num
        cont += 1
print('You entered {} even numbers and their sum is {}.'.format(cont, soma))
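# Sample run (illustrative): entering 1, 2, 3, 4, 5, 6 reports 3 even numbers
# with a sum of 12 (2 + 4 + 6).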
# === File: 2.GiantBook/MyUnionFind.py (VladAlexandruIlie/FoundationsOfComputing, MIT) ===
class MyUnionFind:
"""
This is an implementation of the union-find data structure - see module documentation for
more info.
This implementation uses quick find. Initializing a data structure with n sites takes linear time.
Afterwards, the find, connected, and count operations take constant time but the union operation
takes linear time.
For additional documentation, see Section 1.5 of Algorithms, 4th Edition by Robert Sedgewick and Kevin Wayne.
"""
def __init__(self, n):
"""
Initializes an empty union-find data structure with n sites,
0 through n-1. Each site is initially in its own component.
:param n: the number of sites
"""
self._count = n
self._id = list(range(n))
self._isolates = n
self._biggestComponent = 0
def _validate(self, p):
# validate that p is a valid index
n = len(self._id)
if p < 0 or p >= n:
raise ValueError('index {} is not between 0 and {}'.format(p, n))
def union(self, p, q):
"""
Merges the component containing site p with the
component containing site q.
:param p: the integer representing one site
:param q: the integer representing the other site
"""
self._validate(p)
self._validate(q)
p_id = self._id[p] # needed for correctness
q_id = self._id[q] # to reduce the number of array accesses
# p and q are already in the same component
if p_id == q_id:
return
#print(p, p_id, self.isIsolated(p))
#print(q, q_id, self.isIsolated(q))
if p_id == p and self.isIsolated(p) == 1:
self._isolates -= 1
if q_id == q and self.isIsolated(q) == 1:
self._isolates -= 1
        size = 0  # size of the merged component; every member of both components is counted below
for i in range(len(self._id)):
if self._id[i] == p_id:
size += 1
self._id[i] = q_id
else:
if self._id[i] == q_id:
size += 1
if size > self._biggestComponent:
self._biggestComponent = size
self._count -= 1
    def isIsolated(self, p):
        """Returns 1 if site p is the only member of its component, 0 otherwise (linear scan)."""
        for i in range(len(self._id)):
            if self._id[i] == self._id[p] and i != p:
                return 0
        return 1
def find(self, p):
"""
Returns the component identifier for the component containing site p.
:param p: the integer representing one site
:return: the component identifier for the component containing site p
"""
self._validate(p)
return self._id[p]
def connected(self, p, q):
"""
Returns true if the two sites are in the same component.
:param p: the integer representing one site
:param q: the integer representing the other site
:return: true if the two sites p and q are in the same component; false otherwise
"""
self._validate(p)
self._validate(q)
return self._id[p] == self._id[q]
def count(self):
return self._count
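# Minimal usage sketch (illustrative; not part of the original module):
if __name__ == '__main__':
    uf = MyUnionFind(5)        # five singleton components
    uf.union(0, 1)
    uf.union(1, 2)
    assert uf.connected(0, 2)  # 0 and 2 were merged through 1
    assert uf.count() == 3     # remaining components: {0, 1, 2}, {3}, {4}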
# === File: netbox/dcim/tests/test_api.py (habalux/netbox, Apache-2.0) ===
from django.urls import reverse
from netaddr import IPNetwork
from rest_framework import status
from circuits.models import Circuit, CircuitTermination, CircuitType, Provider
from dcim.constants import *
from dcim.models import (
Cable, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,
DeviceBayTemplate, DeviceRole, DeviceType, FrontPort, Interface, InterfaceTemplate, Manufacturer,
InventoryItem, Platform, PowerPort, PowerPortTemplate, PowerOutlet, PowerOutletTemplate, Rack, RackGroup,
RackReservation, RackRole, RearPort, Region, Site, VirtualChassis,
)
from ipam.models import IPAddress, VLAN
from extras.models import Graph, GRAPH_TYPE_INTERFACE, GRAPH_TYPE_SITE
from utilities.testing import APITestCase
from virtualization.models import Cluster, ClusterType
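# Most test classes below follow the same CRUD pattern: seed three objects in
# setUp(), then exercise the detail view, list view (usually also with ?brief=1),
# single create, bulk create, update, and delete endpoints, asserting on HTTP
# status codes and persisted field values.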
class RegionTest(APITestCase):
def setUp(self):
super().setUp()
self.region1 = Region.objects.create(name='Test Region 1', slug='test-region-1')
self.region2 = Region.objects.create(name='Test Region 2', slug='test-region-2')
self.region3 = Region.objects.create(name='Test Region 3', slug='test-region-3')
def test_get_region(self):
url = reverse('dcim-api:region-detail', kwargs={'pk': self.region1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.region1.name)
def test_list_regions(self):
url = reverse('dcim-api:region-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_regions_brief(self):
url = reverse('dcim-api:region-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'name', 'slug', 'url']
)
def test_create_region(self):
data = {
'name': 'Test Region 4',
'slug': 'test-region-4',
}
url = reverse('dcim-api:region-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Region.objects.count(), 4)
region4 = Region.objects.get(pk=response.data['id'])
self.assertEqual(region4.name, data['name'])
self.assertEqual(region4.slug, data['slug'])
def test_create_region_bulk(self):
data = [
{
'name': 'Test Region 4',
'slug': 'test-region-4',
},
{
'name': 'Test Region 5',
'slug': 'test-region-5',
},
{
'name': 'Test Region 6',
'slug': 'test-region-6',
},
]
url = reverse('dcim-api:region-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Region.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_region(self):
data = {
'name': 'Test Region X',
'slug': 'test-region-x',
}
url = reverse('dcim-api:region-detail', kwargs={'pk': self.region1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Region.objects.count(), 3)
region1 = Region.objects.get(pk=response.data['id'])
self.assertEqual(region1.name, data['name'])
self.assertEqual(region1.slug, data['slug'])
def test_delete_region(self):
url = reverse('dcim-api:region-detail', kwargs={'pk': self.region1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Region.objects.count(), 2)
class SiteTest(APITestCase):
def setUp(self):
super().setUp()
self.region1 = Region.objects.create(name='Test Region 1', slug='test-region-1')
self.region2 = Region.objects.create(name='Test Region 2', slug='test-region-2')
self.site1 = Site.objects.create(region=self.region1, name='Test Site 1', slug='test-site-1')
self.site2 = Site.objects.create(region=self.region1, name='Test Site 2', slug='test-site-2')
self.site3 = Site.objects.create(region=self.region1, name='Test Site 3', slug='test-site-3')
def test_get_site(self):
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.site1.name)
def test_get_site_graphs(self):
self.graph1 = Graph.objects.create(
type=GRAPH_TYPE_SITE, name='Test Graph 1',
source='http://example.com/graphs.py?site={{ obj.slug }}&foo=1'
)
self.graph2 = Graph.objects.create(
type=GRAPH_TYPE_SITE, name='Test Graph 2',
source='http://example.com/graphs.py?site={{ obj.slug }}&foo=2'
)
self.graph3 = Graph.objects.create(
type=GRAPH_TYPE_SITE, name='Test Graph 3',
source='http://example.com/graphs.py?site={{ obj.slug }}&foo=3'
)
url = reverse('dcim-api:site-graphs', kwargs={'pk': self.site1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(len(response.data), 3)
self.assertEqual(response.data[0]['embed_url'], 'http://example.com/graphs.py?site=test-site-1&foo=1')
def test_list_sites(self):
url = reverse('dcim-api:site-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_sites_brief(self):
url = reverse('dcim-api:site-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'name', 'slug', 'url']
)
def test_create_site(self):
data = {
'name': 'Test Site 4',
'slug': 'test-site-4',
'region': self.region1.pk,
'status': SITE_STATUS_ACTIVE,
}
url = reverse('dcim-api:site-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Site.objects.count(), 4)
site4 = Site.objects.get(pk=response.data['id'])
self.assertEqual(site4.name, data['name'])
self.assertEqual(site4.slug, data['slug'])
self.assertEqual(site4.region_id, data['region'])
def test_create_site_bulk(self):
data = [
{
'name': 'Test Site 4',
'slug': 'test-site-4',
'region': self.region1.pk,
'status': SITE_STATUS_ACTIVE,
},
{
'name': 'Test Site 5',
'slug': 'test-site-5',
'region': self.region1.pk,
'status': SITE_STATUS_ACTIVE,
},
{
'name': 'Test Site 6',
'slug': 'test-site-6',
'region': self.region1.pk,
'status': SITE_STATUS_ACTIVE,
},
]
url = reverse('dcim-api:site-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Site.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_site(self):
data = {
'name': 'Test Site X',
'slug': 'test-site-x',
'region': self.region2.pk,
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Site.objects.count(), 3)
site1 = Site.objects.get(pk=response.data['id'])
self.assertEqual(site1.name, data['name'])
self.assertEqual(site1.slug, data['slug'])
self.assertEqual(site1.region_id, data['region'])
def test_delete_site(self):
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Site.objects.count(), 2)
class RackGroupTest(APITestCase):
def setUp(self):
super().setUp()
self.site1 = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.site2 = Site.objects.create(name='Test Site 2', slug='test-site-2')
self.rackgroup1 = RackGroup.objects.create(site=self.site1, name='Test Rack Group 1', slug='test-rack-group-1')
self.rackgroup2 = RackGroup.objects.create(site=self.site1, name='Test Rack Group 2', slug='test-rack-group-2')
self.rackgroup3 = RackGroup.objects.create(site=self.site1, name='Test Rack Group 3', slug='test-rack-group-3')
def test_get_rackgroup(self):
url = reverse('dcim-api:rackgroup-detail', kwargs={'pk': self.rackgroup1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.rackgroup1.name)
def test_list_rackgroups(self):
url = reverse('dcim-api:rackgroup-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_rackgroups_brief(self):
url = reverse('dcim-api:rackgroup-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'name', 'slug', 'url']
)
def test_create_rackgroup(self):
data = {
'name': 'Test Rack Group 4',
'slug': 'test-rack-group-4',
'site': self.site1.pk,
}
url = reverse('dcim-api:rackgroup-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(RackGroup.objects.count(), 4)
rackgroup4 = RackGroup.objects.get(pk=response.data['id'])
self.assertEqual(rackgroup4.name, data['name'])
self.assertEqual(rackgroup4.slug, data['slug'])
self.assertEqual(rackgroup4.site_id, data['site'])
def test_create_rackgroup_bulk(self):
data = [
{
'name': 'Test Rack Group 4',
'slug': 'test-rack-group-4',
'site': self.site1.pk,
},
{
'name': 'Test Rack Group 5',
'slug': 'test-rack-group-5',
'site': self.site1.pk,
},
{
'name': 'Test Rack Group 6',
'slug': 'test-rack-group-6',
'site': self.site1.pk,
},
]
url = reverse('dcim-api:rackgroup-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(RackGroup.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_rackgroup(self):
data = {
'name': 'Test Rack Group X',
'slug': 'test-rack-group-x',
'site': self.site2.pk,
}
url = reverse('dcim-api:rackgroup-detail', kwargs={'pk': self.rackgroup1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(RackGroup.objects.count(), 3)
rackgroup1 = RackGroup.objects.get(pk=response.data['id'])
self.assertEqual(rackgroup1.name, data['name'])
self.assertEqual(rackgroup1.slug, data['slug'])
self.assertEqual(rackgroup1.site_id, data['site'])
def test_delete_rackgroup(self):
url = reverse('dcim-api:rackgroup-detail', kwargs={'pk': self.rackgroup1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(RackGroup.objects.count(), 2)
class RackRoleTest(APITestCase):
def setUp(self):
super().setUp()
self.rackrole1 = RackRole.objects.create(name='Test Rack Role 1', slug='test-rack-role-1', color='ff0000')
self.rackrole2 = RackRole.objects.create(name='Test Rack Role 2', slug='test-rack-role-2', color='00ff00')
self.rackrole3 = RackRole.objects.create(name='Test Rack Role 3', slug='test-rack-role-3', color='0000ff')
def test_get_rackrole(self):
url = reverse('dcim-api:rackrole-detail', kwargs={'pk': self.rackrole1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.rackrole1.name)
def test_list_rackroles(self):
url = reverse('dcim-api:rackrole-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_rackroles_brief(self):
url = reverse('dcim-api:rackrole-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'name', 'slug', 'url']
)
def test_create_rackrole(self):
data = {
'name': 'Test Rack Role 4',
'slug': 'test-rack-role-4',
'color': 'ffff00',
}
url = reverse('dcim-api:rackrole-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(RackRole.objects.count(), 4)
rackrole1 = RackRole.objects.get(pk=response.data['id'])
self.assertEqual(rackrole1.name, data['name'])
self.assertEqual(rackrole1.slug, data['slug'])
self.assertEqual(rackrole1.color, data['color'])
def test_create_rackrole_bulk(self):
data = [
{
'name': 'Test Rack Role 4',
'slug': 'test-rack-role-4',
'color': 'ffff00',
},
{
'name': 'Test Rack Role 5',
'slug': 'test-rack-role-5',
'color': 'ffff00',
},
{
'name': 'Test Rack Role 6',
'slug': 'test-rack-role-6',
'color': 'ffff00',
},
]
url = reverse('dcim-api:rackrole-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(RackRole.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_rackrole(self):
data = {
'name': 'Test Rack Role X',
'slug': 'test-rack-role-x',
'color': 'ffff00',
}
url = reverse('dcim-api:rackrole-detail', kwargs={'pk': self.rackrole1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(RackRole.objects.count(), 3)
rackrole1 = RackRole.objects.get(pk=response.data['id'])
self.assertEqual(rackrole1.name, data['name'])
self.assertEqual(rackrole1.slug, data['slug'])
self.assertEqual(rackrole1.color, data['color'])
def test_delete_rackrole(self):
url = reverse('dcim-api:rackrole-detail', kwargs={'pk': self.rackrole1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(RackRole.objects.count(), 2)
class RackTest(APITestCase):
def setUp(self):
super().setUp()
self.site1 = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.site2 = Site.objects.create(name='Test Site 2', slug='test-site-2')
self.rackgroup1 = RackGroup.objects.create(site=self.site1, name='Test Rack Group 1', slug='test-rack-group-1')
self.rackgroup2 = RackGroup.objects.create(site=self.site2, name='Test Rack Group 2', slug='test-rack-group-2')
self.rackrole1 = RackRole.objects.create(name='Test Rack Role 1', slug='test-rack-role-1', color='ff0000')
self.rackrole2 = RackRole.objects.create(name='Test Rack Role 2', slug='test-rack-role-2', color='00ff00')
self.rack1 = Rack.objects.create(
site=self.site1, group=self.rackgroup1, role=self.rackrole1, name='Test Rack 1', u_height=42,
)
self.rack2 = Rack.objects.create(
site=self.site1, group=self.rackgroup1, role=self.rackrole1, name='Test Rack 2', u_height=42,
)
self.rack3 = Rack.objects.create(
site=self.site1, group=self.rackgroup1, role=self.rackrole1, name='Test Rack 3', u_height=42,
)
def test_get_rack(self):
url = reverse('dcim-api:rack-detail', kwargs={'pk': self.rack1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.rack1.name)
def test_get_rack_units(self):
url = reverse('dcim-api:rack-units', kwargs={'pk': self.rack1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 42)
def test_list_racks(self):
url = reverse('dcim-api:rack-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_racks_brief(self):
url = reverse('dcim-api:rack-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['display_name', 'id', 'name', 'url']
)
def test_create_rack(self):
data = {
'name': 'Test Rack 4',
'site': self.site1.pk,
'group': self.rackgroup1.pk,
'role': self.rackrole1.pk,
}
url = reverse('dcim-api:rack-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Rack.objects.count(), 4)
rack4 = Rack.objects.get(pk=response.data['id'])
self.assertEqual(rack4.name, data['name'])
self.assertEqual(rack4.site_id, data['site'])
self.assertEqual(rack4.group_id, data['group'])
self.assertEqual(rack4.role_id, data['role'])
def test_create_rack_bulk(self):
data = [
{
'name': 'Test Rack 4',
'site': self.site1.pk,
'group': self.rackgroup1.pk,
'role': self.rackrole1.pk,
},
{
'name': 'Test Rack 5',
'site': self.site1.pk,
'group': self.rackgroup1.pk,
'role': self.rackrole1.pk,
},
{
'name': 'Test Rack 6',
'site': self.site1.pk,
'group': self.rackgroup1.pk,
'role': self.rackrole1.pk,
},
]
url = reverse('dcim-api:rack-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Rack.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_rack(self):
data = {
'name': 'Test Rack X',
'site': self.site2.pk,
'group': self.rackgroup2.pk,
'role': self.rackrole2.pk,
}
url = reverse('dcim-api:rack-detail', kwargs={'pk': self.rack1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Rack.objects.count(), 3)
rack1 = Rack.objects.get(pk=response.data['id'])
self.assertEqual(rack1.name, data['name'])
self.assertEqual(rack1.site_id, data['site'])
self.assertEqual(rack1.group_id, data['group'])
self.assertEqual(rack1.role_id, data['role'])
def test_delete_rack(self):
url = reverse('dcim-api:rack-detail', kwargs={'pk': self.rack1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Rack.objects.count(), 2)
class RackReservationTest(APITestCase):
def setUp(self):
super().setUp()
self.site1 = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.rack1 = Rack.objects.create(site=self.site1, name='Test Rack 1')
self.rackreservation1 = RackReservation.objects.create(
rack=self.rack1, units=[1, 2, 3], user=self.user, description='Reservation #1',
)
self.rackreservation2 = RackReservation.objects.create(
rack=self.rack1, units=[4, 5, 6], user=self.user, description='Reservation #2',
)
self.rackreservation3 = RackReservation.objects.create(
rack=self.rack1, units=[7, 8, 9], user=self.user, description='Reservation #3',
)
def test_get_rackreservation(self):
url = reverse('dcim-api:rackreservation-detail', kwargs={'pk': self.rackreservation1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['id'], self.rackreservation1.pk)
def test_list_rackreservations(self):
url = reverse('dcim-api:rackreservation-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_rackreservation(self):
data = {
'rack': self.rack1.pk,
'units': [10, 11, 12],
'user': self.user.pk,
'description': 'Fourth reservation',
}
url = reverse('dcim-api:rackreservation-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(RackReservation.objects.count(), 4)
rackreservation4 = RackReservation.objects.get(pk=response.data['id'])
self.assertEqual(rackreservation4.rack_id, data['rack'])
self.assertEqual(rackreservation4.units, data['units'])
self.assertEqual(rackreservation4.user_id, data['user'])
self.assertEqual(rackreservation4.description, data['description'])
def test_create_rackreservation_bulk(self):
data = [
{
'rack': self.rack1.pk,
'units': [10, 11, 12],
'user': self.user.pk,
'description': 'Reservation #4',
},
{
'rack': self.rack1.pk,
'units': [13, 14, 15],
'user': self.user.pk,
'description': 'Reservation #5',
},
{
'rack': self.rack1.pk,
'units': [16, 17, 18],
'user': self.user.pk,
'description': 'Reservation #6',
},
]
url = reverse('dcim-api:rackreservation-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(RackReservation.objects.count(), 6)
self.assertEqual(response.data[0]['description'], data[0]['description'])
self.assertEqual(response.data[1]['description'], data[1]['description'])
self.assertEqual(response.data[2]['description'], data[2]['description'])
def test_update_rackreservation(self):
data = {
'rack': self.rack1.pk,
'units': [10, 11, 12],
'user': self.user.pk,
'description': 'Modified reservation',
}
url = reverse('dcim-api:rackreservation-detail', kwargs={'pk': self.rackreservation1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(RackReservation.objects.count(), 3)
rackreservation1 = RackReservation.objects.get(pk=response.data['id'])
self.assertEqual(rackreservation1.units, data['units'])
self.assertEqual(rackreservation1.description, data['description'])
def test_delete_rackreservation(self):
url = reverse('dcim-api:rackreservation-detail', kwargs={'pk': self.rackreservation1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(RackReservation.objects.count(), 2)
class ManufacturerTest(APITestCase):
def setUp(self):
super().setUp()
self.manufacturer1 = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.manufacturer2 = Manufacturer.objects.create(name='Test Manufacturer 2', slug='test-manufacturer-2')
self.manufacturer3 = Manufacturer.objects.create(name='Test Manufacturer 3', slug='test-manufacturer-3')
def test_get_manufacturer(self):
url = reverse('dcim-api:manufacturer-detail', kwargs={'pk': self.manufacturer1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.manufacturer1.name)
def test_list_manufacturers(self):
url = reverse('dcim-api:manufacturer-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_manufacturers_brief(self):
url = reverse('dcim-api:manufacturer-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'name', 'slug', 'url']
)
def test_create_manufacturer(self):
data = {
'name': 'Test Manufacturer 4',
'slug': 'test-manufacturer-4',
}
url = reverse('dcim-api:manufacturer-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Manufacturer.objects.count(), 4)
manufacturer4 = Manufacturer.objects.get(pk=response.data['id'])
self.assertEqual(manufacturer4.name, data['name'])
self.assertEqual(manufacturer4.slug, data['slug'])
def test_create_manufacturer_bulk(self):
data = [
{
'name': 'Test Manufacturer 4',
'slug': 'test-manufacturer-4',
},
{
'name': 'Test Manufacturer 5',
'slug': 'test-manufacturer-5',
},
{
'name': 'Test Manufacturer 6',
'slug': 'test-manufacturer-6',
},
]
url = reverse('dcim-api:manufacturer-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Manufacturer.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_manufacturer(self):
data = {
'name': 'Test Manufacturer X',
'slug': 'test-manufacturer-x',
}
url = reverse('dcim-api:manufacturer-detail', kwargs={'pk': self.manufacturer1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Manufacturer.objects.count(), 3)
manufacturer1 = Manufacturer.objects.get(pk=response.data['id'])
self.assertEqual(manufacturer1.name, data['name'])
self.assertEqual(manufacturer1.slug, data['slug'])
def test_delete_manufacturer(self):
url = reverse('dcim-api:manufacturer-detail', kwargs={'pk': self.manufacturer1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Manufacturer.objects.count(), 2)
class DeviceTypeTest(APITestCase):
def setUp(self):
super().setUp()
self.manufacturer1 = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.manufacturer2 = Manufacturer.objects.create(name='Test Manufacturer 2', slug='test-manufacturer-2')
self.devicetype1 = DeviceType.objects.create(
manufacturer=self.manufacturer1, model='Test Device Type 1', slug='test-device-type-1'
)
self.devicetype2 = DeviceType.objects.create(
manufacturer=self.manufacturer1, model='Test Device Type 2', slug='test-device-type-2'
)
self.devicetype3 = DeviceType.objects.create(
manufacturer=self.manufacturer1, model='Test Device Type 3', slug='test-device-type-3'
)
def test_get_devicetype(self):
url = reverse('dcim-api:devicetype-detail', kwargs={'pk': self.devicetype1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['model'], self.devicetype1.model)
def test_list_devicetypes(self):
url = reverse('dcim-api:devicetype-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_devicetypes_brief(self):
url = reverse('dcim-api:devicetype-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['display_name', 'id', 'manufacturer', 'model', 'slug', 'url']
)
def test_create_devicetype(self):
data = {
'manufacturer': self.manufacturer1.pk,
'model': 'Test Device Type 4',
'slug': 'test-device-type-4',
}
url = reverse('dcim-api:devicetype-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceType.objects.count(), 4)
devicetype4 = DeviceType.objects.get(pk=response.data['id'])
self.assertEqual(devicetype4.manufacturer_id, data['manufacturer'])
self.assertEqual(devicetype4.model, data['model'])
self.assertEqual(devicetype4.slug, data['slug'])
def test_create_devicetype_bulk(self):
data = [
{
'manufacturer': self.manufacturer1.pk,
'model': 'Test Device Type 4',
'slug': 'test-device-type-4',
},
{
'manufacturer': self.manufacturer1.pk,
'model': 'Test Device Type 5',
'slug': 'test-device-type-5',
},
{
'manufacturer': self.manufacturer1.pk,
'model': 'Test Device Type 6',
'slug': 'test-device-type-6',
},
]
url = reverse('dcim-api:devicetype-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceType.objects.count(), 6)
self.assertEqual(response.data[0]['model'], data[0]['model'])
self.assertEqual(response.data[1]['model'], data[1]['model'])
self.assertEqual(response.data[2]['model'], data[2]['model'])
def test_update_devicetype(self):
data = {
'manufacturer': self.manufacturer2.pk,
'model': 'Test Device Type X',
'slug': 'test-device-type-x',
}
url = reverse('dcim-api:devicetype-detail', kwargs={'pk': self.devicetype1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(DeviceType.objects.count(), 3)
devicetype1 = DeviceType.objects.get(pk=response.data['id'])
self.assertEqual(devicetype1.manufacturer_id, data['manufacturer'])
self.assertEqual(devicetype1.model, data['model'])
self.assertEqual(devicetype1.slug, data['slug'])
def test_delete_devicetype(self):
url = reverse('dcim-api:devicetype-detail', kwargs={'pk': self.devicetype1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(DeviceType.objects.count(), 2)
class ConsolePortTemplateTest(APITestCase):
def setUp(self):
super().setUp()
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.consoleporttemplate1 = ConsolePortTemplate.objects.create(
device_type=self.devicetype, name='Test CP Template 1'
)
self.consoleporttemplate2 = ConsolePortTemplate.objects.create(
device_type=self.devicetype, name='Test CP Template 2'
)
self.consoleporttemplate3 = ConsolePortTemplate.objects.create(
device_type=self.devicetype, name='Test CP Template 3'
)
def test_get_consoleporttemplate(self):
url = reverse('dcim-api:consoleporttemplate-detail', kwargs={'pk': self.consoleporttemplate1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.consoleporttemplate1.name)
def test_list_consoleporttemplates(self):
url = reverse('dcim-api:consoleporttemplate-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_consoleporttemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test CP Template 4',
}
url = reverse('dcim-api:consoleporttemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsolePortTemplate.objects.count(), 4)
consoleporttemplate4 = ConsolePortTemplate.objects.get(pk=response.data['id'])
self.assertEqual(consoleporttemplate4.device_type_id, data['device_type'])
self.assertEqual(consoleporttemplate4.name, data['name'])
def test_create_consoleporttemplate_bulk(self):
data = [
{
'device_type': self.devicetype.pk,
'name': 'Test CP Template 4',
},
{
'device_type': self.devicetype.pk,
'name': 'Test CP Template 5',
},
{
'device_type': self.devicetype.pk,
'name': 'Test CP Template 6',
},
]
url = reverse('dcim-api:consoleporttemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsolePortTemplate.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_consoleporttemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test CP Template X',
}
url = reverse('dcim-api:consoleporttemplate-detail', kwargs={'pk': self.consoleporttemplate1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(ConsolePortTemplate.objects.count(), 3)
consoleporttemplate1 = ConsolePortTemplate.objects.get(pk=response.data['id'])
self.assertEqual(consoleporttemplate1.name, data['name'])
def test_delete_consoleporttemplate(self):
url = reverse('dcim-api:consoleporttemplate-detail', kwargs={'pk': self.consoleporttemplate1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(ConsolePortTemplate.objects.count(), 2)
class ConsoleServerPortTemplateTest(APITestCase):
def setUp(self):
super().setUp()
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.consoleserverporttemplate1 = ConsoleServerPortTemplate.objects.create(
device_type=self.devicetype, name='Test CSP Template 1'
)
self.consoleserverporttemplate2 = ConsoleServerPortTemplate.objects.create(
device_type=self.devicetype, name='Test CSP Template 2'
)
self.consoleserverporttemplate3 = ConsoleServerPortTemplate.objects.create(
device_type=self.devicetype, name='Test CSP Template 3'
)
def test_get_consoleserverporttemplate(self):
url = reverse('dcim-api:consoleserverporttemplate-detail', kwargs={'pk': self.consoleserverporttemplate1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.consoleserverporttemplate1.name)
def test_list_consoleserverporttemplates(self):
url = reverse('dcim-api:consoleserverporttemplate-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_consoleserverporttemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test CSP Template 4',
}
url = reverse('dcim-api:consoleserverporttemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsoleServerPortTemplate.objects.count(), 4)
consoleserverporttemplate4 = ConsoleServerPortTemplate.objects.get(pk=response.data['id'])
self.assertEqual(consoleserverporttemplate4.device_type_id, data['device_type'])
self.assertEqual(consoleserverporttemplate4.name, data['name'])
def test_create_consoleserverporttemplate_bulk(self):
data = [
{
'device_type': self.devicetype.pk,
'name': 'Test CSP Template 4',
},
{
'device_type': self.devicetype.pk,
'name': 'Test CSP Template 5',
},
{
'device_type': self.devicetype.pk,
'name': 'Test CSP Template 6',
},
]
url = reverse('dcim-api:consoleserverporttemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsoleServerPortTemplate.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_consoleserverporttemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test CSP Template X',
}
url = reverse('dcim-api:consoleserverporttemplate-detail', kwargs={'pk': self.consoleserverporttemplate1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(ConsoleServerPortTemplate.objects.count(), 3)
consoleserverporttemplate1 = ConsoleServerPortTemplate.objects.get(pk=response.data['id'])
self.assertEqual(consoleserverporttemplate1.name, data['name'])
def test_delete_consoleserverporttemplate(self):
url = reverse('dcim-api:consoleserverporttemplate-detail', kwargs={'pk': self.consoleserverporttemplate1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(ConsoleServerPortTemplate.objects.count(), 2)
class PowerPortTemplateTest(APITestCase):
def setUp(self):
super().setUp()
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.powerporttemplate1 = PowerPortTemplate.objects.create(
device_type=self.devicetype, name='Test PP Template 1'
)
self.powerporttemplate2 = PowerPortTemplate.objects.create(
device_type=self.devicetype, name='Test PP Template 2'
)
self.powerporttemplate3 = PowerPortTemplate.objects.create(
device_type=self.devicetype, name='Test PP Template 3'
)
def test_get_powerporttemplate(self):
url = reverse('dcim-api:powerporttemplate-detail', kwargs={'pk': self.powerporttemplate1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.powerporttemplate1.name)
def test_list_powerporttemplates(self):
url = reverse('dcim-api:powerporttemplate-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_powerporttemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test PP Template 4',
}
url = reverse('dcim-api:powerporttemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerPortTemplate.objects.count(), 4)
powerporttemplate4 = PowerPortTemplate.objects.get(pk=response.data['id'])
self.assertEqual(powerporttemplate4.device_type_id, data['device_type'])
self.assertEqual(powerporttemplate4.name, data['name'])
def test_create_powerporttemplate_bulk(self):
data = [
{
'device_type': self.devicetype.pk,
'name': 'Test PP Template 4',
},
{
'device_type': self.devicetype.pk,
'name': 'Test PP Template 5',
},
{
'device_type': self.devicetype.pk,
'name': 'Test PP Template 6',
},
]
url = reverse('dcim-api:powerporttemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerPortTemplate.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_powerporttemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test PP Template X',
}
url = reverse('dcim-api:powerporttemplate-detail', kwargs={'pk': self.powerporttemplate1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(PowerPortTemplate.objects.count(), 3)
powerporttemplate1 = PowerPortTemplate.objects.get(pk=response.data['id'])
self.assertEqual(powerporttemplate1.name, data['name'])
def test_delete_powerporttemplate(self):
url = reverse('dcim-api:powerporttemplate-detail', kwargs={'pk': self.powerporttemplate1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(PowerPortTemplate.objects.count(), 2)
class PowerOutletTemplateTest(APITestCase):
def setUp(self):
super().setUp()
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.poweroutlettemplate1 = PowerOutletTemplate.objects.create(
device_type=self.devicetype, name='Test PO Template 1'
)
self.poweroutlettemplate2 = PowerOutletTemplate.objects.create(
device_type=self.devicetype, name='Test PO Template 2'
)
self.poweroutlettemplate3 = PowerOutletTemplate.objects.create(
device_type=self.devicetype, name='Test PO Template 3'
)
def test_get_poweroutlettemplate(self):
url = reverse('dcim-api:poweroutlettemplate-detail', kwargs={'pk': self.poweroutlettemplate1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.poweroutlettemplate1.name)
def test_list_poweroutlettemplates(self):
url = reverse('dcim-api:poweroutlettemplate-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_poweroutlettemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test PO Template 4',
}
url = reverse('dcim-api:poweroutlettemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerOutletTemplate.objects.count(), 4)
poweroutlettemplate4 = PowerOutletTemplate.objects.get(pk=response.data['id'])
self.assertEqual(poweroutlettemplate4.device_type_id, data['device_type'])
self.assertEqual(poweroutlettemplate4.name, data['name'])
def test_create_poweroutlettemplate_bulk(self):
data = [
{
'device_type': self.devicetype.pk,
'name': 'Test PO Template 4',
},
{
'device_type': self.devicetype.pk,
'name': 'Test PO Template 5',
},
{
'device_type': self.devicetype.pk,
'name': 'Test PO Template 6',
},
]
url = reverse('dcim-api:poweroutlettemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerOutletTemplate.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_poweroutlettemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test PO Template X',
}
url = reverse('dcim-api:poweroutlettemplate-detail', kwargs={'pk': self.poweroutlettemplate1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(PowerOutletTemplate.objects.count(), 3)
poweroutlettemplate1 = PowerOutletTemplate.objects.get(pk=response.data['id'])
self.assertEqual(poweroutlettemplate1.name, data['name'])
def test_delete_poweroutlettemplate(self):
url = reverse('dcim-api:poweroutlettemplate-detail', kwargs={'pk': self.poweroutlettemplate1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(PowerOutletTemplate.objects.count(), 2)


class InterfaceTemplateTest(APITestCase):
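    """CRUD API tests for InterfaceTemplate objects attached to a device type."""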
def setUp(self):
super().setUp()
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.interfacetemplate1 = InterfaceTemplate.objects.create(
device_type=self.devicetype, name='Test Interface Template 1'
)
self.interfacetemplate2 = InterfaceTemplate.objects.create(
device_type=self.devicetype, name='Test Interface Template 2'
)
self.interfacetemplate3 = InterfaceTemplate.objects.create(
device_type=self.devicetype, name='Test Interface Template 3'
)
def test_get_interfacetemplate(self):
url = reverse('dcim-api:interfacetemplate-detail', kwargs={'pk': self.interfacetemplate1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.interfacetemplate1.name)
def test_list_interfacetemplates(self):
url = reverse('dcim-api:interfacetemplate-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_interfacetemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test Interface Template 4',
}
url = reverse('dcim-api:interfacetemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(InterfaceTemplate.objects.count(), 4)
interfacetemplate4 = InterfaceTemplate.objects.get(pk=response.data['id'])
self.assertEqual(interfacetemplate4.device_type_id, data['device_type'])
self.assertEqual(interfacetemplate4.name, data['name'])
def test_create_interfacetemplate_bulk(self):
data = [
{
'device_type': self.devicetype.pk,
'name': 'Test Interface Template 4',
},
{
'device_type': self.devicetype.pk,
'name': 'Test Interface Template 5',
},
{
'device_type': self.devicetype.pk,
'name': 'Test Interface Template 6',
},
]
url = reverse('dcim-api:interfacetemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(InterfaceTemplate.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_interfacetemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test Interface Template X',
}
url = reverse('dcim-api:interfacetemplate-detail', kwargs={'pk': self.interfacetemplate1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(InterfaceTemplate.objects.count(), 3)
interfacetemplate1 = InterfaceTemplate.objects.get(pk=response.data['id'])
self.assertEqual(interfacetemplate1.name, data['name'])
def test_delete_interfacetemplate(self):
url = reverse('dcim-api:interfacetemplate-detail', kwargs={'pk': self.interfacetemplate1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(InterfaceTemplate.objects.count(), 2)


class DeviceBayTemplateTest(APITestCase):
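    """CRUD API tests for DeviceBayTemplate objects attached to a device type."""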
def setUp(self):
super().setUp()
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.devicebaytemplate1 = DeviceBayTemplate.objects.create(
device_type=self.devicetype, name='Test Device Bay Template 1'
)
self.devicebaytemplate2 = DeviceBayTemplate.objects.create(
device_type=self.devicetype, name='Test Device Bay Template 2'
)
self.devicebaytemplate3 = DeviceBayTemplate.objects.create(
device_type=self.devicetype, name='Test Device Bay Template 3'
)
def test_get_devicebaytemplate(self):
url = reverse('dcim-api:devicebaytemplate-detail', kwargs={'pk': self.devicebaytemplate1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.devicebaytemplate1.name)
def test_list_devicebaytemplates(self):
url = reverse('dcim-api:devicebaytemplate-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_devicebaytemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test Device Bay Template 4',
}
url = reverse('dcim-api:devicebaytemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceBayTemplate.objects.count(), 4)
devicebaytemplate4 = DeviceBayTemplate.objects.get(pk=response.data['id'])
self.assertEqual(devicebaytemplate4.device_type_id, data['device_type'])
self.assertEqual(devicebaytemplate4.name, data['name'])
def test_create_devicebaytemplate_bulk(self):
data = [
{
'device_type': self.devicetype.pk,
'name': 'Test Device Bay Template 4',
},
{
'device_type': self.devicetype.pk,
'name': 'Test Device Bay Template 5',
},
{
'device_type': self.devicetype.pk,
'name': 'Test Device Bay Template 6',
},
]
url = reverse('dcim-api:devicebaytemplate-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceBayTemplate.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_devicebaytemplate(self):
data = {
'device_type': self.devicetype.pk,
'name': 'Test Device Bay Template X',
}
url = reverse('dcim-api:devicebaytemplate-detail', kwargs={'pk': self.devicebaytemplate1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(DeviceBayTemplate.objects.count(), 3)
devicebaytemplate1 = DeviceBayTemplate.objects.get(pk=response.data['id'])
self.assertEqual(devicebaytemplate1.name, data['name'])
def test_delete_devicebaytemplate(self):
url = reverse('dcim-api:devicebaytemplate-detail', kwargs={'pk': self.devicebaytemplate1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(DeviceBayTemplate.objects.count(), 2)


class DeviceRoleTest(APITestCase):
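    """CRUD API tests for DeviceRole, including the abbreviated ?brief=1 list representation."""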
def setUp(self):
super().setUp()
self.devicerole1 = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.devicerole2 = DeviceRole.objects.create(
name='Test Device Role 2', slug='test-device-role-2', color='00ff00'
)
self.devicerole3 = DeviceRole.objects.create(
name='Test Device Role 3', slug='test-device-role-3', color='0000ff'
)
def test_get_devicerole(self):
url = reverse('dcim-api:devicerole-detail', kwargs={'pk': self.devicerole1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.devicerole1.name)
def test_list_deviceroles(self):
url = reverse('dcim-api:devicerole-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_deviceroles_brief(self):
url = reverse('dcim-api:devicerole-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'name', 'slug', 'url']
)
def test_create_devicerole(self):
data = {
'name': 'Test Device Role 4',
'slug': 'test-device-role-4',
'color': 'ffff00',
}
url = reverse('dcim-api:devicerole-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceRole.objects.count(), 4)
devicerole4 = DeviceRole.objects.get(pk=response.data['id'])
self.assertEqual(devicerole4.name, data['name'])
self.assertEqual(devicerole4.slug, data['slug'])
self.assertEqual(devicerole4.color, data['color'])
def test_create_devicerole_bulk(self):
data = [
{
'name': 'Test Device Role 4',
'slug': 'test-device-role-4',
'color': 'ffff00',
},
{
'name': 'Test Device Role 5',
'slug': 'test-device-role-5',
'color': 'ffff00',
},
{
'name': 'Test Device Role 6',
'slug': 'test-device-role-6',
'color': 'ffff00',
},
]
url = reverse('dcim-api:devicerole-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceRole.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_devicerole(self):
data = {
'name': 'Test Device Role X',
'slug': 'test-device-role-x',
'color': '00ffff',
}
url = reverse('dcim-api:devicerole-detail', kwargs={'pk': self.devicerole1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(DeviceRole.objects.count(), 3)
devicerole1 = DeviceRole.objects.get(pk=response.data['id'])
self.assertEqual(devicerole1.name, data['name'])
self.assertEqual(devicerole1.slug, data['slug'])
self.assertEqual(devicerole1.color, data['color'])
def test_delete_devicerole(self):
url = reverse('dcim-api:devicerole-detail', kwargs={'pk': self.devicerole1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(DeviceRole.objects.count(), 2)


class PlatformTest(APITestCase):
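    """CRUD API tests for Platform, including the abbreviated ?brief=1 list representation."""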
def setUp(self):
super().setUp()
self.platform1 = Platform.objects.create(name='Test Platform 1', slug='test-platform-1')
self.platform2 = Platform.objects.create(name='Test Platform 2', slug='test-platform-2')
self.platform3 = Platform.objects.create(name='Test Platform 3', slug='test-platform-3')
def test_get_platform(self):
url = reverse('dcim-api:platform-detail', kwargs={'pk': self.platform1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.platform1.name)
def test_list_platforms(self):
url = reverse('dcim-api:platform-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_platforms_brief(self):
url = reverse('dcim-api:platform-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'name', 'slug', 'url']
)
def test_create_platform(self):
data = {
'name': 'Test Platform 4',
'slug': 'test-platform-4',
}
url = reverse('dcim-api:platform-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Platform.objects.count(), 4)
platform4 = Platform.objects.get(pk=response.data['id'])
self.assertEqual(platform4.name, data['name'])
self.assertEqual(platform4.slug, data['slug'])
def test_create_platform_bulk(self):
data = [
{
'name': 'Test Platform 4',
'slug': 'test-platform-4',
},
{
'name': 'Test Platform 5',
'slug': 'test-platform-5',
},
{
'name': 'Test Platform 6',
'slug': 'test-platform-6',
},
]
url = reverse('dcim-api:platform-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Platform.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_platform(self):
data = {
'name': 'Test Platform X',
'slug': 'test-platform-x',
}
url = reverse('dcim-api:platform-detail', kwargs={'pk': self.platform1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Platform.objects.count(), 3)
platform1 = Platform.objects.get(pk=response.data['id'])
self.assertEqual(platform1.name, data['name'])
self.assertEqual(platform1.slug, data['slug'])
def test_delete_platform(self):
url = reverse('dcim-api:platform-detail', kwargs={'pk': self.platform1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Platform.objects.count(), 2)


class DeviceTest(APITestCase):
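    """CRUD API tests for Device, covering cluster assignment and primary IPv4/IPv6 address updates."""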
def setUp(self):
super().setUp()
self.site1 = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.site2 = Site.objects.create(name='Test Site 2', slug='test-site-2')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype1 = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.devicetype2 = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 2', slug='test-device-type-2'
)
self.devicerole1 = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.devicerole2 = DeviceRole.objects.create(
name='Test Device Role 2', slug='test-device-role-2', color='00ff00'
)
cluster_type = ClusterType.objects.create(name='Test Cluster Type 1', slug='test-cluster-type-1')
self.cluster1 = Cluster.objects.create(name='Test Cluster 1', type=cluster_type)
self.device1 = Device.objects.create(
device_type=self.devicetype1,
device_role=self.devicerole1,
name='Test Device 1',
site=self.site1,
cluster=self.cluster1
)
self.device2 = Device.objects.create(
device_type=self.devicetype1,
device_role=self.devicerole1,
name='Test Device 2',
site=self.site1,
cluster=self.cluster1
)
self.device3 = Device.objects.create(
device_type=self.devicetype1,
device_role=self.devicerole1,
name='Test Device 3',
site=self.site1,
cluster=self.cluster1
)
def test_get_device(self):
url = reverse('dcim-api:device-detail', kwargs={'pk': self.device1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.device1.name)
self.assertEqual(response.data['device_role']['id'], self.devicerole1.pk)
self.assertEqual(response.data['cluster']['id'], self.cluster1.pk)
def test_list_devices(self):
url = reverse('dcim-api:device-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_devices_brief(self):
url = reverse('dcim-api:device-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['display_name', 'id', 'name', 'url']
)
def test_create_device(self):
data = {
'device_type': self.devicetype1.pk,
'device_role': self.devicerole1.pk,
'name': 'Test Device 4',
'site': self.site1.pk,
'cluster': self.cluster1.pk,
}
url = reverse('dcim-api:device-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Device.objects.count(), 4)
device4 = Device.objects.get(pk=response.data['id'])
self.assertEqual(device4.device_type_id, data['device_type'])
self.assertEqual(device4.device_role_id, data['device_role'])
self.assertEqual(device4.name, data['name'])
self.assertEqual(device4.site.pk, data['site'])
self.assertEqual(device4.cluster.pk, data['cluster'])
def test_create_device_bulk(self):
data = [
{
'device_type': self.devicetype1.pk,
'device_role': self.devicerole1.pk,
'name': 'Test Device 4',
'site': self.site1.pk,
},
{
'device_type': self.devicetype1.pk,
'device_role': self.devicerole1.pk,
'name': 'Test Device 5',
'site': self.site1.pk,
},
{
'device_type': self.devicetype1.pk,
'device_role': self.devicerole1.pk,
'name': 'Test Device 6',
'site': self.site1.pk,
},
]
url = reverse('dcim-api:device-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Device.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_device(self):
interface = Interface.objects.create(name='Test Interface 1', device=self.device1)
ip4_address = IPAddress.objects.create(address=IPNetwork('192.0.2.1/24'), interface=interface)
ip6_address = IPAddress.objects.create(address=IPNetwork('2001:db8::1/64'), interface=interface)
data = {
'device_type': self.devicetype2.pk,
'device_role': self.devicerole2.pk,
'name': 'Test Device X',
'site': self.site2.pk,
'primary_ip4': ip4_address.pk,
'primary_ip6': ip6_address.pk,
}
url = reverse('dcim-api:device-detail', kwargs={'pk': self.device1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Device.objects.count(), 3)
device1 = Device.objects.get(pk=response.data['id'])
self.assertEqual(device1.device_type_id, data['device_type'])
self.assertEqual(device1.device_role_id, data['device_role'])
self.assertEqual(device1.name, data['name'])
self.assertEqual(device1.site.pk, data['site'])
self.assertEqual(device1.primary_ip4.pk, data['primary_ip4'])
self.assertEqual(device1.primary_ip6.pk, data['primary_ip6'])
def test_delete_device(self):
url = reverse('dcim-api:device-detail', kwargs={'pk': self.device1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Device.objects.count(), 2)


class ConsolePortTest(APITestCase):
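    """CRUD API tests for ConsolePort objects on a device."""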
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site
)
self.consoleport1 = ConsolePort.objects.create(device=self.device, name='Test Console Port 1')
self.consoleport2 = ConsolePort.objects.create(device=self.device, name='Test Console Port 2')
self.consoleport3 = ConsolePort.objects.create(device=self.device, name='Test Console Port 3')
def test_get_consoleport(self):
url = reverse('dcim-api:consoleport-detail', kwargs={'pk': self.consoleport1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.consoleport1.name)
def test_list_consoleports(self):
url = reverse('dcim-api:consoleport-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_consoleports_brief(self):
url = reverse('dcim-api:consoleport-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['cable', 'connection_status', 'device', 'id', 'name', 'url']
)
def test_create_consoleport(self):
data = {
'device': self.device.pk,
'name': 'Test Console Port 4',
}
url = reverse('dcim-api:consoleport-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsolePort.objects.count(), 4)
consoleport4 = ConsolePort.objects.get(pk=response.data['id'])
self.assertEqual(consoleport4.device_id, data['device'])
self.assertEqual(consoleport4.name, data['name'])
def test_create_consoleport_bulk(self):
data = [
{
'device': self.device.pk,
'name': 'Test Console Port 4',
},
{
'device': self.device.pk,
'name': 'Test Console Port 5',
},
{
'device': self.device.pk,
'name': 'Test Console Port 6',
},
]
url = reverse('dcim-api:consoleport-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsolePort.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_consoleport(self):
        data = {
'device': self.device.pk,
'name': 'Test Console Port X',
}
url = reverse('dcim-api:consoleport-detail', kwargs={'pk': self.consoleport1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(ConsolePort.objects.count(), 3)
consoleport1 = ConsolePort.objects.get(pk=response.data['id'])
self.assertEqual(consoleport1.name, data['name'])
def test_delete_consoleport(self):
url = reverse('dcim-api:consoleport-detail', kwargs={'pk': self.consoleport1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(ConsolePort.objects.count(), 2)


class ConsoleServerPortTest(APITestCase):
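    """CRUD API tests for ConsoleServerPort objects on a device."""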
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site
)
self.consoleserverport1 = ConsoleServerPort.objects.create(device=self.device, name='Test CS Port 1')
self.consoleserverport2 = ConsoleServerPort.objects.create(device=self.device, name='Test CS Port 2')
self.consoleserverport3 = ConsoleServerPort.objects.create(device=self.device, name='Test CS Port 3')
def test_get_consoleserverport(self):
url = reverse('dcim-api:consoleserverport-detail', kwargs={'pk': self.consoleserverport1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.consoleserverport1.name)
def test_list_consoleserverports(self):
url = reverse('dcim-api:consoleserverport-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_consoleserverports_brief(self):
url = reverse('dcim-api:consoleserverport-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['cable', 'connection_status', 'device', 'id', 'name', 'url']
)
def test_create_consoleserverport(self):
data = {
'device': self.device.pk,
'name': 'Test CS Port 4',
}
url = reverse('dcim-api:consoleserverport-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsoleServerPort.objects.count(), 4)
consoleserverport4 = ConsoleServerPort.objects.get(pk=response.data['id'])
self.assertEqual(consoleserverport4.device_id, data['device'])
self.assertEqual(consoleserverport4.name, data['name'])
def test_create_consoleserverport_bulk(self):
data = [
{
'device': self.device.pk,
'name': 'Test CS Port 4',
},
{
'device': self.device.pk,
'name': 'Test CS Port 5',
},
{
'device': self.device.pk,
'name': 'Test CS Port 6',
},
]
url = reverse('dcim-api:consoleserverport-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(ConsoleServerPort.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_consoleserverport(self):
data = {
'device': self.device.pk,
'name': 'Test CS Port X',
}
url = reverse('dcim-api:consoleserverport-detail', kwargs={'pk': self.consoleserverport1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(ConsoleServerPort.objects.count(), 3)
consoleserverport1 = ConsoleServerPort.objects.get(pk=response.data['id'])
self.assertEqual(consoleserverport1.name, data['name'])
def test_delete_consoleserverport(self):
url = reverse('dcim-api:consoleserverport-detail', kwargs={'pk': self.consoleserverport1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(ConsoleServerPort.objects.count(), 2)


class PowerPortTest(APITestCase):
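    """CRUD API tests for PowerPort objects on a device."""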
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site
)
self.powerport1 = PowerPort.objects.create(device=self.device, name='Test Power Port 1')
self.powerport2 = PowerPort.objects.create(device=self.device, name='Test Power Port 2')
self.powerport3 = PowerPort.objects.create(device=self.device, name='Test Power Port 3')
def test_get_powerport(self):
url = reverse('dcim-api:powerport-detail', kwargs={'pk': self.powerport1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.powerport1.name)
def test_list_powerports(self):
url = reverse('dcim-api:powerport-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_powerports_brief(self):
url = reverse('dcim-api:powerport-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['cable', 'connection_status', 'device', 'id', 'name', 'url']
)
def test_create_powerport(self):
data = {
'device': self.device.pk,
'name': 'Test Power Port 4',
}
url = reverse('dcim-api:powerport-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerPort.objects.count(), 4)
powerport4 = PowerPort.objects.get(pk=response.data['id'])
self.assertEqual(powerport4.device_id, data['device'])
self.assertEqual(powerport4.name, data['name'])
def test_create_powerport_bulk(self):
data = [
{
'device': self.device.pk,
'name': 'Test Power Port 4',
},
{
'device': self.device.pk,
'name': 'Test Power Port 5',
},
{
'device': self.device.pk,
'name': 'Test Power Port 6',
},
]
url = reverse('dcim-api:powerport-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerPort.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_powerport(self):
        data = {
'device': self.device.pk,
'name': 'Test Power Port X',
}
url = reverse('dcim-api:powerport-detail', kwargs={'pk': self.powerport1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(PowerPort.objects.count(), 3)
powerport1 = PowerPort.objects.get(pk=response.data['id'])
self.assertEqual(powerport1.name, data['name'])
def test_delete_powerport(self):
url = reverse('dcim-api:powerport-detail', kwargs={'pk': self.powerport1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(PowerPort.objects.count(), 2)


class PowerOutletTest(APITestCase):
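    """CRUD API tests for PowerOutlet objects on a device."""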
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site
)
self.poweroutlet1 = PowerOutlet.objects.create(device=self.device, name='Test Power Outlet 1')
self.poweroutlet2 = PowerOutlet.objects.create(device=self.device, name='Test Power Outlet 2')
self.poweroutlet3 = PowerOutlet.objects.create(device=self.device, name='Test Power Outlet 3')
def test_get_poweroutlet(self):
url = reverse('dcim-api:poweroutlet-detail', kwargs={'pk': self.poweroutlet1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.poweroutlet1.name)
def test_list_poweroutlets(self):
url = reverse('dcim-api:poweroutlet-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_poweroutlets_brief(self):
url = reverse('dcim-api:poweroutlet-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['cable', 'connection_status', 'device', 'id', 'name', 'url']
)
def test_create_poweroutlet(self):
data = {
'device': self.device.pk,
'name': 'Test Power Outlet 4',
}
url = reverse('dcim-api:poweroutlet-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerOutlet.objects.count(), 4)
poweroutlet4 = PowerOutlet.objects.get(pk=response.data['id'])
self.assertEqual(poweroutlet4.device_id, data['device'])
self.assertEqual(poweroutlet4.name, data['name'])
def test_create_poweroutlet_bulk(self):
data = [
{
'device': self.device.pk,
'name': 'Test Power Outlet 4',
},
{
'device': self.device.pk,
'name': 'Test Power Outlet 5',
},
{
'device': self.device.pk,
'name': 'Test Power Outlet 6',
},
]
url = reverse('dcim-api:poweroutlet-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(PowerOutlet.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_poweroutlet(self):
data = {
'device': self.device.pk,
'name': 'Test Power Outlet X',
}
url = reverse('dcim-api:poweroutlet-detail', kwargs={'pk': self.poweroutlet1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(PowerOutlet.objects.count(), 3)
poweroutlet1 = PowerOutlet.objects.get(pk=response.data['id'])
self.assertEqual(poweroutlet1.name, data['name'])
def test_delete_poweroutlet(self):
url = reverse('dcim-api:poweroutlet-detail', kwargs={'pk': self.poweroutlet1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(PowerOutlet.objects.count(), 2)


class InterfaceTest(APITestCase):
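    """CRUD API tests for Interface, covering 802.1Q VLAN assignment, LAG membership, and interface graphs."""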
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site
)
self.interface1 = Interface.objects.create(device=self.device, name='Test Interface 1')
self.interface2 = Interface.objects.create(device=self.device, name='Test Interface 2')
self.interface3 = Interface.objects.create(device=self.device, name='Test Interface 3')
self.vlan1 = VLAN.objects.create(name="Test VLAN 1", vid=1)
self.vlan2 = VLAN.objects.create(name="Test VLAN 2", vid=2)
self.vlan3 = VLAN.objects.create(name="Test VLAN 3", vid=3)
def test_get_interface(self):
url = reverse('dcim-api:interface-detail', kwargs={'pk': self.interface1.pk})
response = self.client.get(url, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['name'], self.interface1.name)
def test_get_interface_graphs(self):
self.graph1 = Graph.objects.create(
type=GRAPH_TYPE_INTERFACE, name='Test Graph 1',
source='http://example.com/graphs.py?interface={{ obj.name }}&foo=1'
)
self.graph2 = Graph.objects.create(
type=GRAPH_TYPE_INTERFACE, name='Test Graph 2',
source='http://example.com/graphs.py?interface={{ obj.name }}&foo=2'
)
self.graph3 = Graph.objects.create(
type=GRAPH_TYPE_INTERFACE, name='Test Graph 3',
source='http://example.com/graphs.py?interface={{ obj.name }}&foo=3'
)
url = reverse('dcim-api:interface-graphs', kwargs={'pk': self.interface1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(len(response.data), 3)
self.assertEqual(response.data[0]['embed_url'], 'http://example.com/graphs.py?interface=Test Interface 1&foo=1')
def test_list_interfaces(self):
url = reverse('dcim-api:interface-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_interfaces_brief(self):
url = reverse('dcim-api:interface-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['cable', 'connection_status', 'device', 'id', 'name', 'url']
)
def test_create_interface(self):
data = {
'device': self.device.pk,
'name': 'Test Interface 4',
}
url = reverse('dcim-api:interface-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Interface.objects.count(), 4)
interface4 = Interface.objects.get(pk=response.data['id'])
self.assertEqual(interface4.device_id, data['device'])
self.assertEqual(interface4.name, data['name'])
def test_create_interface_with_802_1q(self):
data = {
'device': self.device.pk,
'name': 'Test Interface 4',
'mode': IFACE_MODE_TAGGED,
'untagged_vlan': self.vlan3.id,
'tagged_vlans': [self.vlan1.id, self.vlan2.id],
}
url = reverse('dcim-api:interface-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Interface.objects.count(), 4)
self.assertEqual(response.data['device']['id'], data['device'])
self.assertEqual(response.data['name'], data['name'])
self.assertEqual(response.data['untagged_vlan']['id'], data['untagged_vlan'])
self.assertEqual([v['id'] for v in response.data['tagged_vlans']], data['tagged_vlans'])
def test_create_interface_bulk(self):
data = [
{
'device': self.device.pk,
'name': 'Test Interface 4',
},
{
'device': self.device.pk,
'name': 'Test Interface 5',
},
{
'device': self.device.pk,
'name': 'Test Interface 6',
},
]
url = reverse('dcim-api:interface-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Interface.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_create_interface_802_1q_bulk(self):
data = [
{
'device': self.device.pk,
'name': 'Test Interface 4',
'mode': IFACE_MODE_TAGGED,
'untagged_vlan': self.vlan2.id,
'tagged_vlans': [self.vlan1.id],
},
{
'device': self.device.pk,
'name': 'Test Interface 5',
'mode': IFACE_MODE_TAGGED,
'untagged_vlan': self.vlan2.id,
'tagged_vlans': [self.vlan1.id],
},
{
'device': self.device.pk,
'name': 'Test Interface 6',
'mode': IFACE_MODE_TAGGED,
'untagged_vlan': self.vlan2.id,
'tagged_vlans': [self.vlan1.id],
},
]
url = reverse('dcim-api:interface-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Interface.objects.count(), 6)
        for i in range(3):
self.assertEqual(response.data[i]['name'], data[i]['name'])
self.assertEqual([v['id'] for v in response.data[i]['tagged_vlans']], data[i]['tagged_vlans'])
self.assertEqual(response.data[i]['untagged_vlan']['id'], data[i]['untagged_vlan'])
def test_update_interface(self):
lag_interface = Interface.objects.create(
device=self.device, name='Test LAG Interface', form_factor=IFACE_FF_LAG
)
data = {
'device': self.device.pk,
'name': 'Test Interface X',
'lag': lag_interface.pk,
}
url = reverse('dcim-api:interface-detail', kwargs={'pk': self.interface1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Interface.objects.count(), 4)
interface1 = Interface.objects.get(pk=response.data['id'])
self.assertEqual(interface1.name, data['name'])
self.assertEqual(interface1.lag_id, data['lag'])
def test_delete_interface(self):
url = reverse('dcim-api:interface-detail', kwargs={'pk': self.interface1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Interface.objects.count(), 2)


class DeviceBayTest(APITestCase):
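    """CRUD API tests for DeviceBay, using a parent device type with bays and an installable child device."""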
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype1 = DeviceType.objects.create(
manufacturer=manufacturer, model='Parent Device Type', slug='parent-device-type',
subdevice_role=SUBDEVICE_ROLE_PARENT
)
self.devicetype2 = DeviceType.objects.create(
manufacturer=manufacturer, model='Child Device Type', slug='child-device-type',
subdevice_role=SUBDEVICE_ROLE_CHILD
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.parent_device = Device.objects.create(
device_type=self.devicetype1, device_role=devicerole, name='Parent Device 1', site=site
)
self.child_device = Device.objects.create(
device_type=self.devicetype2, device_role=devicerole, name='Child Device 1', site=site
)
self.devicebay1 = DeviceBay.objects.create(device=self.parent_device, name='Test Device Bay 1')
self.devicebay2 = DeviceBay.objects.create(device=self.parent_device, name='Test Device Bay 2')
self.devicebay3 = DeviceBay.objects.create(device=self.parent_device, name='Test Device Bay 3')
def test_get_devicebay(self):
url = reverse('dcim-api:devicebay-detail', kwargs={'pk': self.devicebay1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.devicebay1.name)
def test_list_devicebays(self):
url = reverse('dcim-api:devicebay-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_list_devicebays_brief(self):
url = reverse('dcim-api:devicebay-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['device', 'id', 'name', 'url']
)
def test_create_devicebay(self):
data = {
'device': self.parent_device.pk,
'name': 'Test Device Bay 4',
'installed_device': self.child_device.pk,
}
url = reverse('dcim-api:devicebay-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceBay.objects.count(), 4)
devicebay4 = DeviceBay.objects.get(pk=response.data['id'])
self.assertEqual(devicebay4.device_id, data['device'])
self.assertEqual(devicebay4.name, data['name'])
self.assertEqual(devicebay4.installed_device_id, data['installed_device'])
def test_create_devicebay_bulk(self):
data = [
{
'device': self.parent_device.pk,
'name': 'Test Device Bay 4',
},
{
'device': self.parent_device.pk,
'name': 'Test Device Bay 5',
},
{
'device': self.parent_device.pk,
'name': 'Test Device Bay 6',
},
]
url = reverse('dcim-api:devicebay-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(DeviceBay.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_devicebay(self):
data = {
'device': self.parent_device.pk,
'name': 'Test Device Bay X',
'installed_device': self.child_device.pk,
}
url = reverse('dcim-api:devicebay-detail', kwargs={'pk': self.devicebay1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(DeviceBay.objects.count(), 3)
devicebay1 = DeviceBay.objects.get(pk=response.data['id'])
self.assertEqual(devicebay1.name, data['name'])
self.assertEqual(devicebay1.installed_device_id, data['installed_device'])
def test_delete_devicebay(self):
url = reverse('dcim-api:devicebay-detail', kwargs={'pk': self.devicebay1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(DeviceBay.objects.count(), 2)


class InventoryItemTest(APITestCase):
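    """CRUD API tests for InventoryItem, including nesting items beneath a parent item."""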
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site
)
self.inventoryitem1 = InventoryItem.objects.create(device=self.device, name='Test Inventory Item 1')
self.inventoryitem2 = InventoryItem.objects.create(device=self.device, name='Test Inventory Item 2')
self.inventoryitem3 = InventoryItem.objects.create(device=self.device, name='Test Inventory Item 3')
def test_get_inventoryitem(self):
url = reverse('dcim-api:inventoryitem-detail', kwargs={'pk': self.inventoryitem1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.inventoryitem1.name)
def test_list_inventoryitems(self):
url = reverse('dcim-api:inventoryitem-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_inventoryitem(self):
data = {
'device': self.device.pk,
'parent': self.inventoryitem1.pk,
'name': 'Test Inventory Item 4',
'manufacturer': self.manufacturer.pk,
}
url = reverse('dcim-api:inventoryitem-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(InventoryItem.objects.count(), 4)
inventoryitem4 = InventoryItem.objects.get(pk=response.data['id'])
self.assertEqual(inventoryitem4.device_id, data['device'])
self.assertEqual(inventoryitem4.parent_id, data['parent'])
self.assertEqual(inventoryitem4.name, data['name'])
self.assertEqual(inventoryitem4.manufacturer_id, data['manufacturer'])
def test_create_inventoryitem_bulk(self):
data = [
{
'device': self.device.pk,
'parent': self.inventoryitem1.pk,
'name': 'Test Inventory Item 4',
'manufacturer': self.manufacturer.pk,
},
{
'device': self.device.pk,
'parent': self.inventoryitem1.pk,
'name': 'Test Inventory Item 5',
'manufacturer': self.manufacturer.pk,
},
{
'device': self.device.pk,
'parent': self.inventoryitem1.pk,
'name': 'Test Inventory Item 6',
'manufacturer': self.manufacturer.pk,
},
]
url = reverse('dcim-api:inventoryitem-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(InventoryItem.objects.count(), 6)
self.assertEqual(response.data[0]['name'], data[0]['name'])
self.assertEqual(response.data[1]['name'], data[1]['name'])
self.assertEqual(response.data[2]['name'], data[2]['name'])
def test_update_inventoryitem(self):
data = {
'device': self.device.pk,
'parent': self.inventoryitem1.pk,
'name': 'Test Inventory Item X',
'manufacturer': self.manufacturer.pk,
}
url = reverse('dcim-api:inventoryitem-detail', kwargs={'pk': self.inventoryitem1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(InventoryItem.objects.count(), 3)
inventoryitem1 = InventoryItem.objects.get(pk=response.data['id'])
self.assertEqual(inventoryitem1.device_id, data['device'])
self.assertEqual(inventoryitem1.parent_id, data['parent'])
self.assertEqual(inventoryitem1.name, data['name'])
self.assertEqual(inventoryitem1.manufacturer_id, data['manufacturer'])
def test_delete_inventoryitem(self):
url = reverse('dcim-api:inventoryitem-detail', kwargs={'pk': self.inventoryitem1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(InventoryItem.objects.count(), 2)


class CableTest(APITestCase):
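    """CRUD API tests for Cable objects terminating on device interfaces."""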
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=self.manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device1 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site
)
self.device2 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 2', site=site
)
for device in [self.device1, self.device2]:
            for i in range(10):
Interface(device=device, form_factor=IFACE_FF_1GE_FIXED, name='eth{}'.format(i)).save()
self.cable1 = Cable(
termination_a=self.device1.interfaces.get(name='eth0'),
termination_b=self.device2.interfaces.get(name='eth0'),
label='Test Cable 1'
)
self.cable1.save()
self.cable2 = Cable(
termination_a=self.device1.interfaces.get(name='eth1'),
termination_b=self.device2.interfaces.get(name='eth1'),
label='Test Cable 2'
)
self.cable2.save()
self.cable3 = Cable(
termination_a=self.device1.interfaces.get(name='eth2'),
termination_b=self.device2.interfaces.get(name='eth2'),
label='Test Cable 3'
)
self.cable3.save()
def test_get_cable(self):
url = reverse('dcim-api:cable-detail', kwargs={'pk': self.cable1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['id'], self.cable1.pk)
def test_list_cables(self):
url = reverse('dcim-api:cable-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_cable(self):
interface_a = self.device1.interfaces.get(name='eth3')
interface_b = self.device2.interfaces.get(name='eth3')
data = {
'termination_a_type': 'dcim.interface',
'termination_a_id': interface_a.pk,
'termination_b_type': 'dcim.interface',
'termination_b_id': interface_b.pk,
'status': CONNECTION_STATUS_PLANNED,
'label': 'Test Cable 4',
}
url = reverse('dcim-api:cable-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Cable.objects.count(), 4)
cable4 = Cable.objects.get(pk=response.data['id'])
self.assertEqual(cable4.termination_a, interface_a)
self.assertEqual(cable4.termination_b, interface_b)
self.assertEqual(cable4.status, data['status'])
self.assertEqual(cable4.label, data['label'])
def test_create_cable_bulk(self):
data = [
{
'termination_a_type': 'dcim.interface',
'termination_a_id': self.device1.interfaces.get(name='eth3').pk,
'termination_b_type': 'dcim.interface',
'termination_b_id': self.device2.interfaces.get(name='eth3').pk,
'label': 'Test Cable 4',
},
{
'termination_a_type': 'dcim.interface',
'termination_a_id': self.device1.interfaces.get(name='eth4').pk,
'termination_b_type': 'dcim.interface',
'termination_b_id': self.device2.interfaces.get(name='eth4').pk,
'label': 'Test Cable 5',
},
{
'termination_a_type': 'dcim.interface',
'termination_a_id': self.device1.interfaces.get(name='eth5').pk,
'termination_b_type': 'dcim.interface',
'termination_b_id': self.device2.interfaces.get(name='eth5').pk,
'label': 'Test Cable 6',
},
]
url = reverse('dcim-api:cable-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Cable.objects.count(), 6)
self.assertEqual(response.data[0]['label'], data[0]['label'])
self.assertEqual(response.data[1]['label'], data[1]['label'])
self.assertEqual(response.data[2]['label'], data[2]['label'])
def test_update_cable(self):
data = {
'label': 'Test Cable X',
'status': CONNECTION_STATUS_CONNECTED,
}
url = reverse('dcim-api:cable-detail', kwargs={'pk': self.cable1.pk})
response = self.client.patch(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Cable.objects.count(), 3)
cable1 = Cable.objects.get(pk=response.data['id'])
self.assertEqual(cable1.status, data['status'])
self.assertEqual(cable1.label, data['label'])
def test_delete_cable(self):
url = reverse('dcim-api:cable-detail', kwargs={'pk': self.cable1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Cable.objects.count(), 2)


class ConnectionTest(APITestCase):
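    """Tests for cable connections: direct console, power, interface, and circuit termination links, plus patched console and interface paths."""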
def setUp(self):
super().setUp()
self.site = Site.objects.create(
name='Test Site 1', slug='test-site-1'
)
manufacturer = Manufacturer.objects.create(
name='Test Manufacturer 1', slug='test-manufacturer-1'
)
devicetype = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device1 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 1', site=self.site
)
self.device2 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Device 2', site=self.site
)
self.panel1 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Panel 1', site=self.site
)
self.panel2 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='Test Panel 2', site=self.site
)
def test_create_direct_console_connection(self):
consoleport1 = ConsolePort.objects.create(
device=self.device1, name='Test Console Port 1'
)
consoleserverport1 = ConsoleServerPort.objects.create(
device=self.device2, name='Test Console Server Port 1'
)
data = {
'termination_a_type': 'dcim.consoleport',
'termination_a_id': consoleport1.pk,
'termination_b_type': 'dcim.consoleserverport',
'termination_b_id': consoleserverport1.pk,
}
url = reverse('dcim-api:cable-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Cable.objects.count(), 1)
cable = Cable.objects.get(pk=response.data['id'])
consoleport1 = ConsolePort.objects.get(pk=consoleport1.pk)
consoleserverport1 = ConsoleServerPort.objects.get(pk=consoleserverport1.pk)
self.assertEqual(cable.termination_a, consoleport1)
self.assertEqual(cable.termination_b, consoleserverport1)
self.assertEqual(consoleport1.cable, cable)
self.assertEqual(consoleserverport1.cable, cable)
self.assertEqual(consoleport1.connected_endpoint, consoleserverport1)
self.assertEqual(consoleserverport1.connected_endpoint, consoleport1)
def test_create_patched_console_connection(self):
consoleport1 = ConsolePort.objects.create(
device=self.device1, name='Test Console Port 1'
)
consoleserverport1 = ConsoleServerPort.objects.create(
device=self.device2, name='Test Console Server Port 1'
)
rearport1 = RearPort.objects.create(
device=self.panel1, name='Test Rear Port 1', type=PORT_TYPE_8P8C
)
frontport1 = FrontPort.objects.create(
device=self.panel1, name='Test Front Port 1', type=PORT_TYPE_8P8C, rear_port=rearport1
)
rearport2 = RearPort.objects.create(
device=self.panel2, name='Test Rear Port 2', type=PORT_TYPE_8P8C
)
frontport2 = FrontPort.objects.create(
device=self.panel2, name='Test Front Port 2', type=PORT_TYPE_8P8C, rear_port=rearport2
)
url = reverse('dcim-api:cable-list')
cables = [
# Console port to panel1 front
{
'termination_a_type': 'dcim.consoleport',
'termination_a_id': consoleport1.pk,
'termination_b_type': 'dcim.frontport',
'termination_b_id': frontport1.pk,
},
# Panel1 rear to panel2 rear
{
'termination_a_type': 'dcim.rearport',
'termination_a_id': rearport1.pk,
'termination_b_type': 'dcim.rearport',
'termination_b_id': rearport2.pk,
},
# Panel2 front to console server port
{
'termination_a_type': 'dcim.frontport',
'termination_a_id': frontport2.pk,
'termination_b_type': 'dcim.consoleserverport',
'termination_b_id': consoleserverport1.pk,
},
]
for data in cables:
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
cable = Cable.objects.get(pk=response.data['id'])
self.assertEqual(cable.termination_a.cable, cable)
self.assertEqual(cable.termination_b.cable, cable)
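        # With all three cable segments in place, the end-to-end console path should be traced automatically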
consoleport1 = ConsolePort.objects.get(pk=consoleport1.pk)
consoleserverport1 = ConsoleServerPort.objects.get(pk=consoleserverport1.pk)
self.assertEqual(consoleport1.connected_endpoint, consoleserverport1)
self.assertEqual(consoleserverport1.connected_endpoint, consoleport1)
def test_create_direct_power_connection(self):
powerport1 = PowerPort.objects.create(
device=self.device1, name='Test Power Port 1'
)
poweroutlet1 = PowerOutlet.objects.create(
device=self.device2, name='Test Power Outlet 1'
)
data = {
'termination_a_type': 'dcim.powerport',
'termination_a_id': powerport1.pk,
'termination_b_type': 'dcim.poweroutlet',
'termination_b_id': poweroutlet1.pk,
}
url = reverse('dcim-api:cable-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Cable.objects.count(), 1)
cable = Cable.objects.get(pk=response.data['id'])
powerport1 = PowerPort.objects.get(pk=powerport1.pk)
poweroutlet1 = PowerOutlet.objects.get(pk=poweroutlet1.pk)
self.assertEqual(cable.termination_a, powerport1)
self.assertEqual(cable.termination_b, poweroutlet1)
self.assertEqual(powerport1.cable, cable)
self.assertEqual(poweroutlet1.cable, cable)
self.assertEqual(powerport1.connected_endpoint, poweroutlet1)
self.assertEqual(poweroutlet1.connected_endpoint, powerport1)
# Note: Power connections via patch ports are not supported.
def test_create_direct_interface_connection(self):
interface1 = Interface.objects.create(
device=self.device1, name='Test Interface 1'
)
interface2 = Interface.objects.create(
device=self.device2, name='Test Interface 2'
)
data = {
'termination_a_type': 'dcim.interface',
'termination_a_id': interface1.pk,
'termination_b_type': 'dcim.interface',
'termination_b_id': interface2.pk,
}
url = reverse('dcim-api:cable-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Cable.objects.count(), 1)
cable = Cable.objects.get(pk=response.data['id'])
interface1 = Interface.objects.get(pk=interface1.pk)
interface2 = Interface.objects.get(pk=interface2.pk)
self.assertEqual(cable.termination_a, interface1)
self.assertEqual(cable.termination_b, interface2)
self.assertEqual(interface1.cable, cable)
self.assertEqual(interface2.cable, cable)
self.assertEqual(interface1.connected_endpoint, interface2)
self.assertEqual(interface2.connected_endpoint, interface1)
def test_create_patched_interface_connection(self):
interface1 = Interface.objects.create(
device=self.device1, name='Test Interface 1'
)
interface2 = Interface.objects.create(
device=self.device2, name='Test Interface 2'
)
rearport1 = RearPort.objects.create(
device=self.panel1, name='Test Rear Port 1', type=PORT_TYPE_8P8C
)
frontport1 = FrontPort.objects.create(
device=self.panel1, name='Test Front Port 1', type=PORT_TYPE_8P8C, rear_port=rearport1
)
rearport2 = RearPort.objects.create(
device=self.panel2, name='Test Rear Port 2', type=PORT_TYPE_8P8C
)
frontport2 = FrontPort.objects.create(
device=self.panel2, name='Test Front Port 2', type=PORT_TYPE_8P8C, rear_port=rearport2
)
url = reverse('dcim-api:cable-list')
cables = [
# Interface1 to panel1 front
{
'termination_a_type': 'dcim.interface',
'termination_a_id': interface1.pk,
'termination_b_type': 'dcim.frontport',
'termination_b_id': frontport1.pk,
},
# Panel1 rear to panel2 rear
{
'termination_a_type': 'dcim.rearport',
'termination_a_id': rearport1.pk,
'termination_b_type': 'dcim.rearport',
'termination_b_id': rearport2.pk,
},
# Panel2 front to interface2
{
'termination_a_type': 'dcim.frontport',
'termination_a_id': frontport2.pk,
'termination_b_type': 'dcim.interface',
'termination_b_id': interface2.pk,
},
]
for data in cables:
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
cable = Cable.objects.get(pk=response.data['id'])
self.assertEqual(cable.termination_a.cable, cable)
self.assertEqual(cable.termination_b.cable, cable)
interface1 = Interface.objects.get(pk=interface1.pk)
interface2 = Interface.objects.get(pk=interface2.pk)
self.assertEqual(interface1.connected_endpoint, interface2)
self.assertEqual(interface2.connected_endpoint, interface1)
def test_create_direct_circuittermination_connection(self):
provider = Provider.objects.create(
name='Test Provider 1', slug='test-provider-1'
)
circuittype = CircuitType.objects.create(
name='Test Circuit Type 1', slug='test-circuit-type-1'
)
circuit = Circuit.objects.create(
provider=provider, type=circuittype, cid='Test Circuit 1'
)
interface1 = Interface.objects.create(
device=self.device1, name='Test Interface 1'
)
circuittermination1 = CircuitTermination.objects.create(
circuit=circuit, term_side='A', site=self.site, port_speed=10000
)
data = {
'termination_a_type': 'dcim.interface',
'termination_a_id': interface1.pk,
'termination_b_type': 'circuits.circuittermination',
'termination_b_id': circuittermination1.pk,
}
url = reverse('dcim-api:cable-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Cable.objects.count(), 1)
cable = Cable.objects.get(pk=response.data['id'])
interface1 = Interface.objects.get(pk=interface1.pk)
circuittermination1 = CircuitTermination.objects.get(pk=circuittermination1.pk)
self.assertEqual(cable.termination_a, interface1)
self.assertEqual(cable.termination_b, circuittermination1)
self.assertEqual(interface1.cable, cable)
self.assertEqual(circuittermination1.cable, cable)
self.assertEqual(interface1.connected_endpoint, circuittermination1)
self.assertEqual(circuittermination1.connected_endpoint, interface1)
def test_create_patched_circuittermination_connection(self):
provider = Provider.objects.create(
name='Test Provider 1', slug='test-provider-1'
)
circuittype = CircuitType.objects.create(
name='Test Circuit Type 1', slug='test-circuit-type-1'
)
circuit = Circuit.objects.create(
provider=provider, type=circuittype, cid='Test Circuit 1'
)
interface1 = Interface.objects.create(
device=self.device1, name='Test Interface 1'
)
circuittermination1 = CircuitTermination.objects.create(
circuit=circuit, term_side='A', site=self.site, port_speed=10000
)
rearport1 = RearPort.objects.create(
device=self.panel1, name='Test Rear Port 1', type=PORT_TYPE_8P8C
)
frontport1 = FrontPort.objects.create(
device=self.panel1, name='Test Front Port 1', type=PORT_TYPE_8P8C, rear_port=rearport1
)
rearport2 = RearPort.objects.create(
device=self.panel2, name='Test Rear Port 2', type=PORT_TYPE_8P8C
)
frontport2 = FrontPort.objects.create(
device=self.panel2, name='Test Front Port 2', type=PORT_TYPE_8P8C, rear_port=rearport2
)
url = reverse('dcim-api:cable-list')
cables = [
# Interface to panel1 front
{
'termination_a_type': 'dcim.interface',
'termination_a_id': interface1.pk,
'termination_b_type': 'dcim.frontport',
'termination_b_id': frontport1.pk,
},
# Panel1 rear to panel2 rear
{
'termination_a_type': 'dcim.rearport',
'termination_a_id': rearport1.pk,
'termination_b_type': 'dcim.rearport',
'termination_b_id': rearport2.pk,
},
# Panel2 front to circuit termination
{
'termination_a_type': 'dcim.frontport',
'termination_a_id': frontport2.pk,
'termination_b_type': 'circuits.circuittermination',
'termination_b_id': circuittermination1.pk,
},
]
for data in cables:
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
cable = Cable.objects.get(pk=response.data['id'])
self.assertEqual(cable.termination_a.cable, cable)
self.assertEqual(cable.termination_b.cable, cable)
interface1 = Interface.objects.get(pk=interface1.pk)
circuittermination1 = CircuitTermination.objects.get(pk=circuittermination1.pk)
self.assertEqual(interface1.connected_endpoint, circuittermination1)
self.assertEqual(circuittermination1.connected_endpoint, interface1)
class ConnectedDeviceTest(APITestCase):
def setUp(self):
super().setUp()
self.site1 = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.site2 = Site.objects.create(name='Test Site 2', slug='test-site-2')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.devicetype1 = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.devicetype2 = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 2', slug='test-device-type-2'
)
self.devicerole1 = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.devicerole2 = DeviceRole.objects.create(
name='Test Device Role 2', slug='test-device-role-2', color='00ff00'
)
self.device1 = Device.objects.create(
device_type=self.devicetype1, device_role=self.devicerole1, name='TestDevice1', site=self.site1
)
self.device2 = Device.objects.create(
device_type=self.devicetype1, device_role=self.devicerole1, name='TestDevice2', site=self.site1
)
self.interface1 = Interface.objects.create(device=self.device1, name='eth0')
self.interface2 = Interface.objects.create(device=self.device2, name='eth0')
cable = Cable(termination_a=self.interface1, termination_b=self.interface2)
cable.save()
def test_get_connected_device(self):
url = reverse('dcim-api:connected-device-list')
response = self.client.get(url + '?peer_device=TestDevice2&peer_interface=eth0', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['name'], self.device1.name)
class VirtualChassisTest(APITestCase):
def setUp(self):
super().setUp()
site = Site.objects.create(name='Test Site', slug='test-site')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer', slug='test-manufacturer')
device_type = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type', slug='test-device-type'
)
device_role = DeviceRole.objects.create(
name='Test Device Role', slug='test-device-role', color='ff0000'
)
        # Create 9 member Devices with 13 interfaces each (names 1/0 through 3/12)
self.device1 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch1', site=site
)
self.device2 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch2', site=site
)
self.device3 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch3', site=site
)
self.device4 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch4', site=site
)
self.device5 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch5', site=site
)
self.device6 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch6', site=site
)
self.device7 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch7', site=site
)
self.device8 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch8', site=site
)
self.device9 = Device.objects.create(
device_type=device_type, device_role=device_role, name='StackSwitch9', site=site
)
        devices = [
            self.device1, self.device2, self.device3,
            self.device4, self.device5, self.device6,
            self.device7, self.device8, self.device9,
        ]
        for n, device in enumerate(devices):
            for i in range(0, 13):
                Interface.objects.create(
                    device=device, name='{}/{}'.format(n % 3 + 1, i),
                    form_factor=IFACE_FF_1GE_FIXED
                )
# Create two VirtualChassis with three members each
self.vc1 = VirtualChassis.objects.create(master=self.device1, domain='test-domain-1')
Device.objects.filter(pk=self.device2.pk).update(virtual_chassis=self.vc1, vc_position=2)
Device.objects.filter(pk=self.device3.pk).update(virtual_chassis=self.vc1, vc_position=3)
self.vc2 = VirtualChassis.objects.create(master=self.device4, domain='test-domain-2')
Device.objects.filter(pk=self.device5.pk).update(virtual_chassis=self.vc2, vc_position=2)
Device.objects.filter(pk=self.device6.pk).update(virtual_chassis=self.vc2, vc_position=3)
def test_get_virtualchassis(self):
url = reverse('dcim-api:virtualchassis-detail', kwargs={'pk': self.vc1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['domain'], self.vc1.domain)
def test_list_virtualchassis(self):
url = reverse('dcim-api:virtualchassis-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 2)
def test_list_virtualchassis_brief(self):
url = reverse('dcim-api:virtualchassis-list')
response = self.client.get('{}?brief=1'.format(url), **self.header)
self.assertEqual(
sorted(response.data['results'][0]),
['id', 'master', 'url']
)
def test_create_virtualchassis(self):
data = {
'master': self.device7.pk,
'domain': 'test-domain-3',
}
url = reverse('dcim-api:virtualchassis-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(VirtualChassis.objects.count(), 3)
vc3 = VirtualChassis.objects.get(pk=response.data['id'])
self.assertEqual(vc3.master.pk, data['master'])
self.assertEqual(vc3.domain, data['domain'])
# Verify that the master device was automatically assigned to the VC
self.assertTrue(Device.objects.filter(pk=vc3.master.pk, virtual_chassis=vc3.pk).exists())
def test_create_virtualchassis_bulk(self):
data = [
{
'master': self.device7.pk,
'domain': 'test-domain-3',
},
{
'master': self.device8.pk,
'domain': 'test-domain-4',
},
{
'master': self.device9.pk,
'domain': 'test-domain-5',
},
]
url = reverse('dcim-api:virtualchassis-list')
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(VirtualChassis.objects.count(), 5)
for i in range(0, 3):
self.assertEqual(response.data[i]['master']['id'], data[i]['master'])
self.assertEqual(response.data[i]['domain'], data[i]['domain'])
def test_update_virtualchassis(self):
data = {
'master': self.device2.pk,
'domain': 'test-domain-x',
}
url = reverse('dcim-api:virtualchassis-detail', kwargs={'pk': self.vc1.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(VirtualChassis.objects.count(), 2)
vc1 = VirtualChassis.objects.get(pk=response.data['id'])
self.assertEqual(vc1.master.pk, data['master'])
self.assertEqual(vc1.domain, data['domain'])
def test_delete_virtualchassis(self):
url = reverse('dcim-api:virtualchassis-detail', kwargs={'pk': self.vc1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(VirtualChassis.objects.count(), 1)
# Verify that all VC members have had their VC-related fields nullified
for d in [self.device1, self.device2, self.device3]:
self.assertTrue(
Device.objects.filter(pk=d.pk, virtual_chassis=None, vc_position=None, vc_priority=None)
)
| 38.601253 | 120 | 0.617728 |
9af97033112294a73bebe93464a5f9fac08290b1 | 1,478 | py | Python | src/roboy_hand/gesture_recognition/train/xml_to_csv.py | Roboy/ss18_hand | 3a4b3524fe653478c8a31e62b593708fdeebef70 | ["BSD-3-Clause"] | 1 | 2018-07-07T21:43:05.000Z | 2018-07-07T21:43:05.000Z | src/roboy_hand/gesture_recognition/train/xml_to_csv.py | Roboy/ss18_hand | 3a4b3524fe653478c8a31e62b593708fdeebef70 | ["BSD-3-Clause"] | null | null | null | src/roboy_hand/gesture_recognition/train/xml_to_csv.py | Roboy/ss18_hand | 3a4b3524fe653478c8a31e62b593708fdeebef70 | ["BSD-3-Clause"] | 2 | 2018-05-02T15:30:49.000Z | 2020-09-13T12:23:45.000Z |
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path):
xml_list = []
gesture_names = ['fist', 'one', 'pinky', 'loser', 'two', 'three', 'horn', 'rockNroll', 'five', 'ok']
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
# P1_G1_0
file_name = root.find('filename').text.split('_')
# G1
gesture_name = file_name[1]
            # 0 (the gesture id is zero-based: 'G1' -> 0)
            gesture_id = int(gesture_name[1:]) - 1
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
gesture_names[gesture_id],
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
            print(value)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
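# Illustrative example (added for clarity; the box coordinates are made up):
# an annotation whose <filename> is "P1_G2_0" yields gesture_id 1 and class
# "one", so the resulting CSV row looks like:
#   P1_G2_0, 640, 480, one, 12, 34, 250, 260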
def main():
for directory in ['train', 'test']:
image_path = os.path.join(os.getcwd(), 'images/{}'.format(directory))
xml_df = xml_to_csv(image_path)
xml_df.to_csv('data/{}_labels.csv'.format(directory), index=None)
if __name__ == '__main__':
    main()
| 32.844444 | 104 | 0.521651 |
2c436e20987f1a9ab4b3f42d63e59edc26b03492 | 2,272 | py | Python | PyObjCTest/test_nsinvocation.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | ["MIT"] | null | null | null | PyObjCTest/test_nsinvocation.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | ["MIT"] | null | null | null | PyObjCTest/test_nsinvocation.py | linuxfood/pyobjc-framework-Cocoa-test | 3475890f165ab26a740f13d5afe4c62b4423a140 | ["MIT"] | null | null | null |
import Foundation
from PyObjCTools.TestSupport import TestCase
class TestNSInvocation(TestCase):
def test_dummy(self):
value = Foundation.NSMutableArray.arrayWithArray_([1, 2, 3])
invocation = Foundation.NSInvocation.invocationWithMethodSignature_(
value.methodSignatureForSelector_("count")
)
invocation.setSelector_("count")
invocation.setTarget_(value)
invocation.invoke()
v = invocation.getReturnValue_(None)
self.assertIsInstance(v, int)
self.assertEqual(v, 3)
invocation = Foundation.NSInvocation.invocationWithMethodSignature_(
value.methodSignatureForSelector_("addObject:")
)
invocation.setSelector_("addObject:")
invocation.setTarget_(value)
invocation.setArgument_atIndex_(b"hello".decode("ascii"), 2)
v = invocation.getArgument_atIndex_(None, 2)
self.assertEqual(v, b"hello".decode("ascii"))
invocation.invoke()
self.assertEqual(value.count(), 4)
def testMethods(self):
self.assertResultIsBOOL(Foundation.NSInvocation.argumentsRetained)
def testNoUnsupported(self):
self.assertNotHasAttr(Foundation, "NSObjCValue")
self.assertNotHasAttr(Foundation, "NSObjCNoType")
self.assertNotHasAttr(Foundation, "NSObjCVoidType")
self.assertNotHasAttr(Foundation, "NSObjCCharType")
self.assertNotHasAttr(Foundation, "NSObjCShortType")
self.assertNotHasAttr(Foundation, "NSObjCLongType")
self.assertNotHasAttr(Foundation, "NSObjCLonglongType")
self.assertNotHasAttr(Foundation, "NSObjCFloatType")
self.assertNotHasAttr(Foundation, "NSObjCDoubleType")
self.assertNotHasAttr(Foundation, "NSObjCBoolType")
self.assertNotHasAttr(Foundation, "NSObjCSelectorType")
self.assertNotHasAttr(Foundation, "NSObjCObjectType")
self.assertNotHasAttr(Foundation, "NSObjCStructType")
self.assertNotHasAttr(Foundation, "NSObjCPointerType")
self.assertNotHasAttr(Foundation, "NSObjCStringType")
self.assertNotHasAttr(Foundation, "NSObjCArrayType")
self.assertNotHasAttr(Foundation, "NSObjCUnionType")
self.assertNotHasAttr(Foundation, "NSObjCBitfield")
| 42.074074 | 76 | 0.712148 |
adfe13c9edfffc4836b45d212f3c7c39c92aa089 | 498 | py | Python | web/transaction/models.py | vshagur/wallet-rest-api | 0d8c09c52f0c86a1f36f12949d9733caa0db7ff0 | ["MIT"] | null | null | null | web/transaction/models.py | vshagur/wallet-rest-api | 0d8c09c52f0c86a1f36f12949d9733caa0db7ff0 | ["MIT"] | null | null | null | web/transaction/models.py | vshagur/wallet-rest-api | 0d8c09c52f0c86a1f36f12949d9733caa0db7ff0 | ["MIT"] | null | null | null |
from django.db import models
from wallet.models import Wallet
class Transaction(models.Model):
comment = models.TextField(
null=False,
blank=True,
max_length=1024,
)
balance = models.DecimalField(
null=False,
max_digits=12,
decimal_places=2,
)
wallet = models.ForeignKey(
Wallet,
on_delete=models.CASCADE,
)
datetime = models.DateTimeField(
auto_now_add=True,
auto_created=True,
)
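# Example usage (a sketch, not from the repo; assumes an existing Wallet `w`):
#   from decimal import Decimal
#   Transaction.objects.create(wallet=w, balance=Decimal('10.00'), comment='top-up')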
| 18.444444 | 36 | 0.608434 |
b665ece82721a41654015a35bf62a7fcc39dbe9d | 441 | py | Python | core/migrations/0010_auto_20180928_1104.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | ["MIT"] | 7 | 2017-09-14T18:52:58.000Z | 2020-05-18T21:01:20.000Z | core/migrations/0010_auto_20180928_1104.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | ["MIT"] | 151 | 2017-09-14T18:46:02.000Z | 2022-02-10T09:18:44.000Z | core/migrations/0010_auto_20180928_1104.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | ["MIT"] | 1 | 2017-10-29T19:37:29.000Z | 2017-10-29T19:37:29.000Z |
# Generated by Django 2.1.1 on 2018-09-28 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20180917_1115'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='slug',
field=models.SlugField(help_text='Choose a Cobweb URL for your organization.', unique=True),
),
]
| 23.210526 | 104 | 0.621315 |
871db85e3c75b5c13403ad72ba0e24de827e66c6 | 5,642 | py | Python | foscam_poll.py | eagleco/udi-camera-poly | d761fe1033a7b5d294cb73438d5d8ccfbf9dbd37 | ["MIT"] | null | null | null | foscam_poll.py | eagleco/udi-camera-poly | d761fe1033a7b5d294cb73438d5d8ccfbf9dbd37 | ["MIT"] | null | null | null | foscam_poll.py | eagleco/udi-camera-poly | d761fe1033a7b5d294cb73438d5d8ccfbf9dbd37 | ["MIT"] | null | null | null |
#!/usr/bin/python3
#
# Basic script to send UDP requests looking for foscam cameras.
#
import socket
import time
from struct import unpack, pack
from camera_funcs import get_valid_node_name
TIMEOUT = 6 # Run for 6 seconds max.
PING_INTERVAL = 2 # Ping once every 2 seconds
PING_PORT_NUMBER = 10000
PING_MSG_SIZE = 130
# ftp://109.108.88.53/Nadzor/FOSCAM/SDK%20CGI/MJPEG%20CGI%20SDK/MJPEG%20CGI%20SDK/Ipcamera%20device%20search%20protocol.pdf
SEARCH_REQUEST = pack('>4sH?8sll4s', b'MO_I', 0, 0, b'', 67108864, 0, b'')
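# Best-effort reading of the pack format above (an assumption added for
# clarity, not taken from the protocol PDF): 4-byte magic 'MO_I', an unsigned
# short operation code, a boolean flag, 8 reserved bytes, two signed 32-bit
# integers (the first set to 0x04000000), and 4 trailing reserved bytes,
# all big-endian.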
def foscam_poll(logger=None,verbose=False):
    clients = []
# Create UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Ask operating system to let us do broadcasts from socket
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Bind UDP socket to local port so we can receive pings
sock.bind(('',0)) # Was, PING_PORT_NUMBER, but sender can be any open port.
# Use timeout
sock.settimeout(PING_INTERVAL)
main_timeout = time.time() + TIMEOUT
responses = {}
while time.time() < main_timeout:
# Broadcast our beacon
if logger is not None:
logger.info("Pinging for Foscam's")
sock.sendto(SEARCH_REQUEST, 0, ("255.255.255.255", PING_PORT_NUMBER))
ping_timeout = time.time() + PING_INTERVAL
while time.time() < ping_timeout:
# Listen for a response with timeout
addr = None
try:
msg, (addr, uport) = sock.recvfrom(PING_MSG_SIZE)
# Someone answered our ping, store it.
if addr not in responses:
if logger is not None:
logger.info("Saving response from %s:%s" % (addr,uport))
responses[addr] = msg
except socket.timeout:
if logger is not None:
logger.debug("No more reponses")
sock.close()
if logger is not None:
logger.debug("All done looking")
for addr, msg in iter(responses.items()):
if logger is not None:
logger.debug("Response from: %s" % (addr))
if verbose:
logger.debug("msg=%s" % msg)
if msg == SEARCH_REQUEST:
if logger is not None:
logger.debug("ignore my echo")
elif len(msg) == 88 or len(msg) == 121 or len(msg) == 129:
if len(msg) == 88:
upk = unpack('>23s13s21s4I4b4b4bH?',msg)
(header, id, name, ip_i, mask_i, gateway_i, dns_i, r1, r2, r3, r4, s1, s2, s3, s4, a1, a2, a3, a4, port, dhcp) = upk
type = ""
mtype = "MJPEG"
elif len(msg) == 121:
# I can't find documentation for the last 19 and 14 bytes, but the 14 seems to
# be a string that indicates what type of camera A=HD and b=H.264
# I see this for my FI9828P V2
upk = unpack('>23s13s21s4I4b4b4bH?19s14s',msg)
(header, id, name, ip_i, mask_i, gateway_i, dns_i, r1, r2, r3, r4, s1, s2, s3, s4, a1, a2, a3, a4, port, dhcp, unknown, type) = upk
mtype = "HD2"
elif len(msg) == 129:
# And this has has another 8 bytes at the end? I see this on my FI9826P V2
upk = unpack('>23s13s21s4I4b4b4bH?19s14s8s',msg)
(header, id, name, ip_i, mask_i, gateway_i, dns_i, r1, r2, r3, r4, s1, s2, s3, s4, a1, a2, a3, a4, port, dhcp, unknown1, type, unknown2) = upk
mtype = "HD2"
if verbose and logger is not None:
logger.debug(upk)
            # 'type' is raw bytes on HD models and '' on MJPEG; normalize to str
            if isinstance(type, bytes):
                type = type.decode().rstrip('\x00')
            id = id.decode()
            name = name.decode()
            client = {
                'type': type,
'mtype': mtype,
'id': id.rstrip('\x00'),
'name': get_valid_node_name(name.rstrip('\x00')),
'ip': socket.inet_ntoa(pack('!I',ip_i)),
'port': port,
'mask': socket.inet_ntoa(pack('!I',mask_i)),
'gateway': socket.inet_ntoa(pack('!I',gateway_i)),
'dns': socket.inet_ntoa(pack('!I',dns_i)),
'reserve': "%d.%d.%d.%d" % (r1, r2, r3, r4),
'sys': "%d.%d.%d.%d" % (s1, s2, s3, s4),
'app': "%d.%d.%d.%d" % (a1, a2, a3, a4),
'dhcp': dhcp,
'reserve_a': (r1, r2, r3, r4),
'sys_a': (s1, s2, s3, s4),
'app_a': (a1, a2, a3, a4),
}
if logger is not None:
logger.info("Foscam Info: %s" % (client))
clients.append(client)
else:
if logger is not None:
logger.debug("Ignoring message of size " + str(len(msg)))
return clients
if __name__ == '__main__':
import logging
import sys
# Create our logger
logger = logging.getLogger('foscam_poll')
logger.setLevel(logging.DEBUG)
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
verbose = False
if (len(sys.argv) > 1 and sys.argv[1] == "-v"):
verbose = True
foscam_poll(logger,verbose)
| 38.380952 | 158 | 0.542184 |
50dfa79079457225271ab03d1ae821e8f18a36d9 | 38,749 | py | Python | skopt/space/space.py | teffland/scikit-optimize | b02c90e22e45fc69b0587dff601621b540e2579f | ["BSD-3-Clause"] | null | null | null | skopt/space/space.py | teffland/scikit-optimize | b02c90e22e45fc69b0587dff601621b540e2579f | ["BSD-3-Clause"] | null | null | null | skopt/space/space.py | teffland/scikit-optimize | b02c90e22e45fc69b0587dff601621b540e2579f | ["BSD-3-Clause"] | null | null | null |
import numbers
import numpy as np
import yaml
from scipy.stats.distributions import randint
from scipy.stats.distributions import rv_discrete
from scipy.stats.distributions import uniform
from sklearn.utils import check_random_state
from .transformers import CategoricalEncoder
from .transformers import StringEncoder
from .transformers import LabelEncoder
from .transformers import Normalize
from .transformers import Identity
from .transformers import LogN
from .transformers import Pipeline
# helper class to be able to print [1, ..., 4] instead of [1, '...', 4]
class _Ellipsis:
def __repr__(self):
return '...'
def _transpose_list_array(x):
"""Transposes a list matrix
"""
n_dims = len(x)
assert n_dims > 0
n_samples = len(x[0])
rows = [None] * n_samples
for i in range(n_samples):
r = [None] * n_dims
for j in range(n_dims):
r[j] = x[j][i]
rows[i] = r
return rows
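# Illustrative example (added): _transpose_list_array([[1, 2, 3], ['a', 'b', 'c']])
# returns [[1, 'a'], [2, 'b'], [3, 'c']].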
def check_dimension(dimension, transform=None):
"""Turn a provided dimension description into a dimension object.
Checks that the provided dimension falls into one of the
supported types. For a list of supported types, look at
the documentation of ``dimension`` below.
If ``dimension`` is already a ``Dimension`` instance, return it.
Parameters
----------
dimension : Dimension
Search space Dimension.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
transform : "identity", "normalize", "string", "label", "onehot" optional
- For `Categorical` dimensions, the following transformations are
supported.
- "onehot" (default) one-hot transformation of the original space.
- "label" integer transformation of the original space
- "string" string transformation of the original space.
- "identity" same as the original space.
- For `Real` and `Integer` dimensions, the following transformations
are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between 0 and 1.
Returns
-------
dimension : Dimension
Dimension instance.
"""
if isinstance(dimension, Dimension):
return dimension
if not isinstance(dimension, (list, tuple, np.ndarray)):
raise ValueError("Dimension has to be a list or tuple.")
# A `Dimension` described by a single value is assumed to be
# a `Categorical` dimension. This can be used in `BayesSearchCV`
# to define subspaces that fix one value, e.g. to choose the
# model type, see "sklearn-gridsearchcv-replacement.py"
# for examples.
if len(dimension) == 1:
return Categorical(dimension, transform=transform)
if len(dimension) == 2:
if any([isinstance(d, (str, bool)) or isinstance(d, np.bool_)
for d in dimension]):
return Categorical(dimension, transform=transform)
elif all([isinstance(dim, numbers.Integral) for dim in dimension]):
return Integer(*dimension, transform=transform)
elif any([isinstance(dim, numbers.Real) for dim in dimension]):
return Real(*dimension, transform=transform)
else:
raise ValueError("Invalid dimension {}. Read the documentation for"
" supported types.".format(dimension))
if len(dimension) == 3:
if (any([isinstance(dim, int) for dim in dimension[:2]]) and
dimension[2] in ["uniform", "log-uniform"]):
return Integer(*dimension, transform=transform)
elif (any([isinstance(dim, (float, int)) for dim in dimension[:2]]) and
dimension[2] in ["uniform", "log-uniform"]):
return Real(*dimension, transform=transform)
else:
return Categorical(dimension, transform=transform)
if len(dimension) == 4:
if (any([isinstance(dim, int) for dim in dimension[:2]]) and
dimension[2] == "log-uniform" and isinstance(dimension[3],
int)):
return Integer(*dimension, transform=transform)
elif (any([isinstance(dim, (float, int)) for dim in dimension[:2]]) and
dimension[2] == "log-uniform" and isinstance(dimension[3], int)):
return Real(*dimension, transform=transform)
if len(dimension) > 3:
return Categorical(dimension, transform=transform)
raise ValueError("Invalid dimension {}. Read the documentation for "
"supported types.".format(dimension))
class Dimension(object):
"""Base class for search space dimensions."""
prior = None
def rvs(self, n_samples=1, random_state=None):
"""Draw random samples.
Parameters
----------
n_samples : int or None
The number of samples to be drawn.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
"""
rng = check_random_state(random_state)
samples = self._rvs.rvs(size=n_samples, random_state=rng)
return self.inverse_transform(samples)
def transform(self, X):
"""Transform samples form the original space to a warped space."""
return self.transformer.transform(X)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
return self.transformer.inverse_transform(Xt)
def set_transformer(self):
raise NotImplementedError
@property
def size(self):
return 1
@property
def transformed_size(self):
return 1
@property
def bounds(self):
raise NotImplementedError
@property
def is_constant(self):
raise NotImplementedError
@property
def transformed_bounds(self):
raise NotImplementedError
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if isinstance(value, str) or value is None:
self._name = value
else:
raise ValueError("Dimension's name must be either string or None.")
def _uniform_inclusive(loc=0.0, scale=1.0):
# like scipy.stats.distributions but inclusive of `high`
# XXX scale + 1. might not actually be a float after scale if
# XXX scale is very large.
return uniform(loc=loc, scale=np.nextafter(scale, scale + 1.))
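# Illustrative note (added): np.nextafter(scale, scale + 1.) is the smallest
# float strictly greater than `scale`, so the returned distribution can yield
# `loc + scale` itself, which the half-open uniform would otherwise exclude.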
class Real(Dimension):
"""Search space dimension that can take on any real value.
Parameters
----------
low : float
Lower bound (inclusive).
high : float
Upper bound (inclusive).
prior : "uniform" or "log-uniform", default="uniform"
Distribution to use when sampling random points for this dimension.
- If `"uniform"`, points are sampled uniformly between the lower
and upper bounds.
- If `"log-uniform"`, points are sampled uniformly between
`log(lower, base)` and `log(upper, base)` where log
has base `base`.
base : int
The logarithmic base to use for a log-uniform prior.
- Default 10, otherwise commonly 2.
transform : "identity", "normalize", optional
The following transformations are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between
0 and 1.
name : str or None
Name associated with the dimension, e.g., "learning rate".
dtype : str or dtype, default=np.float
float type which will be used in inverse_transform,
can be float.
"""
def __init__(self, low, high, prior="uniform", base=10, transform=None,
name=None, dtype=np.float):
if high <= low:
raise ValueError("the lower bound {} has to be less than the"
" upper bound {}".format(low, high))
self.low = low
self.high = high
self.prior = prior
self.base = base
self.log_base = np.log10(base)
self.name = name
self.dtype = dtype
self._rvs = None
self.transformer = None
self.transform_ = transform
if isinstance(self.dtype, str) and self.dtype\
not in ['float', 'float16', 'float32', 'float64']:
raise ValueError("dtype must be 'float', 'float16', 'float32'"
"or 'float64'"
" got {}".format(self.dtype))
elif isinstance(self.dtype, type) and self.dtype\
not in [float, np.float, np.float16, np.float32, np.float64]:
raise ValueError("dtype must be float, np.float"
" got {}".format(self.dtype))
if prior not in ("uniform", "log-uniform"):
raise ValueError("prior must be `uniform` or `log-uniform`"
", got {}".format(prior))
if transform is None:
transform = "identity"
self.set_transformer(transform)
def set_transformer(self, transform="identitiy"):
"""Define rvs and transformer spaces.
Parameters
----------
transform : str
Can be 'normalize' or 'identity'
"""
self.transform_ = transform
if self.transform_ not in ["normalize", "identity"]:
raise ValueError("transform should be 'normalize' or 'identity'"
" got {}".format(self.transform_))
# XXX: The _rvs is for sampling in the transformed space.
# The rvs on Dimension calls inverse_transform on the points sampled
# using _rvs
if self.transform_ == "normalize":
# set upper bound to next float after 1. to make the numbers
# inclusive of upper edge
self._rvs = _uniform_inclusive(0., 1.)
if self.prior == "uniform":
self.transformer = Pipeline(
[Identity(), Normalize(self.low, self.high)])
else:
self.transformer = Pipeline(
[LogN(self.base),
Normalize(np.log10(self.low) / self.log_base,
np.log10(self.high) / self.log_base)]
)
else:
if self.prior == "uniform":
self._rvs = _uniform_inclusive(self.low, self.high - self.low)
self.transformer = Identity()
else:
self._rvs = _uniform_inclusive(
np.log10(self.low) / self.log_base,
np.log10(self.high) / self.log_base -
np.log10(self.low) / self.log_base)
self.transformer = LogN(self.base)
def __eq__(self, other):
return (type(self) is type(other) and
np.allclose([self.low], [other.low]) and
np.allclose([self.high], [other.high]) and
self.prior == other.prior and
self.transform_ == other.transform_)
def __repr__(self):
return "Real(low={}, high={}, prior='{}', transform='{}')".format(
self.low, self.high, self.prior, self.transform_)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
inv_transform = super(Real, self).inverse_transform(Xt)
if isinstance(inv_transform, list):
inv_transform = np.array(inv_transform)
inv_transform = np.clip(inv_transform,
self.low, self.high).astype(self.dtype)
if self.dtype == float or self.dtype == 'float':
# necessary, otherwise the type is converted to a numpy type
            return getattr(inv_transform, "tolist", lambda: inv_transform)()
else:
return inv_transform
@property
def bounds(self):
return (self.low, self.high)
@property
def is_constant(self):
return self.low == self.high
def __contains__(self, point):
if isinstance(point, list):
point = np.array(point)
return self.low <= point <= self.high
@property
def transformed_bounds(self):
if self.transform_ == "normalize":
return 0.0, 1.0
else:
if self.prior == "uniform":
return self.low, self.high
else:
return np.log10(self.low), np.log10(self.high)
def distance(self, a, b):
"""Compute distance between point `a` and `b`.
Parameters
----------
a : float
First point.
b : float
Second point.
"""
if not (a in self and b in self):
raise RuntimeError("Can only compute distance for values within "
"the space, not %s and %s." % (a, b))
return abs(a - b)
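# Illustrative example (added): Real(1e-4, 1e-1, prior="log-uniform") draws
# samples whose log10 is uniform on [-4, -1]; its distance() is the absolute
# difference, e.g. distance(0.01, 0.02) == 0.01.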
class Integer(Dimension):
"""Search space dimension that can take on integer values.
Parameters
----------
low : int
Lower bound (inclusive).
high : int
Upper bound (inclusive).
prior : "uniform" or "log-uniform", default="uniform"
Distribution to use when sampling random integers for
this dimension.
- If `"uniform"`, integers are sampled uniformly between the lower
and upper bounds.
- If `"log-uniform"`, integers are sampled uniformly between
`log(lower, base)` and `log(upper, base)` where log
has base `base`.
base : int
The logarithmic base to use for a log-uniform prior.
- Default 10, otherwise commonly 2.
transform : "identity", "normalize", optional
The following transformations are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between
0 and 1.
name : str or None
Name associated with dimension, e.g., "number of trees".
dtype : str or dtype, default=np.int64
integer type which will be used in inverse_transform,
can be int, np.int16, np.uint32, np.int32, np.int64 (default).
When set to int, `inverse_transform` returns a list instead of
a numpy array
"""
def __init__(self, low, high, prior="uniform", base=10, transform=None,
name=None, dtype=np.int64):
if high <= low:
raise ValueError("the lower bound {} has to be less than the"
" upper bound {}".format(low, high))
self.low = low
self.high = high
self.prior = prior
self.base = base
self.log_base = np.log10(base)
self.name = name
self.dtype = dtype
self.transform_ = transform
self._rvs = None
self.transformer = None
if isinstance(self.dtype, str) and self.dtype\
not in ['int', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64']:
raise ValueError("dtype must be 'int', 'int8', 'int16',"
"'int32', 'int64', 'uint8',"
"'uint16', 'uint32', or"
"'uint64', but got {}".format(self.dtype))
elif isinstance(self.dtype, type) and self.dtype\
not in [int, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]:
raise ValueError("dtype must be 'int', 'np.int8', 'np.int16',"
"'np.int32', 'np.int64', 'np.uint8',"
"'np.uint16', 'np.uint32', or"
"'np.uint64', but got {}".format(self.dtype))
if prior not in ("uniform", "log-uniform"):
raise ValueError("prior must be `uniform` or `log-uniform`"
", got {}".format(prior))
if transform is None:
transform = "identity"
self.set_transformer(transform)
def set_transformer(self, transform="identitiy"):
"""Define _rvs and transformer spaces.
Parameters
----------
transform : str
Can be 'normalize' or 'identity'
"""
self.transform_ = transform
if transform not in ["normalize", "identity"]:
raise ValueError("transform should be 'normalize' or 'identity'"
" got {}".format(self.transform_))
if self.transform_ == "normalize":
self._rvs = _uniform_inclusive(0.0, 1.0)
if self.prior == "uniform":
self.transformer = Pipeline(
[Identity(), Normalize(self.low, self.high, is_int=True)])
else:
self.transformer = Pipeline(
[LogN(self.base),
Normalize(np.log10(self.low) / self.log_base,
np.log10(self.high) / self.log_base)]
)
else:
if self.prior == "uniform":
self._rvs = randint(self.low, self.high + 1)
self.transformer = Identity()
else:
self._rvs = _uniform_inclusive(
np.log10(self.low) / self.log_base,
np.log10(self.high) / self.log_base -
np.log10(self.low) / self.log_base)
self.transformer = LogN(self.base)
def __eq__(self, other):
return (type(self) is type(other) and
np.allclose([self.low], [other.low]) and
np.allclose([self.high], [other.high]))
def __repr__(self):
return "Integer(low={}, high={}, prior='{}', transform='{}')".format(
self.low, self.high, self.prior, self.transform_)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
# The concatenation of all transformed dimensions makes Xt to be
# of type float, hence the required cast back to int.
inv_transform = super(Integer, self).inverse_transform(Xt)
if isinstance(inv_transform, list):
inv_transform = np.array(inv_transform)
inv_transform = np.clip(inv_transform,
self.low, self.high)
        rounded = np.round(inv_transform).astype(self.dtype)
        if self.dtype == int or self.dtype == 'int':
            # necessary, otherwise the type is converted to a numpy type
            return getattr(rounded, "tolist", lambda: rounded)()
        else:
            return rounded
@property
def bounds(self):
return (self.low, self.high)
@property
def is_constant(self):
return self.low == self.high
def __contains__(self, point):
if isinstance(point, list):
point = np.array(point)
return self.low <= point <= self.high
@property
def transformed_bounds(self):
if self.transform_ == "normalize":
return 0., 1.
else:
return (self.low, self.high)
def distance(self, a, b):
"""Compute distance between point `a` and `b`.
Parameters
----------
a : int
First point.
b : int
Second point.
"""
if not (a in self and b in self):
raise RuntimeError("Can only compute distance for values within "
"the space, not %s and %s." % (a, b))
return abs(a - b)
class Categorical(Dimension):
"""Search space dimension that can take on categorical values.
Parameters
----------
categories : list, shape=(n_categories,)
Sequence of possible categories.
prior : list, shape=(categories,), default=None
Prior probabilities for each category. By default all categories
are equally likely.
transform : "onehot", "string", "identity", "label", default="onehot"
- "identity", the transformed space is the same as the original
space.
- "string", the transformed space is a string encoded
representation of the original space.
- "label", the transformed space is a label encoded
representation (integer) of the original space.
- "onehot", the transformed space is a one-hot encoded
representation of the original space.
name : str or None
Name associated with dimension, e.g., "colors".
"""
def __init__(self, categories, prior=None, transform=None, name=None):
self.categories = tuple(categories)
self.name = name
if transform is None:
transform = "onehot"
self.transform_ = transform
self.transformer = None
self._rvs = None
self.prior = prior
if prior is None:
self.prior_ = np.tile(1. / len(self.categories),
len(self.categories))
else:
self.prior_ = prior
self.set_transformer(transform)
def set_transformer(self, transform="onehot"):
"""Define _rvs and transformer spaces.
Parameters
----------
transform : str
Can be 'normalize', 'onehot', 'string', 'label', or 'identity'
"""
self.transform_ = transform
if transform not in ["identity", "onehot", "string", "normalize",
"label"]:
raise ValueError("Expected transform to be 'identity', 'string',"
"'label' or 'onehot' got {}".format(transform))
if transform == "onehot":
self.transformer = CategoricalEncoder()
self.transformer.fit(self.categories)
elif transform == "string":
self.transformer = StringEncoder()
self.transformer.fit(self.categories)
elif transform == "label":
self.transformer = LabelEncoder()
self.transformer.fit(self.categories)
elif transform == "normalize":
self.transformer = Pipeline(
[LabelEncoder(list(self.categories)),
Normalize(0, len(self.categories) - 1, is_int=True)])
else:
self.transformer = Identity()
self.transformer.fit(self.categories)
if transform == "normalize":
self._rvs = _uniform_inclusive(0.0, 1.0)
else:
# XXX check that sum(prior) == 1
self._rvs = rv_discrete(
values=(range(len(self.categories)), self.prior_)
)
def __eq__(self, other):
return (type(self) is type(other) and
self.categories == other.categories and
np.allclose(self.prior_, other.prior_))
def __repr__(self):
if len(self.categories) > 7:
cats = self.categories[:3] + (_Ellipsis(),) + self.categories[-3:]
else:
cats = self.categories
if self.prior is not None and len(self.prior) > 7:
prior = self.prior[:3] + [_Ellipsis()] + self.prior[-3:]
else:
prior = self.prior
return "Categorical(categories={}, prior={})".format(cats, prior)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
# The concatenation of all transformed dimensions makes Xt to be
# of type float, hence the required cast back to int.
inv_transform = super(Categorical, self).inverse_transform(Xt)
if isinstance(inv_transform, list):
inv_transform = np.array(inv_transform)
return inv_transform
def rvs(self, n_samples=None, random_state=None):
choices = self._rvs.rvs(size=n_samples, random_state=random_state)
if isinstance(choices, numbers.Integral):
return self.categories[choices]
elif self.transform_ == "normalize" and isinstance(choices, float):
return self.inverse_transform([(choices)])
elif self.transform_ == "normalize":
return self.inverse_transform(list(choices))
else:
return [self.categories[c] for c in choices]
@property
def transformed_size(self):
if self.transform_ == "onehot":
size = len(self.categories)
# when len(categories) == 2, CategoricalEncoder outputs a
# single value
return size if size != 2 else 1
return 1
@property
def bounds(self):
return self.categories
@property
def is_constant(self):
return len(self.categories) <= 1
def __contains__(self, point):
return point in self.categories
@property
def transformed_bounds(self):
if self.transformed_size == 1:
return 0.0, 1.0
else:
return [(0.0, 1.0) for i in range(self.transformed_size)]
def distance(self, a, b):
"""Compute distance between category `a` and `b`.
As categories have no order the distance between two points is one
if a != b and zero otherwise.
Parameters
----------
a : category
First category.
b : category
Second category.
"""
if not (a in self and b in self):
raise RuntimeError("Can only compute distance for values within"
" the space, not {} and {}.".format(a, b))
return 1 if a != b else 0
class Space(object):
"""Initialize a search space from given specifications.
Parameters
----------
dimensions : list, shape=(n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
.. note::
The upper and lower bounds are inclusive for `Integer`
dimensions.
"""
def __init__(self, dimensions):
self.dimensions = [check_dimension(dim) for dim in dimensions]
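    # Illustrative usage (added; the dimensions are examples only):
    #   space = Space([(1, 10), (1e-4, 1e-1, "log-uniform"), ["relu", "tanh"]])
    #   samples = space.rvs(n_samples=2, random_state=0)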
def __eq__(self, other):
return all([a == b for a, b in zip(self.dimensions, other.dimensions)])
def __repr__(self):
if len(self.dimensions) > 31:
dims = self.dimensions[:15] + [_Ellipsis()] + self.dimensions[-15:]
else:
dims = self.dimensions
return "Space([{}])".format(',\n '.join(map(str, dims)))
def __iter__(self):
return iter(self.dimensions)
@property
def dimension_names(self):
"""
Names of all the dimensions in the search-space.
"""
index = 0
names = []
for dim in self.dimensions:
if dim.name is None:
names.append("X_%d" % index)
else:
names.append(dim.name)
index += 1
return names
@property
def is_real(self):
"""
Returns true if all dimensions are Real
"""
return all([isinstance(dim, Real) for dim in self.dimensions])
@classmethod
def from_yaml(cls, yml_path, namespace=None):
"""Create Space from yaml configuration file
Parameters
----------
yml_path : str
Full path to yaml configuration file, example YaML below:
Space:
- Integer:
low: -5
high: 5
- Categorical:
categories:
- a
- b
- Real:
low: 1.0
high: 5.0
prior: log-uniform
namespace : str, default=None
Namespace within configuration file to use, will use first
namespace if not provided
Returns
-------
space : Space
Instantiated Space object
"""
with open(yml_path, 'rb') as f:
config = yaml.safe_load(f)
dimension_classes = {'real': Real,
'integer': Integer,
'categorical': Categorical}
# Extract space options for configuration file
if isinstance(config, dict):
if namespace is None:
options = next(iter(config.values()))
else:
options = config[namespace]
elif isinstance(config, list):
options = config
else:
raise TypeError('YaML does not specify a list or dictionary')
# Populate list with Dimension objects
dimensions = []
for option in options:
key = next(iter(option.keys()))
# Make configuration case insensitive
dimension_class = key.lower()
values = {k.lower(): v for k, v in option[key].items()}
if dimension_class in dimension_classes:
# Instantiate Dimension subclass and add it to the list
dimension = dimension_classes[dimension_class](**values)
dimensions.append(dimension)
space = cls(dimensions=dimensions)
return space
def rvs(self, n_samples=1, random_state=None):
"""Draw random samples.
The samples are in the original space. They need to be transformed
before being passed to a model or minimizer by `space.transform()`.
Parameters
----------
n_samples : int, default=1
Number of samples to be drawn from the space.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
points : list of lists, shape=(n_points, n_dims)
Points sampled from the space.
"""
rng = check_random_state(random_state)
# Draw
columns = []
for dim in self.dimensions:
columns.append(dim.rvs(n_samples=n_samples, random_state=rng))
# Transpose
return _transpose_list_array(columns)
def set_transformer(self, transform):
"""Sets the transformer of all dimension objects to `transform`
Parameters
----------
        transform : str or list of str
            Sets all transformers when `transform` is a single string.
            Otherwise, `transform` must be a list of strings with the
            same length as `dimensions`.
"""
# Transform
for j in range(self.n_dims):
if isinstance(transform, list):
self.dimensions[j].set_transformer(transform[j])
else:
self.dimensions[j].set_transformer(transform)
def set_transformer_by_type(self, transform, dim_type):
"""Sets the transformer of `dim_type` objects to `transform`
Parameters
----------
transform : str
Sets all transformer of type `dim_type` to `transform`
dim_type : type
Can be `skopt.space.Real`, `skopt.space.Integer` or
`skopt.space.Categorical`
"""
# Transform
for j in range(self.n_dims):
if isinstance(self.dimensions[j], dim_type):
self.dimensions[j].set_transformer(transform)
def get_transformer(self):
"""Returns all transformers as list"""
return [self.dimensions[j].transform_ for j in range(self.n_dims)]
def transform(self, X):
"""Transform samples from the original space into a warped space.
Note: this transformation is expected to be used to project samples
into a suitable space for numerical optimization.
Parameters
----------
X : list of lists, shape=(n_samples, n_dims)
The samples to transform.
Returns
-------
Xt : array of floats, shape=(n_samples, transformed_n_dims)
The transformed samples.
"""
# Pack by dimension
columns = []
for dim in self.dimensions:
columns.append([])
for i in range(len(X)):
for j in range(self.n_dims):
columns[j].append(X[i][j])
# Transform
for j in range(self.n_dims):
columns[j] = self.dimensions[j].transform(columns[j])
# Repack as an array
Xt = np.hstack([np.asarray(c).reshape((len(X), -1)) for c in columns])
return Xt
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back to the
original space.
Parameters
----------
Xt : array of floats, shape=(n_samples, transformed_n_dims)
The samples to inverse transform.
Returns
-------
X : list of lists, shape=(n_samples, n_dims)
The original samples.
"""
# Inverse transform
columns = []
start = 0
Xt = np.asarray(Xt)
for j in range(self.n_dims):
dim = self.dimensions[j]
offset = dim.transformed_size
if offset == 1:
columns.append(dim.inverse_transform(Xt[:, start]))
else:
columns.append(
dim.inverse_transform(Xt[:, start:start + offset]))
start += offset
# Transpose
return _transpose_list_array(columns)
@property
def n_dims(self):
"""The dimensionality of the original space."""
return len(self.dimensions)
@property
def transformed_n_dims(self):
"""The dimensionality of the warped space."""
return sum([dim.transformed_size for dim in self.dimensions])
@property
def bounds(self):
"""The dimension bounds, in the original space."""
b = []
for dim in self.dimensions:
if dim.size == 1:
b.append(dim.bounds)
else:
b.extend(dim.bounds)
return b
def __contains__(self, point):
"""Check that `point` is within the bounds of the space."""
for component, dim in zip(point, self.dimensions):
if component not in dim:
return False
return True
def __getitem__(self, dimension_names):
"""
Lookup and return the search-space dimension with the given name.
This allows for dict-like lookup of dimensions, for example:
`space['foo']` returns the dimension named 'foo' if it exists,
otherwise `None` is returned.
It also allows for lookup of a list of dimension-names, for example:
`space[['foo', 'bar']]` returns the two dimensions named
'foo' and 'bar' if they exist.
Parameters
----------
dimension_names : str or list(str)
Name of a single search-space dimension (str).
List of names for search-space dimensions (list(str)).
Returns
-------
dims tuple (index, Dimension), list(tuple(index, Dimension)), \
(None, None)
A single search-space dimension with the given name,
or a list of search-space dimensions with the given names.
"""
def _get(dimension_name):
"""Helper-function for getting a single dimension."""
index = 0
# Get the index of the search-space dimension using its name.
for dim in self.dimensions:
if dimension_name == dim.name:
return (index, dim)
elif dimension_name == index:
return (index, dim)
index += 1
return (None, None)
if isinstance(dimension_names, (str, int)):
# Get a single search-space dimension.
dims = _get(dimension_name=dimension_names)
elif isinstance(dimension_names, (list, tuple)):
# Get a list of search-space dimensions.
# Note that we do not check whether the names are really strings.
dims = [_get(dimension_name=name) for name in dimension_names]
else:
msg = "Dimension name should be either string or" \
"list of strings, but got {}."
raise ValueError(msg.format(type(dimension_names)))
return dims
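    # Illustrative example (added): for a space with dimensions named 'a' and
    # 'b', space['a'] returns (0, <the 'a' dimension>) and space[['a', 'b']]
    # returns [(0, ...), (1, ...)]; unknown names yield (None, None).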
@property
def transformed_bounds(self):
"""The dimension bounds, in the warped space."""
b = []
for dim in self.dimensions:
if dim.transformed_size == 1:
b.append(dim.transformed_bounds)
else:
b.extend(dim.transformed_bounds)
return b
@property
def is_categorical(self):
"""Space contains exclusively categorical dimensions"""
return all([isinstance(dim, Categorical) for dim in self.dimensions])
@property
def is_partly_categorical(self):
"""Space contains any categorical dimensions"""
return any([isinstance(dim, Categorical) for dim in self.dimensions])
@property
def n_constant_dimensions(self):
"""Returns the number of constant dimensions which have zero degree of
freedom, e.g. an Integer dimensions with (0., 0.) as bounds.
"""
n = 0
for dim in self.dimensions:
if dim.is_constant:
n += 1
return n
def distance(self, point_a, point_b):
"""Compute distance between two points in this space.
Parameters
----------
point_a : array
First point.
point_b : array
Second point.
"""
distance = 0.
for a, b, dim in zip(point_a, point_b, self.dimensions):
distance += dim.distance(a, b)
return distance
| 33.901137 | 79 | 0.569511 |
bc873de95e9a7a08c1d0d11f58604f77ca65ae9c | 2,637 | py | Python | lib/models/online/classifier/features.py | sunshuofeng/TracKit | bf9d09aed7d7b8e01ccfc40a67653aa0666c868d | ["MIT"] | null | null | null | lib/models/online/classifier/features.py | sunshuofeng/TracKit | bf9d09aed7d7b8e01ccfc40a67653aa0666c868d | ["MIT"] | null | null | null | lib/models/online/classifier/features.py | sunshuofeng/TracKit | bf9d09aed7d7b8e01ccfc40a67653aa0666c868d | ["MIT"] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models.resnet import BasicBlock, Bottleneck
from lib.models.online.layers.normalization import InstanceL2Norm
from lib.models.online.layers.transform import InterpCat
def residual_basic_block(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
interp_cat=False):
"""Construct a network block based on the BasicBlock used in ResNet 18 and 34."""
if out_dim is None:
out_dim = feature_dim
feat_layers = []
if interp_cat:
feat_layers.append(InterpCat())
for i in range(num_blocks):
odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim
feat_layers.append(BasicBlock(feature_dim, odim))
if final_conv:
feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))
if l2norm:
feat_layers.append(InstanceL2Norm(scale=norm_scale))
return nn.Sequential(*feat_layers)
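# Illustrative usage (added; the input shape is an example only):
#   head = residual_basic_block(feature_dim=256, num_blocks=2, norm_scale=1.0)
#   out = head(torch.zeros(1, 256, 18, 18))  # -> torch.Size([1, 256, 18, 18])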
def residual_basic_block_pool(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
pool=True):
"""Construct a network block based on the BasicBlock used in ResNet."""
if out_dim is None:
out_dim = feature_dim
feat_layers = []
for i in range(num_blocks):
odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim
feat_layers.append(BasicBlock(feature_dim, odim))
if final_conv:
feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))
if pool:
feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
if l2norm:
feat_layers.append(InstanceL2Norm(scale=norm_scale))
return nn.Sequential(*feat_layers)
def residual_bottleneck(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
interp_cat=False):
"""Construct a network block based on the Bottleneck block used in ResNet."""
if out_dim is None:
out_dim = feature_dim
feat_layers = []
if interp_cat:
feat_layers.append(InterpCat())
for i in range(num_blocks):
planes = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim // 4
feat_layers.append(Bottleneck(4*feature_dim, planes))
if final_conv:
feat_layers.append(nn.Conv2d(4*feature_dim, out_dim, kernel_size=3, padding=1, bias=False))
if l2norm:
feat_layers.append(InstanceL2Norm(scale=norm_scale))
return nn.Sequential(*feat_layers)
| 42.532258
| 121
| 0.6989
|
3dfd9e6bed87f2c8692f66191d250e39ae4ffe32
| 311
|
py
|
Python
|
main.py
|
ramanveerji/PlateInfo
|
397ba8e60383d69de00684e2487daf2c25be9d69
|
[
"MIT"
] | 1
|
2021-06-06T23:22:40.000Z
|
2021-06-06T23:22:40.000Z
|
main.py
|
ramanveerji/PlateInfo
|
397ba8e60383d69de00684e2487daf2c25be9d69
|
[
"MIT"
] | null | null | null |
main.py
|
ramanveerji/PlateInfo
|
397ba8e60383d69de00684e2487daf2c25be9d69
|
[
"MIT"
] | 2
|
2021-06-06T23:22:32.000Z
|
2021-08-11T20:11:35.000Z
|
from pyrogram import Client, idle
from configs import API_ID, API_HASH, TOKEN
import asyncio
async def main():
    await client.start()
    await idle()
    await client.stop()
client = Client("bot", API_ID, API_HASH, bot_token=TOKEN, plugins=dict(root="plugins"))
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
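# A hedged sketch of what the imported `configs` module presumably provides;
# the actual file is not shown here and the values below are placeholders:
#
#     # configs.py
#     API_ID = 12345                 # application ID from https://my.telegram.org
#     API_HASH = "0123456789abcdef"  # application hash from https://my.telegram.org
#     TOKEN = "110201543:AAHdq..."   # bot token issued by @BotFather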
| 19.4375
| 87
| 0.73955
|
7519555675bc53473ccd84e5cde96edbcfad1f1b
| 25,964
|
py
|
Python
|
geoportal/c2cgeoportal_geoportal/__init__.py
|
ponceta/c2cgeoportal
|
06f17346a561f8509fd7a732760c19e859d0ed02
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
geoportal/c2cgeoportal_geoportal/__init__.py
|
ponceta/c2cgeoportal
|
06f17346a561f8509fd7a732760c19e859d0ed02
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
geoportal/c2cgeoportal_geoportal/__init__.py
|
ponceta/c2cgeoportal
|
06f17346a561f8509fd7a732760c19e859d0ed02
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import time
import logging
import os
import binascii
from urllib.parse import urlsplit
import c2cwsgiutils
import re
import simplejson as json
from socket import gethostbyname, gaierror
from ipcalc import IP, Network
from Crypto.Cipher import AES
import importlib
import zope.event.classhandler
from pyramid.config import Configurator
from pyramid_mako import add_mako_renderer
from pyramid.interfaces import IStaticURLInfo
from pyramid.httpexceptions import HTTPException
import pyramid.security
from papyrus.renderers import GeoJSON
from c2cgeoportal_geoportal.lib.xsd import XSD
import c2cwsgiutils.db
import c2cwsgiutils.index
from c2cwsgiutils.health_check import HealthCheck
from sqlalchemy.orm import Session
import c2cgeoportal_commons.models
import c2cgeoportal_geoportal.views
from c2cgeoportal_geoportal.lib import dbreflection, caching, \
C2CPregenerator, MultiDomainStaticURLInfo, checker, check_collector
LOG = logging.getLogger(__name__)
# Header predicate to accept only JSON content
# OL/cgxp are not setting the correct content type for JSON. We have to accept
# XML as well even though JSON is actually sent.
JSON_CONTENT_TYPE = "Content-Type:application/(?:json|xml)"
class DecimalJSON:
def __call__(self, info):
def _render(value, system):
ret = json.dumps(value, use_decimal=True)
request = system.get("request")
if request is not None:
request.response.content_type = "application/json"
return ret
return _render
INTERFACE_TYPE_NGEO = "ngeo"
def add_interface(
config, interface_name="desktop", interface_type=INTERFACE_TYPE_NGEO, default=False, **kwargs
): # pragma: no cover
del interface_type # unused
route = "/" if default else "/{0!s}".format(interface_name)
add_interface_ngeo(
config,
route_name=interface_name,
route=route,
renderer="/{0!s}.html".format(interface_name),
**kwargs
)
def add_interface_ngeo(config, route_name, route, renderer=None, permission=None): # pragma: no cover
    # This import cannot be at the top of the module, to avoid loading the model too early
from c2cgeoportal_geoportal.views.entry import Entry
config.add_route(route_name, route, request_method="GET")
config.add_view(
Entry,
attr="get_ngeo_index_vars",
route_name=route_name,
renderer=renderer,
permission=permission
)
# permalink theme: recover the theme for generating custom viewer.js url
config.add_route(
"{}theme".format(route_name),
"{}{}theme/{{themes}}".format(route, "" if route[-1] == "/" else "/"),
request_method="GET",
)
config.add_view(
Entry,
attr="get_ngeo_index_vars",
route_name="{}theme".format(route_name),
renderer=renderer,
permission=permission
)
def add_admin_interface(config):
if config.get_settings().get('enable_admin_interface', False):
config.add_request_method(
# pylint: disable=not-callable
lambda request: c2cgeoportal_commons.models.DBSession(), 'dbsession', reify=True
)
config.add_view(c2cgeoportal_geoportal.views.add_ending_slash, 'add_ending_slash')
config.add_route('add_ending_slash', '/admin', request_method='GET')
config.include('c2cgeoportal_admin', route_prefix='/admin')
def locale_negotiator(request):
lang = request.params.get("lang")
if lang is None:
lang = request.cookies.get('_LOCALE_')
else:
request.response.set_cookie('_LOCALE_', lang)
if lang is None:
# if best_match returns None then use the default_locale_name configuration variable
return request.accept_language.best_match(
request.registry.settings.get("available_locale_names"),
default_match=request.registry.settings.get("default_locale_name"))
return lang
def _match_url_start(reference, value):
"""
    Check that the value URL starts with the reference URL.
"""
reference_parts = reference.rstrip("/").split("/")
value_parts = value[0:len(reference_parts)]
return reference_parts == value_parts
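# Illustrative example (comment only, not part of the original module): with
# reference "https://example.com/app", the already-split referer
# ["https:", "", "example.com", "app", "sub"] matches (same prefix), while
# ["https:", "", "evil.com", "app"] does not.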
def is_valid_referer(request, settings=None):
if request.referer is not None:
referer = urlsplit(request.referer)._replace(query="", fragment="").geturl().rstrip("/").split("/")
if settings is None:
settings = request.registry.settings
list_ = settings.get("authorized_referers", [])
return any(_match_url_start(e, referer) for e in list_)
else:
return True
def create_get_user_from_request(settings):
def get_user_from_request(request, username=None):
""" Return the User object for the request.
Return ``None`` if:
* user is anonymous
* it does not exist in the database
* the referer is invalid
"""
from c2cgeoportal_commons.models import DBSession
from c2cgeoportal_commons.models.static import User
try:
if "auth" in request.params:
auth_enc = request.params.get("auth")
if auth_enc is not None:
urllogin = request.registry.settings.get("urllogin", {})
aeskey = urllogin.get("aes_key")
if aeskey is None: # pragma: nocover
raise Exception("urllogin is not configured")
now = int(time.time())
data = binascii.unhexlify(auth_enc.encode('ascii'))
nonce = data[0:16]
tag = data[16:32]
ciphertext = data[32:]
cipher = AES.new(aeskey.encode("ascii"), AES.MODE_EAX, nonce)
auth = json.loads(cipher.decrypt_and_verify(ciphertext, tag).decode("utf-8"))
if "t" in auth and "u" in auth and "p" in auth:
timestamp = int(auth["t"])
if now < timestamp and request.registry.validate_user(
request, auth["u"], auth["p"]
):
headers = pyramid.security.remember(request, auth["u"])
request.response.headerlist.extend(headers)
except Exception as e:
LOG.error("URL login error: %s.", e, exc_info=True)
if not hasattr(request, "is_valid_referer"):
request.is_valid_referer = is_valid_referer(request, settings)
if not request.is_valid_referer:
LOG.debug("Invalid referer for %s: %s", request.path_qs, repr(request.referer))
return None
if not hasattr(request, "user_"):
request.user_ = None
if username is None:
username = request.authenticated_userid
if username is not None:
# We know we will need the role object of the
# user so we use joined loading
request.user_ = DBSession.query(User) \
.filter_by(username=username) \
.first()
return request.user_
return get_user_from_request
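# A hedged sketch of the producer side of the "auth" URL parameter decoded
# above (assumes pycryptodome; the key must be a valid AES key length and
# must match the `urllogin.aes_key` setting; names below are illustrative):
#
#     import json, time, binascii
#     from Crypto.Cipher import AES
#
#     def build_urllogin_token(aes_key, username, password, valid_seconds=3600):
#         payload = json.dumps({
#             "u": username, "p": password,
#             "t": int(time.time()) + valid_seconds,
#         }).encode("utf-8")
#         cipher = AES.new(aes_key.encode("ascii"), AES.MODE_EAX)  # random 16-byte nonce
#         ciphertext, tag = cipher.encrypt_and_digest(payload)
#         # Layout must match the reader: nonce (16) + tag (16) + ciphertext.
#         return binascii.hexlify(cipher.nonce + tag + ciphertext).decode("ascii")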
def set_user_validator(config, user_validator):
""" Call this function to register a user validator function.
The validator function is passed three arguments: ``request``,
``username``, and ``password``. The function should return the
user name if the credentials are valid, and ``None`` otherwise.
The validator should not do the actual authentication operation
by calling ``remember``, this is handled by the ``login`` view.
"""
def register():
config.registry.validate_user = user_validator
config.action("user_validator", register)
def default_user_validator(request, username, password):
"""
Validate the username/password. This is c2cgeoportal's
default user validator.
Return None if we are anonymous, the string to remember otherwise.
"""
del request # unused
from c2cgeoportal_commons.models import DBSession
from c2cgeoportal_commons.models.static import User
user = DBSession.query(User).filter_by(username=username).first()
if user is None:
        LOG.info('Unknown user "%s" tried to log in', username)
return None
if user.deactivated:
LOG.info('Deactivated user "%s" tried to log in', username)
return None
if user.expired():
        LOG.info('Expired user "%s" tried to log in', username)
return None
if not user.validate_password(password):
LOG.info('User "%s" tried to log in with bad credentials', username)
return None
return username
class OgcproxyRoutePredicate:
""" Serve as a custom route predicate function for ogcproxy.
    We do not want the OGC proxy to be used to reach hosts on private
    networks (including the app's own mapserv script), so we return
    False whenever the target URL resolves to a private IP address. """
def __init__(self, val, config):
del val # unused
del config # unused
self.private_networks = [
Network("127.0.0.0/8"),
Network("10.0.0.0/8"),
Network("172.16.0.0/12"),
Network("192.168.0.0/16"),
]
def __call__(self, context, request):
url = request.params.get("url")
if url is None:
return False
parts = urlsplit(url)
try:
ip = IP(gethostbyname(parts.netloc))
except gaierror as e:
LOG.info("Unable to get host name for %s: %s", url, e)
return False
for net in self.private_networks:
if ip in net:
return False
return True
@staticmethod
def phash(): # pragma: no cover
return ""
class MapserverproxyRoutePredicate:
""" Serve as a custom route predicate function for mapserverproxy.
If the hide_capabilities setting is set and is true then we want to
return 404s on GetCapabilities requests."""
def __init__(self, val, config):
pass
def __call__(self, context, request):
hide_capabilities = request.registry.settings.get("hide_capabilities")
if not hide_capabilities:
return True
params = dict(
(k.lower(), v.lower()) for k, v in request.params.items()
)
return "request" not in params or params["request"] != "getcapabilities"
@staticmethod
def phash():
return ""
def add_cors_route(config, pattern, service):
"""
    Add the OPTIONS route and view needed for services supporting CORS.
"""
def view(request): # pragma: no cover
from c2cgeoportal_geoportal.lib.caching import set_common_headers, NO_CACHE
return set_common_headers(request, service, NO_CACHE)
name = pattern + "_options"
config.add_route(name, pattern, request_method="OPTIONS")
config.add_view(view, route_name=name)
def error_handler(http_exception, request): # pragma: no cover
"""
View callable for handling all the exceptions that are not already handled.
"""
LOG.warning("%s returned status code %s", request.url, http_exception.status_code)
return caching.set_common_headers(
request, "error", caching.NO_CACHE, http_exception, vary=True
)
def call_hook(settings, name, *args, **kwargs):
hooks = settings.get("hooks", {})
hook = hooks.get(name)
if hook is None:
return
parts = hook.split(".")
module = importlib.import_module(".".join(parts[0:-1]))
function_ = getattr(module, parts[-1])
function_(*args, **kwargs)
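# Example (hedged, the module and function names are illustrative): with a
# settings entry such as
#
#     hooks:
#         after_settings: my_geoportal.hooks.after_settings
#
# `call_hook(settings, "after_settings", settings)` imports
# `my_geoportal.hooks` and calls its `after_settings(settings)` function.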
def includeme(config: pyramid.config.Configurator):
"""
    Configure the c2cgeoportal Pyramid application.
"""
settings = config.get_settings()
config.include("c2cgeoportal_commons")
call_hook(settings, "after_settings", settings)
get_user_from_request = create_get_user_from_request(settings)
config.add_request_method(get_user_from_request, name="user", property=True)
config.add_request_method(get_user_from_request, name="get_user")
# Configure 'locale' dir as the translation dir for c2cgeoportal app
config.add_translation_dirs("c2cgeoportal_geoportal:locale/")
config.include('c2cwsgiutils.pyramid.includeme')
health_check = HealthCheck(config)
# Initialise DBSessions
init_dbsessions(settings, config, health_check)
# Initialize the dbreflection module
dbreflection.init()
checker.init(config, health_check)
check_collector.init(config, health_check)
# dogpile.cache configuration
if 'cache' in settings:
caching.init_region(settings['cache'])
from c2cgeoportal_commons.models.main import InvalidateCacheEvent
@zope.event.classhandler.handler(InvalidateCacheEvent)
def handle(event: InvalidateCacheEvent): # pylint: disable=unused-variable
del event
caching.invalidate_region()
# Register a tween to get back the cache buster path.
if 'cache_path' not in config.get_settings():
config.get_settings()['cache_path'] = ['static']
config.add_tween("c2cgeoportal_geoportal.lib.cacheversion.CachebusterTween")
config.add_tween("c2cgeoportal_geoportal.lib.webpack.WebpackTween")
config.add_tween("c2cgeoportal_geoportal.lib.headers.HeadersTween")
# Bind the mako renderer to other file extensions
add_mako_renderer(config, ".html")
add_mako_renderer(config, ".js")
# Add the "geojson" renderer
config.add_renderer("geojson", GeoJSON())
# Add decimal json renderer
config.add_renderer("decimaljson", DecimalJSON())
# Add the "xsd" renderer
config.add_renderer("xsd", XSD(
include_foreign_keys=True
))
# Add the set_user_validator directive, and set a default user validator
config.add_directive("set_user_validator", set_user_validator)
config.set_user_validator(default_user_validator)
config.add_route('dynamic', '/dynamic.json', request_method="GET")
if settings.get("ogcproxy_enable", False): # pragma: no cover
# Add an OGCProxy view
config.add_route_predicate("ogc_server", OgcproxyRoutePredicate)
config.add_route(
"ogcproxy", "/ogcproxy",
ogc_server=True
)
config.add_view("papyrus_ogcproxy.views:ogcproxy", route_name="ogcproxy")
# Add routes to the mapserver proxy
config.add_route_predicate("mapserverproxy", MapserverproxyRoutePredicate)
config.add_route(
"mapserverproxy", "/mapserv_proxy",
mapserverproxy=True, pregenerator=C2CPregenerator(role=True),
)
# Add route to the tinyows proxy
config.add_route(
"tinyowsproxy", "/tinyows_proxy",
pregenerator=C2CPregenerator(role=True),
)
# Add routes to the entry view class
config.add_route("base", "/", static=True)
config.add_route("loginform", "/login.html", request_method="GET")
add_cors_route(config, "/login", "login")
config.add_route("login", "/login", request_method="POST")
add_cors_route(config, "/logout", "login")
config.add_route("logout", "/logout", request_method="GET")
add_cors_route(config, "/loginchange", "login")
config.add_route("loginchange", "/loginchange", request_method="POST")
add_cors_route(config, "/loginresetpassword", "login")
config.add_route("loginresetpassword", "/loginresetpassword", request_method="POST")
add_cors_route(config, "/loginuser", "login")
config.add_route("loginuser", "/loginuser", request_method="GET")
config.add_route("testi18n", "/testi18n.html", request_method="GET")
config.add_route("apijs", "/api.js", request_method="GET")
config.add_route("xapijs", "/xapi.js", request_method="GET")
config.add_route("apihelp", "/apihelp.html", request_method="GET")
config.add_route("xapihelp", "/xapihelp.html", request_method="GET")
config.add_route(
"themes", "/themes",
request_method="GET",
pregenerator=C2CPregenerator(role=True),
)
config.add_route("invalidate", "/invalidate", request_method="GET")
# Print proxy routes
config.add_route("printproxy", "/printproxy", request_method="HEAD")
add_cors_route(config, "/printproxy/*all", "print")
config.add_route(
"printproxy_capabilities", "/printproxy/capabilities.json",
request_method="GET", pregenerator=C2CPregenerator(role=True),
)
config.add_route(
"printproxy_report_create", "/printproxy/report.{format}",
request_method="POST", header=JSON_CONTENT_TYPE
)
config.add_route(
"printproxy_status", "/printproxy/status/{ref}.json",
request_method="GET"
)
config.add_route(
"printproxy_cancel", "/printproxy/cancel/{ref}",
request_method="DELETE"
)
config.add_route(
"printproxy_report_get", "/printproxy/report/{ref}",
request_method="GET"
)
# Full-text search routes
add_cors_route(config, "/fulltextsearch", "fulltextsearch")
config.add_route("fulltextsearch", "/fulltextsearch")
# Access to raster data
add_cors_route(config, "/raster", "raster")
config.add_route("raster", "/raster", request_method="GET")
add_cors_route(config, "/profile.{ext}", "profile")
config.add_route("profile.csv", "/profile.csv", request_method="POST")
config.add_route("profile.json", "/profile.json", request_method="POST")
# Shortener
add_cors_route(config, "/short/create", "shortener")
config.add_route("shortener_create", "/short/create", request_method="POST")
config.add_route("shortener_get", "/s/{ref}", request_method="GET")
# Geometry processing
config.add_route("difference", "/difference", request_method="POST")
# PDF report tool
config.add_route("pdfreport", "/pdfreport/{layername}/{ids}", request_method="GET")
# Add routes for the "layers" web service
add_cors_route(config, "/layers/*all", "layers")
config.add_route(
"layers_count", "/layers/{layer_id:\\d+}/count",
request_method="GET"
)
config.add_route(
"layers_metadata", "/layers/{layer_id:\\d+}/md.xsd",
request_method="GET",
pregenerator=C2CPregenerator(role=True),
)
config.add_route(
"layers_read_many",
"/layers/{layer_id:\\d+,?(\\d+,)*\\d*$}",
request_method="GET") # supports URLs like /layers/1,2,3
config.add_route(
"layers_read_one", "/layers/{layer_id:\\d+}/{feature_id}",
request_method="GET")
config.add_route(
"layers_create", "/layers/{layer_id:\\d+}",
request_method="POST", header=JSON_CONTENT_TYPE)
config.add_route(
"layers_update", "/layers/{layer_id:\\d+}/{feature_id}",
request_method="PUT", header=JSON_CONTENT_TYPE)
config.add_route(
"layers_delete", "/layers/{layer_id:\\d+}/{feature_id}",
request_method="DELETE")
config.add_route(
"layers_enumerate_attribute_values",
"/layers/{layer_name}/values/{field_name}",
request_method="GET",
pregenerator=C2CPregenerator(),
)
# There is no view corresponding to that route, it is to be used from
# mako templates to get the root of the "layers" web service
config.add_route("layers_root", "/layers", request_method="HEAD")
# Resource proxy (load external url, useful when loading non https content)
config.add_route("resourceproxy", "/resourceproxy", request_method="GET")
# Dev
config.add_route("dev", "/dev/*path", request_method="GET")
# Used memory in caches
add_cors_route(config, "/memory", "memory")
config.add_route("memory", "/memory", request_method="GET")
# Scan view decorator for adding routes
config.scan(ignore=[
"c2cgeoportal_geoportal.lib",
"c2cgeoportal_geoportal.scaffolds",
"c2cgeoportal_geoportal.scripts"
])
if "subdomains" in settings: # pragma: no cover
config.registry.registerUtility(
MultiDomainStaticURLInfo(), IStaticURLInfo)
add_admin_interface(config)
# Add the project static view with cache buster
from c2cgeoportal_geoportal.lib.cacheversion import version_cache_buster
package = config.get_settings()["package"]
config.add_static_view(
name="static",
path="{}_geoportal:static".format(package),
cache_max_age=int(config.get_settings()["default_max_age"]),
)
config.add_cache_buster("{}_geoportal:static".format(package), version_cache_buster)
# Add the project static view without cache buster
config.add_static_view(
name="static-ngeo",
path="{}_geoportal:static-ngeo".format(package),
cache_max_age=int(config.get_settings()["default_max_age"]),
)
# Handles the other HTTP errors raised by the views. Without that,
# the client receives a status=200 without content.
config.add_view(error_handler, context=HTTPException)
c2cwsgiutils.index.additional_title = '<div class="row"><div class="col-lg-3"><h2>GeoMapFish</h2>' \
'</div><div class="col-lg">'
c2cwsgiutils.index.additional_auth.extend([
'<a href="../tiles/admin/">TileCloud chain admin</a><br>',
'<a href="../invalidate">Invalidate the cache</a><br>',
'<a href="../memory">Memory status</a>',
])
if config.get_settings().get('enable_admin_interface', False):
c2cwsgiutils.index.additional_noauth.append('<a href="../admin/">Admin</a><br>')
c2cwsgiutils.index.additional_noauth.append(
'</div></div><div class="row"><div class="col-lg-3"><h3>Interfaces</h3></div><div class="col-lg">'
)
c2cwsgiutils.index.additional_noauth.append('<a href="../">Default</a><br>')
for interface in config.get_settings().get("interfaces", []):
if interface != config.get_settings().get("default_interface"):
c2cwsgiutils.index.additional_noauth.append(
'<p><a href="../{interface}">{interface}</a><br>'.format(
interface=interface
)
)
c2cwsgiutils.index.additional_noauth.append('<a href="../apihelp.html">API help</a><br>')
c2cwsgiutils.index.additional_noauth.append('</div></div><hr>')
def init_dbsessions(settings: dict, config: Configurator, health_check: HealthCheck = None) -> None:
db_chooser = settings.get('db_chooser', {})
master_paths = [re.compile(i.replace('//', '/')) for i in db_chooser.get('master', [])]
slave_paths = [re.compile(i.replace('//', '/')) for i in db_chooser.get('slave', [])]
slave_prefix = 'sqlalchemy_slave' if 'sqlalchemy_slave.url' in settings else None
c2cgeoportal_commons.models.DBSession, rw_bind, _ = c2cwsgiutils.db.setup_session(
config, 'sqlalchemy', slave_prefix, force_master=master_paths, force_slave=slave_paths)
c2cgeoportal_commons.models.Base.metadata.bind = rw_bind
c2cgeoportal_commons.models.DBSessions['dbsession'] = c2cgeoportal_commons.models.DBSession
for dbsession_name, dbsession_config in settings.get('dbsessions', {}).items(): # pragma: nocover
c2cgeoportal_commons.models.DBSessions[dbsession_name] = \
c2cwsgiutils.db.create_session(config, dbsession_name, **dbsession_config)
c2cgeoportal_commons.models.Base.metadata.clear()
from c2cgeoportal_commons.models import main
if health_check is not None:
for name, session in c2cgeoportal_commons.models.DBSessions.items():
if name == 'dbsession':
health_check.add_db_session_check(session, at_least_one_model=main.Theme)
alembic_ini = os.path.join(os.path.abspath(os.path.curdir), 'alembic.ini')
if os.path.exists(alembic_ini):
health_check.add_alembic_check(session, alembic_ini_path=alembic_ini, name='main',
version_schema=settings['schema'])
health_check.add_alembic_check(session, alembic_ini_path=alembic_ini, name='static',
version_schema=settings['schema_static'])
else: # pragma: no cover
def check(session_: Session) -> None:
session_.execute('SELECT 1')
health_check.add_db_session_check(session, query_cb=check)
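# Example (hedged) of the `db_chooser` setting consumed above: lists of
# regular expressions routing matching request paths to the master or the
# slave session (the patterns below are illustrative only):
#
#     db_chooser:
#         master: ["/layers/.*"]   # force read-write sessions
#         slave: ["/themes"]       # force read-only sessions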
| 38.984985
| 107
| 0.669619
|
a2eef0d32530a2a3bb7630878bddb31c835d7b76
| 4,694
|
py
|
Python
|
benchmark/startQiskit_QC3268.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC3268.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_QC3268.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=52
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
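# Worked examples (illustrative comments, not part of the original file):
# bitwise_xor("101", "011") XORs position-wise to ['1', '1', '0'] and then
# reverses, returning "011"; bitwise_dot("101", "111") sums 1*1 + 0*1 + 1*1 = 2
# and returns "0" (2 mod 2).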
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=21
prog.cz(input_qubit[0],input_qubit[3]) # number=22
prog.h(input_qubit[3]) # number=23
prog.h(input_qubit[3]) # number=27
prog.cz(input_qubit[0],input_qubit[3]) # number=28
prog.h(input_qubit[3]) # number=29
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.h(input_qubit[3]) # number=43
prog.cz(input_qubit[0],input_qubit[3]) # number=44
prog.h(input_qubit[3]) # number=45
prog.x(input_qubit[3]) # number=41
prog.h(input_qubit[3]) # number=46
prog.cz(input_qubit[0],input_qubit[3]) # number=47
prog.h(input_qubit[3]) # number=48
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=49
prog.cz(input_qubit[0],input_qubit[3]) # number=50
prog.h(input_qubit[3]) # number=51
prog.rx(-0.364424747816416,input_qubit[3]) # number=36
prog.y(input_qubit[3]) # number=20
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.cx(input_qubit[0],input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=19
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC3268.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 35.832061
| 165
| 0.65637
|
3c511bd5b9c90216b04470c8b0d489686fa19f48
| 6,361
|
py
|
Python
|
lib/galaxy/webapps/galaxy/api/tool_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 2
|
2017-10-23T14:44:12.000Z
|
2018-01-14T10:37:28.000Z
|
lib/galaxy/webapps/galaxy/api/tool_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 30
|
2016-10-20T15:35:12.000Z
|
2018-10-02T15:59:54.000Z
|
lib/galaxy/webapps/galaxy/api/tool_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 7
|
2016-11-03T19:11:01.000Z
|
2020-05-11T14:23:52.000Z
|
"""
API operations allowing clients to manage tool dependencies.
"""
import logging
from galaxy.tools.deps import views
from galaxy.web import (
_future_expose_api as expose_api,
require_admin
)
from galaxy.web.base.controller import BaseAPIController
log = logging.getLogger(__name__)
class ToolDependenciesAPIController(BaseAPIController):
def __init__(self, app):
super(ToolDependenciesAPIController, self).__init__(app)
self._view = views.DependencyResolversView(app)
@expose_api
@require_admin
def index(self, trans, **kwd):
"""
GET /api/dependency_resolvers
"""
return self._view.index()
@expose_api
@require_admin
def show(self, trans, id):
"""
GET /api/dependency_resolvers/<id>
"""
return self._view.show(id)
@expose_api
@require_admin
def update(self, trans):
"""
PUT /api/dependency_resolvers
Reload tool dependency resolution configuration.
"""
return self._view.reload()
@expose_api
@require_admin
def resolver_dependency(self, trans, id, **kwds):
"""
GET /api/dependency_resolvers/{index}/dependency
Resolve described requirement against specified dependency resolver.
:type index: int
:param index: index of the dependency resolver
:type kwds: dict
:param kwds: dictionary structure containing extra parameters
:type name: str
:param name: name of the requirement to find a dependency for (required)
:type version: str
:param version: version of the requirement to find a dependency for (required)
:type exact: bool
:param exact: require an exact match to specify requirement (do not discard
version information to resolve dependency).
:rtype: dict
:returns: a dictified description of the dependency, with attribute
``dependency_type: None`` if no match was found.
"""
return self._view.resolver_dependency(id, **kwds)
@expose_api
@require_admin
def install_dependency(self, trans, id=None, **kwds):
"""
POST /api/dependency_resolvers/{index}/dependency
Install described requirement against specified dependency resolver.
:type index: int
:param index: index of the dependency resolver
:type kwds: dict
:param kwds: dictionary structure containing extra parameters
:type name: str
:param name: name of the requirement to find a dependency for (required)
:type version: str
:param version: version of the requirement to find a dependency for (required)
:type exact: bool
:param exact: require an exact match to specify requirement (do not discard
version information to resolve dependency).
:rtype: dict
:returns: a dictified description of the dependency, with attribute
``dependency_type: None`` if no match was found.
"""
self._view.install_dependency(id, **kwds)
return self._view.manager_dependency(**kwds)
@expose_api
@require_admin
def manager_dependency(self, trans, **kwds):
"""
GET /api/dependency_resolvers/dependency
Resolve described requirement against all dependency resolvers, returning
the match with highest priority.
:type kwds: dict
:param kwds: dictionary structure containing extra parameters
:type name: str
:param name: name of the requirement to find a dependency for (required)
:type version: str
:param version: version of the requirement to find a dependency for (required)
:type exact: bool
:param exact: require an exact match to specify requirement (do not discard
version information to resolve dependency).
:rtype: dict
:returns: a dictified description of the dependency, with type: None
if no match was found.
"""
return self._view.manager_dependency(**kwds)
@expose_api
@require_admin
def resolver_requirements(self, trans, id, **kwds):
"""
GET /api/dependency_resolvers/{index}/requirements
Find all "simple" requirements that could be resolved "exactly"
by this dependency resolver. The dependency resolver must implement
ListDependencyResolver.
:type index: int
:param index: index of the dependency resolver
:rtype: dict
        :returns: a dictified description of the requirements that could
                  be resolved.
"""
return self._view.resolver_requirements(id)
@expose_api
@require_admin
def manager_requirements(self, trans, **kwds):
"""
GET /api/dependency_resolvers/requirements
Find all "simple" requirements that could be resolved "exactly"
by all dependency resolvers that support this operation.
:rtype: dict
:returns: a dictified description of the requirement that could
be resolved (keyed on 'requirement') and the index of
the corresponding resolver (keyed on 'index').
"""
return self._view.manager_requirements()
@expose_api
@require_admin
def clean(self, trans, id=None, **kwds):
"""
POST /api/dependency_resolvers/{index}/clean
Cleans up intermediate files created by resolvers during the dependency
installation.
:type index: int
:param index: index of the dependency resolver
:rtype: dict
:returns: a dictified description of the requirement that could
be resolved (keyed on 'requirement') and the index of
the corresponding resolver (keyed on 'index').
"""
return self._view.clean(id, **kwds)
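# A hedged usage sketch (assumes a running Galaxy instance and an admin API
# key passed via the usual `key` query parameter; the URL and key below are
# placeholders):
#
#     import requests
#     base = "https://galaxy.example.org/api/dependency_resolvers"
#     params = {"key": "ADMIN_API_KEY", "name": "samtools", "version": "1.9"}
#     # Resolve a requirement against all resolvers, best match first:
#     requests.get(base + "/dependency", params=params).json()
#     # Inspect a single resolver:
#     requests.get(base + "/0", params={"key": "ADMIN_API_KEY"}).json()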
| 34.570652
| 88
| 0.620972
|
f8b9f8e1eddf48c78bb87f560f1cb093016aeb03
| 2,289
|
py
|
Python
|
serpens/validators.py
|
noverde/serpens
|
36ee126eb6f9ca2793ac49934fe207a5916f3848
|
[
"MIT"
] | null | null | null |
serpens/validators.py
|
noverde/serpens
|
36ee126eb6f9ca2793ac49934fe207a5916f3848
|
[
"MIT"
] | 11
|
2020-11-26T10:41:07.000Z
|
2021-11-09T20:19:44.000Z
|
serpens/validators.py
|
noverde/serpens
|
36ee126eb6f9ca2793ac49934fe207a5916f3848
|
[
"MIT"
] | 2
|
2021-11-28T15:15:42.000Z
|
2022-01-17T13:48:20.000Z
|
import re
from typing import List, Tuple
def validate_cpf(cpf: str) -> bool:
if not cpf.isdigit() or len(cpf) != 11 or len(set(cpf)) == 1:
return False
def _validate_digit(numbers: List[int], index: int) -> bool:
values = zip(numbers[0:index], range(index + 1, 1, -1))
sum_of_products = sum(a * b for a, b in values)
expected = (sum_of_products * 10 % 11) % 10
return numbers[index] == expected
numbers = tuple(map(int, cpf))
ninth = _validate_digit(numbers, 9)
tenth = _validate_digit(numbers, 10)
return ninth and tenth
def validate_cnpj(cnpj: str) -> bool:
if not cnpj.isdigit() or len(cnpj) != 14 or cnpj == cnpj[::-1]:
return False
    def _digit(multiplicands: Tuple[int, ...], multipliers: Tuple[int, ...]) -> int:
result = sum(a * b for a, b in zip(multiplicands, multipliers))
remainder = result % 11
digit = 0 if remainder < 2 else 11 - remainder
return digit
numbers = tuple(map(int, cnpj))
multiplicands1 = numbers[:-2]
multipliers1 = (5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2)
digit1 = _digit(multiplicands1, multipliers1)
multiplicands2 = numbers[:-2] + (digit1,)
multipliers2 = (6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2)
digit2 = _digit(multiplicands2, multipliers2)
return numbers[-2:] == (digit1, digit2)
def validate_email(email: str) -> bool:
if "@" not in email:
return False
user, domain = email.rsplit("@", 1)
user_pattern = (
r"(^[-!#$%&'*+/=?^`{}|~\w]+(\.[-!#$%&'*+/=?^`{}|~\w]+)*\Z"
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]'
r'|\\[\001-\011\013\014\016-\177])*"\Z)'
)
if not re.match(user_pattern, user, flags=re.I | re.U):
return False
if domain == "localhost":
return True
domain_pattern = (
r"(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+"
r"(?:[A-Z]{2,6}|[A-Z0-9-]{2,})\Z"
r"|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)"
r"(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]\Z"
)
if not re.match(domain_pattern, domain, flags=re.I | re.U):
return False
return True
def validate_mobile_number(number: str) -> bool:
match = re.match(r"^(?!(.)\1{10})[1-9]{2}9\d{8}$", number)
return match is not None
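# A small self-check sketch (not part of the original module). The CPF/CNPJ
# numbers below are the commonly used, checksum-valid test values
# 529.982.247-25 and 11.222.333/0001-81:
if __name__ == "__main__":
    assert validate_cpf("52998224725")
    assert not validate_cpf("11111111111")  # repeated digits are rejected
    assert validate_cnpj("11222333000181")
    assert validate_email("user@example.com")
    assert not validate_email("user@@example.com")
    assert validate_mobile_number("11987654321")  # DDD 11 + mobile 9xxxxxxxx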
| 28.974684
| 74
| 0.548274
|
dedc021e5e7e12553246f559a7cca975ca06efe3
| 859
|
py
|
Python
|
backend/core/urls.py
|
enoscar/maiden
|
e22efd024df4707533e86943f4f412e32e831577
|
[
"MIT"
] | null | null | null |
backend/core/urls.py
|
enoscar/maiden
|
e22efd024df4707533e86943f4f412e32e831577
|
[
"MIT"
] | 6
|
2021-03-10T17:04:11.000Z
|
2021-09-22T18:58:32.000Z
|
backend/core/urls.py
|
enoscar/maiden
|
e22efd024df4707533e86943f4f412e32e831577
|
[
"MIT"
] | 1
|
2020-11-17T20:23:00.000Z
|
2020-11-17T20:23:00.000Z
|
"""core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('api/admin/', admin.site.urls),
path('api/auth/', include('authentication.urls')),
path('api/main/', include('main.urls')),
]
| 35.791667
| 77
| 0.698487
|
5850ff6cfad6f6d4031188376a5ea429f54077d9
| 101
|
py
|
Python
|
main_api.py
|
norrisng/FcomServer
|
7d0be9e6a2c22bd0e42a5af397fb17c3b5c139b6
|
[
"MIT"
] | 4
|
2018-12-11T03:09:10.000Z
|
2021-05-21T18:36:54.000Z
|
main_api.py
|
1989car/FcomServer
|
1c5afb0eeb9de8afd5d4981202d16980030c09ed
|
[
"MIT"
] | 12
|
2018-10-26T05:47:14.000Z
|
2019-05-17T22:05:47.000Z
|
main_api.py
|
norrisng/FcomServer
|
7d0be9e6a2c22bd0e42a5af397fb17c3b5c139b6
|
[
"MIT"
] | 1
|
2019-05-30T13:13:05.000Z
|
2019-05-30T13:13:05.000Z
|
from api.message_api import app
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False)
| 20.2
| 40
| 0.683168
|
3e6624ff88a6e327c06df4319feeff07c7425a6f
| 1,928
|
py
|
Python
|
JZ/JZ25.py
|
lpuls/leetcode
|
00e0ea28ed056c01a1e117e70f5560ada1cd8bcc
|
[
"MIT"
] | null | null | null |
JZ/JZ25.py
|
lpuls/leetcode
|
00e0ea28ed056c01a1e117e70f5560ada1cd8bcc
|
[
"MIT"
] | null | null | null |
JZ/JZ25.py
|
lpuls/leetcode
|
00e0ea28ed056c01a1e117e70f5560ada1cd8bcc
|
[
"MIT"
] | null | null | null |
# _*_coding:utf-8_*_
class RandomListNode:
def __init__(self, x):
self.label = x
self.next = None
self.random = None
class Solution:
    # Returns a RandomListNode (deep copy of the list, including random pointers)
    def Clone(self, pHead):
        if pHead is None:
            return None
        # Map each original node to its clone so that random pointers can be
        # redirected into the new list instead of the original one.
        clones = dict()
        node = pHead
        new_list = None
        new_list_node = None
        while node is not None:
            new_node = RandomListNode(node.label)
            clones[node] = new_node
            if new_list_node is None:
                new_list = new_node
            else:
                new_list_node.next = new_node
            new_list_node = new_node
            node = node.next
        # Second pass: wire up the random pointers via the mapping.
        node = pHead
        while node is not None:
            if node.random is not None:
                clones[node].random = clones[node.random]
            node = node.next
        return new_list
def spawn(node_value, node_random):
if len(node_value) <= 0:
return None
result = list()
for item in node_value:
result.append(RandomListNode(item))
for index, item in enumerate(node_random):
if item >= 0:
result[index].random = result[item]
for index, item in enumerate(result):
if index + 1 >= len(result):
item.next = None
else:
item.next = result[index + 1]
return result[0]
def show(list_node):
head = list_node
while head:
print(head.label, end='\t')
head = head.next
print('')
s = Solution()
case = [
spawn([1, 2, 3, 4, 5], [2, 4, 1, 3, 0]),
spawn([1, 2, 3, 4, 5], [2, 4, 1, 3, -1]),
spawn([1, 2, 3, 4, 5], [-1, -1, -1, -1, -1])
]
for item in case:
result = s.Clone(item)
print(item, result)
show(item)
show(result)
print('')
| 22.418605
| 53
| 0.52749
|