hexsha: stringlengths 40-40 | size: int64 3-1.03M | ext: stringclasses 10 values | lang: stringclasses 1 value |
max_stars_repo_path: stringlengths 3-972 | max_stars_repo_name: stringlengths 6-130 | max_stars_repo_head_hexsha: stringlengths 40-78 | max_stars_repo_licenses: listlengths 1-10 | max_stars_count: int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime: stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime: stringlengths 24-24 ⌀ |
max_issues_repo_path: stringlengths 3-972 | max_issues_repo_name: stringlengths 6-130 | max_issues_repo_head_hexsha: stringlengths 40-78 | max_issues_repo_licenses: listlengths 1-10 | max_issues_count: int64 1-116k ⌀ | max_issues_repo_issues_event_min_datetime: stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime: stringlengths 24-24 ⌀ |
max_forks_repo_path: stringlengths 3-972 | max_forks_repo_name: stringlengths 6-130 | max_forks_repo_head_hexsha: stringlengths 40-78 | max_forks_repo_licenses: listlengths 1-10 | max_forks_count: int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime: stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime: stringlengths 24-24 ⌀ |
content: stringlengths 3-1.03M | avg_line_length: float64 1.13-941k | max_line_length: int64 2-941k | alphanum_fraction: float64 0-1 |
50aad8671045ef73ebdd66a68ab6ad51ab1d12d9 | 5,781 | py | Python | allennlp/data/token_indexers/pretrained_transformer_mismatched_indexer.py | lgessler/allennlp | 0e64b4d3281808fac0fe00cc5b56e5378dbb7615 | ["Apache-2.0"] | 1 | 2021-05-18T01:26:52.000Z | 2021-05-18T01:26:52.000Z | allennlp/data/token_indexers/pretrained_transformer_mismatched_indexer.py | lgessler/allennlp | 0e64b4d3281808fac0fe00cc5b56e5378dbb7615 | ["Apache-2.0"] | 66 | 2020-10-26T18:47:03.000Z | 2022-03-29T13:04:30.000Z | allennlp/data/token_indexers/pretrained_transformer_mismatched_indexer.py | lgessler/allennlp | 0e64b4d3281808fac0fe00cc5b56e5378dbb7615 | ["Apache-2.0"] | null | null | null |
from typing import Dict, List, Any, Optional
import logging
from overrides import overrides
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers import PretrainedTransformerIndexer, TokenIndexer
from allennlp.data.token_indexers.token_indexer import IndexedTokenList
logger = logging.getLogger(__name__)
@TokenIndexer.register("pretrained_transformer_mismatched")
class PretrainedTransformerMismatchedIndexer(TokenIndexer):
"""
Use this indexer when (for whatever reason) you are not using a corresponding
`PretrainedTransformerTokenizer` on your input. We assume that you used a tokenizer that splits
strings into words, while the transformer expects wordpieces as input. This indexer splits the
words into wordpieces and flattens them out. You should use the corresponding
`PretrainedTransformerMismatchedEmbedder` to embed these wordpieces and then pull out a single
vector for each original word.
Registered as a `TokenIndexer` with name "pretrained_transformer_mismatched".
# Parameters
model_name : `str`
The name of the `transformers` model to use.
namespace : `str`, optional (default=`tags`)
We will add the tokens in the pytorch_transformer vocabulary to this vocabulary namespace.
We use a somewhat confusing default value of `tags` so that we do not add padding or UNK
tokens to this namespace, which would break on loading because we wouldn't find our default
OOV token.
max_length : `int`, optional (default = `None`)
If positive, split the document into segments of this many tokens (including special tokens)
before feeding into the embedder. The embedder embeds these segments independently and
concatenates the results to get the original document representation. Should be set to
the same value as the `max_length` option on the `PretrainedTransformerMismatchedEmbedder`.
tokenizer_kwargs : `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
""" # noqa: E501
def __init__(
self,
model_name: str,
namespace: str = "tags",
max_length: int = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
# The matched version, as opposed to the mismatched one
self._matched_indexer = PretrainedTransformerIndexer(
model_name,
namespace=namespace,
max_length=max_length,
tokenizer_kwargs=tokenizer_kwargs,
**kwargs,
)
self._allennlp_tokenizer = self._matched_indexer._allennlp_tokenizer
self._tokenizer = self._matched_indexer._tokenizer
self._num_added_start_tokens = self._matched_indexer._num_added_start_tokens
self._num_added_end_tokens = self._matched_indexer._num_added_end_tokens
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
return self._matched_indexer.count_vocab_items(token, counter)
@overrides
def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary) -> IndexedTokenList:
self._matched_indexer._add_encoding_to_vocabulary_if_needed(vocabulary)
wordpieces, offsets = self._allennlp_tokenizer.intra_word_tokenize([t.text for t in tokens])
# For tokens that don't correspond to any word pieces, we put (-1, -1) into the offsets.
# That results in the embedding for the token being all zeros.
offsets = [x if x is not None else (-1, -1) for x in offsets]
output: IndexedTokenList = {
"token_ids": [t.text_id for t in wordpieces],
"mask": [True] * len(tokens), # for original tokens (i.e. word-level)
"type_ids": [t.type_id for t in wordpieces],
"offsets": offsets,
"wordpiece_mask": [True] * len(wordpieces), # for wordpieces (i.e. subword-level)
}
return self._matched_indexer._postprocess_output(output)
@overrides
def get_empty_token_list(self) -> IndexedTokenList:
output = self._matched_indexer.get_empty_token_list()
output["offsets"] = []
output["wordpiece_mask"] = []
return output
@overrides
def as_padded_tensor_dict(
self, tokens: IndexedTokenList, padding_lengths: Dict[str, int]
) -> Dict[str, torch.Tensor]:
tokens = tokens.copy()
padding_lengths = padding_lengths.copy()
offsets_tokens = tokens.pop("offsets")
offsets_padding_lengths = padding_lengths.pop("offsets")
tensor_dict = self._matched_indexer.as_padded_tensor_dict(tokens, padding_lengths)
tensor_dict["offsets"] = torch.LongTensor(
pad_sequence_to_length(
offsets_tokens, offsets_padding_lengths, default_value=lambda: (0, 0)
)
)
return tensor_dict
def __eq__(self, other):
if isinstance(other, PretrainedTransformerMismatchedIndexer):
for key in self.__dict__:
if key == "_tokenizer":
# This is a reference to a function in the huggingface code, which we can't
# really modify to make this clean. So we special-case it.
continue
if self.__dict__[key] != other.__dict__[key]:
return False
return True
return NotImplemented
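# Illustrative usage sketch, not part of the original module: shows how this
# indexer could be driven directly. Assumes the "bert-base-uncased" checkpoint
# can be downloaded; all names below are demonstration-only.
def _demo_mismatched_indexing() -> None:
    indexer = PretrainedTransformerMismatchedIndexer(model_name="bert-base-uncased")
    vocab = Vocabulary()
    tokens = [Token("Moments"), Token("like"), Token("this")]
    indexed = indexer.tokens_to_indices(tokens, vocab)
    # "offsets" maps each original word to its (start, end) wordpiece span,
    # which the mismatched embedder later pools into one vector per word.
    print(indexed["token_ids"], indexed["offsets"])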
| 44.469231 | 161 | 0.691403 |
bb5ff90b334da4ff55b749fdcceb78e6b7652ee9 | 177 | py | Python | atest/testdata/test_libraries/nön_äscii_dïr/valid.py | robotframework/robotframework | 8b149711c0d20869ac707041ab1086d87efd3c4d | ["ECL-2.0", "Apache-2.0"] | 7,073 | 2015-01-01T17:19:16.000Z | 2022-03-31T22:01:29.000Z | atest/testdata/test_libraries/nön_äscii_dïr/valid.py | robotframework/robotframework | 8b149711c0d20869ac707041ab1086d87efd3c4d | ["ECL-2.0", "Apache-2.0"] | 2,412 | 2015-01-02T09:29:05.000Z | 2022-03-31T13:10:46.000Z | atest/testdata/test_libraries/nön_äscii_dïr/valid.py | robotframework/robotframework | 8b149711c0d20869ac707041ab1086d87efd3c4d | ["ECL-2.0", "Apache-2.0"] | 2,298 | 2015-01-03T02:47:15.000Z | 2022-03-31T02:00:16.000Z |
def keyword_in_non_ascii_dir():
return "Keyword in 'nön_äscii_dïr'!"
def failing_keyword_in_non_ascii_dir():
raise AssertionError("Keyword in 'nön_äscii_dïr' fails!")
| 25.285714 | 61 | 0.768362 |
a8b20c2cd29f7135c03d042d55d405ff5304ae7f | 3,954 | py | Python | tests/datasets/test_nasa_marine_debris.py | RitwikGupta/torchgeo | 14c19e35c2b17f9cd6f2dcbdc0968283aa89fbbb | ["MIT"] | null | null | null | tests/datasets/test_nasa_marine_debris.py | RitwikGupta/torchgeo | 14c19e35c2b17f9cd6f2dcbdc0968283aa89fbbb | ["MIT"] | null | null | null | tests/datasets/test_nasa_marine_debris.py | RitwikGupta/torchgeo | 14c19e35c2b17f9cd6f2dcbdc0968283aa89fbbb | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import glob
import os
import shutil
from pathlib import Path
from typing import Generator
import matplotlib.pyplot as plt
import pytest
import torch
import torch.nn as nn
from _pytest.monkeypatch import MonkeyPatch
from torchgeo.datasets import NASAMarineDebris, NASAMarineDebrisDataModule
class Dataset:
def download(self, output_dir: str, **kwargs: str) -> None:
glob_path = os.path.join("tests", "data", "nasa_marine_debris", "*.tar.gz")
for tarball in glob.iglob(glob_path):
shutil.copy(tarball, output_dir)
def fetch(dataset_id: str, **kwargs: str) -> Dataset:
return Dataset()
class TestNASAMarineDebris:
@pytest.fixture()
def dataset(
self, monkeypatch: Generator[MonkeyPatch, None, None], tmp_path: Path
) -> NASAMarineDebris:
radiant_mlhub = pytest.importorskip("radiant_mlhub", minversion="0.2.1")
monkeypatch.setattr( # type: ignore[attr-defined]
radiant_mlhub.Dataset, "fetch", fetch
)
md5s = ["fe8698d1e68b3f24f0b86b04419a797d", "d8084f5a72778349e07ac90ec1e1d990"]
monkeypatch.setattr( # type: ignore[attr-defined]
NASAMarineDebris, "md5s", md5s
)
root = str(tmp_path)
transforms = nn.Identity() # type: ignore[attr-defined]
return NASAMarineDebris(root, transforms, download=True, checksum=True)
def test_getitem(self, dataset: NASAMarineDebris) -> None:
x = dataset[0]
assert isinstance(x, dict)
assert isinstance(x["image"], torch.Tensor)
assert isinstance(x["boxes"], torch.Tensor)
assert x["image"].shape[0] == 3
assert x["boxes"].shape[-1] == 4
def test_len(self, dataset: NASAMarineDebris) -> None:
assert len(dataset) == 4
def test_already_downloaded(
self, dataset: NASAMarineDebris, tmp_path: Path
) -> None:
NASAMarineDebris(root=str(tmp_path), download=True)
def test_already_downloaded_not_extracted(
self, dataset: NASAMarineDebris, tmp_path: Path
) -> None:
shutil.rmtree(dataset.root)
os.makedirs(str(tmp_path), exist_ok=True)
Dataset().download(output_dir=str(tmp_path))
print(os.listdir(str(tmp_path)))
NASAMarineDebris(root=str(tmp_path), download=False)
def test_not_downloaded(self, tmp_path: Path) -> None:
err = "Dataset not found in `root` directory and `download=False`, "
"either specify a different `root` directory or use `download=True` "
"to automaticaly download the dataset."
with pytest.raises(RuntimeError, match=err):
NASAMarineDebris(str(tmp_path))
def test_plot(self, dataset: NASAMarineDebris) -> None:
x = dataset[0].copy()
dataset.plot(x, suptitle="Test")
plt.close()
dataset.plot(x, show_titles=False)
plt.close()
x["prediction_boxes"] = x["boxes"].clone()
dataset.plot(x)
plt.close()
class TestNASAMarineDebrisDataModule:
@pytest.fixture(scope="class")
def datamodule(self) -> NASAMarineDebrisDataModule:
root = os.path.join("tests", "data", "nasa_marine_debris")
batch_size = 2
num_workers = 0
val_split_pct = 0.3
test_split_pct = 0.3
dm = NASAMarineDebrisDataModule(
root, batch_size, num_workers, val_split_pct, test_split_pct
)
dm.prepare_data()
dm.setup()
return dm
def test_train_dataloader(self, datamodule: NASAMarineDebrisDataModule) -> None:
next(iter(datamodule.train_dataloader()))
def test_val_dataloader(self, datamodule: NASAMarineDebrisDataModule) -> None:
next(iter(datamodule.val_dataloader()))
def test_test_dataloader(self, datamodule: NASAMarineDebrisDataModule) -> None:
next(iter(datamodule.test_dataloader()))
| 34.99115 | 87 | 0.66692 |
5e73f3fb71fb6601867dd11e282bda93ad5eabf7 | 29,217 | py | Python | codes/e_utils/replay_buffer.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | ["MIT"] | null | null | null | codes/e_utils/replay_buffer.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | ["MIT"] | null | null | null | codes/e_utils/replay_buffer.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | ["MIT"] | 1 | 2021-11-23T12:30:37.000Z | 2021-11-23T12:30:37.000Z |
import math
import random
import operator
import sys
import numpy as np
from codes.e_utils.experience import ExperienceSource, ExperienceSourceFirstLast
from codes.e_utils.experience_single import ExperienceSourceSingleEnvFirstLast
class TrajectoryBuffer:
def __init__(self, experience_source):
self.experience_source = experience_source
self.experience_source_iter = None if experience_source is None else iter(experience_source)
self.buffer = []
def __len__(self):
return len(self.buffer)
def set_experience_source(self, experience_source):
self.experience_source = experience_source
self.experience_source_iter = None if experience_source is None else iter(experience_source)
def populate(self, num_samples):
entry = None
for _ in range(num_samples):
entry = next(self.experience_source_iter)
self.buffer.append(entry)
return entry
def sample_all(self):
return self.buffer
def clear(self):
self.buffer.clear()
class ExperienceSourceBuffer:
"""
The same as ExperienceSource, but takes episodes from the simple buffer
"""
def __init__(self, buffer, n_step=1):
"""
Create buffered experience source
:param buffer: list of episodes, each is a list of Experience object
:param n_step: count of steps in every entry
"""
self.update_buffer(buffer)
self.n_step = n_step
def update_buffer(self, buffer):
self.buffer = buffer
self.lens = list(map(len, buffer))
def __iter__(self):
"""
Infinitely sample episode from the buffer and then sample item offset
"""
while True:
episode = random.randrange(len(self.buffer))
ofs = random.randrange(self.lens[episode] - self.n_step - 1)
yield self.buffer[episode][ofs:ofs + self.n_step]
class ExperienceReplayBuffer:
def __init__(self, experience_source, buffer_size):
assert isinstance(
experience_source,
(ExperienceSource, ExperienceSourceFirstLast, ExperienceSourceSingleEnvFirstLast, type(None))
)
assert isinstance(buffer_size, int)
self.experience_source = experience_source
self.experience_source_iter = None if experience_source is None else iter(experience_source)
self.buffer = []
self.capacity = buffer_size
self.pos = 0
def __len__(self):
return len(self.buffer)
def __iter__(self):
return iter(self.buffer)
def set_experience_source(self, experience_source):
self.experience_source = experience_source
self.experience_source_iter = None if experience_source is None else iter(experience_source)
def sample_all_for_on_policy(self, expected_model_version):
return [sample for sample in self.buffer if sample.model_version == expected_model_version]
def sample(self, batch_size):
"""
Get one random batch from experience replay
TODO: implement sampling order policy
:param batch_size: number of samples to draw; if None or at least the buffer size, the whole buffer is returned
:return: list of sampled experiences
"""
if batch_size is None or len(self.buffer) <= batch_size:
return self.buffer
# Warning: replace=False makes random.choice O(n)
keys = np.random.choice(len(self.buffer), batch_size, replace=True)
return [self.buffer[key] for key in keys]
def add_sample(self, sample):
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.pos = (self.pos + 1) % self.capacity
def populate(self, num_samples):
"""
Populates samples into the buffer
:param num_samples: how many samples to populate
"""
entry = None
for _ in range(num_samples):
entry = next(self.experience_source_iter)
self.add_sample(entry)
return entry
def populate_with_action_count(self, num_samples, action_count):
"""
Populates samples into the buffer
:param num_samples: how many samples to populate
"""
for _ in range(num_samples):
entry = next(self.experience_source_iter)
action_count[entry.action] += 1
self.add_sample(entry)
def update_priorities(self, batch_indices, batch_priorities):
raise NotImplementedError()
def update_beta(self, idx):
raise NotImplementedError()
def clear(self):
self.buffer.clear()
self.pos = 0
def size(self):
return len(self.buffer)
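# Illustrative sketch, not part of the original module: demonstrates the
# ring-buffer behaviour of add_sample/sample without an experience source
# (passing None is allowed by the constructor). Plain ints stand in for
# experiences, purely for demonstration.
def _demo_experience_replay_buffer() -> None:
    buffer = ExperienceReplayBuffer(experience_source=None, buffer_size=4)
    for step in range(6):
        buffer.add_sample(step)
    # Capacity is 4, so the two oldest entries (0 and 1) were overwritten
    # in place at self.pos, leaving [4, 5, 2, 3].
    print(buffer.buffer)
    print(buffer.sample(batch_size=2))  # random batch drawn with replacement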
class PrioReplayBufferNaive:
def __init__(self, experience_source, buffer_size, prob_alpha=0.6):
self.experience_source_iter = iter(experience_source)
self.prob_alpha = prob_alpha
self.capacity = buffer_size
self.pos = 0
self.buffer = []
self.priorities = np.zeros((buffer_size,), dtype=np.float32)
def __len__(self):
return len(self.buffer)
def set_experience_source(self, experience_source):
self.experience_source = experience_source
self.experience_source_iter = None if experience_source is None else iter(experience_source)
def populate(self, num_samples):
max_prio = self.priorities.max() if self.buffer else 1.0
for _ in range(num_samples):
sample = next(self.experience_source_iter)
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.priorities[self.pos] = max_prio
self.pos = (self.pos + 1) % self.capacity
def sample(self, batch_size, beta=0.4):
if len(self.buffer) == self.capacity:
prios = self.priorities
else:
prios = self.priorities[:self.pos]
probs = np.array(prios, dtype=np.float32) ** self.prob_alpha
probs /= probs.sum()
indices = np.random.choice(len(self.buffer), batch_size, p=probs, replace=True)
samples = [self.buffer[idx] for idx in indices]
total = len(self.buffer)
weights = (total * probs[indices]) ** (-beta)
weights /= weights.max()
return samples, indices, np.array(weights, dtype=np.float32)
def update_priorities(self, batch_indices, batch_priorities):
for idx, prio in zip(batch_indices, batch_priorities):
self.priorities[idx] = prio
def size(self):
return len(self.buffer)
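# Illustrative sketch, not part of the original module: the probability and
# importance-weight arithmetic used by PrioReplayBufferNaive.sample, written
# out with plain numpy. The priority values are made up for demonstration.
def _demo_proportional_priorities(alpha=0.6, beta=0.4):
    prios = np.array([1.0, 0.5, 2.0, 0.1], dtype=np.float32)
    probs = prios ** alpha
    probs /= probs.sum()                  # P(i) = p_i^alpha / sum_k p_k^alpha
    total = len(prios)
    weights = (total * probs) ** (-beta)  # importance-sampling correction
    weights /= weights.max()              # normalise so the largest weight is 1
    return probs, weights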
# Version using a sum tree (segment tree)
class PrioritizedReplayBuffer(ExperienceReplayBuffer):
def __init__(self, experience_source, buffer_size, alpha=0.6, n_step=1, beta_start=0.4, beta_frames=100000):
super(PrioritizedReplayBuffer, self).__init__(experience_source, buffer_size)
assert alpha > 0
self.alpha = alpha
self.beta = beta_start
self.n_step = n_step
self.beta_start = beta_start
self.beta_frames = beta_frames
self.buffer_size = buffer_size
it_capacity = 1
while it_capacity < self.buffer_size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def clear(self):
self.buffer.clear()
it_capacity = 1
while it_capacity < self.buffer_size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def update_beta(self, idx):
v = self.beta_start + idx * (1.0 - self.beta_start) / self.beta_frames
self.beta = min(1.0, v)
return self.beta
def add_sample(self, *args, **kwargs):
idx = self.pos
super().add_sample(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self.alpha
self._it_min[idx] = self._max_priority ** self.alpha
def _sample_proportional(self, batch_size):
assert len(self) > self.n_step
res = []
for _ in range(batch_size):
while True:
mass = random.random() * self._it_sum.sum(0, len(self) - 1)
idx = self._it_sum.find_prefixsum_idx(mass)
upper = self.pos
lower = (self.pos - self.n_step)
if lower < 0:
lower = self.capacity + lower
if lower < upper:
if not lower <= idx < upper:
res.append(idx)
break
else:
if upper <= idx < lower:
res.append(idx)
break
return res
def sample(self, batch_size):
assert self.beta > 0
idxes = self._sample_proportional(batch_size)
# print("#################")
# print(idxes)
# print(self.pos)
# print("#################")
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self)) ** (-self.beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self)) ** (-self.beta)
weights.append(weight / max_weight)
weights = np.array(weights, dtype=np.float32)
samples = [self.buffer[idx] for idx in idxes]
return samples, idxes, weights
def update_priorities(self, idxes, priorities):
# with torch.no_grad():
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0.0, priority
assert 0 <= idx < len(self), idx
self._it_sum[idx] = priority ** self.alpha
self._it_min[idx] = priority ** self.alpha
self._max_priority = max(self._max_priority, priority)
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Parameters
----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max);
must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above, e.g. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum.
If the array values are probabilities, this function
allows sampling indexes according to the discrete
probability distribution efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
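# Illustrative sketch, not part of the original module: a tiny walk-through of
# how the segment trees above support proportional sampling. The priority
# values are made up for demonstration.
def _demo_segment_tree_sampling() -> None:
    tree = SumSegmentTree(capacity=4)  # capacity must be a power of two
    for i, priority in enumerate([0.1, 0.4, 0.3, 0.2]):
        tree[i] = priority
    # Drawing a uniform "mass" in [0, total) and locating its prefix-sum index
    # samples index i with probability priority_i / total.
    mass = random.random() * tree.sum()
    idx = tree.find_prefixsum_idx(mass)
    print(idx, tree[idx])
    # MinSegmentTree returns the smallest priority in O(log capacity), which is
    # what the prioritized buffer uses to compute the maximal importance weight.
    min_tree = MinSegmentTree(capacity=4)
    for i, priority in enumerate([0.1, 0.4, 0.3, 0.2]):
        min_tree[i] = priority
    print(min_tree.min())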
class RankBasedPrioritizedReplayBuffer(ExperienceReplayBuffer):
def __init__(self, experience_source, buffer_size, params, alpha=0.7, beta_start=0.5, beta_frames=100000):
super(RankBasedPrioritizedReplayBuffer, self).__init__(experience_source, buffer_size)
assert alpha > 0
self.alpha = alpha
self.beta = beta_start
self.beta_start = beta_start
self.beta_frames = beta_frames
self._max_priority = 1.0
self.replace_flag = True
self.learn_start = params.MIN_REPLAY_SIZE_FOR_TRAIN
self.total_steps = params.MAX_GLOBAL_STEP
# partition number N, split total size to N part
self.partition_num = 100
self.batch_size = params.BATCH_SIZE
self.index = 0
self.record_size = 0
self.isFull = False
self.buffer = {}
# self._experience = {}
self.priority_queue = BinaryHeap(self.capacity)
self.distributions = self.build_distributions()
# self.beta_grad = (1 - self.beta_start) / (self.total_steps - self.learn_start)
def build_distributions(self):
"""
Precompute the rank-based sampling distributions:
P(i) = (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))
:return: distributions, dict
"""
res = {}
n_partitions = self.partition_num
partition_num = 1
# each part size
partition_size = int(math.floor(1.0*self.capacity / n_partitions))
for n in range(partition_size, self.capacity + 1, partition_size):
if self.learn_start <= n <= self.capacity:
distribution = {}
# P(i) = (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))
pdf = list(
map(lambda x: math.pow(x, -self.alpha), range(1, n + 1))
)
pdf_sum = math.fsum(pdf)
distribution['pdf'] = list(map(lambda x: x / pdf_sum, pdf))
# split into k segments, then sample uniformly within each segment
# with k = batch_size, each segment carries a total probability of 1 / batch_size
# strata_ends keeps each segment's start and end position
cdf = np.cumsum(distribution['pdf'])
strata_ends = {1: 0, self.batch_size + 1: n}
step = 1.0 / self.batch_size
index = 1
for s in range(2, self.batch_size + 1):
while cdf[index] < step:
index += 1
strata_ends[s] = index
step += 1.0 / self.batch_size
distribution['strata_ends'] = strata_ends
res[partition_num] = distribution
partition_num += 1
return res
def fix_index(self):
"""
get next insert index
:return: index, int
"""
if self.record_size < self.capacity:#self.record_size <= self.size:
self.record_size += 1
if self.index % self.capacity == 0:
self.isFull = True if len(self.buffer) == self.capacity else False
if self.replace_flag:
self.index = 1
return self.index
else:
sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\n')
return -1
else:
self.index += 1
return self.index
def store(self, sample):
"""
Store an experience; it is suggested that each experience is a tuple of (s1, a, r, s2, t)
so that every stored experience is valid.
:param sample: the experience to store, e.g. a tuple or list
:return: bool indicating insert status
"""
insert_index = self.fix_index()
if insert_index > 0:
if insert_index in self.buffer:
del self.buffer[insert_index]
self.buffer[insert_index] = sample
# add to priority queue
priority = self.priority_queue.get_max_priority()
self.priority_queue.update(priority, insert_index)
return True
else:
sys.stderr.write('Insert failed\n')
return False
def retrieve(self, indices):
"""
get experience from indices
:param indices: list of experience id
:return: experience replay sample
"""
return [self.buffer[v] for v in indices]
def rebalance(self):
"""
rebalance priority queue
:return: None
"""
self.priority_queue.balance_tree()
def update_priorities(self, idxes, priorities):
"""
update priorities according to the given indices and deltas
:param idxes: list of experience ids
:param priorities: list of deltas, in the same order as the indices
:return: None
"""
assert len(idxes) == len(priorities)
for i in range(0, len(idxes)):
self.priority_queue.update(math.fabs(priorities[i]), idxes[i])
def update_beta(self, idx):
v = self.beta_start + idx * (1.0 - self.beta_start) / self.beta_frames
self.beta = min(1.0, v)
return self.beta
def add_sample(self, sample):
self.store(sample)
def sample(self, _):
"""
sample a mini batch from experience replay
:return: experience, list, samples
:return: w, list, weights
:return: rank_e_id, list, samples id, used for update priority
"""
if self.record_size < self.learn_start:
sys.stderr.write('Record size less than learn start! Sample failed\n')
return False, False, False
dist_index = int(math.floor(1.0 * self.record_size / self.capacity * self.partition_num))
partition_size = int(math.floor(1.0 * self.capacity / self.partition_num))
partition_max = dist_index * partition_size
distribution = self.distributions[dist_index]
rank_list = []
# sample from k segments
for n in range(1, self.batch_size + 1):
if distribution['strata_ends'][n] + 1 <= distribution['strata_ends'][n + 1]:
index = random.randint(distribution['strata_ends'][n] + 1,
distribution['strata_ends'][n + 1])
else:
index = random.randint(distribution['strata_ends'][n + 1],
distribution['strata_ends'][n] + 1)
rank_list.append(index)
# beta, increase by global_step, max 1
# beta = min(self.beta_zero + (global_step - self.learn_start - 1) * self.beta_grad, 1)
beta = self.beta
# find all alpha pow, notice that pdf is a list, start from 0
alpha_pow = [distribution['pdf'][v - 1] for v in rank_list]
# w = (N * P(i)) ^ (-beta) / max w
w = np.power(np.array(alpha_pow) * partition_max, -beta)
w_max = max(w)
w = np.divide(w, w_max)
# rank list is priority id
# convert to experience id
rank_e_id = self.priority_queue.priority_to_experience(rank_list)
# retrieve the experiences according to rank_e_id
experience = self.retrieve(rank_e_id)
return experience, rank_e_id, w
class BinaryHeap(object):
def __init__(self, priority_size=100, priority_init=None, replace=True):
self.e2p = {}
self.p2e = {}
self.replace = replace
if priority_init is None:
self.priority_queue = {}
self.size = 0
self.max_size = priority_size
else:
# not yet test
self.priority_queue = priority_init
self.size = len(self.priority_queue)
self.max_size = None or self.size
experience_list = list(map(lambda x: self.priority_queue[x], self.priority_queue))
self.p2e = list_to_dict(experience_list)
self.e2p = exchange_key_value(self.p2e)
for i in range(int(self.size / 2), -1, -1):
self.down_heap(i)
def __repr__(self):
"""
:return: string of the priority queue, with level info
"""
if self.size == 0:
return 'No element in heap!'
to_string = ''
level = -1
max_level = int(math.floor(math.log(self.size, 2)))
for i in range(1, self.size + 1):
now_level = int(math.floor(math.log(i, 2)))
if level != now_level:
to_string = to_string + ('\n' if level != -1 else '') \
+ ' ' * (max_level - now_level)
level = now_level
to_string = to_string + '%.2f ' % self.priority_queue[i][1] + ' ' * (max_level - now_level)
return to_string
def check_full(self):
return self.size > self.max_size
def _insert(self, priority, e_id):
"""
insert new experience id with priority
(maybe don't need get_max_priority and implement it in this function)
:param priority: priority value
:param e_id: experience id
:return: bool
"""
self.size += 1
if self.check_full() and not self.replace:
sys.stderr.write('Error: no space left to add experience id %d with priority value %f\n' % (e_id, priority))
return False
else:
self.size = min(self.size, self.max_size)
self.priority_queue[self.size] = (priority, e_id)
self.p2e[self.size] = e_id
self.e2p[e_id] = self.size
self.up_heap(self.size)
return True
def update(self, priority, e_id):
"""
update the priority value according to its experience id
:param priority: new priority value
:param e_id: experience id
:return: bool
"""
if e_id in self.e2p:
p_id = self.e2p[e_id]
self.priority_queue[p_id] = (priority, e_id)
self.p2e[p_id] = e_id
self.down_heap(p_id)
self.up_heap(p_id)
return True
else:
# this e id is new, do insert
return self._insert(priority, e_id)
def get_max_priority(self):
"""
get max priority, if no experience, return 1
:return: max priority if size > 0 else 1
"""
if self.size > 0:
return self.priority_queue[1][0]
else:
return 1
def pop(self):
"""
pop out the max priority value with its experience id
:return: priority value & experience id
"""
if self.size == 0:
sys.stderr.write('Error: no value in heap, pop failed\n')
return False, False
pop_priority, pop_e_id = self.priority_queue[1]
self.e2p[pop_e_id] = -1
# replace first
last_priority, last_e_id = self.priority_queue[self.size]
self.priority_queue[1] = (last_priority, last_e_id)
self.size -= 1
self.e2p[last_e_id] = 1
self.p2e[1] = last_e_id
self.down_heap(1)
return pop_priority, pop_e_id
def up_heap(self, i):
"""
upward balance
:param i: tree node i
:return: None
"""
if i > 1:
parent = int(math.floor(i / 2))
if self.priority_queue[parent][0] < self.priority_queue[i][0]:
tmp = self.priority_queue[i]
self.priority_queue[i] = self.priority_queue[parent]
self.priority_queue[parent] = tmp
# change e2p & p2e
self.e2p[self.priority_queue[i][1]] = i
self.e2p[self.priority_queue[parent][1]] = parent
self.p2e[i] = self.priority_queue[i][1]
self.p2e[parent] = self.priority_queue[parent][1]
# up heap parent
self.up_heap(parent)
def down_heap(self, i):
"""
downward balance
:param i: tree node i
:return: None
"""
if i < self.size:
greatest = i
left, right = i * 2, i * 2 + 1
if left < self.size and self.priority_queue[left][0] > self.priority_queue[greatest][0]:
greatest = left
if right < self.size and self.priority_queue[right][0] > self.priority_queue[greatest][0]:
greatest = right
if greatest != i:
tmp = self.priority_queue[i]
self.priority_queue[i] = self.priority_queue[greatest]
self.priority_queue[greatest] = tmp
# change e2p & p2e
self.e2p[self.priority_queue[i][1]] = i
self.e2p[self.priority_queue[greatest][1]] = greatest
self.p2e[i] = self.priority_queue[i][1]
self.p2e[greatest] = self.priority_queue[greatest][1]
# down heap greatest
self.down_heap(greatest)
def get_priority(self):
"""
get all priority value
:return: list of priority
"""
return list(map(lambda x: x[0], self.priority_queue.values()))[0:self.size]
def get_e_id(self):
"""
get all experience id in priority queue
:return: list of experience ids order by their priority
"""
return list(map(lambda x: x[1], self.priority_queue.values()))[0:self.size]
def balance_tree(self):
"""
rebalance priority queue
:return: None
"""
sort_array = sorted(self.priority_queue.values(), key=lambda x: x[0], reverse=True)
# reconstruct priority_queue
self.priority_queue.clear()
self.p2e.clear()
self.e2p.clear()
cnt = 1
while cnt <= self.size:
priority, e_id = sort_array[cnt - 1]
self.priority_queue[cnt] = (priority, e_id)
self.p2e[cnt] = e_id
self.e2p[e_id] = cnt
cnt += 1
# sort the heap
for i in range(int(math.floor(self.size / 2)), 1, -1):
self.down_heap(i)
def priority_to_experience(self, priority_ids):
"""
retrieve experience ids by priority ids
:param priority_ids: list of priority id
:return: list of experience id
"""
return [self.p2e[i] for i in priority_ids]
def list_to_dict(in_list):
return dict((i, in_list[i]) for i in range(0, len(in_list)))
def exchange_key_value(in_dict):
return dict((in_dict[i], i) for i in in_dict)
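# Illustrative sketch, not part of the original module: basic use of BinaryHeap,
# the priority queue behind RankBasedPrioritizedReplayBuffer. Experience ids and
# priorities are made up for demonstration.
def _demo_binary_heap() -> None:
    heap = BinaryHeap(priority_size=8)
    heap.update(priority=0.5, e_id=1)  # unseen experience ids are inserted
    heap.update(priority=2.0, e_id=2)
    heap.update(priority=1.0, e_id=3)
    print(heap.get_max_priority())     # 2.0, the root of the max-heap
    # Priority ranks (1 = highest priority) map back to experience ids:
    print(heap.priority_to_experience([1, 2, 3]))
    heap.balance_tree()                # full re-sort, used periodically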
| 34.617299 | 120 | 0.577609 |
3f72ccf5cddee8bbafb195e3360ca7fa736a5fd9 | 23,989 | py | Python | tensorflow/python/ops/data_flow_ops.py | hholst80/tenorflow | 79df325975402e03df89747947ff5b7f18407c52 | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/data_flow_ops.py | hholst80/tenorflow | 79df325975402e03df89747947ff5b7f18407c52 | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/data_flow_ops.py | hholst80/tenorflow | 79df325975402e03df89747947ff5b7f18407c52 | ["Apache-2.0"] | null | null | null |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data Flow Operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_data_flow_ops import *
# pylint: enable=wildcard-import
def _as_type_list(dtypes):
"""Convert dtypes to a list of types."""
assert dtypes is not None
if not (isinstance(dtypes, list) or isinstance(dtypes, tuple)):
# We have a single type.
return [dtypes]
else:
# We have a list or tuple of types.
return list(dtypes)
def _as_shape_list(shapes, dtypes, unknown_dim_allowed=False,
unknown_rank_allowed=False):
"""Convert shapes to a list of tuples of int (or None)."""
if unknown_dim_allowed:
if (not isinstance(shapes, collections.Sequence)
or not shapes
or any(shape is None or isinstance(shape, int) for shape in shapes)):
raise ValueError(
"When providing partial shapes, a list of shapes must be provided.")
if shapes is None: return None
if isinstance(shapes, tensor_shape.TensorShape):
shapes = [shapes]
if not isinstance(shapes, (tuple, list)):
raise TypeError(
"shapes must be a TensorShape or a list or tuple of TensorShapes.")
if all(shape is None or isinstance(shape, int) for shape in shapes):
# We have a single shape.
shapes = [shapes]
shapes = [tensor_shape.as_shape(shape) for shape in shapes]
if not unknown_dim_allowed:
if any([not shape.is_fully_defined() for shape in shapes]):
raise ValueError("All shapes must be fully defined: %s" % shapes)
if not unknown_rank_allowed:
if any([shape.dims is None for shape in shapes]):
raise ValueError("All shapes must have a defined rank: %s" % shapes)
return shapes
# pylint: disable=protected-access
class QueueBase(object):
"""Base class for queue implementations.
A queue is a TensorFlow data structure that stores tensors across
multiple steps, and exposes operations that enqueue and dequeue
tensors.
Each queue element is a tuple of one or more tensors, where each
tuple component has a static dtype, and may have a static shape. The
queue implementations support versions of enqueue and dequeue that
handle single elements, and versions that support enqueuing and
dequeuing a batch of elements at once.
See [`tf.FIFOQueue`](#FIFOQueue) and
[`tf.RandomShuffleQueue`](#RandomShuffleQueue) for concrete
implementations of this class, and instructions on how to create
them.
@@enqueue
@@enqueue_many
@@dequeue
@@dequeue_many
@@size
@@close
"""
def __init__(self, dtypes, shapes, queue_ref):
"""Constructs a queue object from a queue reference.
Args:
dtypes: A list of types. The length of dtypes must equal the number
of tensors in each element.
shapes: Constraints on the shapes of tensors in an element:
A list of shape tuples or None. This list is the same length
as dtypes. If the shape of any tensors in the element are constrained,
all must be; shapes can be None if the shapes should not be constrained.
queue_ref: The queue reference, i.e. the output of the queue op.
"""
self._dtypes = dtypes
if shapes is not None:
self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
else:
self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]
self._queue_ref = queue_ref
self._name = self._queue_ref.op.name.split("/")[-1]
@staticmethod
def from_list(index, queues):
"""Create a queue using the queue reference from `queues[index]`.
Args:
index: An integer scalar tensor that determines the input that gets
selected.
queues: A list of `QueueBase` objects.
Returns:
A `QueueBase` object.
Raises:
TypeError: When `queues` is not a list of `QueueBase` objects,
or when the data types of `queues` are not all the same.
"""
if ((not queues) or
(not isinstance(queues, list)) or
(not all(isinstance(x, QueueBase) for x in queues))):
raise TypeError("A list of queues expected")
dtypes = queues[0].dtypes
if not all([dtypes == q.dtypes for q in queues[1:]]):
raise TypeError("Queues do not have matching component dtypes.")
queue_refs = [x.queue_ref for x in queues]
selected_queue = control_flow_ops.ref_select(index, queue_refs)
# TODO(josh11b): Unify the shapes of the queues too?
return QueueBase(dtypes=dtypes, shapes=None, queue_ref=selected_queue)
@property
def queue_ref(self):
"""The underlying queue reference."""
return self._queue_ref
@property
def name(self):
"""The name of the underlying queue."""
return self._queue_ref.op.name
@property
def dtypes(self):
"""The list of dtypes for each component of a queue element."""
return self._dtypes
def _check_enqueue_dtypes(self, vals):
"""Returns `vals` as a list of `Tensor`s, having checked their dtypes.
Args:
vals: A tensor or a list of tensors, corresponding to an
enqueue(_many) tuple.
Returns:
A list of `Tensor` objects.
"""
if not isinstance(vals, (list, tuple)):
vals = [vals]
tensors = []
for i, (val, dtype) in enumerate(zip(vals, self._dtypes)):
tensors.append(ops.convert_to_tensor(val, dtype=dtype,
name="component_%d" % i))
return tensors
def enqueue(self, vals, name=None):
"""Enqueues one element to this queue.
If the queue is full when this operation executes, it will block
until the element has been enqueued.
Args:
vals: The tuple of `Tensor` objects to be enqueued.
name: A name for the operation (optional).
Returns:
The operation that enqueues a new tuple of tensors to the queue.
"""
if not isinstance(vals, (list, tuple)):
vals = [vals]
with ops.op_scope(vals, name, "%s_enqueue" % self._name) as scope:
vals = self._check_enqueue_dtypes(vals)
# NOTE(mrry): Not using a shape function because we need access to
# the `QueueBase` object.
for val, shape in zip(vals, self._shapes):
val.get_shape().assert_is_compatible_with(shape)
return gen_data_flow_ops._queue_enqueue(self._queue_ref, vals, name=scope)
def enqueue_many(self, vals, name=None):
"""Enqueues zero or more elements to this queue.
This operation slices each component tensor along the 0th dimension to
make multiple queue elements. All of the tensors in `vals` must have the
same size in the 0th dimension.
If the queue is full when this operation executes, it will block
until all of the elements have been enqueued.
Args:
vals: The tensor or tuple of tensors from which the queue elements
are taken.
name: A name for the operation (optional).
Returns:
The operation that enqueues a batch of tuples of tensors to the queue.
"""
if not isinstance(vals, (list, tuple)):
vals = [vals]
with ops.op_scope(vals, name, "%s_EnqueueMany" % self._name) as scope:
vals = self._check_enqueue_dtypes(vals)
# NOTE(mrry): Not using a shape function because we need access to
# the `QueueBase` object.
batch_dim = vals[0].get_shape().with_rank_at_least(1)[0]
for val, shape in zip(vals, self._shapes):
batch_dim = batch_dim.merge_with(
val.get_shape().with_rank_at_least(1)[0])
val.get_shape()[1:].assert_is_compatible_with(shape)
return gen_data_flow_ops._queue_enqueue_many(
self._queue_ref, vals, name=scope)
def dequeue(self, name=None):
"""Dequeues one element from this queue.
If the queue is empty when this operation executes, it will block
until there is an element to dequeue.
Args:
name: A name for the operation (optional).
Returns:
The tuple of tensors that was dequeued.
"""
if name is None:
name = "%s_Dequeue" % self._name
ret = gen_data_flow_ops._queue_dequeue(
self._queue_ref, self._dtypes, name=name)
# NOTE(mrry): Not using a shape function because we need access to
# the `QueueBase` object.
op = ret[0].op
for output, shape in zip(op.values(), self._shapes):
output.set_shape(shape)
return ret if len(ret) != 1 else ret[0]
def dequeue_many(self, n, name=None):
"""Dequeues and concatenates `n` elements from this queue.
This operation concatenates queue-element component tensors along
the 0th dimension to make a single component tensor. All of the
components in the dequeued tuple will have size `n` in the 0th dimension.
If the queue contains fewer than `n` elements when this operation
executes, it will block until `n` elements have been dequeued.
Args:
n: A scalar `Tensor` containing the number of elements to dequeue.
name: A name for the operation (optional).
Returns:
The tuple of concatenated tensors that was dequeued.
"""
if name is None:
name = "%s_DequeueMany" % self._name
ret = gen_data_flow_ops._queue_dequeue_many(
self._queue_ref, n, self._dtypes, name=name)
# NOTE(mrry): Not using a shape function because we need access to
# the Queue object.
op = ret[0].op
batch_dim = tensor_shape.Dimension(tensor_util.constant_value(op.inputs[1]))
for output, shape in zip(op.values(), self._shapes):
output.set_shape(tensor_shape.TensorShape([batch_dim]).concatenate(shape))
return ret if len(ret) != 1 else ret[0]
def close(self, cancel_pending_enqueues=False, name=None):
"""Closes this queue.
This operation signals that no more elements will be enqueued in
the given queue. Subsequent `enqueue` and `enqueue_many`
operations will fail. Subsequent `dequeue` and `dequeue_many`
operations will continue to succeed if sufficient elements remain
in the queue. Subsequent `dequeue` and `dequeue_many` operations
that would block will fail immediately.
If `cancel_pending_enqueues` is `True`, all pending requests will also
be cancelled.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
`False` (described above).
name: A name for the operation (optional).
Returns:
The operation that closes the queue.
"""
if name is None:
name = "%s_Close" % self._name
return gen_data_flow_ops._queue_close(
self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues,
name=name)
def size(self, name=None):
"""Compute the number of elements in this queue.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this queue.
"""
if name is None:
name = "%s_Size" % self._name
return gen_data_flow_ops._queue_size(self._queue_ref, name=name)
class RandomShuffleQueue(QueueBase):
"""A queue implementation that dequeues elements in a random order.
See [`tf.QueueBase`](#QueueBase) for a description of the methods on
this class.
@@__init__
"""
def __init__(self, capacity, min_after_dequeue, dtypes, shapes=None,
seed=None, shared_name=None, name="random_shuffle_queue"):
"""Create a queue that dequeues elements in a random order.
A `RandomShuffleQueue` has bounded capacity; supports multiple
concurrent producers and consumers; and provides exactly-once
delivery.
A `RandomShuffleQueue` holds a list of up to `capacity`
elements. Each element is a fixed-length tuple of tensors whose
dtypes are described by `dtypes`, and whose shapes are optionally
described by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
The `min_after_dequeue` argument allows the caller to specify a
minimum number of elements that will remain in the queue after a
`dequeue` or `dequeue_many` operation completes, to ensure a
minimum level of mixing of elements. This invariant is maintained
by blocking those operations until sufficient elements have been
enqueued. The `min_after_dequeue` argument is ignored after the
queue has been closed.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
min_after_dequeue: An integer (described above).
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: (Optional.) A list of fully-defined `TensorShape` objects,
with the same length as `dtypes` or `None`.
seed: A Python integer. Used to create a random seed. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes)
seed1, seed2 = random_seed.get_seed(seed)
queue_ref = gen_data_flow_ops._random_shuffle_queue(
component_types=dtypes, shapes=shapes, capacity=capacity,
min_after_dequeue=min_after_dequeue, seed=seed1, seed2=seed2,
shared_name=shared_name, name=name)
super(RandomShuffleQueue, self).__init__(dtypes, shapes, queue_ref)
class FIFOQueue(QueueBase):
"""A queue implementation that dequeues elements in first-in-first out order.
See [`tf.QueueBase`](#QueueBase) for a description of the methods on
this class.
@@__init__
"""
def __init__(self, capacity, dtypes, shapes=None, shared_name=None,
name="fifo_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `FIFOQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `FIFOQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `dtypes`, and whose shapes are optionally described
by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: (Optional.) A list of fully-defined `TensorShape` objects,
with the same length as `dtypes` or `None`.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes)
queue_ref = gen_data_flow_ops._fifo_queue(
component_types=dtypes, shapes=shapes, capacity=capacity,
shared_name=shared_name, name=name)
super(FIFOQueue, self).__init__(dtypes, shapes, queue_ref)
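# Illustrative sketch, not part of the original module: minimal graph-mode use
# of a FIFO queue, assuming the session-based API of TensorFlow 1.x-era
# releases (tf.FIFOQueue / tf.Session). Values are made up for demonstration.
def _demo_fifo_queue() -> None:
    import tensorflow as tf
    q = tf.FIFOQueue(capacity=3, dtypes=[tf.float32])
    enqueue = q.enqueue([10.0])   # blocks if the queue is full
    dequeue = q.dequeue()         # blocks if the queue is empty
    with tf.Session() as sess:
        sess.run(enqueue)
        print(sess.run(dequeue))  # -> 10.0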
class PaddingFIFOQueue(QueueBase):
""""A FIFOQueue that supports batching variable-sized tensors by padding.
A `PaddingFIFOQueue` may contain components with dynamic shape, while also
supporting `dequeue_many`. See the constructor for more details.
See [`tf.QueueBase`](#QueueBase) for a description of the methods on
this class.
@@__init__
"""
def __init__(self, capacity, dtypes, shapes, shared_name=None,
name="padding_fifo_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `PaddingFIFOQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `PaddingFIFOQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `dtypes`, and whose shapes are described by the `shapes`
argument.
The `shapes` argument must be specified; each component of a queue
element must have the respective shape. Shapes of fixed
rank but variable size are allowed by setting any shape dimension to None.
In this case, the inputs' shape may vary along the given dimension, and
`dequeue_many` will pad the given dimension with zeros up to the maximum
shape of all elements in the given batch.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: A list of `TensorShape` objects, with the same length as
`dtypes`. Any dimension in the `TensorShape` containing value
`None` is dynamic and allows values to be enqueued with
variable size in that dimension.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
Raises:
ValueError: If shapes is not a list of shapes, or the lengths of dtypes
and shapes do not match.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes, unknown_dim_allowed=True)
if len(dtypes) != len(shapes):
raise ValueError("Shapes must be provided for all components, "
"but received %d dtypes and %d shapes."
% (len(dtypes), len(shapes)))
queue_ref = gen_data_flow_ops._padding_fifo_queue(
component_types=dtypes, shapes=shapes, capacity=capacity,
shared_name=shared_name, name=name)
super(PaddingFIFOQueue, self).__init__(dtypes, shapes, queue_ref)
# TODO(josh11b): class BatchQueue(QueueBase):
def initialize_all_tables(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
Args:
name: Optional name for the initialization op.
Returns:
An Op that initializes all tables. Note that if there are
no tables the returned Op is a NoOp.
"""
initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
if initializers:
return control_flow_ops.group(*initializers, name=name)
return control_flow_ops.no_op(name=name)
ops.NoGradient("LookupTableFind")
ops.NoGradient("LookupTableSize")
ops.NoGradient("HashTable")
ops.NoGradient("InitializeTable")
ops.RegisterShape("QueueSize")(common_shapes.scalar_shape)
ops.RegisterShape("Queue")(common_shapes.scalar_shape)
ops.RegisterShape("FIFOQueue")(common_shapes.scalar_shape)
ops.RegisterShape("PaddingFIFOQueue")(common_shapes.scalar_shape)
ops.RegisterShape("RandomShuffleQueue")(common_shapes.scalar_shape)
def _ScalarToVoidShape(op):
"""Shape function for ops that take a scalar and produce no outputs."""
op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
return []
# NOTE(mrry): The following ops use higher-level information in the
# Queue class to provide shape information.
ops.RegisterShape("QueueDequeue")(common_shapes.unknown_shape)
ops.RegisterShape("QueueDequeueMany")(common_shapes.unknown_shape)
ops.RegisterShape("QueueEnqueue")(common_shapes.unknown_shape)
ops.RegisterShape("QueueEnqueueMany")(common_shapes.unknown_shape)
ops.RegisterShape("QueueClose")(_ScalarToVoidShape)
ops.RegisterShape("Stack")(common_shapes.scalar_shape)
ops.RegisterShape("StackPush")(common_shapes.unknown_shape)
ops.RegisterShape("StackPop")(common_shapes.unknown_shape)
ops.RegisterShape("StackClose")(_ScalarToVoidShape)
# NOTE(yuanbyu): We probably can do better here.
ops.RegisterShape("GetSessionHandle")(common_shapes.scalar_shape)
ops.RegisterShape("GetSessionTensor")(common_shapes.unknown_shape)
ops.RegisterShape("DeleteSessionTensor")(_ScalarToVoidShape)
@ops.RegisterShape("DynamicPartition")
def _DynamicPartitionShape(op):
"""Shape function for data_flow_ops.dynamic_partition."""
data_shape = op.inputs[0].get_shape()
partitions_shape = op.inputs[1].get_shape()
# If we don't know the rank of partitions, we don't know anything
mid = partitions_shape.ndims
if mid is None:
result_shape = tensor_shape.unknown_shape()
else:
# data_shape must start with partitions_shape
partitions_shape.assert_is_compatible_with(data_shape[:mid])
# The partition shape is dynamic in the 0th dimension, and matches
# data_shape in the remaining dimensions.
result_shape = tensor_shape.TensorShape([None]).concatenate(
data_shape[mid:])
return [result_shape] * op.get_attr("num_partitions")
@ops.RegisterShape("DynamicStitch")
def _DynamicStitchShape(op):
"""Shape function for data_flow_ops.dynamic_stitch."""
num_partitions = op.get_attr("N")
indices_shapes = [t.get_shape() for t in op.inputs[0:num_partitions]]
data_shapes = [t.get_shape() for t in op.inputs[num_partitions:]]
output_shape = tensor_shape.unknown_shape()
extra_shape = tensor_shape.TensorShape(None)
for indices_shape, data_shape in zip(indices_shapes, data_shapes):
indices_ndims = indices_shape.ndims
if indices_ndims is not None:
# Assert that data_shape starts with indices_shape
indices_shape.merge_with(data_shape[:indices_ndims])
# The rest belongs to output
extra_shape = extra_shape.merge_with(data_shape[indices_ndims:])
return [tensor_shape.TensorShape([None]).concatenate(extra_shape)]
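# Illustrative sketch, not part of the original module: the semantics that the
# two shape functions above describe. tf.dynamic_partition splits `data` by the
# `partitions` vector; tf.dynamic_stitch is its inverse. Assumes a session-based
# TensorFlow 1.x-era API; values are made up for demonstration.
def _demo_dynamic_partition_stitch() -> None:
    import tensorflow as tf
    data = tf.constant([10, 20, 30, 40])
    partitions = tf.constant([0, 1, 0, 1])
    parts = tf.dynamic_partition(data, partitions, num_partitions=2)
    # parts[0] gathers entries whose partition id is 0, parts[1] the rest;
    # only the 0th output dimension is dynamic, matching _DynamicPartitionShape.
    indices = tf.dynamic_partition(tf.range(4), partitions, num_partitions=2)
    restored = tf.dynamic_stitch(indices, parts)
    with tf.Session() as sess:
        print(sess.run(parts))     # [array([10, 30]), array([20, 40])]
        print(sess.run(restored))  # [10, 20, 30, 40]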
@ops.RegisterShape("LookupTableFind")
def _LookupTableFindShape(op):
"""Shape function for data_flow_ops._lookup_table_find."""
op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
shape_in = op.inputs[1].get_shape()
return [shape_in]
@ops.RegisterShape("LookupTableSize")
def _LookupTableSizeShape(op):
"""Shape function for data_flow_ops._lookup_table_find."""
op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
return [tensor_shape.scalar()]
@ops.RegisterShape("HashTable")
def _HashTableShape(_):
"""Shape function for data_flow_ops._hash_table."""
return [tensor_shape.scalar()]
@ops.RegisterShape("InitializeTable")
def _InitializeLookupTableShape(op):
"""Shape function for data_flow_ops._initialize_table."""
op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
keys_shape = op.inputs[1].get_shape().with_rank(1)
op.inputs[2].get_shape().merge_with(keys_shape)
return []
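# A minimal sketch of the same registration pattern for a hypothetical op.
# "ExampleScalarHandleOp" does not exist in TensorFlow; it stands in for any op
# that takes a scalar handle plus a tensor and returns a tensor of the same
# shape, mirroring the lookup-table shape functions above.
@ops.RegisterShape("ExampleScalarHandleOp")
def _ExampleScalarHandleOpShape(op):
  """Shape function for a hypothetical scalar-handle op (illustrative only)."""
  op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
  return [op.inputs[1].get_shape()]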
| 37.25
| 81
| 0.714911
|
705df2b433856b87121a6b9854afc9c1385aeef4
| 1,553
|
py
|
Python
|
monk/gluon/models/params.py
|
gstearmit/monk_v1
|
89184ae27dc6d134620034d5b12aa86473ea47ba
|
[
"Apache-2.0"
] | null | null | null |
monk/gluon/models/params.py
|
gstearmit/monk_v1
|
89184ae27dc6d134620034d5b12aa86473ea47ba
|
[
"Apache-2.0"
] | 9
|
2020-01-28T21:40:39.000Z
|
2022-02-10T01:24:06.000Z
|
monk/gluon/models/params.py
|
abhi-kumar/monk_kaggle_bengali_ai
|
12a6c654446e887706c1a8bed82fccf8a98ce356
|
[
"Apache-2.0"
] | 1
|
2020-10-07T12:57:44.000Z
|
2020-10-07T12:57:44.000Z
|
from gluon.models.imports import *
from system.imports import *
from gluon.models.models import combined_list_lower
@accepts(str, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_model_name(name, system_dict):
if(name not in combined_list_lower):
msg = "Model name {} not in {}".format(name, combined_list_lower);
raise ConstraintError(msg);
system_dict["model"]["params"]["model_name"] = name;
return system_dict;
@accepts(bool, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_device(value, system_dict):
GPUs = GPUtil.getGPUs()
if(value and len(GPUs)==0):
msg = "GPU not accessible yet requested."
ConstraintWarning(msg)
system_dict["model"]["params"]["use_gpu"] = False;
else:
system_dict["model"]["params"]["use_gpu"] = value;
return system_dict;
@accepts(bool, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_pretrained(value, system_dict):
system_dict["model"]["params"]["use_pretrained"] = value;
return system_dict;
@accepts(bool, dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_freeze_base_network(value, system_dict):
system_dict["model"]["params"]["freeze_base_network"] = value;
return system_dict;
@accepts([str, list], dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_model_path(path, system_dict):
system_dict["model"]["params"]["model_path"] = path;
return system_dict;
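# A minimal usage sketch (not part of the original module). The setters above
# expect a nested system_dict that already contains a "model" -> "params"
# mapping; the literal below is an assumption made purely for illustration.
if __name__ == "__main__":
    demo_system_dict = {"model": {"params": {}}}
    demo_system_dict = set_device(False, demo_system_dict)
    demo_system_dict = set_pretrained(True, demo_system_dict)
    demo_system_dict = set_freeze_base_network(True, demo_system_dict)
    print(demo_system_dict["model"]["params"])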
| 33.042553
| 74
| 0.719897
|
74ffb811f8ed0fb3aca358e798814645f4d623aa
| 212
|
py
|
Python
|
src/app/api/ping.py
|
goldytech/fastapi-rnd
|
3249b55d87c96b70a60e028df76eade6c04721c0
|
[
"MIT"
] | 1
|
2021-11-17T12:59:35.000Z
|
2021-11-17T12:59:35.000Z
|
src/app/api/ping.py
|
goldytech/fastapi-rnd
|
3249b55d87c96b70a60e028df76eade6c04721c0
|
[
"MIT"
] | null | null | null |
src/app/api/ping.py
|
goldytech/fastapi-rnd
|
3249b55d87c96b70a60e028df76eade6c04721c0
|
[
"MIT"
] | null | null | null |
from fastapi import APIRouter
router = APIRouter()
@router.get("/ping")
async def pong():
# some async operation could happen here
# example: `notes = await get_all_notes()`
return {"ping": "pong"}
| 21.2
| 46
| 0.674528
|
ab458bcef85a3a3aa240db3dbe65bf04ec292bc5
| 24,362
|
py
|
Python
|
rllib/env/multi_agent_env.py
|
richardliao/ray
|
9eb1635055d93741d263d200bd700708e3d38841
|
[
"Apache-2.0"
] | null | null | null |
rllib/env/multi_agent_env.py
|
richardliao/ray
|
9eb1635055d93741d263d200bd700708e3d38841
|
[
"Apache-2.0"
] | null | null | null |
rllib/env/multi_agent_env.py
|
richardliao/ray
|
9eb1635055d93741d263d200bd700708e3d38841
|
[
"Apache-2.0"
] | null | null | null |
import gym
import logging
from typing import Callable, Dict, List, Tuple, Type, Optional, Union, Set
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.utils.annotations import ExperimentalAPI, override, PublicAPI, \
DeveloperAPI
from ray.rllib.utils.typing import AgentID, EnvCreator, EnvID, EnvType, \
MultiAgentDict, MultiEnvDict
# If the obs space is Dict type, look for the global state under this key.
ENV_STATE = "state"
logger = logging.getLogger(__name__)
@PublicAPI
class MultiAgentEnv(gym.Env):
"""An environment that hosts multiple independent agents.
Agents are identified by (string) agent ids. Note that these "agents" here
are not to be confused with RLlib Trainers, which are also sometimes
referred to as "agents" or "RL agents".
"""
def __init__(self):
if not hasattr(self, "observation_space"):
self.observation_space = None
if not hasattr(self, "action_space"):
self.action_space = None
if not hasattr(self, "_agent_ids"):
self._agent_ids = set()
# do the action and observation spaces map from agent ids to spaces
# for the individual agents?
if not hasattr(self, "_spaces_in_preferred_format"):
self._spaces_in_preferred_format = None
@PublicAPI
def reset(self) -> MultiAgentDict:
"""Resets the env and returns observations from ready agents.
Returns:
New observations for each ready agent.
Examples:
>>> env = MyMultiAgentEnv()
>>> obs = env.reset()
>>> print(obs)
{
"car_0": [2.4, 1.6],
"car_1": [3.4, -3.2],
"traffic_light_1": [0, 3, 5, 1],
}
"""
raise NotImplementedError
@PublicAPI
def step(
self, action_dict: MultiAgentDict
) -> Tuple[MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict]:
"""Returns observations from ready agents.
The returns are dicts mapping from agent_id strings to values. The
number of agents in the env can vary over time.
Returns:
Tuple containing 1) new observations for
each ready agent, 2) reward values for each ready agent. If
the episode is just started, the value will be None.
3) Done values for each ready agent. The special key
"__all__" (required) is used to indicate env termination.
4) Optional info values for each agent id.
Examples:
>>> obs, rewards, dones, infos = env.step(
... action_dict={
... "car_0": 1, "car_1": 0, "traffic_light_1": 2,
... })
>>> print(rewards)
{
"car_0": 3,
"car_1": -1,
"traffic_light_1": 0,
}
>>> print(dones)
{
"car_0": False, # car_0 is still running
"car_1": True, # car_1 is done
"__all__": False, # the env is not done
}
>>> print(infos)
{
"car_0": {}, # info for car_0
"car_1": {}, # info for car_1
}
"""
raise NotImplementedError
@ExperimentalAPI
def observation_space_contains(self, x: MultiAgentDict) -> bool:
"""Checks if the observation space contains the given key.
Args:
x: Observations to check.
Returns:
            True if the observation space contains all observations in x.
"""
if not hasattr(self, "_spaces_in_preferred_format") or \
self._spaces_in_preferred_format is None:
self._spaces_in_preferred_format = \
self._check_if_space_maps_agent_id_to_sub_space()
if self._spaces_in_preferred_format:
return self.observation_space.contains(x)
logger.warning("observation_space_contains() has not been implemented")
return True
@ExperimentalAPI
def action_space_contains(self, x: MultiAgentDict) -> bool:
"""Checks if the action space contains the given action.
Args:
x: Actions to check.
Returns:
True if the action space contains all actions in x.
"""
if not hasattr(self, "_spaces_in_preferred_format") or \
self._spaces_in_preferred_format is None:
self._spaces_in_preferred_format = \
self._check_if_space_maps_agent_id_to_sub_space()
if self._spaces_in_preferred_format:
return self.action_space.contains(x)
logger.warning("action_space_contains() has not been implemented")
return True
@ExperimentalAPI
def action_space_sample(self, agent_ids: list = None) -> MultiAgentDict:
"""Returns a random action for each environment, and potentially each
agent in that environment.
Args:
agent_ids: List of agent ids to sample actions for. If None or
empty list, sample actions for all agents in the
environment.
Returns:
A random action for each environment.
"""
if not hasattr(self, "_spaces_in_preferred_format") or \
self._spaces_in_preferred_format is None:
self._spaces_in_preferred_format = \
self._check_if_space_maps_agent_id_to_sub_space()
if self._spaces_in_preferred_format:
if agent_ids is None:
agent_ids = self.get_agent_ids()
samples = self.action_space.sample()
return {agent_id: samples[agent_id] for agent_id in agent_ids}
logger.warning("action_space_sample() has not been implemented")
del agent_ids
return {}
@ExperimentalAPI
def observation_space_sample(self, agent_ids: list = None) -> MultiEnvDict:
"""Returns a random observation from the observation space for each
agent if agent_ids is None, otherwise returns a random observation for
the agents in agent_ids.
Args:
            agent_ids: List of agent ids to sample observations for. If None or
                empty list, sample observations for all agents in the
                environment.
Returns:
            A random observation for each environment.
"""
if not hasattr(self, "_spaces_in_preferred_format") or \
self._spaces_in_preferred_format is None:
self._spaces_in_preferred_format = \
self._check_if_space_maps_agent_id_to_sub_space()
if self._spaces_in_preferred_format:
if agent_ids is None:
agent_ids = self.get_agent_ids()
samples = self.observation_space.sample()
samples = {agent_id: samples[agent_id] for agent_id in agent_ids}
return samples
logger.warning("observation_space_sample() has not been implemented")
del agent_ids
return {}
@PublicAPI
def get_agent_ids(self) -> Set[AgentID]:
"""Returns a set of agent ids in the environment.
Returns:
Set of agent ids.
"""
if not isinstance(self._agent_ids, set):
self._agent_ids = set(self._agent_ids)
return self._agent_ids
@PublicAPI
def render(self, mode=None) -> None:
"""Tries to render the environment."""
# By default, do nothing.
pass
# yapf: disable
# __grouping_doc_begin__
@ExperimentalAPI
def with_agent_groups(
self,
groups: Dict[str, List[AgentID]],
obs_space: gym.Space = None,
act_space: gym.Space = None) -> "MultiAgentEnv":
"""Convenience method for grouping together agents in this env.
An agent group is a list of agent IDs that are mapped to a single
logical agent. All agents of the group must act at the same time in the
environment. The grouped agent exposes Tuple action and observation
spaces that are the concatenated action and obs spaces of the
individual agents.
The rewards of all the agents in a group are summed. The individual
agent rewards are available under the "individual_rewards" key of the
group info return.
Agent grouping is required to leverage algorithms such as Q-Mix.
This API is experimental.
Args:
groups: Mapping from group id to a list of the agent ids
of group members. If an agent id is not present in any group
value, it will be left ungrouped.
obs_space: Optional observation space for the grouped
env. Must be a tuple space.
act_space: Optional action space for the grouped env.
Must be a tuple space.
Examples:
>>> env = YourMultiAgentEnv(...)
>>> grouped_env = env.with_agent_groups(env, {
... "group1": ["agent1", "agent2", "agent3"],
... "group2": ["agent4", "agent5"],
... })
"""
from ray.rllib.env.wrappers.group_agents_wrapper import \
GroupAgentsWrapper
return GroupAgentsWrapper(self, groups, obs_space, act_space)
# __grouping_doc_end__
# yapf: enable
@PublicAPI
def to_base_env(
self,
make_env: Optional[Callable[[int], EnvType]] = None,
num_envs: int = 1,
remote_envs: bool = False,
remote_env_batch_wait_ms: int = 0,
) -> "BaseEnv":
"""Converts an RLlib MultiAgentEnv into a BaseEnv object.
The resulting BaseEnv is always vectorized (contains n
sub-environments) to support batched forward passes, where n may
also be 1. BaseEnv also supports async execution via the `poll` and
`send_actions` methods and thus supports external simulators.
Args:
make_env: A callable taking an int as input (which indicates
the number of individual sub-environments within the final
vectorized BaseEnv) and returning one individual
sub-environment.
num_envs: The number of sub-environments to create in the
resulting (vectorized) BaseEnv. The already existing `env`
will be one of the `num_envs`.
remote_envs: Whether each sub-env should be a @ray.remote
actor. You can set this behavior in your config via the
`remote_worker_envs=True` option.
remote_env_batch_wait_ms: The wait time (in ms) to poll remote
sub-environments for, if applicable. Only used if
`remote_envs` is True.
Returns:
The resulting BaseEnv object.
"""
from ray.rllib.env.remote_base_env import RemoteBaseEnv
if remote_envs:
env = RemoteBaseEnv(
make_env,
num_envs,
multiagent=True,
remote_env_batch_wait_ms=remote_env_batch_wait_ms)
# Sub-environments are not ray.remote actors.
else:
env = MultiAgentEnvWrapper(
make_env=make_env, existing_envs=[self], num_envs=num_envs)
return env
@DeveloperAPI
def _check_if_space_maps_agent_id_to_sub_space(self) -> bool:
# do the action and observation spaces map from agent ids to spaces
# for the individual agents?
obs_space_check = (
hasattr(self, "observation_space")
and isinstance(self.observation_space, gym.spaces.Dict)
and set(self.observation_space.keys()) == self.get_agent_ids())
action_space_check = (
hasattr(self, "action_space")
and isinstance(self.action_space, gym.spaces.Dict)
and set(self.action_space.keys()) == self.get_agent_ids())
return obs_space_check and action_space_check
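# A minimal sketch (not part of the RLlib API) of a concrete subclass, showing
# the per-agent dict convention documented above: reset() and step() key every
# value by agent id, and dones["__all__"] signals episode termination.
class _ExampleTwoAgentEnv(MultiAgentEnv):
    def __init__(self, config=None):
        super().__init__()
        self.observation_space = gym.spaces.Discrete(2)
        self.action_space = gym.spaces.Discrete(2)
        self._agent_ids = {"agent_0", "agent_1"}
        self._num_steps = 0

    def reset(self):
        self._num_steps = 0
        return {agent_id: 0 for agent_id in self._agent_ids}

    def step(self, action_dict):
        self._num_steps += 1
        episode_over = self._num_steps >= 10
        obs = {agent_id: 1 for agent_id in action_dict}
        rewards = {agent_id: 1.0 for agent_id in action_dict}
        dones = {agent_id: episode_over for agent_id in action_dict}
        dones["__all__"] = episode_over
        infos = {agent_id: {} for agent_id in action_dict}
        return obs, rewards, dones, infos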
def make_multi_agent(env_name_or_creator: Union[str, EnvCreator],
) -> Type["MultiAgentEnv"]:
"""Convenience wrapper for any single-agent env to be converted into MA.
Allows you to convert a simple (single-agent) `gym.Env` class
into a `MultiAgentEnv` class. This function simply stacks n instances
of the given ```gym.Env``` class into one unified ``MultiAgentEnv`` class
and returns this class, thus pretending the agents act together in the
same environment, whereas - under the hood - they live separately from
each other in n parallel single-agent envs.
    Agent IDs in the resulting env are int numbers starting from 0
    (first agent).
Args:
env_name_or_creator: String specifier or env_maker function taking
an EnvContext object as only arg and returning a gym.Env.
Returns:
New MultiAgentEnv class to be used as env.
The constructor takes a config dict with `num_agents` key
(default=1). The rest of the config dict will be passed on to the
underlying single-agent env's constructor.
Examples:
>>> # By gym string:
>>> ma_cartpole_cls = make_multi_agent("CartPole-v0")
>>> # Create a 2 agent multi-agent cartpole.
>>> ma_cartpole = ma_cartpole_cls({"num_agents": 2})
>>> obs = ma_cartpole.reset()
>>> print(obs)
... {0: [...], 1: [...]}
>>> # By env-maker callable:
>>> from ray.rllib.examples.env.stateless_cartpole import \
... StatelessCartPole
>>> ma_stateless_cartpole_cls = make_multi_agent(
... lambda config: StatelessCartPole(config))
>>> # Create a 3 agent multi-agent stateless cartpole.
>>> ma_stateless_cartpole = ma_stateless_cartpole_cls(
... {"num_agents": 3})
>>> print(obs)
... {0: [...], 1: [...], 2: [...]}
"""
class MultiEnv(MultiAgentEnv):
def __init__(self, config=None):
MultiAgentEnv.__init__(self)
config = config or {}
num = config.pop("num_agents", 1)
if isinstance(env_name_or_creator, str):
self.agents = [
gym.make(env_name_or_creator) for _ in range(num)
]
else:
self.agents = [env_name_or_creator(config) for _ in range(num)]
self.dones = set()
self.observation_space = self.agents[0].observation_space
self.action_space = self.agents[0].action_space
self._agent_ids = set(range(num))
@override(MultiAgentEnv)
def observation_space_sample(self,
agent_ids: list = None) -> MultiAgentDict:
if agent_ids is None:
agent_ids = list(range(len(self.agents)))
obs = {
agent_id: self.observation_space.sample()
for agent_id in agent_ids
}
return obs
@override(MultiAgentEnv)
def action_space_sample(self,
agent_ids: list = None) -> MultiAgentDict:
if agent_ids is None:
agent_ids = list(range(len(self.agents)))
actions = {
agent_id: self.action_space.sample()
for agent_id in agent_ids
}
return actions
@override(MultiAgentEnv)
def action_space_contains(self, x: MultiAgentDict) -> bool:
if not isinstance(x, dict):
return False
return all(self.action_space.contains(val) for val in x.values())
@override(MultiAgentEnv)
def observation_space_contains(self, x: MultiAgentDict) -> bool:
if not isinstance(x, dict):
return False
return all(
self.observation_space.contains(val) for val in x.values())
@override(MultiAgentEnv)
def reset(self):
self.dones = set()
return {i: a.reset() for i, a in enumerate(self.agents)}
@override(MultiAgentEnv)
def step(self, action_dict):
obs, rew, done, info = {}, {}, {}, {}
for i, action in action_dict.items():
obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
if done[i]:
self.dones.add(i)
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
@override(MultiAgentEnv)
def render(self, mode=None):
return self.agents[0].render(mode)
return MultiEnv
class MultiAgentEnvWrapper(BaseEnv):
"""Internal adapter of MultiAgentEnv to BaseEnv.
This also supports vectorization if num_envs > 1.
"""
def __init__(self, make_env: Callable[[int], EnvType],
existing_envs: MultiAgentEnv, num_envs: int):
"""Wraps MultiAgentEnv(s) into the BaseEnv API.
Args:
make_env (Callable[[int], EnvType]): Factory that produces a new
MultiAgentEnv instance. Must be defined, if the number of
existing envs is less than num_envs.
existing_envs (List[MultiAgentEnv]): List of already existing
multi-agent envs.
num_envs (int): Desired num multiagent envs to have at the end in
total. This will include the given (already created)
`existing_envs`.
"""
self.make_env = make_env
self.envs = existing_envs
self.num_envs = num_envs
self.dones = set()
while len(self.envs) < self.num_envs:
self.envs.append(self.make_env(len(self.envs)))
for env in self.envs:
assert isinstance(env, MultiAgentEnv)
self.env_states = [_MultiAgentEnvState(env) for env in self.envs]
self._unwrapped_env = self.envs[0].unwrapped
self._agent_ids = self._unwrapped_env.get_agent_ids()
@override(BaseEnv)
def poll(self) -> Tuple[MultiEnvDict, MultiEnvDict, MultiEnvDict,
MultiEnvDict, MultiEnvDict]:
obs, rewards, dones, infos = {}, {}, {}, {}
for i, env_state in enumerate(self.env_states):
obs[i], rewards[i], dones[i], infos[i] = env_state.poll()
return obs, rewards, dones, infos, {}
@override(BaseEnv)
def send_actions(self, action_dict: MultiEnvDict) -> None:
for env_id, agent_dict in action_dict.items():
if env_id in self.dones:
raise ValueError("Env {} is already done".format(env_id))
env = self.envs[env_id]
obs, rewards, dones, infos = env.step(agent_dict)
assert isinstance(obs, dict), "Not a multi-agent obs"
assert isinstance(rewards, dict), "Not a multi-agent reward"
assert isinstance(dones, dict), "Not a multi-agent return"
assert isinstance(infos, dict), "Not a multi-agent info"
if set(infos).difference(set(obs)):
raise ValueError("Key set for infos must be a subset of obs: "
"{} vs {}".format(infos.keys(), obs.keys()))
if "__all__" not in dones:
raise ValueError(
"In multi-agent environments, '__all__': True|False must "
"be included in the 'done' dict: got {}.".format(dones))
if dones["__all__"]:
self.dones.add(env_id)
self.env_states[env_id].observe(obs, rewards, dones, infos)
@override(BaseEnv)
def try_reset(self,
env_id: Optional[EnvID] = None) -> Optional[MultiEnvDict]:
ret = {}
if isinstance(env_id, int):
env_id = [env_id]
if env_id is None:
env_id = list(range(len(self.envs)))
for idx in env_id:
obs = self.env_states[idx].reset()
assert isinstance(obs, dict), "Not a multi-agent obs"
if obs is not None and idx in self.dones:
self.dones.remove(idx)
ret[idx] = obs
return ret
@override(BaseEnv)
def get_sub_environments(self, as_dict: bool = False) -> List[EnvType]:
if as_dict:
return {
_id: env_state
for _id, env_state in enumerate(self.env_states)
}
return [state.env for state in self.env_states]
@override(BaseEnv)
def try_render(self, env_id: Optional[EnvID] = None) -> None:
if env_id is None:
env_id = 0
assert isinstance(env_id, int)
return self.envs[env_id].render()
@property
@override(BaseEnv)
@PublicAPI
def observation_space(self) -> gym.spaces.Dict:
        return self.envs[0].observation_space
@property
@override(BaseEnv)
@PublicAPI
def action_space(self) -> gym.Space:
return self.envs[0].action_space
@override(BaseEnv)
def observation_space_contains(self, x: MultiEnvDict) -> bool:
return all(
self.envs[0].observation_space_contains(val) for val in x.values())
@override(BaseEnv)
def action_space_contains(self, x: MultiEnvDict) -> bool:
return all(
self.envs[0].action_space_contains(val) for val in x.values())
@override(BaseEnv)
def observation_space_sample(self, agent_ids: list = None) -> MultiEnvDict:
return {0: self.envs[0].observation_space_sample(agent_ids)}
@override(BaseEnv)
def action_space_sample(self, agent_ids: list = None) -> MultiEnvDict:
return {0: self.envs[0].action_space_sample(agent_ids)}
@override(BaseEnv)
def get_agent_ids(self) -> Set[AgentID]:
return self._agent_ids
class _MultiAgentEnvState:
def __init__(self, env: MultiAgentEnv):
assert isinstance(env, MultiAgentEnv)
self.env = env
self.initialized = False
self.last_obs = {}
self.last_rewards = {}
self.last_dones = {"__all__": False}
self.last_infos = {}
def poll(
self
) -> Tuple[MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict]:
if not self.initialized:
self.reset()
self.initialized = True
observations = self.last_obs
rewards = {}
dones = {"__all__": self.last_dones["__all__"]}
infos = {}
# If episode is done, release everything we have.
if dones["__all__"]:
rewards = self.last_rewards
self.last_rewards = {}
dones = self.last_dones
self.last_dones = {}
self.last_obs = {}
infos = self.last_infos
self.last_infos = {}
# Only release those agents' rewards/dones/infos, whose
# observations we have.
else:
for ag in observations.keys():
if ag in self.last_rewards:
rewards[ag] = self.last_rewards[ag]
del self.last_rewards[ag]
if ag in self.last_dones:
dones[ag] = self.last_dones[ag]
del self.last_dones[ag]
if ag in self.last_infos:
infos[ag] = self.last_infos[ag]
del self.last_infos[ag]
self.last_dones["__all__"] = False
return observations, rewards, dones, infos
def observe(self, obs: MultiAgentDict, rewards: MultiAgentDict,
dones: MultiAgentDict, infos: MultiAgentDict):
self.last_obs = obs
for ag, r in rewards.items():
if ag in self.last_rewards:
self.last_rewards[ag] += r
else:
self.last_rewards[ag] = r
for ag, d in dones.items():
if ag in self.last_dones:
self.last_dones[ag] = self.last_dones[ag] or d
else:
self.last_dones[ag] = d
self.last_infos = infos
def reset(self) -> MultiAgentDict:
self.last_obs = self.env.reset()
self.last_rewards = {}
self.last_dones = {"__all__": False}
self.last_infos = {}
return self.last_obs
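# A minimal end-to-end sketch (assuming the standard "CartPole-v0" gym
# environment is installed) of combining make_multi_agent() with to_base_env():
# poll() returns per-env dicts that are themselves keyed by agent id.
if __name__ == "__main__":
    ma_cartpole_cls = make_multi_agent("CartPole-v0")
    ma_cartpole = ma_cartpole_cls({"num_agents": 2})
    base_env = ma_cartpole.to_base_env(num_envs=1)
    obs, rewards, dones, infos, _ = base_env.poll()
    print(sorted(obs[0].keys()))  # -> [0, 1], the two agent ids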
| 37.94704
| 79
| 0.589443
|
c8e8a6398cc8771a23a7377780f121aea39c1ca0
| 12,440
|
py
|
Python
|
tests/test_markdown.py
|
mitya57/pymarkups
|
adb36c6d1ba674e026227ced97110ff47224996f
|
[
"BSD-3-Clause"
] | 3
|
2015-01-08T10:26:41.000Z
|
2015-02-19T13:41:46.000Z
|
tests/test_markdown.py
|
mitya57/pymarkups
|
adb36c6d1ba674e026227ced97110ff47224996f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_markdown.py
|
mitya57/pymarkups
|
adb36c6d1ba674e026227ced97110ff47224996f
|
[
"BSD-3-Clause"
] | null | null | null |
# vim: ts=8:sts=8:sw=8:noexpandtab
# This file is part of python-markups test suite
# License: 3-clause BSD, see LICENSE file
# Copyright: (C) Dmitry Shachnev, 2012-2021
from markups.markdown import MarkdownMarkup, _canonicalized_ext_names
from os.path import join
from tempfile import TemporaryDirectory
import unittest
import warnings
try:
import pymdownx
except ImportError:
pymdownx = None
try:
import yaml
except ImportError:
yaml = None
tables_source = \
'''th1 | th2
--- | ---
t11 | t21
t12 | t22'''
tables_output = \
'''<table>
<thead>
<tr>
<th>th1</th>
<th>th2</th>
</tr>
</thead>
<tbody>
<tr>
<td>t11</td>
<td>t21</td>
</tr>
<tr>
<td>t12</td>
<td>t22</td>
</tr>
</tbody>
</table>
'''
deflists_source = \
'''Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
Orange
: The fruit of an evergreen tree of the genus Citrus.'''
deflists_output = \
'''<dl>
<dt>Apple</dt>
<dd>Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.</dd>
<dt>Orange</dt>
<dd>The fruit of an evergreen tree of the genus Citrus.</dd>
</dl>
'''
mathjax_header = \
'<!--- Type: markdown; Required extensions: mathjax --->\n\n'
mathjax_source = \
r'''$i_1$ some text \$escaped\$ $i_2$
\(\LaTeX\) \\(escaped\)
$$m_1$$ text $$m_2$$
\[m_3\] text \[m_4\]
\( \sin \alpha \) text \( \sin \beta \)
\[ \alpha \] text \[ \beta \]
\$$escaped\$$ \\[escaped\]
'''
mathjax_output = \
r'''<p>
<script type="math/tex">i_1</script> some text $escaped$ <script type="math/tex">i_2</script>
</p>
<p>
<script type="math/tex">\LaTeX</script> \(escaped)</p>
<p>
<script type="math/tex; mode=display">m_1</script> text <script type="math/tex; mode=display">m_2</script>
</p>
<p>
<script type="math/tex; mode=display">m_3</script> text <script type="math/tex; mode=display">m_4</script>
</p>
<p>
<script type="math/tex"> \sin \alpha </script> text <script type="math/tex"> \sin \beta </script>
</p>
<p>
<script type="math/tex; mode=display"> \alpha </script> text <script type="math/tex; mode=display"> \beta </script>
</p>
<p>$$escaped$$ \[escaped]</p>
'''
mathjax_multiline_source = \
r'''
$$
\TeX
\LaTeX
$$
'''
mathjax_multiline_output = \
r'''<p>
<script type="math/tex; mode=display">
\TeX
\LaTeX
</script>
</p>
'''
mathjax_multilevel_source = \
r'''
\begin{equation*}
\begin{pmatrix}
1 & 0\\
0 & 1
\end{pmatrix}
\end{equation*}
'''
mathjax_multilevel_output = \
r'''<p>
<script type="math/tex; mode=display">\begin{equation*}
\begin{pmatrix}
1 & 0\\
0 & 1
\end{pmatrix}
\end{equation*}</script>
</p>
'''
@unittest.skipUnless(MarkdownMarkup.available(), 'Markdown not available')
class MarkdownTest(unittest.TestCase):
maxDiff = None
def setUp(self):
warnings.simplefilter("ignore", Warning)
def test_empty_file(self):
markup = MarkdownMarkup()
self.assertEqual(markup.convert('').get_document_body(), '\n')
def test_extensions_loading(self):
markup = MarkdownMarkup()
self.assertIsNone(markup._canonicalize_extension_name('nonexistent'))
self.assertIsNone(markup._canonicalize_extension_name('nonexistent(someoption)'))
self.assertIsNone(markup._canonicalize_extension_name('.foobar'))
self.assertEqual(markup._canonicalize_extension_name('meta'), 'markdown.extensions.meta')
name, parameters = markup._split_extension_config('toc(anchorlink=1, foo=bar)')
self.assertEqual(name, 'toc')
self.assertEqual(parameters, {'anchorlink': '1', 'foo': 'bar'})
def test_loading_extensions_by_module_name(self):
markup = MarkdownMarkup(extensions=['markdown.extensions.footnotes'])
source = ('Footnotes[^1] have a label and the content.\n\n'
'[^1]: This is a footnote content.')
html = markup.convert(source).get_document_body()
self.assertIn('<sup', html)
self.assertIn('footnote-backref', html)
def test_removing_duplicate_extensions(self):
markup = MarkdownMarkup(extensions=['remove_extra', 'toc', 'markdown.extensions.toc'])
self.assertEqual(len(markup.extensions), 1)
self.assertIn('markdown.extensions.toc', markup.extensions)
def test_extensions_parameters(self):
markup = MarkdownMarkup(extensions=['toc(anchorlink=1)'])
html = markup.convert('## Header').get_document_body()
self.assertEqual(html,
'<h2 id="header"><a class="toclink" href="#header">Header</a></h2>\n')
self.assertEqual(_canonicalized_ext_names['toc'], 'markdown.extensions.toc')
def test_document_extensions_parameters(self):
markup = MarkdownMarkup(extensions=[])
toc_header = '<!--- Required extensions: toc(anchorlink=1) --->\n\n'
html = markup.convert(toc_header + '## Header').get_document_body()
self.assertEqual(html, toc_header +
'<h2 id="header"><a class="toclink" href="#header">Header</a></h2>\n')
toc_header = '<!--- Required extensions: toc(title=Table of contents, baselevel=3) wikilinks --->\n\n'
html = markup.convert(toc_header + '[TOC]\n\n# Header\n[[Link]]').get_document_body()
self.assertEqual(html, toc_header +
'<div class="toc"><span class="toctitle">Table of contents</span><ul>\n'
'<li><a href="#header">Header</a></li>\n'
'</ul>\n</div>\n'
'<h3 id="header">Header</h3>\n'
'<p><a class="wikilink" href="/Link/">Link</a></p>\n')
def test_document_extensions_change(self):
"""Extensions from document should be replaced on each run, not added."""
markup = MarkdownMarkup(extensions=[])
toc_header = '<!-- Required extensions: toc -->\n\n'
content = '[TOC]\n\n# Header'
html = markup.convert(toc_header + content).get_document_body()
self.assertNotIn('<p>[TOC]</p>', html)
html = markup.convert(content).get_document_body()
self.assertIn('<p>[TOC]</p>', html)
html = markup.convert(toc_header + content).get_document_body()
self.assertNotIn('<p>[TOC]</p>', html)
def test_extra(self):
markup = MarkdownMarkup()
html = markup.convert(tables_source).get_document_body()
self.assertEqual(tables_output, html)
html = markup.convert(deflists_source).get_document_body()
self.assertEqual(deflists_output, html)
def test_remove_extra(self):
markup = MarkdownMarkup(extensions=['remove_extra'])
html = markup.convert(tables_source).get_document_body()
self.assertNotIn('<table>', html)
def test_remove_extra_document_extension(self):
markup = MarkdownMarkup(extensions=[])
html = markup.convert(
'Required-Extensions: remove_extra\n\n' +
tables_source).get_document_body()
self.assertNotIn('<table>', html)
def test_remove_extra_double(self):
"""Removing extra twice should not cause a crash."""
markup = MarkdownMarkup(extensions=['remove_extra'])
markup.convert('Required-Extensions: remove_extra\n')
def test_remove_extra_removes_mathjax(self):
markup = MarkdownMarkup(extensions=['remove_extra'])
html = markup.convert('$$1$$').get_document_body()
self.assertNotIn('math/tex', html)
def test_meta(self):
markup = MarkdownMarkup()
text = ('Required-Extensions: meta\n'
'Title: Hello, world!\n\n'
'Some text here.')
title = markup.convert(text).get_document_title()
self.assertEqual('Hello, world!', title)
def test_default_math(self):
		# by default the $...$ delimiter should be disabled
markup = MarkdownMarkup(extensions=[])
self.assertEqual('<p>$1$</p>\n', markup.convert('$1$').get_document_body())
self.assertEqual('<p>\n<script type="math/tex; mode=display">1</script>\n</p>\n',
markup.convert('$$1$$').get_document_body())
def test_mathjax(self):
markup = MarkdownMarkup(extensions=['mathjax'])
# Escaping should work
self.assertEqual('', markup.convert('Hello, \\$2+2$!').get_javascript())
js = markup.convert(mathjax_source).get_javascript()
self.assertIn('<script', js)
body = markup.convert(mathjax_source).get_document_body()
self.assertEqual(mathjax_output, body)
def test_mathjax_document_extension(self):
markup = MarkdownMarkup()
text = mathjax_header + mathjax_source
body = markup.convert(text).get_document_body()
self.assertEqual(mathjax_header + mathjax_output, body)
def test_mathjax_multiline(self):
markup = MarkdownMarkup(extensions=['mathjax'])
body = markup.convert(mathjax_multiline_source).get_document_body()
self.assertEqual(mathjax_multiline_output, body)
def test_mathjax_multilevel(self):
markup = MarkdownMarkup()
body = markup.convert(mathjax_multilevel_source).get_document_body()
self.assertEqual(mathjax_multilevel_output, body)
def test_mathjax_asciimath(self):
markup = MarkdownMarkup(extensions=['mdx_math(use_asciimath=1)'])
converted = markup.convert(r'\( [[a,b],[c,d]] \)')
body = converted.get_document_body()
self.assertIn('<script type="math/asciimath">', body)
self.assertIn('<script type="text/javascript"', converted.get_javascript())
def test_not_loading_sys(self):
with self.assertWarnsRegex(ImportWarning, 'Extension "sys" does not exist.'):
markup = MarkdownMarkup(extensions=['sys'])
self.assertNotIn('sys', markup.extensions)
def test_extensions_txt_file(self):
with TemporaryDirectory() as tmpdirname:
txtfilename = join(tmpdirname, "markdown-extensions.txt")
with open(txtfilename, "w") as f:
f.write("foo\n# bar\nbaz(arg=value)\n")
markup = MarkdownMarkup(filename=join(tmpdirname, "foo.md"))
self.assertEqual(markup.global_extensions,
[("foo", {}), ("baz", {"arg": "value"})])
@unittest.skipIf(yaml is None, "PyYAML module is not available")
def test_extensions_yaml_file(self):
with TemporaryDirectory() as tmpdirname:
yamlfilename = join(tmpdirname, "markdown-extensions.yaml")
with open(yamlfilename, "w") as f:
f.write('- smarty:\n'
' substitutions:\n'
' left-single-quote: "‚"\n'
' right-single-quote: "‘"\n'
' smart_dashes: False\n'
'- toc:\n'
' permalink: True\n'
' separator: "_"\n'
' toc_depth: 3\n'
'- sane_lists\n')
markup = MarkdownMarkup(filename=join(tmpdirname, "foo.md"))
self.assertEqual(
markup.global_extensions,
[("smarty", {"substitutions": {"left-single-quote": "‚",
"right-single-quote": "‘"},
"smart_dashes": False}),
("toc", {"permalink": True, "separator": "_", "toc_depth": 3}),
("sane_lists", {}),
])
converted = markup.convert("'foo' -- bar")
body = converted.get_document_body()
self.assertEqual(body, '<p>‚foo‘ -- bar</p>\n')
@unittest.skipIf(yaml is None, "PyYAML module is not available")
def test_extensions_yaml_file_invalid(self):
with TemporaryDirectory() as tmpdirname:
yamlfilename = join(tmpdirname, "markdown-extensions.yaml")
with open(yamlfilename, "w") as f:
f.write('[this is an invalid YAML file')
with self.assertWarns(SyntaxWarning) as cm:
MarkdownMarkup(filename=join(tmpdirname, "foo.md"))
self.assertIn("Failed parsing", str(cm.warning))
self.assertIn("expected ',' or ']'", str(cm.warning))
def test_codehilite(self):
markup = MarkdownMarkup(extensions=["codehilite"])
converted = markup.convert(' :::python\n import foo')
stylesheet = converted.get_stylesheet()
self.assertIn(".codehilite .k {", stylesheet)
body = converted.get_document_body()
self.assertIn('<div class="codehilite">', body)
def test_codehilite_custom_class(self):
markup = MarkdownMarkup(extensions=["codehilite(css_class=myclass)"])
converted = markup.convert(' :::python\n import foo')
stylesheet = converted.get_stylesheet()
self.assertIn(".myclass .k {", stylesheet)
body = converted.get_document_body()
self.assertIn('<div class="myclass">', body)
@unittest.skipIf(pymdownx is None, "pymdownx module is not available")
def test_pymdownx_highlight(self):
markup = MarkdownMarkup(extensions=["pymdownx.highlight"])
converted = markup.convert(' import foo')
stylesheet = converted.get_stylesheet()
self.assertIn(".highlight .k {", stylesheet)
body = converted.get_document_body()
self.assertIn('<div class="highlight">', body)
@unittest.skipIf(pymdownx is None, "pymdownx module is not available")
def test_pymdownx_highlight_custom_class(self):
markup = MarkdownMarkup(extensions=["pymdownx.highlight(css_class=myclass)"])
converted = markup.convert(' import foo')
stylesheet = converted.get_stylesheet()
self.assertIn(".myclass .k {", stylesheet)
body = converted.get_document_body()
self.assertIn('<div class="myclass">', body)
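# A minimal usage sketch of the API exercised by the tests above (assuming the
# Markdown backend is installed): convert() returns an object whose
# get_document_body() method yields the rendered HTML fragment.
if __name__ == '__main__':
	if MarkdownMarkup.available():
		demo_markup = MarkdownMarkup()
		print(demo_markup.convert('# Hello, *world*!').get_document_body())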
| 33.085106
| 115
| 0.694453
|
fc17bbc6839a7d954a5f5ec480ecddcbec6addbf
| 1,087
|
py
|
Python
|
setup.py
|
pymetrics/cookiecutter-pypackage
|
59328c468b51713a1b01fd3e8dbdc8c1cf0a831b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
pymetrics/cookiecutter-pypackage
|
59328c468b51713a1b01fd3e8dbdc8c1cf0a831b
|
[
"BSD-3-Clause"
] | 4
|
2021-11-18T14:46:50.000Z
|
2021-11-29T07:36:35.000Z
|
setup.py
|
pymetrics/cookiecutter-pypackage
|
59328c468b51713a1b01fd3e8dbdc8c1cf0a831b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(
name="cookiecutter-python-library",
packages=[],
version="0.2.0",
description="Cookiecutter template for a Python library",
author="Pymetrics, Inc.",
license="BSD",
url="https://github.com/pymetrics/cookiecutter-python-library",
keywords=[
"cookiecutter",
"template",
"package",
],
python_requires=">=3.7",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development",
],
)
| 31.057143
| 70
| 0.594296
|
0ad134cac31d602dccf7d3121ebf88c19fa0bfec
| 2,642
|
py
|
Python
|
src/inst_NARVAL.py
|
hlruh/serval
|
f2b43737aa865f7b5f9fb1fdb5d25483f37b068c
|
[
"MIT"
] | 28
|
2017-10-31T22:26:05.000Z
|
2022-03-15T18:18:18.000Z
|
src/inst_NARVAL.py
|
hlruh/serval
|
f2b43737aa865f7b5f9fb1fdb5d25483f37b068c
|
[
"MIT"
] | 50
|
2018-01-11T13:47:03.000Z
|
2022-03-11T08:57:58.000Z
|
src/inst_NARVAL.py
|
hlruh/serval
|
f2b43737aa865f7b5f9fb1fdb5d25483f37b068c
|
[
"MIT"
] | 8
|
2019-04-24T14:56:33.000Z
|
2022-01-18T08:28:15.000Z
|
from read_spec import *
from arrays import Arrays
# Instrument parameters
name = __name__[5:]
obsloc = dict(lat=42.9333, lon= 0.1333, elevation=2869.4)
obsname = None
pat = '*.fits'
iomax = 40
#pmax = 7800 - 300
pmax = -300
#oset = ':66'
snmax = 1900
maskfile = 'telluric_mask_carm_short.dat'
# Instrument read functions
def scan(self, s, pfits=True):
with open(s[:-1]+'meta') as f:
hdrcards = f.read()
hdr = dict(x.replace("'", "").split(" = ") for x in hdrcards.split("\n"))
self.instname = hdr['Instrument']
self.drsberv = float(hdr.get('Helvel', np.nan)[:-5]) # strip km/s
self.drsbjd = hdr.get('BTLA', np.nan)
self.dateobs = hdr['Observation date']
self.mjd = float(hdr.get('Julian Date'))
self.drift = np.nan
self.e_drift = np.nan
self.sn55 = float(hdr.get('SnrMax', 50))
self.fileid = self.dateobs
self.calmode = "%s,%s,%s" % (hdr.get('Instrumental Mode', ''), hdr.get('Stokes', ''), hdr.get('P_NAME2', ''))
self.timeid = self.fileid
self.ra = float(hdr['RA'])
self.de = float(hdr['DEC'])
self.airmass = float(hdr.get('Airmass', np.nan))
self.exptime = float(hdr['Texposure'].strip(' sec '))
self.tmmean = 0.5
self.header = pyfits.Header({'HIERARCH '+k:v for k,v in hdr.items()})
self.header['OBJECT'] = hdr['Object name']
def data(self, orders=None, pfits=True):
# w_air, f, e = np.loadtxt(self.filename, skiprows=2, unpack=True) # very slow
with open(self.filename) as f:
f.readline()
f.readline()
_data = np.fromfile(f, sep=" ")
w_air, f, e = _data.reshape(-1, 3).T
w = airtovac(w_air*10*(1.000-self.drsberv/3e5 )) # or /(1+berv)?
# find the jumps and split into orders
idx = list(np.where(abs(np.diff(w)) > 0.1)[0]+1)
oidx = [slice(*sl) for sl in zip([0]+idx, idx+[None])]
idx = [0] + idx + [len(f)]
if 0:
# padding orders with nans
nx = np.diff(idx).max()
no = len(oidx)
f_ = f
w_ = w
e_ = e
f = np.zeros((no,nx)) * np.nan
w = np.zeros((no,nx)) * np.nan
e = np.zeros((no,nx)) * np.nan
for o in np.arange(no):
w[o,:idx[o+1]-idx[o]] = w_[oidx[o]]
f[o,:idx[o+1]-idx[o]] = f_[oidx[o]]
e[o,:idx[o+1]-idx[o]] = e_[oidx[o]]
else:
w = Arrays([w[oi] for oi in oidx])
f = Arrays([f[oi] for oi in oidx])
e = Arrays([e[oi] for oi in oidx])
self.bpmap = bpmap = np.isnan(f).astype(int) # flag 1 for nan
with np.errstate(invalid='ignore'):
bpmap[f < -3*e] |= flag.neg
bpmap[e==0] |= flag.nan
return w[orders], f[orders], e[orders], bpmap[orders]
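# A minimal, self-contained sketch of the order-splitting idea used in data()
# above: wavelength jumps larger than 0.1 mark the boundaries between spectral
# orders, and the slice list rebuilds one array per order. The toy wavelengths
# below are illustrative only.
if __name__ == "__main__":
    import numpy as np
    w_demo = np.array([500.00, 500.01, 500.02, 510.00, 510.01, 510.02])
    jumps = list(np.where(abs(np.diff(w_demo)) > 0.1)[0] + 1)
    order_slices = [slice(*sl) for sl in zip([0] + jumps, jumps + [None])]
    print([w_demo[sl] for sl in order_slices])  # two orders of three pixels each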
| 29.355556
| 112
| 0.571537
|
d3c228015a2233245ff1fbb62ea8a55ceb8e250d
| 869
|
py
|
Python
|
src/billing/migrations/0023_auto_20160425_1220.py
|
paveu/srvup_rest
|
97491df4106d5e8b951c6117770fe74072612e49
|
[
"MIT"
] | 1
|
2015-10-10T16:49:30.000Z
|
2015-10-10T16:49:30.000Z
|
src/billing/migrations/0023_auto_20160425_1220.py
|
paveu/srvup_rest
|
97491df4106d5e8b951c6117770fe74072612e49
|
[
"MIT"
] | null | null | null |
src/billing/migrations/0023_auto_20160425_1220.py
|
paveu/srvup_rest
|
97491df4106d5e8b951c6117770fe74072612e49
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('billing', '0022_auto_20160422_2020'),
]
operations = [
migrations.AlterField(
model_name='membership',
name='date_end',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 25, 12, 20, 13, 634171, tzinfo=utc), verbose_name=b'End Date'),
preserve_default=True,
),
migrations.AlterField(
model_name='membership',
name='date_start',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 25, 12, 20, 13, 634108, tzinfo=utc), verbose_name=b'Start Date'),
preserve_default=True,
),
]
| 29.965517
| 139
| 0.631761
|
0e09a46a6d658bff42421b85f3708c9190dd39ef
| 478
|
py
|
Python
|
minion/errors.py
|
timofurrer/minion-ci
|
411d0ea6638fb37d7e170cc8c8c5815304cc9f5c
|
[
"MIT"
] | 49
|
2016-03-07T06:42:40.000Z
|
2021-03-06T02:43:02.000Z
|
minion/errors.py
|
timofurrer/minion-ci
|
411d0ea6638fb37d7e170cc8c8c5815304cc9f5c
|
[
"MIT"
] | 16
|
2016-03-08T07:20:52.000Z
|
2017-04-21T18:15:12.000Z
|
minion/errors.py
|
timofurrer/minion-ci
|
411d0ea6638fb37d7e170cc8c8c5815304cc9f5c
|
[
"MIT"
] | 9
|
2016-03-29T22:08:52.000Z
|
2021-06-16T16:29:30.000Z
|
"""
`minion-ci` is a minimalist, decentralized, flexible Continuous Integration Server for hackers.
This module contains the exceptions specific to `minion` errors.
:copyright: (c) by Timo Furrer
:license: MIT, see LICENSE for details
"""
class MinionError(Exception):
"""Exception which is raised for minion specific errors."""
pass
class MinionMongoError(Exception):
"""Exception raised for minion specific errors related to mongodb"""
pass
| 29.875
| 99
| 0.723849
|
72d8c238a2e1817098eefcae18b0a3b56aedeb6b
| 1,875
|
py
|
Python
|
paddlex/interpret/core/interpretation.py
|
Channingss/PaddleX
|
06fe9552472f0379ff1a16c339c9784c973b5a04
|
[
"Apache-2.0"
] | 3
|
2020-05-12T03:09:13.000Z
|
2020-06-18T02:50:34.000Z
|
paddlex/interpret/core/interpretation.py
|
wyc880622/PaddleX
|
f001960b7359f3a88b7dd96e1f34500b90566ceb
|
[
"Apache-2.0"
] | null | null | null |
paddlex/interpret/core/interpretation.py
|
wyc880622/PaddleX
|
f001960b7359f3a88b7dd96e1f34500b90566ceb
|
[
"Apache-2.0"
] | 1
|
2020-05-18T07:06:28.000Z
|
2020-05-18T07:06:28.000Z
|
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from .interpretation_algorithms import CAM, LIME, NormLIME
from .normlime_base import precompute_normlime_weights
class Interpretation(object):
"""
Base class for all interpretation algorithms.
"""
def __init__(self, interpretation_algorithm_name, predict_fn, label_names, **kwargs):
supported_algorithms = {
'cam': CAM,
'lime': LIME,
'normlime': NormLIME
}
self.algorithm_name = interpretation_algorithm_name.lower()
assert self.algorithm_name in supported_algorithms.keys()
self.predict_fn = predict_fn
# initialization for the interpretation algorithm.
self.algorithm = supported_algorithms[self.algorithm_name](
self.predict_fn, label_names, **kwargs
)
def interpret(self, data_, visualization=True, save_to_disk=True, save_dir='./tmp'):
"""
Args:
data_: data_ can be a path or numpy.ndarray.
visualization: whether to show using matplotlib.
save_to_disk: whether to save the figure in local disk.
save_dir: dir to save figure if save_to_disk is True.
Returns:
"""
return self.algorithm.interpret(data_, visualization, save_to_disk, save_dir)
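# A minimal usage sketch (illustrative only; the names below are assumptions,
# not part of PaddleX). A caller supplies a prediction function and the label
# names, then points interpret() at an image path or array:
#
#     def demo_predict_fn(images):
#         ...  # hypothetical forward pass of a trained classifier
#
#     interpreter = Interpretation('lime', demo_predict_fn, ['cat', 'dog'])
#     interpreter.interpret('example.jpg', visualization=False,
#                           save_to_disk=True, save_dir='./tmp')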
| 36.057692
| 89
| 0.696533
|
2177a7a5e622a623db5afca53545157c7bcc5d39
| 1,375
|
py
|
Python
|
api/analyses/analyses.py
|
capdragon/cannlytics
|
47eeda80b1faf54d709def3641d9476501508fec
|
[
"MIT"
] | null | null | null |
api/analyses/analyses.py
|
capdragon/cannlytics
|
47eeda80b1faf54d709def3641d9476501508fec
|
[
"MIT"
] | null | null | null |
api/analyses/analyses.py
|
capdragon/cannlytics
|
47eeda80b1faf54d709def3641d9476501508fec
|
[
"MIT"
] | null | null | null |
"""
Analyses Views | Cannlytics API
Created: 4/21/2021
API to interface with cannabis regulation information.
"""
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
@api_view(['GET', 'POST', 'DELETE'])
def analyses(request, format=None):
"""Get, create, or update information about cannabis analyses."""
if request.method == 'GET':
# TODO: Implement filters!
# data = get_collection(f"labs/{org_id}/analyses")
return Response({'error': 'not_implemented'}, content_type='application/json')
elif request.method == 'POST':
return Response({'error': 'not_implemented'}, content_type='application/json')
elif request.method == 'DELETE':
return Response({'error': 'not_implemented'}, content_type='application/json')
@api_view(['GET', 'POST', 'DELETE'])
def analytes(request, format=None):
"""Get, create, or update information about cannabis analysis analytes."""
if request.method == 'GET':
return Response({'error': 'not_implemented'}, content_type='application/json')
elif request.method == 'POST':
return Response({'error': 'not_implemented'}, content_type='application/json')
elif request.method == 'DELETE':
return Response({'error': 'not_implemented'}, content_type='application/json')
| 30.555556
| 86
| 0.690909
|
4bec77a28acc049066498aaeac09943c25f0e2df
| 2,008
|
py
|
Python
|
setup.py
|
angelsantosa/django-simple-mail
|
bac536af0acfdcfe006338b0da9d44c3db91b274
|
[
"MIT"
] | 1
|
2020-01-17T15:56:24.000Z
|
2020-01-17T15:56:24.000Z
|
setup.py
|
angelsantosa/django-simple-mail
|
bac536af0acfdcfe006338b0da9d44c3db91b274
|
[
"MIT"
] | null | null | null |
setup.py
|
angelsantosa/django-simple-mail
|
bac536af0acfdcfe006338b0da9d44c3db91b274
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import simple_mail
from setuptools import setup, find_packages
README = os.path.join(os.path.dirname(__file__), 'README.rst')
# When running tests using tox, README.rst is not found
try:
with open(README) as file:
long_description = file.read()
except Exception:
long_description = ''
setup(
name='django_simple_mail',
version=simple_mail.__version__,
description='A simple and customizable email template built for Django',
long_description=long_description,
url='https://github.com/charlesthk/django-simple-mail',
author='Charles TISSIER',
author_email='charles@vingtcinq.io',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.0',
'Framework :: Django :: 1.11',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.9',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='python django mail html template',
packages=find_packages(),
include_package_data=True,
install_requires=[
'html2text>=2018.1.9',
'premailer>=3.2.0',
'Pillow>=5.2.0',
'django-imagekit>=4.0.2',
'six>=1.11.0',
],
# test_suite='tests',
)
| 32.918033
| 76
| 0.608068
|
ca132b45c241e394b9896801cdbae65dba12d032
| 3,291
|
py
|
Python
|
nplusone/ext/sqlalchemy.py
|
jsl-takino/nplusone
|
8137b102f01a6da2a44af874257568f5c05fe624
|
[
"MIT"
] | 1
|
2022-02-22T07:42:58.000Z
|
2022-02-22T07:42:58.000Z
|
nplusone/ext/sqlalchemy.py
|
jsl-takino/nplusone
|
8137b102f01a6da2a44af874257568f5c05fe624
|
[
"MIT"
] | null | null | null |
nplusone/ext/sqlalchemy.py
|
jsl-takino/nplusone
|
8137b102f01a6da2a44af874257568f5c05fe624
|
[
"MIT"
] | 1
|
2021-08-23T05:38:31.000Z
|
2021-08-23T05:38:31.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import itertools
from sqlalchemy.orm import query
from sqlalchemy.orm import loading
from sqlalchemy.orm import attributes
from sqlalchemy.orm import strategies
from nplusone.core import signals
def to_key(instance):
model = type(instance)
return ':'.join(itertools.chain(
[model.__name__],
(
format(instance.__dict__.get(key.key)) # Avoid recursion on __get__
for key in get_primary_keys(model)
)
))
def get_primary_keys(model):
mapper = model.__mapper__
return [
mapper.get_property_by_column(column)
for column in mapper.primary_key
]
def parse_load(args, kwargs, context, ret):
return [
to_key(row) for row in ret
if hasattr(row, '__table__')
]
def parse_lazy_load(args, kwargs, context):
loader, state, _ = args
return state.object.__class__, to_key(state.object), loader.parent_property.key
def parse_attribute_get(args, kwargs, context):
attr, instance = args[:2]
if instance is None:
return None
return attr.class_, attr.key, [to_key(instance)]
strategies.LazyLoader._load_for_state = signals.signalify(
signals.lazy_load,
strategies.LazyLoader._load_for_state,
parser=parse_lazy_load,
)
def parse_populate(args, kwargs, context):
query_context = args[0]
state = args[2]
instance = state.object
return instance.__class__, context['key'], [to_key(instance)], id(query_context)
# Emit `eager_load` on populating from `joinedload` or `subqueryload`
original_populate_full = loading._populate_full
def _populate_full(*args, **kwargs):
ret = original_populate_full(*args, **kwargs)
context = inspect.getcallargs(original_populate_full, *args, **kwargs)
for key, _ in context['populators'].get('eager', []):
if context['dict_'].get(key):
signals.eager_load.send(
signals.get_worker(),
args=args,
kwargs=kwargs,
context={'key': key},
parser=parse_populate,
)
return ret
loading._populate_full = _populate_full
attributes.InstrumentedAttribute.__get__ = signals.signalify(
signals.touch,
attributes.InstrumentedAttribute.__get__,
parser=parse_attribute_get,
)
def is_single(offset, limit):
return limit is not None and limit - (offset or 0) == 1
original_query_iter = query.Query.__iter__
def query_iter(self):
ret, clone = itertools.tee(original_query_iter(self))
signal = (
signals.ignore_load
if is_single(self._offset, self._limit)
else signals.load
)
signal.send(
signals.get_worker(),
args=(self, ),
ret=list(clone),
parser=parse_load,
)
return ret
query.Query.__iter__ = query_iter
def parse_get(args, kwargs, context, ret):
return [to_key(ret)] if hasattr(ret, '__table__') else []
# Ignore records loaded during `one`
for method in ['one_or_none', 'one']:
try:
original = getattr(query.Query, method)
except AttributeError:
continue
decorated = signals.signalify(signals.ignore_load, original, parse_get)
setattr(query.Query, method, decorated)
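# A minimal, self-contained sketch of the key format produced by to_key() above:
# "<ModelName>:<primary key values>". The declarative model below is an
# assumption used purely for illustration.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base

    DemoBase = declarative_base()

    class DemoUser(DemoBase):
        __tablename__ = 'demo_users'
        id = Column(Integer, primary_key=True)

    print(to_key(DemoUser(id=7)))  # -> "DemoUser:7"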
| 25.913386
| 84
| 0.673959
|
1c82d9fcc2e7d9515f70eb527b9fe0b037d9e707
| 2,943
|
py
|
Python
|
tools/marvin/marvin/deployAndRun.py
|
cinderella/incubator-cloudstack
|
0af2fafcdc374ee7f09f78391b386fd6acbdf5c2
|
[
"Apache-2.0"
] | null | null | null |
tools/marvin/marvin/deployAndRun.py
|
cinderella/incubator-cloudstack
|
0af2fafcdc374ee7f09f78391b386fd6acbdf5c2
|
[
"Apache-2.0"
] | null | null | null |
tools/marvin/marvin/deployAndRun.py
|
cinderella/incubator-cloudstack
|
0af2fafcdc374ee7f09f78391b386fd6acbdf5c2
|
[
"Apache-2.0"
] | 1
|
2019-05-02T23:20:53.000Z
|
2019-05-02T23:20:53.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import deployDataCenter
import TestCaseExecuteEngine
from optparse import OptionParser
import os
if __name__ == "__main__":
parser = OptionParser() #TODO: deprecate and use the argparse module
parser.add_option("-c", "--config", action="store", default="./datacenterCfg", dest="config", help="the path where the json config file generated, by default is ./datacenterCfg")
parser.add_option("-d", "--directory", dest="testCaseFolder", help="the test case directory")
parser.add_option("-r", "--result", dest="result", help="test result log file")
parser.add_option("-t", "--client", dest="testcaselog", help="test case log file")
parser.add_option("-l", "--load", dest="load", action="store_true", help="only load config, do not deploy, it will only run testcase")
parser.add_option("-f", "--file", dest="module", help="run tests in the given file")
parser.add_option("-x", "--xml", dest="xmlrunner", help="use the xml runner to generate xml reports and path to store xml files")
(options, args) = parser.parse_args()
testResultLogFile = None
if options.result is not None:
testResultLogFile = options.result
testCaseLogFile = None
if options.testcaselog is not None:
testCaseLogFile = options.testcaselog
deploy = deployDataCenter.deployDataCenters(options.config)
if options.load:
deploy.loadCfg()
else:
deploy.deploy()
format = "text"
xmlDir = None
if options.xmlrunner is not None:
xmlDir = options.xmlrunner
format = "xml"
if options.testCaseFolder is None:
if options.module is None:
parser.print_usage()
exit(1)
else:
engine = TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, testCaseLogFile, testResultLogFile, format, xmlDir)
engine.loadTestsFromFile(options.module)
engine.run()
else:
engine = TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, testCaseLogFile, testResultLogFile, format, xmlDir)
engine.loadTestsFromDir(options.testCaseFolder)
engine.run()
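# Example invocations (illustrative only; the paths are placeholders):
#
#   python deployAndRun.py -c ./datacenterCfg -d /path/to/testcases -x /tmp/xml-reports
#   python deployAndRun.py -c ./datacenterCfg -l -f /path/to/test_module.py \
#       -r /tmp/results.log -t /tmp/testcase.log
#
# The first form deploys the data center described by the config and runs every
# test case found under the directory, writing XML reports; the second only
# loads an existing config (-l) and runs the tests from a single file.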
| 43.925373
| 182
| 0.703024
|
0b7f71f2c97773a3330b509b3de5f8fe223ab1fe
| 2,143
|
py
|
Python
|
Spy-Game/code.py
|
lakshit-sharma/greyatom-python-for-data-science
|
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
|
[
"MIT"
] | null | null | null |
Spy-Game/code.py
|
lakshit-sharma/greyatom-python-for-data-science
|
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
|
[
"MIT"
] | null | null | null |
Spy-Game/code.py
|
lakshit-sharma/greyatom-python-for-data-science
|
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
|
[
"MIT"
] | null | null | null |
# --------------
##File path for the file
file_path
#Code starts here
def read_file(path):
file = open(path, 'r')
sentence = file.readline()
file.close()
return sentence
sample_message = read_file(file_path)
# --------------
#Code starts here
message_1 = read_file(file_path_1)
message_2 = read_file(file_path_2)
print(message_1, message_2)
def fuse_msg(message_a, message_b):
quotient = int(message_b)//int(message_a)
return str(quotient)
secret_msg_1 = fuse_msg(message_1, message_2)
# --------------
#Code starts here
message_3 = read_file(file_path_3)
print(message_3)
def substitute_msg(message_c):
sub = ""
if message_c == "Red":
sub = "Army General"
elif message_c == "Green":
sub = "Data Scientist"
elif message_c == "Blue":
sub = "Marine Biologist"
return sub
secret_msg_2 = substitute_msg(message_3)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4 = read_file(file_path_4)
message_5 = read_file(file_path_5)
print(message_4, message_5)
def compare_msg(message_d, message_e):
a_list = message_d.split(" ")
b_list = message_e.split(" ")
c_list = []
c_list = [i for i in a_list if i not in b_list]
final_msg = " ".join(c_list)
return final_msg
secret_msg_3 = compare_msg(message_4, message_5)
# --------------
#Code starts here
message_6 = read_file(file_path_6)
print(message_6)
def extract_msg(message_f):
a_list = message_f.split(" ")
even_word = lambda x: len(x)%2 == 0
b_list = list(filter(even_word, a_list))
final_msg = " ".join(b_list)
return final_msg
secret_msg_4 = extract_msg(message_6)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = " ".join(message_parts)
def write_file(secret_msg, path):
    fp = open(path, 'a+')
fp.write(secret_msg)
fp.close()
write_file(secret_msg, final_path)
print(secret_msg)
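# A few illustrative values for the helpers above (the file_path variables are
# supplied by the exercise environment, so only the pure string helpers are
# shown here):
#
#   fuse_msg('2', '10') -> '5' (integer quotient of the second by the first)
#   substitute_msg('Red') -> 'Army General'
#   compare_msg('a b c', 'b') -> 'a c' (words of the first message absent from the second)
#   extract_msg('to be or not') -> 'to be or' (keeps only even-length words)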
| 18.474138
| 71
| 0.664956
|
ba22ef264f10c4f7a4e49d9ce0c61e57c347e46c
| 1,202
|
py
|
Python
|
netado/urls.py
|
Ciwara/neta
|
c8c31a3846ea6e8f8baaa70d5fb19ebac3c7a425
|
[
"Apache-2.0"
] | null | null | null |
netado/urls.py
|
Ciwara/neta
|
c8c31a3846ea6e8f8baaa70d5fb19ebac3c7a425
|
[
"Apache-2.0"
] | 5
|
2016-11-03T16:45:51.000Z
|
2017-03-28T10:17:30.000Z
|
netado/urls.py
|
Ciwara/neta
|
c8c31a3846ea6e8f8baaa70d5fb19ebac3c7a425
|
[
"Apache-2.0"
] | null | null | null |
"""netado URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from neta import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^home/', views.home, name='home'),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
url(r'^admin/', admin.site.urls),
url(r'add_user/$', views.add_user, name='add-user')
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
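# Illustrative resolution of the named routes above (hypothetical helper; not
# wired into Django, call it from a shell once this URLconf is active):
def _named_route_examples():
    from django.urls import reverse
    return reverse('home'), reverse('add-user')   # '/home/', '/add_user/'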
| 38.774194
| 79
| 0.706323
|
292697beb33121295e9c9bbef5b57f5616226830
| 10,426
|
py
|
Python
|
EvalBox/Attack/AdvAttack/ba.py
|
Yzx835/AISafety
|
eb09551814898c7f6d86641b47faf7845c948640
|
[
"MIT"
] | 32
|
2020-10-20T06:12:48.000Z
|
2022-03-30T03:31:24.000Z
|
EvalBox/Attack/AdvAttack/ba.py
|
Yzx835/AISafety
|
eb09551814898c7f6d86641b47faf7845c948640
|
[
"MIT"
] | 2
|
2021-03-24T13:54:50.000Z
|
2021-10-11T13:37:31.000Z
|
EvalBox/Attack/AdvAttack/ba.py
|
Yzx835/AISafety
|
eb09551814898c7f6d86641b47faf7845c948640
|
[
"MIT"
] | 19
|
2020-10-22T05:42:51.000Z
|
2022-02-04T07:07:39.000Z
|
#!/usr/bin/env python
# coding=UTF-8
"""
@Author: Linna
@LastEditors: Linna
@Description:
@Date: 2019-04-26 11:00:10
@LastEditTime: 2019-04-26
"""
import numpy as np
import torch
from torch.autograd import Variable
from EvalBox.Attack.AdvAttack.attack import Attack
class BA(Attack):
def __init__(self, model=None, device=None, IsTargeted=None, **kwargs):
"""
@description: The Boundary Attack
@param {
model:
device:
kwargs:
}
@return: None
"""
super(BA, self).__init__(model, device, IsTargeted)
# self.criterion = torch.nn.CrossEntropyLoss()
self._parse_params(**kwargs)
    def _parse_params(self, **kwargs):
        """
        @description: parse the attack's keyword arguments
        @param {
            epsilon: step size coefficient for the perturbation
            delta: scale of the rescaled orthogonal perturbation
            lower_bound, upper_bound: valid range of the normalized data
            max_iter: inner iterations per update
            binary_search_steps: iterations used to search a suitable epsilon
            batch_size: number of samples handled per batch
            step_adapt: factor used to adapt delta and epsilon
            sample_size: number of candidate perturbed samples per step
            init_size: number of random samples tried for initialization
        }
        @return: None
        """
        # step size coefficient for the perturbation
        self.epsilon = float(kwargs.get("epsilon", 0.01))
        # scale of the rescaled perturbation
        self.delta = float(kwargs.get("delta", 0.01))
        # lower and upper bounds of the normalized data
        self.lower_bound = float(kwargs.get("lower_bound", 0.0))
        self.upper_bound = float(kwargs.get("upper_bound", 1.0))
        # maximum number of inner iterations for updating the perturbed sample
        self.max_iter = int(kwargs.get("max_iter", 10))
        # number of iterations used to search for a suitable epsilon
        self.binary_search_steps = int(kwargs.get("binary_search_steps", 20))
        # batch size for a single pass
        self.batch_size = int(kwargs.get("batch_size", 8))
        # update factor used to adapt the delta coefficient
        self.step_adapt = float(kwargs.get("step_adapt", 0.9))
        # number of candidate perturbed samples drawn during the process
        self.sample_size = int(kwargs.get("sample_size", 80))
        # number of random samples used for initialization
        self.init_size = int(kwargs.get("init_size", 200))
    # get the distance between two samples
def get_diff(self, sample1, sample2):
return np.linalg.norm((sample1 - sample2).astype(np.float32))
    # get a Gaussian noise sample
def gaussian_sample_noise(self, epsilon, imageshape, bounds):
min_, max_ = bounds
std = epsilon / np.sqrt(3) * (max_ - min_)
noise = np.random.normal(scale=std, size=imageshape)
noise = noise.astype(np.float32)
return noise
    # get a uniformly distributed noise sample
def unifom_sample_noise(self, epsilon, imageshape, bounds):
min_, max_ = bounds
w = epsilon * (max_ - min_)
noise = np.random.uniform(-w, w, size=imageshape)
noise = noise.astype(np.float32)
return noise
    # compute the L2 distance between samples
def get_dist(self, xs, x2s):
l2dist = torch.sum((xs - x2s) ** 2, [1, 2, 3])
return l2dist
def _perturb(self, x, y, y_p):
clip_min, clip_max = self.classifier.clip_values
# First, create an initial adversarial sample
initial_sample = self._init_sample(x, y, y_p, clip_min, clip_max)
# If an initial adversarial example is not found, then return the original image
if initial_sample is None:
return x
# If an initial adversarial example found, then go with boundary attack
if self.targeted:
x_adv = self._attack(
initial_sample, x, y, self.delta, self.epsilon, clip_min, clip_max
)
else:
x_adv = self._attack(
initial_sample, x, y_p, self.delta, self.epsilon, clip_min, clip_max
)
        return x_adv
    # initialize a random sample
def _init_sample(self, x, y, targeted, clip_min, clip_max):
nprd = np.random.RandomState()
initial_sample = None
if targeted:
# Attack satisfied
# Attack unsatisfied yet
for _ in range(self.init_size):
random_img_numpy = nprd.uniform(
clip_min, clip_max, size=x.shape
).astype(x.dtype)
random_img = np.expand_dims(random_img_numpy, axis=0)
tensor_random_img = Variable(
torch.from_numpy(random_img).to(self.device)
)
output = self.model(tensor_random_img)
random_class = torch.argmax(output, 1)
random_class = random_class.data.cpu().numpy()
if random_class[0] == y:
initial_sample = random_img_numpy
break
else:
for _ in range(self.init_size):
# random_img_numpy = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
mean_, std_ = np.mean(x), np.std(x)
random_img_numpy = nprd.normal(
loc=mean_, scale=2 * std_, size=x.shape
).astype(x.dtype)
random_img = np.expand_dims(random_img_numpy, axis=0)
tensor_random_img = Variable(
torch.from_numpy(random_img).to(self.device)
)
output = self.model(tensor_random_img)
random_class = torch.argmax(output, 1)
random_class = random_class.data.cpu().numpy()
initial_sample = random_img_numpy
if random_class[0] != y:
initial_sample = random_img_numpy
break
return initial_sample
    # generate an orthogonal perturbation
def _orthogonal_perturb(self, delta, current_sample, original_sample):
perturb = np.random.randn(
original_sample.shape[0], original_sample.shape[1], original_sample.shape[2]
)
# Rescale the perturbation
perturb /= np.linalg.norm(perturb)
perturb *= delta * np.linalg.norm(original_sample - current_sample)
# Project the perturbation onto sphere
direction = original_sample - current_sample
        perturb = np.swapaxes(perturb, 0, -1)
        direction = np.swapaxes(direction, 0, -1)
        vdot = np.vdot(perturb, direction)
        perturb -= vdot * direction
        perturb = np.swapaxes(perturb, 0, -1)
return perturb
    def compare(self, object1, object2, target_flag):
        return object1 == object2 if target_flag else object1 != object2
def generate(self, xs=None, ys=None, target_flag=False):
"""
@description:
@param {
xs:
ys:
device:
}
@return: adv_xs{numpy.ndarray}
"""
device = self.device
targeted = self.IsTargeted
var_xs, var_ys = Variable(xs.to(device)), Variable(ys.to(device))
with torch.no_grad():
outputs = self.model(var_xs)
preds = torch.argmax(outputs, 1)
preds = preds.data.cpu().numpy()
labels = ys.cpu().numpy()
n_xs = var_xs.cpu().numpy()
epsilon_list = [self.epsilon] * self.batch_size
delta_list = [self.delta] * self.batch_size
        # note: this is a copy, not a direct assignment
adversarial_samples = n_xs.copy()
        # get the first step of the boundary as init parameter and input
adversarial_sample = n_xs[0]
numbers = n_xs.shape[0]
rangenumbers = 0
if numbers <= self.batch_size:
rangenumbers = numbers
else:
rangenumbers = self.batch_size
for i in range(rangenumbers):
origin_sample = n_xs[i]
# Move to the first boundary
adversarial_sample = self._init_sample(
origin_sample, preds[i], target_flag, 0, 1
)
for search_for_epsilon in range(self.binary_search_steps):
for iteration_times in range(self.max_iter):
potential_perturbed_images = []
for _ in range(self.sample_size):
perturbed_image = adversarial_sample + self._orthogonal_perturb(
delta_list[i], adversarial_sample, origin_sample
)
perturbed_image = np.array(perturbed_image)
perturbed_image = np.clip(
perturbed_image, self.lower_bound, self.upper_bound
)
potential_perturbed_images.append(perturbed_image)
# potential_perturbed_images
var_images = Variable(
torch.from_numpy(np.array(potential_perturbed_images)).to(
self.device
)
)
predictions_outputs = self.model(var_images.float())
predictions = torch.argmax(predictions_outputs, 1)
predictions = predictions.data.cpu().numpy()
if target_flag:
satisfied = predictions == labels[i]
else:
satisfied = predictions != labels[i]
delta_ratio = np.mean(satisfied)
if delta_ratio < 0.5:
delta_list[i] *= self.step_adapt
else:
delta_list[i] /= self.step_adapt
if delta_ratio > 0:
adversarial_sample = np.array(potential_perturbed_images)[
np.where(satisfied)[0][0]
]
break
for _ in range(self.max_iter):
perturb = origin_sample - adversarial_sample
perturb *= epsilon_list[i]
potential_adv = adversarial_sample + perturb
potential_adv = np.clip(potential_adv, 0, 1)
potential_adv_expand = np.expand_dims(potential_adv, axis=0)
potential_image = Variable(
torch.from_numpy(potential_adv_expand).to(self.device)
)
output = self.model(potential_image.float())
pred_out = torch.argmax(output, 1)
pred_out = pred_out.data.cpu().numpy()
if target_flag:
satisfied = pred_out == labels[i]
else:
satisfied = pred_out != labels[i]
if satisfied:
adversarial_sample = potential_adv
epsilon_list[i] /= self.step_adapt
break
else:
epsilon_list[i] *= self.step_adapt
adversarial_samples[i] = adversarial_sample
return torch.from_numpy(adversarial_samples)
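# Illustrative usage sketch. Assumptions (not part of this module): `net` is
# any torch.nn.Module classifier accepting the same input shape as `xs`, the
# data lie in [0, 1] as the bounds above expect, and the Attack base class
# stores model/device/IsTargeted as the methods above rely on.
def _ba_demo():
    net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
    xs = torch.rand(2, 3, 32, 32)             # a tiny batch of images in [0, 1]
    ys = torch.randint(0, 10, (2,))
    attack = BA(model=net, device=torch.device("cpu"), IsTargeted=False,
                max_iter=2, init_size=10, sample_size=5, binary_search_steps=2)
    adv_xs = attack.generate(xs=xs, ys=ys, target_flag=False)
    return adv_xs.shape                       # same shape as xs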
| 35.222973
| 99
| 0.544025
|
88ba051697d9354695483192fdfb2e5e255e6d23
| 562
|
py
|
Python
|
foodtaskerapp/migrations/0006_auto_20191207_1139.py
|
Kgotso-Koete/foodTasker-Restaurant-web-app
|
2812983660ec2a1bf210085e202cafaf2c92b3ae
|
[
"MIT"
] | 10
|
2020-03-28T15:08:59.000Z
|
2021-12-09T18:14:04.000Z
|
foodtaskerapp/migrations/0006_auto_20191207_1139.py
|
Kgotso-Koete/foodTasker-Restaurant-web-app
|
2812983660ec2a1bf210085e202cafaf2c92b3ae
|
[
"MIT"
] | 5
|
2020-02-12T03:21:28.000Z
|
2021-06-10T22:19:36.000Z
|
foodtaskerapp/migrations/0006_auto_20191207_1139.py
|
Kgotso-Koete/foodTasker-Restaurant-web-app
|
2812983660ec2a1bf210085e202cafaf2c92b3ae
|
[
"MIT"
] | 10
|
2020-04-11T16:07:08.000Z
|
2021-12-12T15:36:50.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-12-07 11:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('foodtaskerapp', '0005_orderdetails'),
]
operations = [
migrations.AlterField(
model_name='order',
name='driver',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='foodtaskerapp.Driver'),
),
]
| 25.545455
| 131
| 0.656584
|
1f62c5758453ea8d6efc1466807a573b2bd85270
| 6,800
|
py
|
Python
|
src/meltano/core/plugin/project_plugin.py
|
learningequality/meltano
|
b1383d9d97053792f9880bd7a1becee95f7c6e73
|
[
"MIT"
] | 1
|
2021-03-04T01:29:23.000Z
|
2021-03-04T01:29:23.000Z
|
src/meltano/core/plugin/project_plugin.py
|
learningequality/meltano
|
b1383d9d97053792f9880bd7a1becee95f7c6e73
|
[
"MIT"
] | null | null | null |
src/meltano/core/plugin/project_plugin.py
|
learningequality/meltano
|
b1383d9d97053792f9880bd7a1becee95f7c6e73
|
[
"MIT"
] | 2
|
2021-07-23T06:18:07.000Z
|
2022-03-04T03:56:55.000Z
|
import copy
import logging
from typing import Optional
from meltano.core.setting_definition import SettingDefinition
from meltano.core.utils import flatten, uniques_in
from .base import PluginDefinition, PluginRef, PluginType, Variant
from .factory import base_plugin_factory
logger = logging.getLogger(__name__)
class CyclicInheritanceError(Exception):
"""Exception raised when project plugin inherits from itself cyclicly."""
def __init__(self, plugin: "ProjectPlugin", ancestor: "ProjectPlugin"):
"""Initialize cyclic inheritance error."""
self.plugin = plugin
self.ancestor = ancestor
def __str__(self):
"""Return error message."""
return "{type} '{name}' cannot inherit from '{ancestor}', which itself inherits from '{name}'".format(
type=self.plugin.type.descriptor.capitalize(),
name=self.plugin.name,
ancestor=self.ancestor.name,
)
class ProjectPlugin(PluginRef):
VARIANT_ATTR = "variant"
def __init__(
self,
plugin_type: PluginType,
name: str,
inherit_from: Optional[str] = None,
namespace: Optional[str] = None,
variant: Optional[str] = None,
pip_url: Optional[str] = None,
config: Optional[dict] = {},
default_variant=Variant.ORIGINAL_NAME,
**extras,
):
super().__init__(plugin_type, name)
# Attributes will be listed in meltano.yml in the order they are set on self:
self.inherit_from = (
inherit_from if inherit_from and inherit_from != name else None
)
# If a custom definition is provided, its properties will come before all others in meltano.yml
self.custom_definition = None
self._flattened.add("custom_definition")
self._parent = None
if not self.inherit_from and namespace:
# When not explicitly inheriting, a namespace indicates an embedded custom plugin definition
self.custom_definition = PluginDefinition(
plugin_type, name, namespace, variant=variant, pip_url=pip_url, **extras
)
# Any properties considered "extra" by the embedded plugin definition
# should be considered extras of the project plugin, since they are
# the current values, not default values.
extras = self.custom_definition.extras
self.custom_definition.extras = {}
# Typically, the parent is set from ProjectPluginsService.current_plugins,
# where we have access to the discoverable plugin definitions coming from
# PluginDiscoveryService, but here we can set the parent directly.
self.parent = base_plugin_factory(self.custom_definition, variant)
# These properties are also set on the parent, but can be overridden
self.namespace = namespace
self.set_presentation_attrs(extras)
self.variant = variant
self.pip_url = pip_url
self._fallbacks.update(
["logo_url", "description", self.VARIANT_ATTR, "pip_url"]
)
# If no variant is set, we fall back on the default
self._defaults[self.VARIANT_ATTR] = lambda _: default_variant
if self.inherit_from:
# When explicitly inheriting from a project plugin or discoverable definition,
# derive default values from our own name
self._defaults["namespace"] = lambda plugin: plugin.name.replace("-", "_")
self._defaults["label"] = lambda plugin: (
f"{plugin.parent.label}: {plugin.name}"
if plugin.parent
else plugin.name
)
else:
# When shadowing a discoverable definition with the same name (no `inherit_from`),
# or an embedded custom definition (with `namespace`), fall back on parent's
# values derived from its name instead
self._fallbacks.update(["namespace", "label"])
self.config = copy.deepcopy(config)
self.extras = extras
if "profiles" in extras:
logger.warning(
f"Plugin configuration profiles are no longer supported, ignoring `profiles` in '{name}' {plugin_type.descriptor} definition."
)
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, new_parent):
ancestor = new_parent
while isinstance(ancestor, self.__class__):
if ancestor == self:
raise CyclicInheritanceError(self, ancestor)
ancestor = ancestor.parent
self._parent = new_parent
self._fallback_to = new_parent
@property
def is_variant_set(self):
"""Return whether variant is set explicitly."""
return self.is_attr_set(self.VARIANT_ATTR)
@property
def info(self):
return {"name": self.name, "namespace": self.namespace, "variant": self.variant}
@property
def info_env(self):
# MELTANO_EXTRACTOR_...
return flatten({"meltano": {self.type.singular: self.info}}, "env_var")
def env_prefixes(self, for_writing=False):
prefixes = [self.name, self.namespace]
if for_writing:
prefixes.extend(self._parent.env_prefixes(for_writing=True))
            prefixes.append(f"meltano_{self.type.verb}")  # MELTANO_EXTRACT_...
return uniques_in(prefixes)
@property
def extra_config(self):
return {f"_{k}": v for k, v in self.extras.items()}
@property
def config_with_extras(self):
return {**self.config, **self.extra_config}
@config_with_extras.setter
def config_with_extras(self, new_config_with_extras):
self.config.clear()
self.extras.clear()
for k, v in new_config_with_extras.items():
if k.startswith("_"):
self.extras[k[1:]] = v
else:
self.config[k] = v
@property
def settings(self):
existing_settings = self._parent.settings
return [
*existing_settings,
*SettingDefinition.from_missing(existing_settings, self.config),
]
@property
def extra_settings(self):
existing_settings = self._parent.extra_settings
return [
*existing_settings,
*SettingDefinition.from_missing(existing_settings, self.extra_config),
]
@property
def settings_with_extras(self):
return [*self.settings, *self.extra_settings]
def is_custom(self):
return self.custom_definition is not None
@property
def is_shadowing(self):
"""Return whether this plugin is shadowing a base plugin with the same name."""
return not self.inherit_from
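# Standalone illustration (hypothetical data, not using the class above) of the
# underscore convention behind `extra_config` / `config_with_extras`: extras are
# exposed with a leading underscore and split back out on assignment.
def _split_config_with_extras(combined: dict):
    config = {k: v for k, v in combined.items() if not k.startswith("_")}
    extras = {k[1:]: v for k, v in combined.items() if k.startswith("_")}
    return config, extras


# _split_config_with_extras({"start_date": "2021-01-01", "_select": ["tap.*"]})
# -> ({"start_date": "2021-01-01"}, {"select": ["tap.*"]})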
| 34.693878
| 142
| 0.637647
|
a32bf8cd91eba6fa3329fc8f8e526a7da7d70c00
| 1,995
|
py
|
Python
|
strainrec_app/routes/strains_routes.py
|
bw-ft-med-cabinet-4/DS
|
d718120cf7544240f914557f5a2473968e68dd33
|
[
"MIT"
] | null | null | null |
strainrec_app/routes/strains_routes.py
|
bw-ft-med-cabinet-4/DS
|
d718120cf7544240f914557f5a2473968e68dd33
|
[
"MIT"
] | null | null | null |
strainrec_app/routes/strains_routes.py
|
bw-ft-med-cabinet-4/DS
|
d718120cf7544240f914557f5a2473968e68dd33
|
[
"MIT"
] | 1
|
2020-10-15T21:54:04.000Z
|
2020-10-15T21:54:04.000Z
|
from flask import Blueprint, render_template, redirect, jsonify, Response
import json
from bson import json_util
from strainrec_app.recommender import load_model, data
from strainrec_app.leafly_recommender import load_model as leafly_model
from strainrec_app.services.mongo_service import strains_collection
strains_routes = Blueprint("strains_routes", __name__)
@strains_routes.route("/")
def home():
return render_template("home.html")
@strains_routes.route("/strains.json")
def strains():
print(data)
print(dir(data))
strains_records = data.to_dict()
return jsonify(strains_records)
@strains_routes.route("/strains/recommend/<input_string>")
def recommend(input_string=None):
input = {"input": str(input_string)}
print(input)
package = load_model()
input_vec = package['tfidf'].transform([str(input_string)])
predictions = package['model'].kneighbors(input_vec.todense())
recommendations = predictions[1]
strains_info = data.iloc[recommendations[0]
].reset_index().to_json(orient='records', indent=2)
print("RESULT:", strains_info)
return jsonify(json.loads(strains_info))
@strains_routes.route("/api/v1/strains")
def api_strains():
srecords = strains_collection.find({'isStub': False})
resp = Response(json.dumps(
{'data': list(srecords)}, default=json_util.default), mimetype='application/json')
return resp
@strains_routes.route("/api/v1/recommend/<input_string>")
def api_recommend(input_string=None):
input_str = str(input_string)
print("INPUT: ", input_str)
package = leafly_model()
input_vec = package['tfidf'].transform([input_str])
predictions = package['model'].kneighbors(input_vec.todense())
recommendations = predictions[1]
strains_info = data.iloc[recommendations[0]
].reset_index().to_json(orient='records', indent=2)
print("RESULT:", strains_info)
return jsonify(json.loads(strains_info))
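# Illustrative usage sketch (assumptions: the models and data loaded by the
# imports above are available; the throwaway app below only exists to exercise
# this blueprint and is not part of the deployed application):
def _demo_request():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(strains_routes)
    client = app.test_client()
    resp = client.get("/strains/recommend/relaxed%20happy%20sleepy")
    return resp.status_code, resp.get_json()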
| 31.171875
| 90
| 0.715789
|
2d0d0c07f2f686aeb119adab4875094809880aa1
| 1,381
|
py
|
Python
|
00 - Very Basics/10-set-and-dictionary.py
|
vladimirioan82/fg-study-python-from-0-to-expert
|
501952e365edd8dce74d79453291a4563c24691d
|
[
"Apache-2.0"
] | null | null | null |
00 - Very Basics/10-set-and-dictionary.py
|
vladimirioan82/fg-study-python-from-0-to-expert
|
501952e365edd8dce74d79453291a4563c24691d
|
[
"Apache-2.0"
] | null | null | null |
00 - Very Basics/10-set-and-dictionary.py
|
vladimirioan82/fg-study-python-from-0-to-expert
|
501952e365edd8dce74d79453291a4563c24691d
|
[
"Apache-2.0"
] | null | null | null |
'''
Sets and Dictionaries
'''
# sets - sets are mutable
A = {1, 0, 6, 7, 9, 9, 6, 10, 25, 66, 38}
B = set({0, 6, 9, 25, 40, 90, 125, 654, 826})
print(A, type(A))
print(B, type(B))
print(10 in A)
print(666 in B)
for element in A:
print(element)
A.add(75)
print(A)
print(A.pop())
A.remove(38)
A.add(4)
A.add(697)
A.add(0)
print(A)
print('{} | {} = {}'.format(A, B, A | B)) # union
print('{} - {} = {}'.format(A, B, A - B)) # difference
print('{} & {} = {}'.format(A, B, A & B)) # intersection
print('{} ^ {} = {}'.format(A, B, A ^ B)) # symmetric difference
print('*' * 200)
# dictionaries
users = {
2423425: 'John',
2342412: 'Elvis',
3423121: 'Maria',
2578322: 'Elisa',
1214124: 'Elsa',
9342320: 'Joseph',
2420441: 'Josephina',
8920132: 'Carol',
9941332: 'Timotei',
8779945: 'Elsa'
}
languages = dict({
(0, 1): 'Java',
(0, 2): 'JavaScript',
('a', 'b'): 'Python',
('c', 'd'): 'Angular',
'a': 'Cobol',
'b': 'Pascal'
})
print(users, type(users))
print(languages, type(languages))
for key in languages.keys():
print('languages[{}] = {}'.format(key, languages[key]))
for key, value in users.items():
print('{} : {}'.format(key, value))
print(languages.values())
print(languages.get('a'))
print(languages.get((7, 9))) # this will return None
print(languages['a'])
# print(languages[(7, 9)]) - this will throw an error
print(languages[(0, 2)])
| 21.578125
| 64
| 0.577842
|
e539353e724cc20017d97db3854ef159ccbb3ce9
| 19,551
|
py
|
Python
|
main.py
|
Maxime26s/6GEI311-Lab5
|
339d43453302ccbe02da8545f8fc798bc33e3f2c
|
[
"MIT"
] | 1
|
2021-11-16T04:43:55.000Z
|
2021-11-16T04:43:55.000Z
|
main.py
|
Maxime26s/6GEI311-Lab5
|
339d43453302ccbe02da8545f8fc798bc33e3f2c
|
[
"MIT"
] | null | null | null |
main.py
|
Maxime26s/6GEI311-Lab5
|
339d43453302ccbe02da8545f8fc798bc33e3f2c
|
[
"MIT"
] | null | null | null |
# pip install -r requirements.txt
import tkinter
import numpy as np
from tkinter import *
import tkinter as tk
from tkinter import ttk
from functools import partial
from tkinter.filedialog import askopenfilename
from PIL import Image, ImageTk, ImageDraw
from image_processing import ImageProcessing
import cv2
from imutils.video import FileVideoStream, VideoStream
import threading
from time import time
from image_acquisition import get_image
import send_alert
import performance_statistics
# https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
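# Illustrative usage sketch for StoppableThread (hypothetical worker; the real
# usage in this app is Interface.start_thread further below). The target must
# poll stopped() and exit cooperatively.
def _stoppable_thread_demo():
    from time import sleep
    state = {}

    def _worker():
        while not state["thread"].stopped():
            sleep(0.01)                       # one small unit of work per loop

    state["thread"] = StoppableThread(target=_worker)
    state["thread"].start()
    state["thread"].stop()
    state["thread"].join(timeout=0.5)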
# https://www.pluralsight.com/guides/importing-image-data-into-numpy-arrays
# Class containing the application (tkinter)
class Options(tk.Toplevel):
def __init__(self, parent, oldOptions, confirm):
        # super().__init__(parent)  # not needed; this class manages its own Toplevel below
self.notebook = ttk.Notebook(parent)
self.notebook.grid(row=1, column=0)
self.confirm = confirm
self.threshold = tk.IntVar(value=20)
self.framerate = tk.IntVar(value=0)
self.compression_ratio = tk.DoubleVar(value=0.5)
self.scale_ratio = tk.DoubleVar(value=0.2)
self.kernel_size = tk.IntVar(value=5)
self.bg_buffer_size = tk.IntVar(value=10)
self.motion_buffer_size = tk.IntVar(value=2)
self.min_size_ratio = tk.DoubleVar(value=0.001)
self.shouldCombine = tk.BooleanVar(value=True)
self.gaussian_algo = tk.StringVar(value="CV2")
if oldOptions != None:
self.threshold.set(oldOptions.threshold.get())
self.framerate.set(oldOptions.framerate.get())
self.compression_ratio.set(oldOptions.compression_ratio.get())
self.scale_ratio.set(oldOptions.scale_ratio.get())
self.kernel_size.set(oldOptions.kernel_size.get())
self.bg_buffer_size.set(oldOptions.bg_buffer_size.get())
self.motion_buffer_size.set(oldOptions.motion_buffer_size.get())
self.min_size_ratio.set(oldOptions.min_size_ratio.get())
self.shouldCombine.set(oldOptions.shouldCombine.get())
self.gaussian_algo.set(oldOptions.gaussian_algo.get())
self.option = tkinter.Toplevel()
self.option.geometry("+1000+600")
self.option.title("Options")
        self.tab = ttk.Notebook(self.option)  # Creation of the tab system
self.t1 = ttk.Frame(self.tab)
self.t2 = ttk.Frame(self.tab)
self.t3 = ttk.Frame(self.tab)
self.t4 = ttk.Frame(self.tab)
self.t5 = ttk.Frame(self.tab)
self.t6 = ttk.Frame(self.tab)
self.tab.add(self.t1, text="General")
self.tab.add(self.t2, text="Resize")
self.tab.add(self.t3, text="Gaussian blur")
self.tab.add(self.t4, text="Image buffer")
self.tab.add(self.t5, text="Detection")
self.tab.pack(expand=1, fill="both")
self.add_tab1()
self.add_tab2()
self.add_tab3()
self.add_tab4()
self.add_tab5()
    # Reload the image processing module with the options
    def confirm_options(self):
        self.confirm()
    # Add the general options tab
def add_tab1(self):
self.l0 = Label(self.t1, text="Threshold", anchor="w")
self.l0.grid(row=0, column=0, padx=10, pady=(15, 10))
self.ent0 = Entry(self.t1, textvariable=self.threshold, width=21)
self.ent0.place(width=150, height=50)
self.ent0.grid(row=0, column=1, pady=5, padx=10)
self.l1 = Label(self.t1, text="Framerate", anchor="w")
self.l1.grid(row=1, column=0, padx=10, pady=10)
self.ent1 = Entry(self.t1, textvariable=self.framerate, width=21)
self.ent1.place(width=150, height=50)
self.ent1.grid(row=1, column=1, pady=5, padx=10)
button = Button(self.t1, text="Confirm", padx=24, command=self.confirm_options)
button.grid(row=2, columnspan=2, padx=5, pady=5)
    # Add the image compression options tab
def add_tab2(self):
self.l1 = Label(self.t2, text="Scale ratio", anchor="w")
self.l1.grid(row=1, column=0, padx=10, pady=(15, 10))
self.ent2 = Entry(self.t2, textvariable=self.scale_ratio, width=21)
self.ent2.place(width=150, height=50)
self.ent2.grid(row=1, column=1, pady=5, padx=10)
self.l2 = Label(self.t2, text="Compression", anchor="w")
self.l2.grid(row=2, column=0, padx=10, pady=10)
self.ent3 = Entry(self.t2, textvariable=self.compression_ratio, width=21)
self.ent3.place(width=150, height=50)
self.ent3.grid(row=2, column=1, pady=5, padx=10)
button = Button(self.t2, text="Confirm", padx=24, command=self.confirm_options)
button.grid(row=3, columnspan=2, padx=5, pady=5)
    # Add the Gaussian blur options tab
def add_tab3(self):
vlist = ["CV2", "Custom"]
self.l1 = Label(self.t3, text="Algorithme", anchor="w")
self.l1.grid(row=0, column=0, padx=10, pady=(15, 10))
self.combo = ttk.Combobox(
self.t3, textvariable=self.gaussian_algo, values=vlist
)
self.combo.set("CV2")
self.combo.grid(row=0, column=1, padx=5, pady=5)
self.l2 = Label(self.t3, text="Kernel size", anchor="w")
self.l2.grid(row=1, column=0, padx=10, pady=(15, 10))
self.ent4 = Entry(self.t3, textvariable=self.kernel_size, width=21)
self.ent4.place(width=150, height=50)
self.ent4.grid(row=1, column=1, pady=5, padx=10)
button = Button(self.t3, text="Confirm", padx=24, command=self.confirm_options)
button.grid(row=2, columnspan=2, padx=5, pady=5)
    # Add the buffers options tab
def add_tab4(self):
self.l1 = Label(self.t4, text="Motion Buffer size", anchor="w")
self.l1.grid(row=0, column=0, padx=10, pady=(15, 10))
self.ent6 = Entry(self.t4, textvariable=self.motion_buffer_size, width=21)
self.ent6.place(width=150, height=50)
self.ent6.grid(row=0, column=1, pady=5, padx=10)
self.l2 = Label(self.t4, text="Background Buffer size", anchor="w")
self.l2.grid(row=1, column=0, padx=10, pady=(15, 10))
self.ent5 = Entry(self.t4, textvariable=self.bg_buffer_size, width=21)
self.ent5.place(width=150, height=50)
self.ent5.grid(row=1, column=1, pady=5, padx=10)
button = Button(self.t4, text="Confirm", padx=24, command=self.confirm_options)
button.grid(row=2, columnspan=2, padx=5, pady=5)
    # Add the motion detection options tab
def add_tab5(self):
self.l1 = Label(self.t5, text="Min size ratio", anchor="w")
self.l1.grid(row=1, column=0, padx=10, pady=(15, 10))
self.ent7 = Entry(self.t5, textvariable=self.min_size_ratio, width=21)
self.ent7.place(width=150, height=50)
self.ent7.grid(row=1, column=1, pady=5, padx=10)
self.l2 = Label(self.t5, text="Combine", anchor="w")
self.l2.grid(row=2, column=0, padx=10, pady=(15, 10))
self.ent8 = Checkbutton(
self.t5, variable=self.shouldCombine, onvalue=True, offvalue=False, width=21
)
self.ent8.place(width=150, height=50)
self.ent8.grid(row=2, column=1, pady=5, padx=10)
button = Button(self.t5, text="Confirm", padx=24, command=self.confirm_options)
button.grid(row=3, columnspan=2, padx=5, pady=5)
# Main interface
class Interface(tk.Tk):
    # Window initialization
def __init__(self):
tk.Tk.__init__(self)
self.create_main()
self.label = None
self.motion_label = None
self.stat = None
self.thread = None
self.alert_sent = False
self.stat_isOpen = False
self.last_frame_time = time()
self.options = None
self.label_all_stat1 = []
self.label_all_stat2 = []
self.label_all_stat3 = []
self.mail = ""
self.sms = ""
# self.vs = VideoStream(src=0).start()
# self.start_thread()
    # Button creation
def create_main(self):
button1 = Button(
self,
text="Source",
width=10,
command=partial(self.select_file),
)
button1.grid(row=1, column=0, padx=5, pady=5)
button2 = Button(
self,
text="Motion filter",
width=10,
            command=partial(self.open_motion_filter),
)
button2.grid(row=1, column=1, padx=5, pady=5)
button3 = Button(
self,
text="Stats",
width=10,
command=partial(self.open_stat),
)
button3.grid(row=1, column=2, padx=5, pady=5)
button4 = Button(
self,
text="Options",
width=10,
command=partial(self.open_options),
)
button4.grid(row=1, column=3, padx=5, pady=5)
button5 = Button(
self,
text="Set Alert",
width=10,
command=partial(self.select_alert),
)
button5.grid(row=1, column=4, padx=5, pady=5)
def select_alert(self):
self.select_alert_win = tk.Toplevel()
self.select_alert_win.title("Select File")
mail = tk.StringVar()
sms = tk.StringVar()
self.label_select_alert1 = Label(self.select_alert_win, text="Mail", anchor="w")
self.label_select_alert1.grid(row=0, column=0, padx=10, pady=(15, 10))
self.ent_select_alert1 = Entry(
self.select_alert_win, textvariable=mail, width=21
)
self.ent_select_alert1.grid(row=0, column=1, pady=5, padx=10)
self.label_select_alert2 = Label(self.select_alert_win, text="SMS", anchor="w")
self.label_select_alert2.grid(row=1, column=0, padx=10, pady=(15, 10))
self.ent_select_alert2 = Entry(
self.select_alert_win, textvariable=sms, width=21
)
self.ent_select_alert2.grid(row=1, column=1, pady=5, padx=10)
def confirm_select_alert():
self.mail = mail.get()
self.sms = sms.get()
self.alert_sent = False
self.select_alert_win.destroy()
btn_confirm = Button(
self.select_alert_win,
text="Confirm",
padx=24,
command=confirm_select_alert,
)
btn_confirm.grid(row=2, columnspan=2, padx=5, pady=5)
    # File selection function
def select_file(self):
self.select_file_win = tk.Toplevel()
self.select_file_win.title("Select File")
self.path = tk.StringVar()
self.label_select_file = tk.Label(
self.select_file_win, text="Select a file or put a link"
)
self.label_select_file.grid(row=0, columnspan=2, padx=10, pady=10)
self.ent_select_file = Entry(
self.select_file_win, textvariable=self.path, width=21
)
self.ent_select_file.place(width=150, height=50)
self.ent_select_file.grid(row=1, column=0, pady=5, padx=10)
button_browse = Button(
self.select_file_win,
text="Browse",
width=10,
command=partial(self.select_localfile),
)
button_browse.grid(row=1, column=1, padx=5, pady=5)
button_confirm_file = Button(
self.select_file_win,
text="Confirm",
width=10,
command=partial(self.confirm_select_file),
)
button_confirm_file.grid(row=2, columnspan=2, padx=5, pady=5)
def select_localfile(self):
if self.thread != None:
self.stop_thread()
Tk().withdraw()
fileName = askopenfilename()
self.path.set(fileName)
def confirm_select_file(self):
self.vs = FileVideoStream(self.path.get()).start()
self.start_thread()
self.select_file_win.destroy()
    # Open the image filter view
    def open_motion_filter(self):
motion_filter = tk.Toplevel()
motion_filter.geometry("+1000+100")
motion_filter.title("Motion Filter")
self.motion_label = tk.Label(motion_filter)
self.motion_label.grid(row=0, columnspan=5, padx=10, pady=10)
    # Refresh the statistics
def add_stat(self):
self.list_stat = performance_statistics.stats
for i in range(len(self.list_stat)):
self.label_all_stat1[i]["text"] = self.list_stat[i][0]
self.label_all_stat2[i]["text"] = self.list_stat[i][1]
    # Open the statistics window
def open_stat(self):
self.stat = tk.Toplevel()
self.stat.geometry("+1000+350")
self.stat.title("Statistiques")
self.stat_isOpen = True
self.list_stat = performance_statistics.stats
self.label_stat_name = tk.Label(self.stat, text="Name")
self.label_stat_name.grid(row=0, column=0, padx=10, pady=10)
self.label_stat_average = tk.Label(self.stat, text="Average Time")
self.label_stat_average.grid(row=0, column=1, padx=10, pady=10)
self.label_all_stat1.clear()
self.label_all_stat2.clear()
for i in range(len(self.list_stat)):
self.label_stat1 = tk.Label(self.stat, text=self.list_stat[i][0])
self.label_stat1.grid(row=i + 1, column=0, padx=10, pady=10)
self.label_stat2 = tk.Label(self.stat, text=self.list_stat[i][1])
self.label_stat2.grid(row=i + 1, column=1, padx=10, pady=10)
self.label_all_stat1.append(self.label_stat1)
self.label_all_stat2.append(self.label_stat2)
    # Open the options menu
def open_options(self):
self.options = Options(self, self.options, self.restart_thread)
    # Image processing and display logic for each frame
def video_loop(self):
try:
last_stat_update = 0
while not self.thread.stopped():
if self.options != None and self.options.framerate.get() > 0:
while (
time()
<= self.last_frame_time + 1 / self.options.framerate.get()
):
pass
self.last_frame_time = time()
path = self.path.get()
if path[0:4] == "http":
self.frame = get_image(self.path.get())
else:
self.frame = self.vs.read()
if self.frame is None:
break
image = Image.fromarray(self.frame)
image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
image, motion, detection, boxes = self.image_processing.process_image(
image
)
image = Image.fromarray(image.astype("uint8"))
motion = Image.fromarray(motion.astype("uint8"))
detection = Image.fromarray(detection.astype("uint8"))
for box in boxes:
box.resize(detection.size[0], image.size[0])
image = self.draw_rectangle(image, box)
proportion = 1
if image.size[0] > image.size[1]:
proportion = 1280 / image.size[0]
else:
proportion = 720 / image.size[1]
newSize = (
int(image.size[0] * proportion),
int(image.size[1] * proportion),
)
image = Image.fromarray(
cv2.resize(
np.asarray(image),
newSize,
cv2.INTER_CUBIC,
).astype("uint8")
)
self.display_image(image, self.label)
if self.motion_label != None:
try:
self.display_image(motion, self.motion_label)
except:
self.motion_label = None
if self.stat != None and time() - last_stat_update > 1:
try:
self.add_stat()
except:
self.stat = None
last_stat_update = time()
if len(boxes) >= 1 and self.alert_sent == False:
if self.mail != "":
image.save("IPcam.png")
thread_send_email = threading.Thread(
target=lambda: send_alert.send_email(str(self.mail))
)
thread_send_email.start()
self.alert_sent = True
if self.sms != "":
thread_send_sms = threading.Thread(
target=lambda: send_alert.send_sms(str(self.sms))
)
thread_send_sms.start()
self.alert_sent = True
except:
return
    # Display the image in a label
def display_image(self, image, label):
self.image = ImageTk.PhotoImage(image)
if label is None:
label = tk.Label(image=self.image)
label.grid(row=0, columnspan=5, padx=10, pady=10)
else:
label.configure(image=self.image)
label.image = self.image
    # Draw a rectangle at a given position
def draw_rectangle(self, image, box):
draw = ImageDraw.Draw(image)
draw.rectangle(
(box.p1.y, box.p1.x, box.p2.y, box.p2.x), fill=None, outline="red"
)
return image
    # Restart the filtering/motion detection thread
def restart_thread(self):
if self.thread != None:
self.stop_thread()
self.start_thread()
    # Kill the thread
def stop_thread(self):
self.thread.stop()
self.thread.join(timeout=0.05)
    # Start the thread with the appropriate image-processing settings
def start_thread(self):
if self.options != None:
self.image_processing = ImageProcessing(
self.options.threshold.get(),
self.options.scale_ratio.get(),
self.options.compression_ratio.get(),
self.options.bg_buffer_size.get(),
self.options.motion_buffer_size.get(),
self.options.kernel_size.get(),
self.options.gaussian_algo.get(),
self.options.min_size_ratio.get(),
self.options.shouldCombine.get(),
)
else:
self.image_processing = ImageProcessing()
self.thread = StoppableThread(target=self.video_loop, args=())
self.thread.daemon = True
self.thread.start()
    # Function to close the application
def quit(self):
self.master.destroy()
# Initialize the application
if __name__ == "__main__":
root = Interface()
root.title("Motion detection")
root.mainloop()
if root.thread != None:
root.thread.stop()
root.thread.join(timeout=0.05)
| 36.681051
| 88
| 0.58621
|
3ad781b7e43eab77e8eb267efabd983cbc2f7d30
| 687
|
py
|
Python
|
src/utils/python/arc/paths.py
|
maikenp/arc
|
994ec850c73affb75e81ab9056cb8146ba75fa9f
|
[
"Apache-2.0"
] | null | null | null |
src/utils/python/arc/paths.py
|
maikenp/arc
|
994ec850c73affb75e81ab9056cb8146ba75fa9f
|
[
"Apache-2.0"
] | null | null | null |
src/utils/python/arc/paths.py
|
maikenp/arc
|
994ec850c73affb75e81ab9056cb8146ba75fa9f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import os
try:
    # try to import the build-time generated paths module
from .paths_dist import *
except ImportError:
# use defaults
if 'ARC_LOCATION' in os.environ:
ARC_LOCATION = os.environ['ARC_LOCATION']
else:
ARC_LOCATION = '/usr'
ARC_LIBEXEC_DIR = ARC_LOCATION + '/libexec/arc'
ARC_DATA_DIR = ARC_LOCATION + '/share/arc'
ARC_LIB_DIR = ARC_LOCATION + '/lib64/arc'
ARC_RUN_DIR = '/var/run/arc'
ARC_DOC_DIR = ARC_LOCATION + '/share/doc/nordugrid-arc/'
ARC_CONF = '/etc/arc.conf'
ARC_VERSION = 'devel'
# define ARC_LOCATION to be use by tools like gm-jobs
os.environ['ARC_LOCATION'] = ARC_LOCATION
| 26.423077
| 60
| 0.679767
|
23b19f0b5109c38e5d0ae1768cd4b44c475e4d74
| 1,628
|
py
|
Python
|
lib/oeqa/runtime/cases/node_state_manager.py
|
sashko/meta-bistro
|
9c123a4e2af62f253e351bf978156894bbc4da4f
|
[
"MIT"
] | 14
|
2015-09-30T07:26:58.000Z
|
2020-05-02T16:22:14.000Z
|
lib/oeqa/runtime/cases/node_state_manager.py
|
sashko/meta-bistro
|
9c123a4e2af62f253e351bf978156894bbc4da4f
|
[
"MIT"
] | 135
|
2015-05-13T14:51:41.000Z
|
2019-09-06T13:35:17.000Z
|
lib/oeqa/runtime/cases/node_state_manager.py
|
sashko/meta-bistro
|
9c123a4e2af62f253e351bf978156894bbc4da4f
|
[
"MIT"
] | 38
|
2015-06-08T20:34:00.000Z
|
2021-09-01T09:48:10.000Z
|
import os
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.utils.decorators import skipUnlessPassed
class NodeStateManagerDaemonTest(OERuntimeTestCase):
def test_nsm_installed(self):
(status, _) = self.target.run('rpm -qa | grep node-state-manager')
self.assertEqual(status, 0, "node-state-manager package is not installed")
# nodestatemanager-daemon is disabled by default, so start it first
(status, _) = self.target.run('systemctl start nodestatemanager-daemon')
self.assertEqual(status, 0, "Couldn't start nodestatemanager-daemon")
@skipUnlessPassed("test_nsm_installed")
def test_nsm_running(self):
(status, _) = self.target.run('ps -ef | grep NodeStateManager')
self.assertEqual(status, 0, "No NodeStateManager process running, ps output: %s" %
self.target.run(oeRuntimeTest.pscmd)[1])
@skipUnlessPassed("test_nsm_running")
def test_nsm_daemon_restart(self):
(status, output) = self.target.run('systemctl restart nodestatemanager-daemon')
self.assertEqual(status, 0, "Couldn't restart node-state-manager: %d %s" % (status, output))
@skipUnlessPassed("test_nsm_installed")
def test_dbus_config_exists(self):
dbus_config = "/etc/dbus-1/system.d/org.genivi.NodeStateManager.LifeCycleControl.service"
        status = os.path.isfile(dbus_config)
        self.assertTrue(status, "Couldn't find %s" % dbus_config)
        status = os.access(dbus_config, os.R_OK)
        self.assertTrue(status, "Couldn't access %s" % dbus_config)
| 44
| 100
| 0.705774
|
3cddb65e386fd54253599d6a976165bd0750af39
| 759
|
py
|
Python
|
alipay/aop/api/response/ZolozIdentificationZolozidGetResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/ZolozIdentificationZolozidGetResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/ZolozIdentificationZolozidGetResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZolozIdentificationZolozidGetResponse(AlipayResponse):
def __init__(self):
super(ZolozIdentificationZolozidGetResponse, self).__init__()
self._result_info = None
@property
def result_info(self):
return self._result_info
@result_info.setter
def result_info(self, value):
self._result_info = value
def parse_response_content(self, response_content):
response = super(ZolozIdentificationZolozidGetResponse, self).parse_response_content(response_content)
if 'result_info' in response:
self.result_info = response['result_info']
| 29.192308
| 110
| 0.72859
|
7817851674bf7e48c499e046a626efe90368bba8
| 2,389
|
py
|
Python
|
src/urban_meal_delivery/forecasts/methods/arima.py
|
webartifex/urban-meal-delivery
|
0f60640bc09fab142815d1a8eaea44653c3fc467
|
[
"MIT"
] | 1
|
2021-03-29T21:41:51.000Z
|
2021-03-29T21:41:51.000Z
|
src/urban_meal_delivery/forecasts/methods/arima.py
|
webartifex/urban-meal-delivery
|
0f60640bc09fab142815d1a8eaea44653c3fc467
|
[
"MIT"
] | null | null | null |
src/urban_meal_delivery/forecasts/methods/arima.py
|
webartifex/urban-meal-delivery
|
0f60640bc09fab142815d1a8eaea44653c3fc467
|
[
"MIT"
] | null | null | null |
"""A wrapper around R's "auto.arima" function."""
import pandas as pd
from rpy2 import robjects
from rpy2.robjects import pandas2ri
def predict(
training_ts: pd.Series,
forecast_interval: pd.DatetimeIndex,
*,
frequency: int,
seasonal_fit: bool = False,
) -> pd.DataFrame:
"""Predict with an automatically chosen ARIMA model.
Note: The function does not check if the `forecast_interval`
extends the `training_ts`'s interval without a gap!
Args:
training_ts: past observations to be fitted
forecast_interval: interval into which the `training_ts` is forecast;
its length becomes the step size `h` in the forecasting model in R
frequency: frequency of the observations in the `training_ts`
seasonal_fit: if a seasonal ARIMA model should be fitted
Returns:
predictions: point forecasts (i.e., the "prediction" column) and
            confidence intervals (i.e., the four "low/high80/95" columns)
Raises:
ValueError: if `training_ts` contains `NaN` values
"""
# Initialize R only if it is actually used.
# For example, the nox session "ci-tests-fast" does not use it.
from urban_meal_delivery import init_r # noqa:F401,WPS433
# Re-seed R every time it is used to ensure reproducibility.
robjects.r('set.seed(42)')
if training_ts.isnull().any():
raise ValueError('`training_ts` must not contain `NaN` values')
# Copy the data from Python to R.
robjects.globalenv['data'] = robjects.r['ts'](
pandas2ri.py2rpy(training_ts), frequency=frequency,
)
seasonal = 'TRUE' if bool(seasonal_fit) else 'FALSE'
n_steps_ahead = len(forecast_interval)
# Make the predictions in R.
result = robjects.r(
f"""
as.data.frame(
forecast(
auto.arima(data, approximation = TRUE, seasonal = {seasonal:s}),
h = {n_steps_ahead:d}
)
)
""",
)
# Convert the results into a nice `pd.DataFrame` with the right `.index`.
forecasts = pandas2ri.rpy2py(result)
forecasts.index = forecast_interval
return forecasts.round(5).rename(
columns={
'Point Forecast': 'prediction',
'Lo 80': 'low80',
'Hi 80': 'high80',
'Lo 95': 'low95',
'Hi 95': 'high95',
},
)
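# Illustrative usage sketch (hypothetical data; assumes R plus the "forecast"
# package are installed and importable via rpy2, as the function above requires):
def _predict_demo():
    past = pd.Series(
        [float(v % 7) for v in range(28)],                      # four weekly cycles
        index=pd.date_range("2021-01-01", periods=28, freq="D"),
    )
    future = pd.date_range("2021-01-29", periods=7, freq="D")
    # returns a DataFrame indexed by `future` with "prediction" and the
    # low/high 80/95 confidence-interval columns
    return predict(past, future, frequency=7, seasonal_fit=True)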
| 31.025974
| 80
| 0.627878
|
5a90d9ace461cf019ebdc61671e3ef526641d2c8
| 9,431
|
py
|
Python
|
src/gms/derive_level_paths.py
|
hohe12ly/inundation-mapping
|
d133addd4d730b5c468dcf1a8f7dfab35c55cbd7
|
[
"Info-ZIP"
] | null | null | null |
src/gms/derive_level_paths.py
|
hohe12ly/inundation-mapping
|
d133addd4d730b5c468dcf1a8f7dfab35c55cbd7
|
[
"Info-ZIP"
] | null | null | null |
src/gms/derive_level_paths.py
|
hohe12ly/inundation-mapping
|
d133addd4d730b5c468dcf1a8f7dfab35c55cbd7
|
[
"Info-ZIP"
] | null | null | null |
#!/usr/bin/env python3
from stream_branches import StreamNetwork
import argparse
from utils.shared_functions import get_fossid_from_huc8
import geopandas as gpd
def Derive_level_paths(in_stream_network, out_stream_network,branch_id_attribute,
out_stream_network_dissolved=None,huc_id=None,
headwaters_outfile=None,catchments=None,
catchments_outfile=None,
branch_inlets_outfile=None,
toNode_attribute='To_Node',fromNode_attribute='From_Node',
reach_id_attribute='HydroID',verbose=False
):
# getting foss_id of huc8
#foss_id = get_fossid_from_huc8(huc8_id=huc_id,foss_id_attribute='fossid',
#hucs_layerName='WBDHU8')
if verbose:
print("Deriving level paths ...")
# load file
if verbose:
print("Loading stream network ...")
stream_network = StreamNetwork.from_file(in_stream_network)
inlets_attribute = 'inlet_id'
outlets_attribute = 'outlet_id'
outlet_linestring_index = -1
# converts multi-linestrings to linestrings
stream_network = stream_network.multilinestrings_to_linestrings()
# derive nodes
stream_network = stream_network.derive_nodes(toNode_attribute=toNode_attribute,
fromNode_attribute=fromNode_attribute,
reach_id_attribute=reach_id_attribute,
outlet_linestring_index=outlet_linestring_index,
node_prefix=None,
verbose=verbose)
# derive outlets and inlets
stream_network = stream_network.derive_outlets(toNode_attribute,
fromNode_attribute,
outlets_attribute=outlets_attribute,
verbose=verbose
)
stream_network = stream_network.derive_inlets(toNode_attribute,
fromNode_attribute,
inlets_attribute=inlets_attribute,
verbose=verbose
)
# derive up and downstream networks
upstreams, downstreams = stream_network.make_up_and_downstream_dictionaries(
reach_id_attribute=reach_id_attribute,
toNode_attribute=toNode_attribute,
fromNode_attribute=fromNode_attribute,
verbose=True
)
# derive arbolate sum
stream_network = stream_network.get_arbolate_sum(arbolate_sum_attribute='arbolate_sum',
inlets_attribute=inlets_attribute,
reach_id_attribute=reach_id_attribute,
upstreams=upstreams,
downstreams=downstreams,
length_conversion_factor_to_km = 0.001,
verbose=verbose
)
# derive stream branches
stream_network = stream_network.derive_stream_branches( toNode_attribute=toNode_attribute,
fromNode_attribute=fromNode_attribute,
upstreams=upstreams,
branch_id_attribute=branch_id_attribute,
reach_id_attribute=reach_id_attribute,
comparison_attributes='arbolate_sum',
comparison_function=max,
verbose=verbose
)
    # filter out streams without catchments
if (catchments is not None) & (catchments_outfile is not None):
catchments = gpd.read_file(catchments)
stream_network = stream_network.remove_branches_without_catchments(
catchments,
reach_id_attribute=reach_id_attribute,
branch_id_attribute=branch_id_attribute,
reach_id_attribute_in_catchments=reach_id_attribute,
verbose=verbose
)
# subset which columns to merge
stream_network_to_merge = stream_network.filter(
items = [reach_id_attribute,inlets_attribute,
outlets_attribute,branch_id_attribute]
)
catchments = catchments.merge(stream_network_to_merge,how='inner',
left_on=reach_id_attribute,
right_on=reach_id_attribute
)
catchments.reset_index(drop=True,inplace=True)
catchments.to_file(catchments_outfile,index=False,driver='GPKG')
# derive headwaters
if headwaters_outfile is not None:
headwaters = stream_network.derive_headwater_points_with_inlets(
fromNode_attribute=fromNode_attribute,
inlets_attribute=inlets_attribute,
outlet_linestring_index=outlet_linestring_index
)
# headwaters write
headwaters.to_file(headwaters_outfile,index=False,driver='GPKG')
if out_stream_network is not None:
if verbose:
print("Writing stream branches ...")
stream_network.write(out_stream_network,index=True)
if out_stream_network_dissolved is not None:
# dissolve by levelpath
stream_network = stream_network.dissolve_by_branch(branch_id_attribute=branch_id_attribute,
attribute_excluded=None, #'order_',
values_excluded=None, #[1,2],
out_vector_files=out_stream_network_dissolved,
verbose=verbose)
branch_inlets = stream_network.derive_inlet_points_by_feature( feature_attribute=branch_id_attribute,
outlet_linestring_index=outlet_linestring_index
)
if branch_inlets_outfile is not None:
branch_inlets.to_file(branch_inlets_outfile,index=False,driver='GPKG')
    return stream_network
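# Illustrative direct call (hypothetical file names; mirrors the CLI flags
# parsed under __main__ below):
def _example_call():
    return Derive_level_paths(
        in_stream_network="demDerived_reaches.gpkg",
        out_stream_network="branches.gpkg",
        branch_id_attribute="levpa_id",
        out_stream_network_dissolved="branches_dissolved.gpkg",
        huc_id="12090301",
        headwaters_outfile="headwaters.gpkg",
        branch_inlets_outfile="branch_inlets.gpkg",
        verbose=True,
    )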
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create stream network level paths')
parser.add_argument('-i','--in-stream-network', help='Input stream network', required=True)
parser.add_argument('-b','--branch-id-attribute', help='Name of the branch attribute desired', required=True)
parser.add_argument('-u','--huc-id', help='Current HUC ID', required=False,default=None)
parser.add_argument('-r','--reach-id-attribute', help='Reach ID attribute to use in source file', required=False,default='HydroID')
parser.add_argument('-c','--catchments', help='NWM catchments to append level path data to', required=False, default=None)
parser.add_argument('-t','--catchments-outfile', help='NWM catchments outfile with appended level path data', required=False, default=None)
parser.add_argument('-n','--branch_inlets_outfile', help='Output level paths inlets', required=False,default=None)
parser.add_argument('-o','--out-stream-network', help='Output stream network', required=False,default=None)
parser.add_argument('-e','--headwaters-outfile', help='Output stream network headwater points', required=False,default=None)
parser.add_argument('-d','--out-stream-network-dissolved', help='Dissolved output stream network', required=False,default=None)
parser.add_argument('-v','--verbose', help='Verbose output', required=False,default=False,action='store_true')
args = vars(parser.parse_args())
Derive_level_paths(**args)
| 55.804734
| 143
| 0.504188
|
9bd019301ac492ca65904113cbd61611ae81fa2b
| 3,690
|
py
|
Python
|
PhysicsTools/NanoAOD/python/isotracks_cff.py
|
AndrissP/cmssw
|
b03578d2a2573923af5db50d0508baf3bd6a208e
|
[
"Apache-2.0"
] | null | null | null |
PhysicsTools/NanoAOD/python/isotracks_cff.py
|
AndrissP/cmssw
|
b03578d2a2573923af5db50d0508baf3bd6a208e
|
[
"Apache-2.0"
] | null | null | null |
PhysicsTools/NanoAOD/python/isotracks_cff.py
|
AndrissP/cmssw
|
b03578d2a2573923af5db50d0508baf3bd6a208e
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import *
from PhysicsTools.NanoAOD.nano_eras_cff import *
finalIsolatedTracks = cms.EDProducer("IsolatedTrackCleaner",
tracks = cms.InputTag("isolatedTracks"),
cut = cms.string("((pt>5 && (abs(pdgId) == 11 || abs(pdgId) == 13)) || pt > 10) && (abs(pdgId) < 15 || abs(eta) < 2.5) && ((abs(dxy) < 0.2 && abs(dz) < 0.1) || pt>15) && ((pfIsolationDR03().chargedHadronIso < 5 && pt < 25) || pfIsolationDR03().chargedHadronIso/pt < 0.2)"),
finalLeptons = cms.VInputTag(
cms.InputTag("finalElectrons"),
cms.InputTag("finalLooseMuons"),
),
)
(run2_nanoAOD_106Xv1 & ~run2_nanoAOD_devel).toModify(finalIsolatedTracks, cut = "((pt>5 && (abs(pdgId) == 11 || abs(pdgId) == 13)) || pt > 10) && (abs(pdgId) < 15 || abs(eta) < 2.5) && abs(dxy) < 0.2 && abs(dz) < 0.1 && ((pfIsolationDR03().chargedHadronIso < 5 && pt < 25) || pfIsolationDR03().chargedHadronIso/pt < 0.2)")
isoForIsoTk = cms.EDProducer("IsoTrackIsoValueMapProducer",
src = cms.InputTag("finalIsolatedTracks"),
relative = cms.bool(True),
rho_MiniIso = cms.InputTag("fixedGridRhoFastjetAll"),
EAFile_MiniIso = cms.FileInPath("PhysicsTools/NanoAOD/data/effAreaMuons_cone03_pfNeuHadronsAndPhotons_80X.txt"),
)
isFromLostTrackForIsoTk = cms.EDProducer("IsFromLostTrackMapProducer",
srcIsoTracks = cms.InputTag("finalIsolatedTracks"),
packedPFCandidates = cms.InputTag("packedPFCandidates"),
lostTracks = cms.InputTag("lostTracks"),
)
isoTrackTable = cms.EDProducer("SimpleCandidateFlatTableProducer",
src = cms.InputTag("finalIsolatedTracks"),
cut = cms.string(""), # filtered already above
name = cms.string("IsoTrack"),
doc = cms.string("isolated tracks after basic selection (" + finalIsolatedTracks.cut.value() + ") and lepton veto"),
singleton = cms.bool(False), # the number of entries is variable
    extension = cms.bool(False), # this is the main table for the isolated tracks
variables = cms.PSet(P3Vars,
dz = Var("dz",float,doc="dz (with sign) wrt first PV, in cm",precision=10),
dxy = Var("dxy",float,doc="dxy (with sign) wrt first PV, in cm",precision=10),
pfRelIso03_chg = Var("pfIsolationDR03().chargedHadronIso/pt",float,doc="PF relative isolation dR=0.3, charged component",precision=10),
pfRelIso03_all = Var("(pfIsolationDR03().chargedHadronIso + max(pfIsolationDR03().neutralHadronIso + pfIsolationDR03().photonIso - pfIsolationDR03().puChargedHadronIso/2,0.0))/pt",float,doc="PF relative isolation dR=0.3, total (deltaBeta corrections)",precision=10),
isPFcand = Var("packedCandRef().isNonnull()",bool,doc="if isolated track is a PF candidate"),
fromPV = Var("fromPV", int, doc="isolated track comes from PV"),
pdgId = Var("pdgId",int,doc="PDG id of PF cand"),
isHighPurityTrack = Var("isHighPurityTrack",bool,doc="track is high purity"),
charge = Var("charge", int, doc="electric charge"),
),
externalVariables = cms.PSet(
miniPFRelIso_chg = ExtVar("isoForIsoTk:miniIsoChg",float,doc="mini PF relative isolation, charged component",precision=10),
miniPFRelIso_all = ExtVar("isoForIsoTk:miniIsoAll",float,doc="mini PF relative isolation, total (with scaled rho*EA PU corrections)",precision=10),
isFromLostTrack = ExtVar("isFromLostTrackForIsoTk:isFromLostTrack",bool,doc="if isolated track comes from a lost track"),
),
)
(run2_nanoAOD_106Xv1 & ~run2_nanoAOD_devel).toModify(isoTrackTable.variables, charge = None)
isoTrackSequence = cms.Sequence(finalIsolatedTracks + isoForIsoTk + isFromLostTrackForIsoTk)
isoTrackTables = cms.Sequence(isoTrackTable)
| 62.542373
| 322
| 0.704336
|
865b3b4495a68339e31e77bfbcb393d24b032152
| 9,381
|
py
|
Python
|
tools/gh_api.py
|
equinaut/statsmodels
|
6fe8d4e351416727641db4c3d3552f4ec4f46d0e
|
[
"BSD-3-Clause"
] | 1
|
2020-05-09T08:42:52.000Z
|
2020-05-09T08:42:52.000Z
|
tools/gh_api.py
|
equinaut/statsmodels
|
6fe8d4e351416727641db4c3d3552f4ec4f46d0e
|
[
"BSD-3-Clause"
] | null | null | null |
tools/gh_api.py
|
equinaut/statsmodels
|
6fe8d4e351416727641db4c3d3552f4ec4f46d0e
|
[
"BSD-3-Clause"
] | 1
|
2020-05-09T08:42:58.000Z
|
2020-05-09T08:42:58.000Z
|
"""Functions for Github API requests.
Copied from IPython 9e82bc5
https://github.com/ipython/ipython/blob/master/tools/gh_api.py
"""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
from datetime import datetime
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache")
else:
requests_cache.install_cache("gh_api")
# Keyring stores passwords by a 'username', but we're not storing a username
# and password
fake_username = 'statsmodels_tools'
class Obj(dict):
"""Dictionary with attribute access to names."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, val):
self[name] = val
token = None
def get_auth_token():
global token
if token is not None:
return token
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on Github.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "Statsmodels tools - {}".format(datetime.now().isoformat()),
"note_url": "https://github.com/statsmodels/statsmodels/tree/master/tools", # noqa:E501
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = ('https://api.github.com/repos/{project}/issues/{num}/comments'
.format(project=project, num=num))
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists",
data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
def get_pull_request(project, num, auth=False):
"""get pull request info by number
"""
url = ("https://api.github.com/repos/{project}/pulls/{num}"
.format(project=project, num=num))
if auth:
header = make_auth_header()
else:
header = None
response = requests.get(url, headers=header)
response.raise_for_status()
return json.loads(response.text, object_hook=Obj)
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = ("https://api.github.com/repos/{project}/pulls/{num}/files"
.format(project=project, num=num))
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
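# The two regexes above appear intended for parsing GitHub's ``Link`` response
# header when paging manually (get_paged_request below relies on
# response.links instead). A hedged example of what they would extract:
#
#     link = '<https://api.github.com/repos/o/r/pulls?page=2>; rel="next"'
#     element_pat.search(link).group(1)  # URL between the angle brackets
#     rel_pat.search(link).group(1)      # 'next'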
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results
def get_pulls_list(project, auth=False, **params):
"""get pull request list"""
params.setdefault("state", "closed")
url = ("https://api.github.com/repos/{project}/pulls"
.format(project=project))
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_issues_list(project, auth=False, **params):
"""get issues list"""
params.setdefault("state", "closed")
url = ("https://api.github.com/repos/{project}/issues"
.format(project=project))
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestones(project, auth=False, **params):
url = ("https://api.github.com/repos/{project}/milestones"
.format(project=project))
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestone_id(project, milestone, auth=False, **params):
pages = get_milestones(project, auth=auth, **params)
for page in pages:
if page['title'] == milestone:
return page['number']
else:
raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return bool(issue.get('pull_request', {}).get('html_url', None))
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
fields = fields.copy()
for key in ('key', 'acl', 'Filename', 'success_action_status',
'AWSAccessKeyId', 'Policy', 'Signature',
'Content-Type', 'file'):
yield (key, fields.pop(key))
for (k, v) in fields.items():
yield k, v
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data mime
format.
:param fields:
Dictionary of fields or list of (key, value) field tuples. The key is
treated as the field name, and the value as the body of the form-data
bytes. If the value is a tuple of two elements, then the first element
is treated as the filename of the form-data section.
Field names and filenames must be unicode.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
# copy requests imports in here:
from io import BytesIO
from requests.packages.urllib3.filepost import (
choose_boundary, six, writer, b, get_content_type
)
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
def post_download(project, filename, name=None, description=""):
"""Upload a file to the GitHub downloads area"""
if name is None:
name = os.path.basename(filename)
with open(filename, 'rb') as f:
filedata = f.read()
url = ("https://api.github.com/repos/{project}/downloads"
.format(project=project))
payload = json.dumps(dict(name=name, size=len(filedata),
description=description))
response = requests.post(url, data=payload, headers=make_auth_header())
response.raise_for_status()
reply = json.loads(response.content)
s3_url = reply['s3_url']
fields = dict(
key=reply['path'],
acl=reply['acl'],
success_action_status=201,
Filename=reply['name'],
AWSAccessKeyId=reply['accesskeyid'],
Policy=reply['policy'],
Signature=reply['signature'],
file=(reply['name'], filedata),
)
fields['Content-Type'] = reply['mime_type']
data, content_type = encode_multipart_formdata(fields)
s3r = requests.post(s3_url, data=data,
headers={'Content-Type': content_type})
return s3r
| 30.067308
| 94
| 0.62456
|
ef1bfecc1914544fcdc8aeda7dfa3e2e3c991430
| 1,363
|
py
|
Python
|
server_status/__init__.py
|
airtonix/django-server-status
|
5bd732c6a56b2f4a816142592ded23f2dc2500ad
|
[
"MIT"
] | 2
|
2015-09-11T23:29:46.000Z
|
2021-06-18T02:59:50.000Z
|
server_status/__init__.py
|
airtonix/django-server-status
|
5bd732c6a56b2f4a816142592ded23f2dc2500ad
|
[
"MIT"
] | null | null | null |
server_status/__init__.py
|
airtonix/django-server-status
|
5bd732c6a56b2f4a816142592ded23f2dc2500ad
|
[
"MIT"
] | null | null | null |
def autodiscover():
    """
    Auto-discover INSTALLED_APPS plugin modules and fail silently when
    not present. This forces an import on them to register the plugin.
    """
    import copy
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule
    from .conf import settings
    from .registry import plugins
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's plugin module.
try:
before_import_registry = copy.copy(plugins._registry)
name = '{}.{}'.format(app, settings.SERVER_STATUS_PLUGIN_MODULE_NAME)
import_module(name)
except Exception as error:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
plugins._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have a plugin module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, settings.SERVER_STATUS_PLUGIN_MODULE_NAME):
raise
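# A hedged sketch of what this discovery expects (module and app names are
# assumptions, not part of this file): each installed app may ship a module
# named after settings.SERVER_STATUS_PLUGIN_MODULE_NAME, e.g.
# myapp/<plugin_module_name>.py, whose import side effect is to add entries
# to the shared ``plugins`` registry; importing it here is what makes the
# app's status checks available.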
| 41.30303
| 84
| 0.666178
|
844ed94ffbd0247bc066796115e06e62ed058853
| 7,894
|
py
|
Python
|
xentica/seeds/patterns.py
|
a5kin/xentica
|
ca08fac9f85af71c9d6d98545a33d50323f851b3
|
[
"MIT"
] | 23
|
2018-02-24T23:38:54.000Z
|
2022-02-16T15:20:46.000Z
|
xentica/seeds/patterns.py
|
a5kin/xentica
|
ca08fac9f85af71c9d6d98545a33d50323f851b3
|
[
"MIT"
] | 39
|
2017-11-20T21:54:30.000Z
|
2020-09-01T12:43:57.000Z
|
xentica/seeds/patterns.py
|
a5kin/xentica
|
ca08fac9f85af71c9d6d98545a33d50323f851b3
|
[
"MIT"
] | 4
|
2019-04-23T03:56:44.000Z
|
2021-05-14T11:12:54.000Z
|
"""
The module containing different patterns for CA seed initialization.
Each pattern class has one mandatory method ``generate()``, which is
called automatically at the initialization stage.
Patterns are intended for use in
:class:`Experiment <xentica.core.experiments.Experiment>` classes.
See the example of general usage above.
.. _The Concept: http://artipixoids.a5kin.net/concept/artipixoids_concept.pdf
"""
import abc
import numpy as np
from .random import LocalRandom
__all__ = ['RandomPattern', 'BigBang', 'PrimordialSoup', 'ValDict', ]
class ValDictMeta(type):
"""A placeholder for :class:`ValDict` metaclass."""
class ValDict(metaclass=ValDictMeta):
"""
A wrapper over the Python dictionary.
    It can keep descriptor classes along with regular values. When you
    get an item, the necessary value is automatically obtained, either
    directly or via the descriptor logic.
    The dictionary is read-only; you should set all its values at class
    initialization.
The example of usage::
>>> from xentica.seeds.random import RandInt
>>> from xentica.seeds.patterns import ValDict
>>> d = {'a': 2, 's': RandInt(11, 23), 'd': 3.3}
>>> vd = ValDict(d)
>>> vd['a']
2
>>> vd['s']
14
>>> vd['d']
3.3
:param d:
Dictionary with mixed values. May contain descriptor classes.
:param parent:
A reference to the class holding the dictionary. Optional.
"""
def __init__(self, d, parent=None):
"""Initialize the class."""
self._d = d
self.parent = parent
if parent is None:
self.parent = self
self.random = LocalRandom()
def items(self):
"""Iterate over dictionary items."""
for key in self._d.keys():
val = self[key]
yield key, val
def keys(self):
"""Iterate over dictionary keys."""
for k in self._d.keys():
yield k
def __getitem__(self, key):
"""Implement the logic of obtaining item from dictionary."""
if key in self._d:
if hasattr(self._d[key], '__get__'):
return self._d[key].__get__(self.parent, self.parent.__class__)
return self._d[key]
raise KeyError(key)
def __setitem__(self, key, val):
"""Suppress direct item setting, may be allowed in future."""
raise NotImplementedError
class RandomPattern:
"""
The base class for random patterns.
:param vals:
Dictionary with mixed values. May contain descriptor classes.
"""
def __init__(self, vals):
"""Initialize the class."""
self._random = LocalRandom()
self.vals = ValDict(vals, self)
@property
def random(self):
"""Get the random stream."""
return self._random
@random.setter
def random(self, val):
"""Set the random stream."""
self._random = val
def __add__(self, other):
"""Return a chained pattern."""
return ChainedPattern(self, other)
@abc.abstractmethod
def generate(self, cells, bsca):
"""
Generate the entire initial state.
This is an abstract method, you must implement it in
:class:`RandomPattern` subclasses.
:param cells:
NumPy array with cells' states as items. The seed will be
generated over this array.
:param bsca:
:class:`xentica.core.CellularAutomaton` instance, to access
the field's size and other attributes.
"""
class ChainedPattern(RandomPattern):
"""The join of two other patterns."""
def __init__(self, pattern1, pattern2):
self._pattern1 = pattern1
self._pattern2 = pattern2
super().__init__({})
@RandomPattern.random.setter
def random(self, val):
"""Set the random stream."""
self._pattern1.random = val
self._pattern2.random = val
def generate(self, cells, bsca):
"""
Generate two patterns sequentially.
See :meth:`RandomPattern.generate` for details.
"""
self._pattern1.generate(cells, bsca)
self._pattern2.generate(cells, bsca)
class BigBang(RandomPattern):
"""
Random init pattern, known as *"Big Bang"*.
Citation from `The Concept`_:
*"A small area of space is initialized with a high amount of energy
and random parameters per each quantum. Outside the area, quanta
has either zero or minimum possible amount of energy. This is a
good test for the ability of energy to spread in empty space."*
The current implementation generates a value for every cell inside a
specified N-cube area. Cells outside the area remain unchanged.
:param vals:
Dictionary with mixed values. May contain descriptor classes.
:param pos:
A tuple with the coordinates of the lowest corner of the Bang area.
:param size:
A tuple with the size of the Bang area per each dimension.
"""
def __init__(self, vals, pos=None, size=None):
"""Initialize class."""
self.pos = np.asarray(pos) if pos else None
self.size = np.asarray(size) if size else None
super().__init__(vals)
def _prepare_area(self, bsca_size):
"""
Prepare area size and position.
:param bsca_size: tuple with CA size.
"""
dims = range(len(bsca_size))
randint = self.random.standard.randint
if self.size is None:
rnd_vec = [randint(1, bsca_size[i]) for i in dims]
self.size = np.asarray(rnd_vec)
if self.pos is None:
rnd_vec = [randint(0, bsca_size[i]) for i in dims]
self.pos = np.asarray(rnd_vec)
for i in range(len(self.pos)):
coord, width, side = self.pos[i], self.size[i], bsca_size[i]
if coord + width >= side:
self.pos[i] = side - width
self.pos[i] = max(0, self.pos[i])
def generate(self, cells, bsca):
"""
Generate the entire initial state.
See :meth:`RandomPattern.generate` for details.
"""
self._prepare_area(bsca.size)
indices = np.arange(0, bsca.cells_num)
coords = bsca.index_to_coord(indices)
area = None
for i in range(len(bsca.size)):
condition = (coords[i] >= self.pos[i])
condition &= (coords[i] < self.pos[i] + self.size[i])
if area is None:
area = condition
continue
area &= (condition)
state = {}
for name in sorted(self.vals.keys()):
val = self.vals[name]
state[name] = val
cells[np.where(area)] = bsca.pack_state(state)
class PrimordialSoup(RandomPattern):
"""
Random init pattern, known as *"Primordial Soup"*.
Citation from `The Concept`_:
*"Each and every quantum initially has an equally small amount
of energy, other parameters are random. This is a good test
for the ability of energy to self-organize in clusters from
the completely uniform distribution."*
The current implementation populates the entire board with
generated values.
:param vals:
Dictionary with mixed values. May contain descriptor classes.
"""
def __init__(self, vals):
"""Initialize class."""
self.size = None
super().__init__(vals)
def generate(self, cells, bsca):
"""
Generate the entire initial state.
See :meth:`RandomPattern.generate` for details.
"""
self.size = bsca.size
state = {}
for name in sorted(self.vals.keys()):
val = self.vals[name]
state[name] = val
cells[:bsca.cells_num] = bsca.pack_state(state)
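# Illustrative usage (not part of the original module); the field name
# 'energy' and the value ranges are hypothetical and depend on the concrete
# CellularAutomaton model being seeded:
#
#     from xentica.seeds.random import RandInt
#
#     seed = BigBang({'energy': RandInt(1, 10)}, pos=(16, 16), size=(8, 8)) \
#         + PrimordialSoup({'energy': 1})
#     # The resulting ChainedPattern applies both patterns in order when an
#     # Experiment calls seed.generate(cells, bsca) at initialization.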
| 28.810219
| 79
| 0.606157
|
3281707efa74a8c0c22a4bfa08c5575adffa7ad7
| 2,630
|
py
|
Python
|
PyLexicon/core.py
|
LordFlashmeow/PyLexicon
|
cc9ebb863f9365c335dc443dd6f90163029436de
|
[
"MIT"
] | null | null | null |
PyLexicon/core.py
|
LordFlashmeow/PyLexicon
|
cc9ebb863f9365c335dc443dd6f90163029436de
|
[
"MIT"
] | null | null | null |
PyLexicon/core.py
|
LordFlashmeow/PyLexicon
|
cc9ebb863f9365c335dc443dd6f90163029436de
|
[
"MIT"
] | null | null | null |
import re
import requests
from bs4 import BeautifulSoup
def _get_soup_object(url, parser="html.parser"):
return BeautifulSoup(requests.get(url).text, parser)
def define(word):
""" Looks up and defines a word
Args:
word: a single word string
Returns:
A dictionary
{'type of word':['Definition 1', 'Definition 2']}
Example:
define('hello')
{'Noun': ['an expression of greeting']}
"""
if len(word.split()) > 1:
raise ValueError("Search must only be one word")
html = _get_soup_object(
"http://wordnetweb.princeton.edu/perl/webwn?s=" + word)
types = html.findAll('h3')
lists = html.findAll('ul')
meaning = {}
for item in types:
reg = str(lists[types.index(item)])
meanings = []
for x in re.findall(r'> \((.*?)\) <', reg):
if 'often followed by' in x:
pass
elif len(x) > 5 or ' ' in str(x):
meanings.append(x)
name = item.text
meaning[name] = meanings
return meaning
def synonym(word):
""" Looks up and returns synonyms of a word
Args:
word: a single word string
Returns:
A list of synonyms
['synonym 1', 'synonym 2']
Example:
synonym('hello')
['welcome', 'howdy', 'hi', 'greetings', 'bonjour']
"""
if len(word.split()) > 1:
raise ValueError("Search must only be one word")
html = _get_soup_object("http://www.thesaurus.com/browse/" + word)
terms = html.select("div#filters-0")[0].findAll("li")
if len(terms) > 5:
terms = terms[:5] # Shorten the list to five synonyms
similars = []
for item in terms:
similars.append(item.select("span.text")[0].getText())
return similars
def antonym(word):
""" Looks up and returns antonyms of a word
Args:
word: a single word string
Returns:
A list of antonyms
['antonym 1', 'antonym 2']
Example:
antonym('hello')
['adios', 'au revoir', 'goodbye']
"""
if len(word.split()) > 1:
raise ValueError("Search must only be one word")
html = _get_soup_object("http://www.thesaurus.com/browse/" + word)
terms = html.select("section.antonyms")[0].findAll("li")
if len(terms) > 5:
terms = terms[:5] # Shorten the list to five antonyms
opposites = []
for item in terms:
opposites.append(item.select("span.text")[0].getText())
return opposites
| 23.909091
| 70
| 0.54981
|
b82eb8619c6ac1351e653b348e21c85c7a8d1fa8
| 3,274
|
py
|
Python
|
Numpy_basics.py
|
yashika-5/summer19_python
|
e0fb252b6d6577ce8c68cac6155ae2b90c1e9073
|
[
"Apache-2.0"
] | null | null | null |
Numpy_basics.py
|
yashika-5/summer19_python
|
e0fb252b6d6577ce8c68cac6155ae2b90c1e9073
|
[
"Apache-2.0"
] | null | null | null |
Numpy_basics.py
|
yashika-5/summer19_python
|
e0fb252b6d6577ce8c68cac6155ae2b90c1e9073
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[1]:
my_list =[1,2,3]
# In[47]:
import numpy as np
import pandas as pd
# In[5]:
arr = np.array(my_list)
arr
# In[19]:
arr.dtype
# In[7]:
my_mat = [[1,2,300],[34,23,41],[78,34,87]]
# In[8]:
np.array(my_mat)
# In[9]:
np.arange(0,10)
# In[10]:
np.arange(0,11,3)
# In[11]:
np.ones(5)
# In[12]:
np.zeros(5)
# In[13]:
np.linspace(0,10,10)
# In[14]:
np.linspace(0,10,100)
# In[15]:
np.eye(5)
# In[16]:
np.random.rand(9)
# In[18]:
np.random.randn(8,8,6)
# In[20]:
a = np.arange(0,10)
# In[21]:
a
# In[22]:
a[0:5]
# In[23]:
a[5:]
# In[24]:
a[:]
# In[25]:
a[:5]
# In[26]:
arr1 = [[1,2,3],[5,32,67],[87,12,9]]
# In[27]:
arr1
# In[33]:
arr2 = np.array(arr1)
# In[34]:
arr2
# In[35]:
arr2[:2]
# In[36]:
arr2[0:]
# In[37]:
arr2[1:]
# In[38]:
arr2[1: ,1:]
# In[40]:
p =arr2[1: ,1:]
# In[41]:
p
# In[42]:
p[1: ]
# In[43]:
p[1: ,1:]
# In[44]:
arr_2d = np.arange(50).reshape(5,10)
# In[45]:
arr_2d
# In[46]:
arr_2d[1:3 , 3:5]
# In[48]:
# clear  (leftover IPython console command; commented out since it is not valid Python)
# In[58]:
labels = ['a','b','c']
my_data = [10,20,30]
arr = np.array(my_data)
d = { 'a' : 10 , 'b' : 20 , 'c' :30 }
# In[51]:
my_data
# In[52]:
arr
# In[53]:
d
# In[54]:
pd.Series(data = my_data)
# In[55]:
pd.Series(my_data,labels)
# In[56]:
pd.Series(arr)
# In[59]:
pd.Series(d)
# In[61]:
pd.Series(data=[sum,print,len])
# In[64]:
ser1 = pd.Series([1,2,3],['USA','Germany','Japan'])
# ser1
# In[65]:
ser1
# In[66]:
ser1[1]
# In[67]:
ser1['USA']
# In[68]:
ser2 = pd.Series(data= labels)
# In[69]:
ser2[0]
# In[70]:
ser2
# In[71]:
ser1 + ser2
# # Pandas - DataFrames
# In[72]:
from numpy.random import randn
# In[74]:
a = np.random.seed(101)
# In[92]:
randn(2)
# In[93]:
df = pd.DataFrame(randn(6,4),['A','B','C','D','E','F'],['W','X','Y','Z'])
# In[94]:
df
# In[95]:
df['W']
# In[96]:
type(df['W'])
# In[97]:
type(df)
# In[98]:
df.W
# In[99]:
df.X
# In[101]:
df[['W','Z']]
# In[103]:
df['new'] = df['W'] + df['Y']
# In[104]:
df
# In[111]:
#df.drop('new',axis=1,inplace=True)
# In[112]:
df.drop('E')
# In[113]:
df.shape
# df
# In[115]:
df[['Z']]
# In[116]:
#ROWS
# In[117]:
df
# In[118]:
df.loc['A']
# In[119]:
df.iloc[2]
# In[120]:
df
# In[121]:
df.loc['E','Z']
# In[122]:
df.loc[['A','B'],['W','Y']]
# In[126]:
booldf = df > 0
# In[127]:
booldf
# In[128]:
df
# In[129]:
df[booldf]
# In[130]:
df
# In[131]:
df[df['Z'] < 0]
# In[133]:
df[df['Z'] < 0] [['X']]
# In[134]:
df['W'] >0
# In[139]:
df[(df['W']>0) & (df['Z']>1)]
# In[138]:
df
# In[140]:
df.reset_index()
# In[141]:
newind = 'CA NY WY OR CO'.split()
# In[142]:
newind
# In[151]:
df['States'] = newind
# In[ ]:
# In[150]:
df
# In[147]:
df.set_index('States')
# In[152]:
d = { 'A':[1,2,np.nan],'B ':[5,np.nan,np.nan],'c':[1,2,3]}
# In[153]:
df1 = pd.DataFrame(d)
# In[154]:
df1
# In[158]:
df1.dropna(axis=1)
# In[163]:
df1.dropna(thresh=1)
# In[164]:
df1.fillna(value='FILL VALUEEE')
# In[165]:
df1['A']
# In[167]:
df1['A'].fillna(value=df1['A'].mean())
| 5.172196
| 73
| 0.459988
|
9363177ea33de4226bd1deb503365a58ce2b6a92
| 403
|
py
|
Python
|
python/validating-credit-card-number.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 340
|
2018-06-17T19:45:56.000Z
|
2022-03-22T02:26:15.000Z
|
python/validating-credit-card-number.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 3
|
2021-02-02T17:17:29.000Z
|
2021-05-18T10:06:04.000Z
|
python/validating-credit-card-number.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 229
|
2019-04-20T08:28:49.000Z
|
2022-03-31T04:23:52.000Z
|
import re
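# The checks below encode the card-number rules: the number must start with
# 4, 5 or 6, consist of exactly 16 digits (either contiguous or as four
# hyphen-separated groups of four), and must not contain four or more
# consecutive repeated digits once the hyphens are stripped.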
if __name__ == "__main__":
t = int(input().strip())
for _ in range(t):
num = "".join(input())
if (re.match(r'^[456]', num) and
(re.match(r'([\d]{4}-){3}[\d]{4}$', num) or
             re.match(r'[\d]{16}$', num)) and
not re.search(r'(\d)\1{3,}', num.replace("-", ""))):
print("Valid")
else:
print("Invalid")
| 26.866667
| 64
| 0.419355
|
afbe440870a09411371d2cb6c2139c1651f4aa27
| 20,356
|
py
|
Python
|
python/cuml/test/test_fil.py
|
frankier/cuml
|
c2f246a923232a63c65082036a3bd1278a2fa7d7
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/test/test_fil.py
|
frankier/cuml
|
c2f246a923232a63c65082036a3bd1278a2fa7d7
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/test/test_fil.py
|
frankier/cuml
|
c2f246a923232a63c65082036a3bd1278a2fa7d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import os
from cuml import ForestInference
from cuml.test.utils import array_equal, unit_param, \
quality_param, stress_param
from cuml.common.import_utils import has_xgboost
from cuml.common.import_utils import has_lightgbm
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import GradientBoostingClassifier, \
GradientBoostingRegressor, RandomForestClassifier, RandomForestRegressor, \
ExtraTreesClassifier, ExtraTreesRegressor
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.model_selection import train_test_split
if has_xgboost():
import xgboost as xgb
def simulate_data(m, n, k=2, random_state=None, classification=True,
bias=0.0):
if classification:
features, labels = make_classification(n_samples=m,
n_features=n,
n_informative=int(n/5),
n_classes=k,
random_state=random_state)
else:
features, labels = make_regression(n_samples=m,
n_features=n,
n_informative=int(n/5),
n_targets=1,
bias=bias,
random_state=random_state)
return np.c_[features].astype(np.float32), \
np.c_[labels].astype(np.float32).flatten()
# absolute tolerance for FIL predict_proba
# False is binary classification, True is multiclass
proba_atol = {False: 3e-7, True: 3e-6}
def _build_and_save_xgboost(model_path,
X_train,
y_train,
classification=True,
num_rounds=5,
n_classes=2,
xgboost_params={}):
"""Trains a small xgboost classifier and saves it to model_path"""
dtrain = xgb.DMatrix(X_train, label=y_train)
# instantiate params
params = {'silent': 1}
# learning task params
if classification:
params['eval_metric'] = 'error'
if n_classes == 2:
params['objective'] = 'binary:logistic'
else:
params['num_class'] = n_classes
params['objective'] = 'multi:softprob'
else:
params['eval_metric'] = 'error'
params['objective'] = 'reg:squarederror'
params['base_score'] = 0.0
params['max_depth'] = 25
params.update(xgboost_params)
bst = xgb.train(params, dtrain, num_rounds)
bst.save_model(model_path)
return bst
@pytest.mark.parametrize('n_rows', [unit_param(1000),
quality_param(10000),
stress_param(500000)])
@pytest.mark.parametrize('n_columns', [unit_param(30),
quality_param(100),
stress_param(1000)])
@pytest.mark.parametrize('num_rounds', [unit_param(1),
unit_param(5),
quality_param(50),
stress_param(90)])
@pytest.mark.parametrize('n_classes', [2, 5, 25])
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_fil_classification(n_rows, n_columns, num_rounds,
n_classes, tmp_path):
# settings
    classification = True  # this test exercises classification
random_state = np.random.RandomState(43210)
X, y = simulate_data(n_rows, n_columns, n_classes,
random_state=random_state,
classification=classification)
# identify shape and indices
n_rows, n_columns = X.shape
train_size = 0.80
X_train, X_validation, y_train, y_validation = train_test_split(
X, y, train_size=train_size, random_state=0)
model_path = os.path.join(tmp_path, 'xgb_class.model')
bst = _build_and_save_xgboost(model_path, X_train, y_train,
num_rounds=num_rounds,
classification=classification,
n_classes=n_classes)
dvalidation = xgb.DMatrix(X_validation, label=y_validation)
if n_classes == 2:
xgb_preds = bst.predict(dvalidation)
xgb_preds_int = np.around(xgb_preds)
xgb_proba = np.stack([1-xgb_preds, xgb_preds], axis=1)
else:
xgb_proba = bst.predict(dvalidation)
xgb_preds_int = xgb_proba.argmax(axis=1)
xgb_acc = accuracy_score(y_validation, xgb_preds_int)
fm = ForestInference.load(model_path,
algo='auto',
output_class=True,
threshold=0.50)
fil_preds = np.asarray(fm.predict(X_validation))
fil_proba = np.asarray(fm.predict_proba(X_validation))
fil_acc = accuracy_score(y_validation, fil_preds)
assert fil_acc == pytest.approx(xgb_acc, abs=0.01)
assert array_equal(fil_preds, xgb_preds_int)
np.testing.assert_allclose(fil_proba, xgb_proba,
atol=proba_atol[n_classes > 2])
@pytest.mark.parametrize('n_rows', [unit_param(1000), quality_param(10000),
stress_param(500000)])
@pytest.mark.parametrize('n_columns', [unit_param(20), quality_param(100),
stress_param(1000)])
@pytest.mark.parametrize('num_rounds', [unit_param(5), quality_param(10),
stress_param(90)])
@pytest.mark.parametrize('max_depth', [unit_param(3),
unit_param(7),
stress_param(11)])
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_fil_regression(n_rows, n_columns, num_rounds, tmp_path, max_depth):
# settings
    classification = False  # this test exercises regression
    n_rows = n_rows  # number of rows from the parametrized fixture
    n_columns = n_columns
random_state = np.random.RandomState(43210)
X, y = simulate_data(n_rows, n_columns,
random_state=random_state,
classification=classification, bias=10.0)
# identify shape and indices
n_rows, n_columns = X.shape
train_size = 0.80
X_train, X_validation, y_train, y_validation = train_test_split(
X, y, train_size=train_size, random_state=0)
model_path = os.path.join(tmp_path, 'xgb_reg.model')
bst = _build_and_save_xgboost(model_path, X_train,
y_train,
classification=classification,
num_rounds=num_rounds,
xgboost_params={'max_depth': max_depth})
dvalidation = xgb.DMatrix(X_validation, label=y_validation)
xgb_preds = bst.predict(dvalidation)
xgb_mse = mean_squared_error(y_validation, xgb_preds)
fm = ForestInference.load(model_path,
algo='auto',
output_class=False)
fil_preds = np.asarray(fm.predict(X_validation))
fil_preds = np.reshape(fil_preds, np.shape(xgb_preds))
fil_mse = mean_squared_error(y_validation, fil_preds)
assert fil_mse == pytest.approx(xgb_mse, abs=0.01)
assert np.allclose(fil_preds, xgb_preds, 1e-3)
@pytest.mark.parametrize('n_rows', [1000])
@pytest.mark.parametrize('n_columns', [30])
# Skip depth 20 for dense tests
@pytest.mark.parametrize('max_depth,storage_type',
[(2, False), (2, True), (10, False), (10, True),
(20, True)])
# FIL not supporting multi-class sklearn RandomForestClassifiers
# When n_classes=25, fit a single estimator only to reduce test time
@pytest.mark.parametrize('n_classes,model_class,n_estimators',
[(2, GradientBoostingClassifier, 1),
(2, GradientBoostingClassifier, 10),
(2, RandomForestClassifier, 1),
(2, RandomForestClassifier, 10),
(2, ExtraTreesClassifier, 1),
(2, ExtraTreesClassifier, 10),
(5, GradientBoostingClassifier, 1),
(5, GradientBoostingClassifier, 10),
(25, GradientBoostingClassifier, 1)])
def test_fil_skl_classification(n_rows, n_columns, n_estimators, max_depth,
n_classes, storage_type, model_class):
# settings
    classification = True  # this test exercises classification
random_state = np.random.RandomState(43210)
X, y = simulate_data(n_rows, n_columns, n_classes,
random_state=random_state,
classification=classification)
# identify shape and indices
train_size = 0.80
X_train, X_validation, y_train, y_validation = train_test_split(
X, y, train_size=train_size, random_state=0)
init_kwargs = {
'n_estimators': n_estimators,
'max_depth': max_depth,
}
if model_class in [RandomForestClassifier, ExtraTreesClassifier]:
init_kwargs['max_features'] = 0.3
init_kwargs['n_jobs'] = -1
else:
# model_class == GradientBoostingClassifier
init_kwargs['init'] = 'zero'
skl_model = model_class(**init_kwargs, random_state=random_state)
skl_model.fit(X_train, y_train)
skl_preds = skl_model.predict(X_validation)
skl_preds_int = np.around(skl_preds)
skl_proba = skl_model.predict_proba(X_validation)
skl_acc = accuracy_score(y_validation, skl_preds_int)
algo = 'NAIVE' if storage_type else 'BATCH_TREE_REORG'
fm = ForestInference.load_from_sklearn(skl_model,
algo=algo,
output_class=True,
threshold=0.50,
storage_type=storage_type)
fil_preds = np.asarray(fm.predict(X_validation))
fil_preds = np.reshape(fil_preds, np.shape(skl_preds_int))
fil_acc = accuracy_score(y_validation, fil_preds)
# fil_acc is within p99 error bars of skl_acc (diff == 0.017 +- 0.012)
# however, some tests have a delta as big as 0.04.
# sklearn uses float64 thresholds, while FIL uses float32
# TODO(levsnv): once FIL supports float64 accuracy, revisit thresholds
threshold = 1e-5 if n_classes == 2 else 0.1
assert fil_acc == pytest.approx(skl_acc, abs=threshold)
if n_classes == 2:
assert array_equal(fil_preds, skl_preds_int)
fil_proba = np.asarray(fm.predict_proba(X_validation))
fil_proba = np.reshape(fil_proba, np.shape(skl_proba))
np.testing.assert_allclose(fil_proba, skl_proba,
atol=proba_atol[n_classes > 2])
@pytest.mark.parametrize('n_rows', [1000])
@pytest.mark.parametrize('n_columns', [20])
@pytest.mark.parametrize('n_classes,model_class,n_estimators',
[(1, GradientBoostingRegressor, 1),
(1, GradientBoostingRegressor, 10),
(1, RandomForestRegressor, 1),
(1, RandomForestRegressor, 10),
(1, ExtraTreesRegressor, 1),
(1, ExtraTreesRegressor, 10),
(5, GradientBoostingRegressor, 10)])
@pytest.mark.parametrize('max_depth', [2, 10, 20])
@pytest.mark.parametrize('storage_type', [False, True])
def test_fil_skl_regression(n_rows, n_columns, n_classes, model_class,
n_estimators, max_depth, storage_type):
# skip depth 20 for dense tests
if max_depth == 20 and not storage_type:
return
# settings
random_state = np.random.RandomState(43210)
X, y = simulate_data(n_rows, n_columns, n_classes,
random_state=random_state,
classification=False)
# identify shape and indices
train_size = 0.80
X_train, X_validation, y_train, y_validation = train_test_split(
X, y, train_size=train_size, random_state=0)
init_kwargs = {
'n_estimators': n_estimators,
'max_depth': max_depth,
}
if model_class in [RandomForestRegressor, ExtraTreesRegressor]:
init_kwargs['max_features'] = 0.3
init_kwargs['n_jobs'] = -1
else:
# model_class == GradientBoostingRegressor
init_kwargs['init'] = 'zero'
skl_model = model_class(**init_kwargs)
skl_model.fit(X_train, y_train)
skl_preds = skl_model.predict(X_validation)
skl_mse = mean_squared_error(y_validation, skl_preds)
algo = 'NAIVE' if storage_type else 'BATCH_TREE_REORG'
fm = ForestInference.load_from_sklearn(skl_model,
algo=algo,
output_class=False,
storage_type=storage_type)
fil_preds = np.asarray(fm.predict(X_validation))
fil_preds = np.reshape(fil_preds, np.shape(skl_preds))
fil_mse = mean_squared_error(y_validation, fil_preds)
assert fil_mse <= skl_mse * (1. + 1e-6) + 1e-4
assert np.allclose(fil_preds, skl_preds, 1.2e-3)
@pytest.fixture(scope="session")
def small_classifier_and_preds(tmpdir_factory):
X, y = simulate_data(500, 10,
random_state=43210,
classification=True)
model_path = str(tmpdir_factory.mktemp("models").join("small_class.model"))
bst = _build_and_save_xgboost(model_path, X, y)
# just do within-sample since it's not an accuracy test
dtrain = xgb.DMatrix(X, label=y)
xgb_preds = bst.predict(dtrain)
return (model_path, X, xgb_preds)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize('algo', ['AUTO', 'NAIVE', 'TREE_REORG',
'BATCH_TREE_REORG',
'auto', 'naive', 'tree_reorg',
'batch_tree_reorg'])
def test_output_algos(algo, small_classifier_and_preds):
model_path, X, xgb_preds = small_classifier_and_preds
fm = ForestInference.load(model_path,
algo=algo,
output_class=True,
threshold=0.50)
xgb_preds_int = np.around(xgb_preds)
fil_preds = np.asarray(fm.predict(X))
fil_preds = np.reshape(fil_preds, np.shape(xgb_preds_int))
assert np.allclose(fil_preds, xgb_preds_int, 1e-3)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize('storage_type',
[False, True, 'auto', 'dense', 'sparse', 'sparse8'])
def test_output_storage_type(storage_type, small_classifier_and_preds):
model_path, X, xgb_preds = small_classifier_and_preds
fm = ForestInference.load(model_path,
output_class=True,
storage_type=storage_type,
threshold=0.50)
xgb_preds_int = np.around(xgb_preds)
fil_preds = np.asarray(fm.predict(X))
fil_preds = np.reshape(fil_preds, np.shape(xgb_preds_int))
assert np.allclose(fil_preds, xgb_preds_int, 1e-3)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize('storage_type', ['dense', 'sparse'])
@pytest.mark.parametrize('blocks_per_sm', [1, 2, 3, 4])
def test_output_blocks_per_sm(storage_type, blocks_per_sm,
small_classifier_and_preds):
model_path, X, xgb_preds = small_classifier_and_preds
fm = ForestInference.load(model_path,
output_class=True,
storage_type=storage_type,
threshold=0.50,
blocks_per_sm=blocks_per_sm)
xgb_preds_int = np.around(xgb_preds)
fil_preds = np.asarray(fm.predict(X))
fil_preds = np.reshape(fil_preds, np.shape(xgb_preds_int))
assert np.allclose(fil_preds, xgb_preds_int, 1e-3)
@pytest.mark.parametrize('output_class', [True, False])
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_thresholding(output_class, small_classifier_and_preds):
model_path, X, xgb_preds = small_classifier_and_preds
fm = ForestInference.load(model_path,
algo='TREE_REORG',
output_class=output_class,
threshold=0.50)
fil_preds = np.asarray(fm.predict(X))
if output_class:
assert ((fil_preds != 0.0) & (fil_preds != 1.0)).sum() == 0
else:
assert ((fil_preds != 0.0) & (fil_preds != 1.0)).sum() > 0
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_output_args(small_classifier_and_preds):
model_path, X, xgb_preds = small_classifier_and_preds
fm = ForestInference.load(model_path,
algo='TREE_REORG',
output_class=False,
threshold=0.50)
X = np.asarray(X)
fil_preds = fm.predict(X)
fil_preds = np.reshape(fil_preds, np.shape(xgb_preds))
assert array_equal(fil_preds, xgb_preds, 1e-3)
@pytest.mark.parametrize('num_classes', [2, 5])
@pytest.mark.skipif(has_lightgbm() is False, reason="need to install lightgbm")
def test_lightgbm(tmp_path, num_classes):
import lightgbm as lgb
X, y = simulate_data(500,
10 if num_classes == 2 else 50,
num_classes,
random_state=43210,
classification=True)
train_data = lgb.Dataset(X, label=y)
num_round = 5
model_path = str(os.path.join(tmp_path, 'lgb.model'))
if num_classes == 2:
param = {'objective': 'binary',
'metric': 'binary_logloss',
'num_class': 1}
bst = lgb.train(param, train_data, num_round)
bst.save_model(model_path)
fm = ForestInference.load(model_path,
algo='TREE_REORG',
output_class=True,
model_type="lightgbm")
# binary classification
gbm_proba = bst.predict(X)
fil_proba = fm.predict_proba(X)[:, 1]
gbm_preds = (gbm_proba > 0.5)
fil_preds = fm.predict(X)
assert array_equal(gbm_preds, fil_preds)
np.testing.assert_allclose(gbm_proba, fil_proba,
atol=proba_atol[num_classes > 2])
else:
# multi-class classification
lgm = lgb.LGBMClassifier(objective='multiclass',
boosting_type='gbdt',
n_estimators=num_round)
lgm.fit(X, y)
lgm.booster_.save_model(model_path)
fm = ForestInference.load(model_path,
algo='TREE_REORG',
output_class=True,
model_type="lightgbm")
lgm_preds = lgm.predict(X)
assert array_equal(lgm.booster_.predict(X).argmax(axis=1), lgm_preds)
assert array_equal(lgm_preds, fm.predict(X))
# lightgbm uses float64 thresholds, while FIL uses float32
np.testing.assert_allclose(lgm.predict_proba(X), fm.predict_proba(X),
atol=proba_atol[num_classes > 2])
| 41.290061
| 79
| 0.599283
|
58d6499fe5f83790af723707e91fcfdbf8d51384
| 2,018
|
py
|
Python
|
tests/integration/scollection_lazy_search_test.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | null | null | null |
tests/integration/scollection_lazy_search_test.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | 1
|
2021-06-02T04:21:17.000Z
|
2021-06-02T04:21:17.000Z
|
tests/integration/scollection_lazy_search_test.py
|
vkarpenko/selene
|
4776357430c940be38f38be9981006dd156f9730
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selene import config
from selene.common.none_object import NoneObject
from selene.driver import SeleneDriver
from tests.acceptance.helpers.helper import get_test_driver
from tests.integration.helpers.givenpage import GivenPage
__author__ = 'yashaka'
driver = NoneObject('driver') # type: SeleneDriver
GIVEN_PAGE = NoneObject('GivenPage') # type: GivenPage
WHEN = GIVEN_PAGE # type: GivenPage
original_timeout = config.timeout
def setup_module(m):
global driver
driver = SeleneDriver.wrap(get_test_driver())
global GIVEN_PAGE
GIVEN_PAGE = GivenPage(driver)
global WHEN
WHEN = GIVEN_PAGE
def teardown_module(m):
driver.quit()
def setup_function(fn):
global original_timeout
config.timeout = original_timeout
def test_search_is_lazy_and_does_not_start_on_creation():
GIVEN_PAGE.opened_empty()
non_existent_collection = driver.all('.not-existing')
assert str(non_existent_collection)
def test_search_is_postponed_until_actual_action_like_questioning_count():
GIVEN_PAGE.opened_empty()
elements = driver.all('.will-appear')
WHEN.load_body('''
<ul>Hello to:
<li class='will-appear'>Bob</li>
<li class='will-appear'>Kate</li>
</ul>''')
assert len(elements) == 2
def test_search_is_updated_on_next_actual_action_like_questioning_count():
GIVEN_PAGE.opened_empty()
elements = driver.all('.will-appear')
WHEN.load_body('''
<ul>Hello to:
<li class='will-appear'>Bob</li>
<li class='will-appear'>Kate</li>
</ul>''')
assert len(elements) == 2
WHEN.load_body('''
<ul>Hello to:
<li class='will-appear'>Bob</li>
<li class='will-appear'>Kate</li>
<li class='will-appear'>Joe</li>
</ul>''')
assert len(elements) == 3
| 28.828571
| 74
| 0.635778
|
29dba1d8340f76e670b37a695d97e000b06b77de
| 125
|
py
|
Python
|
Curso/ExMundo1/Ex008MetrosCent.py
|
DavidBitner/Aprendizado-Python
|
e1dcf18f9473c697fc2302f34a2d3e025ca6c969
|
[
"MIT"
] | null | null | null |
Curso/ExMundo1/Ex008MetrosCent.py
|
DavidBitner/Aprendizado-Python
|
e1dcf18f9473c697fc2302f34a2d3e025ca6c969
|
[
"MIT"
] | null | null | null |
Curso/ExMundo1/Ex008MetrosCent.py
|
DavidBitner/Aprendizado-Python
|
e1dcf18f9473c697fc2302f34a2d3e025ca6c969
|
[
"MIT"
] | null | null | null |
n = int(input('Enter the number of meters: '))
print('{} meters are {} centimeters and {} millimeters'.format(n, n*100, n*1000))
| 41.666667
| 78
| 0.656
|
eece83d859fe01f8a6b7c04691c86b5dc45f0969
| 4,630
|
py
|
Python
|
explorer/exporters.py
|
worthwhile/django-sql-explorer
|
394778be99278f2aedbed4bea6b6ecb30b928330
|
[
"MIT"
] | null | null | null |
explorer/exporters.py
|
worthwhile/django-sql-explorer
|
394778be99278f2aedbed4bea6b6ecb30b928330
|
[
"MIT"
] | null | null | null |
explorer/exporters.py
|
worthwhile/django-sql-explorer
|
394778be99278f2aedbed4bea6b6ecb30b928330
|
[
"MIT"
] | null | null | null |
from django.db import DatabaseError
from django.core.serializers.json import DjangoJSONEncoder
import json
import string
import sys
from datetime import datetime
PY3 = sys.version_info[0] == 3
if PY3:
import csv
else:
import unicodecsv as csv
from django.utils.module_loading import import_string
from . import app_settings
from six import StringIO, BytesIO
def get_exporter_class(format):
class_str = dict(getattr(app_settings, 'EXPLORER_DATA_EXPORTERS'))[format]
return import_string(class_str)
class BaseExporter(object):
name = ''
content_type = ''
file_extension = ''
def __init__(self, query):
self.query = query
def get_output(self, **kwargs):
value = self.get_file_output(**kwargs).getvalue()
if PY3:
return value
else:
return str(value)
def get_file_output(self, **kwargs):
try:
res = self.query.execute_query_only()
return self._get_output(res, **kwargs)
except DatabaseError as e:
return StringIO(str(e))
def _get_output(self, res, **kwargs):
"""
:param res: QueryResult
:param kwargs: Optional. Any exporter-specific arguments.
:return: File-like object
"""
raise NotImplementedError
def get_filename(self):
# build list of valid chars, build filename from title and replace spaces
valid_chars = '-_.() %s%s' % (string.ascii_letters, string.digits)
filename = ''.join(c for c in self.query.title if c in valid_chars)
filename = filename.replace(' ', '_')
return '{}{}'.format(filename, self.file_extension)
class CSVExporter(BaseExporter):
name = 'CSV'
content_type = 'text/csv'
file_extension = '.csv'
def _get_output(self, res, **kwargs):
delim = kwargs.get('delim') or app_settings.CSV_DELIMETER
delim = '\t' if delim == 'tab' else str(delim)
delim = app_settings.CSV_DELIMETER if len(delim) > 1 else delim
csv_data = StringIO()
if PY3:
writer = csv.writer(csv_data, delimiter=delim)
else:
writer = csv.writer(csv_data, delimiter=delim, encoding='utf-8')
writer.writerow(res.headers)
for row in res.data:
writer.writerow([s for s in row])
return csv_data
class JSONExporter(BaseExporter):
name = 'JSON'
content_type = 'application/json'
file_extension = '.json'
def _get_output(self, res, **kwargs):
data = []
for row in res.data:
data.append(
dict(zip([str(h) if h is not None else '' for h in res.headers], row))
)
json_data = json.dumps(data, cls=DjangoJSONEncoder)
return StringIO(json_data)
class ExcelExporter(BaseExporter):
name = 'Excel'
content_type = 'application/vnd.ms-excel'
file_extension = '.xlsx'
def _get_output(self, res, **kwargs):
import xlsxwriter
output = BytesIO()
wb = xlsxwriter.Workbook(output)
        # XLSX writer won't allow sheet names > 31 characters
# https://github.com/jmcnamara/XlsxWriter/blob/master/xlsxwriter/test/workbook/test_check_sheetname.py
title = self.query.title[:31]
ws = wb.add_worksheet(name=title)
# Write headers
row = 0
col = 0
header_style = wb.add_format({'bold': True})
for header in res.header_strings:
ws.write(row, col, header, header_style)
col += 1
# Write data
row = 1
col = 0
for data_row in res.data:
for data in data_row:
# xlsxwriter can't handle timezone-aware datetimes, so we help out here and just cast it to a string
if isinstance(data, datetime):
data = str(data)
# JSON and Array fields
if isinstance(data, dict) or isinstance(data, list):
data = json.dumps(data)
ws.write(row, col, data)
col += 1
row += 1
col = 0
wb.close()
return output
class PdfExporter(BaseExporter):
name = 'PDF'
content_type = 'application/pdf'
file_extension = '.pdf'
def _get_output(self, res, **kwargs):
from django_xhtml2pdf.utils import generate_pdf
output = BytesIO()
ctx = {
'headers': res.header_strings,
'data': res.data,
}
result = generate_pdf('explorer/pdf_template.html', file_object=output, context=ctx)
return output
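# A minimal sketch of how these exporters are typically driven (the 'csv'
# format key is an assumption about EXPLORER_DATA_EXPORTERS; ``query`` is an
# explorer Query instance):
#
#     exporter = get_exporter_class('csv')(query)
#     payload = exporter.get_output(delim=';')
#     filename = exporter.get_filename()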
| 28.404908
| 116
| 0.602376
|
b6700053532f4b6e1140c1bf988bbfef17c32423
| 3,914
|
py
|
Python
|
tests/test_project/settings.py
|
resulto-admin/django-static-url
|
138799d6783b1ada0857a9e95c62f2ee40981462
|
[
"BSD-3-Clause"
] | 2
|
2016-11-16T19:16:06.000Z
|
2016-11-16T19:16:31.000Z
|
tests/test_project/settings.py
|
powergo/django-static-url
|
138799d6783b1ada0857a9e95c62f2ee40981462
|
[
"BSD-3-Clause"
] | 1
|
2016-11-16T13:32:23.000Z
|
2016-11-16T13:35:19.000Z
|
tests/test_project/settings.py
|
powergo/django-static-url
|
138799d6783b1ada0857a9e95c62f2ee40981462
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
# Import source code dir
# This ensures that we can import `django_static_url.xyz...` from within the tests/
# folder containing all the tests from the lib code.
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir, os.pardir))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = "abcdef123-"
DEBUG = True
IN_TEST = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "test",
"USER": "test",
"PASSWORD": "test",
"PORT": 5432,
"HOST": "127.0.0.1",
}
}
MIDDLEWARE_CLASSES = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
'django.contrib.messages.middleware.MessageMiddleware',
)
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
'django.contrib.messages',
"django.contrib.staticfiles",
"tests.test_app"
]
PASSWORD_HASHERS = (
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
"django.contrib.auth.hashers.BCryptPasswordHasher",
)
TEMPLATES = [
{
"BACKEND":
"django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
}
},
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s %(message)s"
},
"simple": {
"format": "%(levelname)s %(message)s"
},
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple"
},
},
"loggers": {
"django": {
"handlers": ["console"],
"level": "ERROR",
"propagate": False,
},
"django.db.backends": {
"handlers": ["console"],
"level": "ERROR",
"propagate": False,
},
"django.request": {
"handlers": ["console"],
"level": "ERROR",
"propagate": False,
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
TIME_ZONE = "America/Montreal"
USE_I18N = True
USE_L10N = True
USE_THOUSAND_SEPARATOR = True
USE_TZ = True
SITE_ID = 1
OVERRIDE_CURRENT_IP = None
# LANGUAGES
LANGUAGE_CODE = "en"
ugettext = lambda s: s  # dummy ugettext function, as django's docs say
LANGUAGES = (
("en", ugettext("English")),
("fr", ugettext("French")),
)
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
STATIC_ROOT = os.path.join(BASE_DIR, "static")
try:
if os.environ.get("CIRCLECI") == "true":
current_module = sys.modules[__name__]
from tests.test_project.circleci_settings import configure
configure(current_module)
except ImportError:
pass
| 24.616352
| 85
| 0.607818
|
0f004abb8e24c731a3090093183c98a2575d94e1
| 370
|
py
|
Python
|
ippon/cup_phase/serializers.py
|
morynicz/ippon_back
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
[
"MIT"
] | null | null | null |
ippon/cup_phase/serializers.py
|
morynicz/ippon_back
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
[
"MIT"
] | 13
|
2018-12-22T15:30:56.000Z
|
2022-03-12T00:22:31.000Z
|
ippon/cup_phase/serializers.py
|
morynicz/ippon_back
|
dce901bfc649c6f8efbbf0907654e0860606b3e3
|
[
"MIT"
] | 2
|
2019-06-01T11:28:23.000Z
|
2020-03-27T15:19:11.000Z
|
from rest_framework import serializers
import ippon.models
class CupPhaseSerializer(serializers.ModelSerializer):
class Meta:
model = ippon.models.cup_phase.CupPhase
fields = (
'id',
'tournament',
'name',
'fight_length',
'final_fight_length',
'number_of_positions'
)
| 21.764706
| 54
| 0.578378
|
f722d732521d717904d23e60794bd5ed17668867
| 3,488
|
py
|
Python
|
projects/anosql/test/python/conftest.py
|
arrdem/source
|
df9aae1253ed415ade3a2b59e8a0996ff659543d
|
[
"MIT"
] | 4
|
2021-08-17T15:47:38.000Z
|
2021-10-06T01:59:32.000Z
|
projects/anosql/test/python/conftest.py
|
arrdem/source
|
df9aae1253ed415ade3a2b59e8a0996ff659543d
|
[
"MIT"
] | 8
|
2021-08-14T17:47:08.000Z
|
2021-09-20T20:22:47.000Z
|
projects/anosql/test/python/conftest.py
|
arrdem/source
|
df9aae1253ed415ade3a2b59e8a0996ff659543d
|
[
"MIT"
] | 1
|
2021-10-09T21:24:35.000Z
|
2021-10-09T21:24:35.000Z
|
import csv
import os
import sqlite3
import pytest
BLOGDB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "blogdb")
USERS_DATA_PATH = os.path.join(BLOGDB_PATH, "data", "users_data.csv")
BLOGS_DATA_PATH = os.path.join(BLOGDB_PATH, "data", "blogs_data.csv")
def populate_sqlite3_db(db_path):
conn = sqlite3.connect(db_path)
cur = conn.cursor()
cur.executescript(
"""
create table users (
userid integer not null primary key,
username text not null,
firstname integer not null,
lastname text not null
);
create table blogs (
blogid integer not null primary key,
userid integer not null,
title text not null,
content text not null,
published date not null default CURRENT_DATE,
foreign key(userid) references users(userid)
);
"""
)
with open(USERS_DATA_PATH) as fp:
users = list(csv.reader(fp))
cur.executemany(
"""
insert into users (
username,
firstname,
lastname
) values (?, ?, ?);""",
users,
)
with open(BLOGS_DATA_PATH) as fp:
blogs = list(csv.reader(fp))
cur.executemany(
"""
insert into blogs (
userid,
title,
content,
published
) values (?, ?, ?, ?);""",
blogs,
)
conn.commit()
conn.close()
@pytest.fixture()
def sqlite3_db_path(tmpdir):
db_path = os.path.join(tmpdir.strpath, "blogdb.db")
populate_sqlite3_db(db_path)
return db_path
@pytest.fixture()
def sqlite3_conn(sqlite3_db_path):
conn = sqlite3.connect(sqlite3_db_path)
yield conn
conn.close()
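# A hedged example of a test consuming the fixture above (it assumes the CSV
# fixture files contain at least one row):
#
#     def test_users_table_is_populated(sqlite3_conn):
#         cur = sqlite3_conn.cursor()
#         (count,) = cur.execute("select count(*) from users").fetchone()
#         assert count > 0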
@pytest.fixture
def pg_conn(postgresql):
with postgresql:
# Loads data from blogdb fixture data
with postgresql.cursor() as cur:
cur.execute(
"""
create table users (
userid serial not null primary key,
username varchar(32) not null,
firstname varchar(255) not null,
lastname varchar(255) not null
);"""
)
cur.execute(
"""
create table blogs (
blogid serial not null primary key,
userid integer not null references users(userid),
title varchar(255) not null,
content text not null,
published date not null default CURRENT_DATE
);"""
)
with postgresql.cursor() as cur:
with open(USERS_DATA_PATH) as fp:
cur.copy_from(
fp, "users", sep=",", columns=["username", "firstname", "lastname"]
)
with open(BLOGS_DATA_PATH) as fp:
cur.copy_from(
fp,
"blogs",
sep=",",
columns=["userid", "title", "content", "published"],
)
return postgresql
@pytest.fixture()
def pg_dsn(pg_conn):
p = pg_conn.get_dsn_parameters()
return "postgres://{user}@{host}:{port}/{dbname}".format(**p)
| 28.357724
| 87
| 0.497133
|
b3a7ff3076c1287f5856378b985ff2247bda87b7
| 1,662
|
py
|
Python
|
tests/test_demand.py
|
EPAENERGYSTAR/epathermostat
|
98aaf571fe8e15e1a372567776081fd9dae7e872
|
[
"MIT"
] | 12
|
2017-03-08T23:17:44.000Z
|
2021-10-15T15:56:30.000Z
|
tests/test_demand.py
|
EPAENERGYSTAR/epathermostat
|
98aaf571fe8e15e1a372567776081fd9dae7e872
|
[
"MIT"
] | 23
|
2017-10-24T00:07:37.000Z
|
2021-07-15T14:42:31.000Z
|
tests/test_demand.py
|
EPAENERGYSTAR/epathermostat
|
98aaf571fe8e15e1a372567776081fd9dae7e872
|
[
"MIT"
] | 11
|
2017-03-22T22:35:30.000Z
|
2021-01-01T22:30:14.000Z
|
import pandas as pd
import numpy as np
from numpy.testing import assert_allclose
import pytest
from .fixtures.thermostats import thermostat_type_1
from .fixtures.thermostats import core_heating_day_set_type_1_entire as core_heating_day_set_type_1
from .fixtures.thermostats import core_cooling_day_set_type_1_entire as core_cooling_day_set_type_1
from .fixtures.thermostats import core_heating_day_set_type_1_empty
from .fixtures.thermostats import core_cooling_day_set_type_1_empty
from .fixtures.thermostats import metrics_type_1_data
RTOL = 1e-3
ATOL = 1e-3
def test_get_cooling_demand_dailyavgCTD_empty(thermostat_type_1, core_cooling_day_set_type_1_empty, metrics_type_1_data):
thermostat_type_1.get_cooling_demand(core_cooling_day_set_type_1_empty)
def test_get_cooling_demand_dailyavgHTD_empty(thermostat_type_1, core_heating_day_set_type_1_empty, metrics_type_1_data):
thermostat_type_1.get_heating_demand(core_heating_day_set_type_1_empty)
def test_get_cooling_demand(thermostat_type_1, core_cooling_day_set_type_1, metrics_type_1_data):
demand, tau_estimate, alpha_estimate, mse, rmse, cvrmse, mape, mae = \
thermostat_type_1.get_cooling_demand(core_cooling_day_set_type_1)
assert_allclose(demand.mean(), metrics_type_1_data[0]["mean_demand"], rtol=RTOL, atol=ATOL)
def test_get_heating_demand(thermostat_type_1, core_heating_day_set_type_1, metrics_type_1_data):
demand, tau_estimate, alpha_estimate, mse, rmse, cvrmse, mape, mae = \
thermostat_type_1.get_heating_demand(core_heating_day_set_type_1)
assert_allclose(demand.mean(), metrics_type_1_data[1]["mean_demand"], rtol=RTOL, atol=ATOL)
| 46.166667
| 121
| 0.847172
|
28e45871535d002b3a0bfdd3bab9b68bace2ac60
| 2,567
|
py
|
Python
|
piexif/_common.py
|
hugovk/Piexif
|
48ced13646e8e85cc0bf30be5874d644c28ccdd4
|
[
"MIT"
] | 2
|
2018-12-19T19:55:47.000Z
|
2019-09-27T12:26:10.000Z
|
piexif/_common.py
|
hugovk/Piexif
|
48ced13646e8e85cc0bf30be5874d644c28ccdd4
|
[
"MIT"
] | null | null | null |
piexif/_common.py
|
hugovk/Piexif
|
48ced13646e8e85cc0bf30be5874d644c28ccdd4
|
[
"MIT"
] | null | null | null |
import struct
from ._exceptions import InvalidImageDataError
def split_into_segments(data):
"""Slices JPEG meta data into a list from JPEG binary data.
"""
if data[0:2] != b"\xff\xd8":
raise InvalidImageDataError("Given data isn't JPEG.")
head = 2
segments = [b"\xff\xd8"]
while 1:
if data[head: head + 2] == b"\xff\xda":
segments.append(data[head:])
break
else:
length = struct.unpack(">H", data[head + 2: head + 4])[0]
endPoint = head + length + 2
seg = data[head: endPoint]
segments.append(seg)
head = endPoint
if (head >= len(data)):
raise InvalidImageDataError("Wrong JPEG data.")
return segments
def read_exif_from_file(filename):
"""Slices JPEG meta data into a list from JPEG binary data.
"""
f = open(filename, "rb")
data = f.read(6)
if data[0:2] != b"\xff\xd8":
raise InvalidImageDataError("Given data isn't JPEG.")
head = data[2:6]
HEAD_LENGTH = 4
exif = None
while 1:
length = struct.unpack(">H", head[2: 4])[0]
if head[:2] == b"\xff\xe1":
segment_data = f.read(length - 2)
if segment_data[:4] != b'Exif':
head = f.read(HEAD_LENGTH)
continue
exif = head + segment_data
break
elif head[0:1] == b"\xff":
f.read(length - 2)
head = f.read(HEAD_LENGTH)
else:
break
f.close()
return exif
def get_exif_seg(segments):
"""Returns Exif from JPEG meta data list
"""
for seg in segments:
if seg[0:2] == b"\xff\xe1" and seg[4:10] == b"Exif\x00\x00":
return seg
return None
def merge_segments(segments, exif=b""):
"""Merges Exif with APP0 and APP1 manipulations.
"""
if segments[1][0:2] == b"\xff\xe0" and \
segments[2][0:2] == b"\xff\xe1" and \
segments[2][4:10] == b"Exif\x00\x00":
if exif:
segments[2] = exif
segments.pop(1)
elif exif is None:
segments.pop(2)
else:
segments.pop(1)
elif segments[1][0:2] == b"\xff\xe0":
if exif:
segments[1] = exif
elif segments[1][0:2] == b"\xff\xe1" and \
segments[1][4:10] == b"Exif\x00\x00":
if exif:
segments[1] = exif
elif exif is None:
segments.pop(1)
else:
if exif:
segments.insert(1, exif)
return b"".join(segments)
| 27.021053
| 69
| 0.523568
|
5bbaaef7644c49cab0114822fa0e477de955e618
| 564
|
py
|
Python
|
applications/tenant/managers.py
|
dev-easyshares/mighty
|
a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b
|
[
"MIT"
] | null | null | null |
applications/tenant/managers.py
|
dev-easyshares/mighty
|
a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b
|
[
"MIT"
] | 1
|
2022-03-12T00:57:37.000Z
|
2022-03-12T00:57:37.000Z
|
applications/tenant/managers.py
|
dev-easyshares/mighty
|
a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b
|
[
"MIT"
] | null | null | null |
from django.db import models
class RoleManager(models.Manager.from_queryset(models.QuerySet)):
def get_queryset(self):
return super().get_queryset()\
.annotate(sql_count=models.Count('roles_tenant'))
Selected_related = ('group', 'user', 'invitation')
Prefetch_related = ('roles',)
class TenantManager(models.Manager.from_queryset(models.QuerySet)):
def get_queryset(self):
return super().get_queryset()\
.select_related(*Selected_related)\
.prefetch_related(*Prefetch_related)\
.annotate()
| 35.25
| 67
| 0.687943
|
37a7f7897f0812ad348057e44847ce1be01b61f1
| 3,039
|
py
|
Python
|
benchmark_ofa_stereo.py
|
blackjack2015/once-for-all
|
f87e61ef7ad94e39bae01438b90d5d3ad43986c1
|
[
"Apache-2.0"
] | null | null | null |
benchmark_ofa_stereo.py
|
blackjack2015/once-for-all
|
f87e61ef7ad94e39bae01438b90d5d3ad43986c1
|
[
"Apache-2.0"
] | null | null | null |
benchmark_ofa_stereo.py
|
blackjack2015/once-for-all
|
f87e61ef7ad94e39bae01438b90d5d3ad43986c1
|
[
"Apache-2.0"
] | null | null | null |
# Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
import os
import torch
import argparse
from ofa.stereo_matching.data_providers.stereo import StereoDataProvider
from ofa.stereo_matching.run_manager import StereoRunConfig, RunManager
from ofa.stereo_matching.elastic_nn.networks.ofa_aanet import OFAAANet
from ofa.stereo_matching.elastic_nn.training.progressive_shrinking import load_models
import numpy as np
from ofa.utils.pytorch_utils import get_net_info
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gpu',
help='The gpu(s) to use',
type=str,
default='0')
parser.add_argument(
'-n',
'--net',
metavar='OFAAANet',
default='ofa_aanet',
choices=['ofa_aanet_d234_e346_k357_w1.0',
'ofa_aanet'],
help='OFA AANet networks')
args = parser.parse_args()
if args.gpu == 'all':
device_list = range(torch.cuda.device_count())
args.gpu = ','.join(str(_) for _ in device_list)
else:
device_list = [int(_) for _ in args.gpu.split(',')]
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
ofa_network = OFAAANet(ks_list=[3,5,7], expand_ratio_list=[2,4,6,8], depth_list=[2,3,4], scale_list=[2,3,4])
model_file = 'ofa_stereo_checkpoints/ofa_stereo_D234_E2468_K357_S4'
init = torch.load(model_file, map_location='cpu')
model_dict = init['state_dict']
ofa_network.load_state_dict(model_dict)
""" Randomly sample a sub-network,
you can also manually set the sub-network using:
ofa_network.set_active_subnet(ks=7, e=6, d=4)
"""
#ofa_network.sample_active_subnet()
#ofa_network.set_max_net()
d = 4
e = 8
ks = 7
s = 4
ofa_network.set_active_subnet(ks=ks, d=d, e=e, s=s)
subnet = ofa_network.get_active_subnet(preserve_weight=True)
#subnet = ofa_network
save_path = "ofa_stereo_checkpoints/aanet_D%d_E%d_K%d_S%d" % (d, e, ks, s)
torch.save(subnet.state_dict(), save_path)
net = subnet
net.eval()
net = net.cuda()
#net = net.get_tensorrt_model()
#torch.save(net.state_dict(), 'models/mobilefadnet_trt.pth')
get_net_info(net, input_shape=(3, 540, 960))
# fake input data
dummy_left = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
dummy_right = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
# INIT LOGGERS
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
repetitions = 30
timings=np.zeros((repetitions,1))
#GPU-WARM-UP
for _ in range(10):
_ = net(dummy_left, dummy_right)
# MEASURE PERFORMANCE
with torch.no_grad():
for rep in range(-3, repetitions):
starter.record()
_ = net(dummy_left, dummy_right)
ender.record()
# WAIT FOR GPU SYNC
torch.cuda.synchronize()
if rep >= 0:
curr_time = starter.elapsed_time(ender)
timings[rep] = curr_time
print(rep, curr_time)
mean_syn = np.sum(timings) / repetitions
std_syn = np.std(timings)
print(mean_syn)
| 30.69697
| 108
| 0.716025
|
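The warm-up plus CUDA-event timing loop above is a reusable measurement pattern; below is a self-contained sketch of the same idea applied to a toy convolution instead of the OFA stereo network (sizes and repetition counts are arbitrary).
# Standalone sketch of the CUDA-event benchmarking pattern used above (toy module, arbitrary sizes).
import numpy as np
import torch

def benchmark_gpu(module, inputs, warmup=10, repetitions=30):
    """Returns (mean_ms, std_ms) for module(*inputs), timed with CUDA events."""
    starter = torch.cuda.Event(enable_timing=True)
    ender = torch.cuda.Event(enable_timing=True)
    timings = np.zeros(repetitions)
    with torch.no_grad():
        for _ in range(warmup):              # GPU warm-up, results discarded
            module(*inputs)
        for rep in range(repetitions):
            starter.record()
            module(*inputs)
            ender.record()
            torch.cuda.synchronize()         # wait for the kernels to finish
            timings[rep] = starter.elapsed_time(ender)
    return timings.mean(), timings.std()

if torch.cuda.is_available():
    toy_net = torch.nn.Conv2d(3, 16, 3).cuda().eval()
    x = torch.randn(1, 3, 224, 224, device="cuda")
    print(benchmark_gpu(toy_net, (x,)))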
e56c1fa82d45e61d9ce12da1bd0b9f59f27acbca
| 10,453
|
py
|
Python
|
tensorboard/plugins/mesh/mesh_plugin_test.py
|
BearerPipelineTest/tensorboard
|
0fa03a9a8309dc137a15645c931e8b625bc3869c
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/plugins/mesh/mesh_plugin_test.py
|
BearerPipelineTest/tensorboard
|
0fa03a9a8309dc137a15645c931e8b625bc3869c
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/plugins/mesh/mesh_plugin_test.py
|
BearerPipelineTest/tensorboard
|
0fa03a9a8309dc137a15645c931e8b625bc3869c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the Tensorboard mesh plugin."""
import collections.abc
import os
import shutil
import numpy as np
import tensorflow as tf
import time
from unittest import mock
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard.backend import application
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.plugins import base_plugin
from tensorboard.plugins.mesh import mesh_plugin
from tensorboard.plugins.mesh import summary
from tensorboard.plugins.mesh import plugin_data_pb2
from tensorboard.plugins.mesh import test_utils
from tensorboard.util import test_util as tensorboard_test_util
class MeshPluginTest(tf.test.TestCase):
"""Tests for mesh plugin server."""
def setUp(self):
# We use numpy.random to generate meshes. We seed to avoid non-determinism
# in this test.
np.random.seed(17)
# Log dir to save temp events into.
self.log_dir = self.get_temp_dir()
# Create mesh summary.
with tf.compat.v1.Graph().as_default():
tf_placeholder = tf.compat.v1.placeholder
sess = tf.compat.v1.Session()
point_cloud = test_utils.get_random_mesh(1000)
point_cloud_vertices = tf_placeholder(
tf.float32, point_cloud.vertices.shape
)
mesh_no_color = test_utils.get_random_mesh(2000, add_faces=True)
mesh_no_color_extended = test_utils.get_random_mesh(
2500, add_faces=True
)
mesh_no_color_vertices = tf_placeholder(tf.float32, [1, None, 3])
mesh_no_color_faces = tf_placeholder(tf.int32, [1, None, 3])
mesh_color = test_utils.get_random_mesh(
3000, add_faces=True, add_colors=True
)
mesh_color_vertices = tf_placeholder(
tf.float32, mesh_color.vertices.shape
)
mesh_color_faces = tf_placeholder(tf.int32, mesh_color.faces.shape)
mesh_color_colors = tf_placeholder(
tf.uint8, mesh_color.colors.shape
)
self.data = [
point_cloud,
mesh_no_color,
mesh_no_color_extended,
mesh_color,
]
# In case when name is present and display_name is not, we will reuse name
# as display_name. Summaries below intended to test both cases.
self.names = ["point_cloud", "mesh_no_color", "mesh_color"]
summary.op(
self.names[0],
point_cloud_vertices,
description="just point cloud",
)
summary.op(
self.names[1],
mesh_no_color_vertices,
faces=mesh_no_color_faces,
display_name="name_to_display_in_ui",
description="beautiful mesh in grayscale",
)
summary.op(
self.names[2],
mesh_color_vertices,
faces=mesh_color_faces,
colors=mesh_color_colors,
description="mesh with random colors",
)
merged_summary_op = tf.compat.v1.summary.merge_all()
self.runs = ["bar"]
self.steps = 20
bar_directory = os.path.join(self.log_dir, self.runs[0])
with tensorboard_test_util.FileWriterCache.get(
bar_directory
) as writer:
writer.add_graph(sess.graph)
for step in range(self.steps):
# Alternate between two random meshes with different number of
# vertices.
no_color = (
mesh_no_color
if step % 2 == 0
else mesh_no_color_extended
)
with mock.patch.object(time, "time", return_value=step):
writer.add_summary(
sess.run(
merged_summary_op,
feed_dict={
point_cloud_vertices: point_cloud.vertices,
mesh_no_color_vertices: no_color.vertices,
mesh_no_color_faces: no_color.faces,
mesh_color_vertices: mesh_color.vertices,
mesh_color_faces: mesh_color.faces,
mesh_color_colors: mesh_color.colors,
},
),
global_step=step,
)
# Start a server that will receive requests.
multiplexer = event_multiplexer.EventMultiplexer(
{
"bar": bar_directory,
}
)
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.log_dir
)
self.context = base_plugin.TBContext(
logdir=self.log_dir, data_provider=provider
)
self.plugin = mesh_plugin.MeshPlugin(self.context)
# Wait until after plugin construction to reload the multiplexer because the
# plugin caches data from the multiplexer upon construction and this affects
# logic tested later down.
# TODO(https://github.com/tensorflow/tensorboard/issues/2579): Eliminate the
# caching of data at construction time and move this Reload() up to just
# after the multiplexer is created.
multiplexer.Reload()
wsgi_app = application.TensorBoardWSGI([self.plugin])
self.server = werkzeug_test.Client(wsgi_app, wrappers.Response)
self.routes = self.plugin.get_plugin_apps()
def tearDown(self):
shutil.rmtree(self.log_dir, ignore_errors=True)
def testRoutes(self):
"""Tests that the /tags route offers the correct run to tag mapping."""
self.assertIsInstance(self.routes["/tags"], collections.abc.Callable)
self.assertIsInstance(self.routes["/meshes"], collections.abc.Callable)
self.assertIsInstance(self.routes["/data"], collections.abc.Callable)
def testTagsRoute(self):
"""Tests that the /tags route offers the correct run to tag mapping."""
response = self.server.get("/data/plugin/mesh/tags")
self.assertEqual(200, response.status_code)
tags = test_utils.deserialize_json_response(response.get_data())
self.assertIn(self.runs[0], tags)
for name in self.names:
self.assertIn(name, tags[self.runs[0]])
def validate_data_response(
self, run, tag, sample, content_type, dtype, ground_truth_data, step=0
):
"""Makes request and checks that response has expected data."""
response = self.server.get(
"/data/plugin/mesh/data?run=%s&tag=%s&sample=%d&content_type="
"%s&step=%d" % (run, tag, sample, content_type, step)
)
self.assertEqual(200, response.status_code)
data = test_utils.deserialize_array_buffer_response(
next(response.response), dtype
)
self.assertEqual(ground_truth_data.reshape(-1).tolist(), data.tolist())
def testDataRoute(self):
"""Tests that the /data route returns correct data for meshes."""
self.validate_data_response(
self.runs[0],
self.names[0],
0,
"VERTEX",
np.float32,
self.data[0].vertices,
)
self.validate_data_response(
self.runs[0], self.names[1], 0, "FACE", np.int32, self.data[1].faces
)
# Validate that the same summary has mesh with different number of faces at
# different step=1.
self.validate_data_response(
self.runs[0],
self.names[1],
0,
"FACE",
np.int32,
self.data[2].faces,
step=1,
)
self.validate_data_response(
self.runs[0],
self.names[2],
0,
"COLOR",
np.uint8,
self.data[3].colors,
)
def testMetadataRoute(self):
"""Tests that the /meshes route returns correct metadata for meshes."""
response = self.server.get(
"/data/plugin/mesh/meshes?run=%s&tag=%s&sample=%d"
% (self.runs[0], self.names[0], 0)
)
self.assertEqual(200, response.status_code)
metadata = test_utils.deserialize_json_response(response.get_data())
self.assertEqual(len(metadata), self.steps)
self.assertAllEqual(
metadata[0]["content_type"], plugin_data_pb2.MeshPluginData.VERTEX
)
self.assertAllEqual(
metadata[0]["data_shape"], self.data[0].vertices.shape
)
def testsEventsAlwaysSortedByStep(self):
"""Tests that events always sorted by step."""
response = self.server.get(
"/data/plugin/mesh/meshes?run=%s&tag=%s&sample=%d"
% (self.runs[0], self.names[1], 0)
)
self.assertEqual(200, response.status_code)
metadata = test_utils.deserialize_json_response(response.get_data())
for i in range(1, self.steps):
# Step will be equal when two tensors of different content type
# belong to the same mesh.
self.assertLessEqual(metadata[i - 1]["step"], metadata[i]["step"])
def testIsActive(self):
self.assertFalse(self.plugin.is_active())
if __name__ == "__main__":
tf.test.main()
| 39.149813
| 86
| 0.589687
|
916e021584a861ccf9813ff26a1189894bb98ed2
| 2,309
|
py
|
Python
|
{{cookiecutter.git_project_name}}/tests/test_{{cookiecutter.project_slug}}.py
|
imAsparky/cookiecutter-py3-package
|
e007cac29c211bb6a931f2cebebab809e401f5d1
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.git_project_name}}/tests/test_{{cookiecutter.project_slug}}.py
|
imAsparky/cookiecutter-py3-package
|
e007cac29c211bb6a931f2cebebab809e401f5d1
|
[
"BSD-3-Clause"
] | 116
|
2021-08-23T19:27:19.000Z
|
2022-01-15T02:38:12.000Z
|
{{cookiecutter.git_project_name}}/tests/test_{{cookiecutter.project_slug}}.py
|
imAsparky/cookiecutter-py3-package
|
e007cac29c211bb6a931f2cebebab809e401f5d1
|
[
"BSD-3-Clause"
] | 1
|
2021-08-31T20:44:09.000Z
|
2021-08-31T20:44:09.000Z
|
#!/usr/bin/env python
"""Tests for `{{ cookiecutter.git_project_name }}` package."""
import unittest
import pytest
{%- if cookiecutter.command_line_interface | lower == 'click' %}
from click.testing import CliRunner
{%- endif %}
from {{cookiecutter.project_slug}} import {{cookiecutter.project_slug}}
{%- if cookiecutter.command_line_interface | lower == 'click' %}
from {{cookiecutter.project_slug}} import cli
{%- endif %}
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
assert 000 == 000
assert 111 == 111
{%- if cookiecutter.command_line_interface | lower == 'click' %}
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert '{{ cookiecutter.project_slug }}.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
{%- endif %}
class Test{{cookiecutter.project_slug | title}}(unittest.TestCase):
"""Tests for `{{ cookiecutter.project_slug }}` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""A fake test as example."""
assert "aaa" == "aaa"
assert "aaa" != "bbb"
{%- if cookiecutter.command_line_interface | lower == 'click' %}
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert '{{ cookiecutter.project_slug }}.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
{%- endif %}
| 28.506173
| 78
| 0.660026
|
a66674ae813cf75ef50f250d5624e578bbb47d3a
| 4,813
|
py
|
Python
|
loaders/doc3dwc_loader.py
|
KongBOy/analyze_11_code
|
1baef56ba0da74b67e653bbea013e3843034c3f0
|
[
"MIT"
] | null | null | null |
loaders/doc3dwc_loader.py
|
KongBOy/analyze_11_code
|
1baef56ba0da74b67e653bbea013e3843034c3f0
|
[
"MIT"
] | null | null | null |
loaders/doc3dwc_loader.py
|
KongBOy/analyze_11_code
|
1baef56ba0da74b67e653bbea013e3843034c3f0
|
[
"MIT"
] | null | null | null |
import os
from os.path import join as pjoin
import collections
import json
import torch
import numpy as np
import scipy.misc as m
import scipy.io as io
import matplotlib.pyplot as plt
import glob
import cv2
import random
from tqdm import tqdm
from torch.utils import data
from loaders.augmentationsk import data_aug, tight_crop
class doc3dwcLoader(data.Dataset):
"""
Loader for world coordinate regression and RGB images
"""
def __init__(self, root,
split='train',
is_transform=False,
img_size=512,
augmentations=None):
self.root = os.path.expanduser(root)
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.n_classes = 3
self.files = collections.defaultdict(list)
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
for split in ['train', 'val']:
path = pjoin(self.root, split + '.txt')
file_list = tuple(open(path, 'r'))
file_list = [id_.rstrip() for id_ in file_list]
self.files[split] = file_list
# self.setup_annotations()
if self.augmentations:
self.txpths = []
with open(os.path.join(self.root[:-7], 'augtexnames.txt'), 'r') as f:
for line in f:
txpth = line.strip()
self.txpths.append(txpth)
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
im_name = self.files[self.split][index] # 1/824_8-cp_Page_0503-7Nw0001
im_path = pjoin(self.root, 'img', im_name + '.png')
lbl_path = pjoin(self.root, 'wc' , im_name + '.exr')
im = m.imread(im_path, mode='RGB')
im = np.array(im, dtype=np.uint8)
lbl = cv2.imread(lbl_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
lbl = np.array(lbl, dtype=np.float)
if 'val' in self.split:
im, lbl = tight_crop(im / 255.0, lbl)
if self.augmentations: # this is for training, default false for validation\
tex_id = random.randint(0, len(self.txpths) - 1)
txpth = self.txpths[tex_id]
tex = cv2.imread(os.path.join(self.root[:-7], txpth)).astype(np.uint8)
bg = cv2.resize(tex, self.img_size, interpolation=cv2.INTER_NEAREST)
im, lbl = data_aug(im, lbl, bg)
if self.is_transform:
im, lbl = self.transform(im, lbl)
return im, lbl
def transform(self, img, lbl):
        img = m.imresize(img, self.img_size)  # uint8 with RGB mode  ### note: imresize rescales values back to 0~255, see https://docs.scipy.org/doc/scipy-1.2.1/reference/generated/scipy.misc.imresize.html#scipy.misc.imresize
if img.shape[-1] == 4:
img = img[:, :, :3] # Discard the alpha channel
img = img[:, :, ::-1] # RGB -> BGR
# plt.imshow(img)
# plt.show()
        img = img.astype(float) / 255.0  ### because m.imresize rescales values back to 0~255, divide by 255 once more here
img = img.transpose(2, 0, 1) # NHWC -> NCHW
lbl = lbl.astype(float)
# normalize label
msk = ((lbl[:, :, 0] != 0) & (lbl[:, :, 1] != 0) & (lbl[:, :, 2] != 0)).astype(np.uint8) * 255
xmx, xmn, ymx, ymn, zmx, zmn = 1.2539363, -1.2442188, 1.2396319, -1.2289206, 0.6436657, -0.67492497 # calculate from all the wcs
lbl[:, :, 0] = (lbl[:, :, 0] - zmn) / (zmx - zmn)
lbl[:, :, 1] = (lbl[:, :, 1] - ymn) / (ymx - ymn)
lbl[:, :, 2] = (lbl[:, :, 2] - xmn) / (xmx - xmn)
lbl = cv2.bitwise_and(lbl, lbl, mask=msk)
lbl = cv2.resize(lbl, self.img_size, interpolation=cv2.INTER_NEAREST)
lbl = lbl.transpose(2, 0, 1) # NHWC -> NCHW
lbl = np.array(lbl, dtype=np.float)
# to torch
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).float()
return img, lbl
# # Leave code for debugging purposes
# if __name__ == '__main__':
# local_path = './data/DewarpNet/doc3d/'
# bs = 4
# dst = doc3dwcLoader(root=local_path, split='trainswat3dmini', is_transform=True, augmentations=True)
# trainloader = data.DataLoader(dst, batch_size=bs)
# for i, data in enumerate(trainloader):
# imgs, labels = data
# imgs = imgs.numpy()
# lbls = labels.numpy()
# imgs = np.transpose(imgs, [0,2,3,1])
# lbls = np.transpose(lbls, [0,2,3,1])
# f, axarr = plt.subplots(bs, 2)
# for j in range(bs):
# # print imgs[j].shape
# axarr[j][0].imshow(imgs[j])
# axarr[j][1].imshow(lbls[j])
# plt.show()
# a = raw_input()
# if a == 'ex':
# break
# else:
# plt.close()
| 38.198413
| 196
| 0.563682
|
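The transform() above rescales the world-coordinate label per channel with fixed dataset-wide min/max bounds; the sketch below reproduces just that normalization step with numpy only (the bounds here are illustrative, not the dataset statistics used above).
# Numpy-only sketch of the per-channel min/max normalization in transform(); bounds are made up.
import numpy as np

def normalize_wc(lbl, bounds):
    """lbl: HxWx3 world coordinates; bounds: [(min, max), ...] per channel."""
    out = lbl.astype(float).copy()
    foreground = (lbl[:, :, 0] != 0) & (lbl[:, :, 1] != 0) & (lbl[:, :, 2] != 0)
    for c, (mn, mx) in enumerate(bounds):
        out[:, :, c] = (out[:, :, c] - mn) / (mx - mn)
    out[~foreground] = 0.0                     # background pixels stay at zero
    return out

wc = np.random.uniform(-1.0, 1.0, size=(8, 8, 3))
normed = normalize_wc(wc, [(-1.25, 1.25)] * 3)
print(normed.min(), normed.max())              # values now lie in [0, 1]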
ea0c335504288728aa28eee0dd2c154190037174
| 157
|
py
|
Python
|
plugins/sort_by_article_count/count.py
|
julianespinel/website
|
a16394cf878fdfd8e1fa86ed23779d31ec4f1316
|
[
"MIT"
] | null | null | null |
plugins/sort_by_article_count/count.py
|
julianespinel/website
|
a16394cf878fdfd8e1fa86ed23779d31ec4f1316
|
[
"MIT"
] | 9
|
2020-08-30T19:52:19.000Z
|
2021-10-03T16:55:45.000Z
|
plugins/sort_by_article_count/count.py
|
julianespinel/website
|
a16394cf878fdfd8e1fa86ed23779d31ec4f1316
|
[
"MIT"
] | null | null | null |
def sort_by_article_count(tags):
"""Return a number articles with the given tag."""
return sorted(tags, key=lambda tags: len(tags[1]), reverse=True)
| 39.25
| 68
| 0.713376
|
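An illustrative call of the plugin helper above, with made-up (tag, articles) pairs; it assumes sort_by_article_count is in scope.
# Illustrative only: made-up (tag, articles) tuples, assuming sort_by_article_count is importable.
tags = [
    ("python", ["post-1"]),
    ("django", ["post-1", "post-2", "post-3"]),
    ("testing", ["post-1", "post-2"]),
]
print(sort_by_article_count(tags))
# -> [('django', [...]), ('testing', [...]), ('python', [...])], i.e. most articles first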
b37bf8f613d40848aa52a456f274c1f4c55c29dc
| 2,361
|
py
|
Python
|
Acceleration/memcached/regressionSims/testgen/memtest_manysets.py
|
pooyaww/Vivado_HLS_Samples
|
6dc48bded1fc577c99404fc99c5089ae7279189a
|
[
"BSD-3-Clause"
] | 326
|
2016-07-06T01:50:43.000Z
|
2022-03-31T21:50:19.000Z
|
Acceleration/memcached/regressionSims/testgen/memtest_manysets.py
|
asicguy/HLx_Examples
|
249406bf7718c33d10a837ddd2ee71a683d481e8
|
[
"BSD-3-Clause"
] | 10
|
2017-04-05T16:02:19.000Z
|
2021-06-09T14:26:40.000Z
|
Acceleration/memcached/regressionSims/testgen/memtest_manysets.py
|
asicguy/HLx_Examples
|
249406bf7718c33d10a837ddd2ee71a683d481e8
|
[
"BSD-3-Clause"
] | 192
|
2016-08-31T09:15:18.000Z
|
2022-03-01T11:28:12.000Z
|
#!/usr/bin/python
import memlib
keySizes = list(range(1, 28))
valueSizes = keySizes[:]
keyChars = [chr(c) for c in range(97, 126)]
valueChars1 = [chr(c) for c in range(65, 94)]
valueChars2 = valueChars1[1:] + valueChars1[:1]
valueChars3 = valueChars1[2:] + valueChars1[:2]
valueChars4 = valueChars1[3:] + valueChars1[:3]
valueChars5 = valueChars1[4:] + valueChars1[:4]
valueChars6 = valueChars1[5:] + valueChars1[:5]
valueChars7 = valueChars1[6:] + valueChars1[:6]
valueChars8 = valueChars1[7:] + valueChars1[:7]
valueChars9 = valueChars1[8:] + valueChars1[:8]
valueChars10 = valueChars1[9:] + valueChars1[:9]
valueChars11 = valueChars1[10:] + valueChars1[:10]
valueChars12 = valueChars1[11:] + valueChars1[:11]
valueChars13 = valueChars1[12:] + valueChars1[:12]
valueChars14 = valueChars1[13:] + valueChars1[:13]
valueChars15 = valueChars1[14:] + valueChars1[:14]
keyPairs = list(zip(keySizes, keyChars))
keys = [char * size for (size, char) in keyPairs]
def get_pairs(keys, value_chars):
    value_pairs = list(zip(valueSizes, value_chars))
    values_use = [char * size for (size, char) in value_pairs]
pairs = []
for i in range(0,27):
pairs.append( memlib.kv_pair(keys[i], values_use[i], "08080808", 42) )
return pairs
def do_sets(keys, value_chars, testset):
pairs = get_pairs(keys, value_chars)
for p in pairs:
memlib.setSuccess(p, testset)
def do_getsSuccess(keys, value_chars, testset):
pairs = get_pairs(keys, value_chars)
for p in pairs:
memlib.getSuccess(p, testset)
def do_getsFail(keys, value_chars, testset):
pairs = get_pairs(keys, value_chars)
for p in pairs:
memlib.getFail(p, testset)
testset = memlib.newTestset()
do_sets(keys, valueChars1, testset)
do_sets(keys, valueChars2, testset)
do_sets(keys, valueChars3, testset)
do_sets(keys, valueChars4, testset)
do_sets(keys, valueChars5, testset)
do_sets(keys, valueChars6, testset)
do_sets(keys, valueChars7, testset)
do_sets(keys, valueChars8, testset)
do_sets(keys, valueChars9, testset)
do_sets(keys, valueChars10, testset)
do_sets(keys, valueChars11, testset)
do_sets(keys, valueChars12, testset)
do_sets(keys, valueChars13, testset)
do_sets(keys, valueChars14, testset)
do_sets(keys, valueChars15, testset)
do_getsSuccess(keys, valueChars15, testset)
memlib.flush(testset)
do_getsFail(keys, valueChars15, testset)
memlib.generate("MANYSETS", testset)
memlib.generate_hls("MANYSETS", testset)
| 31.905405
| 72
| 0.752647
|
ec8df2b48734d1aeaaff458650b3d5c47692eb41
| 567
|
py
|
Python
|
hello.py
|
KianaLiu1/cgi-lab
|
0685390ef1319ec6608821c0a78d62ac6d5d9e04
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
KianaLiu1/cgi-lab
|
0685390ef1319ec6608821c0a78d62ac6d5d9e04
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
KianaLiu1/cgi-lab
|
0685390ef1319ec6608821c0a78d62ac6d5d9e04
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os, json
#print all env variables as plain text
# print("Content-Type: text/plain")
# print()
# print(os.environ)
#print env variables as json
# print("Content-Type: application/json")
# print()
# print(json.dumps(dict(os.environ), indent = 2))
#print query parameter data in html
# print("Content-Type: text/html")
# print()
# print(f"<p>QUERY_STRING={os.environ['QUERY_STRING']}</p>")
#print user browser information in html
print("Content-Type: text/html")
print()
print(f"<p>HTTP_USER_AGENT={os.environ['HTTP_USER_AGENT']}</p>")
| 24.652174
| 64
| 0.712522
|
0a468b88c7630c3d697b10c7c91849e6f7c914a2
| 2,471
|
py
|
Python
|
test/IECore/ops/maths/multiply/multiply-2.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 386
|
2015-01-02T11:10:43.000Z
|
2022-03-10T15:12:20.000Z
|
test/IECore/ops/maths/multiply/multiply-2.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 484
|
2015-01-09T18:28:06.000Z
|
2022-03-31T16:02:04.000Z
|
test/IECore/ops/maths/multiply/multiply-2.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 99
|
2015-01-28T23:18:04.000Z
|
2022-03-27T00:59:39.000Z
|
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECore import *
class multiply( Op ) :
def __init__( self ) :
Op.__init__( self,
"multiplies two numbers together - in a new and improved version 2!!.",
IntParameter(
name = "result",
description = "a multiplied by b",
defaultValue = 2,
)
)
self.parameters().addParameter(
IntParameter(
name = "a",
description = "first operand",
defaultValue = 1,
)
)
self.parameters().addParameter(
IntParameter(
name = "b",
description = "second operand",
defaultValue = 2,
)
)
def doOperation( self, operands ) :
return IntData( operands["a"].value * operands["b"].value )
registerRunTimeTyped( multiply )
| 33.391892
| 76
| 0.674221
|
ace92815d8d90993c7378b71ba05c9e46c70b570
| 9,499
|
py
|
Python
|
oauth_provider/views.py
|
ovidioreyna/django-oauth-plus
|
b9b64a3ac24fd11f471763c88462bbf3c53e46e6
|
[
"BSD-3-Clause"
] | null | null | null |
oauth_provider/views.py
|
ovidioreyna/django-oauth-plus
|
b9b64a3ac24fd11f471763c88462bbf3c53e46e6
|
[
"BSD-3-Clause"
] | 4
|
2018-01-11T20:59:12.000Z
|
2020-05-12T12:48:53.000Z
|
oauth_provider/views.py
|
ovidioreyna/django-oauth-plus
|
b9b64a3ac24fd11f471763c88462bbf3c53e46e6
|
[
"BSD-3-Clause"
] | 3
|
2017-12-18T20:01:36.000Z
|
2018-12-17T05:35:53.000Z
|
from __future__ import absolute_import
import oauth2 as oauth
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import get_callable
from django.http import HttpResponse, HttpResponseBadRequest
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from six.moves.urllib.parse import urlencode
from oauth_provider.compat import UnsafeRedirect
from .consts import OUT_OF_BAND
from .decorators import oauth_required
from .forms import AuthorizeRequestTokenForm
from .responses import (COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE,
INVALID_CONSUMER_RESPONSE,
INVALID_PARAMS_RESPONSE)
from .store import InvalidConsumerError, InvalidTokenError, store
from .utils import (get_oauth_request,
is_xauth_request,
require_params,
send_oauth_error,
verify_oauth_request)
OAUTH_AUTHORIZE_VIEW = 'OAUTH_AUTHORIZE_VIEW'
OAUTH_CALLBACK_VIEW = 'OAUTH_CALLBACK_VIEW'
UNSAFE_REDIRECTS = getattr(settings, "OAUTH_UNSAFE_REDIRECTS", False)
@csrf_exempt
def request_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
missing_params = require_params(oauth_request, ('oauth_callback',))
if missing_params is not None:
return missing_params
if is_xauth_request(oauth_request):
return HttpResponseBadRequest('xAuth not allowed for this method.')
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return INVALID_CONSUMER_RESPONSE
if not verify_oauth_request(request, oauth_request, consumer):
return COULD_NOT_VERIFY_OAUTH_REQUEST_RESPONSE
try:
request_token = store.create_request_token(request, oauth_request, consumer, oauth_request['oauth_callback'])
except oauth.Error as err:
return send_oauth_error(err)
ret = urlencode({
'oauth_token': request_token.key,
'oauth_token_secret': request_token.secret,
'oauth_callback_confirmed': 'true'
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@login_required
def user_authorization(request, form_class=AuthorizeRequestTokenForm):
oauth_token = request.POST.get('oauth_token', request.GET.get('oauth_token'))
if not oauth_token:
return HttpResponseBadRequest('No request token specified.')
oauth_request = get_oauth_request(request)
try:
request_token = store.get_request_token(request, oauth_request, oauth_token)
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
consumer = store.get_consumer_for_request_token(request, oauth_request, request_token)
if request.method == 'POST':
form = form_class(request.POST)
if request.session.get('oauth', '') == request_token.key and form.is_valid():
request.session['oauth'] = ''
if form.cleaned_data['authorize_access']:
request_token = store.authorize_request_token(request, oauth_request, request_token)
args = {'oauth_token': request_token.key}
else:
args = {'error': _('Access not granted by user.')}
if request_token.callback is not None and request_token.callback != OUT_OF_BAND:
callback_url = request_token.get_callback_url(args)
if UNSAFE_REDIRECTS:
response = UnsafeRedirect(callback_url)
else:
response = HttpResponseRedirect(callback_url)
else:
# try to get custom callback view
callback_view_str = getattr(settings, OAUTH_CALLBACK_VIEW,
'oauth_provider.views.fake_callback_view')
try:
view_callable = get_callable(callback_view_str)
except AttributeError:
raise Exception("%s view doesn't exist." % callback_view_str)
# try to treat it as Class Based View (CBV)
try:
callback_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
callback_view = view_callable
response = callback_view(request, **args)
else:
response = send_oauth_error(oauth.Error(_('Action not allowed.')))
else:
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
view_callable = get_callable(authorize_view_str)
except AttributeError:
raise Exception("%s view doesn't exist." % authorize_view_str)
# try to treat it as Class Based View (CBV)
try:
authorize_view = view_callable.as_view()
except AttributeError:
# if it appears not to be CBV treat it like FBV
authorize_view = view_callable
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = request_token.key
response = authorize_view(request, request_token, request_token.get_callback_url(), params)
return response
@csrf_exempt
def access_token(request):
oauth_request = get_oauth_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
# Consumer
try:
consumer = store.get_consumer(request, oauth_request, oauth_request['oauth_consumer_key'])
except InvalidConsumerError:
return HttpResponseBadRequest('Invalid consumer.')
is_xauth = is_xauth_request(oauth_request)
if not is_xauth:
# Check Parameters
missing_params = require_params(oauth_request, ('oauth_token', 'oauth_verifier'))
if missing_params is not None:
return missing_params
# Check Request Token
try:
request_token = store.get_request_token(request, oauth_request, oauth_request['oauth_token'])
except InvalidTokenError:
return HttpResponseBadRequest('Invalid request token.')
if not request_token.is_approved:
return HttpResponseBadRequest('Request Token not approved by the user.')
# Verify Signature
if not verify_oauth_request(request, oauth_request, consumer, request_token):
return HttpResponseBadRequest('Could not verify OAuth request.')
# Check Verifier
if oauth_request.get('oauth_verifier', None) != request_token.verifier:
return HttpResponseBadRequest('Invalid OAuth verifier.')
else: # xAuth
# Check Parameters
missing_params = require_params(oauth_request, ('x_auth_username', 'x_auth_password', 'x_auth_mode'))
if missing_params is not None:
return missing_params
# Check if Consumer allows xAuth
if not consumer.xauth_allowed:
return HttpResponseBadRequest('xAuth not allowed for this method')
# Check Signature
if not verify_oauth_request(request, oauth_request, consumer):
return HttpResponseBadRequest('Could not verify xAuth request.')
user = authenticate(
x_auth_username=oauth_request.get_parameter('x_auth_username'),
x_auth_password=oauth_request.get_parameter('x_auth_password'),
x_auth_mode=oauth_request.get_parameter('x_auth_mode')
)
if not user:
return HttpResponseBadRequest('xAuth username or password is not valid')
else:
request.user = user
# Handle Request Token
try:
# request_token = store.create_request_token(request, oauth_request, consumer, oauth_request.get('oauth_callback'))
request_token = store.create_request_token(request, oauth_request, consumer, OUT_OF_BAND)
request_token = store.authorize_request_token(request, oauth_request, request_token)
except oauth.Error as err:
return send_oauth_error(err)
access_token = store.create_access_token(request, oauth_request, consumer, request_token)
ret = urlencode({
'oauth_token': access_token.key,
'oauth_token_secret': access_token.secret
})
return HttpResponse(ret, content_type='application/x-www-form-urlencoded')
@oauth_required
def protected_resource_example(request):
"""
Test view for accessing a Protected Resource.
"""
return HttpResponse('Protected Resource access!')
@login_required
def fake_authorize_view(request, token, callback, params):
"""
Fake view for tests. It must return an ``HttpResponse``.
You need to define your own in ``settings.OAUTH_AUTHORIZE_VIEW``.
"""
return HttpResponse('Fake authorize view for %s with params: %s.' % (token.consumer.name, params))
def fake_callback_view(request, **args):
"""
Fake view for tests. It must return an ``HttpResponse``.
You can define your own in ``settings.OAUTH_CALLBACK_VIEW``.
"""
return HttpResponse('Fake callback view.')
| 38.148594
| 127
| 0.68123
|
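A hedged sketch of how the three endpoints above are typically wired into a project's URLconf; the URL patterns and names here are assumptions, not taken from the package.
# Hypothetical urls.py for the views above; prefixes and names are assumptions.
from django.conf.urls import url
from oauth_provider import views

urlpatterns = [
    url(r'^oauth/request_token/$', views.request_token, name='oauth_request_token'),
    url(r'^oauth/authorize/$', views.user_authorization, name='oauth_user_authorization'),
    url(r'^oauth/access_token/$', views.access_token, name='oauth_access_token'),
]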
266ccc96f4e6ddfd7d31958d65b31f655a3002b6
| 11,655
|
py
|
Python
|
fxparamest.py
|
henrikjuergens/guitar-fx-extraction
|
442adab577a090e27de12d779a6d8a0aa917fe1f
|
[
"MIT"
] | 2
|
2020-09-06T07:55:17.000Z
|
2020-09-19T21:19:01.000Z
|
fxparamest.py
|
henrikjuergens/guitar-fx-extraction
|
442adab577a090e27de12d779a6d8a0aa917fe1f
|
[
"MIT"
] | null | null | null |
fxparamest.py
|
henrikjuergens/guitar-fx-extraction
|
442adab577a090e27de12d779a6d8a0aa917fe1f
|
[
"MIT"
] | 1
|
2021-12-17T18:14:02.000Z
|
2021-12-17T18:14:02.000Z
|
"""Estimates the parameters of the used Audio Effect"""
import os
from pathlib import Path
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import librosa
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import joblib
from keras import models, layers, optimizers, utils
from wavtoarray import DATA_PATH
import plots
def get_dist_feat(y_cut, sr):
"""Extracts features for Distortion parameter estimation"""
v_features = []
mfcc = librosa.feature.mfcc(y=y_cut, sr=sr, n_mfcc=3)
mfcc_delta = librosa.feature.delta(mfcc)
m_features_mfcc = np.concatenate((mfcc, mfcc_delta))
for feat in m_features_mfcc:
lin_coeff, lin_residual, _, _, _ = np.polyfit(np.arange(len(feat)), feat, 1, full=True)
v_features.extend(lin_coeff)
# v_features.append(lin_residual)
return v_features
def get_trem_feat(y_cut, sr):
"""Extracts features for Tremolo parameter estimation"""
rms = librosa.feature.rms(S=librosa.core.stft(y_cut))
rms_delta = librosa.feature.delta(rms)
m_features_rms = np.concatenate((rms, rms_delta))
v_features = []
for feat in m_features_rms:
feat_cut = feat - np.average(feat)
# feat[5:round(0.66*len(feat))] # Cut the Fadeout at two thirds of the file
# plots.rms_lin_reg(feat_cut)
feat_windowed = feat_cut * np.hanning(len(feat_cut))
feat_int = np.pad(feat_windowed, (0, 1024 - len(feat_windowed) % 1024), 'constant')
rfft = np.fft.rfft(feat_int)
rfft_norm = np.abs(rfft) * 4 / 1024
# plots.rms_fft(rfft_norm)
rfft_max = np.max(rfft_norm)
rfft_max_ind = np.argmax(rfft_norm)
low_limit = rfft_max_ind - 32 if rfft_max_ind - 32 >= 0 else 0
high_limit = rfft_max_ind + 32 if rfft_max_ind + 32 <= len(rfft_norm) else len(rfft_norm)
rfft_norm[low_limit:high_limit] = np.zeros(high_limit - low_limit)
rfft_max2_ind = np.argmax(rfft_norm)
if rfft_max_ind < rfft_max2_ind:
v_features.extend([rfft_max, rfft_max_ind,
np.max(rfft_norm), rfft_max2_ind])
else:
v_features.extend([np.max(rfft_norm), rfft_max2_ind,
rfft_max, rfft_max_ind])
return v_features
def get_dly_feat(y_cut, sr, y):
"""Extracts features for Delay parameter estimation"""
# uncut_onset_strength = librosa.onset.onset_strength(y=y_cut, sr=sr)
onset_strength = librosa.onset.onset_strength(y=y_cut, sr=sr)
onset_strength = np.reshape(onset_strength, [1, len(onset_strength)])
v_features = []
dly_onsets = librosa.onset.onset_detect(y=y_cut, sr=sr, units='frames', backtrack=False)
dtype = [('onset_strength', float), ('onset', int)]
all_onsets_strength = [(onset_strength[0, onset], onset) for onset in dly_onsets]
all_onsets_strength_np = np.array(all_onsets_strength, dtype=dtype)
onsets_sorted = np.sort(all_onsets_strength_np, order='onset_strength')
strongest_onset = onsets_sorted[-1]
if len(onsets_sorted) > 1:
print('More than one onset candidate found')
strongest_onset_2 = onsets_sorted[-2]
else:
strongest_onset_2 = np.array((0, 0), dtype=dtype)
mfcc_delta = librosa.feature.delta(librosa.feature.mfcc(y_cut, sr=sr, n_mfcc=1)
)[:, strongest_onset['onset']-5:strongest_onset['onset']+3]
if len(onsets_sorted) > 1:
mfcc_delta_2 = librosa.feature.delta(librosa.feature.mfcc(y_cut, sr=sr, n_mfcc=1)
)[:, strongest_onset_2['onset']-5:strongest_onset_2['onset']+3]
else:
mfcc_delta_2 = np.zeros((1, 8))
mfcc_delta_sum = np.sum(mfcc_delta, axis=1)
mfcc_delta_sum_2 = np.sum(mfcc_delta_2, axis=1)
rms = librosa.amplitude_to_db(librosa.feature.rms(y_cut)).T
v_features.extend([mfcc_delta_sum, strongest_onset['onset'], rms[strongest_onset['onset']],
mfcc_delta_sum_2, strongest_onset_2['onset'], rms[strongest_onset_2['onset']]])
# plots.onsets_and_strength(all_onsets_strength, onsets_sorted, dly_onsets, strongest_onset,
# strongest_onset_2, y_cut, onset_strength)
return v_features
def read_data(path_folder):
"""Reads sample data from files and extracts features"""
os.chdir(DATA_PATH)
sample_paths = ['Gitarre monophon/Samples/NoFX', 'Gitarre polyphon/Samples/NoFX']
train_data = []
train_labels = []
for path in sample_paths:
sample_path = os.path.join(path_folder, path)
os.chdir(sample_path)
for file_name in os.listdir(os.getcwd()):
if file_name.endswith(".wav"):
print(file_name)
os.chdir(Path('../../Labels'))
# Label names are: Edge, Gain, Tone
label_file = file_name[:-4] + '.pickle'
# label = [0.0, 0.0, 0.0]
with open(label_file, 'rb') as handle:
label = pickle.load(handle)
print(label)
if path_folder == 'DlyRandomSamples': # Fix limited delay plugin range
label[0] = label[0]*4.0
label[1] = label[1]*10.0
os.chdir('../Samples/NoFX')
train_labels.append(label)
# Loading the audio
y, sr = librosa.load(file_name, sr=44100)
# Onset Detection
y = np.insert(y, 0, np.zeros(1023))
y = librosa.util.normalize(y)
onset_frame = librosa.onset.onset_detect(y=y, sr=sr, units='frames',
pre_max=20000, post_max=20000,
pre_avg=20000, post_avg=20000, delta=0, wait=1000)
offset_frame = librosa.samples_to_frames(samples=y.shape[0])
onset_sample = librosa.core.frames_to_samples(onset_frame[0])
offset_sample = librosa.core.frames_to_samples(offset_frame)
y_cut = y[onset_sample:offset_sample]
v_features = []
if path_folder == 'DistRandomSamples':
v_features = get_dist_feat(y_cut=y_cut, sr=sr)
elif path_folder == 'TremRandomSamples':
v_features = get_trem_feat(y_cut=y_cut, sr=sr)
elif path_folder == 'DlyRandomSamples':
v_features = get_dly_feat(y_cut=y_cut, sr=sr, y=y)
else:
print('Sample folder for feature extraction not found')
train_data.append(np.hstack(v_features))
os.chdir(DATA_PATH)
train_data = np.array(train_data)
print(train_data.shape)
scaler = preprocessing.StandardScaler()
train_data = scaler.fit_transform(train_data)
train_labels = np.array(train_labels)
os.chdir(DATA_PATH)
return train_data, train_labels
def create_model(input_dim, output_dim):
"""Creates the Neural Network for the estimation"""
model = models.Sequential()
model.add(layers.Dense(32, input_dim=input_dim))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
# model.add(layers.Dense(32, activation='relu'))
# model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(output_dim, activation='linear'))
model.summary()
model.compile(optimizer=optimizers.Adam(),
loss='mean_squared_error',
metrics=['mse'])
return model
def train_model(model, train_data, train_labels):
"""Trains the model for the estimation"""
utils.normalize(train_data)
history = model.fit(train_data, train_labels, epochs=1000, verbose=1, validation_split=0.2)
# plots.learning_curve(history)
def estimate(folder_path):
"""Reads the data from folder path, trains the model, and estimates on test data"""
os.chdir(DATA_PATH)
os.chdir(folder_path)
if not Path('ParamEstData.pickle').exists():
train_data, train_labels = read_data(path_folder=folder_path)
# plots.fx_par_data(train_data, train_labels)
os.chdir(DATA_PATH)
os.chdir(folder_path)
with open('ParamEstData.pickle', 'wb') as handle:
pickle.dump(train_data, handle)
with open('ParamEstLabels.pickle', 'wb') as handle:
pickle.dump(train_labels, handle)
print('Data Saved')
else:
with open('ParamEstData.pickle', 'rb') as handle:
train_data = pickle.load(handle)
with open('ParamEstLabels.pickle', 'rb') as handle:
train_labels = pickle.load(handle)
print('Data Loaded')
# plots.fx_par_data(train_data, train_labels)
os.chdir(DATA_PATH)
os.chdir(folder_path)
if not Path('ParamEstModel.pickle').exists():
my_model = create_model(train_data.shape[1], train_labels.shape[1])
train_data, test_data, train_labels, test_labels = train_test_split(train_data, train_labels,
test_size=0.3, random_state=42)
train_model(my_model, train_data, train_labels)
os.chdir(DATA_PATH)
os.chdir(folder_path)
with open('ParamEstModel.pickle', 'wb') as handle:
joblib.dump(my_model, handle)
print('Model Saved')
else:
with open('ParamEstModel.pickle', 'rb') as handle:
my_model = joblib.load(handle)
print('Model Loaded')
# train_data, test_data, train_labels, test_labels = train_test_split(train_data, train_labels,
# test_size=0.3, random_state=42)
test_data = train_data
test_labels = train_labels
pred = my_model.predict(test_data)
print(pred)
excl_threshold = 0.0 # Excludes data from evaluation, where true labels are below set threshold;
# threshold = 0.0 does not exclude any samples
    # use elementwise & (not Python 'and') so both label columns are actually filtered
    keep = np.where((test_labels[:, 0] > excl_threshold)
                    & (test_labels[:, 1] > excl_threshold))
    pred_excl = pred[keep]
    test_labels_excl = test_labels[keep]
error = np.abs(pred_excl - test_labels_excl)
random_error = np.reshape(np.abs(np.random.random(len(test_labels_excl))-test_labels_excl[:, 0]),
[len(test_labels_excl), 1])
data_frames = []
for (param_label, param_pred, param_error) in zip(test_labels.T.tolist(), pred.T.tolist(),
error.T.tolist()):
data_frames.append(pd.DataFrame({'Test Label': param_label, 'Prediction': param_pred,
'Error': param_error}))
plots.param_est_error_over_params(data_frames, test_labels, folder_path)
# error = np.concatenate((error, random_error), axis=1)
print(folder_path + ' Absolute Error Evaluation')
print('Mean Error:')
print(np.mean(error, axis=0))
print('Standard Deviation:')
print(np.std(error, axis=0))
# print('Median Error:')
# print(np.median(error, axis=0))
with open('NNAbsoluteErrorRelu.pickle', 'wb') as handle:
joblib.dump(error, handle)
print('Prediction Error Saved')
plots.param_est_error_boxplot(error, folder_path)
if __name__ == '__main__':
fx = ['DistRandomSamples', 'TremRandomSamples', 'DlyRandomSamples']
estimate(fx[1])
| 42.228261
| 109
| 0.62574
|
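get_trem_feat above estimates the tremolo rate by peak-picking the FFT of the mean-removed, windowed RMS envelope; the numpy-only sketch below reruns that idea on a synthetic envelope (all values are made up for illustration).
# Standalone sketch of the envelope-FFT peak picking used in get_trem_feat; all values are illustrative.
import numpy as np

frame_rate = 86.0                                      # RMS frames per second (~44100 / 512)
t = np.arange(0, 3.0, 1.0 / frame_rate)
envelope = 1.0 + 0.4 * np.sin(2 * np.pi * 5.0 * t)     # synthetic 5 Hz tremolo

env = envelope - envelope.mean()                       # remove DC, as in get_trem_feat
env = env * np.hanning(len(env))
env = np.pad(env, (0, 1024 - len(env) % 1024), 'constant')
spectrum = np.abs(np.fft.rfft(env)) * 4 / 1024

peak_bin = int(np.argmax(spectrum))
peak_hz = peak_bin * frame_rate / len(env)
print("estimated tremolo rate: %.2f Hz" % peak_hz)     # close to 5 Hz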
88a67d82e8f012f13c05c78896e8c1e075de5ae7
| 1,537
|
py
|
Python
|
gym_pcgrl/gym_pcgrl/envs/probs/binary/binary_ctrl_prob.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
gym_pcgrl/gym_pcgrl/envs/probs/binary/binary_ctrl_prob.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
gym_pcgrl/gym_pcgrl/envs/probs/binary/binary_ctrl_prob.py
|
JiangZehua/control-pcgrl3D
|
f9b04e65e1cbf70b7306f4df251450d83c6fb2be
|
[
"MIT"
] | null | null | null |
import numpy as np
from gym_pcgrl.envs.probs.binary.binary_prob import BinaryProblem
class BinaryCtrlProblem(BinaryProblem):
def __init__(self):
super(BinaryCtrlProblem, self).__init__()
self._max_path_length = np.ceil(self._width / 2) * (self._height) + np.floor(self._height/2)
# self._max_path_length = np.ceil(self._width / 2 + 1) * (self._height)
# default conditional targets
self.static_trgs = {"regions": 1, "path-length": self._max_path_length}
# boundaries for conditional inputs/targets
self.cond_bounds = {
# Upper bound: checkerboard
"regions": (0, self._width * np.ceil(self._height / 2)),
# 10101010
# 01010101
# 10101010
# 01010101
# 10101010
# FIXME: we shouldn't assume a square map here! Find out which dimension is bigger
# and "snake" along that one
# Upper bound: zig-zag
"path-length": (0, self._max_path_length),
# 11111111
# 00000001
# 11111111
# 10000000
# 11111111
}
    # Episode termination is handled elsewhere: change percentage and max iterations
    # are handled in the ParamRew wrapper / pcgrl_env, so this problem never ends the episode itself.
def get_episode_over(self, new_stats, old_stats):
""" If the generator has reached its targets. (change percentage and max iterations handled in pcgrl_env)"""
return False
def get_reward(self, new_stats, old_stats):
return None
| 34.931818
| 116
| 0.597267
|
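To make the two upper bounds above concrete, here is the same arithmetic evaluated for a hypothetical 16x16 map.
# Concrete check of the bounds above for a hypothetical 16x16 map.
import numpy as np

width, height = 16, 16
max_path_length = np.ceil(width / 2) * height + np.floor(height / 2)   # zig-zag path
max_regions = width * np.ceil(height / 2)                               # checkerboard
print(max_path_length)   # 8 * 16 + 8 = 136.0
print(max_regions)       # 16 * 8  = 128.0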
60f63c42839d834fe3adf6414fb5a6610df016d1
| 571
|
py
|
Python
|
projects/writing/sigurd/migrations/0002_story.py
|
NikolaiSie/django
|
a19bb9813d7ed957d05edeeaf380988265aae49d
|
[
"Apache-2.0"
] | null | null | null |
projects/writing/sigurd/migrations/0002_story.py
|
NikolaiSie/django
|
a19bb9813d7ed957d05edeeaf380988265aae49d
|
[
"Apache-2.0"
] | null | null | null |
projects/writing/sigurd/migrations/0002_story.py
|
NikolaiSie/django
|
a19bb9813d7ed957d05edeeaf380988265aae49d
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-09-08 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sigurd', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Story',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, null=True)),
('text', models.TextField(null=True)),
],
),
]
| 25.954545
| 117
| 0.567426
|
2c35f153b2fda4e85604530fbb76d550b0a75490
| 1,004
|
py
|
Python
|
funcat/data/rt_data_from_tencent.py
|
xuan-wang/funcat
|
c4b184942564ab8a4092acb4907ab069fc44683c
|
[
"Apache-2.0"
] | 18
|
2019-05-30T01:00:38.000Z
|
2022-01-03T15:46:25.000Z
|
funcat/data/rt_data_from_tencent.py
|
xuan-wang/funcat
|
c4b184942564ab8a4092acb4907ab069fc44683c
|
[
"Apache-2.0"
] | 5
|
2019-05-28T15:01:18.000Z
|
2021-11-24T14:08:39.000Z
|
funcat/data/rt_data_from_tencent.py
|
xuan-wang/funcat
|
c4b184942564ab8a4092acb4907ab069fc44683c
|
[
"Apache-2.0"
] | 8
|
2020-10-30T10:03:02.000Z
|
2021-12-04T07:20:36.000Z
|
import pandas as pd
import requests
import json
url = "http://qt.gtimg.cn/q="
def get_runtime_data(ts_code, token=None):
code_suffix = ts_code[-2:].lower() + ts_code[:-3]
if token:
text = requests.get(url + code_suffix + '&token=' + token)
else:
text = requests.get(url + code_suffix)
if text.status_code == 200:
raw = text.text.split("~")
data = {
'ts_code': [ts_code],
'trade_date': [int(raw[30][:8])],
'close': [float(raw[3])],
'open': [float(raw[5])],
'high': [float(raw[33])],
'low': [float(raw[34])],
'pre_close': [float(raw[4])],
'change': [float(raw[31])],
'pct_chg': [float(raw[32])],
'vol': [float(raw[36])],
'amount': [float(raw[37])],
}
df = pd.DataFrame(data)
return df
else:
return None
if __name__ == '__main__':
df = get_runtime_data('601360.SH')
print(df)
| 25.1
| 66
| 0.498008
|
ef0717a316f58988738198f92d56d31ba0de5a60
| 2,317
|
py
|
Python
|
cnet.py
|
cherepas/seed2sh
|
3ca32fc425a1762ea9e29ece11732e1a9bb06b13
|
[
"MIT"
] | null | null | null |
cnet.py
|
cherepas/seed2sh
|
3ca32fc425a1762ea9e29ece11732e1a9bb06b13
|
[
"MIT"
] | null | null | null |
cnet.py
|
cherepas/seed2sh
|
3ca32fc425a1762ea9e29ece11732e1a9bb06b13
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def getsize(new_hi, ker, srd):
    """Output (H, W) of a conv/pool layer given input size, kernel size and stride."""
    pad = np.asarray((0, 0))
    dil = np.asarray((1, 1))
    new_size = np.asarray(new_hi)
    ker = np.asarray(ker)
    srd = np.asarray(srd)
    out = (new_size + 2 * pad - dil * (ker - 1) - 1) // srd + 1
    return tuple(int(v) for v in out)
class CNet(torch.nn.Module):
#network from Henderson, Ferrari article
def __init__(self, hidden_dim, chidden_dim, kernel_sizes, nim, h, w, usehaf):
super(CNet, self).__init__()
(new_h, new_w) = (h, w)
self.usehaf = usehaf
if usehaf:
self.conv0 = nn.Conv2d(nim, 32, 3, stride=2)
(new_h, new_w) = getsize((new_h, new_w),3,2)
self.conv1 = nn.Conv2d(32, 64, 3)
(new_h, new_w) = getsize((new_h, new_w),3,1)
self.pool = nn.MaxPool2d(2, 2)
(new_h, new_w) = getsize((new_h, new_w),2,2)
self.conv2 = nn.Conv2d(64, 96, 3)
(new_h, new_w) = getsize((new_h, new_w),3,1)
(new_h, new_w) = getsize((new_h, new_w),2,2)
self.conv3 = nn.Conv2d(96, 128, 3)
(new_h, new_w) = getsize((new_h, new_w),3,1)
(new_h, new_w) = getsize((new_h, new_w),2,2)
self.conv4 = nn.Conv2d(128, 128, 4)
(new_h, new_w) = getsize((new_h, new_w),4,1)
self.bn0 = torch.nn.BatchNorm2d(32)
self.bn1 = torch.nn.BatchNorm2d(64)
self.bn2 = torch.nn.BatchNorm2d(96)
self.bn3 = torch.nn.BatchNorm2d(128)
current_dim = 128*new_h*new_w
else:
current_dim = nim*new_h*new_w
self.layers = nn.ModuleList()
for hdim in hidden_dim:
self.layers.append(nn.Linear(current_dim, hdim))
current_dim = hdim
def forward(self, x):
if self.usehaf:
x = F.relu(self.bn0(self.conv0(x)))
x = F.relu(self.bn1(self.conv1(x)))
x = self.pool(x)
x = F.relu(self.bn2(self.conv2(x)))
x = self.pool(x)
x = F.relu(self.bn3(self.conv3(x)))
x = self.pool(x)
x = F.relu(self.bn3(self.conv4(x)))
x = x.view(x.shape[0], -1)
for layer in self.layers:
x = layer(x)
return x
| 38.616667
| 81
| 0.538196
|
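A quick sanity check of the getsize() helper above against an actual Conv2d forward pass; the 64x48 input size is arbitrary, and the snippet assumes getsize is in scope.
# Sanity check of getsize() against a real conv layer; the 64x48 input is arbitrary.
import torch
import torch.nn as nn

h, w = 64, 48
conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
out = conv(torch.zeros(1, 3, h, w))
print(tuple(out.shape[2:]), getsize((h, w), 3, 2))   # both should be (31, 23)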
c9e55aa32d3229f47c4d3e059797f3c7cece0f61
| 1,342
|
py
|
Python
|
Day 5/password_generator.py
|
Jean-Bi/100DaysOfCodePython
|
2069d1366c58e7d5f4cd30cfc786e9c2e44b82ca
|
[
"MIT"
] | null | null | null |
Day 5/password_generator.py
|
Jean-Bi/100DaysOfCodePython
|
2069d1366c58e7d5f4cd30cfc786e9c2e44b82ca
|
[
"MIT"
] | null | null | null |
Day 5/password_generator.py
|
Jean-Bi/100DaysOfCodePython
|
2069d1366c58e7d5f4cd30cfc786e9c2e44b82ca
|
[
"MIT"
] | null | null | null |
#Password Generator Project
import random
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
print("Welcome to the PyPassword Generator!")
nr_letters = int(input("How many letters would you like in your password?\n"))
nr_symbols = int(input("How many symbols would you like?\n"))
nr_numbers = int(input("How many numbers would you like?\n"))
#Easy Level - Order not randomised:
#e.g. 4 letter, 2 symbol, 2 number = JduE&!91
#Initializes the password
password = []
#Adds the letters to the password
for i in range(1, nr_letters+1):
password += random.choice(letters)
#Adds the symbols to the password
for i in range(1, nr_symbols+1):
password += random.choice(symbols)
#Adds the numbers to the password
for i in range(1, nr_numbers+1):
password += random.choice(numbers)
#Hard Level - Order of characters randomised:
#e.g. 4 letter, 2 symbol, 2 number = g^2jk8&P
#Shuffle the password
random.shuffle(password)
#Prints the password joining it
print("Your password is: " + ''.join(password))
| 36.27027
| 270
| 0.584203
|
0951b25c6b7672ec2e940ac79c5b0fcae5d7e0b0
| 8,877
|
py
|
Python
|
mapboxgl/utils.py
|
bnaul/mapboxgl-jupyter
|
01f401642961d1044f8bc86eac04de8bebe3705c
|
[
"MIT"
] | null | null | null |
mapboxgl/utils.py
|
bnaul/mapboxgl-jupyter
|
01f401642961d1044f8bc86eac04de8bebe3705c
|
[
"MIT"
] | null | null | null |
mapboxgl/utils.py
|
bnaul/mapboxgl-jupyter
|
01f401642961d1044f8bc86eac04de8bebe3705c
|
[
"MIT"
] | 1
|
2019-12-27T15:48:22.000Z
|
2019-12-27T15:48:22.000Z
|
from .colors import color_ramps, common_html_colors
from chroma import Color, Scale
import geojson
import json
import base64
from io import BytesIO
import re
from matplotlib.image import imsave
from colour import Color as Colour
def row_to_geojson(row, lon, lat):
"""Convert a pandas dataframe row to a geojson format object. Converts all datetimes to epoch seconds.
"""
# Let pandas handle json serialization
row_json = json.loads(row.to_json(date_format='epoch', date_unit='s'))
return geojson.Feature(geometry=geojson.Point((row_json[lon], row_json[lat])),
properties={key: row_json[key] for key in row_json.keys() if key not in [lon, lat]})
def df_to_geojson(df, properties=None, lat='lat', lon='lon', precision=None, filename=None):
"""Serialize a Pandas dataframe to a geojson format Python dictionary
"""
if precision:
df[lon] = df[lon].round(precision)
df[lat] = df[lat].round(precision)
if not properties:
# if no properties are selected, use all properties in dataframe
properties = [c for c in df.columns if c not in [lon, lat]]
for prop in properties:
# Check if list of properties exists in dataframe columns
if prop not in list(df.columns):
raise ValueError(
'properties must be a valid list of column names from dataframe')
if prop in [lon, lat]:
raise ValueError(
'properties cannot be the geometry longitude or latitude column')
if filename:
with open(filename, 'w+') as f:
# Overwrite file if it already exists
pass
with open(filename, 'a+') as f:
# Write out file to line
f.write('{"type": "FeatureCollection", "features": [\n')
for idx, row in df[[lon, lat] + properties].iterrows():
if idx == 0:
f.write(geojson.dumps(row_to_geojson(row, lon, lat)) + '\n')
else:
f.write(',' + geojson.dumps(row_to_geojson(row, lon, lat)) + '\n')
f.write(']}')
return {
"type": "file",
"filename": filename,
"feature_count": df.shape[0]
}
else:
features = []
df[[lon, lat] + properties].apply(lambda x: features.append(
row_to_geojson(x, lon, lat)), axis=1)
return geojson.FeatureCollection(features)
def scale_between(minval, maxval, numStops):
""" Scale a min and max value to equal interval domain with
numStops discrete values
"""
scale = []
if numStops < 2:
return [minval, maxval]
elif maxval < minval:
        raise ValueError('maxval must not be less than minval')
else:
domain = maxval - minval
interval = float(domain) / float(numStops)
for i in range(numStops):
scale.append(round(minval + interval * i, 2))
return scale
def create_radius_stops(breaks, min_radius, max_radius):
"""Convert a data breaks into a radius ramp
"""
num_breaks = len(breaks)
radius_breaks = scale_between(min_radius, max_radius, num_breaks)
stops = []
for i, b in enumerate(breaks):
stops.append([b, radius_breaks[i]])
return stops
def create_weight_stops(breaks):
"""Convert data breaks into a heatmap-weight ramp
"""
num_breaks = len(breaks)
weight_breaks = scale_between(0, 1, num_breaks)
stops = []
for i, b in enumerate(breaks):
stops.append([b, weight_breaks[i]])
return stops
def create_color_stops(breaks, colors='RdYlGn', color_ramps=color_ramps):
"""Convert a list of breaks into color stops using colors from colorBrewer
or a custom list of color values in RGB, RGBA, HSL, CSS text, or HEX format.
See www.colorbrewer2.org for a list of color options to pass
"""
num_breaks = len(breaks)
stops = []
if isinstance(colors, list):
# Check if colors contain a list of color values
if len(colors) == 0 or len(colors) != num_breaks:
raise ValueError(
'custom color list must be of same length as breaks list')
for color in colors:
# Check if color is valid string
try:
Colour(color)
            except Exception:
raise ValueError(
'The color code {color} is in the wrong format'.format(color=color))
for i, b in enumerate(breaks):
stops.append([b, colors[i]])
else:
if colors not in color_ramps.keys():
raise ValueError('color does not exist in colorBrewer!')
else:
try:
ramp = color_ramps[colors][num_breaks]
except KeyError:
raise ValueError("Color ramp {} does not have a {} breaks".format(
colors, num_breaks))
for i, b in enumerate(breaks):
stops.append([b, ramp[i]])
return stops
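# Example with a custom color list (must have the same length as the breaks list):
#     create_color_stops([0, 50, 100], colors=['#2b83ba', '#ffffbf', '#d7191c'])
#     ->  [[0, '#2b83ba'], [50, '#ffffbf'], [100, '#d7191c']]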
def rgb_tuple_from_str(color_string):
"""Convert color in format 'rgb(RRR,GGG,BBB)', 'rgba(RRR,GGG,BBB,alpha)',
'#RRGGBB', or limited English color name (eg 'red') to tuple (RRR, GGG, BBB)
"""
try:
# English color names (limited)
rgb_string = common_html_colors[color_string]
return tuple([float(x) for x in re.findall(r'\d{1,3}', rgb_string)])
except KeyError:
try:
# HEX color code
hex_string = color_string.lstrip('#')
            return tuple(int(hex_string[i:i+2], 16) for i in (0, 2, 4))
except ValueError:
# RGB or RGBA formatted strings
return tuple([int(x) if float(x) > 1 else float(x)
for x in re.findall(r"[-+]?\d*\.*\d+", color_string)])
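# Examples:
#     rgb_tuple_from_str('#ff8000')         ->  (255, 128, 0)
#     rgb_tuple_from_str('rgb(255,128,0)')  ->  (255, 128, 0)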
def color_map(lookup, color_stops, default_color='rgb(122,122,122)'):
"""Return an rgb color value interpolated from given color_stops;
assumes colors in color_stops provided as strings of form 'rgb(RRR,GGG,BBB)'
or in hex: '#RRGGBB'
"""
# if no color_stops, use default color
if len(color_stops) == 0:
return default_color
# dictionary to lookup color from match-type color_stops
match_map = dict((x, y) for (x, y) in color_stops)
# if lookup matches stop exactly, return corresponding color (first priority)
# (includes non-numeric color_stop "keys" for finding color by match)
if lookup in match_map.keys():
return match_map.get(lookup)
# if lookup value numeric, map color by interpolating from color scale
if isinstance(lookup, (int, float, complex)):
# try ordering stops
try:
stops, colors = zip(*sorted(color_stops))
# if not all stops are numeric, attempt looking up as if categorical stops
except TypeError:
return match_map.get(lookup, default_color)
# for interpolation, all stops must be numeric
if not all(isinstance(x, (int, float, complex)) for x in stops):
return default_color
# check if lookup value in stops bounds
if float(lookup) <= stops[0]:
return colors[0]
elif float(lookup) >= stops[-1]:
return colors[-1]
# check if lookup value matches any stop value
elif float(lookup) in stops:
return colors[stops.index(lookup)]
# interpolation required
else:
rgb_tuples = [Color(rgb_tuple_from_str(x)) for x in colors]
# identify bounding color stop values
lower = max([stops[0]] + [x for x in stops if x < lookup])
upper = min([stops[-1]] + [x for x in stops if x > lookup])
# colors from bounding stops
lower_color = rgb_tuples[stops.index(lower)]
upper_color = rgb_tuples[stops.index(upper)]
# generate color scale for mapping lookup value to interpolated color
scale = Scale(Color(lower_color), Color(upper_color))
# compute linear "relative distance" from lower bound color to upper bound color
distance = (lookup - lower) / (upper - lower)
# return string representing rgb color value
return scale(distance).to_string().replace(', ', ',')
# default color value catch-all
return default_color
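# Example (hypothetical stops): color_map(5, [[0, '#000000'], [10, '#ffffff']]) falls between
# the two stops, so the result is interpolated roughly halfway between black and white;
# the exact rgb string returned depends on the chroma Scale interpolation.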
def img_encode(arr, **kwargs):
"""Encode ndarray to base64 string image data
Parameters
----------
arr: ndarray (rows, cols, depth)
kwargs: passed directly to matplotlib.image.imsave
"""
sio = BytesIO()
imsave(sio, arr, **kwargs)
sio.seek(0)
img_format = kwargs['format'] if kwargs.get('format') else 'png'
img_str = base64.b64encode(sio.getvalue()).decode()
return 'data:image/{};base64,{}'.format(img_format, img_str)
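# Example (hypothetical array): encode a small random RGB image as a data URI
#     import numpy as np
#     uri = img_encode(np.random.rand(8, 8, 3), format='png')
#     # uri starts with 'data:image/png;base64,'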
| 34.011494
| 111
| 0.601893
|
f56c52dacefb236f06556ef19da44dc41ef1e287
| 2,314
|
py
|
Python
|
routes/user.py
|
zaibacu/yt-flask-example
|
d85b9e4761b63ac710553ba1b44e854c1cbaa395
|
[
"MIT"
] | 1
|
2021-03-07T14:07:29.000Z
|
2021-03-07T14:07:29.000Z
|
routes/user.py
|
zaibacu/yt-flask-example
|
d85b9e4761b63ac710553ba1b44e854c1cbaa395
|
[
"MIT"
] | null | null | null |
routes/user.py
|
zaibacu/yt-flask-example
|
d85b9e4761b63ac710553ba1b44e854c1cbaa395
|
[
"MIT"
] | null | null | null |
import hashlib
from functools import wraps
from flask import Blueprint, session, redirect, render_template, request
from models import User
from database import db
bp = Blueprint("users", __name__)
def hash(password):
return hashlib.sha3_256(password.encode("UTF-8")).hexdigest()
def requires_login(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
user_id = session.get("user_id")
if user_id:
return fn(*args, **kwargs)
else:
return "Please login"
return wrapper
def requires_admin(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
user_id = session.get("user_id")
user = db.session.query(User).filter_by(id=user_id).one_or_none()
if user and user.is_admin:
return fn(*args, **kwargs)
else:
return "Only admin can see this"
return wrapper
@bp.route("/secret")
@requires_login
def secret():
return hash("Very secret")
@bp.route("/secret2")
@requires_admin
def secret2():
return hash("Very secret2")
@bp.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
data = request.form
if data["password1"] != data["password2"]:
return redirect("/user/register")
username = data["username"]
password_hash = hash(data["password1"])
user = User(username=username, password_hash=password_hash)
db.session.add(user)
db.session.commit()
return redirect("/user/login")
@bp.route("/login", methods=["GET", "POST"])
def login():
if request.method == "GET":
return render_template("login.html")
elif request.method == "POST":
data = request.form
user = db.session.query(User).filter_by(username=data.get("username")).one_or_none()
if user and user.password_hash == hash(data.get("password")):
session["user_id"] = user.id
return redirect("/user/secret")
else:
if "user_id" in session:
del session["user_id"]
return redirect("/user/login")
@bp.route("/logout")
def logout():
if "user_id" in session:
del session["user_id"]
return redirect("/user/login")
| 25.428571
| 92
| 0.61452
|
ba67daebffe310ed3ad05aec4da1f9117de6ed44
| 22,629
|
py
|
Python
|
syft/serde/serde.py
|
robot-ai-machinelearning/PySyft
|
df7d539ad7643a93235249af05981578dd095a5a
|
[
"Apache-2.0"
] | 1
|
2019-12-29T15:20:44.000Z
|
2019-12-29T15:20:44.000Z
|
syft/serde/serde.py
|
robot-ai-machinelearning/PySyft
|
df7d539ad7643a93235249af05981578dd095a5a
|
[
"Apache-2.0"
] | null | null | null |
syft/serde/serde.py
|
robot-ai-machinelearning/PySyft
|
df7d539ad7643a93235249af05981578dd095a5a
|
[
"Apache-2.0"
] | null | null | null |
"""
This file exists to provide one common place for all serialization to occur
regardless of framework. As msgpack only supports basic types and binary formats
every type must be first be converted to one of these types. Thus, we've split our
functionality into three steps. When converting from a PySyft object (or collection
of objects) to an object to be sent over the wire (a message), those three steps
are (in order):
1. Simplify - converts PyTorch objects to simple Python objects (using pickle)
2. Serialize - converts Python objects to binary
3. Compress - compresses the binary (Now we're ready to send!)
Inversely, when converting from a message sent over the wire back to a PySyft
object, the three steps are (in order):
1. Decompress - converts compressed binary back to decompressed binary
2. Deserialize - converts from binary to basic python objects
3. Detail - converts some basic python objects back to PyTorch objects (Tensors)
Furthermore, note that there is different simplification/serialization logic
for objects of different types. Thus, instead of using if/else logic, we have
global dictionaries which contain functions and Python types as keys. For
simplification logic, this dictionary is called "simplifiers". The keys
are the types and values are the simplification logic. For example,
simplifiers[tuple] will return the function which knows how to simplify the
tuple type. The same is true for all other simplifier/detailer functions.
By default, the simplification/detail operations expect Torch tensors. If the setup requires a
different serialization process, it can override the functions _serialize_tensor and _deserialize_tensor
By default, we serialize using msgpack and compress using lz4.
If different compressions are required, the worker can override the function apply_compress_scheme
"""
from collections import OrderedDict
import inspect
import lz4
from lz4 import ( # noqa: F401
frame,
) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'
import msgpack
import zstd
import syft
from syft import dependency_check
from syft.federated.train_config import TrainConfig
from syft.frameworks.torch.tensors.decorators.logging import LoggingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.frameworks.torch.tensors.interpreters.private import PrivateTensor
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.crt_precision import CRTPrecisionTensor
from syft.frameworks.torch.tensors.interpreters.autograd import AutogradTensor
from syft.frameworks.torch.tensors.interpreters.promise import PromiseTensor
from syft.generic.pointers.multi_pointer import MultiPointerTensor
from syft.generic.pointers.object_pointer import ObjectPointer
from syft.generic.pointers.pointer_tensor import PointerTensor
from syft.generic.pointers.pointer_plan import PointerPlan
from syft.generic.pointers.pointer_protocol import PointerProtocol
from syft.generic.pointers.object_wrapper import ObjectWrapper
from syft.messaging.plan import Plan
from syft.messaging.plan.state import State
from syft.messaging.plan.procedure import Procedure
from syft.messaging.protocol import Protocol
from syft.messaging.message import Message
from syft.messaging.message import Operation
from syft.messaging.message import ObjectMessage
from syft.messaging.message import ObjectRequestMessage
from syft.messaging.message import IsNoneMessage
from syft.messaging.message import GetShapeMessage
from syft.messaging.message import ForceObjectDeleteMessage
from syft.messaging.message import SearchMessage
from syft.messaging.message import PlanCommandMessage
from syft.serde.native_serde import MAP_NATIVE_SIMPLIFIERS_AND_DETAILERS
from syft.workers.abstract import AbstractWorker
from syft.workers.base import BaseWorker
from syft.exceptions import CompressionNotFoundException
from syft.exceptions import GetNotPermittedError
from syft.exceptions import ResponseSignatureError
if dependency_check.torch_available:
from syft.serde.torch_serde import MAP_TORCH_SIMPLIFIERS_AND_DETAILERS
else:
MAP_TORCH_SIMPLIFIERS_AND_DETAILERS = {}
if dependency_check.tensorflow_available:
from syft_tensorflow.serde import MAP_TF_SIMPLIFIERS_AND_DETAILERS
else:
MAP_TF_SIMPLIFIERS_AND_DETAILERS = {}
from syft.serde.proto import proto_type_info
# Maps a type to a tuple containing its simplifier and detailer function
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
MAP_TO_SIMPLIFIERS_AND_DETAILERS = OrderedDict(
list(MAP_NATIVE_SIMPLIFIERS_AND_DETAILERS.items())
+ list(MAP_TORCH_SIMPLIFIERS_AND_DETAILERS.items())
+ list(MAP_TF_SIMPLIFIERS_AND_DETAILERS.items())
)
# If an object implements its own simplify and detail functions it should be stored in this list
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
OBJ_SIMPLIFIER_AND_DETAILERS = [
AdditiveSharingTensor,
FixedPrecisionTensor,
PrivateTensor,
CRTPrecisionTensor,
LoggingTensor,
MultiPointerTensor,
PromiseTensor,
ObjectPointer,
Plan,
State,
Procedure,
Protocol,
PointerTensor,
PointerPlan,
PointerProtocol,
ObjectWrapper,
TrainConfig,
BaseWorker,
AutogradTensor,
Message,
Operation,
ObjectMessage,
ObjectRequestMessage,
IsNoneMessage,
GetShapeMessage,
ForceObjectDeleteMessage,
SearchMessage,
PlanCommandMessage,
]
# If an object implements its own force_simplify and force_detail functions it should be stored in this list
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS = [BaseWorker]
# For registering syft objects with custom simplify and detail methods
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
EXCEPTION_SIMPLIFIER_AND_DETAILERS = [GetNotPermittedError, ResponseSignatureError]
# COMPRESSION SCHEME INT CODES
NO_COMPRESSION = 40
LZ4 = 41
ZSTD = 42
scheme_to_bytes = {
NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder="big"),
LZ4: LZ4.to_bytes(1, byteorder="big"),
ZSTD: ZSTD.to_bytes(1, byteorder="big"),
}
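# For example, scheme_to_bytes[LZ4] is b'\x29' (41); _compress prepends this single byte
# so _decompress can pick the matching decompression routine.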
## SECTION: High Level Simplification Router
def _force_full_simplify(worker: AbstractWorker, obj: object) -> object:
"""To force a full simplify generally if the usual _simplify is not suitable.
If we can not full simplify a object we simplify it as usual instead.
Args:
obj: The object.
Returns:
The simplified object.
"""
# check to see if there is a full simplifier
# for this type. If there is, return the full simplified object.
current_type = type(obj)
if current_type in forced_full_simplifiers:
result = (
forced_full_simplifiers[current_type][0],
forced_full_simplifiers[current_type][1](worker, obj),
)
return result
# If we already tried to find a full simplifier for this type but failed, we should
# simplify it instead.
elif current_type in no_full_simplifiers_found:
return _simplify(worker, obj)
else:
# If the object type is not in forced_full_simplifiers,
# we check the classes that this object inherits from.
# `inspect.getmro` give us all types this object inherits
# from, including `type(obj)`. We can skip the type of the
# object because we already tried this in the
# previous step.
classes_inheritance = inspect.getmro(type(obj))[1:]
for inheritance_type in classes_inheritance:
if inheritance_type in forced_full_simplifiers:
# Store the inheritance_type in forced_full_simplifiers so next
# time we see this type serde will be faster.
forced_full_simplifiers[current_type] = forced_full_simplifiers[inheritance_type]
result = (
forced_full_simplifiers[current_type][0],
forced_full_simplifiers[current_type][1](worker, obj),
)
return result
# If there is not a full_simplifier for this
# object, then we simplify it.
no_full_simplifiers_found.add(current_type)
return _simplify(worker, obj)
## SECTION: dynamically generate simplifiers and detailers
def _generate_simplifiers_and_detailers():
"""Generate simplifiers, forced full simplifiers and detailers,
by registering native and torch types, syft objects with custom
simplify and detail methods, or syft objects with custom
force_simplify and force_detail methods.
    NOTE: this function uses `proto_type_info`, which translates a python class into the Serde constant defined in
https://github.com/OpenMined/proto. If the class used in `MAP_TO_SIMPLIFIERS_AND_DETAILERS`,
`OBJ_SIMPLIFIER_AND_DETAILERS`, `EXCEPTION_SIMPLIFIER_AND_DETAILERS`, `OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS`
is not defined in `proto.json` file in https://github.com/OpenMined/proto, this function will error.
Returns:
The simplifiers, forced_full_simplifiers, detailers
"""
simplifiers = OrderedDict()
forced_full_simplifiers = OrderedDict()
detailers = OrderedDict()
def _add_simplifier_and_detailer(curr_type, simplifier, detailer, forced=False):
type_info = proto_type_info(curr_type)
if forced:
forced_full_simplifiers[curr_type] = (type_info.forced_code, simplifier)
detailers[type_info.forced_code] = detailer
else:
simplifiers[curr_type] = (type_info.code, simplifier)
detailers[type_info.code] = detailer
# Register native and torch types
for curr_type in MAP_TO_SIMPLIFIERS_AND_DETAILERS:
simplifier, detailer = MAP_TO_SIMPLIFIERS_AND_DETAILERS[curr_type]
_add_simplifier_and_detailer(curr_type, simplifier, detailer)
# Register syft objects with custom simplify and detail methods
for syft_type in OBJ_SIMPLIFIER_AND_DETAILERS + EXCEPTION_SIMPLIFIER_AND_DETAILERS:
simplifier, detailer = syft_type.simplify, syft_type.detail
_add_simplifier_and_detailer(syft_type, simplifier, detailer)
# Register syft objects with custom force_simplify and force_detail methods
for syft_type in OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS:
force_simplifier, force_detailer = syft_type.force_simplify, syft_type.force_detail
_add_simplifier_and_detailer(syft_type, force_simplifier, force_detailer, forced=True)
return simplifiers, forced_full_simplifiers, detailers
simplifiers, forced_full_simplifiers, detailers = _generate_simplifiers_and_detailers()
# Store types that are not simplifiable (int, float, None) so we
# can ignore them during serialization.
no_simplifiers_found, no_full_simplifiers_found = set(), set()
## SECTION: High Level Public Functions (these are the ones you use)
def serialize(
obj: object,
worker: AbstractWorker = None,
simplified: bool = False,
force_no_compression: bool = False,
force_no_serialization: bool = False,
force_full_simplification: bool = False,
) -> bin:
"""This method can serialize any object PySyft needs to send or store.
This is the high level function for serializing any object or collection
of objects which PySyft needs to send over the wire. It includes three
steps, Simplify, Serialize, and Compress as described inline below.
Args:
obj (object): the object to be serialized
simplified (bool): in some cases we want to pass in data which has
already been simplified - in which case we must skip double
simplification - which would be bad.... so bad... so... so bad
force_no_compression (bool): If true, this will override ANY module
settings and not compress the objects being serialized. The primary
expected use of this functionality is testing and/or experimentation.
force_no_serialization (bool): Primarily a testing tool, this will force
this method to return human-readable Python objects which is very useful
            for testing and debugging (forcibly overrides module compression,
            serialization, and the 'force_no_compression' override). In other words,
only simplification operations are performed.
force_full_simplification (bool): Some objects are only partially serialized
by default. For objects where this is the case, setting this flag to True
will force the entire object to be serialized. For example, setting this
flag to True will cause a VirtualWorker to be serialized WITH all of its
tensors while by default VirtualWorker objects only serialize a small
amount of metadata.
Returns:
binary: the serialized form of the object.
"""
if worker is None:
# TODO[jvmancuso]: This might be worth a standalone function.
worker = syft.framework.hook.local_worker
# 1) Simplify
    # simplify difficult-to-serialize objects. See the _simplify method
# for details on how this works. The general purpose is to handle types
# which the fast serializer cannot handle
if not simplified:
if force_full_simplification:
simple_objects = _force_full_simplify(worker, obj)
else:
simple_objects = _simplify(worker, obj)
else:
simple_objects = obj
# 2) Serialize
# serialize into a binary
if force_no_serialization:
return simple_objects
else:
binary = msgpack.dumps(simple_objects)
# 3) Compress
    # optionally compress the binary and return the result.
    # _compress prepends a 1-byte scheme code (NO_COMPRESSION, LZ4 or ZSTD)
    # to the compressed stream so _decompress knows which routine to apply.
    # If force_no_compression is set by the caller, the raw msgpack binary
    # is returned as-is, without any scheme header.
if force_no_compression:
return binary
else:
return _compress(binary)
def deserialize(binary: bin, worker: AbstractWorker = None, details=True) -> object:
""" This method can deserialize any object PySyft needs to send or store.
This is the high level function for deserializing any object or collection
of objects which PySyft has sent over the wire or stored. It includes three
steps, Decompress, Deserialize, and Detail as described inline below.
Args:
binary (bin): the serialized object to be deserialized.
worker (AbstractWorker): the worker which is acquiring the message content,
for example used to specify the owner of a tensor received(not obvious
for virtual workers)
details (bool): there are some cases where we need to perform the decompression
and deserialization part, but we don't need to detail all the message.
This is the case for Plan workers for instance
Returns:
object: the deserialized form of the binary input.
"""
if worker is None:
# TODO[jvmancuso]: This might be worth a standalone function.
worker = syft.framework.hook.local_worker
# 1) Decompress the binary if needed
binary = _decompress(binary)
# 2) Deserialize
# This function converts the binary into the appropriate python
# object (or nested dict/collection of python objects)
simple_objects = msgpack.loads(binary, use_list=False)
if details:
# 3) Detail
        # This function converts typed, simple objects into their more
# complex (and difficult to serialize) counterparts which the
# serialization library wasn't natively able to serialize (such
# as msgpack's inability to serialize torch tensors or ... or
        # python slice objects)
return _detail(worker, simple_objects)
else:
# sometimes we want to skip detailing (such as in Plan)
return simple_objects
## SECTION: chosen Compression Algorithm
def _apply_compress_scheme(decompressed_input_bin) -> tuple:
"""
Apply the selected compression scheme.
    By default, LZ4 is used.
Args:
decompressed_input_bin: the binary to be compressed
"""
return apply_lz4_compression(decompressed_input_bin)
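# Hypothetical override switching the default scheme to ZSTD (the module docstring notes
# that workers may replace this function):
#
#     import syft.serde.serde as serde
#     serde._apply_compress_scheme = apply_zstd_compression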
def apply_lz4_compression(decompressed_input_bin) -> tuple:
"""
Apply LZ4 compression to the input
Args:
decompressed_input_bin: the binary to be compressed
Returns:
a tuple (compressed_result, LZ4)
"""
return lz4.frame.compress(decompressed_input_bin), LZ4
def apply_zstd_compression(decompressed_input_bin) -> tuple:
"""
Apply ZSTD compression to the input
Args:
decompressed_input_bin: the binary to be compressed
Returns:
a tuple (compressed_result, ZSTD)
"""
return zstd.compress(decompressed_input_bin), ZSTD
def apply_no_compression(decompressed_input_bin) -> tuple:
"""
No compression is applied to the input
Args:
decompressed_input_bin: the binary
Returns:
        a tuple (the binary, NO_COMPRESSION)
"""
return decompressed_input_bin, NO_COMPRESSION
def _compress(decompressed_input_bin: bin) -> bin:
"""
This function compresses a binary using the function _apply_compress_scheme
if the input has been already compressed in some step, it will return it as it is
Args:
decompressed_input_bin (bin): binary to be compressed
Returns:
bin: a compressed binary
"""
compress_stream, compress_scheme = _apply_compress_scheme(decompressed_input_bin)
try:
z = scheme_to_bytes[compress_scheme] + compress_stream
return z
except KeyError:
raise CompressionNotFoundException(
f"Compression scheme not found for compression code: {str(compress_scheme)}"
)
def _decompress(binary: bin) -> bin:
"""
This function decompresses a binary using the scheme defined in the first byte of the input
Args:
binary (bin): a compressed binary
Returns:
bin: decompressed binary
"""
# check the 1-byte header to check the compression scheme used
compress_scheme = binary[0]
# remove the 1-byte header from the input stream
binary = binary[1:]
# 1) Decompress or return the original stream
if compress_scheme == LZ4:
return lz4.frame.decompress(binary)
elif compress_scheme == ZSTD:
return zstd.decompress(binary)
elif compress_scheme == NO_COMPRESSION:
return binary
else:
raise CompressionNotFoundException(
f"Compression scheme not found for compression code: {str(compress_scheme)}"
)
def _simplify(worker: AbstractWorker, obj: object, **kwargs) -> object:
"""
This function takes an object as input and returns a simple
Python object which is supported by the chosen serialization
method (such as JSON or msgpack). The reason we have this function
is that some objects are either NOT supported by high level (fast)
serializers OR the high level serializers don't support the fastest
form of serialization. For example, PyTorch tensors have custom pickle
    functionality, thus it's better to pre-serialize PyTorch tensors using
    pickle and then serialize that binary along with the rest of the message
being sent.
Args:
obj: An object which may need to be simplified.
Returns:
        A simple Python object which msgpack can serialize.
"""
# Check to see if there is a simplifier
# for this type. If there is, return the simplified object.
# breakpoint()
current_type = type(obj)
if current_type in simplifiers:
result = (simplifiers[current_type][0], simplifiers[current_type][1](worker, obj, **kwargs))
return result
# If we already tried to find a simplifier for this type but failed, we should
# just return the object as it is.
elif current_type in no_simplifiers_found:
return obj
else:
# If the object type is not in simplifiers,
# we check the classes that this object inherits from.
# `inspect.getmro` give us all types this object inherits
# from, including `type(obj)`. We can skip the type of the
# object because we already tried this in the
# previous step.
classes_inheritance = inspect.getmro(type(obj))[1:]
for inheritance_type in classes_inheritance:
if inheritance_type in simplifiers:
# Store the inheritance_type in simplifiers so next time we see this type
# serde will be faster.
simplifiers[current_type] = simplifiers[inheritance_type]
result = (
simplifiers[current_type][0],
simplifiers[current_type][1](worker, obj, **kwargs),
)
return result
# if there is not a simplifier for this
# object, then the object is already a
# simple python object and we can just
# return it.
no_simplifiers_found.add(current_type)
return obj
def _detail(worker: AbstractWorker, obj: object, **kwargs) -> object:
"""Reverses the functionality of _simplify.
Where applicable, it converts simple objects into more complex objects such
as converting binary objects into torch tensors. Read _simplify for more
information on why _simplify and detail are needed.
Args:
worker: the worker which is acquiring the message content, for example
            used to specify the owner of a tensor received (not obvious for
virtual workers).
obj: a simple Python object which msgpack deserialized.
Returns:
obj: a more complex Python object which msgpack would have had trouble
deserializing directly.
"""
if type(obj) in (list, tuple):
return detailers[obj[0]](worker, obj[1], **kwargs)
else:
return obj
| 39.7
| 115
| 0.728799
|
0ba1774a98bbb212454435b57ef476e328ba2e48
| 2,627
|
py
|
Python
|
src/pyscaffold/contrib/setuptools_scm/hg.py
|
jleni/pyscaffold
|
0d0ed09235ec6f0f608a1ae45a1fef26cb74ffde
|
[
"MIT"
] | 3
|
2021-05-28T14:55:18.000Z
|
2022-01-18T08:38:11.000Z
|
src/pyscaffold/contrib/setuptools_scm/hg.py
|
jleni/pyscaffold
|
0d0ed09235ec6f0f608a1ae45a1fef26cb74ffde
|
[
"MIT"
] | 2
|
2017-10-04T19:38:48.000Z
|
2017-10-04T19:45:07.000Z
|
src/pyscaffold/contrib/setuptools_scm/hg.py
|
jleni/pyscaffold
|
0d0ed09235ec6f0f608a1ae45a1fef26cb74ffde
|
[
"MIT"
] | 2
|
2018-10-03T09:37:36.000Z
|
2019-07-31T02:13:58.000Z
|
import os
from .utils import do, trace, data_from_mime, has_command
from .version import meta, tags_to_versions
FILES_COMMAND = 'hg locate -I .'
def _hg_tagdist_normalize_tagcommit(root, tag, dist, node):
dirty = node.endswith('+')
node = 'h' + node.strip('+')
# Detect changes since the specified tag
revset = ("(branch(.)" # look for revisions in this branch only
" and tag({tag!r})::." # after the last tag
# ignore commits that only modify .hgtags and nothing else:
" and (merge() or file('re:^(?!\.hgtags).*$'))"
" and not tag({tag!r}))" # ignore the tagged commit itself
).format(tag=tag)
if tag != '0.0':
commits = do(['hg', 'log', '-r', revset, '--template', '{node|short}'],
root)
else:
commits = True
trace('normalize', locals())
if commits or dirty:
return meta(tag, distance=dist, node=node, dirty=dirty)
else:
return meta(tag)
def parse(root):
if not has_command('hg'):
return
identity_data = do('hg id -i -t', root).split()
if not identity_data:
return
node = identity_data.pop(0)
tags = tags_to_versions(identity_data)
# filter tip in degraded mode on old setuptools
tags = [x for x in tags if x != 'tip']
dirty = node[-1] == '+'
if tags:
return meta(tags[0], dirty=dirty)
if node.strip('+') == '0'*12:
trace('initial node', root)
return meta('0.0', dirty=dirty)
    # the newline is needed for merge state, see issue 72
cmd = 'hg parents --template "{latesttag} {latesttagdistance}\n"'
out = do(cmd, root)
try:
# in merge state we assume parent 1 is fine
tags, dist = out.splitlines()[0].split()
# pick latest tag from tag list
tag = tags.split(':')[-1]
if tag == 'null':
tag = '0.0'
dist = int(dist) + 1
return _hg_tagdist_normalize_tagcommit(root, tag, dist, node)
except ValueError:
pass # unpacking failed, old hg
def archival_to_version(data):
trace('data', data)
node = data.get('node', '')[:12]
if node:
node = 'h' + node
if 'tag' in data:
return meta(data['tag'])
elif 'latesttag' in data:
return meta(data['latesttag'],
distance=data['latesttagdistance'],
node=node)
else:
return meta('0.0', node=node)
def parse_archival(root):
archival = os.path.join(root, '.hg_archival.txt')
data = data_from_mime(archival)
return archival_to_version(data)
| 31.27381
| 79
| 0.574039
|
a99a1adbda277aa3ef6b3b59980e09cec889db71
| 1,138
|
py
|
Python
|
chapter_4/4.11.py
|
frbaroni/ctci
|
ff2f308aae76ab0e3dde09c7f88f37b86073cb38
|
[
"MIT"
] | null | null | null |
chapter_4/4.11.py
|
frbaroni/ctci
|
ff2f308aae76ab0e3dde09c7f88f37b86073cb38
|
[
"MIT"
] | null | null | null |
chapter_4/4.11.py
|
frbaroni/ctci
|
ff2f308aae76ab0e3dde09c7f88f37b86073cb38
|
[
"MIT"
] | null | null | null |
import unittest
import random
import collections
from mytree import Tree
tree_items = [50, 25, 75, 10, 40, 65, 80]
tree = Tree.create_normal(tree_items)
# Cache 'size' on Tree insertion/remove
def tree_size(head):
def count(node):
if node is None:
return 0
else:
return 1 + count(node.left) + count(node.right)
return count(head)
def randomNode(head):
def in_order(node, state):
if node and (state['steps'] >= 0):
if state['steps'] == 0:
state['result'] = node
state['steps'] -= 1
in_order(node.left, state)
in_order(node.right, state)
total = tree_size(head)
selected = random.randint(0, total - 1)
state = dict(steps=selected, result=None)
in_order(head, state)
return state['result']
iterations = 10000
distribution = collections.defaultdict(lambda: 0)
for i in range(iterations):
node = randomNode(tree)
distribution[node.value] += 1
for value in tree_items:
distributed = round((distribution[value] / iterations) * 100)
print('{} -> {}'.format(value, distributed))
| 26.465116
| 65
| 0.623023
|
3a7c21404d2ba6164bddea9909151e0c09744318
| 1,129
|
py
|
Python
|
api/crawl_data.py
|
osamhack2020/WEB_Major-Ring_5mA
|
6da90afb2fc0f206b094eab904ee993e848b1b4b
|
[
"MIT"
] | 2
|
2020-10-10T11:48:20.000Z
|
2020-10-18T06:18:46.000Z
|
api/crawl_data.py
|
osamhack2020/WEB_Major-Ring_5mA
|
6da90afb2fc0f206b094eab904ee993e848b1b4b
|
[
"MIT"
] | 2
|
2020-10-25T11:12:59.000Z
|
2020-10-31T01:41:32.000Z
|
api/crawl_data.py
|
osamhack2020/WEB_Major-Ring_5mA
|
6da90afb2fc0f206b094eab904ee993e848b1b4b
|
[
"MIT"
] | 2
|
2020-10-10T09:23:27.000Z
|
2020-10-10T09:39:55.000Z
|
#api for crawling
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
def crawl_wevity(Contest, db):
for i in range(0, 99):
url = "https://www.wevity.com/?c=find&s=1&gp=" + str(i)
soup = BeautifulSoup(urlopen(url), "html.parser")
        # Remove unnecessary span tags
for span in soup.select('div.ms-list a > span'):
span.replaceWith('')
contest_list = soup.select('ul.list li')
for contest in contest_list[1:]:
contest_title = str(contest.find('div', {'class': 'tit'}).find('a').get_text().strip())
contest_category = str(contest.find('div', {'class': 'sub-tit'}).get_text()[5:])
contest_organization = str(contest.find('div', {'class': 'organ'}).get_text().strip())
# print("제목: " + contest_title)
# print("카테고리 :" + str(contest_category))
# print("주최사: " + contest_organization)
new_contest = Contest(title=contest_title, category=contest_category, organization=contest_organization)
db.session.add(new_contest)
db.session.commit()
| 49.086957
| 116
| 0.597874
|
3fc377648d5d6c9b5a6d0963bf856f574ef34a39
| 10,285
|
py
|
Python
|
src/model_trainer.py
|
Sean1572/CloudSeg
|
c2b7b1bab8a350aec654f61323e33854ebc1547c
|
[
"MIT"
] | null | null | null |
src/model_trainer.py
|
Sean1572/CloudSeg
|
c2b7b1bab8a350aec654f61323e33854ebc1547c
|
[
"MIT"
] | null | null | null |
src/model_trainer.py
|
Sean1572/CloudSeg
|
c2b7b1bab8a350aec654f61323e33854ebc1547c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
#from google.colab import drive
#drive.mount('/content/drive')
# In[11]:
#rgb_img = '/content/drive/Shareddrives/ACM_Project_Team_3/HYTA-master/3GT/training'
#seg_img = '/content/drive/Shareddrives/ACM_Project_Team_3/HYTA-master/images/training'
#rgb_img = '/content/drive/Shareddrives/ACM_Project_Team_3/WSISEG-Database-master/whole sky images'
#seg_img = '/content/drive/Shareddrives/ACM_Project_Team_3/WSISEG-Database-master/annotation'
rgb_img_training = "./Data/WSISEG-Database-master/whole sky images"
seg_img_training = "./Data/WSISEG-Database-master/annotation"
rgb_img_testing = "./Data/HYTA-master/images/training"
seg_img_testing = "./Data/HYTA-master/3GT/training-fixed"
# In[10]:
#!pip3 install segmentation_models_pytorch
#!pip3 install natsort
from torch.utils.data import Dataset
import os
from PIL import Image
import torchvision.transforms as transforms
class CustomDataSet(Dataset):
def __init__(self, main_dir, label_dir, transform):
self.main_dir = main_dir
self.label_dir = label_dir
self.transform = transform
all_imgs = os.listdir(main_dir)
        all_segs = os.listdir(label_dir)
self.total_imgs = natsorted(all_imgs)
self.total_segs = natsorted(all_segs)
def __len__(self):
return len(self.total_imgs)
def __getitem__(self, idx):
img_loc = os.path.join(self.main_dir, self.total_imgs[idx])
image = Image.open(img_loc).convert("RGB")
tensor_image = self.transform(image)
#tensor_image = transform(image)
#mean, std = tensor_image.mean([1,2]), tensor_image.std([1,2])
#print(mean, std, type(tensor_image))
seg_loc = os.path.join(self.label_dir, self.total_segs[idx])
labeled_image = Image.open(seg_loc).convert("RGB")
transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
labeled_image = transform(labeled_image)
labeled_image = labeled_image.float()
tensor_image = tensor_image.float()
return tensor_image, labeled_image
if __name__ == '__main__':
print("importing torch")
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision.models.segmentation import deeplabv3_resnet50
import segmentation_models_pytorch as smp
from numpy import asarray
import numpy as np
#/content/drive/Shareddrives/ACM_Project_Team_3
# Raw images
#/content/drive/Shareddrives/ACM_Project_Team_3/HYTA-master/images/training
# Labels
# /content/drive/Shareddrives/ACM_Project_Team_3/HYTA-master/3GT/training
from natsort import natsorted
def getDataStats(folder):
min_abs = 1
max_abs = 0
mean_sum = 0
std_sum = 0
i = 0
for image in os.listdir(folder):
#try:
img_loc = os.path.join(folder, image)
image = Image.open(img_loc).convert("RGB")
transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
tensor_image = transform(image)
mean, std, max_, min_ = tensor_image.mean([1,2]), tensor_image.std([1,2]), tensor_image.max(), tensor_image.min()
mean_sum += mean
std_sum += std
i += 1
min_abs = min(min_abs, min_)
max_abs = max(max_abs, max_)
if (max_ < 0 or min_ < 0):
print(image, min, max)
#except:
# print(image, "failed")
# continue
mean = mean_sum / i
std = std_sum / i
return mean, std, min_abs, max_abs
BATCH_SIZE = 32
## transformations
size = (256, 256)
transform = transforms.Compose([transforms.ToTensor(), transforms.Resize(size), transforms.Normalize([0.3114, 0.3166, 0.3946], [0.2580, 0.2593, 0.2953])])
#UNCOMMENT TO FIND MEAN AND STD OF DATASET
#mean, std = getDataStats(rgb_img)
#print("mean and std before normalize:")
#print("Mean of the image:", mean)
#print("Std of the image:", std)
## download and load training dataset
imagenet_data = CustomDataSet(rgb_img_training, seg_img_training, transform=transform)
trainloader = torch.utils.data.DataLoader(imagenet_data,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=2,)
## download and load training dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Resize(size), transforms.Normalize([0.3663, 0.4620, 0.5813], [0.1548, 0.1313, 0.1024])])
imagenet_data = CustomDataSet(rgb_img_testing, seg_img_testing, transform=transform)
testloader = torch.utils.data.DataLoader(imagenet_data,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=2)
#rgb_img_t = '/content/drive/MyDrive/Highway_Dataset/Test/TestSeq04/image'
#seg_img_t = '/content/drive/MyDrive/Highway_Dataset/Test/TestSeq04/label'
### download and load training dataset
#imagenet_data_test = CustomDataSet(rgb_img_t, seg_img_t, transform=transform)
#testloader = torch.utils.data.DataLoader(imagenet_data_test,
# batch_size=BATCH_SIZE,
# shuffle=False,
# num_workers=2)
#
for images, labels in trainloader:
print("batch size:", images.shape)
# In[8]:
import segmentation_models_pytorch as smp
learning_rate = 0.1
num_epochs = 10
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#model = MyModel()
#model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=True)
# model to eval() model and load onto computation devicce
#model.eval().to(device)
#model = model.to(device)
model = smp.MAnet(
encoder_name="resnet34", # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
encoder_weights="imagenet", # use `imagenet` pre-trained weights for encoder initialization
classes=3,
in_channels=3,
#encoder_depth=5
)
from segmentation_models_pytorch.encoders import get_preprocessing_fn
preprocess_input = get_preprocessing_fn('resnet34', pretrained='imagenet')
for images, labels in trainloader:
print("batch size:", images.shape)
out = model(images)
pr_mask = out.sigmoid()
print(out.shape, labels.shape)
break
criterion = nn.MSELoss()
loss = smp.utils.losses.DiceLoss()
# using multiple metrics to train the model
metrics = [
smp.utils.metrics.IoU(threshold=0.5),
smp.utils.metrics.Fscore(threshold=0.5),
smp.utils.metrics.Accuracy(threshold=0.5),
smp.utils.metrics.Recall(threshold=0.5),
smp.utils.metrics.Precision(threshold=0.5),
]
# Using Adam optimizer
optimizer = torch.optim.Adam(params=model.parameters(), lr=0.000075) #310
DEVICE = 'cuda'#'cpu'
train_epoch = smp.utils.train.TrainEpoch(
model,
loss=loss,
metrics=metrics,
optimizer=optimizer,
device=DEVICE,
verbose=True,
)
valid_epoch = smp.utils.train.ValidEpoch(
model,
loss=loss,
metrics=metrics,
device=DEVICE,
verbose=True,
)
## Model
#ENCODER = 'se_resnext50_32x4d'
#ENCODER_WEIGHTS = 'imagenet'
#CLASSES = ['car']
#ACTIVATION = 'sigmoid' # could be None for logits or 'softmax2d' for multiclass segmentation
#DEVICE = 'cpu' # did not get 'cuda' doesn't work
#
## create segmentation model with pretrained encoder
#model = smp.FPN(
# encoder_name=ENCODER,
# encoder_weights=ENCODER_WEIGHTS,
# classes=len(CLASSES),
# activation=ACTIVATION,
#)
#
#preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
max_score = 0
count = 0
torch.cuda.empty_cache()
for i in range(0, 15):
torch.cuda.empty_cache()
print('\nEpoch: {}'.format(i))
train_logs = train_epoch.run(trainloader)
valid_logs = valid_epoch.run(trainloader)#testloader)
# do something (save model, change lr, etc.)
if max_score < valid_logs['iou_score']:
max_score = valid_logs['iou_score']
torch.save(model, './best_model-MAnet#5.pth')
print('Model saved!')
else:
break
#if i == 3:
# optimizer.param_groups[0]['lr'] = 1e-5
# print('Decrease decoder learning rate to 1e-5!')
model.eval()
# In[27]:
import matplotlib.pyplot as plt
#https://github.com/qubvel/segmentation_models.pytorch/blob/master/examples/binary_segmentation_intro.ipynb
# In[37]:
batch = next(iter(trainloader))
with torch.no_grad():
model.eval()
logits = model(batch[0])
pr_masks = logits.sigmoid()
for image, gt_mask, pr_mask in zip(batch[0], batch[1], pr_masks):
plt.figure(figsize=(10, 5))
plt.subplot(1, 3, 1)
plt.imshow(image.numpy().transpose(1, 2, 0)) # convert CHW -> HWC
plt.title("Image")
plt.axis("off")
plt.subplot(1, 3, 2)
print(gt_mask)
plt.imshow(gt_mask.numpy().transpose(1, 2, 0).squeeze()) # just squeeze classes dim, because we have only one class
plt.title("Ground truth")
plt.axis("off")
plt.subplot(1, 3, 3)
print(pr_mask)
plt.imshow(pr_mask.numpy().transpose(1, 2, 0).squeeze()) # just squeeze classes dim, because we have only one class
plt.title("Prediction")
plt.axis("off")
plt.show()
# In[ ]:
display(prediction)
ground_truth = Image.open(seg_img).convert("RGB")
display(ground_truth)
# In[ ]:
# In[ ]:
| 32.342767
| 158
| 0.615946
|
ff9503f91d32deecf6754577f53747592a43c4bc
| 10,423
|
py
|
Python
|
api/app.py
|
frco9/karl-segmentation
|
84758aa42a8b3d7890ac6a58fa608753fdde0b92
|
[
"MIT"
] | null | null | null |
api/app.py
|
frco9/karl-segmentation
|
84758aa42a8b3d7890ac6a58fa608753fdde0b92
|
[
"MIT"
] | null | null | null |
api/app.py
|
frco9/karl-segmentation
|
84758aa42a8b3d7890ac6a58fa608753fdde0b92
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
#api/app.py
import os
import requests
from flask import Flask, current_app, Response, json, jsonify, request
from flask_cors import CORS
import pandas as pd
import argparse
from datetime import datetime
import os
import sys
import time
import scipy.misc
import cv2
from PIL import Image
import imageio
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from utils import *
from LIP_model import *
N_CLASSES = 20
INPUT_SIZE = (384, 384)
DATA_DIRECTORY = './datasets/examples'
DATA_LIST_PATH = './datasets/examples/list/val.txt'
RESTORE_FROM = './checkpoint/JPPNet-s2'
OUTPUT_DIR = './output/dataset'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
os.makedirs('{}/images'.format(OUTPUT_DIR))
os.makedirs('{}/labels'.format(OUTPUT_DIR))
def create_app():
"""
Create app
"""
app = Flask(__name__)
CORS(app, supports_credentials=True)
def custom_response(res, status_code):
"""
Custom Response Function
"""
return Response(
mimetype="application/json",
response=json.dumps(res),
status=status_code
)
def convert_mask_lip(mask):
LIP_to_FP_dict = {
0: 0,
1: 1,
2: 2,
3: 0,
4: 3,
5: 4,
6: 7,
7: 4,
8: 0,
9: 6,
10: 7,
11: 17,
12: 5,
13: 11,
14: 15,
15: 14,
16: 13,
17: 12,
18: 10,
19: 9
}
LIP_rgb_to_code_dict = {
'0_0_0': 0,
'128_0_0': 1,
'255_0_0': 2,
'0_85_0': 3,
'170_0_51': 4,
'255_85_0': 5,
'0_0_85': 6,
'0_119_221': 7,
'85_85_0': 8,
'0_85_85': 9,
'85_51_0': 10,
'52_86_128': 11,
'0_128_0': 12,
'0_0_255': 13,
'51_170_221': 14,
'0_255_255': 15,
'85_255_170': 16,
'170_255_85': 17,
'255_255_0': 18,
'255_170_0': 19
}
image_bounds_dict = {}
new_matrix = []
for i, row in enumerate(mask):
new_row = []
for j, elem in enumerate(row):
new_col = []
color_str = str(elem[0]) + '_' + str(elem[1]) + '_' + str(elem[2])
LIP_code = LIP_rgb_to_code_dict[color_str]
FP_code = LIP_to_FP_dict[LIP_code]
FP_code = [FP_code]*3
new_row.append(FP_code)
new_matrix.append(new_row)
new_matrix = np.array(new_matrix).astype(np.uint8)
return new_matrix
def getBoundingBoxes(mask):
image_bounds_dict = {}
for i, row in enumerate(mask[0]):
for j, elem in enumerate(row):
color_str = str(elem[0]) + '_' + str(elem[1]) + '_' + str(elem[2])
if color_str not in image_bounds_dict:
image_bounds_dict[color_str] = {
'left': j, 'top': i, 'right': j, 'bottom': i}
else:
previous_left = image_bounds_dict[color_str]['left']
previous_right = image_bounds_dict[color_str]['right']
previous_top = image_bounds_dict[color_str]['top']
previous_bottom = image_bounds_dict[color_str]['bottom']
image_bounds_dict[color_str]['left'] = min(j, previous_left)
image_bounds_dict[color_str]['top'] = min(i, previous_top)
image_bounds_dict[color_str]['right'] = max(j, previous_right)
image_bounds_dict[color_str]['bottom'] = max(i, previous_bottom)
data = []
for key, item in image_bounds_dict.items():
data.append({
'id': key,
'bounds': item
})
return data
@app.route('/', methods=['GET'])
def index():
return 'alive'
@app.route('/getSegmentation', methods=['POST'])
def get_segmentation():
if 'file' not in request.files:
return custom_response({ 'error': 'No file provided' }, 400)
file = request.files['file']
# if user does not select file, browser also
        # submits an empty part without a filename
if file.filename == '':
return custom_response({ 'error': 'File without name forbidden' }, 400)
img_contents = file.read()
with open('{}/images/{}.jpg'.format(OUTPUT_DIR, file.filename.split('.')[0]), "wb") as f:
f.write(img_contents)
# Create queue coordinator.
coord = tf.train.Coordinator()
h, w = INPUT_SIZE
# Load reader.
with tf.name_scope("create_inputs"):
reader = ImageReader(DATA_DIRECTORY, DATA_LIST_PATH, None, False, False, coord)
image = reader.read_images_from_binary(img_contents)
image_rev = tf.reverse(image, tf.stack([1]))
image_batch_origin = tf.stack([image, image_rev])
image_batch = tf.image.resize_images(image_batch_origin, [int(h), int(w)])
image_batch075 = tf.image.resize_images(image_batch_origin, [int(h * 0.75), int(w * 0.75)])
image_batch125 = tf.image.resize_images(image_batch_origin, [int(h * 1.25), int(w * 1.25)])
# Create network.
with tf.variable_scope('', reuse=False):
net_100 = JPPNetModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
net_075 = JPPNetModel({'data': image_batch075}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
net_125 = JPPNetModel({'data': image_batch125}, is_training=False, n_classes=N_CLASSES)
# parsing net
parsing_fea1_100 = net_100.layers['res5d_branch2b_parsing']
parsing_fea1_075 = net_075.layers['res5d_branch2b_parsing']
parsing_fea1_125 = net_125.layers['res5d_branch2b_parsing']
parsing_out1_100 = net_100.layers['fc1_human']
parsing_out1_075 = net_075.layers['fc1_human']
parsing_out1_125 = net_125.layers['fc1_human']
# pose net
resnet_fea_100 = net_100.layers['res4b22_relu']
resnet_fea_075 = net_075.layers['res4b22_relu']
resnet_fea_125 = net_125.layers['res4b22_relu']
with tf.variable_scope('', reuse=False):
pose_out1_100, pose_fea1_100 = pose_net(resnet_fea_100, 'fc1_pose')
pose_out2_100, pose_fea2_100 = pose_refine(pose_out1_100, parsing_out1_100, pose_fea1_100, name='fc2_pose')
parsing_out2_100, parsing_fea2_100 = parsing_refine(parsing_out1_100, pose_out1_100, parsing_fea1_100, name='fc2_parsing')
parsing_out3_100, parsing_fea3_100 = parsing_refine(parsing_out2_100, pose_out2_100, parsing_fea2_100, name='fc3_parsing')
with tf.variable_scope('', reuse=True):
pose_out1_075, pose_fea1_075 = pose_net(resnet_fea_075, 'fc1_pose')
pose_out2_075, pose_fea2_075 = pose_refine(pose_out1_075, parsing_out1_075, pose_fea1_075, name='fc2_pose')
parsing_out2_075, parsing_fea2_075 = parsing_refine(parsing_out1_075, pose_out1_075, parsing_fea1_075, name='fc2_parsing')
parsing_out3_075, parsing_fea3_075 = parsing_refine(parsing_out2_075, pose_out2_075, parsing_fea2_075, name='fc3_parsing')
with tf.variable_scope('', reuse=True):
pose_out1_125, pose_fea1_125 = pose_net(resnet_fea_125, 'fc1_pose')
pose_out2_125, pose_fea2_125 = pose_refine(pose_out1_125, parsing_out1_125, pose_fea1_125, name='fc2_pose')
parsing_out2_125, parsing_fea2_125 = parsing_refine(parsing_out1_125, pose_out1_125, parsing_fea1_125, name='fc2_parsing')
parsing_out3_125, parsing_fea3_125 = parsing_refine(parsing_out2_125, pose_out2_125, parsing_fea2_125, name='fc3_parsing')
parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out1_075, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out1_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)
parsing_out2 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out2_100, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out2_075, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out2_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)
parsing_out3 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out3_100, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out3_075, tf.shape(image_batch_origin)[1:3,]),
tf.image.resize_images(parsing_out3_125, tf.shape(image_batch_origin)[1:3,])]), axis=0)
raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)
head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)
tail_list = tf.unstack(tail_output, num=20, axis=2)
tail_list_rev = [None] * 20
for xx in range(14):
tail_list_rev[xx] = tail_list[xx]
tail_list_rev[14] = tail_list[15]
tail_list_rev[15] = tail_list[14]
tail_list_rev[16] = tail_list[17]
tail_list_rev[17] = tail_list[16]
tail_list_rev[18] = tail_list[19]
tail_list_rev[19] = tail_list[18]
tail_output_rev = tf.stack(tail_list_rev, axis=2)
tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))
raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)
raw_output_all = tf.expand_dims(raw_output_all, dim=0)
raw_output_all = tf.argmax(raw_output_all, dimension=3)
pred_all = tf.expand_dims(raw_output_all, dim=3) # Create 4-d tensor.
# Which variables to load.
restore_var = tf.global_variables()
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
sess.run(tf.local_variables_initializer())
# Load weights.
loader = tf.train.Saver(var_list=restore_var)
if RESTORE_FROM is not None:
if load(loader, sess, RESTORE_FROM):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Iterate over training steps.
parsing_ = sess.run(pred_all)
img_id = file.filename
msk = decode_labels(parsing_, num_classes=N_CLASSES)
parsing_im = convert_mask_lip(msk[0])
imageio.imwrite('{}/labels/{}.png'.format(OUTPUT_DIR, img_id.split('.')[0]), parsing_im)
coord.request_stop()
bbox = getBoundingBoxes(msk)
return custom_response(bbox, 200)
return app
| 36.065744
| 130
| 0.657968
|
db61eba8eb1ff0874a0c03ae47c874b12ab5e71c
| 741
|
py
|
Python
|
AddBinary67.py
|
Bit64L/LeetCode-Python-
|
64847cbb1adcaca4561b949e8acc52e8e031a6cb
|
[
"MIT"
] | null | null | null |
AddBinary67.py
|
Bit64L/LeetCode-Python-
|
64847cbb1adcaca4561b949e8acc52e8e031a6cb
|
[
"MIT"
] | null | null | null |
AddBinary67.py
|
Bit64L/LeetCode-Python-
|
64847cbb1adcaca4561b949e8acc52e8e031a6cb
|
[
"MIT"
] | null | null | null |
class Solution:
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
len_a = len(a) - 1
len_b = len(b) - 1
c = ""
accu, i = 0, 0
while len_a >= 0 or len_b >= 0:
if len_a >= 0 and len_b >= 0:
sum_ = int(a[len_a]) + int(b[len_b]) + accu
else:
if len_a >= 0:
sum_ = int(a[len_a]) + accu
else:
sum_ = int(b[len_b]) + accu
digit = sum_ % 2
accu = int(sum_ / 2)
c += str(digit)
len_a -= 1
len_b -= 1
if accu != 0:
c += str(accu)
return c[::-1]
solution = Solution()
print(solution.addBinary("1","11"))
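# Expected output: "100" ("1" + "11" in binary, i.e. 1 + 3 = 4)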
| 23.903226
| 53
| 0.407557
|
a4a14b8bdaefa312ab07f5255f38d2932f86a97a
| 1,018
|
py
|
Python
|
blog/migrations/0001_initial.py
|
Shawn9717/Neighberhood
|
d2cbc5e2489e56547703914f90fb663f8d1e92d0
|
[
"MIT"
] | null | null | null |
blog/migrations/0001_initial.py
|
Shawn9717/Neighberhood
|
d2cbc5e2489e56547703914f90fb663f8d1e92d0
|
[
"MIT"
] | null | null | null |
blog/migrations/0001_initial.py
|
Shawn9717/Neighberhood
|
d2cbc5e2489e56547703914f90fb663f8d1e92d0
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.1 on 2022-01-07 19:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('slug', models.CharField(max_length=130)),
('content', models.TextField()),
('image', models.ImageField(blank=True, null=True, upload_to='blog_pics')),
('dateTime', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 33.933333
| 120
| 0.620825
|
18baf4f58c9ab3a682c1b1ac0ae1ff347e0395d0
| 613
|
py
|
Python
|
4a.py
|
znuxor/adventofcode2017
|
79d0df07f24ea8d2793df3b1c853a85b760791c1
|
[
"BSD-3-Clause"
] | null | null | null |
4a.py
|
znuxor/adventofcode2017
|
79d0df07f24ea8d2793df3b1c853a85b760791c1
|
[
"BSD-3-Clause"
] | null | null | null |
4a.py
|
znuxor/adventofcode2017
|
79d0df07f24ea8d2793df3b1c853a85b760791c1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
from collections import defaultdict
with open('4a_data.txt', 'r') as problem_input:
data_input = problem_input.read().split('\n')[:-1]
# data_input = [
# 'aa bb cc dd ee',
# 'aa bb cc dd aa',
# 'aa bb cc dd aaa'
# ]
print('total:', end='')
print(len(data_input))
print()
total_valid = 0
for line in data_input:
word_dict = defaultdict(lambda: 0)
for word in line.split(' '):
word_dict[word]+=1
if max(word_dict.values())==1:
total_valid += 1
print(repr(line))
print(total_valid)
| 19.774194
| 54
| 0.595432
|
c76a4b0117ac73c49fa05b5ce2e8151204a522ac
| 565
|
py
|
Python
|
codeforces/switches-and-lamps-985b.py
|
jan25/code_sorted
|
f405fd0898f72eb3d5428f9e10aefb4a009d5089
|
[
"Unlicense"
] | 2
|
2018-01-18T11:01:36.000Z
|
2021-12-20T18:14:48.000Z
|
codeforces/switches-and-lamps-985b.py
|
jan25/code_sorted
|
f405fd0898f72eb3d5428f9e10aefb4a009d5089
|
[
"Unlicense"
] | null | null | null |
codeforces/switches-and-lamps-985b.py
|
jan25/code_sorted
|
f405fd0898f72eb3d5428f9e10aefb4a009d5089
|
[
"Unlicense"
] | null | null | null |
# import random
# n, m = 2000, 2000
# print (n, m)
# for i in range(n):
# s = []
# for j in range(m):
# # if random.randint(0, 10) % 2 == 1:
# s.append('1')
# # else:
# # s.append('0')
# print (''.join(s))
n, m = map(int, input().split())
sums = [0] * m
strs = []
for i in range(n):
s = input()
strs.append(s)
for j in range(len(s)):
if s[j] == '1': sums[j] += 1
yes = "NO"
for s in strs:
ones, oks = 0, 0
for j in range(len(s)):
if s[j] == '1':
ones += 1
oks += 1 if sums[j] > 1 else 0
if ones == oks:
yes = "YES"
break
print (yes)
| 18.225806
| 40
| 0.497345
|
18fcbd565e1e2623144773f02a21b2a5d3c72c6e
| 2,605
|
py
|
Python
|
audacityAnnotation2WAVs.py
|
ShireeshPyreddy/pyAudioAnalysis
|
50276482f52710c322711fd5159c4d4fc229d64c
|
[
"Apache-2.0"
] | null | null | null |
audacityAnnotation2WAVs.py
|
ShireeshPyreddy/pyAudioAnalysis
|
50276482f52710c322711fd5159c4d4fc229d64c
|
[
"Apache-2.0"
] | null | null | null |
audacityAnnotation2WAVs.py
|
ShireeshPyreddy/pyAudioAnalysis
|
50276482f52710c322711fd5159c4d4fc229d64c
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import audioBasicIO
import sys
import csv
import scipy.io.wavfile as wavfile
def annotation2files(wavFile, csvFile):
'''
    Break an audio stream into segments of interest,
defined by a csv file
- wavFile: path to input wavfile
- csvFile: path to csvFile of segment limits
Input CSV file must be of the format <T1>\t<T2>\t<Label>
'''
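    # Example annotation line (hypothetical): "0.00\t2.50\tspeech" extracts the first
    # 2.5 seconds into <wavFile>_speech_0.00_2.50.wav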
[Fs, x] = audioBasicIO.readAudioFile(wavFile)
with open(csvFile, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
for j, row in enumerate(reader):
T1 = float(row[0].replace(",","."))
T2 = float(row[1].replace(",","."))
label = "%s_%s_%.2f_%.2f.wav" % (wavFile, row[2], T1, T2)
label = label.replace(" ", "_")
xtemp = x[int(round(T1*Fs)):int(round(T2*Fs))]
print(T1, T2, label, xtemp.shape)
wavfile.write(label, Fs, xtemp)
def main(argv):
if argv[1] == "-f":
wavFile = argv[2]
annotationFile = argv[3]
annotation2files(wavFile, annotationFile)
elif argv[1] == "-d":
inputFolder = argv[2]
types = ('*.txt', '*.csv')
annotationFilesList = []
for files in types:
annotationFilesList.extend(glob.glob(os.path.join(inputFolder, files)))
for anFile in annotationFilesList:
wavFile = os.path.splitext(anFile)[0] + ".wav"
if not os.path.isfile(wavFile):
wavFile = os.path.splitext(anFile)[0] + ".mp3"
if not os.path.isfile(wavFile):
print("Audio file not found!")
return
annotation2files(wavFile, anFile)
if __name__ == '__main__':
# Used to extract a series of annotated WAV files based on (a) an audio file (mp3 or wav) and
# (b) a segment annotation file e.g. a "label" file generated in audacity
#
# usage 1:
# python audacityAnnotation2WAVs.py -f <audiofilepath> <annotationfilepath>
    # The <annotationfilepath> is actually a tab-separated file where each line has the format <startTime>\t<endTime>\t<classLabel>
# The result of this process is a series of WAV files with a file name <audiofilepath>_<startTime>_<endTime>_<classLabel>
#
# usage 2:
# python audacityAnnotation2WAVs.py -d <annotationfolderpath>
# Same but searches all .txt and .csv annotation files. Audio files are supposed to be in the same path / filename with a WAV extension
main(sys.argv)
| 38.880597
| 139
| 0.597313
|
e40f6da8840b6dffbb7e7ae9f4c4a95301a05991
| 45,005
|
py
|
Python
|
model/backbone/inception/v1bn.py
|
ahu-hpt/AOMD
|
8d99dbb803feaef55fc089bfb3399d2fb21d55d8
|
[
"Apache-2.0"
] | 2
|
2020-08-24T07:57:16.000Z
|
2022-01-16T02:06:40.000Z
|
model/backbone/inception/v1bn.py
|
ahu-hpt/AOMD
|
8d99dbb803feaef55fc089bfb3399d2fb21d55d8
|
[
"Apache-2.0"
] | null | null | null |
model/backbone/inception/v1bn.py
|
ahu-hpt/AOMD
|
8d99dbb803feaef55fc089bfb3399d2fb21d55d8
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['InceptionV1BN']
pretrained_settings = {
'bninception': {
'imagenet': {
# Was ported using python2 (may trigger warning)
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/bn_inception-239d2248.pth',
# 'url': 'http://yjxiong.me/others/bn_inception-9f5701afb96c8044.pth',
'input_space': 'BGR',
'input_size': [3, 224, 224],
'input_range': [0, 255],
'mean': [104, 117, 128],
'std': [1, 1, 1],
'num_classes': 1000
}
}
}
class InceptionV1BN(nn.Module):
output_size = 1024
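    # Thin wrapper around the BNInception backbone below: forward() returns the
    # globally pooled, flattened 1024-d feature vector and skips the classifier.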
def __init__(self, pretrained=True):
super(InceptionV1BN, self).__init__()
self.model = bninception(num_classes=1000, pretrained='imagenet' if pretrained else None)
def forward(self, input):
x = self.model.features(input)
x = self.model.global_pool(x)
return x.view(x.size(0), -1)
class BNInception(nn.Module):
def __init__(self, num_classes=1000):
super(BNInception, self).__init__()
inplace = True
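        # Layer names follow the original Caffe prototxt. Each inception block
        # builds several parallel branches (1x1, 3x3 with a 1x1 reduce, double
        # 3x3 with a 1x1 reduce, and a pooled branch) whose outputs features()
        # concatenates along the channel dimension.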
self.conv1_7x7_s2 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
self.conv1_7x7_s2_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.conv1_relu_7x7 = nn.ReLU (inplace)
self.pool1_3x3_s2 = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
self.conv2_3x3_reduce = nn.Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))
self.conv2_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.conv2_relu_3x3_reduce = nn.ReLU (inplace)
self.conv2_3x3 = nn.Conv2d(64, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.conv2_3x3_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.conv2_relu_3x3 = nn.ReLU (inplace)
self.pool2_3x3_s2 = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
self.inception_3a_1x1 = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3a_1x1_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3a_relu_1x1 = nn.ReLU (inplace)
self.inception_3a_3x3_reduce = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3a_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3a_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_3a_3x3 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_3a_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3a_relu_3x3 = nn.ReLU (inplace)
self.inception_3a_double_3x3_reduce = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3a_double_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3a_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_3a_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_3a_double_3x3_1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_3a_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_3a_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_3a_double_3x3_2_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_3a_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_3a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
self.inception_3a_pool_proj = nn.Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1))
self.inception_3a_pool_proj_bn = nn.BatchNorm2d(32, eps=1e-05, momentum=0.9, affine=True)
self.inception_3a_relu_pool_proj = nn.ReLU (inplace)
self.inception_3b_1x1 = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3b_1x1_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3b_relu_1x1 = nn.ReLU (inplace)
self.inception_3b_3x3_reduce = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3b_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3b_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_3b_3x3 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_3b_3x3_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_3b_relu_3x3 = nn.ReLU (inplace)
self.inception_3b_double_3x3_reduce = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3b_double_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3b_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_3b_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_3b_double_3x3_1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_3b_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_3b_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_3b_double_3x3_2_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_3b_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_3b_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
self.inception_3b_pool_proj = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3b_pool_proj_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3b_relu_pool_proj = nn.ReLU (inplace)
self.inception_3c_3x3_reduce = nn.Conv2d(320, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_3c_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_3c_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_3c_3x3 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.inception_3c_3x3_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True)
self.inception_3c_relu_3x3 = nn.ReLU (inplace)
self.inception_3c_double_3x3_reduce = nn.Conv2d(320, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_3c_double_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_3c_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_3c_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_3c_double_3x3_1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_3c_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_3c_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.inception_3c_double_3x3_2_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_3c_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_3c_pool = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
self.inception_4a_1x1 = nn.Conv2d(576, 224, kernel_size=(1, 1), stride=(1, 1))
self.inception_4a_1x1_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True)
self.inception_4a_relu_1x1 = nn.ReLU (inplace)
self.inception_4a_3x3_reduce = nn.Conv2d(576, 64, kernel_size=(1, 1), stride=(1, 1))
self.inception_4a_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True)
self.inception_4a_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_4a_3x3 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4a_3x3_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_4a_relu_3x3 = nn.ReLU (inplace)
self.inception_4a_double_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1))
self.inception_4a_double_3x3_reduce_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_4a_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_4a_double_3x3_1 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4a_double_3x3_1_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4a_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_4a_double_3x3_2 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4a_double_3x3_2_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4a_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_4a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
self.inception_4a_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4a_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4a_relu_pool_proj = nn.ReLU (inplace)
self.inception_4b_1x1 = nn.Conv2d(576, 192, kernel_size=(1, 1), stride=(1, 1))
self.inception_4b_1x1_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_4b_relu_1x1 = nn.ReLU (inplace)
self.inception_4b_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1))
self.inception_4b_3x3_reduce_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_4b_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_4b_3x3 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4b_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4b_relu_3x3 = nn.ReLU (inplace)
self.inception_4b_double_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1))
self.inception_4b_double_3x3_reduce_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_4b_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_4b_double_3x3_1 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4b_double_3x3_1_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4b_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_4b_double_3x3_2 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4b_double_3x3_2_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4b_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_4b_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
self.inception_4b_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4b_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4b_relu_pool_proj = nn.ReLU (inplace)
self.inception_4c_1x1 = nn.Conv2d(576, 160, kernel_size=(1, 1), stride=(1, 1))
self.inception_4c_1x1_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True)
self.inception_4c_relu_1x1 = nn.ReLU (inplace)
self.inception_4c_3x3_reduce = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4c_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4c_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_4c_3x3 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4c_3x3_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True)
self.inception_4c_relu_3x3 = nn.ReLU (inplace)
self.inception_4c_double_3x3_reduce = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4c_double_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4c_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_4c_double_3x3_1 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4c_double_3x3_1_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True)
self.inception_4c_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_4c_double_3x3_2 = nn.Conv2d(160, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4c_double_3x3_2_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True)
self.inception_4c_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_4c_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
self.inception_4c_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4c_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4c_relu_pool_proj = nn.ReLU (inplace)
self.inception_4d_1x1 = nn.Conv2d(608, 96, kernel_size=(1, 1), stride=(1, 1))
self.inception_4d_1x1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True)
self.inception_4d_relu_1x1 = nn.ReLU (inplace)
self.inception_4d_3x3_reduce = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4d_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4d_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_4d_3x3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4d_3x3_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_4d_relu_3x3 = nn.ReLU (inplace)
self.inception_4d_double_3x3_reduce = nn.Conv2d(608, 160, kernel_size=(1, 1), stride=(1, 1))
self.inception_4d_double_3x3_reduce_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True)
self.inception_4d_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_4d_double_3x3_1 = nn.Conv2d(160, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4d_double_3x3_1_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_4d_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_4d_double_3x3_2 = nn.Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4d_double_3x3_2_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_4d_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_4d_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
self.inception_4d_pool_proj = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4d_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4d_relu_pool_proj = nn.ReLU (inplace)
self.inception_4e_3x3_reduce = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_4e_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_4e_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_4e_3x3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.inception_4e_3x3_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_4e_relu_3x3 = nn.ReLU (inplace)
self.inception_4e_double_3x3_reduce = nn.Conv2d(608, 192, kernel_size=(1, 1), stride=(1, 1))
self.inception_4e_double_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_4e_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_4e_double_3x3_1 = nn.Conv2d(192, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_4e_double_3x3_1_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.9, affine=True)
self.inception_4e_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_4e_double_3x3_2 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.inception_4e_double_3x3_2_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.9, affine=True)
self.inception_4e_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_4e_pool = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
self.inception_5a_1x1 = nn.Conv2d(1056, 352, kernel_size=(1, 1), stride=(1, 1))
self.inception_5a_1x1_bn = nn.BatchNorm2d(352, eps=1e-05, momentum=0.9, affine=True)
self.inception_5a_relu_1x1 = nn.ReLU (inplace)
self.inception_5a_3x3_reduce = nn.Conv2d(1056, 192, kernel_size=(1, 1), stride=(1, 1))
self.inception_5a_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_5a_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_5a_3x3 = nn.Conv2d(192, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_5a_3x3_bn = nn.BatchNorm2d(320, eps=1e-05, momentum=0.9, affine=True)
self.inception_5a_relu_3x3 = nn.ReLU (inplace)
self.inception_5a_double_3x3_reduce = nn.Conv2d(1056, 160, kernel_size=(1, 1), stride=(1, 1))
self.inception_5a_double_3x3_reduce_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True)
self.inception_5a_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_5a_double_3x3_1 = nn.Conv2d(160, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_5a_double_3x3_1_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True)
self.inception_5a_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_5a_double_3x3_2 = nn.Conv2d(224, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_5a_double_3x3_2_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True)
self.inception_5a_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_5a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
self.inception_5a_pool_proj = nn.Conv2d(1056, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_5a_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_5a_relu_pool_proj = nn.ReLU (inplace)
self.inception_5b_1x1 = nn.Conv2d(1024, 352, kernel_size=(1, 1), stride=(1, 1))
self.inception_5b_1x1_bn = nn.BatchNorm2d(352, eps=1e-05, momentum=0.9, affine=True)
self.inception_5b_relu_1x1 = nn.ReLU (inplace)
self.inception_5b_3x3_reduce = nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1))
self.inception_5b_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_5b_relu_3x3_reduce = nn.ReLU (inplace)
self.inception_5b_3x3 = nn.Conv2d(192, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_5b_3x3_bn = nn.BatchNorm2d(320, eps=1e-05, momentum=0.9, affine=True)
self.inception_5b_relu_3x3 = nn.ReLU (inplace)
self.inception_5b_double_3x3_reduce = nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1))
self.inception_5b_double_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True)
self.inception_5b_relu_double_3x3_reduce = nn.ReLU (inplace)
self.inception_5b_double_3x3_1 = nn.Conv2d(192, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_5b_double_3x3_1_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True)
self.inception_5b_relu_double_3x3_1 = nn.ReLU (inplace)
self.inception_5b_double_3x3_2 = nn.Conv2d(224, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.inception_5b_double_3x3_2_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True)
self.inception_5b_relu_double_3x3_2 = nn.ReLU (inplace)
self.inception_5b_pool = nn.MaxPool2d ((3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), ceil_mode=True)
self.inception_5b_pool_proj = nn.Conv2d(1024, 128, kernel_size=(1, 1), stride=(1, 1))
self.inception_5b_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True)
self.inception_5b_relu_pool_proj = nn.ReLU (inplace)
self.global_pool = nn.AvgPool2d(7, stride=1, padding=0, ceil_mode=True, count_include_pad=True)
self.last_linear = nn.Linear(1024, num_classes)
def features(self, input):
conv1_7x7_s2_out = self.conv1_7x7_s2(input)
conv1_7x7_s2_bn_out = self.conv1_7x7_s2_bn(conv1_7x7_s2_out)
conv1_relu_7x7_out = self.conv1_relu_7x7(conv1_7x7_s2_bn_out)
pool1_3x3_s2_out = self.pool1_3x3_s2(conv1_7x7_s2_bn_out)
conv2_3x3_reduce_out = self.conv2_3x3_reduce(pool1_3x3_s2_out)
conv2_3x3_reduce_bn_out = self.conv2_3x3_reduce_bn(conv2_3x3_reduce_out)
conv2_relu_3x3_reduce_out = self.conv2_relu_3x3_reduce(conv2_3x3_reduce_bn_out)
conv2_3x3_out = self.conv2_3x3(conv2_3x3_reduce_bn_out)
conv2_3x3_bn_out = self.conv2_3x3_bn(conv2_3x3_out)
conv2_relu_3x3_out = self.conv2_relu_3x3(conv2_3x3_bn_out)
pool2_3x3_s2_out = self.pool2_3x3_s2(conv2_3x3_bn_out)
inception_3a_1x1_out = self.inception_3a_1x1(pool2_3x3_s2_out)
inception_3a_1x1_bn_out = self.inception_3a_1x1_bn(inception_3a_1x1_out)
inception_3a_relu_1x1_out = self.inception_3a_relu_1x1(inception_3a_1x1_bn_out)
inception_3a_3x3_reduce_out = self.inception_3a_3x3_reduce(pool2_3x3_s2_out)
inception_3a_3x3_reduce_bn_out = self.inception_3a_3x3_reduce_bn(inception_3a_3x3_reduce_out)
inception_3a_relu_3x3_reduce_out = self.inception_3a_relu_3x3_reduce(inception_3a_3x3_reduce_bn_out)
inception_3a_3x3_out = self.inception_3a_3x3(inception_3a_3x3_reduce_bn_out)
inception_3a_3x3_bn_out = self.inception_3a_3x3_bn(inception_3a_3x3_out)
inception_3a_relu_3x3_out = self.inception_3a_relu_3x3(inception_3a_3x3_bn_out)
inception_3a_double_3x3_reduce_out = self.inception_3a_double_3x3_reduce(pool2_3x3_s2_out)
inception_3a_double_3x3_reduce_bn_out = self.inception_3a_double_3x3_reduce_bn(inception_3a_double_3x3_reduce_out)
inception_3a_relu_double_3x3_reduce_out = self.inception_3a_relu_double_3x3_reduce(inception_3a_double_3x3_reduce_bn_out)
inception_3a_double_3x3_1_out = self.inception_3a_double_3x3_1(inception_3a_double_3x3_reduce_bn_out)
inception_3a_double_3x3_1_bn_out = self.inception_3a_double_3x3_1_bn(inception_3a_double_3x3_1_out)
inception_3a_relu_double_3x3_1_out = self.inception_3a_relu_double_3x3_1(inception_3a_double_3x3_1_bn_out)
inception_3a_double_3x3_2_out = self.inception_3a_double_3x3_2(inception_3a_double_3x3_1_bn_out)
inception_3a_double_3x3_2_bn_out = self.inception_3a_double_3x3_2_bn(inception_3a_double_3x3_2_out)
inception_3a_relu_double_3x3_2_out = self.inception_3a_relu_double_3x3_2(inception_3a_double_3x3_2_bn_out)
inception_3a_pool_out = self.inception_3a_pool(pool2_3x3_s2_out)
inception_3a_pool_proj_out = self.inception_3a_pool_proj(inception_3a_pool_out)
inception_3a_pool_proj_bn_out = self.inception_3a_pool_proj_bn(inception_3a_pool_proj_out)
inception_3a_relu_pool_proj_out = self.inception_3a_relu_pool_proj(inception_3a_pool_proj_bn_out)
inception_3a_output_out = torch.cat([inception_3a_1x1_bn_out,inception_3a_3x3_bn_out,inception_3a_double_3x3_2_bn_out,inception_3a_pool_proj_bn_out], 1)
inception_3b_1x1_out = self.inception_3b_1x1(inception_3a_output_out)
inception_3b_1x1_bn_out = self.inception_3b_1x1_bn(inception_3b_1x1_out)
inception_3b_relu_1x1_out = self.inception_3b_relu_1x1(inception_3b_1x1_bn_out)
inception_3b_3x3_reduce_out = self.inception_3b_3x3_reduce(inception_3a_output_out)
inception_3b_3x3_reduce_bn_out = self.inception_3b_3x3_reduce_bn(inception_3b_3x3_reduce_out)
inception_3b_relu_3x3_reduce_out = self.inception_3b_relu_3x3_reduce(inception_3b_3x3_reduce_bn_out)
inception_3b_3x3_out = self.inception_3b_3x3(inception_3b_3x3_reduce_bn_out)
inception_3b_3x3_bn_out = self.inception_3b_3x3_bn(inception_3b_3x3_out)
inception_3b_relu_3x3_out = self.inception_3b_relu_3x3(inception_3b_3x3_bn_out)
inception_3b_double_3x3_reduce_out = self.inception_3b_double_3x3_reduce(inception_3a_output_out)
inception_3b_double_3x3_reduce_bn_out = self.inception_3b_double_3x3_reduce_bn(inception_3b_double_3x3_reduce_out)
inception_3b_relu_double_3x3_reduce_out = self.inception_3b_relu_double_3x3_reduce(inception_3b_double_3x3_reduce_bn_out)
inception_3b_double_3x3_1_out = self.inception_3b_double_3x3_1(inception_3b_double_3x3_reduce_bn_out)
inception_3b_double_3x3_1_bn_out = self.inception_3b_double_3x3_1_bn(inception_3b_double_3x3_1_out)
inception_3b_relu_double_3x3_1_out = self.inception_3b_relu_double_3x3_1(inception_3b_double_3x3_1_bn_out)
inception_3b_double_3x3_2_out = self.inception_3b_double_3x3_2(inception_3b_double_3x3_1_bn_out)
inception_3b_double_3x3_2_bn_out = self.inception_3b_double_3x3_2_bn(inception_3b_double_3x3_2_out)
inception_3b_relu_double_3x3_2_out = self.inception_3b_relu_double_3x3_2(inception_3b_double_3x3_2_bn_out)
inception_3b_pool_out = self.inception_3b_pool(inception_3a_output_out)
inception_3b_pool_proj_out = self.inception_3b_pool_proj(inception_3b_pool_out)
inception_3b_pool_proj_bn_out = self.inception_3b_pool_proj_bn(inception_3b_pool_proj_out)
inception_3b_relu_pool_proj_out = self.inception_3b_relu_pool_proj(inception_3b_pool_proj_bn_out)
inception_3b_output_out = torch.cat([inception_3b_1x1_bn_out,inception_3b_3x3_bn_out,inception_3b_double_3x3_2_bn_out,inception_3b_pool_proj_bn_out], 1)
inception_3c_3x3_reduce_out = self.inception_3c_3x3_reduce(inception_3b_output_out)
inception_3c_3x3_reduce_bn_out = self.inception_3c_3x3_reduce_bn(inception_3c_3x3_reduce_out)
inception_3c_relu_3x3_reduce_out = self.inception_3c_relu_3x3_reduce(inception_3c_3x3_reduce_bn_out)
inception_3c_3x3_out = self.inception_3c_3x3(inception_3c_3x3_reduce_bn_out)
inception_3c_3x3_bn_out = self.inception_3c_3x3_bn(inception_3c_3x3_out)
inception_3c_relu_3x3_out = self.inception_3c_relu_3x3(inception_3c_3x3_bn_out)
inception_3c_double_3x3_reduce_out = self.inception_3c_double_3x3_reduce(inception_3b_output_out)
inception_3c_double_3x3_reduce_bn_out = self.inception_3c_double_3x3_reduce_bn(inception_3c_double_3x3_reduce_out)
inception_3c_relu_double_3x3_reduce_out = self.inception_3c_relu_double_3x3_reduce(inception_3c_double_3x3_reduce_bn_out)
inception_3c_double_3x3_1_out = self.inception_3c_double_3x3_1(inception_3c_double_3x3_reduce_bn_out)
inception_3c_double_3x3_1_bn_out = self.inception_3c_double_3x3_1_bn(inception_3c_double_3x3_1_out)
inception_3c_relu_double_3x3_1_out = self.inception_3c_relu_double_3x3_1(inception_3c_double_3x3_1_bn_out)
inception_3c_double_3x3_2_out = self.inception_3c_double_3x3_2(inception_3c_double_3x3_1_bn_out)
inception_3c_double_3x3_2_bn_out = self.inception_3c_double_3x3_2_bn(inception_3c_double_3x3_2_out)
inception_3c_relu_double_3x3_2_out = self.inception_3c_relu_double_3x3_2(inception_3c_double_3x3_2_bn_out)
inception_3c_pool_out = self.inception_3c_pool(inception_3b_output_out)
inception_3c_output_out = torch.cat([inception_3c_3x3_bn_out,inception_3c_double_3x3_2_bn_out,inception_3c_pool_out], 1)
inception_4a_1x1_out = self.inception_4a_1x1(inception_3c_output_out)
inception_4a_1x1_bn_out = self.inception_4a_1x1_bn(inception_4a_1x1_out)
inception_4a_relu_1x1_out = self.inception_4a_relu_1x1(inception_4a_1x1_bn_out)
inception_4a_3x3_reduce_out = self.inception_4a_3x3_reduce(inception_3c_output_out)
inception_4a_3x3_reduce_bn_out = self.inception_4a_3x3_reduce_bn(inception_4a_3x3_reduce_out)
inception_4a_relu_3x3_reduce_out = self.inception_4a_relu_3x3_reduce(inception_4a_3x3_reduce_bn_out)
inception_4a_3x3_out = self.inception_4a_3x3(inception_4a_3x3_reduce_bn_out)
inception_4a_3x3_bn_out = self.inception_4a_3x3_bn(inception_4a_3x3_out)
inception_4a_relu_3x3_out = self.inception_4a_relu_3x3(inception_4a_3x3_bn_out)
inception_4a_double_3x3_reduce_out = self.inception_4a_double_3x3_reduce(inception_3c_output_out)
inception_4a_double_3x3_reduce_bn_out = self.inception_4a_double_3x3_reduce_bn(inception_4a_double_3x3_reduce_out)
inception_4a_relu_double_3x3_reduce_out = self.inception_4a_relu_double_3x3_reduce(inception_4a_double_3x3_reduce_bn_out)
inception_4a_double_3x3_1_out = self.inception_4a_double_3x3_1(inception_4a_double_3x3_reduce_bn_out)
inception_4a_double_3x3_1_bn_out = self.inception_4a_double_3x3_1_bn(inception_4a_double_3x3_1_out)
inception_4a_relu_double_3x3_1_out = self.inception_4a_relu_double_3x3_1(inception_4a_double_3x3_1_bn_out)
inception_4a_double_3x3_2_out = self.inception_4a_double_3x3_2(inception_4a_double_3x3_1_bn_out)
inception_4a_double_3x3_2_bn_out = self.inception_4a_double_3x3_2_bn(inception_4a_double_3x3_2_out)
inception_4a_relu_double_3x3_2_out = self.inception_4a_relu_double_3x3_2(inception_4a_double_3x3_2_bn_out)
inception_4a_pool_out = self.inception_4a_pool(inception_3c_output_out)
inception_4a_pool_proj_out = self.inception_4a_pool_proj(inception_4a_pool_out)
inception_4a_pool_proj_bn_out = self.inception_4a_pool_proj_bn(inception_4a_pool_proj_out)
inception_4a_relu_pool_proj_out = self.inception_4a_relu_pool_proj(inception_4a_pool_proj_bn_out)
inception_4a_output_out = torch.cat([inception_4a_1x1_bn_out,inception_4a_3x3_bn_out,inception_4a_double_3x3_2_bn_out,inception_4a_pool_proj_bn_out], 1)
inception_4b_1x1_out = self.inception_4b_1x1(inception_4a_output_out)
inception_4b_1x1_bn_out = self.inception_4b_1x1_bn(inception_4b_1x1_out)
inception_4b_relu_1x1_out = self.inception_4b_relu_1x1(inception_4b_1x1_bn_out)
inception_4b_3x3_reduce_out = self.inception_4b_3x3_reduce(inception_4a_output_out)
inception_4b_3x3_reduce_bn_out = self.inception_4b_3x3_reduce_bn(inception_4b_3x3_reduce_out)
inception_4b_relu_3x3_reduce_out = self.inception_4b_relu_3x3_reduce(inception_4b_3x3_reduce_bn_out)
inception_4b_3x3_out = self.inception_4b_3x3(inception_4b_3x3_reduce_bn_out)
inception_4b_3x3_bn_out = self.inception_4b_3x3_bn(inception_4b_3x3_out)
inception_4b_relu_3x3_out = self.inception_4b_relu_3x3(inception_4b_3x3_bn_out)
inception_4b_double_3x3_reduce_out = self.inception_4b_double_3x3_reduce(inception_4a_output_out)
inception_4b_double_3x3_reduce_bn_out = self.inception_4b_double_3x3_reduce_bn(inception_4b_double_3x3_reduce_out)
inception_4b_relu_double_3x3_reduce_out = self.inception_4b_relu_double_3x3_reduce(inception_4b_double_3x3_reduce_bn_out)
inception_4b_double_3x3_1_out = self.inception_4b_double_3x3_1(inception_4b_double_3x3_reduce_bn_out)
inception_4b_double_3x3_1_bn_out = self.inception_4b_double_3x3_1_bn(inception_4b_double_3x3_1_out)
inception_4b_relu_double_3x3_1_out = self.inception_4b_relu_double_3x3_1(inception_4b_double_3x3_1_bn_out)
inception_4b_double_3x3_2_out = self.inception_4b_double_3x3_2(inception_4b_double_3x3_1_bn_out)
inception_4b_double_3x3_2_bn_out = self.inception_4b_double_3x3_2_bn(inception_4b_double_3x3_2_out)
inception_4b_relu_double_3x3_2_out = self.inception_4b_relu_double_3x3_2(inception_4b_double_3x3_2_bn_out)
inception_4b_pool_out = self.inception_4b_pool(inception_4a_output_out)
inception_4b_pool_proj_out = self.inception_4b_pool_proj(inception_4b_pool_out)
inception_4b_pool_proj_bn_out = self.inception_4b_pool_proj_bn(inception_4b_pool_proj_out)
inception_4b_relu_pool_proj_out = self.inception_4b_relu_pool_proj(inception_4b_pool_proj_bn_out)
inception_4b_output_out = torch.cat([inception_4b_1x1_bn_out,inception_4b_3x3_bn_out,inception_4b_double_3x3_2_bn_out,inception_4b_pool_proj_bn_out], 1)
inception_4c_1x1_out = self.inception_4c_1x1(inception_4b_output_out)
inception_4c_1x1_bn_out = self.inception_4c_1x1_bn(inception_4c_1x1_out)
inception_4c_relu_1x1_out = self.inception_4c_relu_1x1(inception_4c_1x1_bn_out)
inception_4c_3x3_reduce_out = self.inception_4c_3x3_reduce(inception_4b_output_out)
inception_4c_3x3_reduce_bn_out = self.inception_4c_3x3_reduce_bn(inception_4c_3x3_reduce_out)
inception_4c_relu_3x3_reduce_out = self.inception_4c_relu_3x3_reduce(inception_4c_3x3_reduce_bn_out)
inception_4c_3x3_out = self.inception_4c_3x3(inception_4c_3x3_reduce_bn_out)
inception_4c_3x3_bn_out = self.inception_4c_3x3_bn(inception_4c_3x3_out)
inception_4c_relu_3x3_out = self.inception_4c_relu_3x3(inception_4c_3x3_bn_out)
inception_4c_double_3x3_reduce_out = self.inception_4c_double_3x3_reduce(inception_4b_output_out)
inception_4c_double_3x3_reduce_bn_out = self.inception_4c_double_3x3_reduce_bn(inception_4c_double_3x3_reduce_out)
inception_4c_relu_double_3x3_reduce_out = self.inception_4c_relu_double_3x3_reduce(inception_4c_double_3x3_reduce_bn_out)
inception_4c_double_3x3_1_out = self.inception_4c_double_3x3_1(inception_4c_double_3x3_reduce_bn_out)
inception_4c_double_3x3_1_bn_out = self.inception_4c_double_3x3_1_bn(inception_4c_double_3x3_1_out)
inception_4c_relu_double_3x3_1_out = self.inception_4c_relu_double_3x3_1(inception_4c_double_3x3_1_bn_out)
inception_4c_double_3x3_2_out = self.inception_4c_double_3x3_2(inception_4c_double_3x3_1_bn_out)
inception_4c_double_3x3_2_bn_out = self.inception_4c_double_3x3_2_bn(inception_4c_double_3x3_2_out)
inception_4c_relu_double_3x3_2_out = self.inception_4c_relu_double_3x3_2(inception_4c_double_3x3_2_bn_out)
inception_4c_pool_out = self.inception_4c_pool(inception_4b_output_out)
inception_4c_pool_proj_out = self.inception_4c_pool_proj(inception_4c_pool_out)
inception_4c_pool_proj_bn_out = self.inception_4c_pool_proj_bn(inception_4c_pool_proj_out)
inception_4c_relu_pool_proj_out = self.inception_4c_relu_pool_proj(inception_4c_pool_proj_bn_out)
inception_4c_output_out = torch.cat([inception_4c_1x1_bn_out,inception_4c_3x3_bn_out,inception_4c_double_3x3_2_bn_out,inception_4c_pool_proj_bn_out], 1)
inception_4d_1x1_out = self.inception_4d_1x1(inception_4c_output_out)
inception_4d_1x1_bn_out = self.inception_4d_1x1_bn(inception_4d_1x1_out)
inception_4d_relu_1x1_out = self.inception_4d_relu_1x1(inception_4d_1x1_bn_out)
inception_4d_3x3_reduce_out = self.inception_4d_3x3_reduce(inception_4c_output_out)
inception_4d_3x3_reduce_bn_out = self.inception_4d_3x3_reduce_bn(inception_4d_3x3_reduce_out)
inception_4d_relu_3x3_reduce_out = self.inception_4d_relu_3x3_reduce(inception_4d_3x3_reduce_bn_out)
inception_4d_3x3_out = self.inception_4d_3x3(inception_4d_3x3_reduce_bn_out)
inception_4d_3x3_bn_out = self.inception_4d_3x3_bn(inception_4d_3x3_out)
inception_4d_relu_3x3_out = self.inception_4d_relu_3x3(inception_4d_3x3_bn_out)
inception_4d_double_3x3_reduce_out = self.inception_4d_double_3x3_reduce(inception_4c_output_out)
inception_4d_double_3x3_reduce_bn_out = self.inception_4d_double_3x3_reduce_bn(inception_4d_double_3x3_reduce_out)
inception_4d_relu_double_3x3_reduce_out = self.inception_4d_relu_double_3x3_reduce(inception_4d_double_3x3_reduce_bn_out)
inception_4d_double_3x3_1_out = self.inception_4d_double_3x3_1(inception_4d_double_3x3_reduce_bn_out)
inception_4d_double_3x3_1_bn_out = self.inception_4d_double_3x3_1_bn(inception_4d_double_3x3_1_out)
inception_4d_relu_double_3x3_1_out = self.inception_4d_relu_double_3x3_1(inception_4d_double_3x3_1_bn_out)
inception_4d_double_3x3_2_out = self.inception_4d_double_3x3_2(inception_4d_double_3x3_1_bn_out)
inception_4d_double_3x3_2_bn_out = self.inception_4d_double_3x3_2_bn(inception_4d_double_3x3_2_out)
inception_4d_relu_double_3x3_2_out = self.inception_4d_relu_double_3x3_2(inception_4d_double_3x3_2_bn_out)
inception_4d_pool_out = self.inception_4d_pool(inception_4c_output_out)
inception_4d_pool_proj_out = self.inception_4d_pool_proj(inception_4d_pool_out)
inception_4d_pool_proj_bn_out = self.inception_4d_pool_proj_bn(inception_4d_pool_proj_out)
inception_4d_relu_pool_proj_out = self.inception_4d_relu_pool_proj(inception_4d_pool_proj_bn_out)
inception_4d_output_out = torch.cat([inception_4d_1x1_bn_out,inception_4d_3x3_bn_out,inception_4d_double_3x3_2_bn_out,inception_4d_pool_proj_bn_out], 1)
inception_4e_3x3_reduce_out = self.inception_4e_3x3_reduce(inception_4d_output_out)
inception_4e_3x3_reduce_bn_out = self.inception_4e_3x3_reduce_bn(inception_4e_3x3_reduce_out)
inception_4e_relu_3x3_reduce_out = self.inception_4e_relu_3x3_reduce(inception_4e_3x3_reduce_bn_out)
inception_4e_3x3_out = self.inception_4e_3x3(inception_4e_3x3_reduce_bn_out)
inception_4e_3x3_bn_out = self.inception_4e_3x3_bn(inception_4e_3x3_out)
inception_4e_relu_3x3_out = self.inception_4e_relu_3x3(inception_4e_3x3_bn_out)
inception_4e_double_3x3_reduce_out = self.inception_4e_double_3x3_reduce(inception_4d_output_out)
inception_4e_double_3x3_reduce_bn_out = self.inception_4e_double_3x3_reduce_bn(inception_4e_double_3x3_reduce_out)
inception_4e_relu_double_3x3_reduce_out = self.inception_4e_relu_double_3x3_reduce(inception_4e_double_3x3_reduce_bn_out)
inception_4e_double_3x3_1_out = self.inception_4e_double_3x3_1(inception_4e_double_3x3_reduce_bn_out)
inception_4e_double_3x3_1_bn_out = self.inception_4e_double_3x3_1_bn(inception_4e_double_3x3_1_out)
inception_4e_relu_double_3x3_1_out = self.inception_4e_relu_double_3x3_1(inception_4e_double_3x3_1_bn_out)
inception_4e_double_3x3_2_out = self.inception_4e_double_3x3_2(inception_4e_double_3x3_1_bn_out)
inception_4e_double_3x3_2_bn_out = self.inception_4e_double_3x3_2_bn(inception_4e_double_3x3_2_out)
inception_4e_relu_double_3x3_2_out = self.inception_4e_relu_double_3x3_2(inception_4e_double_3x3_2_bn_out)
inception_4e_pool_out = self.inception_4e_pool(inception_4d_output_out)
inception_4e_output_out = torch.cat([inception_4e_3x3_bn_out,inception_4e_double_3x3_2_bn_out,inception_4e_pool_out], 1)
inception_5a_1x1_out = self.inception_5a_1x1(inception_4e_output_out)
inception_5a_1x1_bn_out = self.inception_5a_1x1_bn(inception_5a_1x1_out)
inception_5a_relu_1x1_out = self.inception_5a_relu_1x1(inception_5a_1x1_bn_out)
inception_5a_3x3_reduce_out = self.inception_5a_3x3_reduce(inception_4e_output_out)
inception_5a_3x3_reduce_bn_out = self.inception_5a_3x3_reduce_bn(inception_5a_3x3_reduce_out)
inception_5a_relu_3x3_reduce_out = self.inception_5a_relu_3x3_reduce(inception_5a_3x3_reduce_bn_out)
inception_5a_3x3_out = self.inception_5a_3x3(inception_5a_3x3_reduce_bn_out)
inception_5a_3x3_bn_out = self.inception_5a_3x3_bn(inception_5a_3x3_out)
inception_5a_relu_3x3_out = self.inception_5a_relu_3x3(inception_5a_3x3_bn_out)
inception_5a_double_3x3_reduce_out = self.inception_5a_double_3x3_reduce(inception_4e_output_out)
inception_5a_double_3x3_reduce_bn_out = self.inception_5a_double_3x3_reduce_bn(inception_5a_double_3x3_reduce_out)
inception_5a_relu_double_3x3_reduce_out = self.inception_5a_relu_double_3x3_reduce(inception_5a_double_3x3_reduce_bn_out)
inception_5a_double_3x3_1_out = self.inception_5a_double_3x3_1(inception_5a_double_3x3_reduce_bn_out)
inception_5a_double_3x3_1_bn_out = self.inception_5a_double_3x3_1_bn(inception_5a_double_3x3_1_out)
inception_5a_relu_double_3x3_1_out = self.inception_5a_relu_double_3x3_1(inception_5a_double_3x3_1_bn_out)
inception_5a_double_3x3_2_out = self.inception_5a_double_3x3_2(inception_5a_double_3x3_1_bn_out)
inception_5a_double_3x3_2_bn_out = self.inception_5a_double_3x3_2_bn(inception_5a_double_3x3_2_out)
inception_5a_relu_double_3x3_2_out = self.inception_5a_relu_double_3x3_2(inception_5a_double_3x3_2_bn_out)
inception_5a_pool_out = self.inception_5a_pool(inception_4e_output_out)
inception_5a_pool_proj_out = self.inception_5a_pool_proj(inception_5a_pool_out)
inception_5a_pool_proj_bn_out = self.inception_5a_pool_proj_bn(inception_5a_pool_proj_out)
inception_5a_relu_pool_proj_out = self.inception_5a_relu_pool_proj(inception_5a_pool_proj_bn_out)
inception_5a_output_out = torch.cat([inception_5a_1x1_bn_out,inception_5a_3x3_bn_out,inception_5a_double_3x3_2_bn_out,inception_5a_pool_proj_bn_out], 1)
inception_5b_1x1_out = self.inception_5b_1x1(inception_5a_output_out)
inception_5b_1x1_bn_out = self.inception_5b_1x1_bn(inception_5b_1x1_out)
inception_5b_relu_1x1_out = self.inception_5b_relu_1x1(inception_5b_1x1_bn_out)
inception_5b_3x3_reduce_out = self.inception_5b_3x3_reduce(inception_5a_output_out)
inception_5b_3x3_reduce_bn_out = self.inception_5b_3x3_reduce_bn(inception_5b_3x3_reduce_out)
inception_5b_relu_3x3_reduce_out = self.inception_5b_relu_3x3_reduce(inception_5b_3x3_reduce_bn_out)
inception_5b_3x3_out = self.inception_5b_3x3(inception_5b_3x3_reduce_bn_out)
inception_5b_3x3_bn_out = self.inception_5b_3x3_bn(inception_5b_3x3_out)
inception_5b_relu_3x3_out = self.inception_5b_relu_3x3(inception_5b_3x3_bn_out)
inception_5b_double_3x3_reduce_out = self.inception_5b_double_3x3_reduce(inception_5a_output_out)
inception_5b_double_3x3_reduce_bn_out = self.inception_5b_double_3x3_reduce_bn(inception_5b_double_3x3_reduce_out)
inception_5b_relu_double_3x3_reduce_out = self.inception_5b_relu_double_3x3_reduce(inception_5b_double_3x3_reduce_bn_out)
inception_5b_double_3x3_1_out = self.inception_5b_double_3x3_1(inception_5b_double_3x3_reduce_bn_out)
inception_5b_double_3x3_1_bn_out = self.inception_5b_double_3x3_1_bn(inception_5b_double_3x3_1_out)
inception_5b_relu_double_3x3_1_out = self.inception_5b_relu_double_3x3_1(inception_5b_double_3x3_1_bn_out)
inception_5b_double_3x3_2_out = self.inception_5b_double_3x3_2(inception_5b_double_3x3_1_bn_out)
inception_5b_double_3x3_2_bn_out = self.inception_5b_double_3x3_2_bn(inception_5b_double_3x3_2_out)
inception_5b_relu_double_3x3_2_out = self.inception_5b_relu_double_3x3_2(inception_5b_double_3x3_2_bn_out)
inception_5b_pool_out = self.inception_5b_pool(inception_5a_output_out)
inception_5b_pool_proj_out = self.inception_5b_pool_proj(inception_5b_pool_out)
inception_5b_pool_proj_bn_out = self.inception_5b_pool_proj_bn(inception_5b_pool_proj_out)
inception_5b_relu_pool_proj_out = self.inception_5b_relu_pool_proj(inception_5b_pool_proj_bn_out)
inception_5b_output_out = torch.cat([inception_5b_1x1_bn_out,inception_5b_3x3_bn_out,inception_5b_double_3x3_2_bn_out,inception_5b_pool_proj_bn_out], 1)
return inception_5b_output_out
def logits(self, features):
x = self.global_pool(features)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
def bninception(num_classes=1000, pretrained='imagenet'):
r"""BNInception model architecture from <https://arxiv.org/pdf/1502.03167.pdf>`_ paper.
"""
model = BNInception(num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['bninception'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
weight = model_zoo.load_url(settings['url'])
weight = {k: v.squeeze(0) if v.size(0) == 1 else v for k, v in weight.items()}
model.load_state_dict(weight)
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
if __name__ == '__main__':
model = bninception()
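    # Minimal smoke-test sketch (assumes the pretrained weight download above
    # succeeded): with the 224x224 input size from pretrained_settings, the
    # backbone should emit 1024 feature channels at 7x7 resolution.
    model.eval()
    dummy = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        out = model.features(dummy)
    print(out.shape)  # expected: torch.Size([2, 1024, 7, 7])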
| 84.755179
| 160
| 0.769914
|
f6c74546ea131447c55e77562dab545fa4025daa
| 17,999
|
py
|
Python
|
nested_inline/admin.py
|
lexygon/django-nested-inline
|
38e9aca6ccad3a242fa76c37d8e1aed7bce9a7ad
|
[
"MIT"
] | null | null | null |
nested_inline/admin.py
|
lexygon/django-nested-inline
|
38e9aca6ccad3a242fa76c37d8e1aed7bce9a7ad
|
[
"MIT"
] | null | null | null |
nested_inline/admin.py
|
lexygon/django-nested-inline
|
38e9aca6ccad3a242fa76c37d8e1aed7bce9a7ad
|
[
"MIT"
] | null | null | null |
from django import VERSION
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.options import reverse, InlineModelAdmin
from django.core.exceptions import PermissionDenied
from django.forms.formsets import all_valid
from django.http import Http404
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.views.decorators.csrf import csrf_protect
from django.utils.translation import ugettext as _
from django.contrib.admin.utils import unquote
from django.db import transaction, models
from django.utils.html import escape
from django.conf import settings
from django import forms
from django.contrib.admin.templatetags.admin_static import static
csrf_protect_m = method_decorator(csrf_protect)
class NestedModelAdmin(admin.ModelAdmin):
class Media:
css = {
"all": ('admin/css/forms-nested.css',)
}
js = ('admin/js/inlines-nested%s.js' % ('' if settings.DEBUG else '.min'),)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request, obj) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request, obj):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
instances = formset.save()
for form in formset.forms:
if hasattr(form, 'nested_formsets') and form not in formset.deleted_forms:
for nested_formset in form.nested_formsets:
self.save_formset(request, form, nested_formset, change)
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def add_nested_inline_formsets(self, request, inline, formset, depth=0):
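        # Recursively attach formsets for every nested inline to each form of
        # the parent formset, namespacing each nested prefix under the parent
        # form's prefix so bound POST data maps back to the right formset.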
if depth > 5:
raise Exception("Maximum nesting depth reached (5)")
for form in formset.forms:
nested_formsets = []
for nested_inline in inline.get_inline_instances(request):
InlineFormSet = nested_inline.get_formset(request, form.instance)
prefix = "%s-%s" % (form.prefix, InlineFormSet.get_default_prefix())
if request.method == 'POST' and any(s.startswith(prefix) for s in request.POST.keys()):
nested_formset = InlineFormSet(request.POST, request.FILES,
instance=form.instance,
prefix=prefix, queryset=nested_inline.get_queryset(request))
else:
nested_formset = InlineFormSet(instance=form.instance,
prefix=prefix, queryset=nested_inline.get_queryset(request))
nested_formsets.append(nested_formset)
if nested_inline.inlines:
self.add_nested_inline_formsets(request, nested_inline, nested_formset, depth=depth+1)
form.nested_formsets = nested_formsets
def wrap_nested_inline_formsets(self, request, inline, formset):
media = None
def get_media(extra_media):
if media:
return media + extra_media
else:
return extra_media
for form in formset.forms:
wrapped_nested_formsets = []
for nested_inline, nested_formset in zip(inline.get_inline_instances(request), form.nested_formsets):
if form.instance.pk:
instance = form.instance
else:
instance = None
fieldsets = list(nested_inline.get_fieldsets(request, instance))
readonly = list(nested_inline.get_readonly_fields(request, instance))
prepopulated = dict(nested_inline.get_prepopulated_fields(request, instance))
wrapped_nested_formset = helpers.InlineAdminFormSet(nested_inline, nested_formset,
fieldsets, prepopulated, readonly, model_admin=self)
wrapped_nested_formsets.append(wrapped_nested_formset)
media = get_media(wrapped_nested_formset.media)
if nested_inline.inlines:
media = get_media(self.wrap_nested_inline_formsets(request, nested_inline, nested_formset))
form.nested_formsets = wrapped_nested_formsets
return media
def formset_has_nested_data(self, formsets):
for formset in formsets:
if not formset.is_bound:
pass
for form in formset:
if hasattr(form, 'cleaned_data') and form.cleaned_data:
return True
elif hasattr(form, 'nested_formsets'):
if self.formset_has_nested_data(form.nested_formsets):
return True
def all_valid_with_nesting(self, formsets):
"Recursively validate all nested formsets"
if not all_valid(formsets):
return False
for formset in formsets:
if not formset.is_bound:
pass
for form in formset:
if hasattr(form, 'nested_formsets'):
if not self.all_valid_with_nesting(form.nested_formsets):
return False
#TODO - find out why this breaks when extra = 1 and just adding new item with no sub items
if (not hasattr(form, 'cleaned_data') or not form.cleaned_data) and self.formset_has_nested_data(form.nested_formsets):
form._errors["__all__"] = form.error_class([u"Parent object must be created when creating nested inlines."])
return False
return True
@csrf_protect_m
@transaction.atomic
def add_view(self, request, form_url='', extra_context=None):
"The 'add' admin view for this model."
model = self.model
opts = model._meta
# if not self.has_add_permission(request, obj):
# raise PermissionDenied
ModelForm = self.get_form(request)
formsets = []
inline_instances = self.get_inline_instances(request, None)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES)
if form.is_valid():
new_object = self.save_form(request, form, change=False)
form_validated = True
else:
form_validated = False
new_object = self.model()
prefixes = {}
for FormSet, inline in self.get_formsets_with_inlines(request):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new="_saveasnew" in request.POST,
prefix=prefix, queryset=inline.get_queryset(request))
formsets.append(formset)
if inline.inlines:
self.add_nested_inline_formsets(request, inline, formset)
if self.all_valid_with_nesting(formsets) and form_validated:
self.save_model(request, new_object, form, False)
self.save_related(request, form, formsets, False)
args = ()
# Provide `add_message` argument to ModelAdmin.log_addition for
# Django 1.9 and up.
if VERSION[:2] >= (1, 9):
add_message = self.construct_change_message(
request, form, formsets, add=True
)
args = (request, new_object, add_message)
else:
args = (request, new_object)
self.log_addition(*args)
return self.response_add(request, new_object)
else:
# Prepare the dict of initial data from the request.
# We have to special-case M2Ms as a list of comma-separated PKs.
initial = dict(request.GET.items())
for k in initial:
try:
f = opts.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
form = ModelForm(initial=initial)
prefixes = {}
for FormSet, inline in self.get_formsets_with_inlines(request):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(instance=self.model(), prefix=prefix,
queryset=inline.get_queryset(request))
formsets.append(formset)
if hasattr(inline, 'inlines') and inline.inlines:
self.add_nested_inline_formsets(request, inline, formset)
adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),
self.get_prepopulated_fields(request),
self.get_readonly_fields(request),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request))
readonly = list(inline.get_readonly_fields(request))
prepopulated = dict(inline.get_prepopulated_fields(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
if hasattr(inline, 'inlines') and inline.inlines:
media += self.wrap_nested_inline_formsets(request, inline, formset)
context = {
'title': _('Add %s') % force_text(opts.verbose_name),
'adminform': adminForm,
'is_popup': "_popup" in request.GET,
'show_delete': False,
'media': media,
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'app_label': opts.app_label,
}
context.update(self.admin_site.each_context(request))
context.update(extra_context or {})
return self.render_change_form(request, context, form_url=form_url, add=True)
@csrf_protect_m
@transaction.atomic
def change_view(self, request, object_id, form_url='', extra_context=None):
"The 'change' admin view for this model."
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_text(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
return self.add_view(request, form_url=reverse('admin:%s_%s_add' %
(opts.app_label, opts.module_name),
current_app=self.admin_site.name))
ModelForm = self.get_form(request, obj)
formsets = []
inline_instances = self.get_inline_instances(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
prefixes = {}
for FormSet, inline in self.get_formsets_with_inlines(request, new_object):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(request.POST, request.FILES,
instance=new_object, prefix=prefix,
queryset=inline.get_queryset(request))
formsets.append(formset)
if hasattr(inline, 'inlines') and inline.inlines:
self.add_nested_inline_formsets(request, inline, formset)
if self.all_valid_with_nesting(formsets) and form_validated:
self.save_model(request, new_object, form, True)
self.save_related(request, form, formsets, True)
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form = ModelForm(instance=obj)
prefixes = {}
for FormSet, inline in self.get_formsets_with_inlines(request, obj):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(instance=obj, prefix=prefix,
queryset=inline.get_queryset(request))
formsets.append(formset)
if hasattr(inline, 'inlines') and inline.inlines:
self.add_nested_inline_formsets(request, inline, formset)
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
if hasattr(inline, 'inlines') and inline.inlines:
media += self.wrap_nested_inline_formsets(request, inline, formset)
context = {
'title': _('Change %s') % force_text(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': "_popup" in request.GET,
'media': media,
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'app_label': opts.app_label,
}
context.update(self.admin_site.each_context(request))
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj, form_url=form_url)
class NestedInline(InlineModelAdmin):
inlines = []
new_objects = []
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines-nested%s.js' % extra]
if self.prepopulated_fields:
js.extend(['urlify.js', 'prepopulate%s.js' % extra])
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request, obj) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request, obj):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_formsets_with_inlines(self, request, obj=None):
for inline in self.get_inline_instances(request):
yield inline.get_formset(request, obj), inline
class NestedStackedInline(NestedInline):
template = 'admin/edit_inline/stacked-nested.html'
class NestedTabularInline(NestedInline):
template = 'admin/edit_inline/tabular-nested.html'
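# --- Editor's usage sketch (not part of the original module) -----------------
# Shows how the nested inline classes defined above might be combined in an
# admin registration. The models (Author, Book, Chapter), the app module path
# and the owning ModelAdmin name `NestedModelAdmin` are assumptions made for
# illustration; only NestedStackedInline / NestedTabularInline come from this
# file.
#
# from django.contrib import admin
# from myapp.models import Author, Book, Chapter
#
# class ChapterInline(NestedTabularInline):
#     model = Chapter
#     extra = 1
#
# class BookInline(NestedStackedInline):
#     model = Book
#     extra = 1
#     inlines = [ChapterInline]
#
# class AuthorAdmin(NestedModelAdmin):
#     inlines = [BookInline]
#
# admin.site.register(Author, AuthorAdmin)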
| avg_line_length: 46.033248 | max_line_length: 156 | alphanum_fraction: 0.607034 |

hexsha: 342bba8b6a5911ea6808c55d3ab89582a6f80b41 | size: 153 | ext: py | lang: Python
max_stars_repo_path: PythonScript/PythonIntermedio/cap_2/excepciones.py | max_stars_repo_name: FranklinA/CoursesAndSelfStudy | max_stars_repo_head_hexsha: 63a699d6df7eef52190b608e2dd5728f7aa7264a | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: PythonScript/PythonIntermedio/cap_2/excepciones.py | max_issues_repo_name: FranklinA/CoursesAndSelfStudy | max_issues_repo_head_hexsha: 63a699d6df7eef52190b608e2dd5728f7aa7264a | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: PythonScript/PythonIntermedio/cap_2/excepciones.py | max_forks_repo_name: FranklinA/CoursesAndSelfStudy | max_forks_repo_head_hexsha: 63a699d6df7eef52190b608e2dd5728f7aa7264a | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Demonstrates try / except / else / finally with an out-of-range list index.
lista = [2, 4]
try:
    print(lista[5])
except IndexError:
    print("Error: index out of range")
else:
    print("No error")
finally:
    print("Executed")
| avg_line_length: 10.928571 | max_line_length: 35 | alphanum_fraction: 0.666667 |

hexsha: 50d270f27cc825b4c04ac94bc88d24f7d1b275fa | size: 1,016 | ext: py | lang: Python
max_stars_repo_path: minos/examples/ga/dataset.py | max_stars_repo_name: qorrect/sisy | max_stars_repo_head_hexsha: 4c279f3a47109395d57521b5c8144b18693737fc | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 6 | max_stars_repo_stars_event_min_datetime: 2017-09-15T03:14:10.000Z | max_stars_repo_stars_event_max_datetime: 2019-12-03T04:15:21.000Z
max_issues_repo_path: minos/examples/ga/dataset.py | max_issues_repo_name: qorrect/sisy | max_issues_repo_head_hexsha: 4c279f3a47109395d57521b5c8144b18693737fc | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2017-09-21T01:49:42.000Z | max_issues_repo_issues_event_max_datetime: 2017-09-23T16:33:01.000Z
max_forks_repo_path: minos/examples/ga/dataset.py | max_forks_repo_name: qorrect/sisy | max_forks_repo_head_hexsha: 4c279f3a47109395d57521b5c8144b18693737fc | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
'''
Created on Feb 19, 2017
@author: julien
'''
from keras.datasets import reuters
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
from minos.train.utils import SimpleBatchIterator
import numpy as np
def get_reuters_dataset(batch_size, max_words):
(X_train, y_train), (X_test, y_test) = reuters.load_data(nb_words=max_words, test_split=0.2)
nb_classes = np.max(y_train) + 1
tokenizer = Tokenizer(nb_words=max_words)
X_train = tokenizer.sequences_to_matrix(X_train, mode='binary')
X_test = tokenizer.sequences_to_matrix(X_test, mode='binary')
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
batch_iterator = SimpleBatchIterator(
X_train,
y_train,
batch_size,
autoloop=True)
test_batch_iterator = SimpleBatchIterator(
X_test,
y_test,
len(X_test),
autoloop=True)
return batch_iterator, test_batch_iterator, nb_classes
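# Editor's usage sketch (not part of the original module): builds the train
# and test batch iterators with hypothetical settings. Assumes the legacy
# Keras API used above (``nb_words`` arguments, ``np_utils``) is available.
if __name__ == '__main__':
    train_batches, test_batches, n_classes = get_reuters_dataset(
        batch_size=32, max_words=1000)
    print('number of classes: %d' % n_classes)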
| avg_line_length: 29.882353 | max_line_length: 96 | alphanum_fraction: 0.727362 |

hexsha: 12c75356ac20e8703cbd02be9c25d28e94bef503 | size: 3,527 | ext: py | lang: Python
max_stars_repo_path: setup.py | max_stars_repo_name: vanvalenlab/deepcell-tf | max_stars_repo_head_hexsha: ec48625be3206bcc513783856bc42bd221bac9d0 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 250 | max_stars_repo_stars_event_min_datetime: 2018-09-19T23:55:06.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-30T02:20:52.000Z
max_issues_repo_path: setup.py | max_issues_repo_name: vanvalenlab/deepcell-tf | max_issues_repo_head_hexsha: ec48625be3206bcc513783856bc42bd221bac9d0 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 251 | max_issues_repo_issues_event_min_datetime: 2018-09-21T17:09:43.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-28T19:04:50.000Z
max_forks_repo_path: setup.py | max_forks_repo_name: vanvalenlab/deepcell-tf | max_forks_repo_head_hexsha: ec48625be3206bcc513783856bc42bd221bac9d0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 64 | max_forks_repo_forks_event_min_datetime: 2018-11-29T15:22:15.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-21T03:37:43.000Z
# Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from codecs import open
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), 'r', 'utf-8') as f:
readme = f.read()
about = {}
with open(os.path.join(here, 'deepcell', '_version.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
url=about['__url__'],
download_url=about['__download_url__'],
license=about['__license__'],
long_description=readme,
long_description_content_type='text/markdown',
install_requires=[
'numpy>=1.16.6,<1.20.0',
'pydot>=1.4.2,<2',
'scipy>=1.2.3,<2',
'scikit-image>=0.14.5',
'scikit-learn>=0.20.4',
'tensorflow~=2.5.1',
'tensorflow_addons~=0.13.0',
'spektral~=1.0.4',
'jupyter>=1.0.0,<2',
'opencv-python-headless<5',
'deepcell-tracking~=0.5.0',
'deepcell-toolbox~=0.10.0'
],
extras_require={
'tests': [
'pytest<6',
'pytest-cov',
'pytest-pep8',
],
},
packages=find_packages(),
python_requires='>=3.6, <3.10',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Image Processing',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| avg_line_length: 34.920792 | max_line_length: 80 | alphanum_fraction: 0.641622 |

hexsha: f4a4af31c61b48838b4fdc20ddb101ea1125f1ec | size: 9,913 | ext: py | lang: Python
max_stars_repo_path: flow/core/rewards.py | max_stars_repo_name: kristery/flow | max_stars_repo_head_hexsha: 2638f8137541424af8de23159260d73c571f2e04 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: flow/core/rewards.py | max_issues_repo_name: kristery/flow | max_issues_repo_head_hexsha: 2638f8137541424af8de23159260d73c571f2e04 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: flow/core/rewards.py | max_forks_repo_name: kristery/flow | max_forks_repo_head_hexsha: 2638f8137541424af8de23159260d73c571f2e04 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""A series of reward functions."""
import numpy as np
def desired_velocity(env, fail=False, edge_list=None):
r"""Encourage proximity to a desired velocity.
This function measures the deviation of a system of vehicles from a
user-specified desired velocity peaking when all vehicles in the ring
are set to this desired velocity. Moreover, in order to ensure that the
    reward function naturally punishes the early termination of rollouts due
    to collisions or other failures, the function is formulated as a mapping
    :math:`r: \mathcal{S} \times \mathcal{A}
    \rightarrow \mathbb{R}_{\geq 0}`.
This is done by subtracting the deviation of the system from the
desired velocity from the peak allowable deviation from the desired
velocity. Additionally, since the velocity of vehicles are
unbounded above, the reward is bounded below by zero,
to ensure nonnegativity.
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
state of the system.
fail : bool, optional
specifies if any crash or other failure occurred in the system
edge_list : list of str, optional
list of edges the reward is computed over. If no edge_list is defined,
the reward is computed over all edges
Returns
-------
float
reward value
"""
if edge_list is None:
veh_ids = env.k.vehicle.get_ids()
else:
veh_ids = env.k.vehicle.get_ids_by_edge(edge_list)
vel = np.array(env.k.vehicle.get_speed(veh_ids))
num_vehicles = len(veh_ids)
if any(vel < -100) or fail or num_vehicles == 0:
return 0.
target_vel = env.env_params.additional_params['target_velocity']
max_cost = np.array([target_vel] * num_vehicles)
max_cost = np.linalg.norm(max_cost)
cost = vel - target_vel
cost = np.linalg.norm(cost)
# epsilon term (to deal with ZeroDivisionError exceptions)
eps = np.finfo(np.float32).eps
return max(max_cost - cost, 0) / (max_cost + eps)
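# Editor's worked example (not part of the original module): reproduces the
# arithmetic of desired_velocity with plain numpy, so no `env` object is
# needed. The speeds and the 10 m/s target below are illustrative only.
def _desired_velocity_example():
    target_vel = 10.
    vel = np.array([8., 10., 12.])
    max_cost = np.linalg.norm(np.array([target_vel] * len(vel)))  # ~17.32
    cost = np.linalg.norm(vel - target_vel)                       # ~2.83
    eps = np.finfo(np.float32).eps
    return max(max_cost - cost, 0) / (max_cost + eps)             # ~0.84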
def average_velocity(env, fail=False):
"""Encourage proximity to an average velocity.
This reward function returns the average velocity of all
vehicles in the system.
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
state of the system.
fail : bool, optional
specifies if any crash or other failure occurred in the system
Returns
-------
float
reward value
"""
vel = np.array(env.k.vehicle.get_speed(env.k.vehicle.get_ids()))
if any(vel < -100) or fail:
return 0.
if len(vel) == 0:
return 0.
return np.mean(vel)
def rl_forward_progress(env, gain=0.1):
"""Rewared function used to reward the RL vehicles for travelling forward.
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
state of the system.
gain : float
specifies how much to reward the RL vehicles
Returns
-------
float
reward value
"""
rl_velocity = env.k.vehicle.get_speed(env.k.vehicle.get_rl_ids())
rl_norm_vel = np.linalg.norm(rl_velocity, 1)
return rl_norm_vel * gain
def boolean_action_penalty(discrete_actions, gain=1.0):
"""Penalize boolean actions that indicate a switch."""
return gain * np.sum(discrete_actions)
def min_delay(env):
"""Reward function used to encourage minimization of total delay.
This function measures the deviation of a system of vehicles from all the
vehicles smoothly travelling at a fixed speed to their destinations.
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
state of the system.
Returns
-------
float
reward value
"""
vel = np.array(env.k.vehicle.get_speed(env.k.vehicle.get_ids()))
vel = vel[vel >= -1e-6]
v_top = max(
env.k.network.speed_limit(edge)
for edge in env.k.network.get_edge_list())
time_step = env.sim_step
max_cost = time_step * sum(vel.shape)
# epsilon term (to deal with ZeroDivisionError exceptions)
eps = np.finfo(np.float32).eps
cost = time_step * sum((v_top - vel) / v_top)
return max((max_cost - cost) / (max_cost + eps), 0)
def avg_delay_specified_vehicles(env, veh_ids):
"""Calculate the average delay for a set of vehicles in the system.
Parameters
----------
env: flow.envs.Env
the environment variable, which contains information on the current
state of the system.
veh_ids: a list of the ids of the vehicles, for which we are calculating
average delay
Returns
-------
float
average delay
"""
    total_delay = 0
    for edge in env.k.network.get_edge_list():
        for veh_id in env.k.vehicle.get_ids_by_edge(edge):
            v_top = env.k.network.speed_limit(edge)
            total_delay += (v_top - env.k.vehicle.get_speed(veh_id)) / v_top
    time_step = env.sim_step
    try:
        cost = time_step * total_delay
        return cost / len(veh_ids)
    except ZeroDivisionError:
        return 0
def min_delay_unscaled(env):
"""Return the average delay for all vehicles in the system.
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
state of the system.
Returns
-------
float
reward value
"""
vel = np.array(env.k.vehicle.get_speed(env.k.vehicle.get_ids()))
#print(len(env.k.vehicle.get_ids()))
vel = vel[vel >= -1e-6]
v_top = max(
env.k.network.speed_limit(edge)
for edge in env.k.network.get_edge_list())
time_step = env.sim_step
# epsilon term (to deal with ZeroDivisionError exceptions)
eps = np.finfo(np.float32).eps
cost = time_step * sum((v_top - vel) / v_top)
return cost / (env.k.vehicle.num_vehicles + eps)
def waiting_penalty(env, gain=1):
    """Penalize the average waiting time of vehicles in the system.

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.
    gain : float
        multiplicative factor on the penalty
    """
    total_wait_time = 0.
    total_veh = 0
    for veh_id in env.k.vehicle.get_ids():
        total_veh += 1
        try:
            total_wait_time += env.waiting_time[veh_id]
        except KeyError:
            pass
    if total_veh == 0:
        # avoid a ZeroDivisionError when no vehicles are present
        return 0.
    return total_wait_time / total_veh * gain
def shortgreen_penalty(env, action, gain=1):
    """Penalize switch actions (``action`` entries above 0.5) taken while the
    corresponding ``env.lc_green`` value is still below three times
    ``env.min_switch_time``."""
    penalty = 0.
    for idx in range(len(env.lc_green)):
        if env.lc_green[idx] < 3 * env.min_switch_time and action[idx] > 0.5:
            penalty += 1.
    return gain * penalty
def penalize_standstill(env, gain=1):
"""Reward function that penalizes vehicle standstill.
Is it better for this to be:
a) penalize standstill in general?
b) multiplicative based on time that vel=0?
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
state of the system.
gain : float
multiplicative factor on the action penalty
Returns
-------
float
reward value
"""
veh_ids = env.k.vehicle.get_ids()
vel = np.array(env.k.vehicle.get_speed(veh_ids))
num_standstill = len(vel[vel == 0])
penalty = gain * num_standstill
return -penalty
def penalize_near_standstill(env, thresh=0.3, gain=1):
"""Reward function which penalizes vehicles at a low velocity.
This reward function is used to penalize vehicles below a
specified threshold. This assists with discouraging RL from
gamifying a network, which can result in standstill behavior
or similarly bad, near-zero velocities.
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
thresh : float
the velocity threshold below which penalties are applied
gain : float
multiplicative factor on the action penalty
"""
veh_ids = env.k.vehicle.get_ids()
vel = np.array(env.k.vehicle.get_speed(veh_ids))
penalize = len(vel[vel < thresh])
penalty = gain * penalize
return -penalty
def penalize_headway_variance(vehicles,
vids,
normalization=1,
penalty_gain=1,
penalty_exponent=1):
"""Reward function used to train rl vehicles to encourage large headways.
Parameters
----------
vehicles : flow.core.kernel.vehicle.KernelVehicle
contains the state of all vehicles in the network (generally
self.vehicles)
vids : list of str
list of ids for vehicles
normalization : float, optional
constant for scaling (down) the headways
penalty_gain : float, optional
sets the penalty for each vehicle between 0 and this value
penalty_exponent : float, optional
used to allow exponential punishing of smaller headways
"""
headways = penalty_gain * np.power(
np.array(
[vehicles.get_headway(veh_id) / normalization
for veh_id in vids]), penalty_exponent)
return -np.var(headways)
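# Editor's worked example (not part of the original module): reproduces the
# computation of penalize_headway_variance with plain numpy, so the vehicle
# kernel is not required. The headway values below are illustrative only.
def _headway_variance_example():
    headways = np.array([10., 20., 30.])  # hypothetical headways, in metres
    normalization = 10.
    penalty_gain = 1.
    penalty_exponent = 1.
    scaled = penalty_gain * np.power(headways / normalization, penalty_exponent)
    return -np.var(scaled)  # -np.var([1., 2., 3.]) == -2/3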
def punish_rl_lane_changes(env, penalty=1):
"""Penalize an RL vehicle performing lane changes.
    This reward function is meant to minimize the number of lane changes an RL
    vehicle performs.
Parameters
----------
env : flow.envs.Env
the environment variable, which contains information on the current
state of the system.
penalty : float, optional
penalty imposed on the reward function for any rl lane change action
"""
total_lane_change_penalty = 0
for veh_id in env.k.vehicle.get_rl_ids():
if env.k.vehicle.get_last_lc(veh_id) == env.timer:
total_lane_change_penalty -= penalty
return total_lane_change_penalty
| avg_line_length: 30.130699 | max_line_length: 79 | alphanum_fraction: 0.650963 |

hexsha: fb4a3d7f4c619e2e4b1d522128897b6eb49b0ba8 | size: 1,602 | ext: py | lang: Python
max_stars_repo_path: techa/cycle.py | max_stars_repo_name: havocesp/techa | max_stars_repo_head_hexsha: 518f90805b0728466836993b88e820bc8b0405b1 | max_stars_repo_licenses: ["Unlicense"] | max_stars_count: 21 | max_stars_repo_stars_event_min_datetime: 2018-06-04T13:46:02.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-19T01:20:12.000Z
max_issues_repo_path: techa/cycle.py | max_issues_repo_name: havocesp/techa | max_issues_repo_head_hexsha: 518f90805b0728466836993b88e820bc8b0405b1 | max_issues_repo_licenses: ["Unlicense"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: techa/cycle.py | max_forks_repo_name: havocesp/techa | max_forks_repo_head_hexsha: 518f90805b0728466836993b88e820bc8b0405b1 | max_forks_repo_licenses: ["Unlicense"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2018-09-29T05:53:33.000Z | max_forks_repo_forks_event_max_datetime: 2020-12-27T10:02:30.000Z
# -*- coding: utf-8 -*-
"""
Cycle Indicators
"""
import pandas as pd
from talib.abstract import Function
__all__ = ['HT_DCPERIOD', 'HT_DCPHASE', 'HT_SINE', 'HT_TRENDMODE', 'HT_PHASOR']
def HT_DCPERIOD(data):
"""
Hilbert Transform
Dominant Cycle Period
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('HT_DCPERIOD')
return fn(data)
def HT_DCPHASE(data):
"""
Hilbert Transform
Dominant Cycle Phase
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('HT_DCPHASE')
return fn(data)
def HT_PHASOR(data):
"""
Hilbert Transform
    Phasor Components (in-phase and quadrature).
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('HT_PHASOR')
return fn(data)
def HT_SINE(data):
"""
Hilbert Transform - SineWave Indicator
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('HT_SINE')
return fn(data)
def HT_TRENDMODE(data):
"""
Hilbert Transform
Trend vs Cycle Mode.
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('HT_TRENDMODE')
return fn(data)
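# Editor's usage sketch (not part of the original module): assumes TA-Lib and
# its Python wrapper are installed, and that the input frame uses the
# lowercase 'open'/'high'/'low'/'close' column names the abstract API looks
# up. The generated prices are synthetic.
if __name__ == '__main__':
    import numpy as np
    close = np.cumsum(np.random.randn(200)) + 100
    ohlc = pd.DataFrame({
        'open': close,
        'high': close + 1,
        'low': close - 1,
        'close': close,
    })
    print(HT_DCPERIOD(ohlc))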
| avg_line_length: 21.36 | max_line_length: 79 | alphanum_fraction: 0.672285 |

hexsha: dc97e6322a1abe417b1cfbf39bc2b756cc9fe0dd | size: 197,492 | ext: py | lang: Python
max_stars_repo_path: venv/Lib/site-packages/xero_python/payrollnz/api/payroll_nz_api.py | max_stars_repo_name: RobMilinski/Xero-Starter-Branched-Test | max_stars_repo_head_hexsha: c82382e674b34c2336ee164f5a079d6becd1ed46 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: venv/Lib/site-packages/xero_python/payrollnz/api/payroll_nz_api.py | max_issues_repo_name: RobMilinski/Xero-Starter-Branched-Test | max_issues_repo_head_hexsha: c82382e674b34c2336ee164f5a079d6becd1ed46 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: venv/Lib/site-packages/xero_python/payrollnz/api/payroll_nz_api.py | max_forks_repo_name: RobMilinski/Xero-Starter-Branched-Test | max_forks_repo_head_hexsha: c82382e674b34c2336ee164f5a079d6becd1ed46 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# coding: utf-8
"""
Xero Payroll NZ
This is the Xero Payroll API for orgs in the NZ region. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
"""
OpenAPI spec version: 2.10.4
"""
import importlib
import re # noqa: F401
from xero_python import exceptions
from xero_python.api_client import ApiClient, ModelFinder
try:
from .exception_handler import translate_status_exception
except ImportError:
translate_status_exception = exceptions.translate_status_exception
class empty:
""" empty object to mark optional parameter not set """
class PayrollNzApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
base_url = "https://api.xero.com/payroll.xro/2.0"
models_module = importlib.import_module("xero_python.payrollnz.models")
def __init__(self, api_client=None, base_url=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.base_url = base_url or self.base_url
def get_resource_url(self, resource_path):
"""
Combine API base url with resource specific path
:param str resource_path: API endpoint specific path
:return: str full resource path
"""
return self.base_url + resource_path
def get_model_finder(self):
return ModelFinder(self.models_module)
def approve_timesheet(
self,
xero_tenant_id,
timesheet_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Approves a timesheet # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `approve_timesheet`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `approve_timesheet`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}/Approve")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "approve_timesheet")
def create_deduction(
self,
xero_tenant_id,
deduction,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a new deduction for a specific employee # noqa: E501
OAuth2 scope: payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Deduction deduction: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: DeductionObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_deduction`"
)
# verify the required parameter 'deduction' is set
if deduction is None:
raise ValueError(
"Missing the required parameter `deduction` "
"when calling `create_deduction`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = deduction
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Deductions")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="DeductionObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_deduction")
def create_earnings_rate(
self,
xero_tenant_id,
earnings_rate,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a new earnings rate # noqa: E501
OAuth2 scope: payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param EarningsRate earnings_rate: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EarningsRateObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_earnings_rate`"
)
# verify the required parameter 'earnings_rate' is set
if earnings_rate is None:
raise ValueError(
"Missing the required parameter `earnings_rate` "
"when calling `create_earnings_rate`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = earnings_rate
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/EarningsRates")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EarningsRateObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_earnings_rate")
def create_employee(
self,
xero_tenant_id,
employee,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates an employees # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Employee employee: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee`"
)
# verify the required parameter 'employee' is set
if employee is None:
raise ValueError(
"Missing the required parameter `employee` "
"when calling `create_employee`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_employee")
def create_employee_earnings_template(
self,
xero_tenant_id,
employee_id,
earnings_template,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates earnings template records for an employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param EarningsTemplate earnings_template: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EarningsTemplateObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee_earnings_template`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employee_earnings_template`"
)
# verify the required parameter 'earnings_template' is set
if earnings_template is None:
raise ValueError(
"Missing the required parameter `earnings_template` "
"when calling `create_employee_earnings_template`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = earnings_template
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/PayTemplates/earnings")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EarningsTemplateObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "create_employee_earnings_template"
)
def create_employee_leave(
self,
xero_tenant_id,
employee_id,
employee_leave,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates leave records for a specific employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param EmployeeLeave employee_leave: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee_leave`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employee_leave`"
)
# verify the required parameter 'employee_leave' is set
if employee_leave is None:
raise ValueError(
"Missing the required parameter `employee_leave` "
"when calling `create_employee_leave`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee_leave
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/Leave")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_employee_leave")
def create_employee_leave_setup(
self,
xero_tenant_id,
employee_id,
employee_leave_setup,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a leave set-up for a specific employee. This is required before viewing, configuring and requesting leave for an employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param EmployeeLeaveSetup employee_leave_setup: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveSetupObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee_leave_setup`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employee_leave_setup`"
)
# verify the required parameter 'employee_leave_setup' is set
if employee_leave_setup is None:
raise ValueError(
"Missing the required parameter `employee_leave_setup` "
"when calling `create_employee_leave_setup`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee_leave_setup
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/leaveSetup")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveSetupObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_employee_leave_setup")
def create_employee_leave_type(
self,
xero_tenant_id,
employee_id,
employee_leave_type,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates leave type records for a specific employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param EmployeeLeaveType employee_leave_type: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveTypeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee_leave_type`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employee_leave_type`"
)
# verify the required parameter 'employee_leave_type' is set
if employee_leave_type is None:
raise ValueError(
"Missing the required parameter `employee_leave_type` "
"when calling `create_employee_leave_type`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee_leave_type
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/LeaveTypes")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveTypeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_employee_leave_type")
def create_employee_opening_balances(
self,
xero_tenant_id,
employee_id,
employee_opening_balance,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates opening balances for a specific employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param list[EmployeeOpeningBalance] employee_opening_balance: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeOpeningBalancesObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee_opening_balances`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employee_opening_balances`"
)
# verify the required parameter 'employee_opening_balance' is set
if employee_opening_balance is None:
raise ValueError(
"Missing the required parameter `employee_opening_balance` "
"when calling `create_employee_opening_balances`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee_opening_balance
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/openingBalances")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeOpeningBalancesObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "create_employee_opening_balances"
)
def create_employee_payment_method(
self,
xero_tenant_id,
employee_id,
payment_method,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a payment method for an employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param PaymentMethod payment_method: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaymentMethodObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee_payment_method`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employee_payment_method`"
)
# verify the required parameter 'payment_method' is set
if payment_method is None:
raise ValueError(
"Missing the required parameter `payment_method` "
"when calling `create_employee_payment_method`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = payment_method
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/PaymentMethods")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaymentMethodObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "create_employee_payment_method"
)
def create_employee_salary_and_wage(
self,
xero_tenant_id,
employee_id,
salary_and_wage,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates an employee salary and wage record # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param SalaryAndWage salary_and_wage: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SalaryAndWageObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employee_salary_and_wage`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employee_salary_and_wage`"
)
# verify the required parameter 'salary_and_wage' is set
if salary_and_wage is None:
raise ValueError(
"Missing the required parameter `salary_and_wage` "
"when calling `create_employee_salary_and_wage`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = salary_and_wage
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/SalaryAndWages")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SalaryAndWageObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "create_employee_salary_and_wage"
)
def create_employment(
self,
xero_tenant_id,
employee_id,
employment,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates an employment detail for a specific employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param Employment employment: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmploymentObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_employment`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_employment`"
)
# verify the required parameter 'employment' is set
if employment is None:
raise ValueError(
"Missing the required parameter `employment` "
"when calling `create_employment`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employment
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/Employment")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmploymentObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_employment")
def create_leave_type(
self,
xero_tenant_id,
leave_type,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a new leave type # noqa: E501
OAuth2 scope: payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param LeaveType leave_type: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: LeaveTypeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_leave_type`"
)
# verify the required parameter 'leave_type' is set
if leave_type is None:
raise ValueError(
"Missing the required parameter `leave_type` "
"when calling `create_leave_type`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = leave_type
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/LeaveTypes")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="LeaveTypeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_leave_type")
def create_multiple_employee_earnings_template(
self,
xero_tenant_id,
employee_id,
earnings_template,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates multiple employee earnings template records for a specific employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param list[EarningsTemplate] earnings_template: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeEarningsTemplates
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_multiple_employee_earnings_template`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `create_multiple_employee_earnings_template`"
)
# verify the required parameter 'earnings_template' is set
if earnings_template is None:
raise ValueError(
"Missing the required parameter `earnings_template` "
"when calling `create_multiple_employee_earnings_template`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = earnings_template
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/paytemplateearnings")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeEarningsTemplates",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "create_multiple_employee_earnings_template"
)
def create_pay_run(
self,
xero_tenant_id,
pay_run,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a pay run # noqa: E501
OAuth2 scope: payroll.payruns
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param PayRun pay_run: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_pay_run`"
)
# verify the required parameter 'pay_run' is set
if pay_run is None:
raise ValueError(
"Missing the required parameter `pay_run` "
"when calling `create_pay_run`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = pay_run
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRuns")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_pay_run")
def create_pay_run_calendar(
self,
xero_tenant_id,
pay_run_calendar,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a new payrun calendar # noqa: E501
OAuth2 scope: payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param PayRunCalendar pay_run_calendar: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunCalendarObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_pay_run_calendar`"
)
# verify the required parameter 'pay_run_calendar' is set
if pay_run_calendar is None:
raise ValueError(
"Missing the required parameter `pay_run_calendar` "
"when calling `create_pay_run_calendar`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = pay_run_calendar
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRunCalendars")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunCalendarObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_pay_run_calendar")
def create_reimbursement(
self,
xero_tenant_id,
reimbursement,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a new reimbursement # noqa: E501
OAuth2 scope: payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Reimbursement reimbursement: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: ReimbursementObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_reimbursement`"
)
# verify the required parameter 'reimbursement' is set
if reimbursement is None:
raise ValueError(
"Missing the required parameter `reimbursement` "
"when calling `create_reimbursement`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = reimbursement
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Reimbursements")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ReimbursementObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_reimbursement")
def create_superannuation(
self,
xero_tenant_id,
benefit,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a new superannuation # noqa: E501
OAuth2 scope: payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Benefit benefit: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SuperannuationObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_superannuation`"
)
# verify the required parameter 'benefit' is set
if benefit is None:
raise ValueError(
"Missing the required parameter `benefit` "
"when calling `create_superannuation`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = benefit
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Superannuations")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SuperannuationObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_superannuation")
def create_timesheet(
self,
xero_tenant_id,
timesheet,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a new timesheet # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Timesheet timesheet: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_timesheet`"
)
# verify the required parameter 'timesheet' is set
if timesheet is None:
raise ValueError(
"Missing the required parameter `timesheet` "
"when calling `create_timesheet`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = timesheet
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_timesheet")
def create_timesheet_line(
self,
xero_tenant_id,
timesheet_id,
timesheet_line,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Create a new timesheet line for a specific time sheet # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param TimesheetLine timesheet_line: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetLineObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `create_timesheet_line`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `create_timesheet_line`"
)
# verify the required parameter 'timesheet_line' is set
if timesheet_line is None:
raise ValueError(
"Missing the required parameter `timesheet_line` "
"when calling `create_timesheet_line`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = timesheet_line
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}/Lines")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetLineObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "create_timesheet_line")
def delete_employee_earnings_template(
self,
xero_tenant_id,
employee_id,
pay_template_earning_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Deletes an employee's earnings template record # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param str pay_template_earning_id: Id for single pay template earnings object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EarningsTemplateObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_employee_earnings_template`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `delete_employee_earnings_template`"
)
# verify the required parameter 'pay_template_earning_id' is set
if pay_template_earning_id is None:
raise ValueError(
"Missing the required parameter `pay_template_earning_id` "
"when calling `delete_employee_earnings_template`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
"PayTemplateEarningID": pay_template_earning_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/Employees/{EmployeeID}/PayTemplates/earnings/{PayTemplateEarningID}"
)
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EarningsTemplateObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "delete_employee_earnings_template"
)
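# Usage sketch: both identifiers map to path parameters of the DELETE request
# (the variables below are placeholders supplied by the caller):
#   api.delete_employee_earnings_template(
#       xero_tenant_id, employee_id, pay_template_earning_id
#   )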
def delete_employee_leave(
self,
xero_tenant_id,
employee_id,
leave_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Deletes a leave record for a specific employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param str leave_id: Leave id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_employee_leave`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `delete_employee_leave`"
)
# verify the required parameter 'leave_id' is set
if leave_id is None:
raise ValueError(
"Missing the required parameter `leave_id` "
"when calling `delete_employee_leave`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
"LeaveID": leave_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/Leave/{LeaveID}")
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "delete_employee_leave")
def delete_employee_salary_and_wage(
self,
xero_tenant_id,
employee_id,
salary_and_wages_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Deletes an employee's salary and wages record # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param str salary_and_wages_id: Id for single salary and wages object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SalaryAndWageObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_employee_salary_and_wage`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `delete_employee_salary_and_wage`"
)
# verify the required parameter 'salary_and_wages_id' is set
if salary_and_wages_id is None:
raise ValueError(
"Missing the required parameter `salary_and_wages_id` "
"when calling `delete_employee_salary_and_wage`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
"SalaryAndWagesID": salary_and_wages_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/Employees/{EmployeeID}/SalaryAndWages/{SalaryAndWagesID}"
)
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SalaryAndWageObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "delete_employee_salary_and_wage"
)
def delete_timesheet(
self,
xero_tenant_id,
timesheet_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Deletes a timesheet # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetLine
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_timesheet`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `delete_timesheet`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}")
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetLine",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "delete_timesheet")
def delete_timesheet_line(
self,
xero_tenant_id,
timesheet_id,
timesheet_line_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Deletes a timesheet line for a specific timesheet # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param str timesheet_line_id: Identifier for the timesheet line (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetLine
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_timesheet_line`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `delete_timesheet_line`"
)
# verify the required parameter 'timesheet_line_id' is set
if timesheet_line_id is None:
raise ValueError(
"Missing the required parameter `timesheet_line_id` "
"when calling `delete_timesheet_line`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
"TimesheetLineID": timesheet_line_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}/Lines/{TimesheetLineID}")
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetLine",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "delete_timesheet_line")
def get_deduction(
self,
xero_tenant_id,
deduction_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a single deduction by using a unique deduction ID # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str deduction_id: Identifier for the deduction (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: DeductionObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_deduction`"
)
# verify the required parameter 'deduction_id' is set
if deduction_id is None:
raise ValueError(
"Missing the required parameter `deduction_id` "
"when calling `get_deduction`"
)
collection_formats = {}
path_params = {
"deductionId": deduction_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Deductions/{deductionId}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="DeductionObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_deduction")
def get_deductions(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves deductions for a specific employee # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Deductions
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_deductions`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Deductions")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Deductions",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_deductions")
def get_earnings_rate(
self,
xero_tenant_id,
earnings_rate_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific earnings rates by using a unique earnings rate id # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str earnings_rate_id: Identifier for the earnings rate (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EarningsRateObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_earnings_rate`"
)
# verify the required parameter 'earnings_rate_id' is set
if earnings_rate_id is None:
raise ValueError(
"Missing the required parameter `earnings_rate_id` "
"when calling `get_earnings_rate`"
)
collection_formats = {}
path_params = {
"EarningsRateID": earnings_rate_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/EarningsRates/{EarningsRateID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EarningsRateObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_earnings_rate")
def get_earnings_rates(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves earnings rates # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EarningsRates
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_earnings_rates`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/EarningsRates")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EarningsRates",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_earnings_rates")
def get_employee(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves an employees using a unique employee ID # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee")
def get_employee_leave_balances(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves leave balances for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveBalances
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leave_balances`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leave_balances`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/LeaveBalances")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveBalances",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leave_balances")
def get_employee_leave_periods(
self,
xero_tenant_id,
employee_id,
start_date=empty,
end_date=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves leave periods for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param date start_date: Filter by start date
:param date end_date: Filter by end date
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: LeavePeriods
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leave_periods`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leave_periods`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
if start_date is not empty:
query_params.append(("startDate", start_date))
if end_date is not empty:
query_params.append(("endDate", end_date))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/LeavePeriods")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="LeavePeriods",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leave_periods")
def get_employee_leave_types(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves leave types for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveTypes
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leave_types`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leave_types`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/LeaveTypes")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveTypes",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leave_types")
def get_employee_leaves(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves leave records for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaves
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_leaves`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_leaves`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/Leave")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaves",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_leaves")
def get_employee_opening_balances(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves the opening balance for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeOpeningBalancesObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_opening_balances`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_opening_balances`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/openingBalances")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeOpeningBalancesObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_employee_opening_balances"
)
def get_employee_pay_templates(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves pay templates for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeePayTemplates
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_pay_templates`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_pay_templates`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/PayTemplates")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeePayTemplates",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_pay_templates")
def get_employee_payment_method(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves available payment methods for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaymentMethodObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_payment_method`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_payment_method`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/PaymentMethods")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaymentMethodObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_payment_method")
def get_employee_salary_and_wage(
self,
xero_tenant_id,
employee_id,
salary_and_wages_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves an employee's salary and wages record by using a unique salary and wage ID # noqa: E501
OAuth2 scope: payroll.employees, payroll.employees.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param str salary_and_wages_id: Id for single salary and wages object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SalaryAndWages
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_salary_and_wage`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_salary_and_wage`"
)
# verify the required parameter 'salary_and_wages_id' is set
if salary_and_wages_id is None:
raise ValueError(
"Missing the required parameter `salary_and_wages_id` "
"when calling `get_employee_salary_and_wage`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
"SalaryAndWagesID": salary_and_wages_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/Employees/{EmployeeID}/SalaryAndWages/{SalaryAndWagesID}"
)
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SalaryAndWages",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_employee_salary_and_wage"
)
def get_employee_salary_and_wages(
self,
xero_tenant_id,
employee_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves an employee's salary and wages # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SalaryAndWages
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_salary_and_wages`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_salary_and_wages`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/SalaryAndWages")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SalaryAndWages",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_employee_salary_and_wages"
)
def get_employee_tax(
self,
xero_tenant_id,
employee_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves tax records for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeTaxObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employee_tax`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `get_employee_tax`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/Tax")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeTaxObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employee_tax")
def get_employees(
self,
xero_tenant_id,
first_name=empty,
last_name=empty,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves employees # noqa: E501
OAuth2 scope: payroll.employees.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str first_name: Filter by first name
:param str last_name: Filter by last name
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Employees
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_employees`"
)
collection_formats = {}
path_params = {}
query_params = []
if first_name is not empty:
query_params.append(("firstName", first_name))
if last_name is not empty:
query_params.append(("lastName", last_name))
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Employees",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_employees")
def get_leave_type(
self,
xero_tenant_id,
leave_type_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific leave type by using a unique leave type ID # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str leave_type_id: Identifier for the leave type (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: LeaveTypeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_leave_type`"
)
# verify the required parameter 'leave_type_id' is set
if leave_type_id is None:
raise ValueError(
"Missing the required parameter `leave_type_id` "
"when calling `get_leave_type`"
)
collection_formats = {}
path_params = {
"LeaveTypeID": leave_type_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/LeaveTypes/{LeaveTypeID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="LeaveTypeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_leave_type")
def get_leave_types(
self,
xero_tenant_id,
page=empty,
active_only=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves leave types # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool active_only: Filters leave types by active status. By default the API returns all leave types.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: LeaveTypes
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_leave_types`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
if active_only is not empty:
query_params.append(("ActiveOnly", active_only))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/LeaveTypes")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="LeaveTypes",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_leave_types")
def get_pay_run(
self,
xero_tenant_id,
pay_run_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific pay run by using a unique pay run ID # noqa: E501
OAuth2 scope: payroll.payruns.read, payroll.payruns
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str pay_run_id: Identifier for the pay run (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_run`"
)
# verify the required parameter 'pay_run_id' is set
if pay_run_id is None:
raise ValueError(
"Missing the required parameter `pay_run_id` "
"when calling `get_pay_run`"
)
collection_formats = {}
path_params = {
"PayRunID": pay_run_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRuns/{PayRunID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_run")
def get_pay_run_calendar(
self,
xero_tenant_id,
payroll_calendar_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific payrun calendar by using a unique payroll calendar ID # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str payroll_calendar_id: Identifier for the payrun calendar (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunCalendarObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_run_calendar`"
)
# verify the required parameter 'payroll_calendar_id' is set
if payroll_calendar_id is None:
raise ValueError(
"Missing the required parameter `payroll_calendar_id` "
"when calling `get_pay_run_calendar`"
)
collection_formats = {}
path_params = {
"PayrollCalendarID": payroll_calendar_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRunCalendars/{PayrollCalendarID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunCalendarObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_run_calendar")
def get_pay_run_calendars(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves payrun calendars # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunCalendars
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_run_calendars`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRunCalendars")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunCalendars",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_run_calendars")
def get_pay_runs(
self,
xero_tenant_id,
page=empty,
status=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves pay runs # noqa: E501
OAuth2 scope: payroll.payruns.read, payroll.payruns
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
        :param str status: By default get_pay_runs returns all the pay runs for an organization. Append a status filter to the query, e.g. GET https://api.xero.com/payroll.xro/2.0/payRuns?status={PayRunStatus}, to filter the pay runs by status.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRuns
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_runs`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
if status is not empty:
query_params.append(("status", status))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRuns")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRuns",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_runs")
def get_pay_slip(
self,
xero_tenant_id,
pay_slip_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific payslip by a unique pay slip ID # noqa: E501
OAuth2 scope: payroll.payslip.read, payroll.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str pay_slip_id: Identifier for the payslip (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaySlipObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_slip`"
)
# verify the required parameter 'pay_slip_id' is set
if pay_slip_id is None:
raise ValueError(
"Missing the required parameter `pay_slip_id` "
"when calling `get_pay_slip`"
)
collection_formats = {}
path_params = {
"PaySlipID": pay_slip_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PaySlips/{PaySlipID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaySlipObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_slip")
def get_pay_slips(
self,
xero_tenant_id,
pay_run_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves payslips # noqa: E501
OAuth2 scope: payroll.payslip.read, payroll.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
        :param str pay_run_id: PayRunID which specifies the pay run whose payslips are retrieved. By default the API does not group payslips by pay run. (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaySlips
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_pay_slips`"
)
# verify the required parameter 'pay_run_id' is set
if pay_run_id is None:
raise ValueError(
"Missing the required parameter `pay_run_id` "
"when calling `get_pay_slips`"
)
collection_formats = {}
path_params = {}
query_params = [
("PayRunID", pay_run_id),
]
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PaySlips")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaySlips",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_pay_slips")
def get_reimbursement(
self,
xero_tenant_id,
reimbursement_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific reimbursement by using a unique reimbursement ID # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str reimbursement_id: Identifier for the reimbursement (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: ReimbursementObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_reimbursement`"
)
# verify the required parameter 'reimbursement_id' is set
if reimbursement_id is None:
raise ValueError(
"Missing the required parameter `reimbursement_id` "
"when calling `get_reimbursement`"
)
collection_formats = {}
path_params = {
"ReimbursementID": reimbursement_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Reimbursements/{ReimbursementID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ReimbursementObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_reimbursement")
def get_reimbursements(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves reimbursements # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Reimbursements
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_reimbursements`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Reimbursements")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Reimbursements",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_reimbursements")
def get_settings(
self,
xero_tenant_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves settings # noqa: E501
OAuth2 scope: payroll.settings.read, settings.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Settings
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_settings`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Settings")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Settings",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_settings")
def get_statutory_deduction(
self,
xero_tenant_id,
id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific statutory deduction by using a unique statutory deductions id # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str id: Identifier for the statutory deduction (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: StatutoryDeductionObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_statutory_deduction`"
)
# verify the required parameter 'id' is set
if id is None:
raise ValueError(
"Missing the required parameter `id` "
"when calling `get_statutory_deduction`"
)
collection_formats = {}
path_params = {
"id": id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/StatutoryDeductions/{id}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="StatutoryDeductionObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_statutory_deduction")
def get_statutory_deductions(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves statutory deductions # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: StatutoryDeductions
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_statutory_deductions`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/StatutoryDeductions")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="StatutoryDeductions",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_statutory_deductions")
def get_superannuation(
self,
xero_tenant_id,
superannuation_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific superannuation using a unique superannuation ID # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str superannuation_id: Identifier for the superannuation (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SuperannuationObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_superannuation`"
)
# verify the required parameter 'superannuation_id' is set
if superannuation_id is None:
raise ValueError(
"Missing the required parameter `superannuation_id` "
"when calling `get_superannuation`"
)
collection_formats = {}
path_params = {
"SuperannuationID": superannuation_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Superannuations/{SuperannuationID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SuperannuationObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_superannuation")
def get_superannuations(
self,
xero_tenant_id,
page=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves superannuations # noqa: E501
OAuth2 scope: payroll.settings.read, payroll.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Superannuations
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_superannuations`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Superannuations")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Superannuations",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_superannuations")
def get_timesheet(
self,
xero_tenant_id,
timesheet_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a specific timesheet by using a unique timesheet ID # noqa: E501
OAuth2 scope: payroll.timesheets.read, timesheets.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_timesheet`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `get_timesheet`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_timesheet")
def get_timesheets(
self,
xero_tenant_id,
page=empty,
employee_id=empty,
payroll_calendar_id=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves timesheets # noqa: E501
OAuth2 scope: payroll.timesheets.read, payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param int page: Page number which specifies the set of records to retrieve. By default the number of the records per set is 100.
:param str employee_id: By default get Timesheets will return the timesheets for all employees in an organization. You can add GET https://…/timesheets?filter=employeeId=={EmployeeID} to get only the timesheets of a particular employee.
:param str payroll_calendar_id: By default get Timesheets will return all the timesheets for an organization. You can add GET https://…/timesheets?filter=payrollCalendarId=={PayrollCalendarID} to filter the timesheets by payroll calendar id
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Timesheets
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_timesheets`"
)
collection_formats = {}
path_params = {}
query_params = []
if page is not empty:
query_params.append(("page", page))
if employee_id is not empty:
query_params.append(("employeeId", employee_id))
if payroll_calendar_id is not empty:
query_params.append(("payrollCalendarId", payroll_calendar_id))
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Timesheets",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_timesheets")
def get_tracking_categories(
self,
xero_tenant_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves tracking categories # noqa: E501
OAuth2 scope: payroll.settings.read, settings.payslip
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TrackingCategories
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_tracking_categories`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Settings/TrackingCategories")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TrackingCategories",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_tracking_categories")
def revert_timesheet(
self,
xero_tenant_id,
timesheet_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Reverts a timesheet to draft # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `revert_timesheet`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `revert_timesheet`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}/RevertToDraft")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "revert_timesheet")
def update_employee(
self,
xero_tenant_id,
employee_id,
employee,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates an existing employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param Employee employee: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_employee`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `update_employee`"
)
# verify the required parameter 'employee' is set
if employee is None:
raise ValueError(
"Missing the required parameter `employee` "
"when calling `update_employee`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_employee")
def update_employee_earnings_template(
self,
xero_tenant_id,
employee_id,
pay_template_earning_id,
earnings_template,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates an earnings template records for an employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param str pay_template_earning_id: Id for single pay template earnings object (required)
:param EarningsTemplate earnings_template: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EarningsTemplateObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_employee_earnings_template`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `update_employee_earnings_template`"
)
# verify the required parameter 'pay_template_earning_id' is set
if pay_template_earning_id is None:
raise ValueError(
"Missing the required parameter `pay_template_earning_id` "
"when calling `update_employee_earnings_template`"
)
# verify the required parameter 'earnings_template' is set
if earnings_template is None:
raise ValueError(
"Missing the required parameter `earnings_template` "
"when calling `update_employee_earnings_template`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
"PayTemplateEarningID": pay_template_earning_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = earnings_template
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/Employees/{EmployeeID}/PayTemplates/earnings/{PayTemplateEarningID}"
)
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EarningsTemplateObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "update_employee_earnings_template"
)
def update_employee_leave(
self,
xero_tenant_id,
employee_id,
leave_id,
employee_leave,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates leave records for a specific employee # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param str leave_id: Leave id for single object (required)
:param EmployeeLeave employee_leave: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeLeaveObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_employee_leave`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `update_employee_leave`"
)
# verify the required parameter 'leave_id' is set
if leave_id is None:
raise ValueError(
"Missing the required parameter `leave_id` "
"when calling `update_employee_leave`"
)
# verify the required parameter 'employee_leave' is set
if employee_leave is None:
raise ValueError(
"Missing the required parameter `employee_leave` "
"when calling `update_employee_leave`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
"LeaveID": leave_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee_leave
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/Leave/{LeaveID}")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeLeaveObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_employee_leave")
def update_employee_salary_and_wage(
self,
xero_tenant_id,
employee_id,
salary_and_wages_id,
salary_and_wage,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates an employee's salary and wages record # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
        :param str salary_and_wages_id: Id for single salary and wages object (required)
:param SalaryAndWage salary_and_wage: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: SalaryAndWageObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_employee_salary_and_wage`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `update_employee_salary_and_wage`"
)
# verify the required parameter 'salary_and_wages_id' is set
if salary_and_wages_id is None:
raise ValueError(
"Missing the required parameter `salary_and_wages_id` "
"when calling `update_employee_salary_and_wage`"
)
# verify the required parameter 'salary_and_wage' is set
if salary_and_wage is None:
raise ValueError(
"Missing the required parameter `salary_and_wage` "
"when calling `update_employee_salary_and_wage`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
"SalaryAndWagesID": salary_and_wages_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = salary_and_wage
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/Employees/{EmployeeID}/SalaryAndWages/{SalaryAndWagesID}"
)
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SalaryAndWageObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "update_employee_salary_and_wage"
)
def update_employee_tax(
self,
xero_tenant_id,
employee_id,
employee_tax,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates the tax records for a specific employee # noqa: E501
OAuth2 scope: payroll.employees.read, payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str employee_id: Employee id for single object (required)
:param EmployeeTax employee_tax: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: EmployeeTaxObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_employee_tax`"
)
# verify the required parameter 'employee_id' is set
if employee_id is None:
raise ValueError(
"Missing the required parameter `employee_id` "
"when calling `update_employee_tax`"
)
# verify the required parameter 'employee_tax' is set
if employee_tax is None:
raise ValueError(
"Missing the required parameter `employee_tax` "
"when calling `update_employee_tax`"
)
collection_formats = {}
path_params = {
"EmployeeID": employee_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employee_tax
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees/{EmployeeID}/Tax")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="EmployeeTaxObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_employee_tax")
def update_pay_run(
self,
xero_tenant_id,
pay_run_id,
pay_run,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates a pay run # noqa: E501
OAuth2 scope: payroll.payruns
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str pay_run_id: Identifier for the pay run (required)
:param PayRun pay_run: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PayRunObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_pay_run`"
)
# verify the required parameter 'pay_run_id' is set
if pay_run_id is None:
raise ValueError(
"Missing the required parameter `pay_run_id` "
"when calling `update_pay_run`"
)
# verify the required parameter 'pay_run' is set
if pay_run is None:
raise ValueError(
"Missing the required parameter `pay_run` "
"when calling `update_pay_run`"
)
collection_formats = {}
path_params = {
"PayRunID": pay_run_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = pay_run
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PayRuns/{PayRunID}")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PayRunObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_pay_run")
def update_pay_slip_line_items(
self,
xero_tenant_id,
pay_slip_id,
pay_slip,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates an employee pay slip # noqa: E501
OAuth2 scope: payroll.employees
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str pay_slip_id: Identifier for the payslip (required)
:param PaySlip pay_slip: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PaySlipObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_pay_slip_line_items`"
)
# verify the required parameter 'pay_slip_id' is set
if pay_slip_id is None:
raise ValueError(
"Missing the required parameter `pay_slip_id` "
"when calling `update_pay_slip_line_items`"
)
# verify the required parameter 'pay_slip' is set
if pay_slip is None:
raise ValueError(
"Missing the required parameter `pay_slip` "
"when calling `update_pay_slip_line_items`"
)
collection_formats = {}
path_params = {
"PaySlipID": pay_slip_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = pay_slip
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PaySlips/{PaySlipID}")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaySlipObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_pay_slip_line_items")
def update_timesheet_line(
self,
xero_tenant_id,
timesheet_id,
timesheet_line_id,
timesheet_line,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates a timesheet line for a specific timesheet # noqa: E501
OAuth2 scope: payroll.timesheets
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str timesheet_id: Identifier for the timesheet (required)
:param str timesheet_line_id: Identifier for the timesheet line (required)
:param TimesheetLine timesheet_line: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TimesheetLineObject
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_timesheet_line`"
)
# verify the required parameter 'timesheet_id' is set
if timesheet_id is None:
raise ValueError(
"Missing the required parameter `timesheet_id` "
"when calling `update_timesheet_line`"
)
# verify the required parameter 'timesheet_line_id' is set
if timesheet_line_id is None:
raise ValueError(
"Missing the required parameter `timesheet_line_id` "
"when calling `update_timesheet_line`"
)
# verify the required parameter 'timesheet_line' is set
if timesheet_line is None:
raise ValueError(
"Missing the required parameter `timesheet_line` "
"when calling `update_timesheet_line`"
)
collection_formats = {}
path_params = {
"TimesheetID": timesheet_id,
"TimesheetLineID": timesheet_line_id,
}
query_params = []
header_params = {
"Xero-Tenant-Id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = timesheet_line
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Timesheets/{TimesheetID}/Lines/{TimesheetLineID}")
try:
return self.api_client.call_api(
url,
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TimesheetLineObject",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_timesheet_line")
| 35.38015
| 248
| 0.594814
|
44d6d325a716310d1708dea98a9467670d05773d
| 6,649
|
py
|
Python
|
Simulation_pypuf/bb84_xorpuf4_adaptive.py
|
mayaobobby/hybridpuf_simulation
|
2497f4afd62e635fc607af7aa8436b3a387c2d44
|
[
"Apache-2.0"
] | null | null | null |
Simulation_pypuf/bb84_xorpuf4_adaptive.py
|
mayaobobby/hybridpuf_simulation
|
2497f4afd62e635fc607af7aa8436b3a387c2d44
|
[
"Apache-2.0"
] | null | null | null |
Simulation_pypuf/bb84_xorpuf4_adaptive.py
|
mayaobobby/hybridpuf_simulation
|
2497f4afd62e635fc607af7aa8436b3a387c2d44
|
[
"Apache-2.0"
] | null | null | null |
import netsquid as ns
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pathlib import Path
import pypuf.simulation, pypuf.io
from challenge_test import arbitrary_challenges
from apuf_attack import *
seed_puf_instances = []
seed_challenge_instances = []
'''
Description: Builds the schedule of CRP counts (number of CRPs used at each attack step).
'''
def crp_apuf(n, steps=20):
crps = np.array([])
N = 1000
step = 0
if n == 64:
step = 10e3
elif n == 128:
step = 40e3
for i in range(steps):
crps = np.append(crps, N)
N += step
return crps
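# Illustrative sketch (added): crp_apuf builds the schedule of CRP counts used at
# each attack step, starting from 1000 and growing by 10e3 (n=64) or 40e3 (n=128)
# per step. Wrapped in a helper so importing this module stays side-effect free.
def _example_crp_schedule():
    crps_64 = crp_apuf(64, steps=3)
    # expected: array([ 1000., 11000., 21000.])
    return crps_64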
'''
Parameter of CPUF:
n: input size
m: output size
N: N-sample of CRPs
k: For XORPUF (k-APUF in parallel)
noisiness_cpuf: CPUF device noise
'''
def CPUF_param(samples_instance, qubit_size):
n = 128
m = qubit_size*2
N_samples = samples_instance
k = 4
noisiness_cpuf = 0
return n, m, N_samples, k, noisiness_cpuf
'''
CPUF instance (n-bit challenge, m-bit response)
'''
def CPUF_gen(n, m, k, noisiness_cpuf):
global seed_puf_instances
seed_puf_instances, puf = [], []
for i in range(m):
seed_puf_instances.append(int.from_bytes(os.urandom(4), "big"))
puf.append(pypuf.simulation.XORArbiterPUF(n=n, noisiness=noisiness_cpuf, seed=seed_puf_instances[i], k=k))
return puf
'''
CRP instances(n-bit challenge, m-bit response)
'''
def CRP_gen_one(n, m, N_samples, puf):
seed_challenges = int.from_bytes(os.urandom(4), "big")
challenges_instance = pypuf.io.random_inputs(n, 1, seed_challenges)
challenges = np.zeros((N_samples, n))
responses = np.zeros((N_samples, m))
for i in range(m):
crps_instances = arbitrary_challenges.random_challenges_crps(puf[i], n, 1, challenges_instance)
responses[0][i] = crps_instances.responses
for j in range(N_samples):
challenges[j,:] = challenges_instance
responses[j,:] = responses[0,:]
global seed_challenge_instances
seed_challenge_instances.append(seed_challenges)
# pp: Post-processing
challenges_pp = (1 - challenges) // 2
responses_pp = (1 - responses) // 2
return challenges_pp, responses_pp
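# Illustration (added): pypuf represents challenges/responses in {-1, +1}; the
# post-processing in CRP_gen_one maps them to {0, 1} bits via (1 - x) // 2.
def _example_post_processing():
    raw = np.array([-1, 1, -1, 1])
    return (1 - raw) // 2  # -> array([1, 0, 1, 0])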
'''
Program to measure a qubit with a basis as an optimal (adaptive) adversary
'''
def OptimalMeasurement_adptive(bases_eve, bases_reference, states_reference):
states_eve = np.zeros(len(states_reference))
for i in range(len(bases_reference)):
if bases_eve[i] == bases_reference[i]:
states_eve[i] = states_reference[i]
else:
states_eve[i] = np.random.randint(2, size=1)
return states_eve
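# Minimal usage sketch (added): when the adversary's basis guess matches the
# reference basis the state is read out deterministically, otherwise the outcome
# is a uniformly random bit. All array values below are purely illustrative.
def _example_optimal_measurement():
    bases_reference = np.array([0, 1, 0])
    states_reference = np.array([1, 0, 1])
    bases_eve = np.array([0, 0, 0])  # second guess is wrong -> random outcome there
    return OptimalMeasurement_adptive(bases_eve, bases_reference, states_reference)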
def run_experiment_adptive_attack(n, m, N_samples, k, noisiness_cpuf, qubit_size):
puf = CPUF_gen(n, m, k, noisiness_cpuf)
challenges_pp, responses_pp = CRP_gen_one(n, m, N_samples, puf)
runs = N_samples
bases_correct = []
counter_bases_match = 0
incorrect_basis = []
bases_eve = np.random.randint(2, size=qubit_size)
states_eve_record = np.zeros((runs, qubit_size))
for i in range(runs):
for j in range(qubit_size):
bases_reference = responses_pp[0][1::2]
states_reference = responses_pp[0][0::2]
states_eve = OptimalMeasurement_adptive(bases_eve, bases_reference, states_reference)
states_eve_record[i][:] = states_eve
if i >= 1:
if j in bases_correct:
pass
elif states_eve_record[i][j] != states_eve_record[i-1][j]:
bases_eve[j] = np.abs(1-bases_eve[j])
bases_correct.append(j)
if i >= 1:
if (bases_eve == bases_reference).all():
counter_bases_match = i+1
incorrect_basis[:qubit_size] = [0]*qubit_size
break
if i >= 4:
for k in range(qubit_size):
if bases_eve[k] != bases_reference[k]:
incorrect_basis.append(1)
else:
incorrect_basis.append(0)
break
return counter_bases_match, incorrect_basis
def run_experiment_adptive_attack_average(n, m, N_samples, k, noisiness_cpuf, repeat_time=1000, qubit_size=1):
counter_queries = []
iteration = 1000
correct_bases = [1000]*qubit_size
incorrect_basis = []
for i in range(iteration):
counter_bases_match = 0
counter_bases_match, incorrect_basis = run_experiment_adptive_attack(n, m, N_samples, k, noisiness_cpuf, qubit_size)
counter_queries.append(counter_bases_match)
correct_bases = [x1 - x2 for (x1, x2) in zip(correct_bases, incorrect_basis)]
query_adaptive = sum(counter_queries)/len(counter_queries)
accuracy = sum([x / iteration for x in correct_bases])/qubit_size
print("Number of adpative queries:", query_adaptive)
print("Correct rate:", accuracy)
return query_adaptive, accuracy
'''
Description: Emulation of the underlying CPUF of the HPUF with BB84 encoding
'''
def bb84_xorpuf4(puf_bit, puf_basis, position, k, steps, success_prob, query_adaptive):
# Times of repeat experiment (for each number of CRPs)
repeat_experiment = 20
###################################################################
# Obtain simulation result of HPUF under logistic regression attack
###################################################################
crps = crp_apuf(n, steps)
if position == 'basis':
accuracy_hpuf = instance_one_hybrid_apuf_attack_n(success_prob, puf_bit, puf_basis, crps, position, repeat_experiment, steps)
np.save('./data/xorpuf4/'+str(n)+'n_xorpuf4_adaptive_crps.npy', crps*query_adaptive)
np.save('./data/xorpuf4/'+str(n)+'h_xorpuf4_adaptive_'+position+'_a.npy', accuracy_hpuf)
return crps, accuracy_hpuf
if __name__ == '__main__':
'''
    Simulation of an HPUF with BB84 encoding and an underlying 4-XOR PUF against adaptive adversaries
    Variables:
    n: length of the challenge; it defaults to 64 bits
    noisiness: device noise; it affects the reliability (robustness) of the CPUF instance
    k: k value (number of arbiter PUFs XORed per response bit in the CPUF construction)
    Steps:
    1. Emulate the accuracy of the obtained CRPs with adaptive queries.
    2. Emulate the input-output behavior of the CPUF that encodes the basis value, together with the number of required CRPs.
    3. Plot the result (also see the plot.py script in the data folder if running separately).
'''
# Enable GPU/CPU (optional)
# os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
Path("./data/xorpuf4").mkdir(parents=True, exist_ok=True)
qubit_size = 1
samples=100
n, m, N_samples, k, noisiness_cpuf = CPUF_param(samples, qubit_size)
# Times of iterations (with increment of CRPs)
steps = 20
###########################
# Create instances of CPUFs
###########################
puf_instances = CPUF_gen(n, m, k, noisiness_cpuf)
puf_bit = puf_instances[0]
puf_basis = puf_instances[1]
query_adaptive, accuracy = run_experiment_adptive_attack_average(n, m, N_samples, k, noisiness_cpuf, qubit_size)
success_prob = accuracy
crps, accuracy_hpuf = bb84_xorpuf4(puf_bit, puf_basis, 'basis', k, steps, success_prob, query_adaptive)
print(query_adaptive)
| 28.054852
| 127
| 0.716348
|
ec3a53d2f8c4a78203e68823d35608c43901876b
| 574
|
py
|
Python
|
block/BlockGrassBlock.py
|
uuk0/mcpython-4
|
1ece49257b3067027cc43b452a2fc44908d3514c
|
[
"MIT"
] | 2
|
2019-08-21T08:23:45.000Z
|
2019-09-25T13:20:28.000Z
|
block/BlockGrassBlock.py
|
uuk0/mcpython-4
|
1ece49257b3067027cc43b452a2fc44908d3514c
|
[
"MIT"
] | 11
|
2019-08-21T08:46:01.000Z
|
2021-09-08T01:18:04.000Z
|
block/BlockGrassBlock.py
|
uuk0/mcpython-4
|
1ece49257b3067027cc43b452a2fc44908d3514c
|
[
"MIT"
] | 5
|
2019-08-30T08:19:57.000Z
|
2019-10-26T03:31:16.000Z
|
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
import globals as G
from . import Block
@G.registry
class BlockGrassBlock(Block.Block):
"""
base class for grass
todo: add -> dirt convert
"""
@staticmethod
def get_name() -> str:
return "minecraft:grass_block"
def get_model_state(self) -> dict:
return {"snowy": "false"}
| 22.96
| 76
| 0.695122
|
ccf217f069fa7ac180dfef39b4e4bd705acb1163
| 743
|
py
|
Python
|
tinymce/urls.py
|
yychen/django-tinymce
|
8aca4af6d6be06e68c2fce2779679875b11c302e
|
[
"MIT"
] | 1
|
2019-04-15T10:28:49.000Z
|
2019-04-15T10:28:49.000Z
|
tinymce/urls.py
|
vstoykov/django-tinymce
|
1df8fe70f081419e7e79ed8c9f8d1a2644c814df
|
[
"MIT"
] | 9
|
2019-12-05T20:37:07.000Z
|
2022-02-10T12:34:48.000Z
|
tinymce/urls.py
|
vstoykov/django-tinymce
|
1df8fe70f081419e7e79ed8c9f8d1a2644c814df
|
[
"MIT"
] | 1
|
2022-03-27T05:19:58.000Z
|
2022-03-27T05:19:58.000Z
|
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
try:
from django.conf.urls import url, patterns
except:
from django.conf.urls.defaults import url, patterns
urlpatterns = patterns('tinymce.views',
url(r'^js/textareas/(?P<name>.+)/$', 'textareas_js', name='tinymce-js'),
url(r'^js/textareas/(?P<name>.+)/(?P<lang>.*)$', 'textareas_js', name='tinymce-js-lang'),
url(r'^spellchecker/$', 'spell_check'),
url(r'^flatpages_link_list/$', 'flatpages_link_list'),
url(r'^compressor/$', 'compressor', name='tinymce-compressor'),
url(r'^filebrowser/$', 'filebrowser', name='tinymce-filebrowser'),
url(r'^preview/(?P<name>.+)/$', 'preview', name='tinymce-preview'),
)
| 41.277778
| 93
| 0.664872
|
99a71515f39edd2452617c4e4f775f04e23693df
| 1,322
|
py
|
Python
|
arcade/gl/__init__.py
|
thecodinghyrax/arcade
|
3d38ab42ce06e737f48240223484aa7706ffa11c
|
[
"MIT"
] | null | null | null |
arcade/gl/__init__.py
|
thecodinghyrax/arcade
|
3d38ab42ce06e737f48240223484aa7706ffa11c
|
[
"MIT"
] | null | null | null |
arcade/gl/__init__.py
|
thecodinghyrax/arcade
|
3d38ab42ce06e737f48240223484aa7706ffa11c
|
[
"MIT"
] | null | null | null |
"""
**Fair warning: This module contains the low level rendering API for arcade
and is only recommended for more advanced users**
This module contains a wrapper over OpenGL 3.3 core, making OpenGL
more reasonable to work with and easier to learn.
The API is based on `ModernGL <https://github.com/moderngl/moderngl>`_ implementing
a subset of the features.
We use pyglet's OpenGL bindings based on ctypes.
Creating OpenGL resources such as buffers, framebuffers, programs and textures
should be done through methods in a context.
* Arcade users should access :py:attr:`arcade.Window.ctx` exposing an :py:class:`arcade.ArcadeContext`
* Pyglet users can instantiate an :py:class:`arcade.gl.Context` for the window or
extend this class with more features if needed.
"""
from .context import Context
from .types import BufferDescription
from .exceptions import ShaderException
from .enums import *
from .buffer import Buffer
from .vertex_array import Geometry, VertexArray
from .texture import Texture
from .framebuffer import Framebuffer
from .program import Program
from .query import Query
from . import geometry
__all__ = [
'Buffer',
'BufferDescription',
'Context',
'Framebuffer',
'Geometry',
'Program',
'Query',
'ShaderException',
'VertexArray',
'Texture',
'geometry',
]
| 30.045455
| 102
| 0.757943
|
5b3b46045c3dec29d887fb8daf89d1c0a4cc34dc
| 9,199
|
py
|
Python
|
respa_o365/calendar_sync.py
|
codepointtku/respa
|
bb9cd8459d5562569f976dbc609ec41ceecc8023
|
[
"MIT"
] | null | null | null |
respa_o365/calendar_sync.py
|
codepointtku/respa
|
bb9cd8459d5562569f976dbc609ec41ceecc8023
|
[
"MIT"
] | 38
|
2020-01-24T11:30:53.000Z
|
2022-01-28T12:42:47.000Z
|
respa_o365/calendar_sync.py
|
codepointtku/respa
|
bb9cd8459d5562569f976dbc609ec41ceecc8023
|
[
"MIT"
] | 14
|
2020-02-26T08:17:34.000Z
|
2021-09-14T07:57:21.000Z
|
import logging
import json
from django.db import transaction, DatabaseError
from respa_o365.respa_availabilility_repository import RespaAvailabilityRepository
from respa_o365.o365_availability_repository import O365AvailabilityRepository
import string
import random
from django.conf import settings
from django.utils.dateparse import parse_datetime
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import BasePermission, IsAuthenticated
from requests_oauthlib import OAuth2Session
from urllib.parse import urlparse, parse_qs
from resources.models import Resource, Reservation
from .id_mapper import IdMapper
from .models import OutlookCalendarLink, OutlookCalendarReservation, OutlookCalendarAvailability, OutlookSyncQueue
from .o365_calendar import O365Calendar, MicrosoftApi
from .o365_notifications import O365Notifications
from .o365_reservation_repository import O365ReservationRepository
from .reservation_sync import ReservationSync
from .respa_reservation_repository import RespaReservations
from respa_o365.sync_operations import reservationSyncActions, availabilitySyncActions
logger = logging.getLogger(__name__)
class CanSyncCalendars(BasePermission):
def has_object_permission(self, request, view, obj):
if isinstance(obj, Resource):
return obj.unit.is_manager(request.user)
return False
def add_to_queue(link):
OutlookSyncQueue.objects.create(calendar_link=link)
def process_queue():
try:
queue = OutlookSyncQueue.objects.all().order_by('calendar_link_id')
if not queue:
logger.info("Nothing to sync.")
return
logger.info("Handling {} entries from sync queue.".format(queue.count()))
previous_id = None
for item in queue:
with transaction.atomic():
link_id = item.calendar_link_id
same_than_previous = link_id == previous_id
previous_id = link_id
if not same_than_previous:
link = OutlookCalendarLink.objects.get(pk=link_id)
perform_sync_to_exchange(link, lambda sync: sync.sync_all())
item.delete()
except DatabaseError as e:
logger.warning("Outlook synchronisation failed due database error.", exc_info=e)
return
def perform_sync_to_exchange(link, func):
# Sync reservations
logger.info("Syncing reservations. User=%s, resource=%s (%s), link=%s", link.user.id, link.resource.name, link.resource.id, link.id)
_perform_sync(link=link, func=func, respa_memento_field='respa_reservation_sync_memento',
o365_memento_field='exchange_reservation_sync_memento', outlook_model=OutlookCalendarReservation,
outlook_model_event_id_property='reservation_id', respa_repo=RespaReservations, o365_repo=O365ReservationRepository,
event_prefix=settings.O365_CALENDAR_RESERVATION_EVENT_PREFIX, sync_actions=reservationSyncActions)
# Sync availability / periods
logger.info("Syncing availability. User=%s, resource=%s (%s), link=%s", link.user.id, link.resource.name, link.resource.id, link.id)
_perform_sync(link=link, func=func, respa_memento_field='respa_availability_sync_memento',
o365_memento_field='exchange_availability_sync_memento', outlook_model=OutlookCalendarAvailability,
outlook_model_event_id_property='period_id', respa_repo=RespaAvailabilityRepository, o365_repo=O365AvailabilityRepository,
event_prefix=settings.O365_CALENDAR_AVAILABILITY_EVENT_PREFIX, sync_actions=availabilitySyncActions)
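# Descriptive note (added for clarity): _perform_sync is the generic helper shared by the
# reservation and availability passes above. It loads the stored id/change-key mappings for
# the calendar link, runs the ReservationSync, and then persists the updated mappings,
# change keys and mementos back to the database.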
def _perform_sync(link, func, respa_memento_field, o365_memento_field, outlook_model, outlook_model_event_id_property,
event_prefix, sync_actions, o365_repo, respa_repo):
token = link.token
respa_memento = getattr(link, respa_memento_field)
o365_memento = getattr(link, o365_memento_field)
id_mappings = {}
reservation_item_data = {}
known_exchange_items = set()
respa_change_keys = {}
exchange_change_keys = {}
for res in outlook_model.objects.filter(calendar_link=link):
event_id = getattr(res, outlook_model_event_id_property)
id_mappings[event_id] = res.exchange_id
reservation_item_data[event_id] = res
known_exchange_items.add(res.exchange_id)
respa_change_keys[event_id] = res.respa_change_key
exchange_change_keys[res.exchange_id] = res.exchange_change_key
# Initialise components
mapper = IdMapper(id_mappings)
api = MicrosoftApi(token)
cal = O365Calendar(microsoft_api=api, known_events=known_exchange_items, event_prefix=event_prefix)
o365 = o365_repo(cal)
respa = respa_repo(resource_id=link.resource.id)
sync = ReservationSync(respa, o365, id_mapper=mapper, respa_memento=respa_memento, remote_memento=o365_memento,
respa_change_keys=respa_change_keys, remote_change_keys=exchange_change_keys, sync_actions=sync_actions)
# Perform synchronisation
func(sync)
# Store data back to database
current_exchange_change_keys = sync.remote_change_keys()
current_respa_change_keys = sync.respa_change_keys()
for respa_id, exchange_id in mapper.changes():
ri = reservation_item_data[respa_id]
ri.exchange_id = exchange_id
ri.exchange_change_key = current_exchange_change_keys.pop(exchange_id, ri.exchange_change_key)
ri.respa_change_key = current_respa_change_keys.pop(respa_id, ri.respa_change_key)
ri.save()
for respa_id, exchange_id in mapper.removals():
reservation_item_data[respa_id].delete()
for respa_id, exchange_id in mapper.additions():
exchange_change_key = current_exchange_change_keys.pop(exchange_id, "")
respa_change_key = current_respa_change_keys.pop(respa_id, "")
# Temporary debug code
if outlook_model == OutlookCalendarReservation:
logger.info("Saving new O365 reservation info...")
existing = outlook_model.objects.filter(exchange_id=exchange_id).first()
if existing:
logger.info("O365 reservation already exists with exchange_id={}. Existing link={}, resource={}, reservation_id={}, respa_change_key={}, exchange_change_key={}".format(exchange_id, existing.link, existing.link.resource_id, existing.reservation_id, existing.respa_change_key, existing.exchange_change_key))
logger.info("Overwriting with link={}, resource={}, reservation_id={}, respa_change_key={}, exchange_change_key={}".format(link, link.resource_id, respa_id, respa_change_key, exchange_change_key))
existing.delete()
kwargs = {
outlook_model_event_id_property: respa_id,
}
outlook_model.objects.create(
calendar_link=link,
exchange_id=exchange_id,
respa_change_key=respa_change_key,
exchange_change_key=exchange_change_key,
**kwargs)
for exchange_id, current_exchange_change_key in current_exchange_change_keys.items():
old_exchange_change_key = exchange_change_keys.get(exchange_id, "")
if current_exchange_change_key != old_exchange_change_key:
respa_id = mapper.reverse.get(exchange_id)
ri = reservation_item_data.get(respa_id, None)
if ri:
ri.exchange_change_key = current_exchange_change_key
ri.respa_change_key = current_respa_change_keys.pop(respa_id, ri.respa_change_key)
ri.save()
for respa_id, current_respa_change_key in current_respa_change_keys.items():
old_respa_change_key = respa_change_keys.get(respa_id, "")
if current_respa_change_key != old_respa_change_key:
exchange_id = mapper.get(respa_id)
ri = reservation_item_data.get(respa_id, None)
if ri:
ri.respa_change_key = current_respa_change_key
ri.exchange_change_key = current_exchange_change_keys.pop(exchange_id, ri.exchange_change_key)
ri.save()
setattr(link, o365_memento_field, sync.remote_memento())
setattr(link, respa_memento_field, sync.respa_memento())
link.token = api.current_token()
link.save()
def ensure_notification(link):
url = getattr(settings, "O365_NOTIFICATION_URL", None)
if not url:
return
api = MicrosoftApi(link.token)
subscriptions = O365Notifications(api)
random_secret = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
sub_id, created = subscriptions.ensureNotifications(notification_url=url,
resource="/me/events",
events=["updated", "deleted", "created"],
client_state=random_secret,
subscription_id=link.exchange_subscription_id
)
if created:
link.exchange_subscription_id = sub_id
link.exchange_subscription_secret = random_secret
link.save()
| 52.267045
| 321
| 0.718448
|
57e145891d2a045185dc3153905933cfc77d6d38
| 538
|
py
|
Python
|
manage.py
|
nibinn/Rnd
|
e306b3da3af5e9da66d11436bc7abf3a77f50573
|
[
"MIT"
] | null | null | null |
manage.py
|
nibinn/Rnd
|
e306b3da3af5e9da66d11436bc7abf3a77f50573
|
[
"MIT"
] | null | null | null |
manage.py
|
nibinn/Rnd
|
e306b3da3af5e9da66d11436bc7abf3a77f50573
|
[
"MIT"
] | null | null | null |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.888889
| 73
| 0.674721
|
3012c72d3cba79e4781a2da3a0883b10b7220092
| 3,452
|
py
|
Python
|
app/app/settings.py
|
daveholly57/recipe-app-api
|
41bef8bf7baf1a8cf17f1be4cf930e4e4f231569
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
daveholly57/recipe-app-api
|
41bef8bf7baf1a8cf17f1be4cf930e4e4f231569
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
daveholly57/recipe-app-api
|
41bef8bf7baf1a8cf17f1be4cf930e4e4f231569
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k5+r8_12rdrkyoq)7y09xacj$mt$h9859zdv02y%yd_wuvm#(3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
# core is the app, and User is the model
AUTH_USER_MODEL = 'core.User'
| 25.57037
| 91
| 0.687717
|
16b12944a81906e7889598a6abc4eea87a1cd167
| 639
|
py
|
Python
|
code/exampleStrats/aaratio.py
|
yoyoyonono/PrisonersDilemmaTournament
|
3b960e5549908ab0cc62c856427ccb7c867ab0a3
|
[
"MIT"
] | null | null | null |
code/exampleStrats/aaratio.py
|
yoyoyonono/PrisonersDilemmaTournament
|
3b960e5549908ab0cc62c856427ccb7c867ab0a3
|
[
"MIT"
] | null | null | null |
code/exampleStrats/aaratio.py
|
yoyoyonono/PrisonersDilemmaTournament
|
3b960e5549908ab0cc62c856427ccb7c867ab0a3
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, Literal, Tuple, Union
import random
import numpy as np
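# Descriptive note (inferred from the code below, assuming the usual convention of
# 1 = cooperate and 0 = defect): cooperate unless the opponent has defected in more
# than 10% of the rounds observed so far (history[1] holds the opponent's moves).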
def strategy(history: np.ndarray, memory: Union[Dict[str, Any], None]) -> Tuple[int, Union[Dict[str, Any], None]]:
#First Round
if history.shape[1] == 0:
memory = {}
memory['next_prediction'] = 1
return 1, memory
else:
if (len(history[1]) - np.count_nonzero(history[1]))/len(history[1]) > 0.1:
memory['next_prediction'] = 0
else:
memory['next_prediction'] = 1
if memory['next_prediction'] == 0:
return 0, memory
else:
return 1, memory
| 29.045455
| 114
| 0.5759
|
c38d017f6de121cc57d2bc60400334730975b902
| 4,531
|
py
|
Python
|
melp/libs/helices.py
|
maximilianKoeper/melp
|
863d1c55a36adf29f3508e15ecd5ed0a77544f53
|
[
"MIT"
] | 1
|
2021-12-07T10:00:23.000Z
|
2021-12-07T10:00:23.000Z
|
melp/libs/helices.py
|
maximilianKoeper/melp
|
863d1c55a36adf29f3508e15ecd5ed0a77544f53
|
[
"MIT"
] | null | null | null |
melp/libs/helices.py
|
maximilianKoeper/melp
|
863d1c55a36adf29f3508e15ecd5ed0a77544f53
|
[
"MIT"
] | 1
|
2021-11-15T13:41:06.000Z
|
2021-11-15T13:41:06.000Z
|
import numpy as np
from scipy.optimize import minimize, brute
from melp.libs import mathfunctions as mf
# DEBUG: Delete after testing is finished
# import matplotlib.pyplot as plt
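# Descriptive note (added for clarity): Helices models a charged-particle trajectory in a
# solenoidal B field as a helix parametrised by the turning angle alpha; dz is the advance
# in z per full turn (the pitch), dz = 2*pi*r*tan(theta).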
class Helices:
def __init__(self, vx, vy, vz, px, py, pz, htype, tile_pos):
self.bfield = -1
self.z0 = vz
self.type = htype
self.tile_pos = tile_pos
pt = np.hypot(px, py)
self.r = pt / (0.3 * self.bfield)
self.theta = np.arctan2(pz, pt)
self.phi = np.arctan2(py, px)
if self.type == 1:
self.phi += np.pi / 2
elif self.type == 2:
self.phi -= np.pi / 2
else:
raise ValueError('Helices: init: type not supported')
self.dz = 2 * np.pi * self.r * np.tan(self.theta)
self.xc = self.r * np.cos(self.phi) + vx
self.yc = self.r * np.sin(self.phi) + vy
# self.xy_vec = 0.
# self.test1 = 0.
#####################
# private functions #
#####################
def __Helix(self, alpha):
xyz = np.zeros(3)
xyz[0] = self.xc - self.r * np.cos(alpha)
xyz[1] = self.yc - self.r * np.sin(alpha)
xyz[2] = self.z0 + self.dz * ((alpha - self.phi) / (2 * np.pi))
return xyz
# ------------------------------------
def __Minimize_Func_Angle(self, alpha):
xyz_circ = self.__Helix(alpha)
xyz_tile = np.zeros(3)
xyz_tile[0] = self.tile_pos[0]
xyz_tile[1] = self.tile_pos[1]
xyz_tile[2] = self.tile_pos[2]
# distance = mf.distance_between_2d(xyz_tile[0:2], xyz_circ[0:2])
distance = mf.distance_between_3d(xyz_tile, xyz_circ)
return distance
# ------------------------------------
def __Get_Phi(self):
tmp_min = brute(self.__Minimize_Func_Angle, ranges=((-10 * np.pi, +10 * np.pi),), Ns=100)[0]
tmp_min = minimize(self.__Minimize_Func_Angle, tmp_min).x
return tmp_min
# ------------------------------------
def __Get_Primary_Tile_Hit_Vector(self):
temp_phi = self.__Get_Phi()
v1_tmp = self.__Helix(temp_phi)
if self.type == 2:
offset = - 0.1
else:
offset = + 0.1
v2_tmp = self.__Helix(temp_phi + offset)
xy_hit_vector = np.array(v1_tmp) - np.array(v2_tmp)
return xy_hit_vector
# ------------------------------------
def __Get_Primary_Tile_Hit_Angle(self, tile_norm_vec, angle):
if angle == "phi":
norm_vec = -np.array(tile_norm_vec)[0:2]
temp_vec = mf.angle_between_phi(self.__Get_Primary_Tile_Hit_Vector()[0:2], norm_vec)
return temp_vec
elif angle == "theta":
norm_vec = -np.array([0, 0, 1])
temp_vec = mf.angle_between(self.__Get_Primary_Tile_Hit_Vector(), norm_vec)
return temp_vec
elif angle == "norm":
norm_vec = tile_norm_vec
temp_vec = mf.angle_between(self.__Get_Primary_Tile_Hit_Vector(), norm_vec)
return temp_vec
else:
raise ValueError("angle != [phi/theta/norm]")
#####################
# public functions #
#####################
def hitAngle(self, tile_norm_vec, angle="phi"):
return self.__Get_Primary_Tile_Hit_Angle(tile_norm_vec, angle)
#####################
# TESTING functions #
#####################
"""
def test (self):
#self.test1 = minimize(self.__Minimize_Func_Angle, 0).x
self.test1 = brute(self.__Minimize_Func_Angle, ranges=((-4*np.pi,+4*np.pi),), Ns=100)[0]
self.test1 = minimize(self.__Minimize_Func_Angle, self.test1).x
print(self.test1)
def plottest (self):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
#startpoint
ax.scatter(self.__Helix(self.phi)[0],self.__Helix(self.phi)[1], self.__Helix(self.phi)[2], color="r")
i=self.phi
while i > -20:
#trajectory
ax.scatter(self.__Helix(i)[0],self.__Helix(i)[1],self.__Helix(i)[2], color="b", alpha = 0.1)
i-=0.1
#tilepos
ax.scatter(self.tile_pos[0],self.tile_pos[1],self.tile_pos[2], color="g")
#hitpos
ax.scatter(self.__Helix(self.test1)[0], self.__Helix(self.test1)[1], self.__Helix(self.test1)[2],color="y")
#ax.scatter(self.__Helix(self.test1+self.phi)[0], self.__Helix(self.test1+self.phi)[1], self.__Helix(self.test1+self.phi)[2],color="y")
plt.show()
"""
| 30.823129
| 143
| 0.543368
|
73bf3fd2ed97af265af2bf92646c82b27c3d79b0
| 21,935
|
py
|
Python
|
test/functional/test_framework/util.py
|
HuntCoinDeveloper/huntcoin
|
99198152d21b58ce598f46783074b64113cc5e64
|
[
"MIT"
] | 2
|
2019-05-13T02:10:08.000Z
|
2019-05-26T14:47:29.000Z
|
test/functional/test_framework/util.py
|
HuntCoinDeveloper/huntcoin
|
99198152d21b58ce598f46783074b64113cc5e64
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/util.py
|
HuntCoinDeveloper/huntcoin
|
99198152d21b58ce598f46783074b64113cc5e64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
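# Illustrative usage sketch (the error code and message shown are examples, not taken from this suite):
#   assert_raises_rpc_error(-8, "Block height out of range", node.getblockhash, 10000000)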
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
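# wait_until polls `predicate` until it returns True, giving up after `attempts` tries or
# `timeout` seconds (a 60-second timeout applies when neither bound is supplied); when a
# `lock` is provided it is acquired around each check.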
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
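# Illustrative usage sketch (the URL is a placeholder):
#   node = get_rpc_proxy("http://user:pass@127.0.0.1:16008", 0, timeout=30)
#   node.getblockcount()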
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
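# Worked example (illustrative): with PortSeed.n == 1, p2p_port(0) evaluates to
# 11000 + 0 + (8 * 1) % (5000 - 1 - 8) = 11008, and rpc_port(0) to 16008.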
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "huntcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "huntcoin.conf")):
with open(os.path.join(datadir, "huntcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node" + str(n_node), "regtest", logname)
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
from_connection.disconnectnode(nodeid=peer_id)
for _ in range(50):
if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []:
break
time.sleep(0.1)
else:
raise AssertionError("timed out waiting for disconnect")
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransactionwithwallet(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # generate a 66k transaction,
    # and 14 of them are close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
| 38.147826
| 119
| 0.650331
|
c694fcd4b988155371f12972811bd006b7a2dfc1
| 14,548
|
py
|
Python
|
vsphere/datadog_checks/vsphere/metrics.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | 663
|
2016-08-23T05:23:45.000Z
|
2022-03-29T00:37:23.000Z
|
vsphere/datadog_checks/vsphere/metrics.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | 6,642
|
2016-06-09T16:29:20.000Z
|
2022-03-31T22:24:09.000Z
|
vsphere/datadog_checks/vsphere/metrics.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | 1,222
|
2017-01-27T15:51:38.000Z
|
2022-03-31T18:17:51.000Z
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from pyVmomi import vim
# https://code.vmware.com/apis/358/vsphere/doc/cpu_counters.html
# Set of metrics that are emitted as percentages between 0 and 100. For those metrics, we divide the value by 100
# to get a float between 0 and 1.
PERCENT_METRICS = {
'cpu.capacity.contention.avg',
'cpu.coreUtilization.avg',
'cpu.corecount.contention.avg',
'cpu.demandEntitlementRatio.latest',
'cpu.latency.avg',
'cpu.readiness.avg',
'cpu.usage.avg',
'cpu.utilization.avg',
'datastore.siocActiveTimePercentage.avg',
'disk.capacity.contention.avg',
'disk.scsiReservationCnflctsPct.avg',
'gpu.mem.usage.avg',
'gpu.utilization.avg',
'mem.capacity.contention.avg',
'mem.latency.avg',
'mem.reservedCapacityPct.avg',
'mem.usage.avg',
'mem.vmfs.pbc.capMissRatio.latest',
'power.capacity.usagePct.avg',
'rescpu.actav1.latest',
'rescpu.actav15.latest',
'rescpu.actav5.latest',
'rescpu.actpk1.latest',
'rescpu.actpk15.latest',
'rescpu.actpk5.latest',
'rescpu.maxLimited1.latest',
'rescpu.maxLimited15.latest',
'rescpu.maxLimited5.latest',
'rescpu.runav1.latest',
'rescpu.runav15.latest',
'rescpu.runav5.latest',
'rescpu.runpk1.latest',
'rescpu.runpk15.latest',
'rescpu.runpk5.latest',
'storageAdapter.OIOsPct.avg',
'sys.diskUsage.latest',
'sys.resourceCpuAct1.latest',
'sys.resourceCpuAct5.latest',
'sys.resourceCpuMaxLimited1.latest',
'sys.resourceCpuMaxLimited5.latest',
'sys.resourceCpuRun1.latest',
'sys.resourceCpuRun5.latest',
'vcResources.priviledgedcpuusage.avg',
'vcResources.processcpuusage.avg',
'vcResources.systemcpuusage.avg',
'vcResources.systemnetusage.avg',
'vcResources.usercpuusage.avg',
'vsanDomObj.readCacheHitRate.latest',
}
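# Illustrative sketch (an assumption for clarity, not part of this module's API): a value
# reported for a metric listed in PERCENT_METRICS would be rescaled roughly like this:
#   def normalize(metric_name, value):
#       return value / 100.0 if metric_name in PERCENT_METRICS else value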
# All metrics that can be collected from VirtualMachines.
VM_METRICS = {
'cpu.costop.sum',
'cpu.demand.avg',
'cpu.demandEntitlementRatio.latest',
'cpu.entitlement.latest',
'cpu.idle.sum',
'cpu.latency.avg',
'cpu.maxlimited.sum',
'cpu.overlap.sum',
'cpu.readiness.avg',
'cpu.ready.sum',
'cpu.run.sum',
'cpu.swapwait.sum',
'cpu.system.sum',
'cpu.usage.avg',
'cpu.usagemhz.avg',
'cpu.used.sum',
'cpu.wait.sum',
'datastore.maxTotalLatency.latest',
'datastore.numberReadAveraged.avg',
'datastore.numberWriteAveraged.avg',
'datastore.read.avg',
'datastore.totalReadLatency.avg',
'datastore.totalWriteLatency.avg',
'datastore.write.avg',
'disk.busResets.sum',
'disk.commands.sum',
'disk.commandsAborted.sum',
'disk.commandsAveraged.avg',
'disk.maxTotalLatency.latest',
'disk.numberRead.sum',
'disk.numberReadAveraged.avg',
'disk.numberWrite.sum',
'disk.numberWriteAveraged.avg',
'disk.read.avg',
'disk.usage.avg',
'disk.write.avg',
'hbr.hbrNetRx.avg',
'hbr.hbrNetTx.avg',
'mem.active.avg',
'mem.activewrite.avg',
'mem.compressed.avg',
'mem.compressionRate.avg',
'mem.consumed.avg',
'mem.decompressionRate.avg',
'mem.entitlement.avg',
'mem.granted.avg',
'mem.latency.avg',
'mem.llSwapInRate.avg',
'mem.llSwapOutRate.avg',
'mem.llSwapUsed.avg',
'mem.overhead.avg',
'mem.overheadMax.avg',
'mem.overheadTouched.avg',
'mem.shared.avg',
'mem.swapin.avg',
'mem.swapinRate.avg',
'mem.swapout.avg',
'mem.swapoutRate.avg',
'mem.swapped.avg',
'mem.swaptarget.avg',
'mem.usage.avg',
'mem.vmmemctl.avg',
'mem.vmmemctltarget.avg',
'mem.zero.avg',
'mem.zipSaved.latest',
'mem.zipped.latest',
'net.broadcastRx.sum',
'net.broadcastTx.sum',
'net.bytesRx.avg',
'net.bytesTx.avg',
'net.droppedRx.sum',
'net.droppedTx.sum',
'net.multicastRx.sum',
'net.multicastTx.sum',
'net.packetsRx.sum',
'net.packetsTx.sum',
'net.pnicBytesRx.avg',
'net.pnicBytesTx.avg',
'net.received.avg',
'net.transmitted.avg',
'net.usage.avg',
'power.energy.sum',
'power.power.avg',
'rescpu.actav1.latest',
'rescpu.actav15.latest',
'rescpu.actav5.latest',
'rescpu.actpk1.latest',
'rescpu.actpk15.latest',
'rescpu.actpk5.latest',
'rescpu.maxLimited1.latest',
'rescpu.maxLimited15.latest',
'rescpu.maxLimited5.latest',
'rescpu.runav1.latest',
'rescpu.runav15.latest',
'rescpu.runav5.latest',
'rescpu.runpk1.latest',
'rescpu.runpk15.latest',
'rescpu.runpk5.latest',
'rescpu.sampleCount.latest',
'rescpu.samplePeriod.latest',
'sys.heartbeat.latest',
'sys.heartbeat.sum',
'sys.osUptime.latest',
'sys.uptime.latest',
'virtualDisk.busResets.sum',
'virtualDisk.commandsAborted.sum',
'virtualDisk.largeSeeks.latest',
'virtualDisk.mediumSeeks.latest',
'virtualDisk.numberReadAveraged.avg',
'virtualDisk.numberWriteAveraged.avg',
'virtualDisk.read.avg',
'virtualDisk.readIOSize.latest',
'virtualDisk.readLatencyUS.latest',
'virtualDisk.readLoadMetric.latest',
'virtualDisk.readOIO.latest',
'virtualDisk.smallSeeks.latest',
'virtualDisk.totalReadLatency.avg',
'virtualDisk.totalWriteLatency.avg',
'virtualDisk.write.avg',
'virtualDisk.writeIOSize.latest',
'virtualDisk.writeLatencyUS.latest',
'virtualDisk.writeLoadMetric.latest',
'virtualDisk.writeOIO.latest',
}
# All metrics that can be collected from ESXi Hosts.
HOST_METRICS = {
'cpu.coreUtilization.avg',
'cpu.costop.sum',
'cpu.demand.avg',
'cpu.idle.sum',
'cpu.latency.avg',
'cpu.readiness.avg',
'cpu.ready.sum',
'cpu.reservedCapacity.avg',
'cpu.swapwait.sum',
'cpu.totalCapacity.avg',
'cpu.usage.avg',
'cpu.usagemhz.avg',
'cpu.used.sum',
'cpu.utilization.avg',
'cpu.wait.sum',
'datastore.datastoreIops.avg',
'datastore.datastoreMaxQueueDepth.latest',
'datastore.datastoreNormalReadLatency.latest',
'datastore.datastoreNormalWriteLatency.latest',
'datastore.datastoreReadBytes.latest',
'datastore.datastoreReadIops.latest',
'datastore.datastoreReadLoadMetric.latest',
'datastore.datastoreReadOIO.latest',
'datastore.datastoreVMObservedLatency.latest',
'datastore.datastoreWriteBytes.latest',
'datastore.datastoreWriteIops.latest',
'datastore.datastoreWriteLoadMetric.latest',
'datastore.datastoreWriteOIO.latest',
'datastore.maxTotalLatency.latest',
'datastore.numberReadAveraged.avg',
'datastore.numberWriteAveraged.avg',
'datastore.read.avg',
'datastore.siocActiveTimePercentage.avg',
'datastore.sizeNormalizedDatastoreLatency.avg',
'datastore.totalReadLatency.avg',
'datastore.totalWriteLatency.avg',
'datastore.write.avg',
'disk.busResets.sum',
'disk.commands.sum',
'disk.commandsAborted.sum',
'disk.commandsAveraged.avg',
'disk.deviceLatency.avg',
'disk.deviceReadLatency.avg',
'disk.deviceWriteLatency.avg',
'disk.kernelLatency.avg',
'disk.kernelReadLatency.avg',
'disk.kernelWriteLatency.avg',
'disk.maxQueueDepth.avg',
'disk.maxTotalLatency.latest',
'disk.numberRead.sum',
'disk.numberReadAveraged.avg',
'disk.numberWrite.sum',
'disk.numberWriteAveraged.avg',
'disk.queueLatency.avg',
'disk.queueReadLatency.avg',
'disk.queueWriteLatency.avg',
'disk.read.avg',
'disk.scsiReservationCnflctsPct.avg',
'disk.scsiReservationConflicts.sum',
'disk.totalLatency.avg',
'disk.totalReadLatency.avg',
'disk.totalWriteLatency.avg',
'disk.usage.avg',
'disk.write.avg',
'hbr.hbrNetRx.avg',
'hbr.hbrNetTx.avg',
'hbr.hbrNumVms.avg',
'mem.active.avg',
'mem.activewrite.avg',
'mem.compressed.avg',
'mem.compressionRate.avg',
'mem.consumed.avg',
'mem.consumed.userworlds.avg',
'mem.consumed.vms.avg',
'mem.decompressionRate.avg',
'mem.granted.avg',
'mem.heap.avg',
'mem.heapfree.avg',
'mem.latency.avg',
'mem.llSwapIn.avg',
'mem.llSwapInRate.avg',
'mem.llSwapOut.avg',
'mem.llSwapOutRate.avg',
'mem.llSwapUsed.avg',
'mem.lowfreethreshold.avg',
'mem.overhead.avg',
'mem.reservedCapacity.avg',
'mem.shared.avg',
'mem.sharedcommon.avg',
'mem.state.latest',
'mem.swapin.avg',
'mem.swapinRate.avg',
'mem.swapout.avg',
'mem.swapoutRate.avg',
'mem.swapused.avg',
'mem.sysUsage.avg',
'mem.totalCapacity.avg',
'mem.unreserved.avg',
'mem.usage.avg',
'mem.vmfs.pbc.capMissRatio.latest',
'mem.vmfs.pbc.overhead.latest',
'mem.vmfs.pbc.size.latest',
'mem.vmfs.pbc.sizeMax.latest',
'mem.vmfs.pbc.workingSet.latest',
'mem.vmfs.pbc.workingSetMax.latest',
'mem.vmmemctl.avg',
'mem.zero.avg',
'net.broadcastRx.sum',
'net.broadcastTx.sum',
'net.bytesRx.avg',
'net.bytesTx.avg',
'net.droppedRx.sum',
'net.droppedTx.sum',
'net.errorsRx.sum',
'net.errorsTx.sum',
'net.multicastRx.sum',
'net.multicastTx.sum',
'net.packetsRx.sum',
'net.packetsTx.sum',
'net.received.avg',
'net.transmitted.avg',
'net.unknownProtos.sum',
'net.usage.avg',
'power.energy.sum',
'power.power.avg',
'power.powerCap.avg',
'rescpu.actav1.latest',
'rescpu.actav15.latest',
'rescpu.actav5.latest',
'rescpu.actpk1.latest',
'rescpu.actpk15.latest',
'rescpu.actpk5.latest',
'rescpu.maxLimited1.latest',
'rescpu.maxLimited15.latest',
'rescpu.maxLimited5.latest',
'rescpu.runav1.latest',
'rescpu.runav15.latest',
'rescpu.runav5.latest',
'rescpu.runpk1.latest',
'rescpu.runpk15.latest',
'rescpu.runpk5.latest',
'rescpu.sampleCount.latest',
'rescpu.samplePeriod.latest',
'storageAdapter.commandsAveraged.avg',
'storageAdapter.maxTotalLatency.latest',
'storageAdapter.numberReadAveraged.avg',
'storageAdapter.numberWriteAveraged.avg',
'storageAdapter.outstandingIOs.avg',
'storageAdapter.queueDepth.avg',
'storageAdapter.queueLatency.avg',
'storageAdapter.queued.avg',
'storageAdapter.read.avg',
'storageAdapter.totalReadLatency.avg',
'storageAdapter.totalWriteLatency.avg',
'storageAdapter.write.avg',
'storagePath.busResets.sum',
'storagePath.commandsAborted.sum',
'storagePath.commandsAveraged.avg',
'storagePath.maxTotalLatency.latest',
'storagePath.numberReadAveraged.avg',
'storagePath.numberWriteAveraged.avg',
'storagePath.read.avg',
'storagePath.totalReadLatency.avg',
'storagePath.totalWriteLatency.avg',
'storagePath.write.avg',
'sys.resourceCpuAct1.latest',
'sys.resourceCpuAct5.latest',
'sys.resourceCpuAllocMax.latest',
'sys.resourceCpuAllocMin.latest',
'sys.resourceCpuAllocShares.latest',
'sys.resourceCpuMaxLimited1.latest',
'sys.resourceCpuMaxLimited5.latest',
'sys.resourceCpuRun1.latest',
'sys.resourceCpuRun5.latest',
'sys.resourceCpuUsage.avg',
'sys.resourceFdUsage.latest',
'sys.resourceMemAllocMax.latest',
'sys.resourceMemAllocMin.latest',
'sys.resourceMemAllocShares.latest',
'sys.resourceMemConsumed.latest',
'sys.resourceMemCow.latest',
'sys.resourceMemMapped.latest',
'sys.resourceMemOverhead.latest',
'sys.resourceMemShared.latest',
'sys.resourceMemSwapped.latest',
'sys.resourceMemTouched.latest',
'sys.resourceMemZero.latest',
'sys.uptime.latest',
'virtualDisk.busResets.sum',
'virtualDisk.commandsAborted.sum',
}
# All metrics that can be collected from Datastores.
DATASTORE_METRICS = {
'datastore.busResets.sum',
'datastore.commandsAborted.sum',
'datastore.numberReadAveraged.avg',
'datastore.numberWriteAveraged.avg',
'datastore.throughput.contention.avg',
'datastore.throughput.usage.avg',
'disk.busResets.sum',
'disk.capacity.contention.avg',
'disk.capacity.latest',
'disk.capacity.provisioned.avg',
'disk.capacity.usage.avg',
'disk.numberReadAveraged.avg',
'disk.numberWriteAveraged.avg',
'disk.provisioned.latest',
'disk.unshared.latest',
'disk.used.latest',
}
# All metrics that can be collected from Datacenters.
DATACENTER_METRICS = {
'vmop.numChangeDS.latest',
'vmop.numChangeHost.latest',
'vmop.numChangeHostDS.latest',
'vmop.numClone.latest',
'vmop.numCreate.latest',
'vmop.numDeploy.latest',
'vmop.numDestroy.latest',
'vmop.numPoweroff.latest',
'vmop.numPoweron.latest',
'vmop.numRebootGuest.latest',
'vmop.numReconfigure.latest',
'vmop.numRegister.latest',
'vmop.numReset.latest',
'vmop.numSVMotion.latest',
'vmop.numShutdownGuest.latest',
'vmop.numStandbyGuest.latest',
'vmop.numSuspend.latest',
'vmop.numUnregister.latest',
'vmop.numVMotion.latest',
'vmop.numXVMotion.latest',
}
# All metrics that can be collected from Clusters.
CLUSTER_METRICS = {
# clusterServices are only available for DRS and HA clusters, and can cause errors that are caught down
# the line by the integration. That means some API calls are unnecessary.
# TODO: Look if we can prevent those unnecessary API calls
'clusterServices.cpufairness.latest',
'clusterServices.effectivecpu.avg',
'clusterServices.effectivemem.avg',
'clusterServices.failover.latest',
'clusterServices.memfairness.latest',
'cpu.totalmhz.avg',
'cpu.usage.avg',
'cpu.usagemhz.avg',
'mem.consumed.avg',
'mem.overhead.avg',
'mem.totalmb.avg',
'mem.usage.avg',
'mem.vmmemctl.avg',
'vmop.numChangeDS.latest',
'vmop.numChangeHost.latest',
'vmop.numChangeHostDS.latest',
'vmop.numClone.latest',
'vmop.numCreate.latest',
'vmop.numDeploy.latest',
'vmop.numDestroy.latest',
'vmop.numPoweroff.latest',
'vmop.numPoweron.latest',
'vmop.numRebootGuest.latest',
'vmop.numReconfigure.latest',
'vmop.numRegister.latest',
'vmop.numReset.latest',
'vmop.numSVMotion.latest',
'vmop.numShutdownGuest.latest',
'vmop.numStandbyGuest.latest',
'vmop.numSuspend.latest',
'vmop.numUnregister.latest',
'vmop.numVMotion.latest',
'vmop.numXVMotion.latest',
}
ALLOWED_METRICS_FOR_MOR = {
vim.VirtualMachine: VM_METRICS,
vim.HostSystem: HOST_METRICS,
vim.Datacenter: DATACENTER_METRICS,
vim.Datastore: DATASTORE_METRICS,
vim.ClusterComputeResource: CLUSTER_METRICS,
}
| 30.756871
| 113
| 0.681949
|
e31c12bd64cd6bd69b8053c71bea1c69b001d67a
| 1,064
|
py
|
Python
|
ml_demo/predict.py
|
mmqm4544/rta
|
6e34e4f9eace2f58bb7376603de6f144a0b0658b
|
[
"MIT"
] | 2
|
2019-02-14T02:33:15.000Z
|
2019-02-14T02:33:17.000Z
|
ml_demo/predict.py
|
mmqm4544/rta
|
6e34e4f9eace2f58bb7376603de6f144a0b0658b
|
[
"MIT"
] | null | null | null |
ml_demo/predict.py
|
mmqm4544/rta
|
6e34e4f9eace2f58bb7376603de6f144a0b0658b
|
[
"MIT"
] | null | null | null |
import os
from sklearn.externals import joblib
from preprocessing import SQLPreprocessor
from db import DBClient
class Predictor(object):
def __init__(self, modelId):
self.modelId = modelId
self.client = DBClient.create()
def run(self, **kargs):
modelId = self.modelId
table_name = kargs.get("table_name")
feature_names = joblib.load('./model/%s_feature_names.pkl' % modelId)
pipeline = joblib.load('./model/%s_pipeline.pkl' % modelId)
        if os.path.exists('./model/%s_le.pkl' % modelId):
le = joblib.load('./model/%s_le.pkl' % modelId)
else:
le = None
preprocessor = SQLPreprocessor(self.client, table_name)
records, data = preprocessor.getPredictData(feature_names)
predicted = pipeline.predict(data)
if le is not None:
predicted = le.inverse_transform(predicted).tolist()
outputTableName = preprocessor.outputPredictedData(feature_names, records, predicted)
return {"outputTableName": outputTableName}
| 32.242424
| 93
| 0.656015
|
c5c20c58e115804390fa0f6a754ec30f5878c038
| 3,984
|
py
|
Python
|
test/functional/wallet_scriptaddress2.py
|
PuertoRicoCoin/prtc
|
e5e9da6028c22fe353bfb3a06bb735f563678019
|
[
"MIT"
] | null | null | null |
test/functional/wallet_scriptaddress2.py
|
PuertoRicoCoin/prtc
|
e5e9da6028c22fe353bfb3a06bb735f563678019
|
[
"MIT"
] | null | null | null |
test/functional/wallet_scriptaddress2.py
|
PuertoRicoCoin/prtc
|
e5e9da6028c22fe353bfb3a06bb735f563678019
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new PuertoRicoTainoCoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
self.extra_args = [['-addresstype=legacy', '-deprecatedrpc=accounts', '-txindex=1'], [], ['-txindex=1']]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'P')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
self.nodes[1].generate(101)
self.sync_all()
tx = self.nodes[0].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr3, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
        # Send to an old-format multisig address. The addmultisigaddress API can
        # only generate addresses with the new prefix, so multisig_addr_old was
        # computed beforehand with an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Pg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'PZ974ZrPrmqMmm1PSVp4m8YEgo3bCPZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYPbmLTWPjq"
        # Send to the old-format address. The new client reports those funds under
        # the new-format address, because both encodings refer to the same
        # underlying multisig script.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr4, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.4)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
| 39.058824
| 112
| 0.650853
|
63c4b7105ca9d02b92a93948879588e1e469a213
| 2,098
|
py
|
Python
|
day14/reindeer_race.py
|
JulienCote/advent_of_code_2015
|
567a589c6b1c161ebfba8dcf1b9f3bc1bf6e0d00
|
[
"MIT"
] | null | null | null |
day14/reindeer_race.py
|
JulienCote/advent_of_code_2015
|
567a589c6b1c161ebfba8dcf1b9f3bc1bf6e0d00
|
[
"MIT"
] | null | null | null |
day14/reindeer_race.py
|
JulienCote/advent_of_code_2015
|
567a589c6b1c161ebfba8dcf1b9f3bc1bf6e0d00
|
[
"MIT"
] | null | null | null |
import copy
reindeers = {}
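# Maps reindeer name -> [speed, fly time, rest time, distance so far, points].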
race_time = 2503
def distance_based_race():
winner_distance = 0
for reindeer in reindeers.values():
full_cycle = race_time // (reindeer[1] + reindeer[2])
remaining_time_last_cycle = race_time % (reindeer[1] + reindeer[2])
remaining_distance_last_cycle = remaining_time_last_cycle if remaining_time_last_cycle <= reindeer[1] else reindeer[1]
competitor_distance = reindeer[0] * ((full_cycle * reindeer[1]) + remaining_distance_last_cycle)
if competitor_distance > winner_distance:
winner_distance = competitor_distance
return winner_distance
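# Worked example of the closed-form cycle arithmetic above, assuming the well-known
# sample reindeer from the puzzle statement (values quoted from memory, not read
# from input.txt): Comet flies 14 km/s for 10 s, then rests 127 s. Over a 1000 s race:
#   full_cycle                = 1000 // (10 + 127) = 7
#   remaining_time_last_cycle = 1000 %  (10 + 127) = 41 -> capped at 10 s of flight
#   competitor_distance       = 14 * (7 * 10 + 10) = 1120 km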
def point_based_race():
reindeers_state = copy.deepcopy(reindeers)
for timer in range(0, race_time):
leaders = []
for reindeer in reindeers:
if reindeers_state[reindeer][1] > 0:
reindeers_state[reindeer][3] += reindeers_state[reindeer][0]
reindeers_state[reindeer][1] -= 1
elif reindeers_state[reindeer][2] > 0:
reindeers_state[reindeer][2] -= 1
if reindeers_state[reindeer][2] == 0:
reindeers_state[reindeer][1] = reindeers[reindeer][1]
reindeers_state[reindeer][2] = reindeers[reindeer][2]
if leaders == [] or reindeers_state[leaders[0]][3] == reindeers_state[reindeer][3]:
leaders.append(reindeer)
elif reindeers_state[leaders[0]][3] < reindeers_state[reindeer][3]:
leaders = [reindeer]
for leader in leaders:
reindeers_state[leader][4] += 1
winner_points = 0
for reindeer in reindeers_state.values():
if reindeer[4] > winner_points:
winner_points = reindeer[4]
return winner_points
with open('input.txt') as input_file:
    for line in input_file:
        # Each line reads like "Comet can fly 14 km/s for 10 seconds, but then must
        # rest for 127 seconds." -> tokens 0, 3, 6 and 13 hold the name, speed,
        # fly time and rest time.
        split_line = line.rstrip('\n').rstrip('.').split(' ')
        reindeers[split_line[0]] = [int(split_line[3]), int(split_line[6]), int(split_line[13]), 0, 0]
print(distance_based_race())
print(point_based_race())
| 37.464286
| 126
| 0.632031
|
9034a574b7f1b034ff83a63bd57ac98b92ff0d51
| 1,267
|
py
|
Python
|
satgenpy/tests/test_basic.py
|
KaushikChavali/hypatia
|
6f701ce0fe745fe2687c75e5c90b5f235e6b1ce5
|
[
"MIT"
] | 70
|
2020-10-31T03:53:11.000Z
|
2022-03-30T12:22:24.000Z
|
satgenpy/tests/test_basic.py
|
KaushikChavali/hypatia
|
6f701ce0fe745fe2687c75e5c90b5f235e6b1ce5
|
[
"MIT"
] | 9
|
2021-03-19T06:20:24.000Z
|
2022-03-18T14:03:47.000Z
|
satgenpy/tests/test_basic.py
|
KaushikChavali/hypatia
|
6f701ce0fe745fe2687c75e5c90b5f235e6b1ce5
|
[
"MIT"
] | 39
|
2020-10-02T11:59:08.000Z
|
2022-03-27T16:41:06.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2020 ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from satgen import *
import unittest
class TestBasic(unittest.TestCase):
def test_import_worked(self):
print("Importing satgen worked")
| 40.870968
| 80
| 0.770324
|
5afdcfdbcc3f45e501cc9c8305ec79e5cbd016d2
| 42,170
|
py
|
Python
|
custos-client-sdks/custos-python-sdk/build/lib/custos/server/integration/UserManagementService_pb2.py
|
apache/airavata-custos
|
075dd26c364b5b5abe8a4f2b226b2de30474f8e4
|
[
"Apache-2.0"
] | 10
|
2019-05-21T22:42:35.000Z
|
2022-03-25T15:58:09.000Z
|
custos-client-sdks/custos-python-sdk/build/lib/custos/server/integration/UserManagementService_pb2.py
|
apache/airavata-custos
|
075dd26c364b5b5abe8a4f2b226b2de30474f8e4
|
[
"Apache-2.0"
] | 83
|
2019-02-22T12:22:14.000Z
|
2022-03-30T13:42:47.000Z
|
custos-client-sdks/custos-python-sdk/build/lib/custos/server/integration/UserManagementService_pb2.py
|
apache/airavata-custos
|
075dd26c364b5b5abe8a4f2b226b2de30474f8e4
|
[
"Apache-2.0"
] | 20
|
2019-02-22T08:10:05.000Z
|
2021-11-07T19:37:04.000Z
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: UserManagementService.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
import custos.server.core.UserProfileService_pb2 as UserProfileService__pb2
import custos.server.core.IamAdminService_pb2 as IamAdminService__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='UserManagementService.proto',
package='org.apache.custos.user.management.service',
syntax='proto3',
serialized_options=b'P\001Z\004./pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bUserManagementService.proto\x12)org.apache.custos.user.management.service\x1a\x1cgoogle/api/annotations.proto\x1a\x18UserProfileService.proto\x1a\x15IamAdminService.proto\"\xc8\x01\n\x12UserProfileRequest\x12I\n\x0cuser_profile\x18\x01 \x01(\x0b\x32\x33.org.apache.custos.user.profile.service.UserProfile\x12\x11\n\tclient_id\x18\x02 \x01(\t\x12\x11\n\ttenant_id\x18\x03 \x01(\x03\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x04 \x01(\t\x12\x15\n\rclient_secret\x18\x05 \x01(\t\x12\x14\n\x0cperformed_by\x18\x06 \x01(\t\"q\n\x0eGetUserRequest\x12\x10\n\x08username\x18\x01 \x01(\t\x12M\n\x13user_search_request\x18\x02 \x01(\x0b\x32\x30.org.apache.custos.iam.service.UserSearchRequest\"\xa6\x01\n\x0fGetUsersRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\x03\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x10\n\x08username\x18\x03 \x01(\t\x12\x0e\n\x06offset\x18\x04 \x01(\x05\x12\r\n\x05limit\x18\x05 \x01(\x05\x12\x0e\n\x06search\x18\x06 \x01(\t\x12\x15\n\riam_client_id\x18\x07 \x01(\t\x12\x19\n\x11iam_client_secret\x18\x08 \x01(\t\"\x8e\x01\n\rResetPassword\x12\x11\n\ttenant_id\x18\x01 \x01(\x03\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x02 \x01(\t\x12\x10\n\x08username\x18\x03 \x01(\t\x12\x10\n\x08password\x18\x04 \x01(\t\x12\x15\n\riam_client_id\x18\x05 \x01(\t\x12\x19\n\x11iam_client_secret\x18\x06 \x01(\t\"k\n\x14ResetPasswordRequest\x12S\n\x11password_metadata\x18\x01 \x01(\x0b\x32\x38.org.apache.custos.user.management.service.ResetPassword\"\xd9\x01\n\x16LinkUserProfileRequest\x12\x18\n\x10\x63urrent_username\x18\x01 \x01(\t\x12\x19\n\x11previous_username\x18\x02 \x01(\t\x12\x1a\n\x12linking_attributes\x18\x03 \x03(\t\x12\x10\n\x08tenantId\x18\x04 \x01(\x03\x12\x15\n\riam_client_id\x18\x05 \x01(\t\x12\x19\n\x11iam_client_secret\x18\x06 \x01(\t\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x07 \x01(\t\x12\x14\n\x0cperformed_by\x18\x08 \x01(\t\"@\n\x18SynchronizeUserDBRequest\x12\x11\n\ttenant_id\x18\x02 \x01(\x03\x12\x11\n\tclient_id\x18\x04 
\x01(\t2\xeb\x1f\n\x15UserManagementService\x12\xa3\x01\n\x0cregisterUser\x12\x32.org.apache.custos.iam.service.RegisterUserRequest\x1a\x33.org.apache.custos.iam.service.RegisterUserResponse\"*\x82\xd3\xe4\x93\x02$\"\x1c/user-management/v1.0.0/user:\x04user\x12\xb1\x01\n\x16registerAndEnableUsers\x12\x33.org.apache.custos.iam.service.RegisterUsersRequest\x1a\x34.org.apache.custos.iam.service.RegisterUsersResponse\",\x82\xd3\xe4\x93\x02&\"\x1d/user-management/v1.0.0/users:\x05users\x12\xa8\x01\n\x11\x61\x64\x64UserAttributes\x12\x37.org.apache.custos.iam.service.AddUserAttributesRequest\x1a..org.apache.custos.iam.service.OperationStatus\"*\x82\xd3\xe4\x93\x02$\"\"/user-management/v1.0.0/attributes\x12\xad\x01\n\x14\x64\x65leteUserAttributes\x12\x39.org.apache.custos.iam.service.DeleteUserAttributeRequest\x1a..org.apache.custos.iam.service.OperationStatus\"*\x82\xd3\xe4\x93\x02$*\"/user-management/v1.0.0/attributes\x12\xa8\x01\n\nenableUser\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a\x31.org.apache.custos.iam.service.UserRepresentation\"5\x82\xd3\xe4\x93\x02/\"\'/user-management/v1.0.0/user/activation:\x04user\x12\xab\x01\n\x0b\x64isableUser\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a\x31.org.apache.custos.iam.service.UserRepresentation\"7\x82\xd3\xe4\x93\x02\x31\")/user-management/v1.0.0/user/deactivation:\x04user\x12\xaa\x01\n\x14grantAdminPrivileges\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a..org.apache.custos.iam.service.OperationStatus\"0\x82\xd3\xe4\x93\x02*\"\"/user-management/v1.0.0/user/admin:\x04user\x12\xab\x01\n\x15removeAdminPrivileges\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a..org.apache.custos.iam.service.OperationStatus\"0\x82\xd3\xe4\x93\x02**\"/user-management/v1.0.0/user/admin:\x04user\x12\xa2\x01\n\x0f\x61\x64\x64RolesToUsers\x12\x32.org.apache.custos.iam.service.AddUserRolesRequest\x1a..org.apache.custos.iam.service.OperationStatus\"+\x82\xd3\xe4\x93\x02%\"#/user-management/v1.0.0/users/roles\x12\xa9\x01\n\risUserEnabled\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a..org.apache.custos.iam.service.OperationStatus\"6\x82\xd3\xe4\x93\x02\x30\x12./user-management/v1.0.0/user/activation/status\x12\xaa\x01\n\x13isUsernameAvailable\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a..org.apache.custos.iam.service.OperationStatus\"1\x82\xd3\xe4\x93\x02+\x12)/user-management/v1.0.0/user/availability\x12\x94\x01\n\x07getUser\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a\x31.org.apache.custos.iam.service.UserRepresentation\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/user-management/v1.0.0/user\x12\x95\x01\n\tfindUsers\x12/.org.apache.custos.iam.service.FindUsersRequest\x1a\x30.org.apache.custos.iam.service.FindUsersResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/user-management/v1.0.0/users\x12\xa0\x01\n\rresetPassword\x12\x30.org.apache.custos.iam.service.ResetUserPassword\x1a..org.apache.custos.iam.service.OperationStatus\"-\x82\xd3\xe4\x93\x02\'\x1a%/user-management/v1.0.0/user/password\x12\x9a\x01\n\ndeleteUser\x12\x30.org.apache.custos.iam.service.UserSearchRequest\x1a..org.apache.custos.iam.service.OperationStatus\"*\x82\xd3\xe4\x93\x02$*\x1c/user-management/v1.0.0/user:\x04user\x12\xa4\x01\n\x0f\x64\x65leteUserRoles\x12\x35.org.apache.custos.iam.service.DeleteUserRolesRequest\x1a..org.apache.custos.iam.service.OperationStatus\"*\x82\xd3\xe4\x93\x02$*\"/user-management/v1.0.0/user/roles\x12\xc3\x01\n\x11updateUserProfile\x12=.org.apache.custos.user.management.service.UserProfileRequest\x1
a\x33.org.apache.custos.user.profile.service.UserProfile\":\x82\xd3\xe4\x93\x02\x34\x1a$/user-management/v1.0.0/user/profile:\x0cuser_profile\x12\xb2\x01\n\x0egetUserProfile\x12=.org.apache.custos.user.management.service.UserProfileRequest\x1a\x33.org.apache.custos.user.profile.service.UserProfile\",\x82\xd3\xe4\x93\x02&\x12$/user-management/v1.0.0/user/profile\x12\xb5\x01\n\x11\x64\x65leteUserProfile\x12=.org.apache.custos.user.management.service.UserProfileRequest\x1a\x33.org.apache.custos.user.profile.service.UserProfile\",\x82\xd3\xe4\x93\x02&*$/user-management/v1.0.0/user/profile\x12\xce\x01\n\x1agetAllUserProfilesInTenant\x12=.org.apache.custos.user.management.service.UserProfileRequest\x1a\x42.org.apache.custos.user.profile.service.GetAllUserProfilesResponse\"-\x82\xd3\xe4\x93\x02\'\x12%/user-management/v1.0.0/users/profile\x12\xb9\x01\n\x0flinkUserProfile\x12\x41.org.apache.custos.user.management.service.LinkUserProfileRequest\x1a..org.apache.custos.iam.service.OperationStatus\"3\x82\xd3\xe4\x93\x02-\"+/user-management/v1.0.0/user/profile/mapper\x12\xd8\x01\n\x19getUserProfileAuditTrails\x12\x42.org.apache.custos.user.profile.service.GetUpdateAuditTrailRequest\x1a\x43.org.apache.custos.user.profile.service.GetUpdateAuditTrailResponse\"2\x82\xd3\xe4\x93\x02,\x12*/user-management/v1.0.0/user/profile/audit\x12\xb9\x01\n\x12synchronizeUserDBs\x12\x43.org.apache.custos.user.management.service.SynchronizeUserDBRequest\x1a..org.apache.custos.iam.service.OperationStatus\".\x82\xd3\xe4\x93\x02(\"&/user-management/v1.0.0/db/synchronizeB\x08P\x01Z\x04./pbb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,UserProfileService__pb2.DESCRIPTOR,IamAdminService__pb2.DESCRIPTOR,])
_USERPROFILEREQUEST = _descriptor.Descriptor(
name='UserProfileRequest',
full_name='org.apache.custos.user.management.service.UserProfileRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='user_profile', full_name='org.apache.custos.user.management.service.UserProfileRequest.user_profile', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_id', full_name='org.apache.custos.user.management.service.UserProfileRequest.client_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tenant_id', full_name='org.apache.custos.user.management.service.UserProfileRequest.tenant_id', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='access_token', full_name='org.apache.custos.user.management.service.UserProfileRequest.access_token', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_secret', full_name='org.apache.custos.user.management.service.UserProfileRequest.client_secret', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='performed_by', full_name='org.apache.custos.user.management.service.UserProfileRequest.performed_by', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=154,
serialized_end=354,
)
_GETUSERREQUEST = _descriptor.Descriptor(
name='GetUserRequest',
full_name='org.apache.custos.user.management.service.GetUserRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='org.apache.custos.user.management.service.GetUserRequest.username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_search_request', full_name='org.apache.custos.user.management.service.GetUserRequest.user_search_request', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=356,
serialized_end=469,
)
_GETUSERSREQUEST = _descriptor.Descriptor(
name='GetUsersRequest',
full_name='org.apache.custos.user.management.service.GetUsersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenant_id', full_name='org.apache.custos.user.management.service.GetUsersRequest.tenant_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='org.apache.custos.user.management.service.GetUsersRequest.email', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='username', full_name='org.apache.custos.user.management.service.GetUsersRequest.username', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='org.apache.custos.user.management.service.GetUsersRequest.offset', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='org.apache.custos.user.management.service.GetUsersRequest.limit', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search', full_name='org.apache.custos.user.management.service.GetUsersRequest.search', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iam_client_id', full_name='org.apache.custos.user.management.service.GetUsersRequest.iam_client_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iam_client_secret', full_name='org.apache.custos.user.management.service.GetUsersRequest.iam_client_secret', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=472,
serialized_end=638,
)
_RESETPASSWORD = _descriptor.Descriptor(
name='ResetPassword',
full_name='org.apache.custos.user.management.service.ResetPassword',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenant_id', full_name='org.apache.custos.user.management.service.ResetPassword.tenant_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='access_token', full_name='org.apache.custos.user.management.service.ResetPassword.access_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='username', full_name='org.apache.custos.user.management.service.ResetPassword.username', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='password', full_name='org.apache.custos.user.management.service.ResetPassword.password', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iam_client_id', full_name='org.apache.custos.user.management.service.ResetPassword.iam_client_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iam_client_secret', full_name='org.apache.custos.user.management.service.ResetPassword.iam_client_secret', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=641,
serialized_end=783,
)
_RESETPASSWORDREQUEST = _descriptor.Descriptor(
name='ResetPasswordRequest',
full_name='org.apache.custos.user.management.service.ResetPasswordRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='password_metadata', full_name='org.apache.custos.user.management.service.ResetPasswordRequest.password_metadata', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=785,
serialized_end=892,
)
_LINKUSERPROFILEREQUEST = _descriptor.Descriptor(
name='LinkUserProfileRequest',
full_name='org.apache.custos.user.management.service.LinkUserProfileRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='current_username', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.current_username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='previous_username', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.previous_username', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='linking_attributes', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.linking_attributes', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tenantId', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.tenantId', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iam_client_id', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.iam_client_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iam_client_secret', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.iam_client_secret', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='access_token', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.access_token', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='performed_by', full_name='org.apache.custos.user.management.service.LinkUserProfileRequest.performed_by', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=895,
serialized_end=1112,
)
_SYNCHRONIZEUSERDBREQUEST = _descriptor.Descriptor(
name='SynchronizeUserDBRequest',
full_name='org.apache.custos.user.management.service.SynchronizeUserDBRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenant_id', full_name='org.apache.custos.user.management.service.SynchronizeUserDBRequest.tenant_id', index=0,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_id', full_name='org.apache.custos.user.management.service.SynchronizeUserDBRequest.client_id', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1114,
serialized_end=1178,
)
_USERPROFILEREQUEST.fields_by_name['user_profile'].message_type = UserProfileService__pb2._USERPROFILE
_GETUSERREQUEST.fields_by_name['user_search_request'].message_type = IamAdminService__pb2._USERSEARCHREQUEST
_RESETPASSWORDREQUEST.fields_by_name['password_metadata'].message_type = _RESETPASSWORD
DESCRIPTOR.message_types_by_name['UserProfileRequest'] = _USERPROFILEREQUEST
DESCRIPTOR.message_types_by_name['GetUserRequest'] = _GETUSERREQUEST
DESCRIPTOR.message_types_by_name['GetUsersRequest'] = _GETUSERSREQUEST
DESCRIPTOR.message_types_by_name['ResetPassword'] = _RESETPASSWORD
DESCRIPTOR.message_types_by_name['ResetPasswordRequest'] = _RESETPASSWORDREQUEST
DESCRIPTOR.message_types_by_name['LinkUserProfileRequest'] = _LINKUSERPROFILEREQUEST
DESCRIPTOR.message_types_by_name['SynchronizeUserDBRequest'] = _SYNCHRONIZEUSERDBREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UserProfileRequest = _reflection.GeneratedProtocolMessageType('UserProfileRequest', (_message.Message,), {
'DESCRIPTOR' : _USERPROFILEREQUEST,
'__module__' : 'UserManagementService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.management.service.UserProfileRequest)
})
_sym_db.RegisterMessage(UserProfileRequest)
GetUserRequest = _reflection.GeneratedProtocolMessageType('GetUserRequest', (_message.Message,), {
'DESCRIPTOR' : _GETUSERREQUEST,
'__module__' : 'UserManagementService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.management.service.GetUserRequest)
})
_sym_db.RegisterMessage(GetUserRequest)
GetUsersRequest = _reflection.GeneratedProtocolMessageType('GetUsersRequest', (_message.Message,), {
'DESCRIPTOR' : _GETUSERSREQUEST,
'__module__' : 'UserManagementService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.management.service.GetUsersRequest)
})
_sym_db.RegisterMessage(GetUsersRequest)
ResetPassword = _reflection.GeneratedProtocolMessageType('ResetPassword', (_message.Message,), {
'DESCRIPTOR' : _RESETPASSWORD,
'__module__' : 'UserManagementService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.management.service.ResetPassword)
})
_sym_db.RegisterMessage(ResetPassword)
ResetPasswordRequest = _reflection.GeneratedProtocolMessageType('ResetPasswordRequest', (_message.Message,), {
'DESCRIPTOR' : _RESETPASSWORDREQUEST,
'__module__' : 'UserManagementService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.management.service.ResetPasswordRequest)
})
_sym_db.RegisterMessage(ResetPasswordRequest)
LinkUserProfileRequest = _reflection.GeneratedProtocolMessageType('LinkUserProfileRequest', (_message.Message,), {
'DESCRIPTOR' : _LINKUSERPROFILEREQUEST,
'__module__' : 'UserManagementService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.management.service.LinkUserProfileRequest)
})
_sym_db.RegisterMessage(LinkUserProfileRequest)
SynchronizeUserDBRequest = _reflection.GeneratedProtocolMessageType('SynchronizeUserDBRequest', (_message.Message,), {
'DESCRIPTOR' : _SYNCHRONIZEUSERDBREQUEST,
'__module__' : 'UserManagementService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.management.service.SynchronizeUserDBRequest)
})
_sym_db.RegisterMessage(SynchronizeUserDBRequest)
DESCRIPTOR._options = None
_USERMANAGEMENTSERVICE = _descriptor.ServiceDescriptor(
name='UserManagementService',
full_name='org.apache.custos.user.management.service.UserManagementService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1181,
serialized_end=5256,
methods=[
_descriptor.MethodDescriptor(
name='registerUser',
full_name='org.apache.custos.user.management.service.UserManagementService.registerUser',
index=0,
containing_service=None,
input_type=IamAdminService__pb2._REGISTERUSERREQUEST,
output_type=IamAdminService__pb2._REGISTERUSERRESPONSE,
serialized_options=b'\202\323\344\223\002$\"\034/user-management/v1.0.0/user:\004user',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='registerAndEnableUsers',
full_name='org.apache.custos.user.management.service.UserManagementService.registerAndEnableUsers',
index=1,
containing_service=None,
input_type=IamAdminService__pb2._REGISTERUSERSREQUEST,
output_type=IamAdminService__pb2._REGISTERUSERSRESPONSE,
serialized_options=b'\202\323\344\223\002&\"\035/user-management/v1.0.0/users:\005users',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='addUserAttributes',
full_name='org.apache.custos.user.management.service.UserManagementService.addUserAttributes',
index=2,
containing_service=None,
input_type=IamAdminService__pb2._ADDUSERATTRIBUTESREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002$\"\"/user-management/v1.0.0/attributes',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='deleteUserAttributes',
full_name='org.apache.custos.user.management.service.UserManagementService.deleteUserAttributes',
index=3,
containing_service=None,
input_type=IamAdminService__pb2._DELETEUSERATTRIBUTEREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002$*\"/user-management/v1.0.0/attributes',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='enableUser',
full_name='org.apache.custos.user.management.service.UserManagementService.enableUser',
index=4,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._USERREPRESENTATION,
serialized_options=b'\202\323\344\223\002/\"\'/user-management/v1.0.0/user/activation:\004user',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='disableUser',
full_name='org.apache.custos.user.management.service.UserManagementService.disableUser',
index=5,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._USERREPRESENTATION,
serialized_options=b'\202\323\344\223\0021\")/user-management/v1.0.0/user/deactivation:\004user',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='grantAdminPrivileges',
full_name='org.apache.custos.user.management.service.UserManagementService.grantAdminPrivileges',
index=6,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002*\"\"/user-management/v1.0.0/user/admin:\004user',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='removeAdminPrivileges',
full_name='org.apache.custos.user.management.service.UserManagementService.removeAdminPrivileges',
index=7,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002**\"/user-management/v1.0.0/user/admin:\004user',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='addRolesToUsers',
full_name='org.apache.custos.user.management.service.UserManagementService.addRolesToUsers',
index=8,
containing_service=None,
input_type=IamAdminService__pb2._ADDUSERROLESREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002%\"#/user-management/v1.0.0/users/roles',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='isUserEnabled',
full_name='org.apache.custos.user.management.service.UserManagementService.isUserEnabled',
index=9,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\0020\022./user-management/v1.0.0/user/activation/status',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='isUsernameAvailable',
full_name='org.apache.custos.user.management.service.UserManagementService.isUsernameAvailable',
index=10,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002+\022)/user-management/v1.0.0/user/availability',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getUser',
full_name='org.apache.custos.user.management.service.UserManagementService.getUser',
index=11,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._USERREPRESENTATION,
serialized_options=b'\202\323\344\223\002\036\022\034/user-management/v1.0.0/user',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='findUsers',
full_name='org.apache.custos.user.management.service.UserManagementService.findUsers',
index=12,
containing_service=None,
input_type=IamAdminService__pb2._FINDUSERSREQUEST,
output_type=IamAdminService__pb2._FINDUSERSRESPONSE,
serialized_options=b'\202\323\344\223\002\037\022\035/user-management/v1.0.0/users',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='resetPassword',
full_name='org.apache.custos.user.management.service.UserManagementService.resetPassword',
index=13,
containing_service=None,
input_type=IamAdminService__pb2._RESETUSERPASSWORD,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002\'\032%/user-management/v1.0.0/user/password',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='deleteUser',
full_name='org.apache.custos.user.management.service.UserManagementService.deleteUser',
index=14,
containing_service=None,
input_type=IamAdminService__pb2._USERSEARCHREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002$*\034/user-management/v1.0.0/user:\004user',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='deleteUserRoles',
full_name='org.apache.custos.user.management.service.UserManagementService.deleteUserRoles',
index=15,
containing_service=None,
input_type=IamAdminService__pb2._DELETEUSERROLESREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002$*\"/user-management/v1.0.0/user/roles',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='updateUserProfile',
full_name='org.apache.custos.user.management.service.UserManagementService.updateUserProfile',
index=16,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=UserProfileService__pb2._USERPROFILE,
serialized_options=b'\202\323\344\223\0024\032$/user-management/v1.0.0/user/profile:\014user_profile',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getUserProfile',
full_name='org.apache.custos.user.management.service.UserManagementService.getUserProfile',
index=17,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=UserProfileService__pb2._USERPROFILE,
serialized_options=b'\202\323\344\223\002&\022$/user-management/v1.0.0/user/profile',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='deleteUserProfile',
full_name='org.apache.custos.user.management.service.UserManagementService.deleteUserProfile',
index=18,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=UserProfileService__pb2._USERPROFILE,
serialized_options=b'\202\323\344\223\002&*$/user-management/v1.0.0/user/profile',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getAllUserProfilesInTenant',
full_name='org.apache.custos.user.management.service.UserManagementService.getAllUserProfilesInTenant',
index=19,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=UserProfileService__pb2._GETALLUSERPROFILESRESPONSE,
serialized_options=b'\202\323\344\223\002\'\022%/user-management/v1.0.0/users/profile',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='linkUserProfile',
full_name='org.apache.custos.user.management.service.UserManagementService.linkUserProfile',
index=20,
containing_service=None,
input_type=_LINKUSERPROFILEREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002-\"+/user-management/v1.0.0/user/profile/mapper',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getUserProfileAuditTrails',
full_name='org.apache.custos.user.management.service.UserManagementService.getUserProfileAuditTrails',
index=21,
containing_service=None,
input_type=UserProfileService__pb2._GETUPDATEAUDITTRAILREQUEST,
output_type=UserProfileService__pb2._GETUPDATEAUDITTRAILRESPONSE,
serialized_options=b'\202\323\344\223\002,\022*/user-management/v1.0.0/user/profile/audit',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='synchronizeUserDBs',
full_name='org.apache.custos.user.management.service.UserManagementService.synchronizeUserDBs',
index=22,
containing_service=None,
input_type=_SYNCHRONIZEUSERDBREQUEST,
output_type=IamAdminService__pb2._OPERATIONSTATUS,
serialized_options=b'\202\323\344\223\002(\"&/user-management/v1.0.0/db/synchronize',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_USERMANAGEMENTSERVICE)
DESCRIPTOR.services_by_name['UserManagementService'] = _USERMANAGEMENTSERVICE
# @@protoc_insertion_point(module_scope)
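# A minimal usage sketch for the generated message classes registered above; the
# field values and the import alias are hypothetical, not taken from the project:
#
#     from custos.server.integration import UserManagementService_pb2 as um_pb2
#     reset = um_pb2.ResetPasswordRequest(
#         password_metadata=um_pb2.ResetPassword(
#             tenant_id=1, username="alice", password="s3cret"))
#     print(reset.password_metadata.username)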
| 55.124183
| 7,123
| 0.779298
|